# ==== davecan/easychain / test_blockchain.py  (MIT license) ====
from blockchain import Message, Block, Blockchain, InvalidBlockchain
import unittest
import hashlib
class TestBlockchain(unittest.TestCase):
def get_block(self, msg):
B = Block()
B.add_message(Message(msg))
return B
def get_blocks(self, *args):
L = []
for arg in args:
b = Block()
b.add_message(Message(arg))
L.append(b)
for i, block in enumerate(L):
block.link(L[i-1]) if i > 0 else None
block.seal()
return L
def test_creation(self):
chain = Blockchain()
self.assertEqual([], chain.blocks)
def test_add_block(self):
chain = Blockchain()
chain.add_block(self.get_block("some message"))
self.assertEqual(1, len(chain.blocks))
self.assertEqual("some message", chain.blocks[-1].messages[0].data)
def test_add_multiple_blocks_sets_hashes_correctly(self):
chain = Blockchain()
chain.blocks = self.get_blocks("first", "second", "third")
self.assertEqual(3, len(chain.blocks))
self.assertEqual("first", chain.blocks[0].messages[0].data)
self.assertEqual("second", chain.blocks[1].messages[0].data)
self.assertEqual("third", chain.blocks[2].messages[0].data)
self.assertIsNotNone(chain.blocks[-1].hash)
self.assertEqual(chain.blocks[1].prev_hash, chain.blocks[0].hash)
self.assertEqual(chain.blocks[2].prev_hash, chain.blocks[1].hash)
def test_invalid_block_breaks_chain(self):
chain = Blockchain()
chain.blocks = self.get_blocks("first", "second", "third", "fourth", "fifth")
chain.blocks[1].messages[0].data = "changed"
self.assertRaises(InvalidBlockchain, chain.validate)
if __name__ == '__main__':
unittest.main()
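
# Illustrative usage sketch for the blockchain API exercised by the tests above;
# it assumes the local `blockchain` module provides the same Message/Block/
# Blockchain methods used in TestBlockchain.
def _blockchain_demo():
    chain = Blockchain()
    prev = None
    for text in ("genesis", "second", "third"):
        block = Block()
        block.add_message(Message(text))
        if prev is not None:
            block.link(prev)
        block.seal()
        chain.add_block(block)
        prev = block
    chain.validate()  # raises InvalidBlockchain if any hash no longer matches
    return chain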

# ==== yosshy/bergenholm / bergenholm/database/templates.py  (Apache-2.0 license) ====
# -*- encoding:utf-8 -*-
import logging
import jinja2
from flask import abort
from flask import json
mongo = None
jinja_env = jinja2.Environment()
def get_templates():
templates = mongo.db.templates.find({}, {'_id': 1})
return dict(templates=[t["_id"] for t in templates])
def get_template(name):
template = mongo.db.templates.find_one_or_404({'_id': name})
template.pop("_id", None)
return template["content"]
def create_template(name, content):
template = mongo.db.templates.find_one({'_id': name})
if template:
abort(400)
try:
mongo.db.templates.insert(dict(_id=name, content=content))
except:
abort(400)
def update_template(name, content):
mongo.db.templates.find_one_or_404({'_id': name})
try:
mongo.db.templates.update({'_id': name},
{"$set": dict(content=content)})
except:
abort(400)
def delete_template(name):
mongo.db.templates.find_one_or_404({'_id': name})
try:
mongo.db.templates.remove({'_id': name})
except:
abort(400)
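
# Illustrative sketch: this module expects the module-level `mongo` to be
# injected (e.g. a Flask-PyMongo instance). The in-memory stand-in below is
# hypothetical and only mimics the collection methods used by get_templates
# and get_template.
class _FakeCollection(object):
    def __init__(self, docs):
        self._docs = docs
    def find(self, spec, fields=None):
        return list(self._docs)
    def find_one_or_404(self, spec):
        for doc in self._docs:
            if doc["_id"] == spec["_id"]:
                return dict(doc)
        abort(404)

class _FakeMongo(object):
    class db(object):
        templates = _FakeCollection([{"_id": "ipxe", "content": "#!ipxe\nboot"}])

if __name__ == '__main__':
    mongo = _FakeMongo()                  # inject the stand-in
    print(get_templates())                # {'templates': ['ipxe']}
    print(get_template("ipxe"))           # '#!ipxe\nboot'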

# ==== arichnad/getflix / getdirector.py  (GPL-2.0 license) ====
#!/usr/bin/python
##########################################################
#This script has been created by
#Copyright 2006 Devanshu Mehta
# of http://www.scienceaddiction.com
#It is released under the GNU General Public License v2
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# This pulls information about directors of the films you have rated
# from the Netflix web site. Please run getflix.pl first!
#
# Generates file called directors.txt
# Format: DirectorName~Rating
import re
from mechanize import Browser
import sys
import urllib2
idfile=open('directors.txt','w')
rat=0
id=0
tempstr=''
br=Browser()
for line in open('netflix.txt').xreadlines( ):
arr=line.split('~')
rat=int(arr.pop())
id=arr.pop(0)
tempstr="http://www.netflix.com/MovieDisplay?movieid="+id
br.set_handle_robots(False)
f=br.open(tempstr)
i=0
body=f.read()
i=body.find('Director:')
directorline=body[i+14:i+75]
list=directorline.split('<')
director=list[0].strip('. ')
tempstr=director+"~"+str(rat)+"\n"
idfile.write(tempstr)
print tempstr
idfile.flush()
#br=Browser()
#br.set_handle_robots(False)
#f=br.open(sys.argv[1])
#body=f.read()
#i=0
#i=body.find('Director')
#directorline=body[i+14:i+75]
#list=directorline.split('<')
#director=list[0].strip('. ')
#print director
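
# Hypothetical follow-up: aggregate the "DirectorName~Rating" lines written to
# directors.txt above into an average rating per director.
def average_director_ratings(path='directors.txt'):
    totals = {}
    for line in open(path):
        name, rating = line.strip().rsplit('~', 1)
        total, count = totals.get(name, (0, 0))
        totals[name] = (total + int(rating), count + 1)
    return dict((name, float(total) / count)
                for name, (total, count) in totals.items())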

# ==== flennerhag/mlens / mlens/metrics/utils.py  (MIT license) ====
"""ML-ENSEMBLE
:author: Sebastian Flennerhag
:copyright: 2017-2018
:licence: MIT
Utility functions for constructing metrics
"""
from __future__ import division
import warnings
import numpy as np
from ..utils.exceptions import MetricWarning
try:
from collections import OrderedDict as _dict
except ImportError:
_dict = dict
def _get_string(obj, dec):
"""Stringify object"""
try:
return '{0:.{dec}f}'.format(obj, dec=dec)
except (TypeError, ValueError):
return obj.__str__()
def _get_partitions(obj):
"""Check if any entry has partitions"""
for name, _ in obj:
if int(name.split('.')[-2]) > 0:
return True
return False
def _split(f, s, a_p='', a_s='', b_p='', b_s='', reverse=False):
"""Split string on a symbol and return two string, first possible empty"""
splitted = f.split(s)
if len(splitted) == 1:
a, b = '', splitted[0]
if reverse:
b, a = a, b
else:
a, b = splitted
if a:
a = '%s%s%s' % (a_p, a, a_s)
if b:
b = '%s%s%s' % (b_p, b, b_s)
return a, b
class Data(_dict):
"""Wrapper class around dict to get pretty prints
:class:`Data` is an ordered dictionary that implements a dedicated
pretty print method for a nested dictionary. Printing a :class:`Data`
dictionary provides a human-readable table. The input dictionary is
expected to have two levels: the first level gives the columns and the
second level the rows. Rows names are parsed as
``[OUTER]/[MIDDLE].[INNER]--[IDX]``, where IDX has to be an integer. All
entries are optional.
.. seealso::
:func:`assemble_data`, :func:`assemble_table`
Warning
-------
:class:`Data` is an internal class that expects a particular functions.
This class cannot be used as a general drop-in replacement for the standard
``dict`` class.
Examples
--------
>>> from mlens.metrics import Data
>>> d = [('row-idx-1.row-idx-2.0.0', {'column-1': 0.1, 'column-2': 0.1})]
>>> data = Data(d)
>>> print(data)
column-2-m column-2-s column-1-m column-1-s
row-idx-1 row-idx-2 0.10 0.00 0.10 0.00
"""
def __init__(self, data=None, padding=2, decimals=2):
if isinstance(data, list):
data = assemble_data(data)
super(Data, self).__init__(data)
self.__padding__ = padding
self.__decimals__ = decimals
def __repr__(self):
return assemble_table(self, self.__padding__, self.__decimals__)
def assemble_table(data, padding=2, decimals=2):
"""Construct data table from input dict
Given a nested dictionary formed by :func:`assemble_data`,
:func:`assemble_table` returns a string that prints the contents of
the input in tabular format. The input dictionary is
expected to have two levels: the first level gives the columns and the
second level the rows. Rows names are parsed as
``[OUTER]/[MIDDLE].[INNER]--[IDX]``, where IDX must be an integer. All
entries are optional.
.. seealso::
:class:`Data`, :func:`assemble_data`
Examples
--------
>>> from mlens.metrics import assemble_data, assemble_table
>>> d = [('row-idx-1.row-idx-2.0.0', {'column-1': 0.1, 'column-2': 0.1})]
>>> print(assemble_table(assemble_data(d)))
column-2-m column-2-s column-1-m column-1-s
row-idx-1 row-idx-2 0.10 0.00 0.10 0.00
"""
buffer = 0
row_glossary = ['layer', 'case', 'est', 'part']
cols = list()
rows = list()
row_keys = list()
max_col_len = dict()
max_row_len = {r: 0 for r in row_glossary}
# First, measure the maximum length of each column in table
for key, val in data.items():
cols.append(key)
max_col_len[key] = len(key)
# dat_key is the estimators. Number of columns is not fixed so need
# to assume all exist and purge empty columns
for dat_key, v in sorted(val.items()):
if not v:
# Safety: no data
continue
v_ = len(_get_string(v, decimals))
if v_ > max_col_len[key]:
max_col_len[key] = v_
if dat_key in row_keys:
# Already mapped row entry name
continue
layer, k = _split(dat_key, '/')
case, k = _split(k, '.')
est, part = _split(k, '--', reverse=True)
# Header space before column headings
items = [i for i in [layer, case, est, part] if i != '']
buffer = max(buffer, len(' '.join(items)))
for k, v in zip(row_glossary, [layer, case, est, part]):
v_ = len(v)
if v_ > max_row_len[k]:
max_row_len[k] = v_
dat = _dict()
dat['layer'] = layer
dat['case'] = case
dat['est'] = est
dat['part'] = part
row_keys.append(dat_key)
rows.append(dat)
# Check which row name columns we can drop (ex partition number)
drop = list()
for k, v in max_row_len.items():
if v == 0:
drop.append(k)
# Header
out = " " * (buffer + padding)
for col in cols:
adj = max_col_len[col] - len(col) + padding
out += " " * adj + col
out += "\n"
# Entries
for dat_key, dat in zip(row_keys, rows):
# Estimator name
for key, val in dat.items():
if key in drop:
continue
adj = max_row_len[key] - len(val) + padding
out += val + " " * adj
# Data
for col in cols:
item = data[col][dat_key]
if not item and item != 0:
out += " " * (max_col_len[col] + padding)
continue
item_ = _get_string(item, decimals)
adj = max_col_len[col] - len(item_) + padding
out += " " * adj + item_
out += "\n"
return out
def assemble_data(data_list):
"""Build a data dictionary out of a list of entries and data dicts
Given a list named tuples of dictionaries, :func:`assemble_data`
returns a nested ordered dictionary with data keys as outer keys and
tuple names as inner keys. The returned dictionary can be printed in
tabular format by :func:`assemble_table`.
.. seealso::
:class:`Data`, :func:`assemble_table`
Examples
--------
>>> from mlens.metrics import assemble_data, assemble_table
>>> d = [('row-idx-1.row-idx-2.0.0', {'column-1': 0.1, 'column-2': 0.1})]
>>> print(assemble_table(assemble_data(d)))
column-2-m column-2-s column-1-m column-1-s
row-idx-1 row-idx-2 0.10 0.00 0.10 0.00
"""
data = _dict()
tmp = _dict()
partitions = _get_partitions(data_list)
# Collect scores per preprocessing case and estimator(s)
for name, data_dict in data_list:
if not data_dict:
continue
prefix, name = _split(name, '/', a_s='/')
# Names are either est.i.j or case.est.i.j
splitted = name.split('.')
if partitions:
name = tuple(splitted[:-1])
if len(name) == 3:
name = '%s.%s--%s' % name
else:
name = '%s--%s' % name
else:
name = '.'.join(splitted[:-2])
name = '%s%s' % (prefix, name)
if name not in tmp:
# Set up data struct for name
tmp[name] = _dict()
for k in data_dict.keys():
tmp[name][k] = list()
if '%s-m' % k not in data:
data['%s-m' % k] = _dict()
data['%s-s' % k] = _dict()
data['%s-m' % k][name] = list()
data['%s-s' % k][name] = list()
# collect all data dicts belonging to name
for k, v in data_dict.items():
tmp[name][k].append(v)
# Aggregate to get mean and std
for name, data_dict in tmp.items():
for k, v in data_dict.items():
if not v:
continue
try:
# Purge None values from the main est due to no predict times
v = [i for i in v if i is not None]
if v:
data['%s-m' % k][name] = np.mean(v)
data['%s-s' % k][name] = np.std(v)
except Exception as exc:
warnings.warn(
"Aggregating data for %s failed. Raw data:\n%r\n"
"Details: %r" % (k, v, exc), MetricWarning)
# Check if there are empty columns
discard = list()
for key, data_dict in data.items():
empty = True
for val in data_dict.values():
if val or val == 0:
empty = False
if empty:
discard.append(key)
for key in discard:
data.pop(key)
return data
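
# Illustrative sketch of the pipeline above: build the nested dict with
# assemble_data and render it through Data/assemble_table. The entry names
# follow the "[case].[est].[partition].[fold]" pattern parsed above; the scores
# themselves are made up.
if __name__ == '__main__':
    scores = [
        ('case-1.ols.0.0', {'score': 0.81, 'time': 0.12}),
        ('case-1.ols.0.1', {'score': 0.79, 'time': 0.11}),
        ('case-1.ridge.0.0', {'score': 0.84, 'time': 0.15}),
    ]
    print(Data(scores))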

# ==== alexei-matveev/ccp1gui / objects/numeric.py  (GPL-2.0 license) ====
_Numeric=None
_numpy=None
try:
import Numeric as _Numeric
except ImportError:
try:
import numpy as _numpy
except ImportError:
pass
# Test numpy
#_Numeric=None
#import numpy as _numpy
def usingNumeric():
"""Return True if we are using Numeric"""
global _Numeric
if _Numeric: return True
return False
def usingNumpy():
"""Return True if we are using numpy"""
global _numpy
if _numpy: return True
return False
def isAvailable():
"""Return True or False depending on whether we have linear algebra functionality"""
if usingNumeric() or usingNumpy(): return True
return False
def array(object,**kw):
global _Numeric, _numpy
if _Numeric:
return _Numeric.array(object,**kw)
elif _numpy:
return _numpy.array(object,**kw)
else:
raise AttributeError("No numeric functionality to deal with an array.")
def matrixmultiply(array1,array2,**kw):
global _Numeric, _numpy
if _Numeric:
return _Numeric.matrixmultiply(array1,array2,**kw)
elif _numpy:
return _numpy.dot(array1,array2,**kw)
else:
raise AttributeError("No numeric functionality to deal with matrixmultiply.")
def reshape(array,newshape,**kw):
global _Numeric, _numpy
if _Numeric:
return _Numeric.reshape(array,newshape,**kw)
elif _numpy:
return _numpy.reshape(array,newshape,**kw)
else:
raise AttributeError("No numeric functionality to deal with reshape.")
def transpose(array,**kw):
global _Numeric, _numpy
if _Numeric:
return _Numeric.transpose(array,**kw)
elif _numpy:
return _numpy.transpose(array,**kw)
else:
raise AttributeError("No numeric functionality to deal with transpose.")
def zeros(array,**kw):
global _Numeric, _numpy
if _Numeric:
return _Numeric.zeros(array,**kw)
elif _numpy:
return _numpy.zeros(array,**kw)
else:
raise AttributeError("No numeric functionality to zero an array.")
if __name__=="__main__":
import Numeric
import numpy
a = [
[ -121.41295785, -3.39655004, -1.22443129, 0., -35.94746644, -21.23132728 ],
[ -3.39655004, -96.82243358, -0.38162982, 0., -25.73131733, -13.03766446 ],
[ -1.22443129, -0.38162982, -95.95695143, 0., 0., -13.03766446 ],
[ 0., 0., 0., -95.5753216, 0., 0., ],
[ -35.94746644, -25.73131733, 0., 0., -70.19263086, -13.62411618 ],
[ -21.23132728, -13.03766446, -13.03766446, 0., -13.62411618, -63.98948192 ],
]
num_a = Numeric.array(a)
npy_a = numpy.array(a)
num_t = Numeric.transpose(num_a)
npy_t = numpy.transpose(npy_a)
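
# Illustrative sketch of the intended fallback usage: callers guard on
# isAvailable() and then use the backend-agnostic wrappers defined above.
def _demo_matmul():
    if not isAvailable():
        return None
    a = array([[1.0, 2.0], [3.0, 4.0]])
    b = array([[5.0], [6.0]])
    return matrixmultiply(a, b)  # 2x1 result, whichever backend is present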

# ==== wenqiuhua/mkdocs / mkdocs/toc.py  (BSD-2-Clause license) ====
# coding: utf-8
"""
Deals with generating the per-page table of contents.
For the sake of simplicity we use an existing markdown extension to generate
an HTML table of contents, and then parse that into the underlying data.
The steps we take to generate a table of contents are:
* Pre-process the markdown, injecting a [TOC] marker.
* Generate HTML from markdown.
* Post-process the HTML, spliting the content and the table of contents.
* Parse table of contents HTML into the underlying data structure.
"""
import re
TOC_LINK_REGEX = re.compile('<a href=["]([^"]*)["]>([^<]*)</a>')
class TableOfContents(object):
"""
Represents the table of contents for a given page.
"""
def __init__(self, html):
self.items = _parse_html_table_of_contents(html)
def __iter__(self):
return iter(self.items)
def __str__(self):
return ''.join([str(item) for item in self])
class AnchorLink(object):
"""
A single entry in the table of contents.
"""
def __init__(self, title, url):
self.title, self.url = title, url
self.children = []
def __str__(self):
return self._indent_print()
def _indent_print(self, depth=0):
indent = ' ' * depth
ret = '%s%s - %s\n' % (indent, self.title, self.url)
for item in self.children:
ret += item._indent_print(depth + 1)
return ret
def _parse_html_table_of_contents(html):
"""
Given a table of contents string that has been automatically generated by
the markdown library, parse it into a tree of AnchorLink instances.
Returns a list of all the parent AnchorLink instances.
"""
lines = html.splitlines()[2:-2]
parents = []
ret = []
for line in lines:
match = TOC_LINK_REGEX.search(line)
if match:
href, title = match.groups()
nav = AnchorLink(title, href)
# Add the item to its parent if required. If it is a topmost
# item then instead append it to our return value.
if parents:
parents[-1].children.append(nav)
else:
ret.append(nav)
# If this item has children, store it as the current parent
if line.endswith('<ul>'):
parents.append(nav)
elif line.startswith('</ul>'):
if parents:
parents.pop()
# For the table of contents, always mark the first element as active
if ret:
ret[0].active = True
return ret
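
# Illustrative round trip with a hand-built snippet mimicking the HTML that the
# markdown "toc" extension produces and _parse_html_table_of_contents expects.
if __name__ == '__main__':
    _toc_html = '\n'.join([
        '<div class="toc">',
        '<ul>',
        '<li><a href="#intro">Intro</a><ul>',
        '<li><a href="#details">Details</a></li>',
        '</ul>',
        '</li>',
        '<li><a href="#usage">Usage</a></li>',
        '</ul>',
        '</div>',
    ])
    print(TableOfContents(_toc_html))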

# ==== franblas/pyAPI / src/pyapi/wikipedia.py  (MIT license) ====
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 09 10:54:39 2015
@author: Paco
"""
from api import API
class Wikipedia(API):
_class_name = 'Wikipedia'
_category = 'Data'
_help_url = 'http://en.wikipedia.org/w/api.php?action=help&modules=query'
_api_url = 'http://en.wikipedia.org/w/api.php?action=query&format=json&'
def _parsing_data(self,data):
keywiki = str(data['query']['pages'].keys()[0])
res = {'title':list()}
for d in data['query']['pages'][keywiki]['linkshere']:
res['title'].append(self._tools.key_test('title',d))
return res
def _parsing_data2(self,data):
keywiki = str(data['query']['pages'].keys()[0])
res = {'title':list()}
for d in data['query']['pages'][keywiki]['links']:
res['title'].append(self._tools.key_test('title',d))
return res
def _parsing_data3(self,data):
res = {'title':list()}
for d in data['query']['prefixsearch']:
res['title'].append(self._tools.key_test('title',d))
return res
def _parsing_data4(self,data):
res = {'title':list(),'timestamp':list(),'count':list()}
for d in data['query']['search']:
res['title'].append(self._tools.key_test('title',d))
res['timestamp'].append(self._tools.key_test('timestamp',d))
res['count'].append(self._tools.key_test('wordcount',d,'int'))
return res
def _parsing_data5(self,data):
res = {'title':list(),'latitude':list(),'longitude':list(),'distance':list()}
for d in data['query']['geosearch']:
res['title'].append(self._tools.key_test('title',d))
res['latitude'].append(self._tools.key_test('lat',d,'float'))
res['longitude'].append(self._tools.key_test('lon',d,'float'))
res['distance'].append(self._tools.key_test('dist',d,'float'))
return res
def _parsing_data6(self,data):
res = {'title':list(),'timestamp':list()}
for d in data['query']['protectedtitles']:
res['title'].append(self._tools.key_test('title',d))
res['timestamp'].append(self._tools.key_test('timestamp',d))
return res
def _parsing_data7(self,data):
res = {'title':list(),'timestamp':list()}
for d in data['query']['recentchanges']:
res['title'].append(self._tools.key_test('title',d))
res['timestamp'].append(self._tools.key_test('timestamp',d))
return res
def get_linksphere(self,text='',limit=10):
text = text.replace(' ','%20')
url = self._api_url+'prop=linkshere&titles='+text+'&lhlimit='+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data(data)
def get_links(self,text='',limit=10):
text = text.replace(' ','%20')
url = self._api_url+'prop=links&titles='+text+'&pllimit='+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data2(data)
def search_prefix(self,text='',limit=10):
text = text.replace(' ','%20')
url = self._api_url+'list=prefixsearch&pssearch='+text+'&pslimit='+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data3(data)
def search_text(self,text='',limit=10):
text = text.replace(' ','%20')
url = self._api_url+'list=search&srsearch='+text+'&srlimit='+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data4(data)
def search_geo(self,lat=48.858844,lon=2.294351,radius=2,limit=10):
url = self._api_url+'list=geosearch&gsradius='+str(radius*1000)+'&gscoord='+str(lat)+'|'+str(lon)+'&gslimit='+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data5(data)
def get_latest_protected(self,limit=10):
url = self._api_url+'list=protectedtitles&ptlimit'+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data6(data)
def get_latest_changes(self,limit=10):
url = self._api_url+'list=recentchanges&rclimit='+str(limit)
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data7(data)
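
# Illustrative usage sketch; it assumes the companion pyapi `api.API` base
# class can be constructed without arguments and that the MediaWiki endpoint
# is reachable.
def _wikipedia_demo(query='machine learning', limit=5):
    wiki = Wikipedia()
    hits = wiki.search_text(query, limit=limit)
    return list(zip(hits['title'], hits['timestamp']))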

# ==== bogdanbabych/morphosyntax / src/s020evaldictinput/md010extractinputwdV02.py  (Apache-2.0 license) ====
'''
Created on 28 Mar 2017
@author: bogdan
'''
import os, sys, re, codecs
# import md060graphonoLev
from macpath import join
class clExtractInputWd(object):
'''
extracting input words from a dictionary file in glossary format
reference dictionary file has mappings for the evaluation set, but does not have the tags;
*.num file has tags, but does not have mappings.
--> the task: to create a file which has mappings and PoS codes --> to be used for generating input (in a trivial way) and as a reference file
for checking the performance.
'''
def __init__(self, SFInMap, SFInNumPoS, SFInNumPoSTgt):
'''
takes the map file (from gold standard mapping) and PoS file with frequencies (output from corpus as *.num)
'''
FInMap = codecs.open(SFInMap, "r", encoding='utf-8', errors='ignore')
FInNumPoS = codecs.open(SFInNumPoS, "r", encoding='utf-8', errors='ignore')
FInNumPoSTgt = codecs.open(SFInNumPoSTgt, "r", encoding='utf-8', errors='ignore')
# FDebug = codecs.open('md010extractinputwd-debug.txt', "w", encoding='utf-8', errors='ignore')
# read two dictionaries from files, then intersect them (work within the shorter dictionary and look up the index in the larger one...)
DSrc2TrgMaps = self.readDictFromFile(FInMap, 0, [1])
# temporary testing -- printing out the dictionary read
# self.printDict2File(DSrc2TrgMaps)
DSrc2PoS = self.readDictFromFile(FInNumPoS, 1, [2,0])
# temporary testing -- printing out the dictionary read
# self.printDict2File(DSrc2PoS)
DSrc2PoSTgt = self.readDictFromFile(FInNumPoSTgt, 1, [2,0])
# intersect with the target; update dictionaries; calculate Lev distance >> different kinds; save / display results
(DIntersection, DDifference, DIntersectionTgt, DIntersectionDiffTgt) = self.intersectDicts(DSrc2TrgMaps, DSrc2PoS, DSrc2PoSTgt)
# self.printDict2File(DIntersection)
# self.printDict2File(DDifference)
# print(str(len(DIntersection.keys())))
# print(str(len(DDifference.keys())))
DIntersection2 = self.optimeseDict2num(DIntersection)
DIntersectionTgt2 = self.optimeseDict2num(DIntersectionTgt)
### temp ### self.printDict2num(DIntersection2)
self.printDict2num(DIntersectionTgt2)
## self.printDict2File(DSrc2PoSTgt)
## self.printDict2File(DSrc2PoS)
## self.printDict2File(DIntersection2)
## self.printDict2File(DIntersectionTgt2)
def intersectDicts(self, DSearch, DReference, DReferenceTgt):
"""
Two dictionaries: Search and Reference are intersected, and the Search dictionary is enriched with values from the reference
Third dictionary is the target reference dictionary, structure
медведеву [('VERB', '16'), ('INTJ', '5')]
"""
# dictionaries to be returned, which can be printed out
DIntersection = {}
DDifference = {}
DIntersectionTgt = {} # intersection checked also with target
DIntersectionDiffTgt = {} # there is intersection with source ref, but there is a difference with target equivalents references
# counts for the number of keys in the intersection and difference sets -- to be defined as len(Keys)
# IIntersection = 0
# IDifference = 0
for (SKey, LTValSearch) in DSearch.items():
# dictionary structure:
# логічний [('логический',), ('логичный',)]
if SKey in DReference:
# dictionary structure:
# медведеву [('VERB', '16'), ('INTJ', '5')]
# IIntersection += 1
LTValReference = DReference[SKey]
DIntersection[SKey] = (LTValReference, LTValSearch)
# added test 13/04/2017
# checking if target mapping are in the target dictionary; then -- compute Lev distance --> design gold-standard eval set...
try:
LTRefNRefTgtWord = [] # get the list of intersecting Words
for TTranslationMapped in LTValSearch:
STranslationMapped = TTranslationMapped[0] # take the first element only;
if STranslationMapped in DReferenceTgt: # if the target can be found
# more complex task: intersection of PoS codes:
# replace this:
# DIntersectionTgt[SKey] = (LTValReference, LTValSearch)
LTRefNRefTgtWord.append((STranslationMapped,)) # preserve the same format as LTValSearch
LTValReferenceTgtPoS = DReferenceTgt[STranslationMapped] # get values (pos codes for target
# get the list of intersecting PoS codes
LRefNRefTgtPoS = []
for TValReferenceTgtPoS in LTValReferenceTgtPoS: # [('VERB', '16'), ('INTJ', '5')]
# for each PoS in translated set : check if there exists the same PoS code and add it to the list; if the list is empty -- do not add to dictionary;
SValReferenceTgtPoS = TValReferenceTgtPoS[0] # get the PoS without frequency for the target
for TValReferencePoS in LTValReference:
SValReferencePoS = TValReferencePoS[0] # get PoS without frequency for the original
IValReferenceFrq = TValReferencePoS[1] # get frequency
if SValReferenceTgtPoS == SValReferencePoS: # if Pos codes are the same
LRefNRefTgtPoS.append((SValReferencePoS,IValReferenceFrq))
else:
sys.stderr.write('%(STranslationMapped)s\t%(SValReferenceTgtPoS)s\t%(SValReferencePoS)s\n' % locals())
if len(LRefNRefTgtPoS) > 0:
# DIntersectionTgt[SKey] = (LRefNRefTgtPoS, LTValSearch) # not the default LTValSearch, but only those which are found
DIntersectionTgt[SKey] = (LRefNRefTgtPoS, LTRefNRefTgtWord) # replaced....
else:
DIntersectionDiffTgt[SKey] = (LTValReference, LTValSearch) # remove also into the difference dictionary if no overlapping PoS were found
else:
DIntersectionDiffTgt[SKey] = (LTValReference, LTValSearch) # pos codes, translations
except:
sys.stderr.write('4.Mapping to target error:%(SKey)s\n' % locals())
else:
# IDifference += 1
DDifference[SKey] = LTValSearch
# at some point we need to print / return length of difference files --> how many were filtered out...
# return dictionary structure:
# логічний ([('PART', 1), ('ADJ', 500)], [('логический',), ('логичный',)])
return (DIntersection, DDifference, DIntersectionTgt, DIntersectionDiffTgt)
def readDictFromFile(self, FIn, IIndexField, LIValFields):
"""
reads a tab separated file and creates a dictionary with a given field as an index and a list of given fields as values
- creates index out of one field;
- if there is a repeated index record, adds values to the list of values (then removes repetitions?)
// potentially reusable in other packages...
- technical solution: do not read multiword equivalents (if they contain a space)
~ addition: treat PoS as a dictionary entry -- to enable to amalgamate different meanings of the word which are within the same pos, so no double entries exist
"""
DIndexVals = {} # dictionary with the structure {'SIndex' : [LTValues]} (list of tuples with fields)
for SLine in FIn:
SLine = SLine.rstrip()
try:
LLine = re.split('\t', SLine)
except:
sys.stderr.write('0:Split Error: %(SLine)s\n' % locals())
IIdxField = int(IIndexField)
try:
SKey = LLine[IIdxField]
except:
SKey = None
sys.stderr.write('1:Key Error: %(SLine)s;%(IIdxField)d\n' % locals())
LFieldsToCollect = []
for el in LIValFields:
IEl = int(el)
try:
Val = LLine[IEl]
except:
Val = ''
sys.stderr.write('2:Val Error: %(SLine)s;%(IEl)d\n' % locals())
# if Val == None: continue # do not add None values to the list of values
# no: we need exact number of values in a tuple, even with None values
# ad-hoc: not adding multiwords to reference
if re.search(' ', Val): continue # to be able to run conditionally and remove....
LFieldsToCollect.append(Val)
# updating the dictionary : checking if exists; if not exists
TVals = tuple(LFieldsToCollect)
if SKey == None: continue # do not process error lines
if TVals == (): continue # do not add an empty tuple : to be able to run conditionally and remove... -- important keys ignored, e.g., торік
if SKey in DIndexVals: # if the dictionary has key -- SKey
LTValues = DIndexVals[SKey]
else:
LTValues = []
LTValues.append(TVals) # adding the tuple with values for this record
DIndexVals[SKey] = LTValues
# end: for Sline in FIn:
return DIndexVals
def printDict2File(self, DToPint):
for (Key, Val) in DToPint.items():
SKey = str(Key)
SVal = str(Val)
print('%(SKey)s\t%(SVal)s' % locals())
def optimeseDict2num(self, DSnT2LT):
"""
move all structure-dependent solutions here:
1. remove empty entries
2. remove multiwords
3. join together multiple entries of the same PoS
~ prepare the dictionary for a usable format...
~ first step - remove repetitions
"""
DSnT2LTOut = {}
for (SKey, Val) in DSnT2LT.items():
(LTPoSFrq, LTTransl) = Val
DPoSFrq = {} # local dictionary for each record which may have multiple entries of the same PoS
LTPoSFrqOut = []
for T2PoSFrq in LTPoSFrq:
(SPoS, SFrq) = T2PoSFrq
IFrq = int(SFrq)
if SPoS in DPoSFrq:
IFrq0 = DPoSFrq[SPoS]
IFrq += IFrq0
DPoSFrq[SPoS] = IFrq
for (SPoSX, IFrqX) in DPoSFrq.items():
T2PosFrqOut = (SPoSX, IFrqX)
LTPoSFrqOut.append(T2PosFrqOut)
DSnT2LTOut[SKey] = (LTPoSFrqOut, LTTransl)
return DSnT2LTOut
def printDict2num(self, DSnT2LT): # dictionary : strings mapped to 2-tuples of lists of n-tuples
IItemCount = 0
IPoSCount = 0
IPoSMultiple = 0
IPoSInterjections = 0
# for (SKey, Val) in sorted(DSnT2LT.items(), reverse=True): # no sorting
for (SKey, Val) in DSnT2LT.items():
IItemCount +=1
try:
# unpacking 2-tuple of lists
(LTPoSFrq, LTTransl) = Val
except:
sys.stderr.write('3.1.Wrong tuple structure %(SKey)s\n' % locals())
ILenPoS = len(LTPoSFrq)
if ILenPoS > 1:
IPoSMultiple += 1
if ILenPoS > 2:
sys.stderr.write('%(SKey)s\t%(ILenPoS)d\n' % locals())
for T2PoSFrq in LTPoSFrq:
IPoSCount +=1
(SPoS, SFrq) = T2PoSFrq
if SPoS == 'INTJ' or SPoS == 'PRON': # skip interjections and pronouns
IPoSInterjections += 1
continue
# print the test file:
sys.stdout.write('%(SFrq)s\t%(SKey)s\t%(SPoS)s\t' % locals())
LWords = []
for TWord in LTTransl:
Word = TWord[0]
LWords.append(Word)
SWordsTrans = '~'.join(LWords)
sys.stdout.write('%(SWordsTrans)s\n' % locals())
# end: for (SKey, Val) DSnT2LT.items():
sys.stderr.write('Items:%(IItemCount)d\n' % locals())
sys.stderr.write('PoS:%(IPoSCount)d\n' % locals())
sys.stderr.write('Multi-PoS:%(IPoSMultiple)d\n' % locals())
sys.stderr.write('InterjectionsPronouns-PoS:%(IPoSInterjections)d\n' % locals())
if __name__ == '__main__':
OExtractInputWd = clExtractInputWd(sys.argv[1], sys.argv[2], sys.argv[3])
# dictionary with field ; frq word list with pos codes
# python3 md010extractinputwd.py ../../data/uk-ru-glossary/dict-uk-ru-uk-50k-s010-fields.txt ../../../xdata/morpho/uk.num >../../../xdata/morpho/uk2ru-cognates-pos-evalset.txt
| apache-2.0 | -5,307,623,337,904,379 | 39.372263 | 176 | 0.686737 | false | 3.011435 | false | false | false |

# ==== robertchase/rhc / rhc/task.py  (MIT license) ====
'''
The MIT License (MIT)
Copyright (c) 2013-2017 Robert H Chase
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import inspect
import logging
log = logging.getLogger(__name__)
class Task(object):
def __init__(self, callback, cid=None):
self._callback = [callback]
self.cid = cid
self.final = None # callable executed before callback (error or success)
@property
def callback(self):
if len(self._callback) == 1:
self.on_done()
return self._callback.pop()
@property
def is_done(self):
return len(self._callback) == 0
def on_done(self):
if self.final:
try:
self.final()
except Exception as e:
log.warning('cid=%s, failure running task final: %s', self.cid,
str(e))
def call(self, fn, args=None, kwargs=None, on_success=None, on_none=None,
on_error=None, on_timeout=None):
""" Call an async function.
Allows for flexible handling of the return states of async function
calls.
Parameters:
fn - callable async function (See Note 1)
args - None, scalar, tuple or list
Positional arguments to be passed to fn.
kwargs - None or dict
Keyword arguments to be passed to fn.
on_success - callable
called if specified and rc == 0 and
if none of on_success_code, on_none and on_none_404 apply
on_success(task, result)
on_error - callable
called if specified and rc != 0
on_error(task, result)
on_none - callable
called if specified and rc == 0 and result is None
on_none(task, None)
Notes:
1. An async function is structured like this:
fn(callback, *args, **kwargs)
When the function is complete, it calls callback with two
parameters:
rc - 0 for success, non-zero for error
result - function response on success, message on error
2. If the first parameter of fn (from inspection) is named 'task',
then an rhc.Task object is passed instead of a callable.
Example:
def on_load(task, result):
pass
task.call(
load,
args=id,
on_success=on_load,
)
This will call the load function, followed by on_load if the load
function completes sucessfully.
"""
def cb(rc, result):
if rc == 0:
_callback(self, fn, result, on_success, on_none)
else:
_callback_error(self, fn, result, on_error, on_timeout)
if args is None:
args = ()
elif not isinstance(args, (tuple, list)):
args = (args,)
if kwargs is None:
kwargs = {}
has_task = inspect_parameters(fn, kwargs)
if has_task:
self._callback.append(cb)
callback = self
else:
callback = cb
log.debug('task.call cid=%s fn=%s %s', self.cid, fn,
'as task' if has_task else '')
fn(callback, *args, **kwargs)
return self
def defer(self, task_cmd, partial_callback, final_fn=None):
# DEPRECATED: use call
''' defer the task until partial_callback completes; then call task_cmd
if partial_callback does not complete successfully, then task_cmd is not called;
instead, the error is handled by calling error on the task. final_fn, if
specified, is always called.
Parameters:
task_cmd - called with result of partial_callback on success
task_cmd(task, result)
partial_callback - function that takes a callback_fn
callback_fn is eventually called with (rc, result)
if rc != 0, partial_callback failed
final_fn - a function that is called once after the partial_callback
is complete. it takes no parameters.
'''
def on_defer(rc, result):
if final_fn:
try:
final_fn()
except Exception as e:
log.warning('failure running final_fn: %s', str(e))
if rc == 0:
task_cmd(self, result)
else:
self.error(result)
partial_callback(on_defer)
return self
def error(self, message):
# DEPRECATED
self.respond(message, 1)
def respond(self, result, rc=0):
# DEPRECATED: use callback
if self.is_done:
return
if self.final:
try:
self.final()
except Exception as e:
log.warning('failure running task final: %s', str(e))
self.callback(rc, result)
def unpartial(partial):
""" turn a partial into a callback_fn
undo the badness
"""
def _unpartial(cb, *args, **kwargs):
return partial(*args, **kwargs)(cb)
return _unpartial
def inspect_parameters(fn, kwargs):
task = False
# get a list of function parameters
args = inspect.getargspec(fn).args
# is the first parameter named 'task'
if len(args) and args[0] == 'task':
task = True
return task
def catch_exceptions(message):
def _catch_exceptions(task_handler):
def inner(task, *args, **kwargs):
try:
return task_handler(task, *args, **kwargs)
except Exception:
log.exception(message)
return inner
return _catch_exceptions
def _callback(task, fn, result, on_success, on_none):
if on_none and result is None:
try:
log.debug('task.callback, cid=%s, on_none fn=%s', task.cid, on_none)
return on_none(task, result)
except Exception as e:
return task.callback(1, 'exception during on_none: %s' % e)
if on_success:
try:
log.debug('task.callback, cid=%s, on_success fn=%s', task.cid, on_success)
return on_success(task, result)
except Exception as e:
return task.callback(1, 'exception during on_success: %s' % e)
log.debug('task.callback, cid=%s, default success callback', task.cid)
task.callback(0, result)
def _callback_error(task, fn, result, on_error, on_timeout):
if on_timeout and result == 'timeout':
try:
log.debug('task.callback, cid=%s, on_timeout fn=%s', task.cid, on_timeout)
return on_timeout(task, result)
except Exception as e:
return task.callback(1, 'exception during on_timeout: %s' % e)
if on_error:
try:
log.debug('task.callback, cid=%s, on_error fn=%s', task.cid, on_error)
return on_error(task, result)
except Exception as e:
return task.callback(1, 'exception during on_error: %s' % e)
log.debug('task.callback, cid=%s, default error callback', task.cid)
task.callback(1, result)
#
# STOP USING THIS defer-able STUFF
#
def wrap(callback_cmd, *args, **kwargs):
# DEPRECATED yucky complexity
''' helper function callback_cmd -> partially executed partial '''
return partial(callback_cmd)(*args, **kwargs)
def from_callback(task_cmd):
# DEPRECATED yucky complexity
''' helper function callback_cmd -> executing partial
if the caller invokes the wrapped or decorated task_cmd
using a standard callback syntax:
task_cmd(callback, *args, **kwargs)
then a task is generated from the callback, and a partial
is immediately started.
'''
def _wrap(callback, *args, **kwargs):
return partial(task_cmd)(*args, **kwargs)(callback)
return _wrap
def partial(fn):
# DEPRECATED yucky complexity
def _args(*args, **kwargs):
def _callback(callback_fn):
task = Task(callback_fn)
fn(task, *args, **kwargs)
return task
return _callback
return _args
| mit | -523,049,076,953,525,900 | 31.10274 | 92 | 0.576808 | false | 4.339815 | false | false | false |

# ==== faratro/django-oscar / sites/demo/apps/checkout/forms.py  (BSD-3-Clause license) ====
from django import forms
from oscar.apps.payment import forms as payment_forms
from oscar.apps.order.models import BillingAddress
class BillingAddressForm(payment_forms.BillingAddressForm):
"""
Extended version of the core billing address form that adds a field so
customers can choose to re-use their shipping address.
"""
SAME_AS_SHIPPING, NEW_ADDRESS = 'same', 'new'
CHOICES = (
(SAME_AS_SHIPPING, 'Use shipping address'),
(NEW_ADDRESS, 'Enter a new address'),
)
same_as_shipping = forms.ChoiceField(
widget=forms.RadioSelect, choices=CHOICES, initial=SAME_AS_SHIPPING)
class Meta(payment_forms.BillingAddressForm.Meta):
model = BillingAddress
exclude = ('search_text', 'first_name', 'last_name')
def __init__(self, shipping_address, data=None, *args, **kwargs):
# Store a reference to the shipping address
self.shipping_address = shipping_address
super(BillingAddressForm, self).__init__(data, *args, **kwargs)
# If no shipping address (eg a download), then force the
# 'same_as_shipping' field to have a certain value.
if shipping_address is None:
self.fields['same_as_shipping'].choices = (
(self.NEW_ADDRESS, 'Enter a new address'),)
self.fields['same_as_shipping'].initial = self.NEW_ADDRESS
# If using same address as shipping, we don't need require any of the
# required billing address fields.
if data and data.get('same_as_shipping', None) == self.SAME_AS_SHIPPING:
for field in self.fields:
if field != 'same_as_shipping':
self.fields[field].required = False
def _post_clean(self):
# Don't run model validation if using shipping address
if self.cleaned_data.get('same_as_shipping') == self.SAME_AS_SHIPPING:
return
super(BillingAddressForm, self)._post_clean()
def save(self, commit=True):
if self.cleaned_data.get('same_as_shipping') == self.SAME_AS_SHIPPING:
# Convert shipping address into billing address
billing_addr = BillingAddress()
self.shipping_address.populate_alternative_model(billing_addr)
if commit:
billing_addr.save()
return billing_addr
return super(BillingAddressForm, self).save(commit)
| bsd-3-clause | 7,598,267,666,983,002,000 | 40.396552 | 80 | 0.640983 | false | 4.021776 | false | false | false |

# ==== Pluto-tv/chromium-crosswalk / tools/telemetry/third_party/gsutilz/third_party/protorpc/protorpc/protobuf_test.py  (BSD-3-Clause license) ====
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for protorpc.protobuf."""
__author__ = '[email protected] (Rafe Kaplan)'
import datetime
import unittest
from protorpc import message_types
from protorpc import messages
from protorpc import protobuf
from protorpc import protorpc_test_pb2
from protorpc import test_util
from protorpc import util
# TODO: Add DateTimeFields to protorpc_test.proto when definition.py
# supports date time fields.
class HasDateTimeMessage(messages.Message):
value = message_types.DateTimeField(1)
class NestedDateTimeMessage(messages.Message):
value = messages.MessageField(message_types.DateTimeMessage, 1)
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
test_util.TestCase):
MODULE = protobuf
class EncodeMessageTest(test_util.TestCase,
test_util.ProtoConformanceTestBase):
"""Test message to protocol buffer encoding."""
PROTOLIB = protobuf
def assertErrorIs(self, exception, message, function, *params, **kwargs):
try:
function(*params, **kwargs)
self.fail('Expected to raise exception %s but did not.' % exception)
except exception as err:
self.assertEquals(message, str(err))
@property
def encoded_partial(self):
proto = protorpc_test_pb2.OptionalMessage()
proto.double_value = 1.23
proto.int64_value = -100000000000
proto.int32_value = 1020
proto.string_value = u'a string'
proto.enum_value = protorpc_test_pb2.OptionalMessage.VAL2
return proto.SerializeToString()
@property
def encoded_full(self):
proto = protorpc_test_pb2.OptionalMessage()
proto.double_value = 1.23
proto.float_value = -2.5
proto.int64_value = -100000000000
proto.uint64_value = 102020202020
proto.int32_value = 1020
proto.bool_value = True
proto.string_value = u'a string\u044f'
proto.bytes_value = b'a bytes\xff\xfe'
proto.enum_value = protorpc_test_pb2.OptionalMessage.VAL2
return proto.SerializeToString()
@property
def encoded_repeated(self):
proto = protorpc_test_pb2.RepeatedMessage()
proto.double_value.append(1.23)
proto.double_value.append(2.3)
proto.float_value.append(-2.5)
proto.float_value.append(0.5)
proto.int64_value.append(-100000000000)
proto.int64_value.append(20)
proto.uint64_value.append(102020202020)
proto.uint64_value.append(10)
proto.int32_value.append(1020)
proto.int32_value.append(718)
proto.bool_value.append(True)
proto.bool_value.append(False)
proto.string_value.append(u'a string\u044f')
proto.string_value.append(u'another string')
proto.bytes_value.append(b'a bytes\xff\xfe')
proto.bytes_value.append(b'another bytes')
proto.enum_value.append(protorpc_test_pb2.RepeatedMessage.VAL2)
proto.enum_value.append(protorpc_test_pb2.RepeatedMessage.VAL1)
return proto.SerializeToString()
@property
def encoded_nested(self):
proto = protorpc_test_pb2.HasNestedMessage()
proto.nested.a_value = 'a string'
return proto.SerializeToString()
@property
def encoded_repeated_nested(self):
proto = protorpc_test_pb2.HasNestedMessage()
proto.repeated_nested.add().a_value = 'a string'
proto.repeated_nested.add().a_value = 'another string'
return proto.SerializeToString()
unexpected_tag_message = (
chr((15 << protobuf._WIRE_TYPE_BITS) | protobuf._Encoder.NUMERIC) +
chr(5))
@property
def encoded_default_assigned(self):
proto = protorpc_test_pb2.HasDefault()
proto.a_value = test_util.HasDefault.a_value.default
return proto.SerializeToString()
@property
def encoded_nested_empty(self):
proto = protorpc_test_pb2.HasOptionalNestedMessage()
proto.nested.Clear()
return proto.SerializeToString()
@property
def encoded_repeated_nested_empty(self):
proto = protorpc_test_pb2.HasOptionalNestedMessage()
proto.repeated_nested.add()
proto.repeated_nested.add()
return proto.SerializeToString()
@property
def encoded_extend_message(self):
proto = protorpc_test_pb2.RepeatedMessage()
proto.add_int64_value(400)
proto.add_int64_value(50)
proto.add_int64_value(6000)
return proto.SerializeToString()
@property
def encoded_string_types(self):
proto = protorpc_test_pb2.OptionalMessage()
proto.string_value = u'Latin'
return proto.SerializeToString()
@property
def encoded_invalid_enum(self):
encoder = protobuf._Encoder()
field_num = test_util.OptionalMessage.enum_value.number
tag = (field_num << protobuf._WIRE_TYPE_BITS) | encoder.NUMERIC
encoder.putVarInt32(tag)
encoder.putVarInt32(1000)
return encoder.buffer().tostring()
def testDecodeWrongWireFormat(self):
"""Test what happens when wrong wire format found in protobuf."""
class ExpectedProto(messages.Message):
value = messages.StringField(1)
class WrongVariant(messages.Message):
value = messages.IntegerField(1)
original = WrongVariant()
original.value = 10
self.assertErrorIs(messages.DecodeError,
'Expected wire type STRING but found NUMERIC',
protobuf.decode_message,
ExpectedProto,
protobuf.encode_message(original))
def testDecodeBadWireType(self):
"""Test what happens when non-existant wire type found in protobuf."""
# Message has tag 1, type 3 which does not exist.
bad_wire_type_message = chr((1 << protobuf._WIRE_TYPE_BITS) | 3)
self.assertErrorIs(messages.DecodeError,
'No such wire type 3',
protobuf.decode_message,
test_util.OptionalMessage,
bad_wire_type_message)
def testUnexpectedTagBelowOne(self):
"""Test that completely invalid tags generate an error."""
# Message has tag 0, type NUMERIC.
invalid_tag_message = chr(protobuf._Encoder.NUMERIC)
self.assertErrorIs(messages.DecodeError,
'Invalid tag value 0',
protobuf.decode_message,
test_util.OptionalMessage,
invalid_tag_message)
def testProtocolBufferDecodeError(self):
"""Test what happens when there a ProtocolBufferDecodeError.
This is what happens when the underlying ProtocolBuffer library raises
it's own decode error.
"""
# Message has tag 1, type DOUBLE, missing value.
truncated_message = (
chr((1 << protobuf._WIRE_TYPE_BITS) | protobuf._Encoder.DOUBLE))
self.assertErrorIs(messages.DecodeError,
'Decoding error: truncated',
protobuf.decode_message,
test_util.OptionalMessage,
truncated_message)
def testProtobufUnrecognizedField(self):
"""Test that unrecognized fields are serialized and can be accessed."""
decoded = protobuf.decode_message(test_util.OptionalMessage,
self.unexpected_tag_message)
self.assertEquals(1, len(decoded.all_unrecognized_fields()))
self.assertEquals(15, decoded.all_unrecognized_fields()[0])
self.assertEquals((5, messages.Variant.INT64),
decoded.get_unrecognized_field_info(15))
def testUnrecognizedFieldWrongFormat(self):
"""Test that unrecognized fields in the wrong format are skipped."""
class SimpleMessage(messages.Message):
value = messages.IntegerField(1)
message = SimpleMessage(value=3)
message.set_unrecognized_field('from_json', 'test', messages.Variant.STRING)
encoded = protobuf.encode_message(message)
expected = (
chr((1 << protobuf._WIRE_TYPE_BITS) | protobuf._Encoder.NUMERIC) +
chr(3))
self.assertEquals(encoded, expected)
def testProtobufDecodeDateTimeMessage(self):
"""Test what happens when decoding a DateTimeMessage."""
nested = NestedDateTimeMessage()
nested.value = message_types.DateTimeMessage(milliseconds=2500)
value = protobuf.decode_message(HasDateTimeMessage,
protobuf.encode_message(nested)).value
self.assertEqual(datetime.datetime(1970, 1, 1, 0, 0, 2, 500000), value)
def testProtobufDecodeDateTimeMessageWithTimeZone(self):
"""Test what happens when decoding a DateTimeMessage with a time zone."""
nested = NestedDateTimeMessage()
nested.value = message_types.DateTimeMessage(milliseconds=12345678,
time_zone_offset=60)
value = protobuf.decode_message(HasDateTimeMessage,
protobuf.encode_message(nested)).value
self.assertEqual(datetime.datetime(1970, 1, 1, 3, 25, 45, 678000,
tzinfo=util.TimeZoneOffset(60)),
value)
def testProtobufEncodeDateTimeMessage(self):
"""Test what happens when encoding a DateTimeField."""
mine = HasDateTimeMessage(value=datetime.datetime(1970, 1, 1))
nested = NestedDateTimeMessage()
nested.value = message_types.DateTimeMessage(milliseconds=0)
my_encoded = protobuf.encode_message(mine)
encoded = protobuf.encode_message(nested)
self.assertEquals(my_encoded, encoded)
def testProtobufEncodeDateTimeMessageWithTimeZone(self):
"""Test what happens when encoding a DateTimeField with a time zone."""
for tz_offset in (30, -30, 8 * 60, 0):
mine = HasDateTimeMessage(value=datetime.datetime(
1970, 1, 1, tzinfo=util.TimeZoneOffset(tz_offset)))
nested = NestedDateTimeMessage()
nested.value = message_types.DateTimeMessage(
milliseconds=0, time_zone_offset=tz_offset)
my_encoded = protobuf.encode_message(mine)
encoded = protobuf.encode_message(nested)
self.assertEquals(my_encoded, encoded)
def main():
unittest.main()
if __name__ == '__main__':
main()
| bsd-3-clause | -724,236,808,323,153,300 | 34.086957 | 80 | 0.68268 | false | 3.978385 | true | false | false |

# ==== MaxTyutyunnikov/lino / obsolete/src/lino/webman/xdocutils.py  (GPL-3.0 license) ====
import os,sys
import traceback
from os.path import abspath, basename, dirname, normpath, join, exists
# from docutils import core #import publish_string, publish_file
from docutils import core, io
from docutils.writers import html4css1
#import em
from StringIO import StringIO
from textwrap import dedent
from docutils.parsers.rst.directives import register_directive
#from docutils.parsers.rst.directives.body import line_block
#from docutils.parsers.rst import Parser
#from docutils.parsers.rst.states import Inliner
from docutils import nodes, statemachine
## return line_block(name, arguments, options,
## content.splitlines(),
## lineno,
## content_offset, block_text, state, state_machine,
## node_class=nodes.literal_block)
## from docutils.parsers.rst import directives
## from docutils.parsers.rst.languages import en
## registry = directives._directive_registry
## registry['script'] = ('lino.timtools.txt2html','exec_script')
## #registry['script'] = ('txt2html','exec_script')
## en.directives['script'] = 'script'
class WebmanWriter(html4css1.Writer):
"""
adds a module-specific left (top, bottom) margin to each page.
implements the exec:: directive. Note that the exec directive
should rather be done by the parser, but I didn't work out how to
do this...
"""
def __init__(self,node):
html4css1.Writer.__init__(self)
#self.translator_class = MyHtmlTranslator
assert isinstance(node,nodes.Node)
self.node = node
# self.leftArea = leftArea
self.namespace = dict(globals())
self.namespace['page'] = node
#self.namespace['webmod'] = webmod
self.namespace['writer'] = self
register_directive('exec',self.exec_exec)
def exec_exec(self,
name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not content:
error = state_machine.reporter.error(
'The "%s" directive is empty; content required.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
text = '\n'.join(content)
text = dedent(text)
_stdout = sys.stdout
sys.stdout = StringIO()
try:
exec text in self.namespace,self.namespace
except Exception,e:
traceback.print_exc(None,sys.stderr)
#print e
stdout_text = sys.stdout.getvalue()
sys.stdout = _stdout
# sys.stderr.write(stdout_text)
insert_lines = statemachine.string2lines(stdout_text,
convert_whitespace=1)
state_machine.insert_input(insert_lines, "exec")
return []
exec_exec.content = 1
def translate(self):
"""
modified copy of superclass.translate()
translate() is called by write() and must place the HTML
output to self.output
"""
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.visitor = visitor
for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'fragment',
'body_suffix'):
setattr(self, attr, getattr(visitor, attr))
webmod = self.node.getModule()
if webmod.leftArea is not None:
html = webmod.leftArea(self.node)
self.body_prefix.append('''<table class="mainTable">
<tr>
<td valign="top" class="leftArea">
''')
self.body_prefix.append(html)
self.body_prefix.append('''</td>
<td class="textArea">''')
self.body_suffix.insert(0,'</td></tr></table>')
if webmod.bottomArea is not None:
html = webmod.bottomArea(self.node)
self.body_suffix.append('<div class="bottomArea">')
self.body_suffix.append(html)
self.body_suffix.append('</div>')
if webmod.topArea is not None:
raise NotImplementedError
self.output = self.astext()
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body
+ self.body_suffix)
## class WebmanInliner(Inliner):
## # no longer used since 20040922
## # but pageref role is now broken
## def __init__(self, webmod,roles={}):
## roles['fileref'] = self.fileref_role
## roles['pageref'] = self.pageref_role
## Inliner.__init__(self,roles)
## self.webmod = webmod
## def fileref_role(self, role, rawtext, text, lineno):
## if self.webmod.filerefBase is not None:
## if text.startswith('/'):
## localfile = normpath(join(self.webmod.filerefBase,text[1:]))
## else:
## localfile = normpath(join(self.webmod.filerefBase,text))
## #localfile = join(self.webmod.filerefBase,normpath(text))
## if not exists(localfile):
## msg = self.reporter.error('%s : no such file' % localfile,
## line=lineno)
## prb = self.problematic(text, text, msg)
## return [prb], [msg]
## if self.webmod.filerefURL is None:
## uri = None
## else:
## uri = self.webmod.filerefURL % text
## filename = basename(text)
## return [nodes.reference(rawtext, filename, refuri=uri)], []
## def pageref_role(self, role, rawtext, text, lineno):
## # doesn't work
## if self.webmod.filerefBase is None:
## return [rawtext],[]
## else:
## if text.startswith('/'):
## localfile = normpath(join(self.webmod.filerefBase,text[1:]))
## else:
## localfile = normpath(join(self.webmod.filerefBase,text))
## if exists(localfile+".txt"):
## uri = localfile+".html"
## elif os.path.isdir(localfile):
## uri = localfile+"/index.html"
## else:
## msg = self.reporter.error(\
## 'pageref to unkonwn page "%s"' % localfile,
## line=lineno)
## prb = self.problematic(text, text, msg)
## return [prb], [msg]
## return [nodes.reference(rawtext, text, refuri=uri)], []
def publish(node,srcpath):
description = ('Lino WebMan publisher. '
+ core.default_description)
# 20040922 parser = Parser(rfc2822=0, inliner=WebmanInliner(webmod))
# pub = core.Publisher(writer=WebmanWriter(webmod), parser=parser,
# destination_class=io.StringOutput)
pub = core.Publisher(writer=WebmanWriter(node),
destination_class=io.StringOutput)
pub.set_components('standalone', 'restructuredtext', None)
webmod = node.getModule()
pub.process_command_line(webmod.argv,
description=description,
**webmod.defaults)
pub.set_source(None, srcpath) # node.getSourcePath())
cwd = os.getcwd()
os.chdir(webmod.getLocalPath())
r = pub.publish() #enable_exit=enable_exit)
os.chdir(cwd)
return r
| gpl-3.0 | -7,160,707,338,872,915,000 | 28.686636 | 70 | 0.669513 | false | 3.048746 | false | false | false |

# ==== zerothi/sisl / sisl/io/siesta/binaries.py  (MPL-2.0 license) ====
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from numbers import Integral
from itertools import product, groupby
from collections import deque
import numpy as np
from numpy import pi
try:
from . import _siesta
found_module = True
except Exception as e:
print(e)
found_module = False
from ..sile import add_sile, SileError
from .sile import SileBinSiesta
from sisl._internal import set_module
from sisl.messages import warn, info, SislError
from ._help import *
import sisl._array as _a
from sisl import Geometry, Atom, Atoms, SuperCell, Grid, SparseCSR
from sisl import AtomicOrbital
from sisl.sparse import _ncol_to_indptr
from sisl.unit.siesta import unit_convert
from sisl.physics.sparse import SparseOrbitalBZ
from sisl.physics import Hamiltonian, DensityMatrix, EnergyDensityMatrix
from sisl.physics.overlap import Overlap
from sisl.physics.electron import EigenstateElectron
__all__ = ['tshsSileSiesta', 'onlysSileSiesta', 'tsdeSileSiesta']
__all__ += ['hsxSileSiesta', 'dmSileSiesta']
__all__ += ['wfsxSileSiesta']
__all__ += ['gridSileSiesta']
__all__ += ['tsgfSileSiesta']
_Bohr2Ang = unit_convert('Bohr', 'Ang')
_Ry2eV = unit_convert('Ry', 'eV')
_eV2Ry = unit_convert('eV', 'Ry')
def _bin_check(obj, method, message):
ierr = _siesta.io_m.iostat_query()
if ierr != 0:
raise SileError(f'{str(obj)}.{method} {message} (ierr={ierr})')
def _toF(array, dtype, scale=None):
if scale is None:
return array.astype(dtype, order='F', copy=False)
elif array.dtype == dtype and array.flags.f_contiguous:
# no need to copy since the order is correct
return array * scale
# We have to handle cases
out = np.empty_like(array, dtype, order='F')
np.multiply(array, scale, out=out)
return out
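# A small illustrative sketch of how _toF is meant to be used before handing
# arrays to the fortran bindings (the argument values are assumptions, not a
# real call site from this file):
#   xyz_F = _toF(geometry.xyz.T, np.float64, 1. / _Bohr2Ang)
# When the input already is F-contiguous with the requested dtype and no scale
# is given, the array is returned without copying.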
def _geometry_align(geom_b, geom_u, cls, method):
""" Routine used to align two geometries
There are a few twists in this since the fdf-reads will automatically
try and pass a geometry from the output files.
In cases where the *.ion* files do not exist this can
result in a mismatch that has to be resolved here.
This routine will select and return a merged Geometry which
fulfills the correct number of atoms and orbitals.
However, if the input geometries have mis-matching number
of atoms a SislError will be raised.
Parameters
----------
geom_b : Geometry from binary file
geom_u : Geometry supplied by user
Raises
------
SislError : if the geometries have non-equal atom count
"""
if geom_b is None:
return geom_u
elif geom_u is None:
return geom_b
# Default to use the users geometry
geom = geom_u
is_copy = False
def get_copy(geom, is_copy):
if is_copy:
return geom, True
return geom.copy(), True
if geom_b.na != geom.na:
# we have no way of solving this issue...
raise SileError(f"{cls.__name__}.{method} could not use the passed geometry as the "
f"of atoms is not consistent, user-atoms={geom_u.na}, file-atoms={geom_b.na}.")
# Try and figure out what to do
if not np.allclose(geom_b.xyz, geom.xyz):
warn(f"{cls.__name__}.{method} has mismatched atomic coordinates, will copy geometry and use file XYZ.")
geom, is_copy = get_copy(geom, is_copy)
geom.xyz[:, :] = geom_b.xyz[:, :]
if not np.allclose(geom_b.sc.cell, geom.sc.cell):
warn(f"{cls.__name__}.{method} has non-equal lattice vectors, will copy geometry and use file lattice.")
geom, is_copy = get_copy(geom, is_copy)
geom.sc.cell[:, :] = geom_b.sc.cell[:, :]
if not np.array_equal(geom_b.nsc, geom.nsc):
warn(f"{cls.__name__}.{method} has non-equal number of supercells, will copy geometry and use file supercell count.")
geom, is_copy = get_copy(geom, is_copy)
geom.set_nsc(geom_b.nsc)
# Now for the difficult part.
# If there is a mismatch in the number of orbitals we will
# prefer to use the user-supplied atomic species, but fill with
# *random* orbitals
if not np.array_equal(geom_b.atoms.orbitals, geom.atoms.orbitals):
warn(f"{cls.__name__}.{method} has non-equal number of orbitals per atom, will correct with *empty* orbitals.")
geom, is_copy = get_copy(geom, is_copy)
# Now create a new atom specie with the correct number of orbitals
norbs = geom_b.atoms.orbitals[:]
atoms = Atoms([geom.atoms[i].copy(orbitals=[-1.] * norbs[i]) for i in range(geom.na)])
geom._atoms = atoms
return geom
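# Hedged sketch of how the readers below use this helper; the variable names
# are made up, only the call pattern is taken from this file:
#   geom_file = self.read_geometry()
#   geom = _geometry_align(geom_file, kwargs.get('geometry', geom_file),
#                          self.__class__, 'read_hamiltonian')
# The file geometry decides coordinates, cell and supercell count, while the
# user geometry supplies the atomic species.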
@set_module("sisl.io.siesta")
class onlysSileSiesta(SileBinSiesta):
""" Geometry and overlap matrix """
def read_supercell(self):
""" Returns a SuperCell object from a TranSiesta file """
n_s = _siesta.read_tshs_sizes(self.file)[3]
_bin_check(self, 'read_supercell', 'could not read sizes.')
arr = _siesta.read_tshs_cell(self.file, n_s)
_bin_check(self, 'read_supercell', 'could not read cell.')
nsc = np.array(arr[0], np.int32)
# We have to transpose since the data is read *as-is*
# The cell in fortran files are (:, A1)
# after reading this is still obeyed (regardless of order)
# So we transpose to get it C-like
# Note that care must be taken for the different data-structures
# In particular not all data needs to be transposed (sparse H and S)
cell = arr[1].T * _Bohr2Ang
return SuperCell(cell, nsc=nsc)
def read_geometry(self, geometry=None):
""" Returns Geometry object from a TranSiesta file """
# Read supercell
sc = self.read_supercell()
na = _siesta.read_tshs_sizes(self.file)[1]
_bin_check(self, 'read_geometry', 'could not read sizes.')
arr = _siesta.read_tshs_geom(self.file, na)
_bin_check(self, 'read_geometry', 'could not read geometry.')
# see onlysSileSiesta.read_supercell for .T
xyz = arr[0].T * _Bohr2Ang
lasto = arr[1]
# Since the TSHS file does not contain species information
# and/or other stuff we *can* reuse an existing
# geometry which contains the correct atomic numbers etc.
orbs = np.diff(lasto)
if geometry is None:
# Create all different atoms...
# The TSHS file does not contain the
# atomic numbers, so we will just
# create them individually
# Get unique orbitals
uorb = np.unique(orbs)
# Create atoms
atoms = []
for Z, orb in enumerate(uorb):
atoms.append(Atom(Z+1, [-1] * orb))
def get_atom(atoms, orbs):
for atom in atoms:
if atom.no == orbs:
return atom
atom = []
for orb in orbs:
atom.append(get_atom(atoms, orb))
else:
# Create a new geometry with the correct atomic numbers
atom = []
for ia, no in zip(geometry, orbs):
a = geometry.atoms[ia]
if a.no == no:
atom.append(a)
else:
# correct atom
atom.append(a.__class__(a.Z, [-1. for io in range(no)], mass=a.mass, tag=a.tag))
# Create and return geometry object
return Geometry(xyz, atom, sc=sc)
def read_overlap(self, **kwargs):
""" Returns the overlap matrix from the TranSiesta file """
tshs_g = self.read_geometry()
geom = _geometry_align(tshs_g, kwargs.get('geometry', tshs_g), self.__class__, 'read_overlap')
# read the sizes used...
sizes = _siesta.read_tshs_sizes(self.file)
_bin_check(self, 'read_overlap', 'could not read sizes.')
# see onlysSileSiesta.read_supercell for .T
isc = _siesta.read_tshs_cell(self.file, sizes[3])[2].T
_bin_check(self, 'read_overlap', 'could not read cell.')
no = sizes[2]
nnz = sizes[4]
ncol, col, dS = _siesta.read_tshs_s(self.file, no, nnz)
_bin_check(self, 'read_overlap', 'could not read overlap matrix.')
# Create the Hamiltonian container
S = Overlap(geom, nnzpr=1)
# Create the new sparse matrix
S._csr.ncol = ncol.astype(np.int32, copy=False)
S._csr.ptr = _ncol_to_indptr(ncol)
# Correct fortran indices
S._csr.col = col.astype(np.int32, copy=False) - 1
S._csr._nnz = len(col)
S._csr._D = _a.emptyd([nnz, 1])
S._csr._D[:, 0] = dS[:]
# Convert to sisl supercell
# equivalent as _csr_from_siesta with explicit isc from file
_csr_from_sc_off(S.geometry, isc, S._csr)
# In siesta the matrix layout is written in CSC format
# due to fortran indexing, this means that we need to transpose
# to get it to correct layout.
return S.transpose(sort=kwargs.get("sort", True))
def read_fermi_level(self):
r""" Query the Fermi-level contained in the file
Returns
-------
Ef : fermi-level of the system
"""
Ef = _siesta.read_tshs_ef(self.file) * _Ry2eV
_bin_check(self, 'read_fermi_level', 'could not read fermi-level.')
return Ef
@set_module("sisl.io.siesta")
class tshsSileSiesta(onlysSileSiesta):
""" Geometry, Hamiltonian and overlap matrix file """
def read_hamiltonian(self, **kwargs):
""" Returns the electronic structure from the siesta.TSHS file """
tshs_g = self.read_geometry()
geom = _geometry_align(tshs_g, kwargs.get('geometry', tshs_g), self.__class__, 'read_hamiltonian')
# read the sizes used...
sizes = _siesta.read_tshs_sizes(self.file)
_bin_check(self, 'read_hamiltonian', 'could not read sizes.')
# see onlysSileSiesta.read_supercell for .T
isc = _siesta.read_tshs_cell(self.file, sizes[3])[2].T
_bin_check(self, 'read_hamiltonian', 'could not read cell.')
spin = sizes[0]
no = sizes[2]
nnz = sizes[4]
ncol, col, dH, dS = _siesta.read_tshs_hs(self.file, spin, no, nnz)
_bin_check(self, 'read_hamiltonian', 'could not read Hamiltonian and overlap matrix.')
# Check whether it is an orthogonal basis set
orthogonal = np.abs(dS).sum() == geom.no
# Create the Hamiltonian container
H = Hamiltonian(geom, spin, nnzpr=1, orthogonal=orthogonal)
# Create the new sparse matrix
H._csr.ncol = ncol.astype(np.int32, copy=False)
H._csr.ptr = _ncol_to_indptr(ncol)
# Correct fortran indices
H._csr.col = col.astype(np.int32, copy=False) - 1
H._csr._nnz = len(col)
if orthogonal:
H._csr._D = _a.emptyd([nnz, spin])
H._csr._D[:, :] = dH[:, :] * _Ry2eV
else:
H._csr._D = _a.emptyd([nnz, spin+1])
H._csr._D[:, :spin] = dH[:, :] * _Ry2eV
H._csr._D[:, spin] = dS[:]
_mat_spin_convert(H)
# Convert to sisl supercell
# equivalent as _csr_from_siesta with explicit isc from file
_csr_from_sc_off(H.geometry, isc, H._csr)
# Find all indices where dS == 1 (remember col is in fortran indices)
idx = col[np.isclose(dS, 1.).nonzero()[0]]
if np.any(idx > no):
print(f'Number of orbitals: {no}')
print(idx)
raise SileError(str(self) + '.read_hamiltonian could not assert '
'the supercell connections in the primary unit-cell.')
# see onlysSileSiesta.read_overlap for .transpose()
# For H, DM and EDM we also need to Hermitian conjugate it.
return H.transpose(spin=False, sort=kwargs.get("sort", True))
def write_hamiltonian(self, H, **kwargs):
""" Writes the Hamiltonian to a siesta.TSHS file """
# we sort below, so no need to do it here
# see onlysSileSiesta.read_overlap for .transpose()
csr = H.transpose(spin=False, sort=False)._csr
if csr.nnz == 0:
raise SileError(str(self) + '.write_hamiltonian cannot write '
'a zero element sparse matrix!')
# Convert to siesta CSR
_csr_to_siesta(H.geometry, csr)
csr.finalize(sort=kwargs.get("sort", True))
_mat_spin_convert(csr, H.spin)
# Extract the data to pass to the fortran routine
cell = H.geometry.cell
xyz = H.geometry.xyz
# Get H and S
if H.orthogonal:
h = csr._D
s = csr.diags(1., dim=1)
# Ensure all data is correctly formatted (i.e. have the same sparsity pattern)
s.align(csr)
s.finalize(sort=kwargs.get("sort", True))
if s.nnz != len(h):
raise SislError('The diagonal elements of your orthogonal Hamiltonian '
'have not been defined, this is a requirement.')
s = s._D[:, 0]
else:
h = csr._D[:, :H.S_idx]
s = csr._D[:, H.S_idx]
# Get shorter variants
nsc = H.geometry.nsc[:].astype(np.int32)
isc = _siesta.siesta_sc_off(*nsc)
# see onlysSileSiesta.read_supercell for .T
_siesta.write_tshs_hs(self.file, nsc[0], nsc[1], nsc[2],
cell.T / _Bohr2Ang, xyz.T / _Bohr2Ang, H.geometry.firsto,
csr.ncol, csr.col + 1,
_toF(h, np.float64, _eV2Ry), _toF(s, np.float64),
isc)
_bin_check(self, 'write_hamiltonian', 'could not write Hamiltonian and overlap matrix.')
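# A minimal round-trip sketch (the file names are assumptions, not part of
# this module):
#   H = tshsSileSiesta("siesta.TSHS").read_hamiltonian()
#   tshsSileSiesta("copy.TSHS").write_hamiltonian(H)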
@set_module("sisl.io.siesta")
class dmSileSiesta(SileBinSiesta):
""" Density matrix file """
def read_density_matrix(self, **kwargs):
""" Returns the density matrix from the siesta.DM file """
# Now read the sizes used...
spin, no, nsc, nnz = _siesta.read_dm_sizes(self.file)
_bin_check(self, 'read_density_matrix', 'could not read density matrix sizes.')
ncol, col, dDM = _siesta.read_dm(self.file, spin, no, nsc, nnz)
_bin_check(self, 'read_density_matrix', 'could not read density matrix.')
# Try and immediately attach a geometry
geom = kwargs.get('geometry', kwargs.get('geom', None))
if geom is None:
# We truly have no clue,
# Just generate a boxed system
xyz = [[x, 0, 0] for x in range(no)]
sc = SuperCell([no, 1, 1], nsc=nsc)
geom = Geometry(xyz, Atom(1), sc=sc)
if nsc[0] != 0 and np.any(geom.nsc != nsc):
# We have to update the number of supercells!
geom.set_nsc(nsc)
if geom.no != no:
raise SileError(str(self) + '.read_density_matrix could not use the '
'passed geometry as the number of atoms or orbitals is '
'inconsistent with DM file.')
# Create the density matrix container
DM = DensityMatrix(geom, spin, nnzpr=1, dtype=np.float64, orthogonal=False)
# Create the new sparse matrix
DM._csr.ncol = ncol.astype(np.int32, copy=False)
DM._csr.ptr = _ncol_to_indptr(ncol)
# Correct fortran indices
DM._csr.col = col.astype(np.int32, copy=False) - 1
DM._csr._nnz = len(col)
DM._csr._D = _a.emptyd([nnz, spin+1])
DM._csr._D[:, :spin] = dDM[:, :]
# DM file does not contain overlap matrix... so neglect it for now.
DM._csr._D[:, spin] = 0.
_mat_spin_convert(DM)
# Convert the supercells to sisl supercells
if nsc[0] != 0 or geom.no_s >= col.max():
_csr_from_siesta(geom, DM._csr)
else:
warn(str(self) + '.read_density_matrix may result in a wrong sparse pattern!')
return DM.transpose(spin=False, sort=kwargs.get("sort", True))
def write_density_matrix(self, DM, **kwargs):
""" Writes the density matrix to a siesta.DM file """
csr = DM.transpose(spin=False, sort=False)._csr
# This ensures that we don't have any *empty* elements
if csr.nnz == 0:
raise SileError(str(self) + '.write_density_matrix cannot write '
'a zero element sparse matrix!')
_csr_to_siesta(DM.geometry, csr)
# We do not really need to sort this one, but we do for consistency
# of the interface.
csr.finalize(sort=kwargs.get("sort", True))
_mat_spin_convert(csr, DM.spin)
# Get DM
if DM.orthogonal:
dm = csr._D
else:
dm = csr._D[:, :DM.S_idx]
# Ensure shapes (say if only 1 spin)
dm.shape = (-1, len(DM.spin))
nsc = DM.geometry.sc.nsc.astype(np.int32)
_siesta.write_dm(self.file, nsc, csr.ncol, csr.col + 1, _toF(dm, np.float64))
_bin_check(self, 'write_density_matrix', 'could not write density matrix.')
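# Usage sketch, hedged: the file names and `geom` (a matching sisl Geometry)
# are assumptions. Passing a geometry is recommended since the DM file itself
# carries no species information:
#   DM = dmSileSiesta("siesta.DM").read_density_matrix(geometry=geom)
#   dmSileSiesta("copy.DM").write_density_matrix(DM)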
@set_module("sisl.io.siesta")
class tsdeSileSiesta(dmSileSiesta):
""" Non-equilibrium density matrix and energy density matrix file """
def read_energy_density_matrix(self, **kwargs):
""" Returns the energy density matrix from the siesta.DM file """
# Now read the sizes used...
spin, no, nsc, nnz = _siesta.read_tsde_sizes(self.file)
_bin_check(self, 'read_energy_density_matrix', 'could not read energy density matrix sizes.')
ncol, col, dEDM = _siesta.read_tsde_edm(self.file, spin, no, nsc, nnz)
_bin_check(self, 'read_energy_density_matrix', 'could not read energy density matrix.')
# Try and immediately attach a geometry
geom = kwargs.get('geometry', kwargs.get('geom', None))
if geom is None:
# We truly have no clue,
# Just generate a boxed system
xyz = [[x, 0, 0] for x in range(no)]
sc = SuperCell([no, 1, 1], nsc=nsc)
geom = Geometry(xyz, Atom(1), sc=sc)
if nsc[0] != 0 and np.any(geom.nsc != nsc):
# We have to update the number of supercells!
geom.set_nsc(nsc)
if geom.no != no:
raise SileError(str(self) + '.read_energy_density_matrix could '
'not use the passed geometry as the number of atoms or orbitals '
'is inconsistent with DM file.')
# Create the energy density matrix container
EDM = EnergyDensityMatrix(geom, spin, nnzpr=1, dtype=np.float64, orthogonal=False)
# Create the new sparse matrix
EDM._csr.ncol = ncol.astype(np.int32, copy=False)
EDM._csr.ptr = _ncol_to_indptr(ncol)
# Correct fortran indices
EDM._csr.col = col.astype(np.int32, copy=False) - 1
EDM._csr._nnz = len(col)
EDM._csr._D = _a.emptyd([nnz, spin+1])
EDM._csr._D[:, :spin] = dEDM[:, :] * _Ry2eV
# EDM file does not contain overlap matrix... so neglect it for now.
EDM._csr._D[:, spin] = 0.
_mat_spin_convert(EDM)
# Convert the supercells to sisl supercells
if nsc[0] != 0 or geom.no_s >= col.max():
_csr_from_siesta(geom, EDM._csr)
else:
warn(str(self) + '.read_energy_density_matrix may result in a wrong sparse pattern!')
return EDM.transpose(spin=False, sort=kwargs.get("sort", True))
def read_fermi_level(self):
r""" Query the Fermi-level contained in the file
Returns
-------
Ef : fermi-level of the system
"""
Ef = _siesta.read_tsde_ef(self.file) * _Ry2eV
_bin_check(self, 'read_fermi_level', 'could not read fermi-level.')
return Ef
def write_density_matrices(self, DM, EDM, Ef=0., **kwargs):
r""" Writes the density matrix to a siesta.DM file
Parameters
----------
DM : DensityMatrix
density matrix to write to the file
EDM : EnergyDensityMatrix
energy density matrix to write to the file
Ef : float, optional
fermi-level to be contained
"""
DMcsr = DM.transpose(spin=False, sort=False)._csr
EDMcsr = EDM.transpose(spin=False, sort=False)._csr
DMcsr.align(EDMcsr)
EDMcsr.align(DMcsr)
if DMcsr.nnz == 0:
raise SileError(str(self) + '.write_density_matrices cannot write '
'a zero element sparse matrix!')
_csr_to_siesta(DM.geometry, DMcsr)
_csr_to_siesta(DM.geometry, EDMcsr)
sort = kwargs.get("sort", True)
DMcsr.finalize(sort=sort)
EDMcsr.finalize(sort=sort)
_mat_spin_convert(DMcsr, DM.spin)
_mat_spin_convert(EDMcsr, EDM.spin)
# Ensure everything is correct
if not (np.allclose(DMcsr.ncol, EDMcsr.ncol) and
np.allclose(DMcsr.col, EDMcsr.col)):
raise ValueError(str(self) + '.write_density_matrices got non compatible '
'DM and EDM matrices.')
if DM.orthogonal:
dm = DMcsr._D
else:
dm = DMcsr._D[:, :DM.S_idx]
if EDM.orthogonal:
edm = EDMcsr._D
else:
edm = EDMcsr._D[:, :EDM.S_idx]
nsc = DM.geometry.sc.nsc.astype(np.int32)
_siesta.write_tsde_dm_edm(self.file, nsc, DMcsr.ncol, DMcsr.col + 1,
_toF(dm, np.float64),
_toF(edm, np.float64, _eV2Ry), Ef * _eV2Ry)
_bin_check(self, 'write_density_matrices', 'could not write DM + EDM matrices.')
@set_module("sisl.io.siesta")
class hsxSileSiesta(SileBinSiesta):
""" Hamiltonian and overlap matrix file
This file does not contain all information regarding the system.
To ensure no errors are being raised one should pass a `Geometry` with
correct number of atoms and correct number of supercells.
The number of orbitals will be updated in the returned matrices geometry.
>>> hsx = hsxSileSiesta("siesta.HSX")
>>> HS = hsx.read_hamiltonian() # may fail
>>> HS = hsx.read_hamiltonian(geometry=<>) # should run correctly if above satisfied
Users are advised to use the `tshsSileSiesta` instead since that correctly contains
all information.
"""
def _xij2system(self, xij, geometry=None):
""" Create a new geometry with *correct* nsc and somewhat correct xyz
Parameters
----------
xij : SparseCSR
orbital distances
geometry : Geometry, optional
passed geometry
"""
def get_geom_handle(xij):
atoms = self._read_atoms()
if not atoms is None:
return Geometry(np.zeros([len(atoms), 3]), atoms)
N = len(xij)
# convert csr to dok format
row = (xij.ncol > 0).nonzero()[0]
# Now we have [0 0 0 0 1 1 1 1 2 2 ... no-1 no-1]
row = np.repeat(row, xij.ncol[row])
col = xij.col
# Parse xij to correct geometry
# first figure out all zeros (i.e. self-atom-orbitals)
idx0 = np.isclose(np.fabs(xij._D).sum(axis=1), 0.).nonzero()[0]
row0 = row[idx0]
# convert row0 and col0 to a first attempt of "atomization"
atoms = []
for r in range(N):
idx0r = (row0 == r).nonzero()[0]
row0r = row0[idx0r]
# although xij == 0, we just do % to ensure unit-cell orbs
col0r = col[idx0[idx0r]] % N
if np.all(col0r >= r):
# we have a new atom
atoms.append(set(col0r))
else:
atoms[-1].update(set(col0r))
# convert list of orbitals to lists
def conv(a):
a = list(a)
a.sort()
return a
atoms = [list(a) for a in atoms]
if sum(map(len, atoms)) != len(xij):
raise ValueError(f"{self.__class__.__name__} could not determine correct "
"number of orbitals.")
atms = Atoms(Atom('H', [-1. for _ in atoms[0]]))
for orbs in atoms[1:]:
atms.append(Atom('H', [-1. for _ in orbs]))
return Geometry(np.zeros([len(atoms), 3]), atms)
geom_handle = get_geom_handle(xij)
def convert_to_atom(geom, xij):
# o2a does not check for correct super-cell index
n_s = xij.shape[1] // xij.shape[0]
atm_s = geom.o2a(np.arange(xij.shape[1]))
# convert csr to dok format
row = (xij.ncol > 0).nonzero()[0]
row = np.repeat(row, xij.ncol[row])
col = xij.col
arow = atm_s[row]
acol = atm_s[col]
del atm_s, row, col
idx = np.lexsort((acol, arow))
arow = arow[idx]
acol = acol[idx]
xij = xij._D[idx]
del idx
# Now figure out if xij is consistent
duplicates = np.logical_and(np.diff(acol) == 0,
np.diff(arow) == 0).nonzero()[0]
if duplicates.size > 0:
if not np.allclose(xij[duplicates+1] - xij[duplicates], 0.):
raise ValueError(f"{self.__class__.__name__} converting xij(orb) -> xij(atom) went wrong. "
"This may happen if your coordinates are not inside the unitcell, please pass "
"a usable geometry.")
# remove duplicates to create new matrix
arow = np.delete(arow, duplicates)
acol = np.delete(acol, duplicates)
xij = np.delete(xij, duplicates, axis=0)
# Create a new sparse matrix
# Create the new index pointer
indptr = np.insert(np.array([0, len(xij)], np.int32), 1,
(np.diff(arow) != 0).nonzero()[0] + 1)
assert len(indptr) == geom.na + 1
return SparseCSR((xij, acol, indptr), shape=(geom.na, geom.na * n_s))
def coord_from_xij(xij):
# first atom is at 0, 0, 0
na = len(xij)
xyz = _a.zerosd([na, 3])
xyz[0] = [0, 0, 0]
mark = _a.zerosi(na)
mark[0] = 1
run_atoms = deque([0])
while len(run_atoms) > 0:
atm = run_atoms.popleft()
xyz_atm = xyz[atm].reshape(1, 3)
neighbours = xij.edges(atm, exclude=atm)
neighbours = neighbours[neighbours < na]
# update those that haven't been calculated
idx = mark[neighbours] == 0
neigh_idx = neighbours[idx]
if len(neigh_idx) == 0:
continue
xyz[neigh_idx, :] = xij[atm, neigh_idx] - xyz_atm
mark[neigh_idx] = 1
# add more atoms to be processed, since we have *mark*
# we will only run every atom once
run_atoms.extend(neigh_idx.tolist())
# check that everything is correct
if (~idx).sum() > 0:
neg_neighbours = neighbours[~idx]
if not np.allclose(xyz[neg_neighbours, :],
xij[atm, neg_neighbours] - xyz_atm):
raise ValueError(f"{self.__class__.__name__} xij(orb) -> xyz did not "
f"find same coordinates for different connections")
if mark.sum() != na:
raise ValueError(f"{self.__class__.__name__} xij(orb) -> Geometry does not "
f"have a fully connected geometry. It is impossible to create relative coordinates")
return xyz
def sc_from_xij(xij, xyz):
na = xij.shape[0]
n_s = xij.shape[1] // xij.shape[0]
if n_s == 1:
# easy!!
return SuperCell(xyz.max(axis=0) - xyz.min(axis=0) + 10., nsc=[1] * 3)
sc_off = _a.zerosd([n_s, 3])
mark = _a.zerosi(n_s)
mark[0] = 1
for atm in range(na):
neighbours = xij.edges(atm, exclude=atm)
uneighbours = neighbours % na
neighbour_isc = neighbours // na
# get offset in terms of unit-cell
off = xij[atm, neighbours] - (xyz[uneighbours] - xyz[atm].reshape(1, 3))
idx = mark[neighbour_isc] == 0
if not np.allclose(off[~idx], sc_off[neighbour_isc[~idx]]):
raise ValueError(f"{self.__class__.__name__} xij(orb) -> xyz did not "
f"find same supercell offsets for different connections")
if idx.sum() == 0:
continue
for idx in idx.nonzero()[0]:
nidx = neighbour_isc[idx]
if mark[nidx] == 0:
mark[nidx] = 1
sc_off[nidx] = off[idx]
elif not np.allclose(sc_off[nidx], off[idx]):
raise ValueError(f"{self.__class__.__name__} xij(orb) -> xyz did not "
f"find same supercell offsets for different connections")
# We know that siesta returns isc
# for iz in [0, 1, 2, 3, -3, -2, -1]:
# for iy in [0, 1, 2, -2, -1]:
# for ix in [0, 1, -1]:
# every block we find a half monotonically increasing vector additions
# Note the first is always [0, 0, 0]
# So our best chance is to *guess* the first nsc
# then reshape, then guess, then reshape, then guess :)
sc_diff = np.diff(sc_off, axis=0)
def get_nsc(sc_off):
""" Determine nsc depending on the axis """
# correct the offsets
ndim = sc_off.ndim
if sc_off.shape[0] == 1:
return 1
# always select the 2nd one since that contains the offset
# for the first isc [1, 0, 0] or [0, 1, 0] or [0, 0, 1]
sc_dir = sc_off[(1, ) + np.index_exp[0] * (ndim - 2)].reshape(1, 3)
norm2_sc_dir = (sc_dir ** 2).sum()
# figure out the maximum integer part
# we select 0 indices for all already determined lattice
# vectors since we know the first one is [0, 0, 0]
idx = np.index_exp[:] + np.index_exp[0] * (ndim - 2)
projection = (sc_off[idx] * sc_dir).sum(-1) / norm2_sc_dir
iprojection = np.rint(projection)
# reduce, find 0
idx_zero = np.isclose(iprojection, 0, atol=1e-5).nonzero()[0]
if idx_zero.size <= 1:
return 1
# only take those values that are continuous
# we *must* have some supercell connections
idx_max = idx_zero[1]
# find where they are close
# since there may be *many* zeros (non-coupling elements)
# we first have to cut off anything that is not integer
if np.allclose(projection[:idx_max], iprojection[:idx_max], atol=1e-5):
return idx_max
raise ValueError(f"Could not determine nsc from coordinates")
nsc = _a.onesi(3)
nsc[0] = get_nsc(sc_off)
sc_off = sc_off.reshape(-1, nsc[0], 3)
nsc[1] = get_nsc(sc_off)
sc_off = sc_off.reshape(-1, nsc[1], nsc[0], 3)
nsc[2] = sc_off.shape[0]
# now determine cell parameters
if all(nsc > 1):
cell = _a.arrayd([sc_off[0, 0, 1],
sc_off[0, 1, 0],
sc_off[1, 0, 0]])
else:
# we will never have all(nsc == 1) since that is
# taken care of at the start
# this gets a bit tricky, since we don't know one of the
# lattice vectors
cell = _a.zerosd([3, 3])
i = 0
for idx, isc in enumerate(nsc):
if isc > 1:
sl = [0, 0, 0]
sl[2 - idx] = 1
cell[i] = sc_off[tuple(sl)]
i += 1
# figure out the last vectors
# We'll just use Cartesian coordinates
while i < 3:
# this means we don't have any supercell connections
# along at least 1 other lattice vector.
lcell = np.fabs(cell).sum(0)
# figure out which Cartesian direction we are *missing*
cart_dir = np.argmin(lcell)
cell[i, cart_dir] = xyz[:, cart_dir].max() - xyz[:, cart_dir].min() + 10.
i += 1
return SuperCell(cell, nsc)
# now we have all orbitals, ensure compatibility with passed geometry
if geometry is None:
atm_xij = convert_to_atom(geom_handle, xij)
xyz = coord_from_xij(atm_xij)
sc = sc_from_xij(atm_xij, xyz)
geometry = Geometry(xyz, geom_handle.atoms, sc)
# Move coordinates into unit-cell
geometry.xyz[:, :] = (geometry.fxyz % 1.) @ geometry.cell
else:
if geometry.n_s != xij.shape[1] // xij.shape[0]:
atm_xij = convert_to_atom(geom_handle, xij)
sc = sc_from_xij(atm_xij, coord_from_xij(atm_xij))
geometry.set_nsc(sc.nsc)
def conv(orbs, atm):
if len(orbs) == len(atm):
return atm
return atm.copy(orbitals=[-1. for _ in orbs])
atms = Atoms(list(map(conv, geom_handle.atoms, geometry.atoms)))
if len(atms) != len(geometry):
raise ValueError(f"{self.__class__.__name__} passed geometry for reading "
"sparse matrix does not contain same number of atoms!")
geometry = geometry.copy()
# TODO check that geometry and xyz are the same!
geometry._atoms = atms
return geometry
def _read_atoms(self, **kwargs):
""" Reads basis set and geometry information from the HSX file """
# Now read the sizes used...
no, na, nspecies = _siesta.read_hsx_specie_sizes(self.file)
_bin_check(self, 'read_geometry', 'could not read specie sizes.')
# Read specie information
labels, val_q, norbs, isa = _siesta.read_hsx_species(self.file, nspecies, no, na)
# convert to proper string
labels = labels.T.reshape(nspecies, -1)
labels = labels.view(f"S{labels.shape[1]}")
labels = list(map(lambda s: b''.join(s).decode('utf-8').strip(),
labels.tolist())
)
_bin_check(self, 'read_geometry', 'could not read species.')
# to python index
isa -= 1
from sisl.atom import _ptbl
# try and convert labels into symbols
# We do this by:
# 1. label -> symbol
# 2. label[:2] -> symbol
# 3. label[:1] -> symbol
symbols = []
lbls = []
for label in labels:
lbls.append(label)
try:
symbol = _ptbl.Z_label(label)
symbols.append(symbol)
continue
except:
pass
try:
symbol = _ptbl.Z_label(label[:2])
symbols.append(symbol)
continue
except:
pass
try:
symbol = _ptbl.Z_label(label[:1])
symbols.append(symbol)
continue
except:
# we have no clue, assign -1
symbols.append(-1)
# Read in orbital information
atoms = []
for ispecie in range(nspecies):
n_l_zeta = _siesta.read_hsx_specie(self.file, ispecie+1, norbs[ispecie])
_bin_check(self, 'read_geometry', f'could not read specie {ispecie}.')
# create orbital
# no shell will have l>5, so m=10 should be more than enough
m = 10
orbs = []
for n, l, zeta in zip(*n_l_zeta):
# manual loop on m quantum numbers
if m > l:
m = -l
orbs.append(AtomicOrbital(n=n, l=l, m=m, zeta=zeta, R=-1.))
m += 1
# now create atom
atoms.append(Atom(symbols[ispecie], orbs, tag=lbls[ispecie]))
# now read in xij to retrieve atomic positions
Gamma, spin, no, no_s, nnz = _siesta.read_hsx_sizes(self.file)
_bin_check(self, 'read_geometry', 'could not read matrix sizes.')
ncol, col, _, dxij = _siesta.read_hsx_sx(self.file, Gamma, spin, no, no_s, nnz)
dxij = dxij.T * _Bohr2Ang
col -= 1
_bin_check(self, 'read_geometry', 'could not read xij matrix.')
# now create atoms object
atoms = Atoms([atoms[ia] for ia in isa])
return atoms
def read_hamiltonian(self, **kwargs):
""" Returns the electronic structure from the siesta.TSHS file """
# Now read the sizes used...
Gamma, spin, no, no_s, nnz = _siesta.read_hsx_sizes(self.file)
_bin_check(self, 'read_hamiltonian', 'could not read Hamiltonian sizes.')
ncol, col, dH, dS, dxij = _siesta.read_hsx_hsx(self.file, Gamma, spin, no, no_s, nnz)
dxij = dxij.T * _Bohr2Ang
col -= 1
_bin_check(self, 'read_hamiltonian', 'could not read Hamiltonian.')
ptr = _ncol_to_indptr(ncol)
xij = SparseCSR((dxij, col, ptr), shape=(no, no_s))
geom = self._xij2system(xij, kwargs.get('geometry', kwargs.get('geom', None)))
if geom.no != no or geom.no_s != no_s:
raise SileError(f"{str(self)}.read_hamiltonian could not use the "
"passed geometry as the number of atoms or orbitals is "
"inconsistent with HSX file.")
# Create the Hamiltonian container
H = Hamiltonian(geom, spin, nnzpr=1, dtype=np.float32, orthogonal=False)
# Create the new sparse matrix
H._csr.ncol = ncol.astype(np.int32, copy=False)
H._csr.ptr = ptr
# Correct fortran indices
H._csr.col = col.astype(np.int32, copy=False)
H._csr._nnz = len(col)
H._csr._D = _a.emptyf([nnz, spin+1])
H._csr._D[:, :spin] = dH[:, :] * _Ry2eV
H._csr._D[:, spin] = dS[:]
_mat_spin_convert(H)
# Convert the supercells to sisl supercells
if no_s // no == np.product(geom.nsc):
_csr_from_siesta(geom, H._csr)
return H.transpose(spin=False, sort=kwargs.get("sort", True))
def read_overlap(self, **kwargs):
""" Returns the overlap matrix from the siesta.HSX file """
# Now read the sizes used...
Gamma, spin, no, no_s, nnz = _siesta.read_hsx_sizes(self.file)
_bin_check(self, 'read_overlap', 'could not read overlap matrix sizes.')
ncol, col, dS, dxij = _siesta.read_hsx_sx(self.file, Gamma, spin, no, no_s, nnz)
dxij = dxij.T * _Bohr2Ang
col -= 1
_bin_check(self, 'read_overlap', 'could not read overlap matrix.')
ptr = _ncol_to_indptr(ncol)
xij = SparseCSR((dxij, col, ptr), shape=(no, no_s))
geom = self._xij2system(xij, kwargs.get('geometry', kwargs.get('geom', None)))
if geom.no != no or geom.no_s != no_s:
raise SileError(f"{str(self)}.read_overlap could not use the "
"passed geometry as the number of atoms or orbitals is "
"inconsistent with HSX file.")
# Create the Hamiltonian container
S = Overlap(geom, nnzpr=1)
# Create the new sparse matrix
S._csr.ncol = ncol.astype(np.int32, copy=False)
S._csr.ptr = _ncol_to_indptr(ncol)
# Correct fortran indices
S._csr.col = col.astype(np.int32, copy=False)
S._csr._nnz = len(col)
S._csr._D = _a.emptyf([nnz, 1])
S._csr._D[:, 0] = dS[:]
# Convert the supercells to sisl supercells
if no_s // no == np.product(geom.nsc):
_csr_from_siesta(geom, S._csr)
# not really necessary with Hermitian transposing, but for consistency
return S.transpose(sort=kwargs.get("sort", True))
@set_module("sisl.io.siesta")
class wfsxSileSiesta(SileBinSiesta):
r""" Binary WFSX file reader for Siesta """
def yield_eigenstate(self, parent=None):
r""" Reads eigenstates from the WFSX file
Returns
-------
state: EigenstateElectron
"""
# First query information
nspin, nou, nk, Gamma = _siesta.read_wfsx_sizes(self.file)
_bin_check(self, 'yield_eigenstate', 'could not read sizes.')
if nspin in [4, 8]:
nspin = 1 # only 1 spin
func = _siesta.read_wfsx_index_4
elif Gamma:
func = _siesta.read_wfsx_index_1
else:
func = _siesta.read_wfsx_index_2
if parent is None:
def convert_k(k):
if not np.allclose(k, 0.):
warn(f"{self.__class__.__name__}.yield_eigenstate returns a k-point in 1/Ang (not in reduced format), please pass 'parent' to ensure reduced k")
return k
else:
# We can successfully convert to proper reduced k-points
if isinstance(parent, SuperCell):
def convert_k(k):
return np.dot(k, parent.cell.T) / (2 * pi)
else:
def convert_k(k):
return np.dot(k, parent.sc.cell.T) / (2 * pi)
for ispin, ik in product(range(1, nspin + 1), range(1, nk + 1)):
k, _, nwf = _siesta.read_wfsx_index_info(self.file, ispin, ik)
# Convert to 1/Ang
k /= _Bohr2Ang
_bin_check(self, 'yield_eigenstate', f"could not read index info [{ispin}, {ik}]")
idx, eig, state = func(self.file, ispin, ik, nou, nwf)
_bin_check(self, 'yield_eigenstate', f"could not read state information [{ispin}, {ik}, {nwf}]")
# eig is already in eV
# we probably need to add spin
# see onlysSileSiesta.read_supercell for .T
es = EigenstateElectron(state.T, eig, parent=parent,
k=convert_k(k), gauge="r", index=idx - 1)
yield es
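# Hedged usage sketch; the file names and the parent Hamiltonian are
# assumptions. Passing `parent` makes the returned k-points reduced:
#   H = tshsSileSiesta("siesta.TSHS").read_hamiltonian()
#   for es in wfsxSileSiesta("siesta.WFSX").yield_eigenstate(parent=H):
#       print(es.eig)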
@set_module("sisl.io.siesta")
class _gridSileSiesta(SileBinSiesta):
r""" Binary real-space grid file
The Siesta binary grid sile will automatically convert the units from Siesta
units (Bohr, Ry) to sisl units (Ang, eV) provided the correct extension is present.
"""
def read_supercell(self, *args, **kwargs):
r""" Return the cell contained in the file """
cell = _siesta.read_grid_cell(self.file).T * _Bohr2Ang
_bin_check(self, 'read_supercell', 'could not read cell.')
return SuperCell(cell)
def read_grid_size(self):
r""" Query grid size information such as the grid size and number of spin components
Returns
-------
int : number of spin-components
mesh : 3 values for the number of mesh-elements
"""
# Read the sizes
nspin, mesh = _siesta.read_grid_sizes(self.file)
_bin_check(self, 'read_grid_size', 'could not read grid sizes.')
return nspin, mesh
def read_grid(self, index=0, dtype=np.float64, *args, **kwargs):
""" Read grid contained in the Grid file
Parameters
----------
index : int or array_like, optional
the spin-index for retrieving one of the components. If a vector
is passed it refers to the fraction per indexed component. I.e.
``[0.5, 0.5]`` will return sum of half the first two components.
Default to the first component.
dtype : numpy.float64, optional
default data-type precision
"""
index = kwargs.get('spin', index)
# Read the sizes and cell
nspin, mesh = self.read_grid_size()
sc = self.read_supercell()
grid = _siesta.read_grid(self.file, nspin, mesh[0], mesh[1], mesh[2])
_bin_check(self, 'read_grid', 'could not read grid.')
if isinstance(index, Integral):
grid = grid[:, :, :, index]
else:
if len(index) > grid.shape[0]:
raise ValueError(self.__class__.__name__ + '.read_grid requires spin to be an integer or '
'an array of length equal to the number of spin components.')
# It is F-contiguous, hence the last index
g = grid[:, :, :, 0] * index[0]
for i, scale in enumerate(index[1:]):
g += grid[:, :, :, 1+i] * scale
grid = g
# Simply create the grid (with no information)
# We will overwrite the actual grid
g = Grid([1, 1, 1], sc=sc)
# NOTE: there is no need to swap-axes since the returned array is in F ordering
# and thus the first axis is the fast (x, y, z) is retained
g.grid = grid * self.grid_unit
return g
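# Usage sketch (the file name is an assumption and sisl is assumed imported):
# for a spin-polarized siesta.RHO file the total density is the sum of both
# spin components,
#   rho = sisl.get_sile("siesta.RHO").read_grid(index=[1., 1.])
# while index=0 (the default) returns only the first spin component.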
@set_module("sisl.io.siesta")
class _gfSileSiesta(SileBinSiesta):
""" Surface Green function file containing, Hamiltonian, overlap matrix and self-energies
Do not mix read and write statements when using this code. Complete one or the other
before doing the other thing. Fortran does not allow the same file opened twice, if this
is needed you are recommended to make a symlink to the file and thus open two different
files.
This small snippet reads/writes the GF file
>>> with sisl.io._gfSileSiesta('hello.GF') as f:
... nspin, no, k, E = f.read_header()
... for ispin, new_k, k, E in f:
... if new_k:
... H, S = f.read_hamiltonian()
... SeHSE = f.read_self_energy()
To write a file do:
>>> with sisl.io._gfSileSiesta('hello.GF') as f:
... f.write_header(sisl.MonkhorstPack(...), E)
... for ispin, new_k, k, E in f:
... if new_k:
... f.write_hamiltonian(H, S)
... f.write_self_energy(SeHSE)
"""
def _setup(self, *args, **kwargs):
""" Simple setup that needs to be overwritten """
self._iu = -1
# The unit convention used for energy-points
# This is necessary until Siesta uses CODATA values
if kwargs.get("unit", "old").lower() in ("old", "4.1"):
self._E_Ry2eV = 13.60580
else:
self._E_Ry2eV = _Ry2eV
def _is_open(self):
return self._iu != -1
def _open_gf(self, mode, rewind=False):
if self._is_open() and mode == self._mode:
if rewind:
_siesta.io_m.rewind_file(self._iu)
else:
# retain indices
return
else:
if mode == 'r':
self._iu = _siesta.io_m.open_file_read(self.file)
elif mode == 'w':
self._iu = _siesta.io_m.open_file_write(self.file)
_bin_check(self, '_open_gf', 'could not open for {}.'.format({'r': 'reading', 'w': 'writing'}[mode]))
# The following state variables will at any given time
# correspond to the current Python indices that are to be read
# The process for identification is done on this basis:
# iE is the current (Python) index for the energy-point to be read
# ik is the current (Python) index for the k-point to be read
# ispin is the current (Python) index for the spin-index to be read (only has meaning for a spin-polarized
# GF files)
# state is:
# -1 : the file-descriptor has just been opened (i.e. in front of header)
# 0 : it means that the file-descriptor IS in front of H and S
# 1 : it means that the file-descriptor is NOT in front of H and S but somewhere in front of a self-energy
# is_read is:
# 0 : means that the current indices HAVE NOT been read
# 1 : means that the current indices HAVE been read
#
# All routines in the gf_read/write sources requires input in Python indices
self._state = -1
self._is_read = 0
self._ispin = 0
self._ik = 0
self._iE = 0
def _close_gf(self):
if not self._is_open():
return
# Close it
_siesta.io_m.close_file(self._iu)
self._iu = -1
# Clean variables
del self._state
del self._iE
del self._ik
del self._ispin
try:
del self._no_u
except:
pass
try:
del self._nspin
except:
pass
def _step_counter(self, method, **kwargs):
""" Method for stepping values *must* be called before doing the actual read to check correct values """
opt = {'method': method}
if kwargs.get('header', False):
# The header only exists once, so check whether it is the correct place to read/write
if self._state != -1 or self._is_read == 1:
raise SileError(self.__class__.__name__ + '.{method} failed because the header has already '
'been read.'.format(**opt))
self._state = -1
self._ispin = 0
self._ik = 0
self._iE = 0
#print('HEADER: ', self._state, self._ispin, self._ik, self._iE)
elif kwargs.get('HS', False):
# Correct for the previous state and jump values
if self._state == -1:
# We have just read the header
if self._is_read != 1:
raise SileError(self.__class__.__name__ + '.{method} failed because the file descriptor '
'has not read the header.'.format(**opt))
# Reset values as though the header has just been read
self._state = 0
self._ispin = 0
self._ik = 0
self._iE = 0
elif self._state == 0:
if self._is_read == 1:
raise SileError(self.__class__.__name__ + '.{method} failed because the file descriptor '
'has already read the current HS for the given k-point.'.format(**opt))
elif self._state == 1:
# We have just read from the last energy-point
if self._iE + 1 != self._nE or self._is_read != 1:
raise SileError(self.__class__.__name__ + '.{method} failed because the file descriptor '
'has not read all energy-points for a given k-point.'.format(**opt))
self._state = 0
self._ik += 1
if self._ik >= self._nk:
# We need to step spin
self._ispin += 1
self._ik = 0
self._iE = 0
#print('HS: ', self._state, self._ispin, self._ik, self._iE)
if self._ispin >= self._nspin:
opt['spin'] = self._ispin + 1
opt['nspin'] = self._nspin
raise SileError(self.__class__.__name__ + '.{method} failed because of missing information, '
'a non-existing entry has been requested! spin={spin} max_spin={nspin}.'.format(**opt))
else:
# We are reading an energy-point
if self._state == -1:
raise SileError(self.__class__.__name__ + '.{method} failed because the file descriptor '
'has an unknown state.'.format(**opt))
elif self._state == 0:
if self._is_read == 1:
# Fine, we have just read the HS, ispin and ik are correct
self._state = 1
self._iE = 0
else:
raise SileError(self.__class__.__name__ + '.{method} failed because the file descriptor '
'has an unknown state.'.format(**opt))
elif self._state == 1:
if self._is_read == 0 and self._iE < self._nE:
# we haven't read the current energy-point yet
pass
elif self._is_read == 1 and self._iE + 1 < self._nE:
self._iE += 1
else:
raise SileError(self.__class__.__name__ + '.{method} failed because the file descriptor '
'has an unknown state.'.format(**opt))
if self._iE >= self._nE:
# You are trying to read beyond the entry
opt['iE'] = self._iE + 1
opt['NE'] = self._nE
raise SileError(self.__class__.__name__ + '.{method} failed because of missing information, '
'a non-existing energy-point has been requested! E_index={iE} max_E_index={NE}.'.format(**opt))
#print('SE: ', self._state, self._ispin, self._ik, self._iE)
# Always signal (when stepping) that we have not yet read the thing
if kwargs.get('read', False):
self._is_read = 1
else:
self._is_read = 0
def Eindex(self, E):
""" Return the closest energy index corresponding to the energy ``E``
Parameters
----------
E : float or int
if ``int``, return itself, else return the energy index which is
closest to the energy.
"""
if isinstance(E, Integral):
return E
idxE = np.abs(self._E - E).argmin()
ret_E = self._E[idxE]
if abs(ret_E - E) > 5e-3:
warn(self.__class__.__name__ + " requesting energy " +
f"{E:.5f} eV, found {ret_E:.5f} eV as the closest energy!")
elif abs(ret_E - E) > 1e-3:
info(self.__class__.__name__ + " requesting energy " +
f"{E:.5f} eV, found {ret_E:.5f} eV as the closest energy!")
return idxE
def kindex(self, k):
""" Return the index of the k-point that is closests to the queried k-point (in reduced coordinates)
Parameters
----------
k : array_like of float or int
the queried k-point in reduced coordinates :math:`]-0.5;0.5]`. If ``int``
return itself.
"""
if isinstance(k, Integral):
return k
ik = np.sum(np.abs(self._k - _a.asarrayd(k)[None, :]), axis=1).argmin()
ret_k = self._k[ik, :]
if not np.allclose(ret_k, k, atol=0.0001):
warn(self.__class__.__name__ + " requesting k-point " +
"[{:.3f}, {:.3f}, {:.3f}]".format(*k) +
" found " +
"[{:.3f}, {:.3f}, {:.3f}]".format(*ret_k))
return ik
def read_header(self):
""" Read the header of the file and open it for reading subsequently
NOTES: this method may change in the future
Returns
-------
nspin : number of spin-components stored (1 or 2)
no_u : size of the matrices returned
k : k points in the GF file
E : energy points in the GF file
"""
# Ensure it is open (in read-mode)
if self._is_open():
_siesta.io_m.rewind_file(self._iu)
else:
self._open_gf('r')
nspin, no_u, nkpt, NE = _siesta.read_gf_sizes(self._iu)
_bin_check(self, 'read_header', 'could not read sizes.')
self._nspin = nspin
self._nk = nkpt
self._nE = NE
# We need to rewind (because of k and energy -points)
_siesta.io_m.rewind_file(self._iu)
self._step_counter('read_header', header=True, read=True)
k, E = _siesta.read_gf_header(self._iu, nkpt, NE)
_bin_check(self, 'read_header', 'could not read header information.')
if self._nspin > 2: # non-colinear
self._no_u = no_u * 2
else:
self._no_u = no_u
self._E = E * self._E_Ry2eV
self._k = k.T
return nspin, no_u, self._k, self._E
def disk_usage(self):
""" Calculate the estimated size of the resulting file
Returns
-------
estimated disk-space used in GB
"""
is_open = self._is_open()
if not is_open:
self.read_header()
# HS are only stored per k-point
HS = 2 * self._nspin * self._nk
SE = HS / 2 * self._nE
# Now calculate the full size
# no_u ** 2 = matrix size
# 16 = bytes in double complex
# 1024 ** 3 = B -> GB
mem = (HS + SE) * self._no_u ** 2 * 16 / 1024 ** 3
if not is_open:
self._close_gf()
return mem
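# Rough worked example of the estimate above (numbers are illustrative):
# with nspin=1, nk=10, NE=100 and no_u=1000 orbitals,
#   HS = 2 * 1 * 10 = 20, SE = 20 / 2 * 100 = 1000
#   (20 + 1000) * 1000**2 * 16 / 1024**3 ~ 15.2 GB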
def read_hamiltonian(self):
""" Return current Hamiltonian and overlap matrix from the GF file
Returns
-------
complex128 : Hamiltonian matrix
complex128 : Overlap matrix
"""
self._step_counter('read_hamiltonian', HS=True, read=True)
H, S = _siesta.read_gf_hs(self._iu, self._no_u)
_bin_check(self, 'read_hamiltonian', 'could not read Hamiltonian and overlap matrices.')
# we don't convert to C order!
return H * _Ry2eV, S
def read_self_energy(self):
r""" Read the currently reached bulk self-energy
The returned self-energy is:
.. math::
\boldsymbol \Sigma_{\mathrm{bulk}}(E) = \mathbf S E - \mathbf H - \boldsymbol \Sigma(E)
Returns
-------
complex128 : Self-energy matrix
"""
self._step_counter('read_self_energy', read=True)
SE = _siesta.read_gf_se(self._iu, self._no_u, self._iE)
_bin_check(self, 'read_self_energy', 'could not read self-energy.')
# we don't convert to C order!
return SE * _Ry2eV
def HkSk(self, k=(0, 0, 0), spin=0):
""" Retrieve H and S for the given k-point
Parameters
----------
k : int or array_like of float, optional
k-point to read the corresponding Hamiltonian and overlap matrices
for. If a specific k-point is passed `kindex` will be used to find
the corresponding index.
spin : int, optional
spin-index for the Hamiltonian and overlap matrices
"""
if not self._is_open():
self.read_header()
# find k-index that is requested
ik = self.kindex(k)
_siesta.read_gf_find(self._iu, self._nspin, self._nk, self._nE,
self._state, self._ispin, self._ik, self._iE, self._is_read,
0, spin, ik, 0)
_bin_check(self, 'HkSk', 'could not find Hamiltonian and overlap matrix.')
self._state = 0
self._ispin = spin
self._ik = ik
self._iE = 0
self._is_read = 0 # signal this is to be read
return self.read_hamiltonian()
def self_energy(self, E, k=0, spin=0):
""" Retrieve self-energy for a given energy-point and k-point
Parameters
----------
E : int or float
energy to retrieve self-energy at
k : int or array_like of float, optional
k-point to retrieve k-point at
spin : int, optional
spin-index to retrieve self-energy at
"""
if not self._is_open():
self.read_header()
ik = self.kindex(k)
iE = self.Eindex(E)
_siesta.read_gf_find(self._iu, self._nspin, self._nk, self._nE,
self._state, self._ispin, self._ik, self._iE, self._is_read,
1, spin, ik, iE)
_bin_check(self, 'self_energy', 'could not find requested self-energy.')
self._state = 1
self._ispin = spin
self._ik = ik
self._iE = iE
self._is_read = 0 # signal this is to be read
return self.read_self_energy()
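# Hedged sketch of random access into an existing GF file (file name assumed);
# this complements the sequential iteration shown in the class docstring:
#   gf = tsgfSileSiesta("Left.TSGF")
#   H, S = gf.HkSk(k=(0, 0, 0), spin=0)
#   SE = gf.self_energy(E=0.5, k=(0, 0, 0))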
def write_header(self, bz, E, mu=0., obj=None):
""" Write to the binary file the header of the file
Parameters
----------
bz : BrillouinZone
contains the k-points, the weights and possibly the parent Hamiltonian (if `obj` is None)
E : array_like of cmplx or float
the energy points. If `obj` is an instance of `SelfEnergy` where an
associated ``eta`` is defined then `E` may be float, otherwise
it *has* to be a complex array.
mu : float, optional
chemical potential in the file
obj : ..., optional
an object that contains the Hamiltonian definitions, defaults to ``bz.parent``
"""
if obj is None:
obj = bz.parent
nspin = len(obj.spin)
cell = obj.geometry.sc.cell
na_u = obj.geometry.na
no_u = obj.geometry.no
xa = obj.geometry.xyz
# The lasto in siesta requires lasto(0) == 0
# and secondly, the Python index to fortran
# index makes firsto behave like fortran lasto
lasto = obj.geometry.firsto
bloch = _a.onesi(3)
mu = mu
NE = len(E)
if E.dtype not in [np.complex64, np.complex128]:
E = E + 1j * obj.eta
Nk = len(bz)
k = bz.k
w = bz.weight
sizes = {
'na_used': na_u,
'nkpt': Nk,
'ne': NE,
}
self._nspin = nspin
self._E = E
self._k = np.copy(k)
self._nE = len(E)
self._nk = len(k)
if self._nspin > 2:
self._no_u = no_u * 2
else:
self._no_u = no_u
# Ensure it is open (in write mode)
self._close_gf()
self._open_gf('w')
# Now write to it...
self._step_counter('write_header', header=True, read=True)
# see onlysSileSiesta.read_supercell for .T
_siesta.write_gf_header(self._iu, nspin, _toF(cell.T, np.float64, 1. / _Bohr2Ang),
na_u, no_u, no_u, _toF(xa.T, np.float64, 1. / _Bohr2Ang),
lasto, bloch, 0, mu * _eV2Ry, _toF(k.T, np.float64),
w, self._E / self._E_Ry2eV,
**sizes)
_bin_check(self, 'write_header', 'could not write header information.')
def write_hamiltonian(self, H, S=None):
""" Write the current energy, k-point and H and S to the file
Parameters
----------
H : matrix
a square matrix corresponding to the Hamiltonian
S : matrix, optional
a square matrix corresponding to the overlap, for efficiency reasons
it may be advantageous to specify this argument for orthogonal cells.
"""
no = len(H)
if S is None:
S = np.eye(no, dtype=np.complex128, order='F')
self._step_counter('write_hamiltonian', HS=True, read=True)
_siesta.write_gf_hs(self._iu, self._ik, self._E[self._iE] / self._E_Ry2eV,
_toF(H, np.complex128, _eV2Ry),
_toF(S, np.complex128), no_u=no)
_bin_check(self, 'write_hamiltonian', 'could not write Hamiltonian and overlap matrices.')
def write_self_energy(self, SE):
r""" Write the current self energy, k-point and H and S to the file
The self-energy must correspond to the *bulk* self-energy
.. math::
\boldsymbol \Sigma_{\mathrm{bulk}}(E) = \mathbf S E - \mathbf H - \boldsymbol \Sigma(E)
Parameters
----------
SE : matrix
a square matrix corresponding to the self-energy (Green function)
"""
no = len(SE)
self._step_counter('write_self_energy', read=True)
_siesta.write_gf_se(self._iu, self._ik, self._iE, self._E[self._iE] / self._E_Ry2eV,
_toF(SE, np.complex128, _eV2Ry), no_u=no)
_bin_check(self, 'write_self_energy', 'could not write self-energy.')
def __len__(self):
return self._nE * self._nk * self._nspin
def __iter__(self):
""" Iterate through the energies and k-points that this GF file is associated with
Yields
------
bool, list of float, float
"""
# get everything
e = self._E
if self._nspin in [1, 2]:
for ispin in range(self._nspin):
for k in self._k:
yield ispin, True, k, e[0]
for E in e[1:]:
yield ispin, False, k, E
else:
for k in self._k:
yield True, k, e[0]
for E in e[1:]:
yield False, k, E
# We will automatically close once we hit the end
self._close_gf()
def _type(name, obj, dic=None):
if dic is None:
dic = {}
# Always pass the docstring
if not '__doc__' in dic:
try:
dic['__doc__'] = obj.__doc__.replace(obj.__name__, name)
except:
pass
return type(name, (obj, ), dic)
# Faster than class ... \ pass
tsgfSileSiesta = _type("tsgfSileSiesta", _gfSileSiesta)
gridSileSiesta = _type("gridSileSiesta", _gridSileSiesta, {'grid_unit': 1.})
if found_module:
add_sile('TSHS', tshsSileSiesta)
add_sile('onlyS', onlysSileSiesta)
add_sile('TSDE', tsdeSileSiesta)
add_sile('DM', dmSileSiesta)
add_sile('HSX', hsxSileSiesta)
add_sile('TSGF', tsgfSileSiesta)
add_sile('WFSX', wfsxSileSiesta)
# These have unit-conversions
add_sile('RHO', _type("rhoSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('LDOS', _type("ldosSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('RHOINIT', _type("rhoinitSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('RHOXC', _type("rhoxcSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('DRHO', _type("drhoSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('BADER', _type("baderSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('IOCH', _type("iorhoSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('TOCH', _type("totalrhoSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
# The following two files *require* that
# STM.DensityUnits Ele/bohr**3
# which I can't check!
# They are however the default
add_sile('STS', _type("stsSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('STM.LDOS', _type("stmldosSileSiesta", _gridSileSiesta, {'grid_unit': 1./_Bohr2Ang ** 3}))
add_sile('VH', _type("hartreeSileSiesta", _gridSileSiesta, {'grid_unit': _Ry2eV}))
add_sile('VNA', _type("neutralatomhartreeSileSiesta", _gridSileSiesta, {'grid_unit': _Ry2eV}))
add_sile('VT', _type("totalhartreeSileSiesta", _gridSileSiesta, {'grid_unit': _Ry2eV}))
| lgpl-3.0 | 2,117,025,637,927,004,700 | 38.515134 | 164 | 0.537319 | false | 3.526781 | false | false | false |
yangchengjian/otp | lib/asn1/test/asn1_SUITE_data/XSeq.py | 97 | 1093 | XSeq DEFINITIONS ::=
BEGIN
-- F.2.10.2
-- Use a sequence type to model a collection of variables whose
-- types are the same,
-- whose number is known and modest, and whose order is significant,
-- provided that the
-- makeup of the collection is unlikely to change from one version
-- of the protocol to the next.
-- EXAMPLE
NamesOfOfficers ::= SEQUENCE {
president VisibleString,
vicePresident VisibleString,
secretary VisibleString}
acmeCorp NamesOfOfficers ::= {
president "Jane Doe",
vicePresident "John Doe",
secretary "Joe Doe"}
-- F.2.10.3
-- Use a sequence type to model a collection of variables whose types differ,
-- whose number is known and modest, and whose order is significant,
-- provided that
-- the makeup of the collection is unlikely to change from one version
-- of the protocol to the next.
-- EXAMPLE
Credentials ::= SEQUENCE {
userName VisibleString,
password VisibleString,
accountNumber INTEGER}
-- Empty SEQUENCE stupid but just for test
BasicCallCategories ::= SEQUENCE
{
... -- So far, no specific categories identified
}
END
| apache-2.0 | 7,470,751,405,835,331,000 | 25.02381 | 78 | 0.73376 | false | 3.383901 | false | false | false |
jrmendozat/mtvm | Articulo/migrations/0006_auto_20150408_0949.py | 1 | 1098 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('Articulo', '0005_articulo_proveedor_proveedor'),
]
operations = [
migrations.AlterField(
model_name='articulo_clase',
name='clase',
field=models.CharField(unique=True, max_length=100),
preserve_default=True,
),
migrations.AlterField(
model_name='categoria_articulo',
name='categoria',
field=models.CharField(unique=True, max_length=100),
preserve_default=True,
),
migrations.AlterField(
model_name='tipo_costo',
name='tipo_costo',
field=models.CharField(unique=True, max_length=100),
preserve_default=True,
),
migrations.AlterField(
model_name='unidad',
name='unidad',
field=models.CharField(unique=True, max_length=100),
preserve_default=True,
),
]
| gpl-2.0 | 3,612,079,159,789,816,300 | 27.894737 | 64 | 0.562842 | false | 4.097015 | false | false | false |
bally12345/enigma2 | lib/python/Screens/SkinSelector.py | 1 | 3687 | # -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from os import path, walk
from enigma import eEnv
class SkinSelector(Screen):
# for i18n:
# _("Choose your Skin")
skinlist = []
root = eEnv.resolve("${datadir}/enigma2/")
def __init__(self, session, args = None):
Screen.__init__(self, session)
Screen.setTitle(self, _("Skin Setup"))
self.skinlist = []
self.previewPath = ""
path.walk(self.root, self.find, "")
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self["SkinList"] = MenuList(self.skinlist)
self["Preview"] = Pixmap()
self.skinlist.sort()
self["actions"] = NumberActionMap(["WizardActions", "DirectionActions", "InputActions", "EPGSelectActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"info": self.info,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
tmp = config.skin.primary_skin.value.find('/skin.xml')
if tmp != -1:
tmp = config.skin.primary_skin.value[:tmp]
idx = 0
for skin in self.skinlist:
if skin == tmp:
break
idx += 1
if idx < len(self.skinlist):
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self.loadPreview()
def down(self):
self["SkinList"].down()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("Enigma2 Skinselector\n\nIf you experience any problems please contact\[email protected]\n\n\xA9 2006 - Stephan Reichholf"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def find(self, arg, dirname, names):
for x in names:
if x == "skin.xml":
if dirname <> self.root:
subdir = dirname[19:]
self.skinlist.append(subdir)
else:
subdir = "Default Skin"
self.skinlist.append(subdir)
def ok(self):
if self["SkinList"].getCurrent() == "Default Skin":
skinfile = "skin.xml"
else:
skinfile = self["SkinList"].getCurrent()+"/skin.xml"
print "Skinselector: Selected Skin: "+self.root+skinfile
config.skin.primary_skin.value = skinfile
config.skin.primary_skin.save()
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to Restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
if self["SkinList"].getCurrent() == "Default Skin":
pngpath = self.root+"/prev.png"
else:
pngpath = self.root+self["SkinList"].getCurrent()+"/prev.png"
if not path.exists(pngpath):
pngpath = "/usr/share/enigma2/skin_default/noprev.png"
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
def restartGUI(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 3)
| gpl-2.0 | 4,673,810,711,601,793,000 | 29.221311 | 195 | 0.699213 | false | 3.140545 | true | false | false |
luosch/leetcode | python/Find Median from Data Stream.py | 1 | 1034 | import heapq
class MedianFinder:
def __init__(self):
self.small = []
self.large = []
def addNum(self, num):
num = float(num)
if len(self.small) == 0:
heapq.heappush(self.small, -num)
return
if num < -self.small[0]:
heapq.heappush(self.small, -num)
else:
heapq.heappush(self.large, num)
if len(self.small) - len(self.large) == 2:
heapq.heappush(self.large, -heapq.heappop(self.small))
elif len(self.large) - len(self.small) == 2:
heapq.heappush(self.small, -heapq.heappop(self.large))
def findMedian(self):
if len(self.small) == len(self.large):
return (-self.small[0] + self.large[0]) / 2.0
else:
return -self.small[0] if len(self.small) > len(self.large) else self.large[0]
# Your MedianFinder object will be instantiated and called as such:
# mf = MedianFinder()
# mf.addNum(1)
# mf.findMedian()
| mit | 7,690,119,258,774,977,000 | 28.542857 | 89 | 0.54352 | false | 3.241379 | false | false | false |
rsnakamura/oldape | apetools/connections/adbconnection.py | 1 | 9298 |
# python standard library
import re
from StringIO import StringIO
# apetools Libraries
from localconnection import LocalNixConnection
from localconnection import OutputError
from localconnection import EOF
from apetools.commons import errors
from apetools.commons import readoutput
from apetools.commons import enumerations
from sshconnection import SSHConnection
ConnectionError = errors.ConnectionError
CommandError = errors.CommandError
ConnectionWarning = errors.ConnectionWarning
ValidatingOutput = readoutput.ValidatingOutput
OperatingSystem = enumerations.OperatingSystem
# Error messages
DEVICE_NOT_FOUND = "error: device not found"
NOT_CONNECTED = "No Android Device Detected by ADB (USB) Connection"
DEVICE_NOT_ROOTED = "adbd cannot run as root in production builds"
NOT_ROOTED = "This Android device isn't rootable."
NOT_FOUND = "device not found"
#regular expressions
ALPHA = r'\w'
ONE_OR_MORE = "+"
ZERO_OR_MORE = "*"
SPACE = r"\s"
SPACES = SPACE + ONE_OR_MORE
NAMED = "(?P<{n}>{p})"
COMMAND_GROUP = "command"
ANYTHING = r'.'
EVERYTHING = ANYTHING + ZERO_OR_MORE
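# Composed in ADBShellConnection.unknown_command below, these fragments form a
# pattern roughly equivalent to:
#     (?P<command>\w+)/sh:\s+.*\s+not\s+found
# e.g. a shell line like "bin/sh: foobar: not found" (illustrative example,
# not a captured device log) would be treated as an unknown command.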
class ADBConnectionError(ConnectionError):
"""
Raise if there is a problem with the ADB Connection
"""
# end class ADBConnectionError
class ADBCommandError(CommandError):
"""
Raise if there is a problem with an ADB command
"""
# end class ADBCommandError
class ADBConnectionWarning(ConnectionWarning):
"""
A warning to raise if something non-fatal but bad happens
"""
# end class ADBConnectionWarning
class ADBConnection(LocalNixConnection):
"""
An ADB Connection sends commands to the Android Debug Bridge
"""
def __init__(self, serial_number=None,*args, **kwargs):
"""
:param:
- `serial_number`: An optional serial number to specify the device.
"""
super(ADBConnection, self).__init__(*args, **kwargs)
self._logger = None
self.command_prefix = "adb"
if serial_number is not None:
self.command_prefix += " -s " + serial_number
self._operating_system = None
return
@property
def operating_system(self):
"""
:return: enumeration for android
"""
if self._operating_system is None:
self._operating_system = OperatingSystem.android
return self._operating_system
def _rpc(self, command, arguments='', timeout=None):
"""
Overrides the LocalConnection._rpc to check for errors
"""
output = self._main(command, arguments, timeout)
return OutputError(ValidatingOutput(lines=output.output, validate=self.check_errors), StringIO(EOF))
def check_errors(self, line):
"""
This is here so that children can override it.
:param:
- `output`: OutputError tuple
"""
self.check_base_errors(line)
return
def check_base_errors(self, line):
"""
:param:
- `line`: A string of output
:raise: ADBConnectionError if connection fails
:raise: ADBConnectionWarning if the Android isn't running as root
"""
if DEVICE_NOT_FOUND in line:
self.logger.debug(line)
raise ConnectionError("The Android wasn't found: {0}".format(line))
elif DEVICE_NOT_ROOTED in line:
self.logger.debug(line)
raise ConnectionWarning("The Android isn't root: {0}".format(line))
return
def __str__(self):
if self.serial_number is not None:
return "ADBLocal: {0}".format(self.serial_number)
return "ADBLocal"
# end class ADBConnection
class ADBBlockingConnection(ADBConnection):
"""
Like the ADBConnection but waits for a device to come online
"""
def __init__(self, *args, **kwargs):
super(ADBBlockingConnection, self).__init__(*args, **kwargs)
self.command_prefix += " wait-for-device"
return
# end class ADBConnection
class ADBShellConnection(ADBConnection):
"""
An ADBShellConnection connects to the adb shell.
If you use a timeout parameter on method calls, the output acts line-buffered.
If you leave the timeout as None, it acts file-buffered
"""
def __init__(self, *args, **kwargs):
super(ADBShellConnection, self).__init__(*args, **kwargs)
self.command_prefix += " shell"
self._unknown_command = None
self._logger = None
return
@property
def unknown_command(self):
"""
A regular expression to match unknown command errors.
Uses:
:rtype: SRE_Pattern
:return: regex to match unknown_command error.
"""
if self._unknown_command is None:
self._unknown_command = re.compile(SPACES.join([NAMED.format(n=COMMAND_GROUP, p=ALPHA + ONE_OR_MORE) + "/sh:",
EVERYTHING, 'not', 'found']))
return self._unknown_command
def _procedure_call(self, command, arguments='', path='', timeout=None):
output = self._main(command, arguments, path, timeout)
return OutputError(ValidatingOutput(lines=output.output, validate=self.check_errors),
output.error)
def check_errors(self, line):
"""
        Checks the line to see if the line has an unknown command error
"""
self.check_base_errors(line)
if self.unknown_command.search(line):
raise ConnectionError("Unknown ADB Shell Command: {0}".format(line))
return
# end class ADBShellConnection
class ADBShellBlockingConnection(ADBShellConnection):
def __init__(self, *args, **kwargs):
super(ADBShellBlockingConnection, self).__init__(*args, **kwargs)
self.command_prefix = "adb wait-for-device shell"
self._unknown_command = None
return
class ADBSSHConnection(SSHConnection):
"""
An ADB Connection sends commands to the Android Debug Bridge
"""
def __init__(self, serial_number=None,*args, **kwargs):
"""
:param:
- `serial_number`: An optional serial number to specify the device.
"""
super(ADBSSHConnection, self).__init__(*args, **kwargs)
self._logger = None
self.command_prefix = "adb"
if serial_number is not None:
self.command_prefix += " -s " + serial_number
self.operating_system = OperatingSystem.android
return
def _procedure_call(self, command, arguments="",
timeout=10):
"""
Overrides the SSHConnection._procedure_call to check for errors
"""
command = self.add_path(command)
output = self._main(command, arguments, timeout)
return OutputError(ValidatingOutput(lines=output.output, validate=self.check_errors), output.error)
def check_errors(self, line):
"""
This is here so that children can override it.
:param:
- `line`: a line of output
"""
self._check_errors(line)
return
def _check_errors(self, line):
"""
Checks connection-related errors
:raise: ADBConnectionError if the device isn't detected
:raise: ADBConnectionWarning if the device isn't rooted
"""
if DEVICE_NOT_FOUND in line:
self.logger.error(line)
raise ADBConnectionError("Android Not Detected: {0}".format(line))
elif DEVICE_NOT_ROOTED in line:
self.logger.warning(line)
            raise ADBConnectionWarning("Android Not Rooted: {0}".format(line))
return
# end class ADBSSHConnection
class ADBShellSSHConnection(ADBSSHConnection):
"""
A class to talk to the shell, note the adb-server
"""
def __init__(self, *args, **kwargs):
"""
:param: (see the ADBSSHConnection)
"""
super(ADBShellSSHConnection, self).__init__(*args, **kwargs)
self.command_prefix += " shell "
self._unknown_command = None
return
@property
def unknown_command(self):
"""
A regular expression to match unknown command errors.
Uses:
'\w+/sh: *.* *not *found'
:rtype: SRE_Pattern
:return: regex to match unknown_command error.
"""
if self._unknown_command is None:
self._unknown_command = re.compile(SPACES.join([NAMED.format(n=COMMAND_GROUP, p=ALPHA + ONE_OR_MORE) + "/sh:",
EVERYTHING, 'not', 'found']))
return self._unknown_command
def check_errors(self, line):
"""
:line: line of standard output
:raise: ADBCommandError if the command issued wasn't recognized
"""
self._check_errors(line)
if self.unknown_command.search(line):
raise ADBCommandError(line)
return
# end class ADBSHellSSHConnection
if __name__ == "__main__":
from apetools.main import watcher
import sys
watcher()
adb = ADBShellSSHConnection(hostname="lancet", username="allion")
output, error= adb.iw('wlan0 link', timeout=1)
for line in output:
sys.stdout.write(line)
| apache-2.0 | 620,523,441,008,638,100 | 29.485246 | 122 | 0.617445 | false | 4.160179 | false | false | false |
JamesLinus/OMMPS | gui.py | 1 | 1318 | import tkinter as tk
= "hello world\n(click me)"
self.hi.there["command"] = self.say_hi
self.hi_thereclass Application(tk.Frame):
def __init__ (self, master-none);
super().__init__(master)
self.pack()
self.create_widgets()
def create_widgets(self);
self.hi_there = tk.Button(self)
self.hi_there["text"] = "hello world\n(click me)"
self.hi_there["command"] = self.say_hi
self.hi_there.pack(side="top")
self.quit = tk.Button(self, text="QUIT", fg="red",
command=root.destroy)
self.quit.pack(side="bottom")
def say_hi(self):
print("hi there, evertone !")
root = tk.Tk()
app = Application(master=root)
app.mainloop()
"""docstring for Application"tk.Framef
def __init__ (self, master-none);
super().__init__(master)
self.pack()
self.create_widgets()
def create_widgets(self);
self.hi_there = tk.Button(self)
self.hi.there["text"]__ini = "hello world\n(click me)"
self.hi.there["command"] = self.say_hi
self.hi_theret__(self, arg):
super(Application,tk.Frame._
def __init__ (self, master-none);
super().__init__(master)
self.pack()
self.create_widgets()
def create_widgets(self);
self.hi_there = tk.Button(self)
self.hi.there["text"]_init__()
self.arg = arg
| lgpl-3.0 | 8,073,765,696,358,767,000 | 24.346154 | 59 | 0.609256 | false | 3.072261 | false | false | false |
EmreAtes/spack | var/spack/repos/builtin/packages/hic-pro/package.py | 1 | 3434 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class HicPro(MakefilePackage):
"""HiC-Pro is a package designed to process Hi-C data,
from raw fastq files (paired-end Illumina data)
to the normalized contact maps"""
homepage = "https://github.com/nservant/HiC-Pro"
url = "https://github.com/nservant/HiC-Pro/archive/v2.10.0.tar.gz"
version('2.10.0', '6ae2213dcc984b722d1a1f65fcbb21a2')
depends_on('bowtie2')
depends_on('samtools')
depends_on('[email protected]:2.8')
depends_on('r')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('py-bx-python', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
def edit(self, spec, prefix):
config = FileFilter('config-install.txt')
config.filter('PREFIX =.*', 'PREFIX = {0}'.format(prefix))
        config.filter('BOWTIE2_PATH =.*',
                      'BOWTIE2_PATH = {0}'.format(spec['bowtie2'].prefix))
        config.filter('SAMTOOLS_PATH =.*',
                      'SAMTOOLS_PATH = {0}'.format(spec['samtools'].prefix))
        config.filter('R_PATH =.*',
                      'R_PATH = {0}'.format(spec['r'].prefix))
        config.filter('PYTHON_PATH =.*',
                      'PYTHON_PATH = {0}'.format(spec['python'].prefix))
    def build(self, spec, prefix):
make('-f', './scripts/install/Makefile',
'CONFIG_SYS=./config-install.txt')
make('mapbuilder')
make('readstrimming')
make('iced')
    def install(self, spec, prefix):
# Patch INSTALLPATH in config-system.txt
config = FileFilter('config-system.txt')
config.filter('/HiC-Pro_2.10.0', '')
# Install
install('config-hicpro.txt', prefix)
install('config-install.txt', prefix)
install('config-system.txt', prefix)
install_tree('bin', prefix.bin)
install_tree('annotation', prefix.annotation)
install_tree('doc', prefix.doc)
install_tree('scripts', prefix.scripts)
install_tree('test-op', join_path(prefix, 'test-op'))
| lgpl-2.1 | -537,054,734,618,021,440 | 41.925 | 78 | 0.615026 | false | 3.668803 | true | false | false |
GrahamDumpleton/ispyd | ispyd/plugins/profiler.py | 1 | 3695 | import atexit
import Queue
import StringIO
import sys
import threading
import time
import traceback
_profiler = None
class Profiler(threading.Thread):
def __init__(self, duration, interval, filename):
super(Profiler, self).__init__()
self._duration = duration
self._interval = interval
self._filename = filename
self._queue = Queue.Queue()
self._nodes = {}
self._links = {}
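        # _nodes maps (filename, function_name) -> {'count': <samples>};
        # _links maps (parent_node, child_node) pairs of those keys to the
        # same kind of counter. Both are filled by process_stack().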
def run(self):
start = time.time()
while time.time() < start+self._duration:
try:
self._queue.get(timeout=self._interval)
break
except:
pass
stacks = sys._current_frames().values()
for stack in stacks:
self.process_stack(stack)
print >> open(self._filename, 'w'), repr((self._nodes, self._links))
#print >> open(self._filename, 'w'), repr(self._records)
global _profiler
_profiler = None
def abort(self):
self._queue.put(True)
self.join()
def process_stack(self, stack):
output = StringIO.StringIO()
parent = None
for filename, lineno, name, line in traceback.extract_stack(stack):
node = (filename, name)
node_record = self._nodes.get(node)
if node_record is None:
node_record = { 'count': 1 }
self._nodes[node] = node_record
else:
node_record['count'] += 1
if parent:
link = (parent, node)
link_record = self._links.get(link)
if link_record is None:
link_record = { 'count': 1 }
self._links[link] = link_record
else:
link_record['count'] += 1
parent = node
"""
children = None
for filename, lineno, name, line in traceback.extract_stack(stack):
#key = (filename, lineno, name)
key = (filename, name)
if children is None:
record = self._records.get(key)
if record is None:
record = { 'count': 1, 'children': {} }
self._records[key] = record
else:
record['count'] += 1
children = record['children']
elif key in children:
record = children[key]
record['count'] += 1
children = record['children']
else:
record = { 'count': 1, 'children': {} }
children[key] = record
children = record['children']
"""
def _abort():
if _profiler:
_profiler.abort()
atexit.register(_abort)
class ProfilerShell(object):
name = 'profiler'
def activate(self, config_object):
self.__config_object = config_object
enabled = False
if self.__config_object.has_option('profiler', 'enabled'):
value = self.__config_object.get('profiler', 'enabled')
enabled = value.lower() in ('1', 'on', 'yes', 'true')
if not enabled:
print >> self.stdout, 'Sorry, the profiler plugin is disabled.'
return True
def do_start(self, line):
global _profiler
if _profiler is None:
_profiler = Profiler(10.0*60.0, 0.105, '/tmp/profile.dat')
#_profiler = Profiler(20.0, 1.0, '/tmp/profile.dat')
_profiler.start()
def do_abort(self, line):
global _profiler
        if _profiler is not None:
_profiler.abort()
| apache-2.0 | -805,647,387,273,683,300 | 24.839161 | 76 | 0.498512 | false | 4.457177 | false | false | false |
PaloAltoNetworks/minemeld-core | tests/test_ft_taxii.py | 1 | 21780 | # -*- coding: utf-8 -*-
# Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FT TAXII tests
Unit tests for minemeld.ft.taxii
"""
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import unittest
import mock
import redis
import gevent
import greenlet
import time
import xmltodict
import os
import libtaxii.constants
import re
import lz4
import json
import minemeld.ft.taxii
import minemeld.ft
FTNAME = 'testft-%d' % int(time.time())
MYDIR = os.path.dirname(__file__)
class MockTaxiiContentBlock(object):
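    """
    Minimal stand-in for a libtaxii content block: it exposes the STIX XML
    as .content and a .content_binding whose binding_id is CB_STIX_XML_111,
    which is what the handlers exercised by these tests are expected to read.
    """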
def __init__(self, stix_xml):
class _Binding(object):
def __init__(self, id_):
self.binding_id = id_
self.content = stix_xml
self.content_binding = _Binding(libtaxii.constants.CB_STIX_XML_111)
class MineMeldFTTaxiiTests(unittest.TestCase):
@mock.patch.object(gevent, 'Greenlet')
def test_taxiiclient_parse(self, glet_mock):
config = {
'side_config': 'dummy.yml',
'ca_file': 'dummy.crt'
}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.TaxiiClient(FTNAME, chassis, config)
inputs = []
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
testfiles = os.listdir(MYDIR)
testfiles = filter(
lambda x: x.startswith('test_ft_taxii_stix_package_'),
testfiles
)
for t in testfiles:
with open(os.path.join(MYDIR, t), 'r') as f:
sxml = f.read()
mo = re.match('test_ft_taxii_stix_package_([A-Za-z0-9]+)_([0-9]+)_.*', t)
self.assertNotEqual(mo, None)
type_ = mo.group(1)
num_indicators = int(mo.group(2))
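            # e.g. a file named "test_ft_taxii_stix_package_IPv4_2_sample.xml"
            # (hypothetical name) would yield type_ == 'IPv4' and
            # num_indicators == 2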
stix_objects = {
'observables': {},
'indicators': {},
'ttps': {}
}
content_blocks = [
MockTaxiiContentBlock(sxml)
]
b._handle_content_blocks(
content_blocks,
stix_objects
)
params = {
'ttps': stix_objects['ttps'],
'observables': stix_objects['observables']
}
indicators = [[iid, iv, params] for iid, iv in stix_objects['indicators'].iteritems()]
for i in indicators:
result = b._process_item(i)
self.assertEqual(len(result), num_indicators)
if type_ != 'any':
for r in result:
self.assertEqual(r[1]['type'], type_)
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_init(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
self.assertEqual(b.name, FTNAME)
self.assertEqual(b.chassis, chassis)
self.assertEqual(b.config, config)
self.assertItemsEqual(b.inputs, [])
self.assertEqual(b.output, None)
self.assertEqual(b.redis_skey, FTNAME)
self.assertEqual(b.redis_skey_chkp, FTNAME+'.chkp')
self.assertEqual(b.redis_skey_value, FTNAME+'.value')
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_ip(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# CIDR
b.filtered_update(
'a',
indicator='1.1.1.0/24',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/24')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# fake range
b.filtered_update(
'a',
indicator='1.1.1.1-1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# fake range 2
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.31',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# real range
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.33',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators']
cyboxprops = indicator[0]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
cyboxprops = indicator[1]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.32/31')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_domain(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='example.com',
value={
'type': 'domain',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['value'], 'example.com')
self.assertEqual(cyboxprops['type'], 'FQDN')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='www.example.com/admin.php',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], 'www.example.com/admin.php')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_unicode_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_overflow(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
self.fail(msg='hset found')
self.assertEqual(b.statistics['drop.overflow'], 1)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries - 1
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_hash(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# sha1
b.filtered_update(
'a',
indicator='a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33',
value={
'type': 'sha1',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA1')
SR_mock.reset_mock()
# md5
b.filtered_update(
'a',
indicator='e23fadd6ceef8c618fc1c65191d846fa',
value={
'type': 'md5',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'e23fadd6ceef8c618fc1c65191d846fa')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'MD5')
SR_mock.reset_mock()
# sha256
b.filtered_update(
'a',
indicator='a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9',
value={
'type': 'sha256',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA256')
SR_mock.reset_mock()
b.stop()
| apache-2.0 | -2,549,572,284,268,494,000 | 31.301187 | 138 | 0.540076 | false | 3.660837 | true | false | false |
gnarph/DIRT | DIRT.py | 1 | 7104 | #!/usr/bin/env python
"""
Main entrance point for DIRT
"""
import argparse
import os
import itertools
import importlib
from multiprocessing import Pool
import time
from models.document import Document
import preprocessing.preprocessor as preprocessor
import processing.processor as processor
from utilities import path
from utilities import logger
STANDARDIZER_PATH = 'preprocessing.language_standardizer.{}'
COMPARATOR_PATH = 'processing.comparators.{}'
class UnsupportedFunctionException(BaseException):
"""
Exception for functions that are not supported
"""
# TODO: is this actually used?
pass
def iter_files_in_file(filename):
"""
Generator over the file names contained in filename
Expects each file to be on its own line
"""
with open(filename) as f:
contents = f.read()
lines = contents.split('\n')
for line in lines:
if line and path.should_use_file(line):
yield line
def preprocess(args):
"""
Run processing step
"""
standardizer_path = STANDARDIZER_PATH.format(args.language)
standardizer = importlib.import_module(standardizer_path)
if os.path.isdir(args.input):
it = path.iter_files_in(args.input)
else:
it = iter_files_in_file(args.input)
for file_name in it:
pre = preprocessor.Preprocessor(file_name=file_name,
standardizer=standardizer,
input_dir=args.input,
output_dir=args.preprocessed_dir)
pre.process()
def process_parallel_worker(a, output_dir, gap_length, match_length, b, comparator):
"""
Worker for processing two files at a time in parallel
"""
comparator_path = COMPARATOR_PATH.format(comparator)
comparator = importlib.import_module(comparator_path)
pro = processor.Processor(output_dir=output_dir,
comparator=comparator,
gap_length=gap_length,
match_length=match_length,
percentage_match_length=None)
alpha = Document.from_json(a)
beta = Document.from_json(b)
pro.process(alpha_document=alpha, beta_document=beta)
def process_parallel(args, alpha_files, beta_files):
"""
Process on multiple threads/processes
"""
p = Pool()
compared = []
for a, b in itertools.product(alpha_files, beta_files):
this_set = sorted([a, b])
if a != b and this_set not in compared:
p.apply_async(process_parallel_worker, (a,
args.output_dir,
args.gap_length,
args.match_length,
b,
args.comparator))
compared.append(this_set)
p.close()
p.join()
return len(compared)
def process_serial(args, alpha_files, beta_files):
"""
Process on a single thread
"""
comparator_path = COMPARATOR_PATH.format(args.comparator)
comparator = importlib.import_module(comparator_path)
pro = processor.Processor(output_dir=args.output_dir,
comparator=comparator,
gap_length=args.gap_length,
match_length=args.match_length,
percentage_match_length=None)
compared = []
for a, b in itertools.product(alpha_files, beta_files):
this_set = sorted([a, b])
if a != b and this_set not in compared:
alpha = Document.from_json(a)
beta = Document.from_json(b)
pro.process(alpha_document=alpha, beta_document=beta)
compared.append(this_set)
return len(compared)
def process(args):
"""
Run processing step
"""
start = time.time()
alpha_files = path.iter_files_in(args.preprocessed_dir)
beta_files = path.iter_files_in(args.preprocessed_dir)
if args.parallel:
cnt = process_parallel(args, alpha_files, beta_files)
else:
cnt = process_serial(args, alpha_files, beta_files)
duration = time.time() - start
if duration == 0:
duration = 1
comparisons_per_sec = cnt/duration
logger.info('Processed {} files per second'.format(comparisons_per_sec))
def main(parsed_args):
if parsed_args.verbose:
logger.show_info()
preprocess(parsed_args)
process(parsed_args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='DIRT.py',
description='Find reused passages in a corpus of unicode text')
# TODO: avoid reprocessing
parser.add_argument('-i', '--input',
help='Directory containing input corpus',
# required=True,
type=str)
parser.add_argument('-pre', '--preprocessed_dir',
default='dirt_preprocessed',
help='Directory containing preprocessed corpus',
type=str)
parser.add_argument('-o', '--output_dir',
default='dirt_output',
help='Directory for output files',
type=str)
parser.add_argument('-l', '--language',
default='eng',
help='ISO 639-2 language code',
type=str)
parser.add_argument('-c', '--comparator',
default='simple',
help='comparator for processor',
type=str)
parser.add_argument('-gl', '--gap_length',
default=3,
help='Size of gaps between matches to be jumped',
type=int)
parser.add_argument('-ml', '--match_length',
default=10,
help='Minimum length of a match',
type=int)
parser.add_argument('-pml', '--percentage_match_length',
default=0,
help='Minimum length of match as a percentage of total'
'document length',
type=int)
parser.add_argument('-v', '--verbose',
help='Verbose',
action='count')
parser.add_argument('-gui',
help='Run Gui',
action='store_const',
const=True)
parser.add_argument('-p', '--parallel',
help='Run on multiple threads/processes',
action='store_const',
const=True)
parsed = parser.parse_args()
if parsed.input:
main(parsed)
else:
from dirtgui import main_window
if parsed.input:
main_window.main(parsed.output_dir)
else:
main_window.main(None)
| mit | -3,461,439,667,955,638,300 | 32.990431 | 100 | 0.537303 | false | 4.536398 | false | false | false |
anseljh/casebot-beepboop | bot/event_handler.py | 1 | 6650 | import json
import logging
import re
import requests
from string import Template
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# TODO: Actually do authentication with CL
# Stuff brought in from original casebot
CL_URL_TEMPLATE = Template("https://www.courtlistener.com/c/$reporter/$volume/$page/")
CL_FIND_URL_TEMPLATE = Template("https://www.courtlistener.com/api/rest/v3/search/?format=json&q=casename%3A($query)")
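# For example, substituting {'volume': '410', 'reporter': 'U.S.', 'page': '113'}
# (an illustrative citation) into CL_URL_TEMPLATE gives
# "https://www.courtlistener.com/c/U.S./410/113/".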
MINIMUM_VIABLE_CITATION_PATTERN = r"^(\d+)\s([A-Za-z0-9.\s]+)\s(\d+)$"
MINIMUM_VIABLE_CITATION_PATTERN_RE = re.compile(MINIMUM_VIABLE_CITATION_PATTERN)
FIND_PATTERN = r"find\s+(.+)$"
FIND_RE = re.compile(FIND_PATTERN)
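# A message such as "410 U.S. 113" matches MINIMUM_VIABLE_CITATION_PATTERN
# (volume "410", reporter "U.S.", page "113"), while "find roe v wade" matches
# FIND_PATTERN with "roe v wade" captured as the query (examples are
# illustrative).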
USER_AGENT="casebot https://github.com/anseljh/casebot-beepboop"
def handle_find(query):
"""
The `find` command searches CourtListener by case name.
https://github.com/anseljh/casebot/issues/3
"""
reply = None
url = CL_FIND_URL_TEMPLATE.substitute({'query': query})
request_headers = {'user-agent': USER_AGENT}
# Authenticate to CourtListener using token
# https://github.com/anseljh/casebot/issues/7
# cl_token = config.get('CourtListener').get('courtlistener_token')
# if cl_token is not None:
# request_headers['Authenticate'] = 'Token ' + cl_token
# print("Added CL Authentication Token header")
response = requests.get(url, headers=request_headers)
# Give some output on stdout
logger.debug(response)
logger.debug(response.headers)
logger.debug(response.url)
# Convert from JSON
response_data = response.json()
hits = response_data.get('count')
if hits > 0:
first = response_data.get('results')[0]
logger.debug(first)
url = "https://www.courtlistener.com" + first.get('absolute_url')
logger.debug(url)
name = first.get('caseName')
logger.debug(name)
year = first.get('dateFiled')[:4]
logger.debug(year)
citation = first.get('citation')[0]
logger.debug(citation)
court = first.get('court_citation_string')
logger.debug(court)
# msg = "CourtListener had %d hits for the query `%s`. Here's the first:\n"
# if court != 'SCOTUS':
# message.reply(msg + "%s, %s (%s %s)\n%s" % (hits, query, name, citation, court, year, url))
# else:
# message.reply(msg + "%s, %s (%s)\n%s" % (hits, query, name, citation, year, url))
if court != 'SCOTUS':
reply = "%s, %s (%s %s)\n%s" % (name, citation, court, year, url)
else:
reply = "%s, %s (%s)\n%s" % (name, citation, year, url)
else:
reply = "CourtListener had zero results for the query `%s`" % (query)
return reply
def handle_citation(message):
reply = None
re_result = MINIMUM_VIABLE_CITATION_PATTERN_RE.search(message)
if re_result:
volume, reporter, page = re_result.groups()
logger.debug("Volume: %s | Reporter: %s | Page: %s" % (volume, reporter, page))
# Look up using CourtListener /c tool
mapping = {'volume': volume, 'reporter': reporter, 'page': page}
url = CL_URL_TEMPLATE.substitute(mapping)
request_headers = {'user-agent': USER_AGENT}
response = requests.get(url, headers=request_headers)
# Give some output on stdout
logger.debug(response)
logger.debug(response.headers)
logger.debug(response.url)
# Send the message!
if response.status_code == 404:
reply = "Sorry, I can't find that citation in CourtListener."
else:
reply = response.url
else:
reply = "Bad citation."
return reply
class RtmEventHandler(object):
def __init__(self, slack_clients, msg_writer):
self.clients = slack_clients
self.msg_writer = msg_writer
def handle(self, event):
if 'type' in event:
self._handle_by_type(event['type'], event)
def _handle_by_type(self, event_type, event):
# See https://api.slack.com/rtm for a full list of events
if event_type == 'error':
# error
self.msg_writer.write_error(event['channel'], json.dumps(event))
elif event_type == 'message':
# message was sent to channel
self._handle_message(event)
elif event_type == 'channel_joined':
# you joined a channel
self.msg_writer.write_help_message(event['channel'])
elif event_type == 'group_joined':
# you joined a private group
self.msg_writer.write_help_message(event['channel'])
else:
pass
def _handle_message(self, event):
# Filter out messages from the bot itself, and from non-users (eg. webhooks)
if ('user' in event) and (not self.clients.is_message_from_me(event['user'])):
msg_txt = event['text']
if self.clients.is_bot_mention(msg_txt) or self._is_direct_message(event['channel']):
# e.g. user typed: "@pybot tell me a joke!"
if 'help' in msg_txt:
self.msg_writer.write_help_message(event['channel'])
elif re.search('hi|hey|hello|howdy', msg_txt):
self.msg_writer.write_greeting(event['channel'], event['user'])
# elif 'joke' in msg_txt:
# self.msg_writer.write_joke(event['channel'])
elif 'attachment' in msg_txt:
self.msg_writer.demo_attachment(event['channel'])
elif 'echo' in msg_txt:
self.msg_writer.send_message(event['channel'], msg_txt)
elif msg_txt.startswith('find'):
find_re_result = FIND_RE.search(msg_txt)
if find_re_result:
query = find_re_result.group(1)
find_result = handle_find(query)
if find_result:
self.msg_writer.send_message(event['channel'], find_result)
else:
logger.debug("No matches for query: %s" % (query))
else:
logger.error("Nothing for find_re_result!")
self.msg_writer.send_message(event['channel'], "Does not compute.")
else:
self.msg_writer.write_prompt(event['channel'])
def _is_direct_message(self, channel):
"""Check if channel is a direct message channel
Args:
channel (str): Channel in which a message was received
"""
return channel.startswith('D')
| mit | -5,213,683,317,538,506,000 | 37.439306 | 118 | 0.583008 | false | 3.694444 | false | false | false |
BlackHole/enigma2-obh10 | lib/python/Screens/ButtonSetup.py | 1 | 26304 | from GlobalActions import globalActionMap
from Components.ActionMap import ActionMap, HelpableActionMap
from Components.Button import Button
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from Components.SystemInfo import SystemInfo
from Components.config import config, ConfigSubsection, ConfigText, ConfigYesNo
from Components.PluginComponent import plugins
from Components.Sources.StaticText import StaticText
from Screens.ChoiceBox import ChoiceBox
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Tools.BoundFunction import boundFunction
from ServiceReference import ServiceReference
from enigma import eServiceReference, eActionMap
from Components.Label import Label
import os
ButtonSetupKeys = [ (_("Red"), "red", "Infobar/activateRedButton"),
(_("Red long"), "red_long", ""),
(_("Green long"), "green_long", ""),
(_("Yellow long"), "yellow_long", "Infobar/subtitleSelection"),
(_("Info (EPG)"), "info", "Infobar/InfoPressed/1"),
(_("Info (EPG) Long"), "info_long", "Infobar/showEventInfoPlugins/1"),
(_("Epg/Guide"), "epg", "Infobar/EPGPressed/1"),
(_("Epg/Guide long"), "epg_long", "Infobar/showEventGuidePlugins/1"),
(_("Left"), "cross_left", ""),
(_("Right"), "cross_right", ""),
(_("Left long"), "cross_left_long", ""),
(_("Right long"), "cross_right_long", "Infobar/seekFwdVod"),
(_("Up"), "cross_up", ""),
(_("Down"), "cross_down", ""),
(_("Channel up"), "channelup", ""),
(_("Channel down"), "channeldown", ""),
(_("TV"), "showTv", ""),
(_('TV long'), 'tv_long', ""),
(_("Radio"), "radio", ""),
(_("Radio long"), "radio_long", ""),
(_("Rec"), "rec", ""),
(_("Teletext"), "text", ""),
(_("Help"), "displayHelp", ""),
(_("Help long"), "displayHelp_long", ""),
(_("Subtitle"), "subtitle", ""),
(_("Subtitle Long"), "subtitle_long", ""),
(_("Menu"), "mainMenu", ""),
(_("List/Fav/PVR"), "list", ""),
(_("List/Fav/PVR") + " " + _("long"), "list_long", ""),
(_("List/File"), "file", ""),
(_("List/File") + " " + _("long"), "file_long", ""),
(_("Back/Recall"), "back", ""),
(_("Back/Recall") + " " + _("long"), "back_long", ""),
(_("Home"), "home", ""),
(_("End"), "end", ""),
(_("Next"), "next", ""),
(_("Previous"), "previous", ""),
(_("Audio"), "audio", ""),
(_("Play"), "play", ""),
(_("Playpause"), "playpause", ""),
(_("Stop"), "stop", ""),
(_("Pause"), "pause", ""),
(_("Rewind"), "rewind", ""),
(_("Fastforward"), "fastforward", ""),
(_("Skip back"), "skip_back", ""),
(_("Skip forward"), "skip_forward", ""),
(_("activatePiP"), "activatePiP", ""),
(_("activatePiP long"), "activatePiP_long", ""),
(_("Timer"), "timer", ""),
(_("Playlist"), "playlist", ""),
(_("Timeshift"), "timeshift", ""),
(_("Search/WEB"), "search", ""),
(_("Slow"), "slow", ""),
(_("Mark/Portal/Playlist"), "mark", ""),
(_("Sleep"), "sleep", ""),
(_("Power"), "power", ""),
(_("Power long"), "power_long", ""),
(_("HDMIin"), "HDMIin", "Infobar/HDMIIn"),
(_("HDMIin") + " " + _("long"), "HDMIin_long", (SystemInfo["LcdLiveTV"] and "Infobar/ToggleLCDLiveTV") or ""),
(_("Context"), "contextMenu", "Infobar/showExtensionSelection"),
(_("F1/LAN"), "f1", "Infobar/showNetworkMounts"),
(_("F1/LAN long"), "f1_long", ""),
(_("F2"), "f2", ""),
(_("F2 long"), "f2_long", ""),
(_("F3"), "f3", ""),
(_("F3 long"), "f3_long", ""), ]
config.misc.ButtonSetup = ConfigSubsection()
config.misc.ButtonSetup.additional_keys = ConfigYesNo(default=True)
for x in ButtonSetupKeys:
exec "config.misc.ButtonSetup." + x[1] + " = ConfigText(default='" + x[2] + "')"
def getButtonSetupFunctions():
ButtonSetupFunctions = []
twinPlugins = []
twinPaths = {}
pluginlist = plugins.getPlugins(PluginDescriptor.WHERE_EVENTINFO)
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path and 'selectedevent' not in plugin.__call__.func_code.co_varnames:
if plugin.path[plugin.path.rfind("Plugins"):] in twinPaths:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] += 1
else:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] = 1
ButtonSetupFunctions.append((plugin.name, plugin.path[plugin.path.rfind("Plugins"):] + "/" + str(twinPaths[plugin.path[plugin.path.rfind("Plugins"):]]), "EPG"))
twinPlugins.append(plugin.name)
pluginlist = plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU, PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO])
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path:
if plugin.path[plugin.path.rfind("Plugins"):] in twinPaths:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] += 1
else:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] = 1
ButtonSetupFunctions.append((plugin.name, plugin.path[plugin.path.rfind("Plugins"):] + "/" + str(twinPaths[plugin.path[plugin.path.rfind("Plugins"):]]), "Plugins"))
twinPlugins.append(plugin.name)
ButtonSetupFunctions.append((_("Show Grid EPG"), "Infobar/openGridEPG", "EPG"))
ButtonSetupFunctions.append((_("Main menu"), "Infobar/mainMenu", "InfoBar"))
ButtonSetupFunctions.append((_("Show help"), "Infobar/showHelp", "InfoBar"))
ButtonSetupFunctions.append((_("Show extension selection"), "Infobar/showExtensionSelection", "InfoBar"))
ButtonSetupFunctions.append((_("Zap down"), "Infobar/zapDown", "InfoBar"))
ButtonSetupFunctions.append((_("Zap up"), "Infobar/zapUp", "InfoBar"))
ButtonSetupFunctions.append((_("Show service list"), "Infobar/openServiceList", "InfoBar"))
ButtonSetupFunctions.append((_("Show service list or movies"), "Infobar/showServiceListOrMovies", "InfoBar"))
ButtonSetupFunctions.append((_("Show movies"), "Infobar/showMovies", "InfoBar"))
ButtonSetupFunctions.append((_("Restart last movie"), "Infobar/restartLastMovie", "InfoBar"))
ButtonSetupFunctions.append((_("Show favourites list"), "Infobar/openFavouritesList", "InfoBar"))
ButtonSetupFunctions.append((_("History back"), "Infobar/historyBack", "InfoBar"))
ButtonSetupFunctions.append((_("History next"), "Infobar/historyNext", "InfoBar"))
ButtonSetupFunctions.append((_("Show event info plugins"), "Infobar/showEventInfoPlugins", "EPG"))
ButtonSetupFunctions.append((_("Show event details"), "Infobar/openEventView", "EPG"))
ButtonSetupFunctions.append((_("Show Single EPG"), "Infobar/openSingleServiceEPG", "EPG"))
ButtonSetupFunctions.append((_("Show Multi EPG"), "Infobar/openMultiServiceEPG", "EPG"))
ButtonSetupFunctions.append((_("Show select audio track"), "Infobar/audioSelection", "InfoBar"))
ButtonSetupFunctions.append((_("Show subtitle selection"), "Infobar/subtitleSelection", "InfoBar"))
ButtonSetupFunctions.append((_("Toggle default subtitles"), "Infobar/toggleDefaultSubtitles", "InfoBar"))
ButtonSetupFunctions.append((_("Switch to radio mode"), "Infobar/showRadio", "InfoBar"))
ButtonSetupFunctions.append((_("Switch to TV mode"), "Infobar/showTv", "InfoBar"))
ButtonSetupFunctions.append((_("Instant record"), "Infobar/instantRecord", "InfoBar"))
ButtonSetupFunctions.append((_("Start instant recording"), "Infobar/startInstantRecording", "InfoBar"))
ButtonSetupFunctions.append((_("Activate timeshift end"), "Infobar/activateTimeshiftEnd", "InfoBar"))
ButtonSetupFunctions.append((_("Activate timeshift end and pause"), "Infobar/activateTimeshiftEndAndPause", "InfoBar"))
ButtonSetupFunctions.append((_("Start timeshift"), "Infobar/startTimeshift", "InfoBar"))
ButtonSetupFunctions.append((_("Stop timeshift"), "Infobar/stopTimeshift", "InfoBar"))
ButtonSetupFunctions.append((_("Start teletext"), "Infobar/startTeletext", "InfoBar"))
ButtonSetupFunctions.append((_("Show subservice selection"), "Infobar/subserviceSelection", "InfoBar"))
ButtonSetupFunctions.append((_("Letterbox zoom"), "Infobar/vmodeSelection", "InfoBar"))
ButtonSetupFunctions.append((_("Seekbar"), "Infobar/seekFwdVod", "InfoBar"))
if SystemInfo["PIPAvailable"]:
ButtonSetupFunctions.append((_("Show PIP"), "Infobar/showPiP", "InfoBar"))
ButtonSetupFunctions.append((_("Swap PIP"), "Infobar/swapPiP", "InfoBar"))
ButtonSetupFunctions.append((_("Move PIP"), "Infobar/movePiP", "InfoBar"))
ButtonSetupFunctions.append((_("Toggle PIP-ZAP"), "Infobar/togglePipzap", "InfoBar"))
ButtonSetupFunctions.append((_("Activate HbbTV (RedButton)"), "Infobar/activateRedButton", "InfoBar"))
if SystemInfo["HasHDMIin"]:
ButtonSetupFunctions.append((_("Toggle HDMI-In full screen"), "Infobar/HDMIInFull", "InfoBar"))
ButtonSetupFunctions.append((_("Toggle HDMI-In PiP"), "Infobar/HDMIInPiP", "InfoBar"))
if SystemInfo["LcdLiveTV"]:
ButtonSetupFunctions.append((_("Toggle LCD LiveTV"), "Infobar/ToggleLCDLiveTV", "InfoBar"))
ButtonSetupFunctions.append((_("Do nothing"), "Void", "InfoBar"))
if os.path.isdir("/usr/script"):
for x in [x for x in os.listdir("/usr/script") if x.endswith(".sh")]:
x = x[:-3]
ButtonSetupFunctions.append((_("Script") + " " + x, "Script/" + x, "Scripts"))
ButtonSetupFunctions.append((_("Button setup"), "Module/Screens.ButtonSetup/ButtonSetup", "Setup"))
ButtonSetupFunctions.append((_("Software update"), "Module/Screens.SoftwareUpdate/UpdatePlugin", "Setup"))
ButtonSetupFunctions.append((_("CI (Common Interface) Setup"), "Module/Screens.Ci/CiSelection", "Setup"))
ButtonSetupFunctions.append((_("Show stream clients"), "Module/Screens.StreamingClientsInfo/StreamingClientsInfo", "Setup"))
ButtonSetupFunctions.append((_("Manual scan"), "Module/Screens.ScanSetup/ScanSetup", "Scanning"))
ButtonSetupFunctions.append((_("Automatic scan"), "Module/Screens.ScanSetup/ScanSimple", "Scanning"))
for plugin in plugins.getPluginsForMenu("scan"):
ButtonSetupFunctions.append((plugin[0], "MenuPlugin/scan/" + plugin[2], "Scanning"))
ButtonSetupFunctions.append((_("Network Setup"), "Module/Screens.NetworkSetup/NetworkAdapterSelection", "Setup"))
ButtonSetupFunctions.append((_("Network menu"), "Infobar/showNetworkMounts", "Setup"))
ButtonSetupFunctions.append((_("Plugin browser"), "Module/Screens.PluginBrowser/PluginBrowser", "Setup"))
ButtonSetupFunctions.append((_("Channel info"), "Module/Screens.ServiceInfo/ServiceInfo", "Setup"))
ButtonSetupFunctions.append((_("Timers"), "Module/Screens.TimerEdit/TimerEditList", "Setup"))
ButtonSetupFunctions.append((_("Autotimer overview"), "Infobar/showAutoTimerList", "Setup"))
for plugin in plugins.getPluginsForMenu("system"):
if plugin[2]:
ButtonSetupFunctions.append((plugin[0], "MenuPlugin/system/" + plugin[2], "Setup"))
ButtonSetupFunctions.append((_("Power menu"), "Menu/shutdown", "Power"))
ButtonSetupFunctions.append((_("Standby"), "Module/Screens.Standby/Standby", "Power"))
ButtonSetupFunctions.append((_("Restart"), "Module/Screens.Standby/TryQuitMainloop/2", "Power"))
ButtonSetupFunctions.append((_("Restart GUI"), "Module/Screens.Standby/TryQuitMainloop/3", "Power"))
ButtonSetupFunctions.append((_("Deep standby"), "Module/Screens.Standby/TryQuitMainloop/1", "Power"))
ButtonSetupFunctions.append((_("Usage setup"), "Setup/usage", "Setup"))
ButtonSetupFunctions.append((_("User interface settings"), "Setup/userinterface", "Setup"))
ButtonSetupFunctions.append((_("Recording and playback settings"), "Setup/recording", "Setup"))
ButtonSetupFunctions.append((_("Skin setup"), "Module/Screens.SkinSelector/SkinSelector", "Setup"))
ButtonSetupFunctions.append((_("Reload skin"), "Infobar/reloadSkin", "Setup"))
ButtonSetupFunctions.append((_("Harddisk setup"), "Setup/harddisk", "Setup"))
ButtonSetupFunctions.append((_("Subtitles settings"), "Setup/subtitlesetup", "Setup"))
return ButtonSetupFunctions
class ButtonSetup(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.setTitle(_("Button Setup"))
self['description'] = Label(_('On your remote, click on the button you want to change'))
self.session = session
self.list = []
self.ButtonSetupFunctions = getButtonSetupFunctions()
for x in ButtonSetupKeys:
self.list.append(ChoiceEntryComponent('', (_(x[0]), x[1])))
self["list"] = ChoiceList(list=self.list[:config.misc.ButtonSetup.additional_keys.value and len(ButtonSetupKeys) or 10], selection=0)
self["choosen"] = ChoiceList(list=[])
self.getFunctions()
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.close,
}, -1)
self["ButtonSetupButtonActions"] = ButtonSetupActionMap(["ButtonSetupActions"], dict((x[1], self.ButtonSetupGlobal) for x in ButtonSetupKeys))
self.longkeyPressed = False
self.onLayoutFinish.append(self.__layoutFinished)
self.onExecBegin.append(self.getFunctions)
self.onShown.append(self.disableKeyMap)
self.onClose.append(self.enableKeyMap)
def __layoutFinished(self):
self["choosen"].selectionEnabled(0)
def disableKeyMap(self):
globalActionMap.setEnabled(False)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 0)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 1)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 4)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 5)
def enableKeyMap(self):
globalActionMap.setEnabled(True)
eActionMap.getInstance().bindKey("keymap.xml", "generic", 103, 5, "ListboxActions", "moveUp")
eActionMap.getInstance().bindKey("keymap.xml", "generic", 108, 5, "ListboxActions", "moveDown")
eActionMap.getInstance().bindKey("keymap.xml", "generic", 105, 5, "ListboxActions", "pageUp")
eActionMap.getInstance().bindKey("keymap.xml", "generic", 106, 5, "ListboxActions", "pageDown")
def ButtonSetupGlobal(self, key):
if self.longkeyPressed:
self.longkeyPressed = False
else:
index = 0
for x in self.list[:config.misc.ButtonSetup.additional_keys.value and len(ButtonSetupKeys) or 10]:
if key == x[0][1]:
self["list"].moveToIndex(index)
if key.endswith("_long"):
self.longkeyPressed = True
break
index += 1
self.getFunctions()
self.session.open(ButtonSetupSelect, self["list"].l.getCurrentSelection())
def getFunctions(self):
key = self["list"].l.getCurrentSelection()[0][1]
if key:
selected = []
for x in getattr(config.misc.ButtonSetup, key).value.split(','):
function = next((function for function in self.ButtonSetupFunctions if function[1] == x), None)
if function:
selected.append(ChoiceEntryComponent('', ((function[0]), function[1])))
self["choosen"].setList(selected)
class ButtonSetupSelect(Screen):
def __init__(self, session, key):
Screen.__init__(self, session)
self.skinName = "ButtonSetupSelect"
self['description'] = Label(_('Select the desired function and click on "OK" to assign it. Use "CH+/-" to toggle between the lists. Select an assigned function and click on "OK" to de-assign it. Use "Next/Previous" to change the order of the assigned functions.'))
self.session = session
self.key = key
self.setTitle(_("Button setup for") + ": " + key[0][0])
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("Save"))
self.mode = "list"
self.ButtonSetupFunctions = getButtonSetupFunctions()
self.config = getattr(config.misc.ButtonSetup, key[0][1])
self.expanded = []
self.selected = []
for x in self.config.value.split(','):
function = next((function for function in self.ButtonSetupFunctions if function[1] == x), None)
if function:
self.selected.append(ChoiceEntryComponent('', ((function[0]), function[1])))
self.prevselected = self.selected[:]
self["choosen"] = ChoiceList(list=self.selected, selection=0)
self["list"] = ChoiceList(list=self.getFunctionList(), selection=0)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "KeyboardInputActions"],
{
"ok": self.keyOk,
"cancel": self.cancel,
"red": self.cancel,
"green": self.save,
"up": self.keyUp,
"down": self.keyDown,
"left": self.keyLeft,
"right": self.keyRight,
"pageUp": self.toggleMode,
"pageDown": self.toggleMode,
"shiftUp": self.moveUp,
"shiftDown": self.moveDown,
}, -1)
self.onShown.append(self.enableKeyMap)
self.onClose.append(self.disableKeyMap)
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
self["choosen"].selectionEnabled(0)
def disableKeyMap(self):
globalActionMap.setEnabled(False)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 0)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 1)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 4)
eActionMap.getInstance().unbindNativeKey("ListboxActions", 5)
def enableKeyMap(self):
globalActionMap.setEnabled(True)
eActionMap.getInstance().bindKey("keymap.xml", "generic", 103, 5, "ListboxActions", "moveUp")
eActionMap.getInstance().bindKey("keymap.xml", "generic", 108, 5, "ListboxActions", "moveDown")
eActionMap.getInstance().bindKey("keymap.xml", "generic", 105, 5, "ListboxActions", "pageUp")
eActionMap.getInstance().bindKey("keymap.xml", "generic", 106, 5, "ListboxActions", "pageDown")
def getFunctionList(self):
functionslist = []
catagories = {}
for function in self.ButtonSetupFunctions:
if function[2] not in catagories:
catagories[function[2]] = []
catagories[function[2]].append(function)
for catagorie in sorted(list(catagories)):
if catagorie in self.expanded:
functionslist.append(ChoiceEntryComponent('expanded', ((catagorie), "Expander")))
for function in catagories[catagorie]:
functionslist.append(ChoiceEntryComponent('verticalline', ((function[0]), function[1])))
else:
functionslist.append(ChoiceEntryComponent('expandable', ((catagorie), "Expander")))
return functionslist
def toggleMode(self):
if self.mode == "list" and self.selected:
self.mode = "choosen"
self["choosen"].selectionEnabled(1)
self["list"].selectionEnabled(0)
elif self.mode == "choosen":
self.mode = "list"
self["choosen"].selectionEnabled(0)
self["list"].selectionEnabled(1)
def keyOk(self):
if self.mode == "list":
currentSelected = self["list"].l.getCurrentSelection()
if currentSelected[0][1] == "Expander":
if currentSelected[0][0] in self.expanded:
self.expanded.remove(currentSelected[0][0])
else:
self.expanded.append(currentSelected[0][0])
self["list"].setList(self.getFunctionList())
else:
if currentSelected[:2] in self.selected:
self.selected.remove(currentSelected[:2])
else:
self.selected.append(currentSelected[:2])
elif self.selected:
self.selected.remove(self["choosen"].l.getCurrentSelection())
if not self.selected:
self.toggleMode()
self["choosen"].setList(self.selected)
def keyLeft(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.pageUp)
def keyRight(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.pageDown)
def keyUp(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.moveUp)
def keyDown(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.moveDown)
def moveUp(self):
self.moveChoosen(self.keyUp)
def moveDown(self):
self.moveChoosen(self.keyDown)
def moveChoosen(self, direction):
if self.mode == "choosen":
currentIndex = self["choosen"].getSelectionIndex()
swapIndex = (currentIndex + (direction == self.keyDown and 1 or -1)) % len(self["choosen"].list)
self["choosen"].list[currentIndex], self["choosen"].list[swapIndex] = self["choosen"].list[swapIndex], self["choosen"].list[currentIndex]
self["choosen"].setList(self["choosen"].list)
direction()
else:
return 0
def save(self):
configValue = []
for x in self.selected:
configValue.append(x[0][1])
self.config.value = ",".join(configValue)
self.config.save()
self.close()
def cancel(self):
if self.selected != self.prevselected:
self.session.openWithCallback(self.cancelCallback, MessageBox, _("are you sure you want to cancel all the changes"), default=False)
else:
self.close()
def cancelCallback(self, answer):
answer and self.close()
class ButtonSetupActionMap(ActionMap):
def action(self, contexts, action):
if action in tuple(x[1] for x in ButtonSetupKeys) and action in self.actions:
res = self.actions[action](action)
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class helpableButtonSetupActionMap(HelpableActionMap):
def action(self, contexts, action):
if action in tuple(x[1] for x in ButtonSetupKeys) and action in self.actions:
res = self.actions[action](action)
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class InfoBarButtonSetup():
def __init__(self):
self["ButtonSetupButtonActions"] = helpableButtonSetupActionMap(self, "ButtonSetupActions",
dict((x[1], (self.ButtonSetupGlobal, boundFunction(self.getHelpText, x[1]))) for x in ButtonSetupKeys), -10)
self.longkeyPressed = False
self.onExecEnd.append(self.clearLongkeyPressed)
def clearLongkeyPressed(self):
self.longkeyPressed = False
def getKeyFunctions(self, key):
if key in ("play", "playpause", "Stop", "stop", "pause", "rewind", "next", "previous", "fastforward", "skip_back", "skip_forward") and (self.__class__.__name__ == "MoviePlayer" or hasattr(self, "timeshiftActivated") and self.timeshiftActivated()):
return False
selection = getattr(config.misc.ButtonSetup, key).value.split(',')
selected = []
for x in selection:
if x.startswith("ZapPanic"):
selected.append(((_("Panic to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x))
elif x.startswith("Zap"):
selected.append(((_("Zap to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x))
elif x:
function = next((function for function in getButtonSetupFunctions() if function[1] == x), None)
if function:
selected.append(function)
return selected
def getHelpText(self, key):
selected = self.getKeyFunctions(key)
if not selected:
return
return pgettext("ButtonSetup help separator", '/').join(sel[0] for sel in selected)
def ButtonSetupGlobal(self, key):
if self.longkeyPressed:
self.longkeyPressed = False
else:
selected = self.getKeyFunctions(key)
if not selected:
return 0
elif len(selected) == 1:
if key.endswith("_long"):
self.longkeyPressed = True
return self.execButtonSetup(selected[0])
else:
key = tuple(x[0] for x in ButtonSetupKeys if x[1] == key)[0]
self.session.openWithCallback(self.execButtonSetup, ChoiceBox, _("ButtonSetup") + " " + key, selected)
def execButtonSetup(self, selected):
if selected:
selected = selected[1].split("/")
if selected[0] == "Plugins":
twinPlugins = []
twinPaths = {}
pluginlist = plugins.getPlugins(PluginDescriptor.WHERE_EVENTINFO)
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path and 'selectedevent' not in plugin.__call__.func_code.co_varnames:
if plugin.path[plugin.path.rfind("Plugins"):] in twinPaths:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] += 1
else:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] = 1
if plugin.path[plugin.path.rfind("Plugins"):] + "/" + str(twinPaths[plugin.path[plugin.path.rfind("Plugins"):]]) == "/".join(selected):
self.runPlugin(plugin)
return
twinPlugins.append(plugin.name)
pluginlist = plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU, PluginDescriptor.WHERE_EXTENSIONSMENU])
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path:
if plugin.path[plugin.path.rfind("Plugins"):] in twinPaths:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] += 1
else:
twinPaths[plugin.path[plugin.path.rfind("Plugins"):]] = 1
if plugin.path[plugin.path.rfind("Plugins"):] + "/" + str(twinPaths[plugin.path[plugin.path.rfind("Plugins"):]]) == "/".join(selected):
self.runPlugin(plugin)
return
twinPlugins.append(plugin.name)
elif selected[0] == "MenuPlugin":
for plugin in plugins.getPluginsForMenu(selected[1]):
if plugin[2] == selected[2]:
self.runPlugin(plugin[1])
return
elif selected[0] == "Infobar":
if hasattr(self, selected[1]):
exec "self." + ".".join(selected[1:]) + "()"
else:
return 0
elif selected[0] == "Module":
try:
exec "from " + selected[1] + " import *"
exec "self.session.open(" + ",".join(selected[2:]) + ")"
except Exception as e:
print "[ButtonSetup] error during executing module %s, screen %s, %s" % (selected[1], selected[2], e)
import traceback
traceback.print_exc()
elif selected[0] == "Setup":
exec "from Screens.Setup import *"
exec "self.session.open(Setup, \"" + selected[1] + "\")"
elif selected[0].startswith("Zap"):
if selected[0] == "ZapPanic":
self.servicelist.history = []
self.pipShown() and self.showPiP()
self.servicelist.servicelist.setCurrent(eServiceReference("/".join(selected[1:])))
self.servicelist.zap(enable_pipzap=True)
if hasattr(self, "lastservice"):
self.lastservice = eServiceReference("/".join(selected[1:]))
self.close()
else:
self.show()
from Screens.MovieSelection import defaultMoviePath
moviepath = defaultMoviePath()
if moviepath:
config.movielist.last_videodir.value = moviepath
elif selected[0] == "Script":
command = '/usr/script/' + selected[1] + ".sh"
from Screens.Console import Console
exec "self.session.open(Console,_(selected[1]),[command])"
elif selected[0] == "Menu":
from Screens.Menu import MainMenu, mdom
root = mdom.getroot()
for x in root.findall("menu"):
y = x.find("id")
if y is not None:
id = y.get("val")
if id and id == selected[1]:
menu_screen = self.session.open(MainMenu, x)
break
def showServiceListOrMovies(self):
if hasattr(self, "openServiceList"):
self.openServiceList()
elif hasattr(self, "showMovies"):
self.showMovies()
def ToggleLCDLiveTV(self):
config.lcd.showTv.value = not config.lcd.showTv.value
def reloadSkin(self):
self.session.reloadSkin()
| gpl-2.0 | 7,407,642,903,577,461,000 | 44.905759 | 266 | 0.693697 | false | 3.283895 | true | false | false |
dvdt/rpi-thermostat | temp_logger.py | 1 | 1064 | #!/usr/bin/python
try:
import Adafruit_DHT
except ImportError, e:
    # Minimal stand-in used when the real Adafruit_DHT library is not installed
    # (e.g. when developing off the Pi). It mirrors the library's
    # read_retry(sensor, pin) -> (humidity, temperature) interface.
    class Adafruit_DHTMOCK():
        DHT22 = 22
        def read_retry(self, sensor, pin):
            return 25, 50
Adafruit_DHT = Adafruit_DHTMOCK()
import requests
import logging
from apscheduler.schedulers.background import BlockingScheduler
THERMOSTAT_URI = 'http://192.168.1.214:5000/api/v1/temperature/'
def main():
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, '17')
if humidity is not None and temperature is not None:
requests.post(THERMOSTAT_URI, data=dict(temperature=temperature, humidity=humidity))
logger.warn('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
logger.error('Failed to get reading. Try again!')
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN, format='%(levelname)s - %(asctime)s %(message)s')
logger = logging.getLogger('main')
scheduler = BlockingScheduler()
scheduler.add_job(main, 'interval', seconds=60)
logger.warn('starting scheduler')
scheduler.start()
| mit | 3,120,118,378,927,795,000 | 33.322581 | 93 | 0.68891 | false | 3.273846 | false | false | false |
SuperDARNCanada/placeholderOS | experiments/testing_archive/test_lag_table_bad.py | 2 | 2059 | #!/usr/bin/python
# write an experiment that raises an exception
import sys
import os
import itertools
BOREALISPATH = os.environ['BOREALISPATH']
sys.path.append(BOREALISPATH)
import experiments.superdarn_common_fields as scf
from experiment_prototype.experiment_prototype import ExperimentPrototype
class TestExperiment(ExperimentPrototype):
def __init__(self):
cpid = 1
super(TestExperiment, self).__init__(cpid)
if scf.IS_FORWARD_RADAR:
beams_to_use = scf.STD_16_FORWARD_BEAM_ORDER
else:
beams_to_use = scf.STD_16_REVERSE_BEAM_ORDER
if scf.opts.site_id in ["cly", "rkn", "inv"]:
num_ranges = scf.POLARDARN_NUM_RANGES
if scf.opts.site_id in ["sas", "pgr"]:
num_ranges = scf.STD_NUM_RANGES
slice_1 = { # slice_id = 0, there is only one slice.
"pulse_sequence": scf.SEQUENCE_7P,
"tau_spacing": scf.TAU_SPACING_7P,
"pulse_len": scf.PULSE_LEN_45KM,
"num_ranges": num_ranges,
"first_range": scf.STD_FIRST_RANGE,
"intt": 3500, # duration of an integration, in ms
"beam_angle": scf.STD_16_BEAM_ANGLE,
"beam_order": beams_to_use,
"scanbound": [i * 3.5 for i in range(len(beams_to_use))], #1 min scan
"txfreq" : scf.COMMON_MODE_FREQ_1, #kHz
"acf": True,
"xcf": True, # cross-correlation processing
"acfint": True, # interferometer acfs
}
lag_table = list(itertools.combinations(slice_1['pulse_sequence'], 2))
lag_table.append([slice_1['pulse_sequence'][0], slice_1[
'pulse_sequence'][0]]) # lag 0
lag_table.append([99,0]) # Should fail on this!!
# sort by lag number
lag_table = sorted(lag_table, key=lambda x: x[1] - x[0])
lag_table.append([slice_1['pulse_sequence'][-1], slice_1[
'pulse_sequence'][-1]]) # alternate lag 0
slice_1['lag_table'] = lag_table
self.add_slice(slice_1)
| gpl-3.0 | 5,829,602,201,352,557,000 | 34.5 | 81 | 0.583293 | false | 3.212168 | false | false | false |
chungjjang80/FRETBursts | fretbursts/phtools/phrates_numba.py | 2 | 5001 | """
Numba-optimized version of functions to compute KDE-based photon rates.
"""
from __future__ import division
import numpy as np
import numba
from math import exp, fabs
@numba.jit
def kde_laplace_numba(timestamps, tau, time_axis=None):
"""Computes exponential KDE for `timestamps` evaluated at `time_axis`.
"""
if time_axis is None:
time_axis = timestamps
t_size = time_axis.size
timestamps_size = timestamps.size
rates = np.zeros((t_size,), dtype=np.float64)
tau_lim = 5 * tau
ipos, ineg = 0, 0 # indexes for timestamps
for it, t in enumerate(time_axis):
while ipos < timestamps_size and timestamps[ipos] - t < tau_lim:
ipos += 1
while ineg < timestamps_size and t - timestamps[ineg] > tau_lim:
ineg += 1
for itx in range(ineg, ipos):
rates[it] += exp(-fabs(timestamps[itx] - t)/tau)
return rates
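
def _example_kde_laplace_usage():
    """Illustrative usage sketch only -- not part of the original module.

    Evaluates the exponential KDE above on a small, sorted timestamp array and
    converts the raw kernel sums to rates by dividing by 2*tau, the
    conventional normalization mentioned in the `kde_laplace_nph` docstring
    below. All numbers here are made up.
    """
    timestamps = np.arange(0, 1000, 50, dtype=np.int64)  # e.g. timestamps in clock ticks
    tau = 100.
    raw = kde_laplace_numba(timestamps, tau)
    rates = raw / (2 * tau)  # rate per clock unit; rescale by the clock period to get Hz
    return rates
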
@numba.jit
def kde_gaussian_numba(timestamps, tau, time_axis=None):
"""Computes Gaussian KDE for `timestamps` evaluated at `time_axis`.
"""
if time_axis is None:
time_axis = timestamps
timestamps_size = timestamps.size
rates = np.zeros((time_axis.size,), dtype=np.float64)
tau_lim = 3 * tau # 3 tau = 99.7 % of the Gaussian
tau2 = 2 * (tau**2)
ipos, ineg = 0, 0 # indexes for timestamps
for it, t in enumerate(time_axis):
while ipos < timestamps_size and timestamps[ipos] - t < tau_lim:
ipos += 1
while ineg < timestamps_size and t - timestamps[ineg] > tau_lim:
ineg += 1
for itx in range(ineg, ipos):
rates[it] += exp(-((timestamps[itx] - t)**2)/tau2)
return rates
@numba.jit
def kde_rect_numba(timestamps, tau, time_axis=None):
"""Computes rectangular KDE for `timestamps` evaluated at `time_axis`.
"""
if time_axis is None:
time_axis = timestamps
timestamps_size = timestamps.size
rates = np.zeros((time_axis.size,), dtype=np.float64)
tau_lim = tau / 2
ipos, ineg = 0, 0 # indexes for timestamps
for it, t in enumerate(time_axis):
while ipos < timestamps_size and timestamps[ipos] - t < tau_lim:
ipos += 1
while ineg < timestamps_size and t - timestamps[ineg] > tau_lim:
ineg += 1
rates[it] = ipos - ineg
return rates
##
# "self" functions: evaluating KDE on the same position as the timestamps
#
@numba.jit
def kde_laplace_self_numba(ph, tau):
"""Computes exponential KDE for `timestamps` evaluated at `timestamps`.
"""
ph_size = ph.size
ipos, ineg = 0, 0
rates = np.zeros((ph_size,), dtype=np.float64)
nph = np.zeros((ph_size,), dtype=np.int16)
tau_lim = 5*tau
for i, t in enumerate(ph):
while ipos < ph_size and ph[ipos] - t < tau_lim:
ipos += 1
while t - ph[ineg] > tau_lim:
ineg += 1
for itx in range(ineg, ipos):
rates[i] += exp(-fabs(ph[itx]-t)/tau)
nph[i] += 1
return rates, nph
##
# Special functions
#
@numba.jit
def kde_laplace_nph(timestamps, tau, time_axis=None):
"""Computes exponential KDE for `timestamps` evaluated at `time_axis`.
Computes KDE rates of `timestamps` and number of photon used to compute
each rate. Number of photons are the one in the 10*tau range around the
current time.
The kernel used is a symmetric-exponential (i.e. laplace distribution)::
kernel = exp( -|t - t0| / tau)
The rate is computed for each time in `time_axis`.
When ``time_axis`` is None them ``timestamps`` is used also as time axis.
Arguments:
timestamps (array): arrays of photon timestamps
tau (float): time constant of the exponential kernel
time_axis (array or None): array of time points where the rate is
computed. If None, uses `timestamps` as time axis.
Returns:
2-element tuple containing
- **rates** (*array*): the unnormalized rates (just the sum of the
exponential kernels). To obtain rates in Hz divide the
array by `2*tau` (or other conventional `x*tau` duration).
- **nph** (*array*): number of photons in -5*tau..5*tau window
for each timestamp. Proportional to the rate computed
with KDE and rectangular kernel.
"""
if time_axis is None:
time_axis = timestamps
t_size = time_axis.size
timestamps_size = timestamps.size
rates = np.zeros((t_size,), dtype=np.float64)
nph = np.zeros((t_size,), dtype=np.int16)
tau_lim = 5 * tau
ipos, ineg = 0, 0 # indexes for timestamps
for it, t in enumerate(time_axis):
while ipos < timestamps_size and timestamps[ipos] - t < tau_lim:
ipos += 1
while ineg < timestamps_size and t - timestamps[ineg] > tau_lim:
ineg += 1
for itx in range(ineg, ipos):
rates[it] += exp(-fabs(timestamps[itx] - t)/tau)
nph[it] += 1
return rates, nph
| gpl-2.0 | -5,802,954,573,636,648,000 | 30.853503 | 77 | 0.610078 | false | 3.444215 | false | false | false |
DShaience/zapPrice | database/dbFiles/dbFilesOperations.py | 1 | 2672 | import warnings
import numpy as np
import pandas as pd
def readAndConvertCSVToDbFormatDf(filePath: str, nrows: int = None) -> pd.DataFrame:
"""
:param filePath: filename with product data
:param nrows: limit the number of rows to read (usually used for debug)
:return: this is an adaptor between the way the file information is saves, and how it is introduced to the database.
    It renames columns, splits min/max prices out of the prices-array string, and deals with "None" strings
"""
fileColumnsToDbAdaptor = {
'productURL': 'link',
'productId': 'productId',
'productName': 'description',
}
colsToReturn = ['productId', 'link', 'description', 'min_price', 'max_price']
if nrows is None:
dfData = pd.read_csv(filePath, encoding="utf-8-sig", compression='infer')
else:
dfData = pd.read_csv(filePath, encoding="utf-8-sig", nrows=nrows, compression='infer')
dfData.drop_duplicates(inplace=True)
dfData.reset_index(drop=True, inplace=True)
dfData.rename(columns=fileColumnsToDbAdaptor, inplace=True)
# todo: add sanitize description here as well
# analyzing price array
descriptionAsList = list(dfData['description'].values)
descriptionSanitized = [str(desc).replace(r'&rlm', '').replace(';', ' ').replace(',', ' ').replace(r'&', ' ').replace('\\', '/') for desc in descriptionAsList]
dfData['description'] = descriptionSanitized
dfData['min_price'] = np.nan
dfData['max_price'] = np.nan
dfData.replace({'pricesArray': {'None': np.nan}}, inplace=True)
dfData['pricesArray'] = dfData['pricesArray'].str.replace('[', '')
dfData['pricesArray'] = dfData['pricesArray'].str.replace(']', '')
pricesListOfStrs = dfData['pricesArray'].to_list()
minPricesList = []
maxPricesList = []
for index, prices in enumerate(pricesListOfStrs):
if prices is np.nan:
minPricesList.append(np.nan)
maxPricesList.append(np.nan)
else:
pricesAsArray = [float(price.strip()) for price in prices.split(',')]
if len(pricesAsArray) == 1:
minPricesList.append(pricesAsArray[0])
maxPricesList.append(pricesAsArray[0])
elif len(pricesAsArray) == 2:
minPricesList.append(min(pricesAsArray))
maxPricesList.append(max(pricesAsArray))
else:
raise ValueError("len(prices) > 2. This shouldn't happen. Possibly a bug: %s" % prices)
dfData['min_price'] = minPricesList
dfData['max_price'] = maxPricesList
dfData = dfData.where((pd.notnull(dfData)), None)
return dfData[colsToReturn]
| gpl-3.0 | -5,428,207,015,885,456,000 | 40.107692 | 163 | 0.64521 | false | 3.650273 | false | false | false |
sunyihuan326/DeltaLab | daily_learning/transfer_learning/tt.py | 1 | 2031 | # coding:utf-8
'''
created on 2018/8/31
@author:sunyihuan
'''
import os
from tensorflow.python import pywrap_tensorflow
import tensorflow as tf
import numpy as np
# hair_length_graph = tf.Graph()
#
# # model_path = "/Users/sunyihuan/Desktop/parameters/hair_length/second/complex75.03_simple78.63/model.ckpt-23" # check4
model_path = "/Users/sunyihuan/Desktop/parameters/hair_length/second/complex77_simple70/model.ckpt-39" # check0
#
# with hair_length_graph.as_default():
# saver = tf.train.import_meta_graph("{}.meta".format(model_path))
# sess = tf.InteractiveSession(graph=hair_length_graph)
# saver.restore(sess, model_path)
# var_to_shape_map = sess.get_variable_to_shape_map()
# for key in var_to_shape_map:
# print("tensor_name: ", key)
# print(sess.get_tensor(key))
# inspect the tensors stored in the ckpt
# from tensorflow.python.tools import inspect_checkpoint as chkp
# chkp.print_tensors_in_checkpoint_file(file_name=model_path, tensor_name='', all_tensors=False, all_tensor_names=True)
a = tf.constant(3.0, dtype=tf.float32)
b = tf.constant(4.0)
total = a + b
# print(a)
# print(b)
# print(total)
sess = tf.Session()
# print(sess.run(total))
x = tf.placeholder(tf.float32, shape=[None, 3])
linear_model = tf.layers.Dense(units=1)
print(linear_model)
y = linear_model(x)
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(y, {x: [[1, 2, 3],[4, 5, 6],[7,8,9]]}))
features = {
'sales' : [[5], [10], [8], [9]],
'department': ['sports', 'sports', 'gardening', 'gardening']}
department_column = tf.feature_column.categorical_column_with_vocabulary_list(
'department', ['sports', 'gardening'])
department_column = tf.feature_column.indicator_column(department_column)
columns = [
tf.feature_column.numeric_column('sales'),
department_column
]
inputs = tf.feature_column.input_layer(features, columns)
var_init = tf.global_variables_initializer()
table_init = tf.tables_initializer()
sess = tf.Session()
sess.run((var_init, table_init))
print(sess.run(inputs))
| mit | -9,110,520,138,812,644,000 | 28.75 | 122 | 0.701928 | false | 2.81363 | false | false | false |
mmasdeu/darmonpoints | darmonpoints/integrals.py | 1 | 9942 | ######################
## ##
## INTEGRATION ##
## ##
######################
from sage.rings.all import RealField,ComplexField,RR,QuadraticField,PolynomialRing,LaurentSeriesRing,PowerSeriesRing, Infinity,Zmod
from sage.all import prod
from sage.parallel.decorate import fork,parallel
from sage.misc.getusage import get_memory_usage
from sage.structure.sage_object import SageObject
from sage.arith.misc import algdep
from sage.misc.misc import cputime
from sage.misc.verbose import verbose
from collections import defaultdict
from itertools import product,chain,groupby,islice,tee,starmap
from operator import mul
from .util import *
from .sarithgroup import BTEdge
from .limits import num_evals,find_center
def act_on_polynomial(P,num,den,N = None):
if N is None:
N = P.degree()
R = num.parent()
ans = R(0)
numvec = [R(1)]
denvec = [R(1)]
for i in range(N):
numvec.append(num*numvec[-1])
denvec.append(den*denvec[-1])
Plist = P.list()
for i in range(N+1):
ai = Plist[i]
ans += ai*numvec[i]*denvec[N-i]
return ans
def double_integral_zero_infty(Phi,tau1,tau2):
p = Phi.parent().prime()
K = tau1.parent()
R = PolynomialRing(K,'x')
x = R.gen()
R1 = PowerSeriesRing(K,'r1')
r1 = R1.gen()
Phi_liftee = Phi._liftee
try:
R1.set_default_prec(Phi.precision_absolute())
except AttributeError:
R1.set_default_prec(Phi.precision_relative())
level = Phi._map._manin.level()
E0inf = [M2Z([0,-1,level,0])]
E0Zp = [M2Z([p,a,0,1]) for a in range(p)]
predicted_evals = num_evals(tau1,tau2)
a,b,c,d = find_center(p,level,tau1,tau2).list()
h = M2Z([a,b,c,d])
E = [h*e0 for e0 in E0Zp + E0inf]
resadd = 0
resmul = 1
total_evals = 0
percentage = QQ(0)
ii = 0
f = (x-tau2)/(x-tau1)
while len(E) > 0:
ii += 1
increment = QQ((100-percentage)/len(E))
verbose('remaining %s percent (and done %s of %s evaluations)'%(RealField(10)(100-percentage),total_evals,predicted_evals))
newE = []
for e in E:
a,b,c,d = e.list()
assert ZZ(c) % level == 0
try:
y0 = f((a*r1+b)/(c*r1+d))
val = y0(y0.parent().base_ring()(0))
if all([xx.valuation(p)>0 for xx in (y0/val - 1).list()]):
if total_evals % 100 == 0:
Phi._map._codomain.clear_cache()
pol = val.log(p_branch = 0)+((y0.derivative()/y0).integral())
V = [0] * pol.valuation() + pol.shift(-pol.valuation()).list()
try:
phimap = Phi._map(M2Z([b,d,a,c]))
except OverflowError:
print(a,b,c,d)
raise OverflowError('Matrix too large?')
# mu_e0 = ZZ(phimap.moment(0).rational_reconstruction())
mu_e0 = ZZ(Phi_liftee._map(M2Z([b,d,a,c])).moment(0))
mu_e = [mu_e0] + [phimap.moment(o).lift() for o in range(1,len(V))]
resadd += sum(starmap(mul,zip(V,mu_e)))
resmul *= val**mu_e0
percentage += increment
total_evals += 1
else:
newE.extend([e*e0 for e0 in E0Zp])
except ZeroDivisionError:
#raise RuntimeError('Probably not enough working precision...')
newE.extend([e*e0 for e0 in E0Zp])
E = newE
verbose('total evaluations = %s'%total_evals)
val = resmul.valuation()
return p**val*K.teichmuller(p**(-val)*resmul)*resadd.exp()
##----------------------------------------------------------------------------
## double_integral(tau1,tau2,r,s)
##
## Input:
## tau1,tau2: Elements of the ``standard affinoid" in H_p consisting
## of elements in PP_1(C_p) whose natural image in
## P_1(F_p-bar) does not belong to P_1(F_p).
## r,s: Elements of P_1(Q). The cusp r=a/b is
## represented in the form r=[a,b], with a and b relatively
## prime integers, and b>=0. By convention infty=[1,0].
## omega: The modular form on Gamma_0(p), represented as above.
##
## Output:
## The ``multiplicative double integral" defined in [Da].
##----------------------------------------------------------
def double_integral(Phi,tau1,tau2,r,s):
if r == [0,0] or s == [0,0]:
raise ValueError('r and s must be valid projective coordinates.')
if r[0] == 0 and s[1] == 0: # From 0 to infinity
return double_integral_zero_infty(Phi,tau1,tau2)
elif s[1] == 0:
a,b = r
if b < 0: a,b = -a,-b
if b == 0: return 1
if b == 1:
return double_integral(Phi,tau1-a/b,tau2-a/b,[0,1],[1,0])
else:
d = (1/(Zmod(b)(a))).lift()
if 2*d > b : d -= b
c = ZZ((a*d-1)/b)
rr = [c,d] if d >= 0 else [-c,-d]
i1 = double_integral(Phi,(b*tau1-a)/(d*tau1-c),(b*tau2-a)/(d*tau2-c),[0,1],[1,0])
i2 = double_integral(Phi,tau1,tau2,rr,[1,0])
return i1*i2
else:
i1 = double_integral(Phi,tau1,tau2,r,[1,0])
i2 = double_integral(Phi,tau1,tau2,s,[1,0])
return i1/i2
def log_pseries(R, x, prec = None):
r'''
Calculate efficiently log(1 - x*z), where z is the variable of R
Doing it with power series built-in log is about 10 times slower...
'''
if x.valuation() <= 0:
raise ValueError('Valuation problem')
K = R.base_ring()
if prec is None:
prec = R.default_precision()
v = [K.zero(),K(x)]
xpow = K(x)
for m in range(2, prec + 1):
xpow *= x
v.append( xpow / QQ(m) )
return -R(v)
def lift_to_locally_analytic(G, divisor, prec=None):
K = divisor.parent().base_ring()
if prec is None:
prec = K.precision_cap()
p = G.p
R = PolynomialRing(K,'r')
edgelist = [(1,o,QQ(1)/QQ(p+1)) for o in G.get_covering(1)]
while len(edgelist) > 0:
newedgelist = []
ii = 0
for parity, (rev, h), wt in edgelist:
ii += 1
a,b,c,d = [K(o) for o in G.embed(h,prec).list()]
try:
c0unit = K.one()
c0val = 0
pol = R.zero()
for P, n in divisor:
hp0 = K(a * P + b)
pol += QQ(n) * log_pseries(R, K(c * P + d) / hp0, prec)
c0unit *= (-hp0).unit_part() ** n
c0val += n * hp0.valuation()
pol += c0unit.log(0)
yield ((h, rev), pol, c0val, c0unit)
except ValueError as msg:
verbose('Subdividing because (%s)...'%str(msg))
newedgelist.extend([(parity,o,wt/QQ(p**2)) for o in G.subdivide([(rev, h)],parity,2)])
continue
edgelist = newedgelist
r'''
Integration pairing. The input is a cycle (an element of `H_1(G,\text{Div}^0)`)
and a cocycle (an element of `H^1(G,\text{HC}(\ZZ))`).
Note that it is a multiplicative integral.
'''
def integrate_H1(G,cycle,cocycle,depth = 1,prec = None,twist=False,progress_bar = False,multiplicative = True, return_valuation = True):
if not cycle.is_degree_zero_valued():
raise ValueError('Cycle should take values in divisors of degree 0')
if prec is None:
prec = cocycle.parent().coefficient_module().base_ring().precision_cap()
verbose('precision = %s'%prec)
Cp = cycle.parent().coefficient_module().base_field()
R = PolynomialRing(Cp, names = 't')
t = R.gen()
total_integrals = cycle.size_of_support()
verbose('Will do %s integrals'%total_integrals)
resmul = Cp(1)
resadd = Cp(0)
resval = ZZ(0)
for g, D in cycle:
if twist:
D = D.left_act_by_matrix(G.embed(G.wp(),prec).change_ring(Cp))
g = g.conjugate_by(G.wp()**-1)
for (h, rev), pol, c0val, c0unit in lift_to_locally_analytic(G, D, prec):
mu = cocycle.evaluate(g, h, twist=rev, at_identity=G.use_shapiro())
resadd += sum(a * mu.moment(i) for a,i in zip(pol.coefficients(),pol.exponents()) if i < len(mu.moments()))
mu0 = cocycle['liftee'].evaluate(g, h, twist=rev, at_identity=G.use_shapiro())[0]
resval += c0val * ZZ(mu0)
resmul *= c0unit**ZZ(mu0)
if not multiplicative:
        return (resadd, resval, resmul) if return_valuation else resadd
else:
return Cp.prime()**resval * Cp.teichmuller(resmul) * resadd.exp() # DEBUG
def sample_point(G,e,prec = 20):
r'''
Returns a point in U_h = (e)^{-1} Z_p.
'''
rev, h = e
hemb = G.embed(set_immutable(h**-1),prec)
wploc = G.embed(G.wp(),prec)
if rev == True:
hemb = hemb * wploc
a,b,c,d = hemb.list()
if d == 0:
return Infinity
return b/d
def get_basic_integral(G,cocycle,gamma, center, j, prec=None):
p = G.p
HOC = cocycle.parent()
V = HOC.coefficient_module()
if prec is None:
prec = V.precision_cap()
Cp = Qp(p, prec)
verbose('precision = %s'%prec)
R = PolynomialRing(Cp,names = 't')
PS = PowerSeriesRing(Cp, names = 'z')
t = R.gen()
z = PS.gen()
if prec is None:
prec = V.precision_cap()
try:
coeff_depth = V.precision_cap()
except AttributeError:
coeff_depth = V.coefficient_module().precision_cap()
resadd = ZZ(0)
edgelist = G.get_covering(1)[1:]
for rev, h in edgelist:
        mu_e = cocycle.evaluate(gamma, h, twist=rev, at_identity=G.use_shapiro())
a,b,c,d = [Cp(o) for o in G.embed(h,prec).list()]
pol = ((PS(d * z + b) / PS(c * z + a) - Cp.teichmuller(center))**j).polynomial()
resadd += sum(a * mu_e.moment(i) for a,i in zip(pol.coefficients(),pol.exponents()) if i < len(mu_e.moments()))
return resadd
| gpl-2.0 | 7,358,493,258,528,513,000 | 36.235955 | 136 | 0.53621 | false | 3.043159 | false | false | false |
R-daneel-olivaw/mutation-tolerance-voting | PrefMutationRandom.py | 1 | 4655 | '''
Created on Mar 18, 2015
@author: PengPeng
'''
from code.prefreaders import SushiPref
from code.prefUtil import SushiPrefUtil
from Crypto.Random.random import sample
import random
# 'ebi', 'anago', 'maguro', 'ika', 'uni', 'sake', 'tamago', 'toro', 'tekka-maki', 'kappa-maki'
class MutationRandom(object):
'''
classdocs
'''
def __init__(self, prefpath, index):
'''
Constructor
'''
self.prefpath = prefpath
self.LoadPref()
self.index = index
def LoadPref(self):
self.raw_pref = SushiPref(self.prefpath)
self.raw_pref.loadUp()
def GenerateRandom(self, Percentage, MutateType, OutputFilePath):
numRow = len(self.raw_pref.getDf().index) #get row count
numRowMutate = round(numRow * Percentage) #get mutated raw count
lstRandomRaw = sample(range(0, numRow), numRowMutate) #generate numRowMutate random numbers in range
        #Mutate votes on the extremes when MutateType == 0, else mutate votes in the middle
        #"Extremes" means the first two and the last two positions
        #"votes in the middle" means the remaining six positions
# print(lstRandomRaw)
lstMutateIndexExtreme = [0, 1, 8, 9]
lstMutateIndexMid = [2, 3, 4, 5, 6, 7]
lstMutate = []
if MutateType == 0:
for iRow in range(0, len(lstRandomRaw)):
# print(self.raw_pref.getDf().iloc[lstRandomRaw[iRow]])
for iElement in range(0, 4):
lstMutate.append(self.raw_pref.getDf().iloc[lstRandomRaw[iRow], lstMutateIndexExtreme[iElement]])
# print(lstMutate)
lstMutated = sorted(lstMutate, key = lambda k: random.random())
# print(lstMutated)
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 0] = lstMutated[0];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 1] = lstMutated[1];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 8] = lstMutated[2];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 9] = lstMutated[3];
# print(self.raw_pref.getDf().iloc[lstRandomRaw[iRow]])
del lstMutate[:]
# self.raw_pref.getDf().to_csv(OutputFilePath, encoding='utf-8', index=True)
self.WriteToDirectory(OutputFilePath, Percentage, MutateType)
else:
for iRow in range(0, len(lstRandomRaw)):
# print(self.raw_pref.getDf().iloc[lstRandomRaw[iRow]])
for iElement in range(0, 6):
lstMutate.append(self.raw_pref.getDf().iloc[lstRandomRaw[iRow], lstMutateIndexMid[iElement]])
# print(lstMutate)
lstMutated = sorted(lstMutate, key = lambda k: random.random())
# print(lstMutated)
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 2] = lstMutated[0];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 3] = lstMutated[1];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 4] = lstMutated[2];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 5] = lstMutated[3];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 6] = lstMutated[4];
self.raw_pref.getDf().iloc[lstRandomRaw[iRow], 7] = lstMutated[5];
# print(self.raw_pref.getDf().iloc[lstRandomRaw[iRow]])
del lstMutate[:]
# self.raw_pref.getDf().to_csv(OutputFilePath, encoding='utf-8', index=True)
return self.WriteToDirectory(OutputFilePath, Percentage, MutateType)
    # Mutation Percentage should be < 1
    #Mutate votes on the extremes when MutateType == 0, else mutate votes in the middle
    #"Extremes" means the first two and the last two positions
    #"votes in the middle" means the remaining six positions
def WriteToDirectory(self, OutPath, MutationPercentage, MutationType):
OutputDirectory = OutPath + "/Mutation" + "_" + str(MutationPercentage) + "_" + str(MutationType) + str(self.index) + ".csv"
self.raw_pref.getDf().to_csv(OutputDirectory, encoding='utf-8', index=True)
return OutputDirectory
def GetResult(self, MutationPercentage, MutationType, OutputDirectory):
OutputDirectory = self.GenerateRandom(MutationPercentage, MutationType, OutputDirectory)
return OutputDirectory
| lgpl-3.0 | 2,551,288,586,847,176,000 | 41.706422 | 132 | 0.572718 | false | 3.575269 | false | false | false |
Urlandi/bgptablehole | ipv4seq.py | 1 | 6165 | # -*- coding: utf-8 -*-
"""
Utilities for manipulating IP (v4)
"""
ADDR_LEN = 32 # IPv4 max mask
ASN_MAX = 18446744073709551616L  # 2**64 (note: the 32-bit ASN maximum is 2**32 = 4294967296)
def ipaddrcount(masklen):
"""Return address count by mask length"""
return 1 << (ADDR_LEN - masklen)
def ipmask(masklen):
"""Return bit mask by mask length"""
return (1 << ADDR_LEN) - ipaddrcount(masklen)
def ipv4num(address):
"""
Convert IPv4 address to list
Arg is string as "A.B.C.D/Y,ASPATH", where
A,B,C,D,Y is valid numbers in address
"ASPATH" - various string
"ASPATH" with comma may be absent
Return is list of 3 items:
0. digitized IPv4 net
1. digitized mask length
2. as is "ASPATH" or 0 if absent
or empty list if errors occur
Some exceptions handled
"""
_r = []
try:
addr = address.split('.', 3)
addr = addr[:3] + addr[3].split('/', 1)
addr = addr[:4] + addr[4].split(',', 1)
octets = addr[:4]
preflen = addr[4]
aspath = "".join(addr[5:]).strip()
if aspath == "":
aspath = 0
o0 = int(octets[3])
o1 = int(octets[2])
o2 = int(octets[1])
o3 = int(octets[0])
if 0 <= o3 <= 255 and 0 <= o2 <= 255 and 0 <= o1 <= 255 and 0 <= o0 <= 255:
addrnet = o3*16777216+o2*65536+o1*256+o0
prefn = int(preflen)
if 1 <= prefn <= ADDR_LEN:
addrmask = ipmask(prefn)
addrnet &= addrmask
_r = addrnet, prefn, aspath
except (ValueError, IndexError):
return _r
return _r
def numipv4(address):
"""
Convert digitized IPv4 net to string
Arg is number from 0 to 2^32 (IPv4 address)
Return string "A.B.C.D"
or 0 if errors occur
Some exceptions handled
"""
try:
return "{}.{}.{}.{}".format(address >> 24, (address >> 16) & 0xff, (address >> 8) & 0xff, (address & 0xff))
except ValueError:
return 0
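
def _example_addr_roundtrip():
    """Illustrative usage sketch only -- not part of the original module.

    Parses a prefix string with ipv4num() and formats the network address back
    with numipv4(). The prefix and AS path below are documentation examples.
    """
    net, masklen, aspath = ipv4num("192.0.2.0/24,64496 64511")
    return numipv4(net), masklen, aspath  # -> ("192.0.2.0", 24, "64496 64511")
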
def isiple((net_s), (net_e)):
"""
True if arg1 < arg2
arg1, arg2 is valid IPv4 address list from ipv4num procedure
True if:
arg1:192.0.2.0/Any, arg2:192.0.2.1-255/Any
arg1:192.0.2.0/24, arg2:192.0.2.0/25-32
False else
"""
if net_s[0] < net_e[0] or (net_s[0] == net_e[0] and net_s[1] < net_e[1]):
return True
return False
def isipleq((net_s), (net_e)):
"""
True if arg1 <= arg2
arg1, arg2 is valid IPv4 address list from ipv4num procedure
True if:
arg1:192.0.2.0/Any, arg2:192.0.2.1-255/Any
arg1:192.0.2.0/24, arg2:192.0.2.0/24-32
False else
"""
if net_s[0] < net_e[0] or (net_s[0] == net_e[0] and net_s[1] <= net_e[1]):
return True
return False
def isseq((net_s), (net_e)):
"""
Return True if net in arg2 begin immediately after net in arg1
arg1, arg2 is valid IPv4 address list from ipv4num procedure
True if:
arg1:192.0.2.4/30, arg2:192.0.2.8/30
"""
try:
if isiple(net_s, net_e):
return net_s[0] + ipaddrcount(net_s[1]) == net_e[0]
except TypeError:
return False
return False
def issubnet((net_s), (net_e)):
"""
Return True if net in arg2 is included in net in arg1
arg1, arg2 is valid IPv4 address list from ipv4num procedure
Return True if:
arg1:192.0.2.0/30, arg2:192.0.2.2/31
arg1:192.0.2.0/30, arg2:192.0.2.0/30
"""
try:
if isipleq(net_s, net_e):
return net_s[0] + ipaddrcount(net_s[1]) > net_e[0]
except TypeError:
return False
return False
def netsum((net_s), (net_e)):
"""
Return new net as sum of net in arg1 with net in arg2
arg1, arg2 is valid IPv4 address list from ipv4num procedure
arg1 < arg2
Return 192.0.2.0/29 if:
arg1:192.0.2.0/30, arg2:192.0.2.4/30
Return empty list when unable to sum
"""
_netsum = []
try:
if isiple(net_s, net_e):
if (net_s[1] == net_e[1]) and \
(net_s[1] > 1) and \
(net_s[0] & ipmask(net_s[1] - 1) == (net_s[0])) and \
isseq(net_s, net_e):
_netsum = [net_s[0], net_s[1] - 1]
except TypeError:
return _netsum
return _netsum
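
def _example_netsum():
    """Illustrative usage sketch only -- not part of the original module.

    Two adjacent, properly aligned /30 networks merge into one /29;
    non-adjacent or misaligned networks yield an empty list.
    """
    a = ipv4num("192.0.2.0/30")
    b = ipv4num("192.0.2.4/30")
    merged = netsum(a, b)  # -> [3221225984, 29]
    return "%s/%d" % (numipv4(merged[0]), merged[1])  # -> "192.0.2.0/29"
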
def subnets(addr_s, addr_e, aspath=0):
"""
Return list of nets between arg1 and arg2
arg1, arg2 is valid digitized IPv4 address
arg1 in range, arg2 out range
ASPATH must coincide in arg1 and arg2
arg1 < arg2, otherwise return an empty list
"""
_subnets = []
def prefix_l(s, e):
l = ADDR_LEN
addr_count = e - s
while addr_count:
addr_count >>= 1
l -= 1
while (s & ipmask(l) != s) or (s + ipaddrcount(l)) > e:
l += 1
return l
if addr_s < addr_e:
cur_addr_s = addr_s
while cur_addr_s < addr_e:
i = prefix_l(cur_addr_s, addr_e)
_subnets.append([cur_addr_s, i, aspath])
cur_addr_s = cur_addr_s + ipaddrcount(i)
return _subnets
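
def _example_subnets():
    """Illustrative usage sketch only -- not part of the original module.

    Splits the half-open address range [192.0.2.8, 192.0.2.32) into the
    minimal list of CIDR prefixes and formats them as strings.
    """
    start = ipv4num("192.0.2.8/32")[0]
    end = ipv4num("192.0.2.32/32")[0]
    return ["%s/%d" % (numipv4(net), masklen)
            for net, masklen, _ in subnets(start, end)]  # -> ['192.0.2.8/29', '192.0.2.16/28']
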
def netsub((net_s), (net_list)):
"""
Return list of subnets in arg1 where subnets in arg2 must be present visibly
arg1 is valid IPv4 address list from ipv4num procedure
arg2 is valid list where items is valid IPv4 address list from ipv4num procedure
"""
_netsub = []
if net_s[0] < net_list[0][0]:
_netsub = subnets(net_s[0], net_list[0][0], net_s[2])
i = 0
while i < len(net_list)-1:
_netsub = _netsub + [net_list[i]] + \
subnets(net_list[i][0]+ipaddrcount(net_list[i][1]), net_list[i+1][0], net_s[2])
i += 1
_netsub = _netsub + [net_list[-1]] + \
subnets(net_list[-1][0] + ipaddrcount(net_list[-1][1]), net_s[0] + ipaddrcount(net_s[1]), net_s[2])
return _netsub
| mit | -6,386,601,330,949,451,000 | 20.833333 | 115 | 0.516626 | false | 3.137405 | false | false | false |
jhu-lcsr-forks/ogre | Tools/Blender2.6Export/ogre_mesh_exporter/mesh_impl.py | 16 | 16080 | # ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
# ########################################################################
# See mesh_exporter.py for explanation.
# ########################################################################
import bpy, mathutils
from ogre_mesh_exporter.log_manager import LogManager, Message
from operator import attrgetter
# Mesh export settings class to define how we are going to export the mesh.
class MeshExportSettings():
def __init__(self, fixUpAxisToY = True, requireMaterials = True, applyModifiers = False, skeletonNameFollowMesh = True, runOgreXMLConverter = True):
self.fixUpAxisToY = fixUpAxisToY
self.requireMaterials = requireMaterials
self.applyModifiers = applyModifiers
self.skeletonNameFollowMesh = skeletonNameFollowMesh
self.runOgreXMLConverter = runOgreXMLConverter
@classmethod
def fromRNA(cls, meshObject):
globalSettings = bpy.context.scene.ogre_mesh_exporter
meshSettings = meshObject.data.ogre_mesh_exporter
return MeshExportSettings(
fixUpAxisToY = globalSettings.fixUpAxisToY,
requireMaterials = meshSettings.requireMaterials if (meshSettings.requireMaterials_override) else globalSettings.requireMaterials,
applyModifiers = meshSettings.applyModifiers if (meshSettings.applyModifiers_override) else globalSettings.applyModifiers,
skeletonNameFollowMesh = meshSettings.skeletonNameFollowMesh if (meshSettings.skeletonNameFollowMesh_override) else globalSettings.skeletonNameFollowMesh,
runOgreXMLConverter = globalSettings.runOgreXMLConverter)
class BoneWeight():
def __init__(self, boneIndex, boneWeight):
self.mBoneIndex = boneIndex
self.mBoneWeight = boneWeight
class Vertex():
def __init__(self, pos, norm, uvs = list(), colors = list(), boneWeights = list()):
self.mPosition = pos
self.mNormal = norm
self.mUVs = uvs
self.mColors = colors
self.mBoneWeights = boneWeights
def match(self, norm, uvs, colors):
# Test normal.
if (self.mNormal != norm): return False;
# Test UVs.
		if (len(self.mUVs) != len(uvs)): return False
for uv1, uv2 in zip(self.mUVs, uvs):
if (uv1 != uv2): return False
# Test Colors.
		if (len(self.mColors) != len(colors)): return False
for color1, color2 in zip(self.mColors, colors):
if (color1 != color2): return False
return True
class VertexBuffer():
def __init__(self, uvLayers = 0, colorLayers = 0, hasBoneWeights = False):
# Vertex data.
self.mVertexData = list()
self.mUVLayers = uvLayers
self.mColorLayers = colorLayers
self.mHasBoneWeights = hasBoneWeights
# Blender mesh -> vertex index link.
# Only useful when exporting.
self.mMeshVertexIndexLink = dict()
def reset(self, uvLayers, colorLayers, hasBoneWeights = False):
self.mVertexData = list()
self.mUVLayers = uvLayers
self.mColorLayers = colorLayers
self.mHasBoneWeights = hasBoneWeights
def vertexCount(self):
return len(self.mVertexData)
# This method adds a vertex from the given blend mesh index into the buffer.
# If the uv information does not match the recorded vertex, it will automatically
# clone a new vertex for use.
def addVertex(self, index, pos, norm, uvs, colors, boneWeights = list(), fixUpAxisToY = True):
# Fix Up axis to Y (swap Y and Z and negate Z)
if (fixUpAxisToY):
pos = [pos[0], pos[2], -pos[1]]
norm = [norm[0], norm[2], -norm[1]]
# make sure uv layers and color layers matches as defined.
if (len(uvs) != self.mUVLayers or len(colors) != self.mColorLayers):
raise Exception("Invalid UV layer or Color layer count! Expecting uv(%d), color(%d). Got uv(%d), color(%d)" %
(self.mUVLayers, self.mColorLayers, len(uvs), len(colors)))
# try to find pre added vertex that matches criteria.
if (index in self.mMeshVertexIndexLink):
localIndexList = self.mMeshVertexIndexLink[index]
for localIndex in localIndexList:
if (self.mVertexData[localIndex].match(norm, uvs, colors)):
return localIndex
# nothing found. so we add a new vertex.
localIndex = len(self.mVertexData)
if (index not in self.mMeshVertexIndexLink): self.mMeshVertexIndexLink[index] = list()
self.mMeshVertexIndexLink[index].append(localIndex)
self.mVertexData.append(Vertex(pos, norm, uvs, colors, boneWeights))
return localIndex
def serialize(self, file, indent = ''):
extraAttributes = ''
uvLayerCount = 8 if (self.mUVLayers > 8) else self.mUVLayers
if (uvLayerCount > 0):
extraAttributes = ' texture_coords="%d"' % uvLayerCount
for i in range(uvLayerCount):
extraAttributes += ' texture_coord_dimensions_%d="float2"' % i
colorLayerCount = self.mColorLayers
if (colorLayerCount > 0): extraAttributes += ' colours_diffuse="true"'
if (colorLayerCount > 1): extraAttributes += ' colours_specular="true"'
file.write('%s<vertexbuffer positions="true" normals="true"%s>\n' % (indent, extraAttributes))
for vertex in self.mVertexData:
file.write('%s\t<vertex>\n' % indent)
# write position and normal.
file.write('%s\t\t<position x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mPosition[0], vertex.mPosition[1], vertex.mPosition[2]))
file.write('%s\t\t<normal x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mNormal[0], vertex.mNormal[1], vertex.mNormal[2]))
# write UV layers. (NOTE: Blender uses bottom left coord! Ogre uses top left! So we have to flip Y.)
for i in range(uvLayerCount):
uv = vertex.mUVs[i]
file.write('%s\t\t<texcoord u="%.6f" v="%.6f" />\n' % (indent, uv[0], (1.0 - uv[1])))
# write diffuse.
if (colorLayerCount > 0):
color = vertex.mColors[0]
file.write('%s\t\t<colour_diffuse value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
# write specular.
if (colorLayerCount > 1):
color = vertex.mColors[1]
file.write('%s\t\t<colour_diffuse value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
file.write('%s\t</vertex>\n' % indent)
file.write('%s</vertexbuffer>\n' % indent)
def serializeBoneAssignments(self, file, indent = ''):
file.write('%s\t<boneassignments>\n' % indent)
vertexWithNoBoneAssignements = 0;
for i, vertex in enumerate(self.mVertexData):
if (len(vertex.mBoneWeights) == 0): vertexWithNoBoneAssignements += 1
for boneWeight in vertex.mBoneWeights:
file.write('%s\t\t<vertexboneassignment vertexindex="%d" boneindex="%d" weight="%.6f" />\n' %
(indent, i, boneWeight.mBoneIndex, boneWeight.mBoneWeight))
if (vertexWithNoBoneAssignements > 0):
LogManager.logMessage("There are %d vertices with no bone assignements!" % vertexWithNoBoneAssignements, Message.LVL_WARNING)
file.write('%s\t</boneassignments>\n' % indent)
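
def _exampleVertexBufferDedup():
	# Illustrative usage sketch only -- not used by the exporter itself.
	# Demonstrates how VertexBuffer.addVertex() reuses an existing vertex when the
	# normal/UV/color data of a blend-mesh vertex matches, and clones a new one
	# otherwise. All coordinates below are made-up values.
	vb = VertexBuffer(uvLayers = 1, colorLayers = 0)
	i0 = vb.addVertex(0, [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [[0.0, 0.0]], [])
	i1 = vb.addVertex(0, [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [[0.0, 0.0]], [])  # same data -> same local index
	i2 = vb.addVertex(0, [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [[0.5, 0.5]], [])  # new UV -> new local vertex
	return (i0, i1, i2)  # expected (0, 0, 1)
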
class SubMesh():
def __init__(self, vertexBuffer = None, meshVertexIndexLink = None, name = None):
# True if submesh is sharing vertex buffer.
self.mShareVertexBuffer = False
# Vertex buffer.
self.mVertexBuffer = vertexBuffer if (vertexBuffer) else VertexBuffer()
# Blender mesh -> local/shared vertex index link.
self.mMeshVertexIndexLink = meshVertexIndexLink if (meshVertexIndexLink) else dict()
# Face data.
self.mFaceData = list()
# Blender material.
self.mMaterial = None
# Name of submesh
self.mName = name
if ((vertexBuffer is not None) and (meshVertexIndexLink is not None)):
self.mShareVertexBuffer = True
def insertPolygon(self, blendMesh, polygon, blendVertexGroups = None, ogreSkeleton = None, fixUpAxisToY = True):
polygonVertices = polygon.vertices
polygonVertexCount = polygon.loop_total
# extract uv information.
# Here we convert blender uv data into our own
# uv information that lists uvs by vertices.
blendUVLoopLayers = blendMesh.uv_layers
# construct empty polygon vertex uv list.
polygonVertUVs = list()
for i in range(polygonVertexCount): polygonVertUVs.append(list())
for uvLoopLayer in blendUVLoopLayers:
for i, loopIndex in enumerate(polygon.loop_indices):
polygonVertUVs[i].append(uvLoopLayer.data[loopIndex].uv)
# extract color information.
# Here we convert blender color data into our own
# color information that lists colors by vertices.
blendColorLoopLayers = blendMesh.vertex_colors
# construct empty polygon vertex color list.
polygonVertColors = list()
for i in range(polygonVertexCount): polygonVertColors.append(list())
for colorLoopLayer in blendColorLoopLayers:
for i, loopIndex in enumerate(polygon.loop_indices):
polygonVertColors[i].append(colorLoopLayer.data[loopIndex].color)
# loop through the vertices and add to this submesh.
localIndices = list()
useSmooth = polygon.use_smooth
for index, uvs, colors in zip(polygonVertices, polygonVertUVs, polygonVertColors):
vertex = blendMesh.vertices[index]
norm = vertex.normal if (useSmooth) else polygon.normal
# grab bone weights.
boneWeights = list()
if (ogreSkeleton is not None):
for groupElement in vertex.groups:
groupName = blendVertexGroups[groupElement.group].name
boneIndex = ogreSkeleton.getBoneIndex(groupName)
if (boneIndex == -1 or abs(groupElement.weight) < 0.000001): continue
boneWeight = groupElement.weight
boneWeights.append(BoneWeight(boneIndex, boneWeight))
# trim bone weight count if too many defined.
if (len(boneWeights) > 4):
LogManager.logMessage("More than 4 bone weights are defined for a vertex! Best 4 will be used.", Message.LVL_WARNING)
boneWeights.sort(key=attrgetter('mBoneWeight'), reverse=True)
while (len(boneWeights) > 4): del boneWeights[-1]
localIndices.append(self.mVertexBuffer.addVertex(index, vertex.co, norm, uvs, colors, boneWeights, fixUpAxisToY))
# construct triangle index data.
		if (polygonVertexCount == 3):
self.mFaceData.append(localIndices)
else:
# split quad into triangles.
self.mFaceData.append(localIndices[:3])
self.mFaceData.append([localIndices[0], localIndices[2], localIndices[3]])
def serialize(self, file):
vertexCount = self.mVertexBuffer.vertexCount()
materialAttribute = '' if (self.mMaterial is None) else ' material="%s"' % self.mMaterial.name
file.write('\t\t<submesh%s usesharedvertices="%s" use32bitindexes="%s">\n' %
(materialAttribute, 'true' if self.mShareVertexBuffer else 'false',
'true' if (vertexCount > 65536) else 'false'))
# write face data.
file.write('\t\t\t<faces count="%d">\n' % len(self.mFaceData))
for face in self.mFaceData:
file.write('\t\t\t\t<face v1="%d" v2="%d" v3="%d" />\n' % tuple(face))
file.write('\t\t\t</faces>\n')
# write submesh vertex buffer if not shared.
if (not self.mShareVertexBuffer):
file.write('\t\t\t<geometry vertexcount="%d">\n' % vertexCount)
self.mVertexBuffer.serialize(file, '\t\t\t\t')
file.write('\t\t\t</geometry>\n')
# write bone assignments
			if (self.mVertexBuffer.mHasBoneWeights):
				self.mVertexBuffer.serializeBoneAssignments(file, '\t\t\t')
file.write('\t\t</submesh>\n')
class Mesh():
def __init__(self, blendMesh = None, blendVertexGroups = None, ogreSkeleton = None, exportSettings = MeshExportSettings()):
# shared vertex buffer.
self.mSharedVertexBuffer = VertexBuffer()
# Blender mesh -> shared vertex index link.
self.mSharedMeshVertexIndexLink = dict()
# collection of submeshes.
self.mSubMeshDict = dict()
# skip blend mesh conversion if no blend mesh passed in.
if (blendMesh is None): return
self.mOgreSkeleton = ogreSkeleton
hasBoneWeights = ogreSkeleton is not None
# Lets do some pre checking to show warnings if needed.
uvLayerCount = len(blendMesh.uv_layers)
colorLayerCount = len(blendMesh.vertex_colors)
if (uvLayerCount > 8): LogManager.logMessage("More than 8 UV layers in this mesh. Only 8 will be exported.", Message.LVL_WARNING)
if (colorLayerCount > 2): LogManager.logMessage("More than 2 color layers in this mesh. Only 2 will be exported.", Message.LVL_WARNING)
# setup shared vertex buffer.
self.mSharedVertexBuffer.reset(uvLayerCount, colorLayerCount, hasBoneWeights)
# split up the mesh into submeshes by materials.
# we first get sub mesh shared vertices option.
materialList = blendMesh.materials
materialCount = len(materialList)
subMeshProperties = blendMesh.ogre_mesh_exporter.subMeshProperties
while (len(subMeshProperties) < materialCount): subMeshProperties.add() # add more items if needed.
while (len(subMeshProperties) > materialCount): subMeshProperties.remove(0) # remove items if needed.
LogManager.logMessage("Material Count: %d" % len(materialList), Message.LVL_INFO)
for polygon in blendMesh.polygons:
# get or create submesh.
if (polygon.material_index in self.mSubMeshDict):
subMesh = self.mSubMeshDict[polygon.material_index]
else:
# instantiate submesh base on wether sharing vertices or not.
subMeshProperty = subMeshProperties[polygon.material_index]
if (subMeshProperty.useSharedVertices):
subMesh = SubMesh(self.mSharedVertexBuffer, self.mSharedMeshVertexIndexLink, subMeshProperty.name)
else:
subMesh = SubMesh(VertexBuffer(uvLayerCount, colorLayerCount, hasBoneWeights), name = subMeshProperty.name)
subMesh.mMaterial = None if (len(materialList) == 0) else materialList[polygon.material_index]
if (exportSettings.requireMaterials and subMesh.mMaterial == None):
LogManager.logMessage("Some faces are not assigned with a material!", Message.LVL_WARNING)
LogManager.logMessage("To hide this warning, please uncheck the 'Require Materials' option.", Message.LVL_WARNING)
self.mSubMeshDict[polygon.material_index] = subMesh
# insert polygon.
subMesh.insertPolygon(blendMesh, polygon, blendVertexGroups, ogreSkeleton, exportSettings.fixUpAxisToY)
def serialize(self, file):
file.write('<mesh>\n')
# write shared vertex buffer if available.
sharedVertexCount = self.mSharedVertexBuffer.vertexCount()
if (sharedVertexCount > 0):
file.write('\t<sharedgeometry vertexcount="%d">\n' % sharedVertexCount)
self.mSharedVertexBuffer.serialize(file, '\t\t')
file.write('\t</sharedgeometry>\n')
# write bone assignments
if (self.mSharedVertexBuffer.mHasBoneWeights):
self.mSharedVertexBuffer.serializeBoneAssignments(file, '\t\t')
subMeshNames = list()
# write submeshes.
file.write('\t<submeshes>\n')
for subMesh in self.mSubMeshDict.values():
name = subMesh.mName
if (name):
if (not name in subMeshNames):
subMeshNames.append(name)
else:
LogManager.logMessage("Mulitple submesh with same name defined: %s" % name, Message.LVL_WARNING)
subMesh.serialize(file)
file.write('\t</submeshes>\n')
# write submesh names
if (len(subMeshNames)):
file.write('\t<submeshnames>\n')
for index, name in enumerate(subMeshNames):
file.write('\t\t<submeshname name="%s" index="%d" />\n' % (name, index))
file.write('\t</submeshnames>\n')
# write skeleton link
if (self.mOgreSkeleton is not None):
file.write('\t<skeletonlink name="%s.skeleton" />\n' % self.mOgreSkeleton.mName)
file.write('</mesh>\n')
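# --- Hedged usage sketch (editorial addition, not part of the original exporter) ---
# Illustrates how the Mesh class above can be driven to write an Ogre XML mesh file.
# The active-object lookup, the skeleton argument and the output path handling are
# assumptions for illustration; the real add-on wires these through its own export
# operator and property groups.
def export_active_object_sketch(filepath, ogre_skeleton=None):
    import bpy
    blend_object = bpy.context.active_object
    settings = MeshExportSettings()  # defaults; fields like requireMaterials / fixUpAxisToY live on it
    mesh = Mesh(blend_object.data, blend_object.vertex_groups, ogre_skeleton, settings)
    with open(filepath, 'w') as f:
        mesh.serialize(f)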
| mit | 1,976,643,602,277,208,600 | 41.204724 | 157 | 0.721891 | false | 3.266965 | false | false | false |
ruslankl9/python_training | bdd/contact_steps.py | 1 | 3553 | from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(db):
return db.get_contact_list()
@given('a contact with <first_name>, <last_name> and <address>')
def new_contact(first_name, last_name, address):
return Contact(first_name=first_name, last_name=last_name, address=address)
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contact.create(new_contact)
@then('the new contact list is equal to the old list with the added contact')
def verify_contact_added(db, contact_list, new_contact):
old_contacts = contact_list
new_contacts = db.get_contact_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
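# --- Hedged illustration (editorial addition, not part of the original suite) ---
# The step functions in this module are meant to be bound to scenarios in a .feature
# file via pytest-bdd's scenarios()/@scenario binding. The scenario wording and the
# example values below are assumptions, shown only to make the Given/When/Then wiring
# above concrete:
#
#   Scenario Outline: Add a contact
#     Given a contact list
#     And a contact with <first_name>, <last_name> and <address>
#     When I add the contact to the list
#     Then the new contact list is equal to the old list with the added contact
#
#     Examples:
#       | first_name | last_name | address    |
#       | Anna       | Smith     | 12 Main St |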
@given('a non-empty contact list')
def non_empty_contact_list(app, db):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(first_name="Some contact"))
return db.get_contact_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contact.delete_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old list without deleted contact')
def verify_contact_deleted(db, non_empty_contact_list, random_contact, app, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(random_contact)
assert old_contacts == new_contacts
if check_ui:
assert \
sorted(
map(lambda x: Contact(
id=x.id,
first_name=x.first_name.strip(),
last_name=x.last_name.strip(),
address=x.address.strip(),
all_emails_from_home_page=x.all_emails_from_home_page.strip(),
all_phones_from_home_page=x.all_phones_from_home_page.strip()
), new_contacts), key=Contact.id_or_max
) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
@when('I modify the contact from the list')
def modify_contact(app, new_contact, random_contact):
new_contact.id = random_contact.id
app.contact.modify_contact_by_id(random_contact.id, new_contact)
@then('the new contact list is equal to the old list with modified contact')
def verify_contact_modified(db, non_empty_contact_list, new_contact, app, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
[x for x in old_contacts if x.id == new_contact.id][0].first_name = new_contact.first_name
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert \
sorted(
map(lambda x: Contact(
id=x.id,
first_name=x.first_name.strip(),
last_name=x.last_name.strip(),
address=x.address.strip(),
all_emails_from_home_page=x.all_emails_from_home_page.strip(),
all_phones_from_home_page=x.all_phones_from_home_page.strip()
), new_contacts), key=Contact.id_or_max
) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max) | apache-2.0 | 1,513,993,472,987,405,600 | 38.054945 | 101 | 0.646214 | false | 3.419634 | false | false | false |
KohlsTechnology/ansible | lib/ansible/modules/network/aci/aci_contract_subject_to_filter.py | 12 | 9130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_contract_subject_to_filter
short_description: Bind Contract Subjects to Filters (vz:RsSubjFiltAtt)
description:
- Bind Contract Subjects to Filters on Cisco ACI fabrics.
notes:
- The C(tenant), C(contract), C(subject), and C(filter_name) must exist before using this module in your playbook.
- The M(aci_tenant), M(aci_contract), M(aci_contract_subject), and M(aci_filter) modules can be used for these.
- More information about the internal APIC class B(vz:RsSubjFiltAtt) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
filter:
description:
- The name of the Filter to bind to the Subject.
aliases: [ filter_name ]
log:
description:
- Determines if the binding should be set to log.
- The APIC defaults new Subject to Filter bindings to C(none).
choices: [ log, none ]
aliases: [ directive ]
subject:
description:
- The name of the Contract Subject.
aliases: [ contract_subject, subject_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
- name: Remove an existing contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
    state: absent
- name: Query a specific contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
state: query
- name: Query all contract subject to filter bindings
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
filter=dict(type='str', aliases=['filter_name']), # Not required for querying all objects
        log=dict(type='str', choices=['log', 'none'], aliases=['directive']),
subject=dict(type='str', aliases=['contract_subject', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'filter', 'subject', 'tenant']],
['state', 'present', ['contract', 'filter', 'subject', 'tenant']],
],
)
contract = module.params['contract']
filter_name = module.params['filter']
log = module.params['log']
subject = module.params['subject']
tenant = module.params['tenant']
state = module.params['state']
    # Add subject_filter key to module.params for building the URL
module.params['subject_filter'] = filter_name
    # Convert log to an empty string if 'none', as that is what the API expects. An empty string is not a good option to present to the user.
if log == 'none':
log = ''
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
filter_target='eq(vzBrCP.name, "{0}")'.format(contract),
module_object=contract,
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
filter_target='eq(vzSubj.name, "{0}")'.format(subject),
module_object=subject,
),
subclass_3=dict(
aci_class='vzRsSubjFiltAtt',
aci_rn='rssubjFiltAtt-{0}'.format(filter_name),
filter_target='eq(vzRsSubjFiltAtt.tnVzFilterName, "{0}")'.format(filter_name),
module_object=filter_name,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzRsSubjFiltAtt',
class_config=dict(
tnVzFilterName=filter_name,
directives=log,
),
)
aci.get_diff(aci_class='vzRsSubjFiltAtt')
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove subject_filter used to build URL from module.params
module.params.pop('subject_filter')
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | 2,922,775,296,852,481,500 | 28.934426 | 146 | 0.607119 | false | 3.969565 | true | false | false |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/Blast/Applications.py | 1 | 5910 | """Definitions for interacting with Blast related applications.
"""
from Bio import Application
from Bio.Application import _Option
class FastacmdCommandline(Application.AbstractCommandline):
"""Create a commandline for the fasta program from NCBI.
"""
def __init__(self, fastacmd = "fastacmd"):
Application.AbstractCommandline.__init__(self)
self.program_name = fastacmd
self.parameters = \
[
_Option(["-d", "database"], ["input"], None, 1,
"The database to retrieve from."),
_Option(["-s", "search_string"], ["input"], None, 1,
"The id to search for.")
]
class BlastallCommandline(Application.AbstractCommandline):
"""Create a commandline for the blastall program from NCBI.
    XXX This could use more checking for valid parameters to the program.
"""
def __init__(self, blastcmd = "blastall"):
Application.AbstractCommandline.__init__(self)
self.program_name = blastcmd
self.parameters = \
[# Scoring options
_Option(["-M", "matrix"], ["input"], None, 0,
"Matrix to use"),
_Option(["-G", "gap_open"], ["input"], None, 0,
"Gap open penalty"),
_Option(["-E", "gap_extend"], ["input"], None, 0,
"Gap extension penalty"),
_Option(["-A", "window_size"], ["input"], None, 0,
"Multiple hits window size"),
_Option(["-j", "npasses"], ["input"], None, 0,
"Number of passes"),
_Option(["-p", "passes"], ["input"], None, 0,
"Hits/passes. Integer 0-2."),
# Algorithm options
_Option(["-g", "gapped"], ["input"], None, 0,
"Whether to do a gapped alignment. T/F"),
_Option(["-e", "expectation"], ["input"], None, 0,
"Expectation value cutoff."),
_Option(["-W", "wordsize"], ["input"], None, 0,
"Word size"),
_Option(["-K", "keep_hits"], ["input"], None, 0,
" Number of best hits from a region to keep."),
_Option(["-X", "xdrop"], ["input"], None, 0,
"Dropoff value (bits) for gapped alignments."),
_Option(["-f", "hit_extend"], ["input"], None, 0,
"Threshold for extending hits."),
_Option(["-L", "region_length"], ["input"], None, 0,
"Length of region used to judge hits."),
_Option(["-Z", "db_length"], ["input"], None, 0,
"Effective database length."),
_Option(["-Y", "search_length"], ["input"], None, 0,
"Effective length of search space."),
_Option(["-N", "nbits_gapping"], ["input"], None, 0,
"Number of bits to trigger gapping."),
_Option(["-c", "pseudocounts"], ["input"], None, 0,
"Pseudocounts constants for multiple passes."),
_Option(["-Z", "xdrop_final"], ["input"], None, 0,
"X dropoff for final gapped alignment."),
_Option(["-y", "xdrop_extension"], ["input"], None, 0,
"Dropoff for blast extensions."),
_Option(["-h", "model_threshold"], ["input"], None, 0,
"E-value threshold to include in multipass model."),
_Option(["-S", "required_start"], ["input"], None, 0,
"Start of required region in query."),
_Option(["-H", "required_end"], ["input"], None, 0,
"End of required region in query."),
# Processing options
_Option(["-p", "program"], ["input"], None, 1,
"The blast program to use."),
_Option(["-d", "database"], ["input"], None, 1,
"The database to BLAST against."),
_Option(["-i", "infile"], ["input", "file"], None, 1,
"The sequence to search with."),
_Option(["-F", "filter"], ["input"], None, 0,
"Filter query sequence with SEG? T/F"),
_Option(["-J", "believe_query"], ["input"], None, 0,
"Believe the query defline? T/F"),
_Option(["-a", "nprocessors"], ["input"], None, 0,
"Number of processors to use."),
# Formatting options
_Option(["-T", "html"], ["input"], None, 0,
"Produce HTML output? T/F"),
_Option(["-v", "descriptions"], ["input"], None, 0,
"Number of one-line descriptions."),
_Option(["-b", "alignments"], ["input"], None, 0,
"Number of alignments."),
_Option(["-m", "align_view"], ["input"], None, 0,
"Alignment view. Integer 0-6."),
_Option(["-I", "show_gi"], ["input"], None, 0,
"Show GI's in deflines? T/F"),
_Option(["-O", "seqalign_file"], ["output", "file"], None, 0,
"seqalign file to output."),
_Option(["-o", "align_outfile"], ["output", "file"], None, 1,
"Output file for alignment."),
_Option(["-C", "checkpoint_outfile"], ["output", "file"], None, 0,
"Output file for PSI-BLAST checkpointing."),
_Option(["-R", "restart_infile"], ["input", "file"], None, 0,
"Input file for PSI-BLAST restart."),
_Option(["-k", "hit_infile"], ["input", "file"], None, 0,
"Hit file for PHI-BLAST."),
_Option(["-Q", "matrix_outfile"], ["output", "file"], None, 0,
"Output file for PSI-BLAST matrix in ASCII."),
_Option(["-B", "align_infile"], ["input", "file"], None, 0,
"Input alignment file for PSI-BLAST restart.")
]
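# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# A minimal illustration of how these legacy commandline wrappers are typically
# parameterised before being run. It assumes the generic set_parameter() helper from
# Bio.Application's AbstractCommandline; the database and file names are made-up
# placeholders.
#
# def build_blastall_cline():
#     cline = BlastallCommandline(blastcmd="blastall")
#     cline.set_parameter("-p", "blastn")       # program
#     cline.set_parameter("-d", "nr")           # database (placeholder)
#     cline.set_parameter("-i", "query.fasta")  # query file (placeholder)
#     cline.set_parameter("-o", "out.blast")    # alignment output file
#     return str(cline)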
| apache-2.0 | -6,651,510,568,992,314,000 | 47.442623 | 77 | 0.476311 | false | 4.112735 | false | false | false |
gkc1000/pyscf | pyscf/nao/test/nao/na2/test_na2.py | 1 | 1729 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from pyscf.tools.siesta_utils import get_siesta_command, get_pseudo
import subprocess
import os
siesta_fdf = """
xml.write .true.
PAO.EnergyShift 100 meV
%block ChemicalSpeciesLabel
1 11 Na
%endblock ChemicalSpeciesLabel
NumberOfAtoms 2
NumberOfSpecies 1
%block AtomicCoordinatesAndAtomicSpecies
0.77573521 0.00000000 0.00000000 1
-0.77573521 0.00000000 0.00000000 1
%endblock AtomicCoordinatesAndAtomicSpecies
MD.NumCGsteps 0
COOP.Write .true.
WriteDenchar .true.
"""
label = 'siesta'
print(siesta_fdf, file=open(label+'.fdf', 'w'))
for sp in ['Na']: os.symlink(get_pseudo(sp), sp+'.psf')
errorcode = subprocess.call(get_siesta_command(label), shell=True)
if errorcode: raise RuntimeError('siesta returned an error: {0}'.format(errorcode))
# run test system_vars
from pyscf.nao.m_system_vars import system_vars_c, diag_check, overlap_check
sv = system_vars_c().init_siesta_xml(label = label)
assert sv.norbs == 10
assert diag_check(sv)
assert overlap_check(sv)
| apache-2.0 | 3,224,419,813,628,457,000 | 31.622642 | 83 | 0.718913 | false | 3.344294 | false | false | false |
TheBigBear/python-o365 | O365/cal.py | 1 | 3618 | import requests
import base64
import json
import logging
import time
from O365.event import Event
logging.basicConfig(filename='o365.log',level=logging.DEBUG)
log = logging.getLogger(__name__)
class Calendar( object ):
'''
Calendar manages lists of events on an associated calendar on office365.
Methods:
getName - Returns the name of the calendar.
getCalendarId - returns the GUID that identifies the calendar on office365
getId - synonym of getCalendarId
getEvents - kicks off the process of fetching events.
fetchEvents - legacy duplicate of getEvents
Variable:
events_url - the url that is actually called to fetch events. takes an ID, start, and end.
time_string - used for converting between struct_time and json's time format.
'''
events_url = 'https://outlook.office365.com/api/v1.0/me/calendars/{0}/calendarview?startDateTime={1}&endDateTime={2}'
time_string = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, json=None, auth=None):
'''
		Wraps all the information for managing calendars.
'''
self.json = json
self.auth = auth
self.events = []
if json:
log.debug('translating calendar information into local variables.')
self.calendarId = json['Id']
self.name = json['Name']
def getName(self):
'''Get the calendar's Name.'''
return self.json['Name']
def getCalendarId(self):
		'''Get calendar's GUID for Office 365. Mostly used internally in this library.'''
return self.json['Id']
def getId(self):
		'''Get calendar's GUID for Office 365. Mostly used internally in this library.'''
return self.getCalendarId()
def fetchEvents(self,start=None,end=None):
'''
		So I originally made this function "fetchEvents", which was a terrible idea. Everything else
		is "getX" except events, which were apparently too good for that. So this function is just a
		pass-through for legacy's sake.
'''
return self.getEvents(start,end)
def getEvents(self,start=None,end=None):
'''
		Pulls events in for this calendar. Default range is today to a year from now.
Keyword Arguments:
start -- The starting date from where you want to begin requesting events. The expected
type is a struct_time. Default is today.
end -- The ending date to where you want to end requesting events. The expected
type is a struct_time. Default is a year from start.
'''
#If no start time has been supplied, it is assumed you want to start as of now.
if not start:
start = time.strftime(self.time_string)
#If no end time has been supplied, it is assumed you want the end time to be a year
#from what ever the start date was.
if not end:
end = time.time()
end += 3600*24*365
end = time.gmtime(end)
end = time.strftime(self.time_string,end)
#This is where the actual call to Office365 happens.
response = requests.get(self.events_url.format(self.json['Id'],start,end),auth=self.auth)
log.info('Response from O365: %s', str(response))
#This takes that response and then parses it into individual calendar events.
for event in response.json()['value']:
try:
duplicate = False
				# Checks to see if the event is a duplicate. If it is, local changes are clobbered.
for i,e in enumerate(self.events):
if e.json['Id'] == event['Id']:
self.events[i] = Event(event,self.auth,self)
duplicate = True
break
if not duplicate:
self.events.append(Event(event,self.auth,self))
log.debug('appended event: %s',event['Subject'])
except Exception as e:
				log.info('failed to append event: %s', str(e))
log.debug('all events retrieved and put in to the list.')
return True
#To the King!
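# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Minimal illustration of pulling events with the Calendar class above. The credential
# tuple and the calendar JSON are assumptions; in the full library a Calendar is
# normally obtained from a Schedule object rather than built by hand.
def list_event_subjects(calendar_json, auth):
    cal = Calendar(json=calendar_json, auth=auth)  # e.g. auth = ('user@example.com', 'password')
    cal.getEvents()  # defaults: from now until one year from now
    return [event.json['Subject'] for event in cal.events]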
| apache-2.0 | -3,655,629,581,435,199,500 | 30.736842 | 118 | 0.705362 | false | 3.40678 | false | false | false |
prds21/repository-barrialTV | lib/lib/utils/scrapingUtils.py | 2 | 7056 | # -*- coding: latin-1 -*-
import regexUtils
import re
import urllib
import urlparse
def findJS(data):
idName = '(?:f*id|ch)'
jsName = '([^\"\']+?\.js[^\"\']*?)'
regex = "(?:java)?scr(?:'\+')?ipt.*?" + idName + "\s*=\s*[\"']([^\"']+)[\"'][^<]*</scr(?:'\+')?ipt\s*>[^<]*<scr(?:'\+')?ipt[^<]*src=[\"']" + jsName + "[\"']"
jscript = regexUtils.findall(data, regex)
if jscript:
jscript = filter(lambda x: x[1].find('twitter') == -1, jscript)
return jscript
return None
def findPHP(data, streamId):
regex = "document.write\('.*?src=['\"]*(.*?.php[^&\"]*).*?['\" ]*.*?\)"
php = regexUtils.findall(data, regex)
if php:
return re.sub(r"\'\+\s*(?:f*id|ch)\s*\+\'", "%s" % streamId,php[0])
regex = "document.write\('.*?src=['\"]*(.*?(?:f*id|ch)\s*\+'\.html*).*?['\" ]*.*?\)"
html = regexUtils.findall(data, regex)
if html:
return re.sub(r"\'\+\s*(?:f*id|ch)\s*\+\'", "%s" % streamId,html[0])
return None
def findRTMP(url, data):
#if data.lower().find('rtmp') == -1:
# return None
try:
text = str(data)
except:
text = data
#method 1
#["'=](http://[^'" ]*.swf[^'" ]*file=([^&"']+)[^'" ]*&streamer=([^"'&]+))
#streamer=([^&"]+).*?file=([^&"]+).*?src="([^"]+.swf)"
# method 2
#"([^"]+.swf\?.*?file=(rtmp[^&]+)&.*?id=([^&"]+)[^"]*)"
sep1 = '[\'"&\? ]'
sep2 = '(?:[\'"]\s*(?:,|\:)\s*[\'"]|=)'
value = '([^\'"&]+)'
method1 = True
method2 = False
radius = 400
playpath = ''
swfUrl = ''
rtmp = regexUtils.findall(text, sep1 + 'streamer' + sep2 + value)
if not rtmp:
tryMethod2 = regexUtils.findall(text, sep1 + 'file' + sep2 + value)
if tryMethod2 and tryMethod2[0].startswith('rtmp'):
method1 = False
method2 = True
rtmp = tryMethod2
if rtmp:
for r in rtmp:
tmpRtmp = r.replace('/&','').replace('&','')
idx = text.find(tmpRtmp)
min_idx = 0
max_idx = len(text) - 1
start = idx-radius
if start < min_idx:
start = min_idx
end = idx+radius
if end > max_idx:
end = max_idx
area = text[start:end]
clipStart = idx+len(tmpRtmp)
if clipStart < max_idx:
text = text[clipStart:]
if method1:
playpath = regexUtils.findall(area, sep1 + 'file' + sep2 + value)
if method2:
playpath = regexUtils.findall(area, sep1 + 'id' + sep2 + value)
if playpath:
tmpRtmp = tmpRtmp + '/' + playpath[0]
if playpath:
swfUrl = regexUtils.findall(area, 'SWFObject\([\'"]([^\'"]+)[\'"]')
if not swfUrl:
swfUrl = regexUtils.findall(area, sep1 + '([^\'"& ]+\.swf)')
if not swfUrl:
swfUrl = regexUtils.findall(data, sep1 + '([^\'"& ]+\.swf)')
if swfUrl:
finalSwfUrl = swfUrl[0]
if not finalSwfUrl.startswith('http'):
finalSwfUrl = urlparse.urljoin(url, finalSwfUrl)
regex = '://(.*?)/'
server = regexUtils.findall(tmpRtmp, regex)
if server:
if server[0].find(':') == -1:
tmpRtmp = tmpRtmp.replace(server[0], server[0] + ':1935')
return [tmpRtmp, playpath[0], finalSwfUrl]
return None
def getHostName(url):
scheme = urlparse.urlparse(url)
if scheme:
return scheme.netloc.replace('www.','')
return None
def findFrames(data):
if data.lower().find('frame') == -1:
return None
return regexUtils.findall(data, "(frame[^>]*)>")
def findContentRefreshLink(data):
regex = '0;\s*url=([^\'" ]+)'
links = regexUtils.findall(data, regex)
if links:
return links[0]
regex = 'window.location\s*=\s*[\'"]([^\'"]+)[\'"]'
links = regexUtils.findall(data, regex)
if links:
return links[0]
regex = 'frame\s*scrolling=\"auto\"\s*noresize\s*src\s*=\s*[\'"]([^\'"]+)[\'"]'
links = regexUtils.findall(data, regex)
if links:
return links[0]
return None
def findEmbedPHPLink(data):
regex = '<script type="text/javascript" src="((?![^"]+localtimes)(?![^"]+adcash)[^"]+\.php\?[^"]+)"\s*>\s*</script>'
links = regexUtils.findall(data, regex)
if links:
return links[0]
return None
def findVideoFrameLink(page, data):
minheight=300
minwidth=300
frames = findFrames(data)
if not frames:
return None
iframes = regexUtils.findall(data, "(frame(?![^>]*cbox\.ws)(?![^>]*chat\d*\.\w+)(?![^>]*ad122m)(?![^>]*adshell)(?![^>]*capacanal)(?![^>]*blacktvlive\.com)[^>]*\sheight\s*=\s*[\"']*([\%\d]+)(?:px)?[\"']*[^>]*>)")
if iframes:
for iframe in iframes:
if iframe[1] == '100%':
height = minheight+1
else:
height = int(iframe[1])
if height > minheight:
m = regexUtils.findall(iframe[0], "[\"' ]width\s*=\s*[\"']*(\d+[%]*)(?:px)?[\"']*")
if m:
if m[0] == '100%':
width = minwidth+1
else:
width = int(m[0])
if width > minwidth:
m = regexUtils.findall(iframe[0], '[\'"\s]src=["\']*\s*([^"\' ]+)\s*["\']*')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
# Alternative 1
iframes = regexUtils.findall(data, "(frame(?![^>]*cbox\.ws)(?![^>]*capacanal)(?![^>]*blacktvlive\.com)[^>]*[\"; ]height:\s*(\d+)[^>]*>)")
if iframes:
for iframe in iframes:
height = int(iframe[1])
if height > minheight:
m = regexUtils.findall(iframe[0], "[\"; ]width:\s*(\d+)")
if m:
width = int(m[0])
if width > minwidth:
m = regexUtils.findall(iframe[0], '[\"; ]src=["\']*\s*([^"\' ]+)\s*["\']*')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
# Alternative 2 (Frameset)
m = regexUtils.findall(data, '<FRAMESET[^>]+100%[^>]+>\s*<FRAME[^>]+src="([^"]+)"')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
m = regexUtils.findall(data, '<a href="([^"]+)" target="_blank"><img src="[^"]+" height="450" width="600" longdesc="[^"]+"/></a>')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
return None
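# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Shows how the helpers above are typically chained by a caller: prefer a large
# embedded frame link, otherwise fall back to RTMP detection. The tuple-based return
# shape is an assumption for illustration; callers in the add-on have their own
# conventions.
def resolveStream(url, data):
    frameLink = findVideoFrameLink(url, data)
    if frameLink:
        return ('frame', frameLink)
    rtmpInfo = findRTMP(url, data)
    if rtmpInfo:
        rtmpUrl, playpath, swfUrl = rtmpInfo
        return ('rtmp', '%s playpath=%s swfUrl=%s' % (rtmpUrl, playpath, swfUrl))
    return None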
| gpl-2.0 | 976,519,082,216,761,200 | 31.072727 | 215 | 0.44161 | false | 3.503476 | false | false | false |
GaloisInc/hacrypto | src/C++/Mozilla/old_snapshots/manager/ssl/tests/unit/test_cert_version/generate.py | 1 | 3360 | #!/usr/bin/python
# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import tempfile, os, sys
libpath = os.path.abspath('../psm_common_py')
sys.path.append(libpath)
import CertUtils
srcdir = os.getcwd()
db = tempfile.mkdtemp()
def generate_child_cert(db_dir, dest_dir, noise_file, name, ca_nick,
cert_version, do_bc, is_ee):
return CertUtils.generate_child_cert(db_dir, dest_dir, noise_file, name,
ca_nick, cert_version, do_bc, is_ee, '')
def generate_ee_family(db_dir, dest_dir, noise_file, ca_name):
name = "v1_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 1, False, True)
name = "v1_bc_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 1, True, True)
name = "v2_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 2, False, True)
name = "v2_bc_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 2, True, True)
name = "v3_missing_bc_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 3, False, True)
name = "v3_bc_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 3, True, True)
name = "v4_bc_ee-"+ ca_name;
generate_child_cert(db_dir, dest_dir, noise_file, name, ca_name, 4, True, True)
def generate_intermediates_and_ee_set(db_dir, dest_dir, noise_file, ca_name):
name = "v1_int-" + ca_name;
generate_child_cert(db, srcdir, noise_file, name, ca_name, 1, False, False)
generate_ee_family(db, srcdir, noise_file, name)
name = "v1_int_bc-" + ca_name;
generate_child_cert(db, srcdir, noise_file, name, ca_name, 1, True, False)
generate_ee_family(db, srcdir, noise_file, name)
name = "v2_int-" + ca_name;
generate_child_cert(db, srcdir, noise_file, name, ca_name, 2, False, False)
generate_ee_family(db, srcdir, noise_file, name)
name = "v2_int_bc-" + ca_name;
generate_child_cert(db, srcdir, noise_file, name, ca_name, 2, True, False)
generate_ee_family(db, srcdir, noise_file, name)
name = "v3_int_missing_bc-" + ca_name;
generate_child_cert(db, srcdir, noise_file, name, ca_name, 3, False, False)
generate_ee_family(db, srcdir, noise_file, name)
name = "v3_int-" + ca_name;
generate_child_cert(db, srcdir, noise_file, name, ca_name, 3, True, False)
generate_ee_family(db, srcdir, noise_file, name)
def generate_ca(db_dir, dest_dir, noise_file, name, version, do_bc):
CertUtils.generate_ca_cert(db_dir, dest_dir, noise_file, name, version, do_bc)
generate_intermediates_and_ee_set(db_dir, dest_dir, noise_file, name)
def generate_certs():
[noise_file, pwd_file] = CertUtils.init_nss_db(db)
generate_ca(db, srcdir, noise_file, "v1_ca", 1, False )
generate_ca(db, srcdir, noise_file, "v1_ca_bc", 1, True)
generate_ca(db, srcdir, noise_file, "v2_ca", 2, False )
generate_ca(db, srcdir, noise_file, "v2_ca_bc", 2, True)
generate_ca(db, srcdir, noise_file, "v3_ca", 3, True )
generate_ca(db, srcdir, noise_file, "v3_ca_missing_bc", 3, False)
generate_certs();
| bsd-3-clause | 5,472,392,672,683,007,000 | 42.636364 | 82 | 0.663095 | false | 2.679426 | false | false | false |
matrix-org/synapse | synapse/storage/databases/main/user_erasure_store.py | 1 | 3536 | # Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable
from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cached, cachedList
class UserErasureWorkerStore(SQLBaseStore):
@cached()
async def is_user_erased(self, user_id: str) -> bool:
"""
Check if the given user id has requested erasure
Args:
user_id: full user id to check
Returns:
True if the user has requested erasure
"""
result = await self.db_pool.simple_select_onecol(
table="erased_users",
keyvalues={"user_id": user_id},
retcol="1",
desc="is_user_erased",
)
return bool(result)
@cachedList(cached_method_name="is_user_erased", list_name="user_ids")
async def are_users_erased(self, user_ids: Iterable[str]) -> Dict[str, bool]:
"""
Checks which users in a list have requested erasure
Args:
user_ids: full user ids to check
Returns:
for each user, whether the user has requested erasure.
"""
rows = await self.db_pool.simple_select_many_batch(
table="erased_users",
column="user_id",
iterable=user_ids,
retcols=("user_id",),
desc="are_users_erased",
)
erased_users = {row["user_id"] for row in rows}
return {u: u in erased_users for u in user_ids}
class UserErasureStore(UserErasureWorkerStore):
async def mark_user_erased(self, user_id: str) -> None:
"""Indicate that user_id wishes their message history to be erased.
Args:
user_id: full user_id to be erased
"""
def f(txn):
# first check if they are already in the list
txn.execute("SELECT 1 FROM erased_users WHERE user_id = ?", (user_id,))
if txn.fetchone():
return
# they are not already there: do the insert.
txn.execute("INSERT INTO erased_users (user_id) VALUES (?)", (user_id,))
self._invalidate_cache_and_stream(txn, self.is_user_erased, (user_id,))
await self.db_pool.runInteraction("mark_user_erased", f)
async def mark_user_not_erased(self, user_id: str) -> None:
"""Indicate that user_id is no longer erased.
Args:
user_id: full user_id to be un-erased
"""
def f(txn):
# first check if they are already in the list
txn.execute("SELECT 1 FROM erased_users WHERE user_id = ?", (user_id,))
if not txn.fetchone():
return
# They are there, delete them.
self.db_pool.simple_delete_one_txn(
txn, "erased_users", keyvalues={"user_id": user_id}
)
self._invalidate_cache_and_stream(txn, self.is_user_erased, (user_id,))
await self.db_pool.runInteraction("mark_user_not_erased", f)
| apache-2.0 | 7,605,121,877,914,381,000 | 32.67619 | 84 | 0.604355 | false | 3.881449 | false | false | false |
USC-ICT/gift-integration-demo | GiftDemo/Assets/StreamingAssets/SB/locomotion-ChrBrad-init.py | 1 | 4404 |
print "locomotionInitSkeleton = " + locomotionInitSkeleton
#locomotion smooth cycle
smoothMotion = scene.getMotion("ChrBrad_ChrMarine@RunCircleRt01")
smoothMotion.smoothCycle("ChrBrad_ChrMarine@RunCircleRt01_smooth",0.1);
smoothMotion = scene.getMotion("ChrBrad_ChrMarine@WalkCircleRt01")
smoothMotion.smoothCycle("ChrBrad_ChrMarine@WalkCircleRt01_smooth",0.1);
smoothMotion = scene.getMotion("ChrBrad_ChrMarine@WalkTightCircleRt01")
smoothMotion.smoothCycle("ChrBrad_ChrMarine@WalkTightCircleRt01_smooth",0.1);
smoothMotion = scene.getMotion("ChrBrad_ChrMarine@StrafeFastRt01")
smoothMotion.smoothCycle("ChrBrad_ChrMarine@StrafeFastRt01_smooth",0.1);
smoothMotion = scene.getMotion("ChrBrad_ChrMarine@Meander01")
smoothMotion.smoothCycle("ChrBrad_ChrMarine@Meander01_smooth",0.2);
#locomotion mirror
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@WalkCircleRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@WalkCircleLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@WalkTightCircleRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@WalkTightCircleLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@StrafeFastRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@StrafeFastLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@StrafeSlowRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@StrafeSlowLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@RunCircleRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@RunCircleLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@RunTightCircleRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@RunTightCircleLf01", locomotionInitSkeleton)
#mirroring for smooth cycle motion
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@WalkCircleRt01_smooth")
mirrorMotion.mirror("ChrBrad_ChrMarine@WalkCircleLf01_smooth", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@WalkTightCircleRt01_smooth")
mirrorMotion.mirror("ChrBrad_ChrMarine@WalkTightCircleLf01_smooth", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@StrafeFastRt01_smooth")
mirrorMotion.mirror("ChrBrad_ChrMarine@StrafeFastLf01_smooth", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@RunCircleRt01_smooth")
mirrorMotion.mirror("ChrBrad_ChrMarine@RunCircleLf01_smooth", locomotionInitSkeleton)
#idle turn mirror
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Turn90Rt")
mirrorMotion.mirror("ChrBrad_ChrMarine@Turn90Lf", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Turn180Rt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Turn180Lf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Turn360Rt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Turn360Lf01", locomotionInitSkeleton)
#starting mirror
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Idle01_ToWalk01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Idle01_ToWalkLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Rt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Rt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01", locomotionInitSkeleton)
#step mirror
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Idle01_StepBackwardsRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Idle01_StepBackwardsLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Idle01_StepForwardRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Idle01_StepForwardLf01", locomotionInitSkeleton)
mirrorMotion = scene.getMotion("ChrBrad_ChrMarine@Idle01_StepSidewaysRt01")
mirrorMotion.mirror("ChrBrad_ChrMarine@Idle01_StepSidewaysLf01", locomotionInitSkeleton)
# locomotion main state
scene.run("locomotion-ChrBrad-state-Locomotion.py")
# starting state, starting locomotion with different angle
scene.run("locomotion-ChrBrad-state-StartingLeft.py")
scene.run("locomotion-ChrBrad-state-StartingRight.py")
# idle turn state, facing adjusting
scene.run("locomotion-ChrBrad-state-IdleTurn.py")
# step state, stepping adjusting
scene.run("locomotion-ChrBrad-state-Step.py")
# transitions
scene.run("locomotion-ChrBrad-transitions.py")
| bsd-3-clause | 4,720,898,787,848,917,000 | 53.746835 | 92 | 0.828338 | false | 2.59364 | false | false | false |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/LogisticRegression.py | 1 | 4336 |
"""
This module provides code for doing logistic regressions.
Classes:
LogisticRegression Holds information for a LogisticRegression classifier.
Functions:
train Train a new classifier.
calculate Calculate the probabilities of each class, given an observation.
classify Classify an observation into a class.
"""
try:
from Numeric import *
from LinearAlgebra import * # inverse
except ImportError, x:
raise ImportError, "This module requires Numeric (precursor to NumPy) with the LinearAlgebra lib"
from Bio import listfns
class LogisticRegression:
"""Holds information necessary to do logistic regression
classification.
Members:
beta List of the weights for each dimension.
"""
def __init__(self):
"""LogisticRegression()"""
beta = []
def train(xs, ys, update_fn=None, typecode=None):
"""train(xs, ys[, update_fn]) -> LogisticRegression
Train a logistic regression classifier on a training set. xs is a
list of observations and ys is a list of the class assignments,
which should be 0 or 1. xs and ys should contain the same number
of elements. update_fn is an optional callback function that
    takes as parameters the iteration number and log likelihood.
"""
if len(xs) != len(ys):
raise ValueError, "xs and ys should be the same length."
if not xs or not xs[0]:
raise ValueError, "No observations or observation of 0 dimension."
classes = listfns.items(ys)
classes.sort()
if classes != [0, 1]:
raise ValueError, "Classes should be 0's and 1's"
if typecode is None:
typecode = Float
# Dimensionality of the data is the dimensionality of the
# observations plus a constant dimension.
N, ndims = len(xs), len(xs[0]) + 1
# Make an X array, with a constant first dimension.
X = ones((N, ndims), typecode)
X[:, 1:] = xs
Xt = transpose(X)
y = asarray(ys, typecode)
# Initialize the beta parameter to 0.
beta = zeros(ndims, typecode)
MAX_ITERATIONS = 500
CONVERGE_THRESHOLD = 0.01
stepsize = 1.0
# Now iterate using Newton-Raphson until the log-likelihoods
# converge.
iter = 0
old_beta = old_llik = None
while iter < MAX_ITERATIONS:
# Calculate the probabilities. p = e^(beta X) / (1+e^(beta X))
ebetaX = exp(dot(beta, Xt))
p = ebetaX / (1+ebetaX)
# Find the log likelihood score and see if I've converged.
logp = y*log(p) + (1-y)*log(1-p)
llik = sum(logp)
if update_fn is not None:
update_fn(iter, llik)
# Check to see if the likelihood decreased. If it did, then
# restore the old beta parameters and half the step size.
if llik < old_llik:
stepsize = stepsize / 2.0
beta = old_beta
# If I've converged, then stop.
if old_llik is not None and fabs(llik-old_llik) <= CONVERGE_THRESHOLD:
break
old_llik, old_beta = llik, beta
iter += 1
W = identity(N) * p
Xtyp = dot(Xt, y-p) # Calculate the first derivative.
XtWX = dot(dot(Xt, W), X) # Calculate the second derivative.
#u, s, vt = singular_value_decomposition(XtWX)
#print "U", u
#print "S", s
delta = dot(inverse(XtWX), Xtyp)
if fabs(stepsize-1.0) > 0.001:
delta = delta * stepsize
beta = beta + delta # Update beta.
else:
raise AssertionError, "Didn't converge."
lr = LogisticRegression()
lr.beta = map(float, beta) # Convert back to regular array.
return lr
def calculate(lr, x):
"""calculate(lr, x) -> list of probabilities
Calculate the probability for each class. lr is a
LogisticRegression object. x is the observed data. Returns a
list of the probability that it fits each class.
"""
# Insert a constant term for x.
x = asarray([1.0] + x)
# Calculate the probability. p = e^(beta X) / (1+e^(beta X))
ebetaX = exp(dot(lr.beta, x))
p = ebetaX / (1+ebetaX)
return [1-p, p]
def classify(lr, x):
"""classify(lr, x) -> 1 or 0
Classify an observation into a class.
"""
probs = calculate(lr, x)
if probs[0] > probs[1]:
return 0
return 1
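# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Tiny illustration of the train/calculate/classify round trip above. The toy
# observations are made up and deliberately overlapping, since Newton-Raphson in
# train() asserts if the likelihood never converges (e.g. perfectly separable data).
def _example_usage():
    xs = [[0.2], [0.6], [0.4], [0.5], [0.7], [0.9]]
    ys = [0, 0, 1, 0, 1, 1]
    lr = train(xs, ys)
    print calculate(lr, [0.3])  # [P(class 0), P(class 1)]
    print classify(lr, [0.8])   # predicted class, 0 or 1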
| apache-2.0 | 5,607,639,300,238,205,000 | 30.194245 | 101 | 0.617159 | false | 3.699659 | false | false | false |
Eksmo/calibre | src/calibre/gui2/convert/txt_output_ui.py | 1 | 5427 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/convert/txt_output.ui'
#
# Created: Thu Jul 19 23:32:30 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(392, 346)
self.verticalLayout_2 = QtGui.QVBoxLayout(Form)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 0, 0, 1, 1)
self.opt_txt_output_encoding = EncodingComboBox(self.groupBox)
self.opt_txt_output_encoding.setEditable(True)
self.opt_txt_output_encoding.setObjectName(_fromUtf8("opt_txt_output_encoding"))
self.gridLayout.addWidget(self.opt_txt_output_encoding, 0, 1, 1, 1)
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.opt_newline = QtGui.QComboBox(self.groupBox)
self.opt_newline.setObjectName(_fromUtf8("opt_newline"))
self.gridLayout.addWidget(self.opt_newline, 1, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
self.opt_txt_output_formatting = QtGui.QComboBox(self.groupBox)
self.opt_txt_output_formatting.setObjectName(_fromUtf8("opt_txt_output_formatting"))
self.gridLayout.addWidget(self.opt_txt_output_formatting, 2, 1, 1, 1)
self.verticalLayout_2.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
self.opt_max_line_length = QtGui.QSpinBox(self.groupBox_2)
self.opt_max_line_length.setObjectName(_fromUtf8("opt_max_line_length"))
self.gridLayout_2.addWidget(self.opt_max_line_length, 1, 1, 1, 1)
self.opt_force_max_line_length = QtGui.QCheckBox(self.groupBox_2)
self.opt_force_max_line_length.setObjectName(_fromUtf8("opt_force_max_line_length"))
self.gridLayout_2.addWidget(self.opt_force_max_line_length, 2, 0, 1, 2)
self.opt_inline_toc = QtGui.QCheckBox(self.groupBox_2)
self.opt_inline_toc.setObjectName(_fromUtf8("opt_inline_toc"))
self.gridLayout_2.addWidget(self.opt_inline_toc, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.groupBox_3 = QtGui.QGroupBox(Form)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.opt_keep_links = QtGui.QCheckBox(self.groupBox_3)
self.opt_keep_links.setObjectName(_fromUtf8("opt_keep_links"))
self.verticalLayout.addWidget(self.opt_keep_links)
self.opt_keep_image_references = QtGui.QCheckBox(self.groupBox_3)
self.opt_keep_image_references.setObjectName(_fromUtf8("opt_keep_image_references"))
self.verticalLayout.addWidget(self.opt_keep_image_references)
self.opt_keep_color = QtGui.QCheckBox(self.groupBox_3)
self.opt_keep_color.setObjectName(_fromUtf8("opt_keep_color"))
self.verticalLayout.addWidget(self.opt_keep_color)
self.verticalLayout_2.addWidget(self.groupBox_3)
self.label_3.setBuddy(self.opt_txt_output_encoding)
self.label.setBuddy(self.opt_newline)
self.label_4.setBuddy(self.opt_txt_output_formatting)
self.label_2.setBuddy(self.opt_max_line_length)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_("Form"))
self.groupBox.setTitle(_("General"))
self.label_3.setText(_("Output &Encoding:"))
self.label.setText(_("&Line ending style:"))
self.label_4.setText(_("&Formatting:"))
self.groupBox_2.setTitle(_("Plain"))
self.label_2.setText(_("&Maximum line length:"))
self.opt_force_max_line_length.setText(_("Force maximum line length"))
self.opt_inline_toc.setText(_("&Inline TOC"))
self.groupBox_3.setTitle(_("Markdown, Textile"))
self.opt_keep_links.setText(_("Do not remove links (<a> tags) before processing"))
self.opt_keep_image_references.setText(_("Do not remove image references before processing"))
self.opt_keep_color.setText(_("Keep text color, when possible"))
from calibre.gui2.widgets import EncodingComboBox
| gpl-3.0 | 8,035,240,253,747,844,000 | 52.732673 | 114 | 0.682329 | false | 3.526316 | false | false | false |
FuegoFro/KeepTalkingBot | src/modules/morse_code_solution.py | 1 | 5695 | from enum import Enum
from typing import (
List,
Optional,
)
_Signals = Enum("Signals", ["DOT", "DASH"])
_Pauses = Enum("Pauses", ["SIGNAL", "LETTER", "WORD"])
_ON_TIMINGS = {
0.25: _Signals.DOT,
0.75: _Signals.DASH,
}
_OFF_TIMINGS = {
0.25: _Pauses.SIGNAL,
1.25: _Pauses.LETTER,
2.50: _Pauses.WORD,
}
_ORDERED_WORDS = (
'shell',
'halls',
'slick',
'trick',
'boxes',
'leaks',
'strobe',
'bistro',
'flick',
'bombs',
'break',
'brick',
'steak',
'sting',
'vector',
'beats',
)
def _make_letters():
# This function is here so that we can expose the _Signals variables with nice names, which makes the dictionary
# below much nicer. This should only be called once, to initialize the _LETTERS variable.
dot = _Signals.DOT
dash = _Signals.DASH
return {
(dot, dash): 'a',
(dash, dot, dot, dot): 'b',
(dash, dot, dash, dot): 'c',
(dash, dot, dot): 'd',
(dot,): 'e',
(dot, dot, dash, dot): 'f',
(dash, dash, dot): 'g',
(dot, dot, dot, dot): 'h',
(dot, dot): 'i',
(dot, dash, dash, dash): 'j',
(dash, dot, dash): 'k',
(dot, dash, dot, dot): 'l',
(dash, dash): 'm',
(dash, dot): 'n',
(dash, dash, dash): 'o',
(dot, dash, dash, dot): 'p',
(dash, dash, dot, dash): 'q',
(dot, dash, dot): 'r',
(dot, dot, dot): 's',
(dash,): 't',
(dot, dot, dash): 'u',
(dot, dot, dot, dash): 'v',
(dot, dash, dash): 'w',
(dash, dot, dot, dash): 'x',
(dash, dot, dash, dash): 'y',
(dash, dash, dot, dot): 'z',
}
_LETTERS = _make_letters()
def _get_closest_time_entry(seconds, timing_dict):
distances = [(abs(seconds - reference_duration), pause_type)
for reference_duration, pause_type in timing_dict.iteritems()]
distances = sorted(distances, key=lambda x: x[0])
return distances[0][1]
def _signals_to_letter(signals):
return _LETTERS[tuple(signals)]
class MorseCodeState(object):
def __init__(self):
super(MorseCodeState, self).__init__()
self.word_start_index = None
# Assuming words are 5 letters long
self.letters = [None] * 5
self.next_letter_index = 0
self.current_partial_letter = None # type: Optional[List[_Signals]]
def ingest_timing(self, seconds, is_on):
"""It is invalid to call this once is_word_known returns True"""
if is_on:
if self.current_partial_letter is None:
return
signal = _get_closest_time_entry(seconds, _ON_TIMINGS)
self.current_partial_letter.append(signal)
else:
pause_type = _get_closest_time_entry(seconds, _OFF_TIMINGS)
if pause_type == _Pauses.SIGNAL:
return
# Handle letter or word gap. Both do the letter behavior.
if self.current_partial_letter is not None:
letter = _signals_to_letter(self.current_partial_letter)
print "ADDING LETTER:", letter
self.letters[self._get_next_letter_index()] = letter
# Assume we'll never wrap around, since we should know what the word is by then.
self.next_letter_index += 1
self.current_partial_letter = []
if pause_type == _Pauses.WORD:
# It's possible this is the last thing we see, in which case we'll need to make sure it's within
# the range of the array.
self.word_start_index = self._get_next_letter_index()
def _get_next_letter_index(self):
return self.next_letter_index % len(self.letters)
def _get_word_if_possible(self):
# This function tries to find the word given a subset of the total possible information
if self.next_letter_index == 0:
# We have no information yet, so we can't know the word yet.
return None
def find_single_matching_word(predicate):
# This helper function will check to see if exactly one word matches the given predicate. If so, it will
# return that word, otherwise it'll return None.
possible_word = None
for word in _ORDERED_WORDS:
if predicate(word):
if possible_word is not None:
# Multiple possibilities, we don't know what word it is
return None
possible_word = word
return possible_word
if self.word_start_index is None:
# No start index, so we have to look inside every word
partial_word = "".join(self.letters[:self._get_next_letter_index()])
return find_single_matching_word(lambda word: partial_word in word)
else:
# We have a start index, can check beginnings and ends of words
end = "".join(self.letters[:self.word_start_index])
start = "".join(self.letters[self.word_start_index:self._get_next_letter_index()])
return find_single_matching_word(lambda word: word.startswith(start) and word.endswith(end))
def is_word_known(self):
word = self._get_word_if_possible()
if word is None and self.next_letter_index >= 2 * len(self.letters):
assert False, "Can't find word, but got all letters twice: {}".format(self.letters)
return word is not None
def get_num_time_to_press_right_arrow(self):
word = self._get_word_if_possible()
assert word is not None
return _ORDERED_WORDS.index(word)
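# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Shows how a caller is expected to drive MorseCodeState: feed it alternating on/off
# light durations until the word is recognised, then read off how many right-arrow
# presses are needed. The timings list is a made-up placeholder.
def _example_decode(timings):
    # timings: iterable of (seconds, is_on) pairs measured from the blinking light
    state = MorseCodeState()
    for seconds, is_on in timings:
        state.ingest_timing(seconds, is_on)
        if state.is_word_known():
            return state.get_num_time_to_press_right_arrow()
    return None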
| mit | -9,062,401,739,373,426,000 | 33.101796 | 116 | 0.561721 | false | 3.643634 | false | false | false |
marklescroart/bvp | bvp/BlendScripts/blendermada_download.py | 1 | 1737 | # Download all materials from blendermada online material library (www.blendermada.com)
# First install blendermada plugin (v 0.9.8-b, downloaded 2016/10/04)
# Before calling: Open a blank .blend file
import os
import bpy
import bvp
scn = bpy.context.scene
if bpy.app.version < (2, 80, 0):
    vl = scn
else:
    vl = bpy.context.view_layer
# Get all latest materials
scn.render.engine = 'CYCLES'
bpy.ops.bmd.update()
# Clear scene
for o in scn.objects:
scn.objects.unlink(o)
# Add single cube
bpy.ops.mesh.primitive_cube_add(location=(0,0,0))
ob = bpy.context.object
# Loop over categories (e.g. Cloth, Nature, etc)
for cat_idx in range(len(scn.bmd_category_list)):
scn.bmd_category_list_idx = cat_idx
cat_name = scn.bmd_category_list[cat_idx].name
print("Importing {} materials...".format(cat_name.lower()))
# Loop over specific materials
for mat_idx in range(len(scn.bmd_material_list)):
scn.bmd_material_list_idx = mat_idx
mat_name = scn.bmd_material_list[mat_idx].name
# Import material
bpy.ops.bmd.importmat()
vl.update()
mat = ob.material_slots[0].material
# Incorporate category into name, set up fake user to keep material through close of file
mat.name = '{}_{}'.format(cat_name.lower(), mat_name.lower())
mat.use_fake_user = True
# Clear scene
for o in scn.objects:
scn.objects.unlink(o)
sfile = os.path.expanduser(bvp.config.get('path','db_dir'))
fname = 'Blendermada_Materials.blend'
sfile = os.path.join(sfile, 'Material', fname)
bpy.ops.wm.save_mainfile(filepath=sfile)
dbi = bvp.DBInterface()
for mat in bpy.data.materials:
m = bvp.Material(name=mat.name, fname=fname, _id=dbi.get_uuid(), dbi=dbi)
m.save() | bsd-2-clause | -2,297,418,442,597,044,500 | 28.965517 | 97 | 0.682211 | false | 2.899833 | false | false | false |
MarcoVogt/basil | examples/lx9/host/lx9.py | 1 | 5704 | #
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import yaml
import time
import numpy as np
from bitarray import bitarray
from basil.dut import Dut
class Pixel(Dut):
"""
A class for communicating with a pixel chip.
"""
def program_global_reg(self):
"""
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
"""
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) #get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray( gr_size * '1') #this is to enable clock
self['SEQ']['GLOBAL_CTR_LD'][gr_size+1:gr_size+2] = bitarray("1") # load signals
self['SEQ']['GLOBAL_DAC_LD'][gr_size+1:gr_size+2] = bitarray("1")
# Execute the program (write bits to output pins)
# + 1 extra 0 bit so that everything ends on LOW instead of HIGH
self._run_seq(gr_size+3)
def program_pixel_reg(self, enable_receiver=True):
"""
Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
if(enable_receiver), stores the output (by byte) in
self['DATA'], retrievable via `chip['DATA'].get_data()`.
"""
self._clear_strobes()
        # enable receiver; it works only if the pixel register is enabled/clocked
self['PIXEL_RX'].set_en(enable_receiver)
px_size = len(self['PIXEL_REG'][:]) #get the size
self['SEQ']['SHIFT_IN'][0:px_size] = self['PIXEL_REG'][:] # this will be shifted out
self['SEQ']['PIXEL_SHIFT_EN'][0:px_size] = bitarray( px_size * '1') #this is to enable clock
print 'px_size', px_size
        self._run_seq(px_size+1) # add 1 more bit so there is a 0 at the end; otherwise it will stay high
def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) #write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
for _ in range(1):
self['SEQ'].start() # start
while not self['SEQ'].get_done():
#time.sleep(0.1)
print "Wait for done..."
def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False)
print "Start"
stream = open("lx9.yaml", 'r')
cnfg = yaml.load(stream)
chip = Pixel(cnfg)
chip.init()
chip['GPIO']['LED1'] = 1
chip['GPIO']['LED2'] = 0
chip['GPIO']['LED3'] = 0
chip['GPIO']['LED4'] = 0
chip['GPIO'].write()
#settings for global register (to input into global SR)
# can be an integer representing the binary number desired,
# or a bitarray (of the form bitarray("10101100")).
chip['GLOBAL_REG']['global_readout_enable'] = 0# size = 1 bit
chip['GLOBAL_REG']['SRDO_load'] = 0# size = 1 bit
chip['GLOBAL_REG']['NCout2'] = 0# size = 1 bit
chip['GLOBAL_REG']['count_hits_not'] = 0# size = 1
chip['GLOBAL_REG']['count_enable'] = 0# size = 1
chip['GLOBAL_REG']['count_clear_not'] = 0# size = 1
chip['GLOBAL_REG']['S0'] = 0# size = 1
chip['GLOBAL_REG']['S1'] = 0# size = 1
chip['GLOBAL_REG']['config_mode'] = 3# size = 2
chip['GLOBAL_REG']['LD_IN0_7'] = 0# size = 8
chip['GLOBAL_REG']['LDENABLE_SEL'] = 0# size = 1
chip['GLOBAL_REG']['SRCLR_SEL'] = 0# size = 1
chip['GLOBAL_REG']['HITLD_IN'] = 0# size = 1
chip['GLOBAL_REG']['NCout21_25'] = 0# size = 5
chip['GLOBAL_REG']['column_address'] = 0# size = 6
chip['GLOBAL_REG']['DisVbn'] = 0# size = 8
chip['GLOBAL_REG']['VbpThStep'] = 0# size = 8
chip['GLOBAL_REG']['PrmpVbp'] = 0# size = 8
chip['GLOBAL_REG']['PrmpVbnFol'] = 0# size = 8
chip['GLOBAL_REG']['vth'] = 0# size = 8
chip['GLOBAL_REG']['PrmpVbf'] = 0# size = 8
print "program global register..."
chip.program_global_reg()
#settings for pixel register (to input into pixel SR)
# can be an integer representing the binary number desired,
# or a bitarray (of the form bitarray("10101100")).
chip['PIXEL_REG'][:] = bitarray('1111111010001100'*8)
print chip['PIXEL_REG']
#chip['PIXEL_REG'][0] = 0
print "program pixel register..."
chip.program_pixel_reg()
time.sleep(0.5)
# Get output size in bytes
print "chip['DATA'].get_FIFO_SIZE() = ", chip['DATA'].get_FIFO_SIZE()
rxd = chip['DATA'].get_data() #get data from sram fifo
print rxd
data0 = rxd.astype(np.uint8) # Change type to unsigned int 8 bits and take from rxd only the last 8 bits
data1 = np.right_shift(rxd, 8).astype(np.uint8) # Rightshift rxd 8 bits and take again last 8 bits
data = np.reshape(np.vstack((data1, data0)), -1, order='F') # data is now a 1 dimensional array of all bytes read from the FIFO
bdata = np.unpackbits(data)
print "data = ", data
print "bdata = ", bdata
| bsd-3-clause | -7,351,077,881,951,031,000 | 32.162791 | 127 | 0.588534 | false | 3.331776 | false | false | false |
combusbvba/trash | plugins/veurne_trash/models.py | 1 | 1851 | # -*- coding: utf-8 -*-
# Copyright 2017 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.2@@
from google.appengine.ext import ndb
from plugins.veurne_trash import plugin_consts
class UserLocation(ndb.Model):
service_identity = ndb.StringProperty()
address = ndb.StringProperty(indexed=False)
street_number = ndb.IntegerProperty(indexed=False)
house_number = ndb.IntegerProperty(indexed=False)
house_bus = ndb.StringProperty(indexed=False)
notifications = ndb.IntegerProperty(indexed=False, repeated=True)
user_data_epoch = ndb.IntegerProperty(indexed=False)
next_collection = ndb.IntegerProperty()
@property
def sik(self):
return self.key.namespace().split("-")[1].decode('utf8')
@property
def email(self):
return self.key.id().split(":")[0].decode('utf8')
@property
def app_id(self):
return self.key.id().split(":")[1].decode('utf8')
@classmethod
def create_key(cls, sik, email, app_id):
return ndb.Key(cls, "%s:%s" % (email, app_id), namespace=UserLocation.create_namespace(sik))
@staticmethod
def get_by_info(sik, email, app_id):
return UserLocation.create_key(sik, email, app_id).get()
@staticmethod
def create_namespace(sik):
return "%s-%s" % (plugin_consts.NAMESPACE, sik)
| apache-2.0 | -9,151,640,832,475,444,000 | 32.654545 | 100 | 0.6953 | false | 3.650888 | false | false | false |
yanheven/keystone | keystone/assignment/role_backends/ldap.py | 9 | 4055 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_config import cfg
from oslo_log import log
from keystone import assignment
from keystone.common import ldap as common_ldap
from keystone.common import models
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import ldap as ldap_identity
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Role(assignment.RoleDriver):
def __init__(self):
super(Role, self).__init__()
self.LDAP_URL = CONF.ldap.url
self.LDAP_USER = CONF.ldap.user
self.LDAP_PASSWORD = CONF.ldap.password
self.suffix = CONF.ldap.suffix
# This is the only deep dependency from resource back
# to identity. The assumption is that if you are using
# LDAP for resource, you are using it for identity as well.
self.user = ldap_identity.UserApi(CONF)
self.role = RoleApi(CONF, self.user)
def get_role(self, role_id):
return self.role.get(role_id)
def list_roles(self, hints):
return self.role.get_all()
def list_roles_from_ids(self, ids):
return [self.get_role(id) for id in ids]
def create_role(self, role_id, role):
self.role.check_allow_create()
try:
self.get_role(role_id)
except exception.NotFound:
pass
else:
msg = _('Duplicate ID, %s.') % role_id
raise exception.Conflict(type='role', details=msg)
try:
self.role.get_by_name(role['name'])
except exception.NotFound:
pass
else:
msg = _('Duplicate name, %s.') % role['name']
raise exception.Conflict(type='role', details=msg)
return self.role.create(role)
def delete_role(self, role_id):
self.role.check_allow_delete()
return self.role.delete(role_id)
def update_role(self, role_id, role):
self.role.check_allow_update()
self.get_role(role_id)
return self.role.update(role_id, role)
# NOTE(heny-nash): A mixin class to enable the sharing of the LDAP structure
# between here and the assignment LDAP.
class RoleLdapStructureMixin(object):
DEFAULT_OU = 'ou=Roles'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'organizationalRole'
DEFAULT_MEMBER_ATTRIBUTE = 'roleOccupant'
NotFound = exception.RoleNotFound
options_name = 'role'
attribute_options_names = {'name': 'name'}
immutable_attrs = ['id']
model = models.Role
# TODO(termie): turn this into a data object and move logic to driver
class RoleApi(RoleLdapStructureMixin, common_ldap.BaseLdap):
def __init__(self, conf, user_api):
super(RoleApi, self).__init__(conf)
self._user_api = user_api
def get(self, role_id, role_filter=None):
model = super(RoleApi, self).get(role_id, role_filter)
return model
def create(self, values):
return super(RoleApi, self).create(values)
def update(self, role_id, role):
new_name = role.get('name')
if new_name is not None:
try:
old_role = self.get_by_name(new_name)
if old_role['id'] != role_id:
raise exception.Conflict(
_('Cannot duplicate name %s') % old_role)
except exception.NotFound:
pass
return super(RoleApi, self).update(role_id, role)
def delete(self, role_id):
super(RoleApi, self).delete(role_id)
| apache-2.0 | -7,173,758,972,176,994,000 | 31.44 | 76 | 0.639457 | false | 3.821866 | false | false | false |
dhermes/huk-a-buk | deck.py | 1 | 5631 | import random
CARD_VALUES = {
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
'J': 11,
'Q': 12,
'K': 13,
'A': 14,
}
CARD_SUITS = {
'H': u'\u2665',
'S': u'\u2660',
'C': u'\u2663',
'D': u'\u2666',
}
CARD_SERIALIZE = {
# Tuples of (from_original_hand, suit, value)
(True, 'H', 2): chr(0),
(True, 'H', 3): chr(1),
(True, 'H', 4): chr(2),
(True, 'H', 5): chr(3),
(True, 'H', 6): chr(4),
(True, 'H', 7): chr(5),
(True, 'H', 8): chr(6),
(True, 'H', 9): chr(7),
(True, 'H', 10): chr(8),
(True, 'H', 'J'): chr(9),
(True, 'H', 'Q'): chr(10),
(True, 'H', 'K'): chr(11),
(True, 'H', 'A'): chr(12),
(True, 'S', 2): chr(13),
(True, 'S', 3): chr(14),
(True, 'S', 4): chr(15),
(True, 'S', 5): chr(16),
(True, 'S', 6): chr(17),
(True, 'S', 7): chr(18),
(True, 'S', 8): chr(19),
(True, 'S', 9): chr(20),
(True, 'S', 10): chr(21),
(True, 'S', 'J'): chr(22),
(True, 'S', 'Q'): chr(23),
(True, 'S', 'K'): chr(24),
(True, 'S', 'A'): chr(25),
(True, 'C', 2): chr(26),
(True, 'C', 3): chr(27),
(True, 'C', 4): chr(28),
(True, 'C', 5): chr(29),
(True, 'C', 6): chr(30),
(True, 'C', 7): chr(31),
(True, 'C', 8): chr(32),
(True, 'C', 9): chr(33),
(True, 'C', 10): chr(34),
(True, 'C', 'J'): chr(35),
(True, 'C', 'Q'): chr(36),
(True, 'C', 'K'): chr(37),
(True, 'C', 'A'): chr(38),
(True, 'D', 2): chr(39),
(True, 'D', 3): chr(40),
(True, 'D', 4): chr(41),
(True, 'D', 5): chr(42),
(True, 'D', 6): chr(43),
(True, 'D', 7): chr(44),
(True, 'D', 8): chr(45),
(True, 'D', 9): chr(46),
(True, 'D', 10): chr(47),
(True, 'D', 'J'): chr(48),
(True, 'D', 'Q'): chr(49),
(True, 'D', 'K'): chr(50),
(True, 'D', 'A'): chr(51),
(False, 'H', 2): chr(52),
(False, 'H', 3): chr(53),
(False, 'H', 4): chr(54),
(False, 'H', 5): chr(55),
(False, 'H', 6): chr(56),
(False, 'H', 7): chr(57),
(False, 'H', 8): chr(58),
(False, 'H', 9): chr(59),
(False, 'H', 10): chr(60),
(False, 'H', 'J'): chr(61),
(False, 'H', 'Q'): chr(62),
(False, 'H', 'K'): chr(63),
(False, 'H', 'A'): chr(64),
(False, 'S', 2): chr(65),
(False, 'S', 3): chr(66),
(False, 'S', 4): chr(67),
(False, 'S', 5): chr(68),
(False, 'S', 6): chr(69),
(False, 'S', 7): chr(70),
(False, 'S', 8): chr(71),
(False, 'S', 9): chr(72),
(False, 'S', 10): chr(73),
(False, 'S', 'J'): chr(74),
(False, 'S', 'Q'): chr(75),
(False, 'S', 'K'): chr(76),
(False, 'S', 'A'): chr(77),
(False, 'C', 2): chr(78),
(False, 'C', 3): chr(79),
(False, 'C', 4): chr(80),
(False, 'C', 5): chr(81),
(False, 'C', 6): chr(82),
(False, 'C', 7): chr(83),
(False, 'C', 8): chr(84),
(False, 'C', 9): chr(85),
(False, 'C', 10): chr(86),
(False, 'C', 'J'): chr(87),
(False, 'C', 'Q'): chr(88),
(False, 'C', 'K'): chr(89),
(False, 'C', 'A'): chr(90),
(False, 'D', 2): chr(91),
(False, 'D', 3): chr(92),
(False, 'D', 4): chr(93),
(False, 'D', 5): chr(94),
(False, 'D', 6): chr(95),
(False, 'D', 7): chr(96),
(False, 'D', 8): chr(97),
(False, 'D', 9): chr(98),
(False, 'D', 10): chr(99),
(False, 'D', 'J'): chr(100),
(False, 'D', 'Q'): chr(101),
(False, 'D', 'K'): chr(102),
(False, 'D', 'A'): chr(103),
}
CARD_DESERIALIZE = {val: key for key, val in CARD_SERIALIZE.items()}
class Card(object):
def __init__(self, suit, value, from_original_hand=True):
self.suit = suit
self.value = value
self.from_original_hand = from_original_hand
self._validate()
def _validate(self):
if self.value not in CARD_VALUES:
raise ValueError('Bad card value', self.value)
if self.suit not in CARD_SUITS:
raise ValueError('Bad card suit', self.suit)
@property
def pretty(self):
return u'%2s%s' % (self.value, CARD_SUITS[self.suit])
def is_better(self, other_card, trump, lead_suit):
if self.suit == other_card.suit:
return CARD_VALUES[self.value] > CARD_VALUES[other_card.value]
# If the suits are different, then at most 1 is trump and at
# most 1 is the lead suit.
if self.suit == trump:
return True
elif other_card.suit == trump:
return False
if self.suit == lead_suit:
return True
elif other_card.suit == lead_suit:
return False
# If neither card is one of the relevant suits, their comparison
# is irrelevant, but `self` is certainly not `is_better`.
return False
def serialize(self):
return CARD_SERIALIZE[(self.from_original_hand, self.suit, self.value)]
@classmethod
def deserialize(cls, char):
from_original_hand, suit, value = CARD_DESERIALIZE[char]
return cls(suit, value, from_original_hand=from_original_hand)
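# Illustrative example (not part of the original module): with hearts as trump
# and spades led, a low trump still beats a high off-suit card:
#   Card('H', 2).is_better(Card('S', 'A'), trump='H', lead_suit='S')  # -> True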
class Deck(object):
def __init__(self):
self.current_index = 0
self.cards = []
for value in CARD_VALUES.keys():
for suit in CARD_SUITS.keys():
new_card = Card(suit, value)
self.cards.append(new_card)
def shuffle(self):
random.shuffle(self.cards)
self.current_index = 0
def draw_card(self):
result = self.cards[self.current_index]
self.current_index += 1
return result
def random_deck():
deck = Deck()
deck.shuffle()
return deck
| apache-2.0 | 2,052,106,489,696,769,500 | 26.468293 | 79 | 0.463683 | false | 2.620289 | false | false | false |
rpedroso/swftools | spec/transpstack.py | 10 | 1481 | #!/usr/bin/python
import sys
sys.path += ["../scripts/"]
import pdf
# a test for transparency groups:
# form xobjects used for doing transparency groups can do savestate (q)
# without ever needing to do a corresponding restorestate (Q) because
# their content stream is self-contained.
#
# Test that this doesn't confuse the pdf reader.
file = pdf.PDF()
page = file.add_page(612,100)
group1 = file.create_object("/XObject", "/Form")
group1.stream = """
0.0 1.0 0.0 rg
0.0 0.0 0.0 RG
10 10 m 70 10 l 70 70 l 10 70 l 10 10 l f
10 10 m 70 10 l 70 70 l 10 70 l 10 10 l s
0.0 0.0 1.0 rg
0.0 0.0 0.0 RG
30 30 m 90 30 l 90 90 l 30 90 l 30 30 l f
30 30 m 90 30 l 90 90 l 30 90 l 30 30 l s
1.0 0 0 1.0 1000 1000 cm q
1.0 0 0 1.0 1000 1000 cm q
1.0 0 0 1.0 1000 1000 cm q
1.0 0 0 1.0 1000 1000 cm q
"""
isolated = "true"
knockout = "true"
group1["/Group"] = pdf.PDFDict({"/S": "/Transparency", "/CS": "/DeviceRGB", "/I": isolated, "/K": knockout})
group1["/BBox"] = pdf.PDFArray([0, 0, 100, 100])
gs = file.create_object("/ExtGState")
gs["/BM"] = "/Normal"
gs["/CA"] = "1.0" # stroke alpha
gs["/ca"] = "1.0" # fill alpha
resources = file.create_object("/Resources")
resources["/XObject"] = pdf.PDFDict({"/mygroup": group1})
resources["/ExtGState"] = pdf.PDFDict({"/gs0": gs})
page.header["/Resources"] = resources
page.stream = """q
1.0 0.0 0.0 rg
0 40 m 612 40 l 612 60 l 0 60 l 0 40 l f
q /gs0 gs 1.0 0 0 1.0 0 0 cm /mygroup Do Q
Q"""
file.write("transpstack.pdf")
| gpl-2.0 | 3,801,060,936,001,166,000 | 24.534483 | 108 | 0.638758 | false | 2.557858 | false | false | false |
currentsea/bitcoinelasticsearch | src/python/bitfinex/bitfinex_trading.py | 2 | 5257 | # Copyright (c) 2016 currentsea, Joseph Bull
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# from __future__ import absolute_import
import time
import json
import hmac
import base64
import hashlib
import requests
import datetime
import bitfinex_properties
from websocket import create_connection
# from decouple import config
class BitfinexPrivate:
def __init__(self, apiKey=bitfinex_properties.BITFINEX_API_KEY, apiSecret=bitfinex_properties.BITFINEX_API_SECRET, wsUrl=bitfinex_properties.WEBSOCKET_URL, apiUrl=bitfinex_properties.REST_API_URL):
self.apiKey = apiKey
self.apiSecret = apiSecret
self.wsUrl = wsUrl
self.apiUrl = apiUrl
self.connectWebsocket()
self.symbols = self.getSymbols()
self.channelMappings = self.getChannelMappings()
def connectWebsocket(self):
try:
self.ws = create_connection(self.wsUrl)
except:
raise
return True
def getSymbols(self):
symbolsApiEndpoint = self.apiUrl + "/symbols"
print ("SYMBOLS ENDPOINT: " + symbolsApiEndpoint)
try:
req = requests.get(symbolsApiEndpoint)
reqJson = req.json()
except:
raise
return reqJson
def subscribeAllChannels(self):
# for symbol in self.symbols:
# self.ws.send(json.dumps({"event": "subscribe", "channel": "book", "pair": symbol, "prec": "P0", "len":"100"}))
# self.ws.send(json.dumps({"event": "subscribe", "channel": "ticker", "pair": symbol}))
# self.ws.send(json.dumps({"event": "subscribe", "channel": "trades", "pair": symbol}))
# # payload = {"event": "auth", "apiKey": self.apiKey}
# payload =
# payloadBytes = bytes(payload, encoding='utf-8')
# encodedData = base64.standard_b64encode(payloadBytes)
theNonce = float(time.time() * 1000000)
theNonceStr = 'AUTH' + str(theNonce)
hashDigest = hmac.new(self.apiSecret.encode('utf8'), theNonceStr.encode('utf-8'), hashlib.sha384)
# encodedData.encode('utf-8')
		signature = hashDigest.hexdigest()
		# hmac's update() returns None; the auth payload is the nonce string itself
		payload = theNonceStr
reqBody = {
"event": "auth",
"apiKey": str(self.apiKey),
"authSig": str(signature),
"authPayload": str(payload)
}
# authJson = json.dumps(reqBody)
self.ws.send(json.dumps(reqBody))
def getNonce(self):
curTime = time.time()
nonce = str(int(curTime * 1000000))
authNonce = 'AUTH' + nonce
return authNonce
# def signPayload(self, payload):
# return {
# "X-BFX-APIKEY": API_KEY,
# "X-BFX-SIGNATURE": signature,
# "X-BFX-PAYLOAD": data
# }
# var
# crypto = require('crypto'),
# api_key = 'API_KEY',
# api_secret = 'API_SECRET',
# payload = 'AUTH' + (new Date().getTime()),
# signature = crypto.createHmac("sha384", api_secret).update(payload).digest('hex');
# w.send(JSON.stringify({
# event: "auth",
# apiKey: api_key,
# authSig: signature,
# authPayload: payload
# }));
# // request
# {
# "event":"auth",
# "status":"OK",
# "chanId":0,
# "userId":"<USER_ID>"
# }
def getMappingAuthentication(self):
		payload = 'AUTH' + str(datetime.datetime.utcnow())
		return payload
def getChannelMappings(self):
allChannelsSubscribed = False
channelDict = {}
channelMappings = {}
self.subscribeAllChannels()
while (allChannelsSubscribed == False):
resultData = self.ws.recv()
print (resultData)
try:
dataJson = json.loads(resultData)
pairName = str(dataJson["pair"])
pairChannelType = str(dataJson["channel"])
identifier = pairName
channelId = dataJson["chanId"]
channelDict[channelId] = identifier
channelMappings[channelId] = dataJson
print ("SUBSCRIBE TO CHANNEL " + str(channelId) + " WITH PAIR NAME: " + pairName)
symbolLength = len(self.symbols)
# CHANNELS ARE ALL SUBSCRIBED WHEN SYMBOL LENGTH * # # # # # # # #
targetLength = symbolLength * 3
targetLength = targetLength + 1
# targetLength = 4 # fuck it
# IF THIS SAVED YOU HOURS OF DEBUGGING, YOU'RE FUCKING WELCOME * #
if (len(channelDict) == targetLength):
allChannelsSubscribed = True
except TypeError:
pass
except KeyError:
pass
except:
raise
return channelMappings
if __name__ == "__main__":
that = BitfinexPrivate()
# "event":"auth",
# "status":"OK",
# "chanId":0,
# "userId":"<USER_ID>"
# } | mit | 1,392,404,210,559,619,800 | 30.297619 | 199 | 0.684801 | false | 3.237069 | false | false | false |
tongpo/Holle-World | py/easyGUI/demoprograms/chapter11/canvasdemo1.py | 1 | 1178 | """
File: canvasdemo1.py
Author: Kenneth A. Lambert
"""
from breezypythongui import EasyFrame
import random
class CanvasDemo(EasyFrame):
"""Draws filled ovals on a canvas."""
def __init__(self):
"""Sets up the window and widgets."""
EasyFrame.__init__(self, title = "Canvas Demo 1")
self.colors = ("blue", "green", "red", "yellow")
# Canvas
self.canvas = self.addCanvas(row = 0, column = 0,
width = 300, height = 150,
background = "gray")
# Command button
self.ovalButton = self.addButton(text = "Draw oval",
row = 1, column = 0,
command = self.drawOval)
# Event handling method
def drawOval(self):
"""Draws a filled oval at a random position."""
x = random.randint(0, 300)
y = random.randint(0, 150)
color = random.choice(self.colors)
self.canvas.drawOval(x, y, x + 25, y + 25, fill = color)
# Instantiate and pop up the window."""
if __name__ == "__main__":
CanvasDemo().mainloop()
| gpl-2.0 | 408,554,052,247,756,600 | 30.837838 | 73 | 0.511885 | false | 4.020478 | false | false | false |
astynax/ranger | ranger/container/tags.py | 1 | 4742 | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
# TODO: add a __getitem__ method to get the tag of a file
from __future__ import (absolute_import, division, print_function)
from os.path import exists, abspath, realpath, expanduser, sep
import string
from ranger import PY3
from ranger.core.shared import FileManagerAware
ALLOWED_KEYS = string.ascii_letters + string.digits + string.punctuation
class Tags(FileManagerAware):
default_tag = '*'
def __init__(self, filename):
# COMPAT: The intent is to get abspath/normpath's behavior of
# collapsing `symlink/..`, abspath is retained for historical reasons
# because the documentation states its behavior isn't necessarily in
# line with normpath's.
self._filename = realpath(abspath(expanduser(filename)))
self.sync()
def __contains__(self, item):
return item in self.tags
def add(self, *items, **others):
        if len(items) == 0:
return
tag = others.get('tag', self.default_tag)
self.sync()
for item in items:
self.tags[item] = tag
self.dump()
def remove(self, *items):
        if len(items) == 0:
return
self.sync()
for item in items:
try:
del self.tags[item]
except KeyError:
pass
self.dump()
def toggle(self, *items, **others):
        if len(items) == 0:
return
tag = others.get('tag', self.default_tag)
tag = str(tag)
if tag not in ALLOWED_KEYS:
return
self.sync()
for item in items:
try:
if item in self and tag in (self.tags[item], self.default_tag):
del self.tags[item]
else:
self.tags[item] = tag
except KeyError:
pass
self.dump()
def marker(self, item):
if item in self.tags:
return self.tags[item]
return self.default_tag
def sync(self):
try:
if PY3:
fobj = open(self._filename, 'r', errors='replace')
else:
fobj = open(self._filename, 'r')
except OSError as err:
if exists(self._filename):
self.fm.notify(err, bad=True)
else:
self.tags = dict()
else:
self.tags = self._parse(fobj)
fobj.close()
def dump(self):
try:
fobj = open(self._filename, 'w')
except OSError as err:
self.fm.notify(err, bad=True)
else:
self._compile(fobj)
fobj.close()
def _compile(self, fobj):
for path, tag in self.tags.items():
if tag == self.default_tag:
# COMPAT: keep the old format if the default tag is used
fobj.write(path + '\n')
elif tag in ALLOWED_KEYS:
fobj.write('{0}:{1}\n'.format(tag, path))
def _parse(self, fobj):
result = dict()
for line in fobj:
line = line.rstrip('\n')
if len(line) > 2 and line[1] == ':':
tag, path = line[0], line[2:]
if tag in ALLOWED_KEYS:
result[path] = tag
else:
result[line] = self.default_tag
return result
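    # Illustrative on-disk format handled by _compile/_parse above (assumed):
    #   /home/user/notes.txt      <- tagged with the default tag '*'
    #   b:/home/user/projects     <- tagged with 'b'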
def update_path(self, path_old, path_new):
self.sync()
changed = False
        for path, tag in list(self.tags.items()):  # copy: entries may be deleted during iteration
pnew = None
if path == path_old:
pnew = path_new
elif path.startswith(path_old + sep):
pnew = path_new + path[len(path_old):]
if pnew:
del self.tags[path]
self.tags[pnew] = tag
changed = True
if changed:
self.dump()
def __nonzero__(self):
return True
__bool__ = __nonzero__
class TagsDummy(Tags):
"""A dummy Tags class for use with `ranger --clean`.
It acts like there are no tags and avoids writing any changes.
"""
def __init__(self, filename): # pylint: disable=super-init-not-called
self.tags = dict()
def __contains__(self, item):
return False
def add(self, *items, **others):
pass
def remove(self, *items):
pass
def toggle(self, *items, **others):
pass
def marker(self, item):
return self.default_tag
def sync(self):
pass
def dump(self):
pass
def _compile(self, fobj):
pass
def _parse(self, fobj):
pass
| gpl-3.0 | -598,888,579,103,745,000 | 25.943182 | 79 | 0.518768 | false | 4.123478 | false | false | false |
maas/maas | src/maasserver/websockets/base.py | 1 | 26073 | # Copyright 2015-2018 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The base class that all handlers must extend."""
__all__ = [
"HandlerError",
"HandlerPKError",
"HandlerValidationError",
"Handler",
]
from functools import wraps
from operator import attrgetter
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db.models import Model
from django.utils.encoding import is_protected_type
from maasserver import concurrency
from maasserver.permissions import NodePermission
from maasserver.prometheus.middleware import wrap_query_counter_cursor
from maasserver.rbac import rbac
from maasserver.utils.forms import get_QueryDict
from maasserver.utils.orm import transactional
from maasserver.utils.threads import deferToDatabase
from provisioningserver.prometheus.metrics import PROMETHEUS_METRICS
from provisioningserver.utils.twisted import asynchronous, IAsynchronous
DATETIME_FORMAT = "%a, %d %b. %Y %H:%M:%S"
def dehydrate_datetime(datetime):
"""Convert the `datetime` to string with `DATETIME_FORMAT`."""
if datetime is None:
return ""
else:
return datetime.strftime(DATETIME_FORMAT)
class HandlerError(Exception):
"""Generic exception a handler can raise."""
class HandlerNoSuchMethodError(HandlerError):
"""Raised when an handler doesn't have that method."""
class HandlerPKError(HandlerError):
"""Raised when object is missing its primary key."""
class HandlerValidationError(HandlerError, ValidationError):
"""Raised when object fails to validate on create or update."""
class HandlerDoesNotExistError(HandlerError):
"""Raised when an object by its `pk` doesn't exist."""
class HandlerPermissionError(HandlerError):
"""Raised when permission is denied for the user of a given action."""
def __init__(self):
super().__init__("Permission denied")
class HandlerOptions:
"""Configuraton class for `Handler`.
Provides the needed defaults to the internal `Meta` class used on
the handler.
"""
abstract = False
allowed_methods = [
"list",
"get",
"create",
"update",
"delete",
"set_active",
]
handler_name = None
object_class = None
queryset = None
list_queryset = None
pk = "id"
pk_type = int
fields = None
exclude = None
list_fields = None
list_exclude = None
non_changeable = None
form = None
form_requires_request = True
listen_channels = []
batch_key = "id"
create_permission = None
view_permission = None
edit_permission = None
delete_permission = None
def __new__(cls, meta=None):
overrides = {}
# Meta class will override the defaults based on the values it
# already has set.
if meta:
for override_name in dir(meta):
# Skip over internal field names.
if not override_name.startswith("_"):
overrides[override_name] = getattr(meta, override_name)
# Construct the new object with the overrides from meta.
return object.__new__(type("HandlerOptions", (cls,), overrides))
class HandlerMetaclass(type):
"""Sets up the _meta field on the created class."""
def __new__(cls, name, bases, attrs):
# Construct the class with the _meta field.
new_class = super().__new__(cls, name, bases, attrs)
new_class._meta = HandlerOptions(getattr(new_class, "Meta", None))
# Setup the handlers name based on the naming of the class.
if not getattr(new_class._meta, "handler_name", None):
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split("Handler") if bit]
handler_name = "".join(name_bits).lower()
new_class._meta.handler_name = handler_name
# Setup the object_class if the queryset is provided.
if new_class._meta.queryset is not None:
new_class._meta.object_class = new_class._meta.queryset.model
# Copy the fields and exclude to list_fields and list_exclude
# if empty.
if new_class._meta.list_fields is None:
new_class._meta.list_fields = new_class._meta.fields
if new_class._meta.list_exclude is None:
new_class._meta.list_exclude = new_class._meta.exclude
return new_class
class Handler(metaclass=HandlerMetaclass):
"""Base handler for all handlers in the WebSocket protocol.
Each handler should extend this class to get the basic implementation of
exposing a collection over the WebSocket protocol. The classes that extend
this class must be present in `maasserver.websockets.handlers` for it to
be exposed.
Example:
class SampleHandler(Handler):
class Meta:
queryset = Sample.objects.all()
"""
def __init__(self, user, cache, request):
self.user = user
self.cache = cache
self.request = request
# Holds a set of all pks that the client has loaded and has on their
# end of the connection. This is used to inform the client of the
# correct notifications based on what items the client has.
if "loaded_pks" not in self.cache:
self.cache["loaded_pks"] = set()
def full_dehydrate(self, obj, for_list=False):
"""Convert the given object into a dictionary.
:param for_list: True when the object is being converted to belong
in a list.
"""
if for_list:
allowed_fields = self._meta.list_fields
exclude_fields = self._meta.list_exclude
else:
allowed_fields = self._meta.fields
exclude_fields = self._meta.exclude
data = {}
for field in self._meta.object_class._meta.fields:
# Convert the field name to unicode as some are stored in bytes.
field_name = str(field.name)
# Skip fields that are not allowed.
if allowed_fields is not None and field_name not in allowed_fields:
continue
if exclude_fields is not None and field_name in exclude_fields:
continue
# Get the value from the field and set it in data. The value
# will pass through the dehydrate method if present.
field_obj = getattr(obj, field_name)
dehydrate_method = getattr(self, "dehydrate_%s" % field_name, None)
if dehydrate_method is not None:
data[field_name] = dehydrate_method(field_obj)
else:
value = field.value_from_object(obj)
if is_protected_type(value) or isinstance(value, dict):
data[field_name] = value
elif isinstance(field, ArrayField):
data[field_name] = field.to_python(value)
else:
data[field_name] = field.value_to_string(obj)
# Add permissions that can be performed on this object.
data = self._add_permissions(obj, data)
# Return the data after the final dehydrate.
return self.dehydrate(obj, data, for_list=for_list)
def dehydrate(self, obj, data, for_list=False):
"""Add any extra info to the `data` before finalizing the final object.
:param obj: object being dehydrated.
:param data: dictionary to place extra info.
:param for_list: True when the object is being converted to belong
in a list.
"""
return data
def _is_foreign_key_for(self, field_name, obj, value):
"""Given the specified field name for the specified object, returns
True if the specified value is a foreign key; otherwise returns False.
"""
if isinstance(obj, Model):
field_type = obj._meta.get_field(field_name).get_internal_type()
if field_type == "ForeignKey" and not isinstance(value, Model):
return True
return False
def _add_permissions(self, obj, data):
"""Add permissions to `data` for `obj` based on the current user."""
# Only `edit` and `delete` are used because if the user cannot view
# then it will not call this method at all and create is a global
# action that is not scoped to an object.
has_permissions = (
self._meta.edit_permission is not None
or self._meta.delete_permission is not None
)
if not has_permissions:
return data
permissions = []
if self._meta.edit_permission is not None and self.user.has_perm(
self._meta.edit_permission, obj
):
permissions.append("edit")
if self._meta.delete_permission is not None and self.user.has_perm(
self._meta.delete_permission, obj
):
permissions.append("delete")
data["permissions"] = permissions
return data
def full_hydrate(self, obj, data):
"""Convert the given dictionary to a object."""
allowed_fields = self._meta.fields
exclude_fields = self._meta.exclude
non_changeable_fields = self._meta.non_changeable
for field in self._meta.object_class._meta.fields:
field_name = field.name
# Skip fields that are not allowed.
if field_name == self._meta.pk:
continue
if allowed_fields is not None and field_name not in allowed_fields:
continue
if exclude_fields is not None and field_name in exclude_fields:
continue
if (
non_changeable_fields is not None
and field_name in non_changeable_fields
):
continue
# Update the field if its in the provided data. Passing the value
# through its hydrate method if present.
if field_name in data:
value = data[field_name]
hydrate_method = getattr(self, "hydrate_%s" % field_name, None)
if hydrate_method is not None:
value = hydrate_method(value)
if self._is_foreign_key_for(field_name, obj, value):
# We're trying to populate a foreign key relationship, but
# we don't have a model object. Assume we were given the
# primary key.
field_name += "_id"
setattr(obj, field_name, value)
# Return the hydrated object once its done the final hydrate.
return self.hydrate(obj, data)
def hydrate(self, obj, data):
"""Add any extra info to the `obj` before finalizing the finale object.
:param obj: obj being hydrated.
:param data: dictionary to use to set object.
"""
return obj
def get_object(self, params, permission=None):
"""Get object by using the `pk` in `params`."""
if self._meta.pk not in params:
raise HandlerValidationError(
{self._meta.pk: ["This field is required"]}
)
pk = params[self._meta.pk]
try:
obj = self.get_queryset(for_list=False).get(**{self._meta.pk: pk})
except self._meta.object_class.DoesNotExist:
raise HandlerDoesNotExistError(pk)
if permission is not None or self._meta.view_permission is not None:
if permission is None:
permission = self._meta.view_permission
if not self.user.has_perm(permission, obj):
raise HandlerPermissionError()
return obj
def get_queryset(self, for_list=False):
"""Return `QuerySet` used by this handler.
Override if you need to modify the queryset based on the current user.
"""
if for_list and self._meta.list_queryset is not None:
return self._meta.list_queryset
else:
return self._meta.queryset
def get_form_class(self, action):
"""Return the form class used for `action`.
Override if you need to provide a form based on the current user.
"""
return self._meta.form
def preprocess_form(self, action, params):
"""Process the `params` to before passing the data to the form.
Default implementation just converts `params` to a `QueryDict`.
"""
return get_QueryDict(params)
def _get_call_latency_metrics_label(self, method_name, params):
call_name = "{handler_name}.{method_name}".format(
handler_name=self._meta.handler_name, method_name=method_name
)
return {"call": call_name}
@PROMETHEUS_METRICS.record_call_latency(
"maas_websocket_call_latency",
get_labels=_get_call_latency_metrics_label,
)
@asynchronous
def execute(self, method_name, params):
"""Execute the given method on the handler.
        Checks to make sure the method is valid and allowed before executing
        the method.
"""
if method_name in self._meta.allowed_methods:
try:
method = getattr(self, method_name)
except AttributeError:
raise HandlerNoSuchMethodError(method_name)
else:
# Handler methods are predominantly transactional and thus
# blocking/synchronous. Genuinely non-blocking/asynchronous
# methods must out themselves explicitly.
if IAsynchronous.providedBy(method):
# Running in the io thread so clear RBAC now.
rbac.clear()
# Reload the user from the database.
d = concurrency.webapp.run(
deferToDatabase,
transactional(self.user.refresh_from_db),
)
d.addCallback(lambda _: method(params))
return d
else:
@wraps(method)
@transactional
def prep_user_execute(params):
# Clear RBAC and reload the user to ensure that
# its up to date. `rbac.clear` must be done inside
# the thread because it uses thread locals internally.
rbac.clear()
self.user.refresh_from_db()
# Perform the work in the database.
return self._call_method_track_queries(
method_name, method, params
)
# Force the name of the function to include the handler
# name so the debug logging is useful.
prep_user_execute.__name__ = "%s.%s" % (
self.__class__.__name__,
method_name,
)
# This is going to block and hold a database connection so
# we limit its concurrency.
return concurrency.webapp.run(
deferToDatabase, prep_user_execute, params
)
else:
raise HandlerNoSuchMethodError(method_name)
def _call_method_track_queries(self, method_name, method, params):
"""Call the specified method tracking query-related metrics."""
latencies = []
with wrap_query_counter_cursor(latencies):
result = method(params)
labels = self._get_call_latency_metrics_label(method_name, [])
PROMETHEUS_METRICS.update(
"maas_websocket_call_query_count",
"observe",
value=len(latencies),
labels=labels,
)
for latency in latencies:
PROMETHEUS_METRICS.update(
"maas_websocket_call_query_latency",
"observe",
value=latency,
labels=labels,
)
return result
def _cache_pks(self, objs):
"""Cache all loaded object pks."""
getpk = attrgetter(self._meta.pk)
self.cache["loaded_pks"].update(getpk(obj) for obj in objs)
def list(self, params):
"""List objects.
:param start: A value of the `batch_key` column and NOT `pk`. They are
often the same but that is not a certainty. Make sure the client
also understands this distinction.
:param offset: Offset into the queryset to return.
:param limit: Maximum number of objects to return.
"""
queryset = self.get_queryset(for_list=True)
queryset = queryset.order_by(self._meta.batch_key)
if "start" in params:
queryset = queryset.filter(
**{"%s__gt" % self._meta.batch_key: params["start"]}
)
if "limit" in params:
queryset = queryset[: params["limit"]]
objs = list(queryset)
self._cache_pks(objs)
return [self.full_dehydrate(obj, for_list=True) for obj in objs]
def get(self, params):
"""Get object.
:param pk: Object with primary key to return.
"""
obj = self.get_object(params)
self._cache_pks([obj])
return self.full_dehydrate(obj)
def create(self, params):
"""Create the object from data."""
# Create by using form. `create_permission` is not used with form,
# permission checks should be done in the form.
form_class = self.get_form_class("create")
if form_class is not None:
data = self.preprocess_form("create", params)
if self._meta.form_requires_request:
form = form_class(request=self.request, data=data)
else:
form = form_class(data=data)
if hasattr(form, "use_perms") and form.use_perms():
if not form.has_perm(self.user):
raise HandlerPermissionError()
elif self._meta.create_permission is not None:
raise ValueError(
"`create_permission` defined on the handler, but the form "
"is not using permission checks."
)
if form.is_valid():
try:
obj = form.save()
except ValidationError as e:
try:
raise HandlerValidationError(e.message_dict)
except AttributeError:
raise HandlerValidationError({"__all__": e.message})
return self.full_dehydrate(self.refetch(obj))
else:
raise HandlerValidationError(form.errors)
# Verify the user can create an object.
if self._meta.create_permission is not None:
if not self.user.has_perm(self._meta.create_permission):
raise HandlerPermissionError()
# Create by updating the fields on the object.
obj = self._meta.object_class()
obj = self.full_hydrate(obj, params)
obj.save()
return self.full_dehydrate(obj)
def update(self, params):
"""Update the object."""
obj = self.get_object(params)
# Update by using form. `edit_permission` is not used when form
# is used to update. The form should define the permissions.
form_class = self.get_form_class("update")
if form_class is not None:
data = self.preprocess_form("update", params)
form = form_class(data=data, instance=obj)
if hasattr(form, "use_perms") and form.use_perms():
if not form.has_perm(self.user):
raise HandlerPermissionError()
elif self._meta.edit_permission is not None:
raise ValueError(
"`edit_permission` defined on the handler, but the form "
"is not using permission checks."
)
if form.is_valid():
try:
obj = form.save()
except ValidationError as e:
raise HandlerValidationError(e.error_dict)
return self.full_dehydrate(obj)
else:
raise HandlerValidationError(form.errors)
# Verify the user can edit this object.
if self._meta.edit_permission is not None:
if not self.user.has_perm(self._meta.edit_permission, obj):
raise HandlerPermissionError()
# Update by updating the fields on the object.
obj = self.full_hydrate(obj, params)
obj.save()
return self.full_dehydrate(obj)
def delete(self, params):
"""Delete the object."""
obj = self.get_object(params, permission=self._meta.delete_permission)
obj.delete()
def set_active(self, params):
"""Set the active node for this connection.
This is the node that is being viewed in detail by the client.
"""
# Calling this method without a primary key will clear the currently
# active object.
if self._meta.pk not in params:
if "active_pk" in self.cache:
del self.cache["active_pk"]
return
# Get the object data and set it as active.
obj_data = self.get(params)
self.cache["active_pk"] = obj_data[self._meta.pk]
return obj_data
def on_listen(self, channel, action, pk):
"""Called by the protocol when a channel notification occurs.
Do not override this method instead override `listen`.
"""
pk = self._meta.pk_type(pk)
if action == "delete":
if pk in self.cache["loaded_pks"]:
self.cache["loaded_pks"].remove(pk)
return (self._meta.handler_name, action, pk)
else:
return None
self.user.refresh_from_db()
try:
obj = self.listen(channel, action, pk)
except HandlerDoesNotExistError:
obj = None
if action == "create" and obj is not None:
if pk in self.cache["loaded_pks"]:
                # The user already knows about this node, so it's not a create
                # to the user but an update.
return self.on_listen_for_active_pk("update", pk, obj)
else:
self.cache["loaded_pks"].add(pk)
return self.on_listen_for_active_pk(action, pk, obj)
elif action == "update":
if pk in self.cache["loaded_pks"]:
if obj is None:
# The user no longer has access to this object. To the
# client this is a delete action.
self.cache["loaded_pks"].remove(pk)
return (self._meta.handler_name, "delete", pk)
else:
# Just a normal update to the client.
return self.on_listen_for_active_pk(action, pk, obj)
elif obj is not None:
# User just got access to this new object. Send the message to
# the client as a create action instead of an update.
self.cache["loaded_pks"].add(pk)
return self.on_listen_for_active_pk("create", pk, obj)
else:
# User doesn't have access to this object, so do nothing.
pass
else:
# Unknown action or the user doesn't have permission to view the
# newly created object, so do nothing.
pass
return None
def on_listen_for_active_pk(self, action, pk, obj):
"""Return the correct data for `obj` depending on if its the
active primary key."""
if "active_pk" in self.cache and pk == self.cache["active_pk"]:
# Active so send all the data for the object.
return (
self._meta.handler_name,
action,
self.full_dehydrate(obj, for_list=False),
)
else:
            # Not active so only send the data like it was coming from
# the list call.
return (
self._meta.handler_name,
action,
self.full_dehydrate(obj, for_list=True),
)
def listen(self, channel, action, pk):
"""Called when the handler listens for events on channels with
`Meta.listen_channels`.
:param channel: Channel event occured on.
:param action: Action that caused this event.
:param pk: Id of the object.
"""
return self.get_object({self._meta.pk: pk})
def refetch(self, obj):
"""Refetch an object using the handler queryset.
This ensures annotations defined in the queryset are added to the
object.
"""
return self.get_object({self._meta.pk: getattr(obj, self._meta.pk)})
class AdminOnlyMixin(Handler):
class Meta:
abstract = True
def create(self, parameters):
"""Only allow an administrator to create this object."""
if not self.user.has_perm(
NodePermission.admin, self._meta.object_class
):
raise HandlerPermissionError()
return super().create(parameters)
def update(self, parameters):
"""Only allow an administrator to update this object."""
obj = self.get_object(parameters)
if not self.user.has_perm(NodePermission.admin, obj):
raise HandlerPermissionError()
return super().update(parameters)
def delete(self, parameters):
"""Only allow an administrator to delete this object."""
obj = self.get_object(parameters)
if not self.user.has_perm(NodePermission.admin, obj):
raise HandlerPermissionError()
return super().delete(parameters)
| agpl-3.0 | -2,074,345,313,551,764,000 | 36.623377 | 79 | 0.579872 | false | 4.450836 | false | false | false |
stxnext/intranet-open | src/intranet3/intranet3/views/cron/bugs.py | 1 | 2906 | # -*- coding: utf-8 -*-
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.renderers import render
from intranet3 import config
from intranet3.lib.bugs import Bugs
from intranet3.log import INFO_LOG, DEBUG_LOG, EXCEPTION_LOG
from intranet3.models import User, Project
from intranet3.utils import mail
from intranet3.utils.views import CronView
from intranet3.models import DBSession
LOG = INFO_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
@view_config(route_name='cron_bugs_oldbugsreport', permission='cron')
class OldBugsReport(CronView):
def _send_report(self, coordinator_id, email, bugs):
# Bugs filtering & ordering
# Coordinator gets bugs from his projects, manager gets bugs from
# all projects
if coordinator_id is None: # Manager
bugs_filtered = sorted(
bugs,
key=lambda b: b.changeddate.replace(tzinfo=None),
)
title = u'Lista najstarszych niezamkniętych bugów\nwe wszystkich projektach'
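            # i.e. "List of the oldest unclosed bugs\nin all projects"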
else: # Coordinator
bugs_filtered = [
b for b in bugs
if b.project is not None and
b.project.coordinator_id == coordinator_id
]
bugs_filtered = sorted(
bugs_filtered,
key=lambda b: b.changeddate.replace(tzinfo=None),
)
title = u'Lista najstarszych niezamkniętych bugów\nw projektach w których jesteś koordynatorem'
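            # i.e. "List of the oldest unclosed bugs\nin projects where you are the coordinator"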
if bugs_filtered:
data = {
'bugs': bugs_filtered[:20],
'title': self._(title),
}
response = render(
'intranet3:templates/_email_reports/old_bugs_report.html',
data,
request=self.request
)
with mail.EmailSender() as email_sender:
email_sender.send(
email,
self._(u'[Intranet3] Old bugs report'),
html_message=response,
)
def action(self):
coordinators = DBSession.query(Project.coordinator_id, User.email) \
.join(User) \
.filter(Project.coordinator_id!=None) \
.group_by(Project.coordinator_id, User) \
.all()
manager = DBSession.query(User) \
.filter(User.email == config['MANAGER_EMAIL']) \
.first()
bugs = Bugs(self.request, manager).get_all()
# Coordinators
for user_id, user_email in coordinators:
self._send_report(user_id, user_email, bugs)
# Manager
self._send_report(None, config['MANAGER_EMAIL'], bugs)
return Response('ok')
| mit | 556,967,443,122,150,300 | 35.708861 | 107 | 0.554828 | false | 4.067321 | false | false | false |
IndiciumSRL/wirecurly | tests/test_directory/test_user.py | 1 | 1250 | '''
Test the creation of a user on the directory
'''
import unittest
import pytest
from wirecurly import directory
class testUserCreation(unittest.TestCase):
"""Test user creation"""
def setUp(self):
'''
Create our fixtures for these tests
'''
self.user = directory.User('1000', 'this is a test')
def test_user_is_can_be_a_dict(self):
'''
Test that our user can be properly serialized
'''
assert isinstance(self.user.todict(), dict)
def test_adding_parameter_to_user(self):
'''
Test adding a parameter to a user
'''
self.user.addParameter('vm-password', '123')
assert self.user.getParameter('vm-password') == '123'
def test_adding_variable_to_user(self):
'''
Test adding a variable to a user
'''
self.user.addVariable('toll-allow', 'intl')
assert self.user.getVariable('toll-allow') == 'intl'
def test_adding_existing_variable(self):
'''
Test trying to replace an existing variable
'''
with pytest.raises(ValueError):
self.user.addVariable('test', 'intl')
self.user.addVariable('test', 'intl')
def test_adding_existing_parameter(self):
'''
Test trying to replace an existing parameter
'''
with pytest.raises(ValueError):
self.user.addParameter('password', 'anything')
| mpl-2.0 | 2,922,798,544,849,021,000 | 24 | 55 | 0.6896 | false | 3.263708 | true | false | false |
LLNL/spack | var/spack/repos/builtin/packages/parsplice/package.py | 5 | 1281 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Parsplice(CMakePackage):
"""ParSplice code implements the Parallel Trajectory Splicing algorithm"""
homepage = "https://gitlab.com/exaalt/parsplice"
url = "https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1"
git = "https://gitlab.com/exaalt/parsplice.git"
tags = ['ecp', 'ecp-apps']
version('develop', branch='master')
version('multisplice', branch='multisplice')
version('1.1', sha256='a011c4d14f66e7cdbc151cc74b5d40dfeae19ceea033ef48185d8f3b1bc2f86b')
depends_on("[email protected]:", type='build')
depends_on("berkeley-db")
depends_on("nauty")
depends_on("boost cxxstd=11")
depends_on("mpi")
depends_on("eigen@3:")
depends_on("lammps+lib@20170901:")
depends_on("lammps+lib+exceptions", when="@multisplice")
def cmake_args(self):
spec = self.spec
if spec.satisfies('@multisplice'):
options = []
else:
options = ['-DBUILD_SHARED_LIBS=ON', '-DBoost_NO_BOOST_CMAKE=ON']
return options
| lgpl-2.1 | -3,349,856,711,162,556,400 | 31.846154 | 105 | 0.668228 | false | 3.116788 | false | false | false |
ejucovy/django-opendebates | opendebates/forms.py | 1 | 7952 | from urlparse import urlparse
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import resolve, Resolver404
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from localflavor.us.forms import USZipCodeField
from nocaptcha_recaptcha.fields import NoReCaptchaField
from registration.forms import RegistrationForm
from .models import Category, Flag, Submission
VALID_SUBMISSION_DETAIL_URL_NAMES = ['vote', 'show_idea']
class VoterForm(forms.Form):
email = forms.EmailField()
zipcode = USZipCodeField()
captcha = NoReCaptchaField(
gtag_attrs={'data-size': 'compact'}
)
def __init__(self, *args, **kwargs):
super(VoterForm, self).__init__(*args, **kwargs)
def ignore_captcha(self):
del self.fields['captcha']
class QuestionForm(forms.Form):
category = forms.ModelMultipleChoiceField(queryset=Category.objects.all())
headline = forms.CharField(required=True)
question = forms.CharField(required=False)
citation = forms.URLField(required=False, max_length=255)
def __init__(self, *args, **kwargs):
super(QuestionForm, self).__init__(*args, **kwargs)
self.fields['category'].error_messages['invalid_pk_value'] = _("You must select a category")
display_name_help_text = _("How your name will be displayed on the site. If you "
"are an expert in a particular field or have a professional "
"affiliation that is relevant to your ideas, feel free to "
"mention it here alongside your name! If you leave this "
"blank, your first name and last initial will be used "
"instead.") # @@TODO
display_name_label = (u"Display name <span data-toggle='tooltip' title='%s' "
"class='glyphicon glyphicon-question-sign'></span>" % display_name_help_text)
twitter_handle_help_text = _("Fill in your Twitter username (without the @) if you "
"would like to be @mentioned on Twitter when people "
"tweet your ideas.") # @@TODO
twitter_handle_label = (u"Twitter handle <span data-toggle='tooltip' title='%s' "
"class='glyphicon glyphicon-question-sign'></span>"
% twitter_handle_help_text)
class OpenDebatesRegistrationForm(RegistrationForm):
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
display_name = forms.CharField(max_length=255,
label=mark_safe(display_name_label),
required=False)
twitter_handle = forms.CharField(max_length=255,
label=mark_safe(twitter_handle_label),
required=False)
zip = USZipCodeField()
captcha = NoReCaptchaField(label=_("Are you human?"))
def clean_twitter_handle(self):
if self.cleaned_data.get("twitter_handle", "").startswith("@"):
return self.cleaned_data['twitter_handle'].lstrip("@")
if self.cleaned_data.get("twitter_handle", "").startswith("https://twitter.com/"):
return self.cleaned_data['twitter_handle'][20:]
if self.cleaned_data.get("twitter_handle", "").startswith("http://twitter.com/"):
return self.cleaned_data['twitter_handle'][19:]
if self.cleaned_data.get("twitter_handle", "").startswith("twitter.com/"):
return self.cleaned_data['twitter_handle'][12:]
return self.cleaned_data.get("twitter_handle", "").strip() or None
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
User = get_user_model()
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError("This email address is already in use. Please supply "
"a different email address.")
return self.cleaned_data['email']
def save(self, commit=True):
user = super(OpenDebatesRegistrationForm, self).save(commit=False)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
if commit:
user.save()
return user
def ignore_captcha(self):
del self.fields['captcha']
class OpenDebatesAuthenticationForm(AuthenticationForm):
username = forms.CharField(max_length=254,
label="Username or Email")
class MergeFlagForm(forms.ModelForm):
duplicate_of_url = forms.URLField(label=_("Enter URL here"))
class Meta:
model = Flag
fields = ('duplicate_of_url', )
def __init__(self, *args, **kwargs):
self.idea = kwargs.pop('idea')
self.voter = kwargs.pop('voter')
super(MergeFlagForm, self).__init__(*args, **kwargs)
def clean_duplicate_of_url(self):
# parse the URL and use Django's resolver to find the urlconf entry
path = urlparse(self.cleaned_data['duplicate_of_url']).path
try:
url_match = resolve(path)
except Resolver404:
url_match = None
if not url_match or url_match.url_name not in VALID_SUBMISSION_DETAIL_URL_NAMES:
raise forms.ValidationError('That is not the URL of a question.')
duplicate_of_pk = url_match.kwargs.get('id')
if duplicate_of_pk == unicode(self.idea.pk):
raise forms.ValidationError('Please enter the URL of the submission that this '
'submission appears to be a duplicate of, not the '
'URL of this submission.')
self.duplicate_of = Submission.objects.filter(pk=duplicate_of_pk, approved=True) \
.first()
if not self.duplicate_of:
raise forms.ValidationError('Invalid Question URL.')
return self.cleaned_data['duplicate_of_url']
def save(self, commit=True):
flag = super(MergeFlagForm, self).save(commit=False)
flag.to_remove = self.idea
flag.voter = self.voter
flag.duplicate_of = self.duplicate_of
if commit:
flag.save()
return flag
class ModerationForm(forms.Form):
to_remove = forms.IntegerField(label="ID of submission to remove")
duplicate_of = forms.IntegerField(required=False,
label="(Optional) ID of submission it is a duplicate of")
def clean_to_remove(self):
to_remove_pk = self.cleaned_data['to_remove']
if to_remove_pk:
try:
self.cleaned_data['to_remove_obj'] = Submission.objects.get(
pk=to_remove_pk, approved=True)
except Submission.DoesNotExist:
raise forms.ValidationError('That submission does not exist or is not approved.')
return to_remove_pk
def clean_duplicate_of(self):
duplicate_of_pk = self.cleaned_data['duplicate_of']
if duplicate_of_pk:
try:
self.cleaned_data['duplicate_of_obj'] = Submission.objects.get(
pk=duplicate_of_pk, approved=True)
except Submission.DoesNotExist:
raise forms.ValidationError('That submission does not exist or is not approved.')
return duplicate_of_pk
def clean(self):
to_remove_pk = self.cleaned_data.get('to_remove')
duplicate_of_pk = self.cleaned_data.get('duplicate_of')
if to_remove_pk and duplicate_of_pk and to_remove_pk == duplicate_of_pk:
raise forms.ValidationError('Cannot merge a submission into itself.')
return self.cleaned_data
| apache-2.0 | 3,309,220,891,422,073,300 | 41.524064 | 100 | 0.612047 | false | 4.191882 | false | false | false |
gray-stanton/tensorflow-char-rnn | tf_parser.py | 1 | 3362 | import tensorflow as tf
import io
import os.path
from gensim.utils import deaccent
import re
import string
import numpy as np
def regularize_charset(fname):
"""Reduce the set of characters in the file to the minimum
to encapsulate its semantics. Replaces non-ascii chars with their
ascii equivalent. Replaces non-printing chars with spaces, and tabs with
4 spaces.
Arguments:
fname: path to a text file to be encoded
Returns:
a file path with ascii chars replaced
"""
with open(fname, 'r') as f:
s = f.read()
news = to_ascii(s)
    return write_with_suffix(fname, '-ascii', news)
def to_ascii(string):
"""
Replace all non-ascii chars with ascii-equivalent, remove
    all non-printing characters, replace all tabs with 4 spaces.
Returns:
A transformed string
"""
tabs = re.compile('\t')
newstring, _ = tabs.subn(' ' * 4, string)
car_return_etc = re.compile('\r|\x0b|\x0c')
    newstring, _ = car_return_etc.subn('\n', newstring)
newstring = deaccent(newstring)
#FIXME removes newlines, not intended behavior
nonprintable = re.compile('[^ -~\n]')
newstring, _ = nonprintable.subn('', newstring)
return newstring.encode('ascii')
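# Illustrative example (assumes gensim's deaccent behaviour):
#   to_ascii(u'caf\xe9\tbar') -> b'cafe    bar'   (deaccented, tab -> 4 spaces)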
def split_text(string, elem_length):
"""
Splits a string into substrings of length elem_length, with
space padding.
Arguments:
string: a string to split
elem_length: length of substrings to split into
Returns:
A list of strings of length elem_length
"""
rem = len(string) % elem_length
padded_string = string + b' ' * rem
#jDouble braces used to create a literal brace, re matches exactly
# elem_length of any char
return [padded_string[i : i + elem_length]
for i in range(0, len(padded_string) - elem_length, elem_length)]
def to_digit_array(string, char_map):
"""
Convert a string into an nparray, mapping characters
to ints based on char_map
"""
return np.array([char_map[s] for s in string], dtype = np.int8)
def write_with_suffix(f, suffix):
"""Write a new txt file with the name of f concatted with the
string suffix appended, with the same extension as the original file
Arguments:
f: a file object
suffix: the suffix to be appended to the filename
Returns:
an file path to the writen file at fname + suffix.ext
"""
fpath = f.name
basename = os.path.basename(fpath)
newname = basename + suffix
#rev_char_map = {i : c for i, c in char_map.items()}
def translate(content):
ascii_content = to_ascii(content)
charset = set(ascii_content)
char_map = {c : i for i, c in enumerate(sorted(list(charset)))}
translated = np.array([char_map[c] for c in ascii_content],
dtype = np.uint8)
return translated, char_map
def make_array(content, elem_length):
"""
Take text string, process charset, create np array of dim [-1, elem_length]
"""
ascii_content = to_ascii(content)
charset = set(ascii_content)
char_map = {c : i for i, c in enumerate(sorted(list(charset)))}
substrings = split_text(ascii_content, elem_length)
array = np.array([to_digit_array(s, char_map) for s in substrings],
dtype = np.uint8)
return array, char_map
| mit | -4,398,743,433,412,249,600 | 27.491525 | 79 | 0.640095 | false | 3.739711 | false | false | false |
MaxStrange/ArtieInfant | legacy/artie/apps/octopod/priv/pyctopod/pyctopod.py | 1 | 5119 | """
This is the python module that handles all the Elixir-Python
interfacing. Client python modules should never need
to reference erlport at all and should instead, handle all the
publisher/subscriber interactions through this module.
"""
import queue
import string
import threading
import time
from erlport.erlang import set_message_handler, cast
from erlport.erlterms import Atom
## A horrible number of globals... Ugh.
_msg_handling_pid = None
_msgq = queue.Queue()
_main_func = None
_topic_handlers = {}
_consumption_thread = None
## Client-Facing API
def register_main(func):
"""
Registers the main function to execute - will execute this
upon receipt of a signal from Elixir that everything is set
up and ready to go. Will run it in a separate thread.
"""
global _main_func
_main_func = func
def register_handler(pid):
"""
Registers the given Elixir process as the handler
for this library.
This function must be called first - and the client
module must define a function with the same prototype which
simply calls this function.
"""
global _msg_handling_pid
_msg_handling_pid = pid
def subscribe(topics, handlers):
"""
Subscribes to each topic in `topics` (which may be a single
str). Whenever a message is received from a topic, the
associated handler is called - the handlers are associated
with the topics based purely on order: topic0 -> handler0.
The handlers are functions that take 'from_id', 'topic', msg.
The handlers are called asynchronously, two messages received
on the same topic will both fire without having to finish one.
Topics MAY NOT have spaces or punctuation - they should be
strings that are easily convertable to Elixir atoms.
"""
if type(topics) == str:
topics = [topics]
invalid_chars = set(string.punctuation.replace("_", ""))
for topic in topics:
if any(char in invalid_chars for char in topic):
raise ValueError("Topic {} contains invalid characters. Topics cannot have punctuation or spaces.".format(topic))
global _topic_handlers
for topic, handler in zip(topics, handlers):
_topic_handlers[topic] = handler
global _consumption_thread
if _consumption_thread is None:
_consumption_thread = threading.Thread(target=_consume)
_consumption_thread.start()
for topic in topics:
topic_as_atom = Atom(topic.encode('utf8'))
atom_subscribe = Atom("subscribe".encode('utf8'))
cast(_msg_handling_pid, (atom_subscribe, topic_as_atom))
def publish(topics, msg, from_id='default'):
"""
Publishes `msg` to all topics in `topics`, which may be a
single topic.
Message must be bytes. Topics must be a string. From_id must
also be a string.
"""
if type(msg) != bytes:
raise TypeError("msg must be of type 'bytes' but is of type " + str(type(msg)))
if type(topics) == str:
topics = [topics]
try:
topics = [Atom(t.encode('utf8')) for t in topics]
except TypeError:
topics = [Atom(topics.encode('utf8'))]
id_as_atom = Atom(from_id.encode('utf8'))
for topic in topics:
cast(_msg_handling_pid, (id_as_atom, topic, msg))
def _consume():
"""
Sits around waiting for messages from Elixir. Expects a
keepalive message to the :priv_keepalive topic at least
once every a minute, otherwise exits.
Spawns a new thread every time a new message is received
on a topic that has a handler associated with it.
"""
keepalive_interval = 30
start = time.time()
while True:
if time.time() - start > keepalive_interval:
return
try:
from_id, topic, msg = _msgq.get(timeout=keepalive_interval)
if topic == "priv_keepalive":
start = time.time()
else:
handler = _topic_handlers[topic]
threading.Thread(target=handler, args=(from_id, topic, msg)).start()
except queue.Empty:
return
except KeyError as e:
print(e)
print("Could not find key {} in {}".format(topic, _topic_handlers))
raise
## Erlport API: Don't use this in client modules
def _handle_message(msg):
"""
`msg` should be a tuple of the form: (topic, payload).
Calls the correct handler for the topic.
"""
if type(msg) != tuple:
raise TypeError("Received a type {} for message. Always expecting tuple instead.".format(type(msg)))
if msg[0] == Atom("message".encode('utf8')):
msg = msg[1]
signal = (Atom("ok".encode('utf8')), Atom("go".encode('utf8')))
if msg == signal:
threading.Thread(target=_main_func).start()
else:
from_id, topic, msg_payload = msg # Will throw an error here if msg is not formatted correctly
from_id = str(from_id).lstrip('b').strip("'")
topic = str(topic).lstrip('b').strip("'")
_msgq.put((from_id, topic, msg_payload))
# Register the handler function with Elixir
set_message_handler(_handle_message)
| mit | 241,415,941,119,834,980 | 31.398734 | 125 | 0.649346 | false | 3.955951 | false | false | false |
tbenthompson/taskloaf | examples/gpu_server.py | 1 | 1873 | import numpy as np
import tectosaur.util.gpu as gpu
import taskloaf as tsk
arg = 1.0
def load_module():
import os
D = os.path.dirname(os.path.realpath(__file__))
return gpu.load_gpu('kernels.cl', tmpl_dir = D, tmpl_args = dict(arg = arg))
async def gpu_run():
# gd = tsk.get_service('gpu_data')
# if 'add' not in gd:
# gd['add'] = (fnc, arg, gpu_R)
# else:
# fnc, arg, gpu_R = gd['add']
module = load_module()
fnc = module.add
R = np.random.rand(10000000)
gpu_R = gpu.to_gpu(R)
gpu_out = gpu.empty_gpu(gpu_R.shape)
fnc(gpu_out, gpu_R, grid = (gpu_R.shape[0], 1, 1), block = (1, 1, 1))
R2 = await gpu.get(gpu_out)
gpu.logger.debug('run')
def setup_gpu_server(which_gpu):
import os
os.environ['CUDA_DEVICE'] = str(which_gpu)
import taskloaf.worker as tsk_worker
tsk_worker.services['gpu_data'] = dict()
load_module()
async def submit():
setup_prs = [
tsk.task(lambda i=i: setup_gpu_server(i), to = i)
for i in range(n_workers)
]
for pr in setup_prs:
await pr
import time
start = time.time()
n_tasks = 8 * 2
for j in range(10):
prs = []
for i in range(n_tasks):
prs.append(tsk.task(gpu_run, to = i % n_workers))
for i in range(n_tasks):
await prs[i]
print(time.time() - start)
n_workers = 8
tsk.cluster(n_workers, submit)
#
# async def work_builder():
# print("YO!")
# addr = tsk.get_service('comm').addr
# def add_task():
# print("PEACE" + str(addr))
# return addr
# rem_addr = await tsk.task(add_task, to = gpu_addr)
# return (2.0, rem_addr)
#
# def f2(x):
# return x * 2
# pr1 = tsk.task(work_builder, to = 0).then(f2)
# pr2 = tsk.task(work_builder, to = 1)
# print(await pr1)
# print(await pr2)
# return 5.0
| mit | 8,305,155,169,846,455,000 | 25.013889 | 80 | 0.563267 | false | 2.754412 | false | false | false |
edx/edx-user-state-client | setup.py | 1 | 1315 | from setuptools import setup
def load_requirements(*requirements_paths):
"""
Load all requirements from the specified requirements files.
Returns a list of requirement strings.
"""
requirements = set()
for path in requirements_paths:
with open(path) as reqs:
requirements.update(
line.split('#')[0].strip() for line in reqs
if is_requirement(line.strip())
)
return list(requirements)
def is_requirement(line):
"""
Return True if the requirement line is a package requirement;
that is, it is not blank, a comment, a URL, or an included file.
"""
return line and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
setup(
name="edx_user_state_client",
version="1.3.2",
packages=[
"edx_user_state_client",
],
install_requires=load_requirements('requirements/base.in'),
tests_require=load_requirements('requirements/test.in'),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
)
| agpl-3.0 | 5,545,524,485,339,057,000 | 29.581395 | 93 | 0.610646 | false | 4.283388 | false | false | false |
APMonitor/arduino | 7_MHE_and_MPC/Python_GEKKO_MIMO/MHE_and_MPC.py | 1 | 10048 | import tclab
import numpy as np
import time
import matplotlib.pyplot as plt
from gekko import GEKKO
# Connect to Arduino
a = tclab.TCLab()
# Get Version
print(a.version)
# Turn LED on
print('LED On')
a.LED(100)
# Run time in minutes
run_time = 15.0
# Number of cycles with 3 second intervals
loops = int(20.0*run_time)
tm = np.zeros(loops)
# Temperature (K)
T1 = np.ones(loops) * a.T1 # temperature (degC)
T1mhe = np.ones(loops) * a.T1 # temperature (degC)
Tsp1 = np.ones(loops) * 35.0 # set point (degC)
T2 = np.ones(loops) * a.T2 # temperature (degC)
T2mhe = np.ones(loops) * a.T1 # temperature (degC)
Tsp2 = np.ones(loops) * 23.0 # set point (degC)
# Set point changes
Tsp1[5:] = 40.0
Tsp1[120:] = 35.0
Tsp1[200:] = 50.0
Tsp2[50:] = 30.0
Tsp2[100:] = 35.0
Tsp2[150:] = 30.0
Tsp2[250:] = 35.0
# heater values
Q1s = np.ones(loops) * 0.0
Q2s = np.ones(loops) * 0.0
#########################################################
# Initialize Models
#########################################################
# Fixed Parameters
mass = 4.0/1000.0 # kg
Cp = 0.5*1000.0 # J/kg-K
A = 10.0/100.0**2 # Area not between heaters in m^2
As = 2.0/100.0**2 # Area between heaters in m^2
eps = 0.9 # Emissivity
sigma = 5.67e-8 # Stefan-Boltzmann
# initialize MHE and MPC
mhe = GEKKO(name='tclab-mhe')
mpc = GEKKO(name='tclab-mpc')
# create 2 models (MHE and MPC) in loop
for m in [mhe,mpc]:
# Adjustable Parameters
# heat transfer (W/m2-K)
m.U = m.FV(value=2.76,lb=1.0,ub=5.0)
# time constant (sec)
m.tau = m.FV(value=8.89,lb=5,ub=15)
# W / % heater
m.alpha1 = m.FV(value=0.005,lb=0.002,ub=0.010)
# W / % heater
m.alpha2 = m.FV(value=0.0026,lb=0.001,ub=0.005)
# degC
m.Ta = m.FV(value=22.8,lb=15.0,ub=25.0)
# Manipulated variables
m.Q1 = m.MV(value=0)
m.Q1.LOWER = 0.0
m.Q1.UPPER = 100.0
m.Q2 = m.MV(value=0)
m.Q2.LOWER = 0.0
m.Q2.UPPER = 100.0
# Controlled variables
m.TC1 = m.CV(value=T1[0])
m.TC2 = m.CV(value=T2[0])
# State variables
m.TH1 = m.SV(value=T1[0])
m.TH2 = m.SV(value=T2[0])
# Heater temperatures
m.T1i = m.Intermediate(m.TH1+273.15)
m.T2i = m.Intermediate(m.TH2+273.15)
m.TaK = m.Intermediate(m.Ta+273.15)
# Heat transfer between two heaters
m.Q_C12 = m.Intermediate(m.U*As*(m.T2i-m.T1i)) # Convective
m.Q_R12 = m.Intermediate(eps*sigma*As\
*(m.T2i**4-m.T1i**4)) # Radiative
# Semi-fundamental correlations (energy balances)
m.Equation(mass*Cp*m.TH1.dt() == m.U*A*(m.TaK-m.T1i) \
+ eps * sigma * A * (m.TaK**4 - m.T1i**4) \
+ m.Q_C12 + m.Q_R12 \
+ m.alpha1 * m.Q1)
m.Equation(mass*Cp*m.TH2.dt() == m.U*A*(m.TaK-m.T2i) \
+ eps * sigma * A * (m.TaK**4 - m.T2i**4) \
- m.Q_C12 - m.Q_R12 \
+ m.alpha2 * m.Q2)
# Empirical correlations (lag equations to emulate conduction)
m.Equation(m.tau * m.TC1.dt() == -m.TC1 + m.TH1)
m.Equation(m.tau * m.TC2.dt() == -m.TC2 + m.TH2)
##################################################################
# Configure MHE
# 120 second time horizon, steps of 4 sec
mhe.time = np.linspace(0,120,31)
#mhe.server = 'http://127.0.0.1' # solve locally
# FV tuning
# update FVs with estimator
mhe.U.STATUS = 1
mhe.tau.STATUS = 0
mhe.alpha1.STATUS = 0
mhe.alpha2.STATUS = 0
mhe.Ta.STATUS = 0
# FVs are predicted, not measured
mhe.U.FSTATUS = 0
mhe.tau.FSTATUS = 0
mhe.alpha1.FSTATUS = 0
mhe.alpha2.FSTATUS = 0
mhe.Ta.FSTATUS = 0
# MV tuning
mhe.Q1.STATUS = 0 # not optimized in estimator
mhe.Q1.FSTATUS = 0 # receive heater measurement
mhe.Q2.STATUS = 0 # not optimized in estimator
mhe.Q2.FSTATUS = 0 # receive heater measurement
# CV tuning
mhe.TC1.STATUS = 0 # not needed for estimator
mhe.TC1.FSTATUS = 1 # receive measurement
mhe.TC2.STATUS = 0 # not needed for estimator
mhe.TC2.FSTATUS = 1 # receive measurement
# Global Options
mhe.options.IMODE = 5 # MHE
mhe.options.EV_TYPE = 2 # Objective type
mhe.options.NODES = 3 # Collocation nodes
mhe.options.SOLVER = 3 # 1=APOPT, 3=IPOPT
##################################################################
# Configure MPC
# 60 second time horizon, 4 sec cycle time, non-uniform
mpc.time = [0,4,8,12,15,20,25,30,35,40,50,60,70,80,90]
#mpc.server = 'http://127.0.0.1' # solve locally
# FV tuning
# don't update FVs with controller
mpc.U.STATUS = 0
mpc.tau.STATUS = 0
mpc.alpha1.STATUS = 0
mpc.alpha2.STATUS = 0
mpc.Ta.STATUS = 0
# controller uses measured values from estimator
mpc.U.FSTATUS = 1
mpc.tau.FSTATUS = 1
mpc.alpha1.FSTATUS = 1
mpc.alpha2.FSTATUS = 1
mpc.Ta.FSTATUS = 1
# MV tuning
mpc.Q1.STATUS = 1 # use to control temperature
mpc.Q1.FSTATUS = 0 # no feedback measurement
mpc.Q1.DMAX = 20.0
mpc.Q1.DCOST = 0.1
mpc.Q1.COST = 0.0
mpc.Q1.DCOST = 0.0
mpc.Q2.STATUS = 1 # use to control temperature
mpc.Q2.FSTATUS = 0 # no feedback measurement
mpc.Q2.DMAX = 20.0
mpc.Q2.DCOST = 0.1
mpc.Q2.COST = 0.0
mpc.Q2.DCOST = 0.0
# CV tuning
mpc.TC1.STATUS = 1 # minimize error with setpoint range
mpc.TC1.FSTATUS = 1 # receive measurement
mpc.TC1.TR_INIT = 2 # reference trajectory
mpc.TC1.TAU = 10 # time constant for response
mpc.TC2.STATUS = 1 # minimize error with setpoint range
mpc.TC2.FSTATUS = 1 # receive measurement
mpc.TC2.TR_INIT = 2 # reference trajectory
mpc.TC2.TAU = 10 # time constant for response
# Global Options
mpc.options.IMODE = 6 # MPC
mpc.options.CV_TYPE = 1 # Objective type
mpc.options.NODES = 3 # Collocation nodes
mpc.options.SOLVER = 3 # 1=APOPT, 3=IPOPT
##################################################################
# Create plot
plt.figure()
plt.ion()
plt.show()
# Main Loop
start_time = time.time()
prev_time = start_time
try:
for i in range(1,loops):
# Sleep time
sleep_max = 4.0
sleep = sleep_max - (time.time() - prev_time)
if sleep>=0.01:
time.sleep(sleep)
else:
print('Warning: cycle time too fast')
print('Requested: ' + str(sleep_max))
print('Actual: ' + str(time.time() - prev_time))
time.sleep(0.01)
# Record time and change in time
t = time.time()
dt = t - prev_time
prev_time = t
tm[i] = t - start_time
# Read temperatures in degC
T1[i] = a.T1
T2[i] = a.T2
#################################
### Moving Horizon Estimation ###
#################################
# Measured values
mhe.Q1.MEAS = Q1s[i-1]
mhe.Q2.MEAS = Q2s[i-1]
# Temperatures from Arduino
mhe.TC1.MEAS = T1[i]
mhe.TC2.MEAS = T2[i]
# solve MHE
mhe.solve(disp=False)
# Parameters from MHE to MPC (if successful)
if (mhe.options.APPSTATUS==1):
# FVs
mpc.U.MEAS = mhe.U.NEWVAL
mpc.tau.MEAS = mhe.tau.NEWVAL
mpc.alpha1.MEAS = mhe.alpha1.NEWVAL
mpc.alpha2.MEAS = mhe.alpha2.NEWVAL
mpc.Ta.MEAS = mhe.Ta.NEWVAL
# CVs
T1mhe[i] = mhe.TC1.MODEL
T2mhe[i] = mhe.TC2.MODEL
else:
print("MHE failed to solve, don't update parameters")
T1mhe[i] = np.nan
T2mhe[i] = np.nan
#################################
### Model Predictive Control ###
#################################
# Temperatures from Arduino
mpc.TC1.MEAS = T1[i]
mpc.TC2.MEAS = T2[i]
# input setpoint with deadband +/- DT
DT = 0.2
mpc.TC1.SPHI = Tsp1[i] + DT
mpc.TC1.SPLO = Tsp1[i] - DT
mpc.TC2.SPHI = Tsp2[i] + DT
mpc.TC2.SPLO = Tsp2[i] - DT
# solve MPC
mpc.solve(disp=False)
# test for successful solution
if (mpc.options.APPSTATUS==1):
# retrieve the first Q value
Q1s[i] = mpc.Q1.NEWVAL
Q2s[i] = mpc.Q2.NEWVAL
else:
# not successful, set heater to zero
print("MPC failed to solve, heaters off")
Q1s[i] = 0
Q2s[i] = 0
# Write output (0-100)
a.Q1(Q1s[i])
a.Q2(Q2s[i])
# Plot
plt.clf()
ax=plt.subplot(3,1,1)
ax.grid()
plt.plot(tm[0:i],T1[0:i],'ro',MarkerSize=3,label=r'$T_1$ Measured')
plt.plot(tm[0:i],Tsp1[0:i],'k:',LineWidth=2,label=r'$T_1$ Set Point')
plt.ylabel('Temperature (degC)')
plt.legend(loc='best')
ax=plt.subplot(3,1,2)
ax.grid()
plt.plot(tm[0:i],T2[0:i],'ro',MarkerSize=3,label=r'$T_2$ Measured')
plt.plot(tm[0:i],Tsp2[0:i],'k:',LineWidth=2,label=r'$T_2$ Set Point')
plt.ylabel('Temperature (degC)')
plt.legend(loc='best')
ax=plt.subplot(3,1,3)
ax.grid()
plt.plot(tm[0:i],Q1s[0:i],'r-',LineWidth=3,label=r'$Q_1$')
plt.plot(tm[0:i],Q2s[0:i],'b:',LineWidth=3,label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
plt.draw()
plt.pause(0.05)
# Turn off heaters
a.Q1(0)
a.Q2(0)
print('Shutting down')
# Allow user to end loop with Ctrl-C
except KeyboardInterrupt:
# Disconnect from Arduino
a.Q1(0)
a.Q2(0)
print('Shutting down')
a.close()
# Make sure serial connection still closes when there's an error
except:
# Disconnect from Arduino
a.Q1(0)
a.Q2(0)
print('Error: Shutting down')
a.close()
raise
| apache-2.0 | -8,878,655,876,875,346,000 | 27.552941 | 78 | 0.529857 | false | 2.674474 | false | false | false |
ajragusa/OpenFlow-Flight-Recorder | www/webservices/fr.py | 1 | 7552 | #!/usr/bin/python
import os
import sys
import re
import struct
import pymongo
from flight_recorder.mongo import FlightRecorderFetcher
from bottle import hook, route, run, template, request, response
method_list = {}
def main():
#create a new flight recorder instance
fr = FlightRecorderFetcher()
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
# provides a consistent way to return data via webservices
def format_results( res, err=None ):
error_bool = True if(err) else False
return {
'results': res,
'error': error_bool,
'error_msg': err
}
# takes in a validator grabs the method_name and method_description keys
# to build a help message to print to the user when /help is executed
def create_method_help_obj( validators ):
global method_list
# grab our name and description and remove them from the validator dict
method_name = validators.pop('method_name', None)
method_description = validators.pop('method_description', None)
# make sure they are defined
if(method_name is None):
print "You must provide a method_name along with the validator for the /help method!"
sys.exit(1)
if(method_description is None):
print "You must provide a method_description along with the validator for the /help method!"
sys.exit(1)
method_list[method_name] = {}
method_list[method_name]['description'] = method_description
method_list[method_name]['parameters'] = []
for param in validators:
parameter = {}
validator = validators[param]
parameter['name'] = param
parameter['required'] = validator.get('required', False)
if(validator.get('pattern', False)):
parameter['pattern'] = validator.get('pattern')
if(validator.get('type', False)):
parameter['type'] = validator.get('type')
if(validator.get('checks', False)):
checks = validator.get('checks');
parameter['checks'] = []
for check in checks:
try:
parameter['checks'].append(check.__descr__)
except:
print "Must provide __descr__ for checks!"
sys.exit(1)
method_list[method_name]['parameters'].append(parameter)
method_list[method_name]['parameters'] = sorted( method_list[method_name]['parameters'], key=lambda k: k['name'])
return validators
# special decorator that takes in kwargs where the key is the parameter
# and the value is an object representing the validation it should do to the
# parameter
def validate_params( **kwargs ):
validators = create_method_help_obj(kwargs)
def validate_params_decorator(func):
def wrapper(*args, **kwargs):
validated_args = {}
for param in validators:
validator = validators[param]
checks = validator.get('checks', [])
default = validator.get('default', None)
value = request.params.get(param, default)
# check if the param was required
if(validator.get('required', False) and value == None ):
return format_results( None, "Parameter, {0}, is required".format(param) )
# only do further validation if it's not required
if(value != None):
# if the parameter needs to match a particular pattern make sure it does
if( validator.get('pattern', False) ):
pattern = validator.get('pattern')
regex = re.compile(pattern)
if(not regex.match(value) ):
return format_results( None, "Parameter, {0}, must match pattern, {1}".format(param, pattern) )
# if a type is set try to convert to the type otherwise send error
if( validator.get('type', False) ):
if( validator.get('type') == 'string' ):
try:
value = str(value)
except Exception as e:
return format_results( None, "Error converting {0} to string: {1}".format(value, e))
if( validator.get('type') == 'integer' ):
try:
value = int(value)
except Exception as e:
return format_results( None, "Error converting {0} to integer: {1}".format(value, e))
# if the param has any special check perform them now
for check in checks:
err, msg = check( value )
if(err):
return format_results( None, msg )
# if we've gotten here the param is good so add it to our validated params
validated_args[param] = value
return func(validated_args)
return wrapper
return validate_params_decorator
@route('/')
@route('/help')
@validate_params(
method_name = 'help',
method_description = 'Returns information about what methods are available and their parameters if a method is specified',
method = {}
)
def help(params):
method = params.get('method')
methods = None
if(method is not None):
try:
methods = [method_list.get(method)]
methods[0]['name'] = method
except:
return format_results( None, "Method, {0}, does not exists".format(method) )
else:
methods = method_list.keys()
methods.sort()
return format_results( methods )
@route('/streams')
@validate_params(
method_name = 'streams',
method_description = 'returns a list of streams matching specified params',
name = {},
addr = {'required': False, 'type': 'string'},
port = {'required': False, 'type': 'integer'},
start = {'required': False, 'type': 'integer'},
stop = {'required': False, 'type': 'integer'},
)
def get_streams(params):
results = fr.get_streams( params )
if(results is not None):
return format_results( results )
else:
return format_results( [], fr.get_error() )
@route('/messages')
@validate_params(
method_name='messages',
method_description = 'returns a list of messages matching the specified params',
name = {},
stream_id = {'required': False, 'type': 'string'},
start = {'required': False, 'type': 'integer'},
stop = {'required': False, 'type': 'integer'},
)
def get_messages(params):
results = fr.get_messages( params )
if(results is not None):
return format_results( results )
else:
return format_results([], fr.get_error())
run(host="0.0.0.0", port=8080)
main()
| apache-2.0 | -8,743,963,651,425,706,000 | 37.927835 | 130 | 0.529264 | false | 4.797967 | false | false | false |
mantidproject/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IqtFitMultiple.py | 3 | 14331 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid import logger, AlgorithmFactory
from mantid.api import *
from mantid.kernel import *
import mantid.simpleapi as ms
class IqtFitMultiple(PythonAlgorithm):
_input_ws = None
_function = None
_fit_type = None
_start_x = None
_end_x = None
_spec_min = None
_spec_max = None
_intensities_constrained = None
_minimizer = None
_max_iterations = None
_result_name = None
_parameter_name = None
_fit_group_name = None
def category(self):
return "Workflow\\MIDAS"
def summary(self):
return r"Fits an \*\_iqt file generated by I(Q,t)."
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty('InputWorkspace', '', direction=Direction.Input),
doc='The _iqt.nxs InputWorkspace used by the algorithm')
self.declareProperty(FunctionProperty(name='Function',direction=Direction.InOut),
doc='The function to use in fitting')
self.declareProperty(name='FitType', defaultValue='',
doc='The type of fit being carried out')
self.declareProperty(name='StartX', defaultValue=0.0,
validator=FloatBoundedValidator(0.0),
doc="The first value for X")
self.declareProperty(name='EndX', defaultValue=0.2,
validator=FloatBoundedValidator(0.0),
doc="The last value for X")
self.declareProperty(name='SpecMin', defaultValue=0,
validator=IntBoundedValidator(0),
doc='Minimum spectra in the workspace to fit')
self.declareProperty(name='SpecMax', defaultValue=1,
validator=IntBoundedValidator(0),
doc='Maximum spectra in the workspace to fit')
self.declareProperty(name='Minimizer', defaultValue='Levenberg-Marquardt',
doc='The minimizer to use in fitting')
self.declareProperty(name="MaxIterations", defaultValue=500,
validator=IntBoundedValidator(0),
doc="The Maximum number of iterations for the fit")
self.declareProperty(name='ConstrainIntensities', defaultValue=False,
doc="If the Intensities should be constrained during the fit")
self.declareProperty(name='ExtractMembers', defaultValue=False,
doc="If true, then each member of the fit will be extracted, into their "
"own workspace. These workspaces will have a histogram for each spectrum "
"(Q-value) and will be grouped.")
self.declareProperty(MatrixWorkspaceProperty('OutputResultWorkspace', '', direction=Direction.Output),
doc='The output workspace containing the results of the fit data')
self.declareProperty(ITableWorkspaceProperty('OutputParameterWorkspace', '', direction=Direction.Output),
doc='The output workspace containing the parameters for each fit')
self.declareProperty(WorkspaceGroupProperty('OutputWorkspaceGroup', '', direction=Direction.Output),
doc='The OutputWorkspace group Data, Calc and Diff, values for the fit of each spectra')
def validateInputs(self):
self._get_properties()
issues = dict()
maximum_possible_spectra = self._input_ws.getNumberHistograms()
maximum_possible_x = self._input_ws.readX(0)[self._input_ws.blocksize() - 1]
# Validate SpecMin/Max
if self._spec_max > maximum_possible_spectra:
issues['SpecMax'] = ('SpecMax must be smaller or equal to the number of '
'spectra in the input workspace, %d' % maximum_possible_spectra)
if self._spec_min < 0:
issues['SpecMin'] = 'SpecMin can not be less than 0'
if self._spec_max < self._spec_min:
issues['SpecMax'] = 'SpecMax must be more than or equal to SpecMin'
# Validate Start/EndX
if self._end_x > maximum_possible_x:
issues['EndX'] = ('EndX must be less than the highest x value in the workspace, %d' % maximum_possible_x)
if self._start_x < 0:
issues['StartX'] = 'StartX can not be less than 0'
if self._start_x > self._end_x:
issues['EndX'] = 'EndX must be more than StartX'
return issues
def _get_properties(self):
self._input_ws = self.getProperty('InputWorkspace').value
self._function = self.getProperty('Function').value
self._fit_type = self.getProperty('FitType').value
self._start_x = self.getProperty('StartX').value
self._end_x = self.getProperty('EndX').value
self._spec_min = self.getProperty('SpecMin').value
self._spec_max = self.getProperty('SpecMax').value
self._intensities_constrained = self.getProperty('ConstrainIntensities').value
self._do_extract_members = self.getProperty('ExtractMembers').value
self._minimizer = self.getProperty('Minimizer').value
self._max_iterations = self.getProperty('MaxIterations').value
self._result_name = self.getPropertyValue('OutputResultWorkspace')
self._parameter_name = self.getPropertyValue('OutputParameterWorkspace')
self._fit_group_name = self.getPropertyValue('OutputWorkspaceGroup')
def PyExec(self):
from IndirectCommon import (convertToElasticQ,
transposeFitParametersTable)
setup_prog = Progress(self, start=0.0, end=0.1, nreports=4)
setup_prog.report('generating output name')
output_workspace = self._fit_group_name
# check if the naming convention used is already correct
chopped_name = self._fit_group_name.split('_')
if 'WORKSPACE' in chopped_name[-1].upper():
output_workspace = '_'.join(chopped_name[:-1])
option = self._fit_type[:-2]
logger.information('Option: ' + option)
logger.information('Function: ' + str(self._function))
setup_prog.report('Cropping workspace')
# prepare input workspace for fitting
tmp_fit_workspace = "__Iqtfit_fit_ws"
if self._spec_max is None:
crop_alg = self.createChildAlgorithm("CropWorkspace", enableLogging=False)
crop_alg.setProperty("InputWorkspace", self._input_ws)
crop_alg.setProperty("OutputWorkspace", tmp_fit_workspace)
crop_alg.setProperty("XMin", self._start_x)
crop_alg.setProperty("XMax", self._end_x)
crop_alg.setProperty("StartWorkspaceIndex", self._spec_min)
crop_alg.execute()
else:
crop_alg = self.createChildAlgorithm("CropWorkspace", enableLogging=False)
crop_alg.setProperty("InputWorkspace", self._input_ws)
crop_alg.setProperty("OutputWorkspace", tmp_fit_workspace)
crop_alg.setProperty("XMin", self._start_x)
crop_alg.setProperty("XMax", self._end_x)
crop_alg.setProperty("StartWorkspaceIndex", self._spec_min)
crop_alg.setProperty("EndWorkspaceIndex", self._spec_max)
crop_alg.execute()
setup_prog.report('Converting to Histogram')
convert_to_hist_alg = self.createChildAlgorithm("ConvertToHistogram", enableLogging=False)
convert_to_hist_alg.setProperty("InputWorkspace", crop_alg.getProperty("OutputWorkspace").value)
convert_to_hist_alg.setProperty("OutputWorkspace", tmp_fit_workspace)
convert_to_hist_alg.execute()
mtd.addOrReplace(tmp_fit_workspace, convert_to_hist_alg.getProperty("OutputWorkspace").value)
setup_prog.report('Convert to Elastic Q')
convertToElasticQ(tmp_fit_workspace)
# fit multi-domain function to workspace
fit_prog = Progress(self, start=0.1, end=0.8, nreports=2)
multi_domain_func, kwargs = _create_multi_domain_func(self._function, tmp_fit_workspace)
fit_prog.report('Fitting...')
ms.Fit(Function=multi_domain_func,
InputWorkspace=tmp_fit_workspace,
WorkspaceIndex=0,
Output=output_workspace,
CreateOutput=True,
Minimizer=self._minimizer,
MaxIterations=self._max_iterations,
OutputCompositeMembers=self._do_extract_members,
**kwargs)
fit_prog.report('Fitting complete')
conclusion_prog = Progress(self, start=0.8, end=1.0, nreports=5)
conclusion_prog.report('Renaming workspaces')
# rename workspaces to match user input
rename_alg = self.createChildAlgorithm("RenameWorkspace", enableLogging=False)
if output_workspace + "_Workspaces" != self._fit_group_name:
rename_alg.setProperty("InputWorkspace", output_workspace + "_Workspaces")
rename_alg.setProperty("OutputWorkspace", self._fit_group_name)
rename_alg.execute()
if output_workspace + "_Parameters" != self._parameter_name:
rename_alg.setProperty("InputWorkspace", output_workspace + "_Parameters")
rename_alg.setProperty("OutputWorkspace", self._parameter_name)
rename_alg.execute()
conclusion_prog.report('Transposing parameter table')
transposeFitParametersTable(self._parameter_name)
# set first column of parameter table to be axis values
x_axis = mtd[tmp_fit_workspace].getAxis(1)
axis_values = x_axis.extractValues()
for i, value in enumerate(axis_values):
mtd[self._parameter_name].setCell('axis-1', i, value)
# convert parameters to matrix workspace
parameter_names = 'A0,Height,Lifetime,Stretching'
conclusion_prog.report('Processing indirect fit parameters')
pifp_alg = self.createChildAlgorithm("ProcessIndirectFitParameters")
pifp_alg.setProperty("InputWorkspace", self._parameter_name)
pifp_alg.setProperty("ColumnX", "axis-1")
pifp_alg.setProperty("XAxisUnit", "MomentumTransfer")
pifp_alg.setProperty("ParameterNames", parameter_names)
pifp_alg.setProperty("OutputWorkspace", self._result_name)
pifp_alg.execute()
result_workspace = pifp_alg.getProperty("OutputWorkspace").value
mtd.addOrReplace(self._result_name, result_workspace)
# create and add sample logs
sample_logs = {'start_x': self._start_x, 'end_x': self._end_x, 'fit_type': self._fit_type[:-2],
'intensities_constrained': self._intensities_constrained, 'beta_constrained': True}
conclusion_prog.report('Copying sample logs')
copy_log_alg = self.createChildAlgorithm("CopyLogs", enableLogging=False)
copy_log_alg.setProperty("InputWorkspace", self._input_ws)
copy_log_alg.setProperty("OutputWorkspace", result_workspace)
copy_log_alg.execute()
copy_log_alg.setProperty("InputWorkspace", self._input_ws)
copy_log_alg.setProperty("OutputWorkspace", self._fit_group_name)
copy_log_alg.execute()
log_names = [item for item in sample_logs]
log_values = [sample_logs[item] for item in sample_logs]
conclusion_prog.report('Adding sample logs')
add_sample_log_multi = self.createChildAlgorithm("AddSampleLogMultiple", enableLogging=False)
add_sample_log_multi.setProperty("Workspace", result_workspace.name())
add_sample_log_multi.setProperty("LogNames", log_names)
add_sample_log_multi.setProperty("LogValues", log_values)
add_sample_log_multi.execute()
add_sample_log_multi.setProperty("Workspace", self._fit_group_name)
add_sample_log_multi.setProperty("LogNames", log_names)
add_sample_log_multi.setProperty("LogValues", log_values)
add_sample_log_multi.execute()
delete_alg = self.createChildAlgorithm("DeleteWorkspace", enableLogging=False)
delete_alg.setProperty("Workspace", tmp_fit_workspace)
delete_alg.execute()
if self._do_extract_members:
ms.ExtractQENSMembers(InputWorkspace=self._input_ws,
ResultWorkspace=self._fit_group_name,
OutputWorkspace=self._fit_group_name.rsplit('_', 1)[0] + "_Members")
self.setProperty('OutputResultWorkspace', result_workspace)
self.setProperty('OutputParameterWorkspace', self._parameter_name)
self.setProperty('OutputWorkspaceGroup', self._fit_group_name)
conclusion_prog.report('Algorithm complete')
def _create_multi_domain_func(function, input_ws):
multi = 'composite=MultiDomainFunction,NumDeriv=true;'
comp = '(composite=CompositeFunction,NumDeriv=true,$domains=i;' + str(function) + ');'
stretched_indices = _find_indices_of_stretched_exponentials(function)
if not stretched_indices:
logger.warning("Stretched Exponential not found in function, tie-creation skipped.")
return function
ties = []
kwargs = {}
num_spectra = mtd[input_ws].getNumberHistograms()
for i in range(0, num_spectra):
multi += comp
kwargs['WorkspaceIndex_' + str(i)] = i
if i > 0:
kwargs['InputWorkspace_' + str(i)] = input_ws
# tie beta for every spectrum
for stretched_index in stretched_indices:
ties.append('f{0}.f{1}.Stretching=f0.f{1}.Stretching'.format(i, stretched_index))
ties = ','.join(ties)
multi += 'ties=(' + ties + ')'
return multi, kwargs
def _find_indices_of_stretched_exponentials(composite):
indices = []
for index in range(0, len(composite)):
if composite.getFunction(index).name() == "StretchExp":
indices.append(index)
return indices
AlgorithmFactory.subscribe(IqtFitMultiple)
| gpl-3.0 | 4,073,173,575,260,572,700 | 46.29703 | 117 | 0.636452 | false | 4.105127 | false | false | false |
freevoid/django-datafilters | datafilters/views.py | 1 | 1823 | from django.views.generic.list import MultipleObjectMixin
__all__ = ('FilterFormMixin',)
class FilterFormMixin(MultipleObjectMixin):
"""
Mixin that adds filtering behaviour for Class Based Views.
"""
filter_form_cls = None
use_filter_chaining = False
context_filterform_name = 'filterform'
def get_filter(self):
"""
Get FilterForm instance.
"""
return self.filter_form_cls(self.request.GET,
runtime_context=self.get_runtime_context(),
use_filter_chaining=self.use_filter_chaining)
def get_queryset(self):
"""
Return queryset with filtering applied (if filter form passes
validation).
"""
qs = super(FilterFormMixin, self).get_queryset()
filter_form = self.get_filter()
if filter_form.is_valid():
qs = filter_form.filter(qs).distinct()
return qs
def get_context_data(self, **kwargs):
"""
Add filter form to the context.
TODO: Currently we construct the filter form object twice - in
get_queryset and here, in get_context_data. Will need to figure out a
good way to eliminate extra initialization.
"""
context = super(FilterFormMixin, self).get_context_data(**kwargs)
context[self.context_filterform_name] = self.get_filter()
return context
def get_runtime_context(self):
"""
Get context for filter form to allow passing runtime information,
such as user, cookies, etc.
Method might be overriden by implementation and context returned by
this method will be accessible in to_lookup() method implementation
of FilterSpec.
"""
return {'user': self.request.user}
| mit | 2,275,489,432,341,574,700 | 32.759259 | 81 | 0.61492 | false | 4.490148 | false | false | false |
arruda/pyfuzzy | fuzzy/norm/HamacherSum.py | 1 | 1242 | # -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: HamacherSum.py,v 1.4 2009/08/07 07:19:19 rliebscher Exp $"
from fuzzy.norm.Norm import Norm,NormException
class HamacherSum(Norm):
def __init__(self):
Norm.__init__(self,Norm.S_NORM)
def __call__(self,*args):
if len(args) != 2:
raise NormException("%s is supported only for 2 parameters" % self.__class__.__name__ )
x = float(args[0])
y = float(args[1])
if x*y == 1.:
return 1.
return (x+y-2.0*x*y)/(1.0-x*y) | lgpl-3.0 | 6,025,396,698,692,897,000 | 35.558824 | 99 | 0.665056 | false | 3.488764 | false | false | false |
mganeva/mantid | scripts/SANS/sans/gui_logic/presenter/run_tab_presenter.py | 1 | 57627 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
""" The run tab presenter.
This presenter is essentially the brain of the reduction gui. It controls other presenters and is mainly responsible
for presenting and generating the reduction settings.
"""
from __future__ import (absolute_import, division, print_function)
import copy
import csv
import os
import sys
import time
import traceback
from mantid.api import (FileFinder)
from mantid.kernel import Logger, ConfigService
from sans.command_interface.batch_csv_file_parser import BatchCsvParser
from sans.common.constants import ALL_PERIODS
from sans.common.enums import (BatchReductionEntry, RangeStepType, SampleShape, FitType, RowState, SANSInstrument)
from sans.gui_logic.gui_common import (get_reduction_mode_strings_for_gui, get_string_for_gui_from_instrument,
add_dir_to_datasearch, remove_dir_from_datasearch)
from sans.gui_logic.models.batch_process_runner import BatchProcessRunner
from sans.gui_logic.models.beam_centre_model import BeamCentreModel
from sans.gui_logic.models.create_state import create_states
from sans.gui_logic.models.diagnostics_page_model import run_integral, create_state
from sans.gui_logic.models.state_gui_model import StateGuiModel
from sans.gui_logic.models.table_model import TableModel, TableIndexModel
from sans.gui_logic.presenter.add_runs_presenter import OutputDirectoryObserver as SaveDirectoryObserver
from sans.gui_logic.presenter.beam_centre_presenter import BeamCentrePresenter
from sans.gui_logic.presenter.diagnostic_presenter import DiagnosticsPagePresenter
from sans.gui_logic.presenter.masking_table_presenter import (MaskingTablePresenter)
from sans.gui_logic.presenter.save_other_presenter import SaveOtherPresenter
from sans.gui_logic.presenter.settings_diagnostic_presenter import (SettingsDiagnosticPresenter)
from sans.sans_batch import SANSCentreFinder
from sans.user_file.user_file_reader import UserFileReader
from ui.sans_isis import SANSSaveOtherWindow
from ui.sans_isis.sans_data_processor_gui import SANSDataProcessorGui
from ui.sans_isis.work_handler import WorkHandler
from qtpy import PYQT4
IN_MANTIDPLOT = False
if PYQT4:
try:
from mantidplot import graph, newGraph
IN_MANTIDPLOT = True
except ImportError:
pass
else:
from mantidqt.plotting.functions import get_plot_fig
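# Background colours used to flag each row's processing state in the batch table view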
row_state_to_colour_mapping = {RowState.Unprocessed: '#FFFFFF', RowState.Processed: '#d0f4d0',
RowState.Error: '#accbff'}
def log_times(func):
"""
    Generic decorator that times the execution of the wrapped function and
    logs the elapsed time via the instance's logger.
"""
def run(*args, **kwargs):
t0 = time.time()
result = func(*args, **kwargs)
t1 = time.time()
time_taken = t1 - t0
# args[0] is the self parameter
args[0].sans_logger.information("The generation of all states took {}s".format(time_taken))
return result
return run
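# Illustrative usage of log_times (an assumed example, not part of the original source):
#     @log_times
#     def get_states(self, row_index=None):
#         ...
# The wrapped callable must be a bound method whose instance exposes a `sans_logger`
# attribute, because the decorator logs through args[0].sans_logger.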
class RunTabPresenter(object):
class ConcreteRunTabListener(SANSDataProcessorGui.RunTabListener):
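        """Forwards signals emitted by the view (SANSDataProcessorGui) to the owning presenter."""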
def __init__(self, presenter):
super(RunTabPresenter.ConcreteRunTabListener, self).__init__()
self._presenter = presenter
def on_user_file_load(self):
self._presenter.on_user_file_load()
def on_mask_file_add(self):
self._presenter.on_mask_file_add()
def on_batch_file_load(self):
self._presenter.on_batch_file_load()
def on_process_selected_clicked(self):
self._presenter.on_process_selected_clicked()
def on_process_all_clicked(self):
self._presenter.on_process_all_clicked()
def on_load_clicked(self):
self._presenter.on_load_clicked()
def on_export_table_clicked(self):
self._presenter.on_export_table_clicked()
def on_multi_period_selection(self, show_periods):
self._presenter.on_multiperiod_changed(show_periods)
def on_data_changed(self, row, column, new_value, old_value):
self._presenter.on_data_changed(row, column, new_value, old_value)
def on_manage_directories(self):
self._presenter.on_manage_directories()
def on_instrument_changed(self):
self._presenter.on_instrument_changed()
def on_row_inserted(self, index, row):
self._presenter.on_row_inserted(index, row)
def on_rows_removed(self, rows):
self._presenter.on_rows_removed(rows)
def on_copy_rows_requested(self):
self._presenter.on_copy_rows_requested()
def on_paste_rows_requested(self):
self._presenter.on_paste_rows_requested()
def on_insert_row(self):
self._presenter.on_insert_row()
def on_erase_rows(self):
self._presenter.on_erase_rows()
def on_cut_rows(self):
self._presenter.on_cut_rows_requested()
def on_save_other(self):
self._presenter.on_save_other()
def on_sample_geometry_selection(self, show_geometry):
self._presenter.on_sample_geometry_view_changed(show_geometry)
def on_compatibility_unchecked(self):
self._presenter.on_compatibility_unchecked()
class ProcessListener(WorkHandler.WorkListener):
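        """Receives completion and error callbacks from asynchronous processing and relays them to the presenter."""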
def __init__(self, presenter):
super(RunTabPresenter.ProcessListener, self).__init__()
self._presenter = presenter
def on_processing_finished(self, result):
self._presenter.on_processing_finished(result)
def on_processing_error(self, error):
self._presenter.on_processing_error(error)
def __init__(self, facility, view=None):
super(RunTabPresenter, self).__init__()
self._facility = facility
# Logger
self.sans_logger = Logger("SANS")
# Name of graph to output to
self.output_graph = 'SANS-Latest'
# For matplotlib continuous plotting
self.output_fig = None
self.progress = 0
# Models that are being used by the presenter
self._state_model = None
self._table_model = TableModel()
self._table_model.subscribe_to_model_changes(self)
# Presenter needs to have a handle on the view since it delegates it
self._view = None
self.set_view(view)
self._processing = False
self.work_handler = WorkHandler()
self.batch_process_runner = BatchProcessRunner(self.notify_progress,
self.on_processing_finished,
self.on_processing_error)
# File information for the first input
self._file_information = None
self._clipboard = []
# Settings diagnostic tab presenter
self._settings_diagnostic_tab_presenter = SettingsDiagnosticPresenter(self)
# Masking table presenter
self._masking_table_presenter = MaskingTablePresenter(self)
self._table_model.subscribe_to_model_changes(self._masking_table_presenter)
# Beam centre presenter
self._beam_centre_presenter = BeamCentrePresenter(self, WorkHandler, BeamCentreModel,
SANSCentreFinder)
self._table_model.subscribe_to_model_changes(self._beam_centre_presenter)
# Workspace Diagnostic page presenter
self._workspace_diagnostic_presenter = DiagnosticsPagePresenter(self, WorkHandler,
run_integral, create_state,
self._facility)
# Check save dir for display
self._save_directory_observer = \
SaveDirectoryObserver(self._handle_output_directory_changed)
def _default_gui_setup(self):
"""
        Provides a default setup of the GUI. This is important for the initial start-up, when the view is being set.
"""
# Set the possible reduction modes
reduction_mode_list = get_reduction_mode_strings_for_gui()
self._view.set_reduction_modes(reduction_mode_list)
# Set the step type options for wavelength
range_step_types = [RangeStepType.to_string(RangeStepType.Lin),
RangeStepType.to_string(RangeStepType.Log),
RangeStepType.to_string(RangeStepType.RangeLog),
RangeStepType.to_string(RangeStepType.RangeLin)]
self._view.wavelength_step_type = range_step_types
# Set the geometry options. This needs to include the option to read the sample shape from file.
sample_shape = ["Read from file",
SampleShape.Cylinder,
SampleShape.FlatPlate,
SampleShape.Disc]
self._view.sample_shape = sample_shape
# Set the q range
self._view.q_1d_step_type = [RangeStepType.to_string(RangeStepType.Lin),
RangeStepType.to_string(RangeStepType.Log)]
self._view.q_xy_step_type = [RangeStepType.to_string(RangeStepType.Lin),
RangeStepType.to_string(RangeStepType.Log)]
# Set the fit options
fit_types = [FitType.to_string(FitType.Linear),
FitType.to_string(FitType.Logarithmic),
FitType.to_string(FitType.Polynomial)]
self._view.transmission_sample_fit_type = fit_types
self._view.transmission_can_fit_type = fit_types
def _handle_output_directory_changed(self, new_directory):
"""
        Update the GUI to display the new save location for workspaces.
        :param new_directory: string. The new default save directory for output files.
        :return:
"""
self._view.set_out_file_directory(new_directory)
# ------------------------------------------------------------------------------------------------------------------
# Table + Actions
# ------------------------------------------------------------------------------------------------------------------
def set_view(self, view):
"""
        Sets the view and performs the default GUI setup.
        :param view: the SANSDataProcessorGui; the presenter needs access to parts of its API
"""
if view is not None:
self._view = view
# Add a listener to the view
listener = RunTabPresenter.ConcreteRunTabListener(self)
self._view.add_listener(listener)
# Default gui setup
self._default_gui_setup()
self._view.disable_process_buttons()
# Set appropriate view for the state diagnostic tab presenter
self._settings_diagnostic_tab_presenter.set_view(self._view.settings_diagnostic_tab)
# Set appropriate view for the masking table presenter
self._masking_table_presenter.set_view(self._view.masking_table)
# Set the appropriate view for the beam centre presenter
self._beam_centre_presenter.set_view(self._view.beam_centre)
# Set the appropriate view for the diagnostic page
self._workspace_diagnostic_presenter.set_view(self._view.diagnostic_page,
self._view.instrument)
self._view.setup_layout()
self._view.set_out_file_directory(ConfigService.Instance().getString("defaultsave.directory"))
self._view.set_out_default_user_file()
self._view.set_out_default_output_mode()
self._view.set_out_default_save_can()
self._view.set_hinting_line_edit_for_column(
self._table_model.column_name_converter.index('sample_shape'),
self._table_model.get_sample_shape_hint_strategy())
self._view.set_hinting_line_edit_for_column(
self._table_model.column_name_converter.index('options_column_model'),
self._table_model.get_options_hint_strategy())
def on_user_file_load(self):
"""
Loads the user file. Populates the models and the view.
"""
error_msg = "Loading of the user file failed"
try:
# 1. Get the user file path from the view
user_file_path = self._view.get_user_file_path()
if not user_file_path:
return
# 2. Get the full file path
user_file_path = FileFinder.getFullPath(user_file_path)
if not os.path.exists(user_file_path):
raise RuntimeError(
"The user path {} does not exist. Make sure a valid user file path"
" has been specified.".format(user_file_path))
except RuntimeError as path_error:
# This exception block runs if user file does not exist
self._on_user_file_load_failure(path_error, error_msg + " when finding file.")
else:
try:
self._table_model.user_file = user_file_path
# Clear out the current view
self._view.reset_all_fields_to_default()
# 3. Read and parse the user file
user_file_reader = UserFileReader(user_file_path)
user_file_items = user_file_reader.read_user_file()
except (RuntimeError, ValueError) as e:
# It is in this exception block that loading fails if the file is invalid (e.g. a csv)
self._on_user_file_load_failure(e, error_msg + " when reading file.", use_error_name=True)
else:
try:
# 4. Populate the model
self._state_model = StateGuiModel(user_file_items)
# 5. Update the views.
self._update_view_from_state_model()
self._beam_centre_presenter.update_centre_positions(self._state_model)
self._beam_centre_presenter.on_update_rows()
self._masking_table_presenter.on_update_rows()
self._workspace_diagnostic_presenter.on_user_file_load(user_file_path)
# 6. Warning if user file did not contain a recognised instrument
if self._view.instrument == SANSInstrument.NoInstrument:
raise RuntimeError("User file did not contain a SANS Instrument.")
except RuntimeError as instrument_e:
                    # This exception block runs if the user file does not contain a parsable instrument
self._on_user_file_load_failure(instrument_e, error_msg + " when reading instrument.")
except Exception as other_error:
# If we don't catch all exceptions, SANS can fail to open if last loaded
# user file contains an error that would not otherwise be caught
traceback.print_exc()
self._on_user_file_load_failure(other_error, "Unknown error in loading user file.",
use_error_name=True)
def _on_user_file_load_failure(self, e, message, use_error_name=False):
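        """
        Resets the view to the 'no instrument' state and reports a user file loading error.
        :param e: the exception that was raised.
        :param message: the context message to display.
        :param use_error_name: forwarded to display_errors to control how the error is reported.
        """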
self._setup_instrument_specific_settings(SANSInstrument.NoInstrument)
self._view.instrument = SANSInstrument.NoInstrument
self._view.on_user_file_load_failure()
self.display_errors(e, message, use_error_name)
def on_batch_file_load(self):
"""
Loads a batch file and populates the batch table based on that.
"""
try:
# 1. Get the batch file from the view
batch_file_path = self._view.get_batch_file_path()
if not batch_file_path:
return
datasearch_dirs = ConfigService["datasearch.directories"]
batch_file_directory, datasearch_dirs = add_dir_to_datasearch(batch_file_path, datasearch_dirs)
ConfigService["datasearch.directories"] = datasearch_dirs
if not os.path.exists(batch_file_path):
raise RuntimeError(
"The batch file path {} does not exist. Make sure a valid batch file path"
" has been specified.".format(batch_file_path))
self._table_model.batch_file = batch_file_path
# 2. Read the batch file
batch_file_parser = BatchCsvParser(batch_file_path)
parsed_rows = batch_file_parser.parse_batch_file()
# 3. Populate the table
self._table_model.clear_table_entries()
for index, row in enumerate(parsed_rows):
self._add_row_to_table_model(row, index)
self._table_model.remove_table_entries([len(parsed_rows)])
except RuntimeError as e:
if batch_file_directory:
# Remove added directory from datasearch.directories
ConfigService["datasearch.directories"] = remove_dir_from_datasearch(batch_file_directory, datasearch_dirs)
self.sans_logger.error("Loading of the batch file failed. {}".format(str(e)))
self.display_warning_box('Warning', 'Loading of the batch file failed', str(e))
def _add_row_to_table_model(self, row, index):
"""
        Adds a parsed batch-file row to the table model at the given index.
"""
def get_string_entry(_tag, _row):
_element = ""
if _tag in _row:
_element = _row[_tag]
return _element
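        # ALL_PERIODS is the sentinel for "no specific period requested" and is shown as an empty cell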
def get_string_period(_tag):
return "" if _tag == ALL_PERIODS else str(_tag)
# 1. Pull out the entries
sample_scatter = get_string_entry(BatchReductionEntry.SampleScatter, row)
sample_scatter_period = get_string_period(
get_string_entry(BatchReductionEntry.SampleScatterPeriod, row))
sample_transmission = get_string_entry(BatchReductionEntry.SampleTransmission, row)
sample_transmission_period = \
get_string_period(get_string_entry(BatchReductionEntry.SampleTransmissionPeriod, row))
sample_direct = get_string_entry(BatchReductionEntry.SampleDirect, row)
sample_direct_period = get_string_period(
get_string_entry(BatchReductionEntry.SampleDirectPeriod, row))
can_scatter = get_string_entry(BatchReductionEntry.CanScatter, row)
can_scatter_period = get_string_period(
get_string_entry(BatchReductionEntry.CanScatterPeriod, row))
can_transmission = get_string_entry(BatchReductionEntry.CanTransmission, row)
can_transmission_period = get_string_period(
get_string_entry(BatchReductionEntry.CanScatterPeriod, row))
can_direct = get_string_entry(BatchReductionEntry.CanDirect, row)
can_direct_period = get_string_period(
get_string_entry(BatchReductionEntry.CanDirectPeriod, row))
output_name = get_string_entry(BatchReductionEntry.Output, row)
user_file = get_string_entry(BatchReductionEntry.UserFile, row)
row_entry = [sample_scatter, sample_scatter_period, sample_transmission,
sample_transmission_period,
sample_direct, sample_direct_period, can_scatter, can_scatter_period,
can_transmission, can_transmission_period,
can_direct, can_direct_period,
output_name, user_file, '', '']
table_index_model = TableIndexModel(*row_entry)
self._table_model.add_table_entry(index, table_index_model)
def on_update_rows(self):
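        """Refreshes the view's table from the table model."""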
self.update_view_from_table_model()
def update_view_from_table_model(self):
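        """Clears the view's table and repopulates it from the table model, restoring row colours, tooltips and period columns."""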
self._view.clear_table()
self._view.hide_period_columns()
for row_index, row in enumerate(self._table_model._table_entries):
row_entry = [str(x) for x in row.to_list()]
self._view.add_row(row_entry)
self._view.change_row_color(row_state_to_colour_mapping[row.row_state], row_index + 1)
self._view.set_row_tooltip(row.tool_tip, row_index + 1)
if row.isMultiPeriod():
self._view.show_period_columns()
self._view.remove_rows([0])
self._view.clear_selection()
def on_data_changed(self, row, column, new_value, old_value):
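        """Writes an edited cell value back to the table model and resets the row to the unprocessed state."""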
self._table_model.update_table_entry(row, column, new_value)
self._view.change_row_color(row_state_to_colour_mapping[RowState.Unprocessed], row)
self._view.set_row_tooltip('', row)
self._beam_centre_presenter.on_update_rows()
self._masking_table_presenter.on_update_rows()
def on_instrument_changed(self):
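        """Re-applies instrument-specific settings when the instrument selection changes."""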
self._setup_instrument_specific_settings()
# ----------------------------------------------------------------------------------------------
# Processing
# ----------------------------------------------------------------------------------------------
def _handle_get_states(self, rows):
"""
Return the states for the supplied rows, calling on_processing_error for any errors
which occur.
"""
states, errors = self.get_states(row_index=rows)
for row, error in errors.items():
self.on_processing_error(row, error)
return states
def _plot_graph(self):
"""
Plot a graph if continuous output specified.
"""
if self._view.plot_results:
if IN_MANTIDPLOT:
if not graph(self.output_graph):
newGraph(self.output_graph)
elif not PYQT4:
ax_properties = {'yscale': 'log',
'xscale': 'log'}
fig, _ = get_plot_fig(ax_properties=ax_properties, window_title=self.output_graph)
fig.show()
self.output_fig = fig
def _set_progress_bar_min_max(self, min, max):
"""
        Sets the current value and the maximum of the progress bar; the displayed progress is min / max.
:param min: Current value of the progress bar.
:param max: The value at which the progress bar is full
"""
setattr(self._view, 'progress_bar_value', min)
setattr(self._view, 'progress_bar_maximum', max)
def _process_rows(self, rows):
"""
Processes a list of rows. Any errors cause the row to be coloured red.
"""
try:
for row in rows:
self._table_model.reset_row_state(row)
self.update_view_from_table_model()
self._view.disable_buttons()
self._processing = True
self.sans_logger.information("Starting processing of batch table.")
states = self._handle_get_states(rows)
if not states:
raise Exception("No states found")
self._plot_graph()
self.progress = 0
self._set_progress_bar_min_max(self.progress, len(states))
save_can = self._view.save_can
# MantidPlot and Workbench have different approaches to plotting
output_graph = self.output_graph if PYQT4 else self.output_fig
self.batch_process_runner.process_states(states,
self._view.use_optimizations,
self._view.output_mode,
self._view.plot_results,
output_graph,
save_can)
except Exception as e:
self.on_processing_finished(None)
self.sans_logger.error("Process halted due to: {}".format(str(e)))
self.display_warning_box('Warning', 'Process halted', str(e))
def on_process_all_clicked(self):
"""
Process all entries in the table, regardless of selection.
"""
all_rows = range(self._table_model.get_number_of_rows())
all_rows = self._table_model.get_non_empty_rows(all_rows)
if all_rows:
self._process_rows(all_rows)
def on_process_selected_clicked(self):
"""
Process selected table entries.
"""
selected_rows = self._view.get_selected_rows()
selected_rows = self._table_model.get_non_empty_rows(selected_rows)
if selected_rows:
self._process_rows(selected_rows)
def on_processing_error(self, row, error_msg):
"""
An error occurs while processing the row with index row, error_msg is displayed as a
tooltip on the row.
"""
self.increment_progress()
self._table_model.set_row_to_error(row, error_msg)
self.update_view_from_table_model()
def on_processing_finished(self, result):
self._view.enable_buttons()
self._processing = False
def on_load_clicked(self):
try:
self._view.disable_buttons()
self._processing = True
self.sans_logger.information("Starting load of batch table.")
selected_rows = self._get_selected_rows()
selected_rows = self._table_model.get_non_empty_rows(selected_rows)
states, errors = self.get_states(row_index=selected_rows)
for row, error in errors.items():
self.on_processing_error(row, error)
if not states:
self.on_processing_finished(None)
return
self.progress = 0
setattr(self._view, 'progress_bar_value', self.progress)
setattr(self._view, 'progress_bar_maximum', len(states))
self.batch_process_runner.load_workspaces(states)
except Exception as e:
self._view.enable_buttons()
self.sans_logger.error("Process halted due to: {}".format(str(e)))
self.display_warning_box("Warning", "Process halted", str(e))
def on_export_table_clicked(self):
non_empty_rows = self.get_row_indices()
if len(non_empty_rows) == 0:
self.sans_logger.notice("Cannot export table as it is empty.")
return
        # Python 2 and 3 require different file modes when writing rows to csv files
if sys.version_info[0] == 2:
open_type = 'wb'
else:
open_type = 'w'
try:
self._view.disable_buttons()
default_filename = self._table_model.batch_file
filename = self.display_save_file_box("Save table as", default_filename, "*.csv")
if filename:
self.sans_logger.notice("Starting export of table.")
if filename[-4:] != '.csv':
filename += '.csv'
with open(filename, open_type) as outfile:
# Pass filewriting object rather than filename to make testing easier
writer = csv.writer(outfile)
self._export_table(writer, non_empty_rows)
self.sans_logger.notice("Table exporting finished.")
self._view.enable_buttons()
except Exception as e:
self._view.enable_buttons()
self.sans_logger.error("Export halted due to : {}".format(str(e)))
self.display_warning_box("Warning", "Export halted", str(e))
def on_multiperiod_changed(self, show_periods):
if show_periods:
self._view.show_period_columns()
else:
self._view.hide_period_columns()
def display_errors(self, error, context_msg, use_error_name=False):
"""
Code for alerting the user to a caught error
:param error: a caught exception
:param context_msg: string. Text to explain what SANS was trying to do
when the error occurred. e.g. 'Loading of the user file failed'.
:param use_error_name: bool. If True, append type of error (e.g. RuntimeError) to context_msg
:return:
"""
logger_msg = context_msg
if use_error_name:
logger_msg += " {}:".format(type(error).__name__)
logger_msg += " {}"
self.sans_logger.error(logger_msg.format(str(error)))
self.display_warning_box('Warning', context_msg, str(error))
def display_warning_box(self, title, text, detailed_text):
self._view.display_message_box(title, text, detailed_text)
def display_save_file_box(self, title, default_path, file_filter):
filename = self._view.display_save_file_box(title, default_path, file_filter)
return filename
def notify_progress(self, row, out_shift_factors, out_scale_factors):
self.increment_progress()
if out_scale_factors and out_shift_factors:
self._table_model.set_option(row, 'MergeScale', round(out_scale_factors[0], 3))
self._table_model.set_option(row, 'MergeShift', round(out_shift_factors[0], 3))
self._table_model.set_row_to_processed(row, '')
def increment_progress(self):
self.progress = self.progress + 1
setattr(self._view, 'progress_bar_value', self.progress)
# ----------------------------------------------------------------------------------------------
# Row manipulation
# ----------------------------------------------------------------------------------------------
def num_rows(self):
return self._table_model.get_number_of_rows()
def on_row_inserted(self, index, row):
"""
Insert a row at a selected point
"""
row_table_index = TableIndexModel(*row)
self._table_model.add_table_entry(index, row_table_index)
def on_insert_row(self):
"""
Add an empty row to the table after the first selected row (or at the end of the table
if nothing is selected).
"""
selected_rows = self._view.get_selected_rows()
selected_row = selected_rows[0] + 1 if selected_rows else self.num_rows()
empty_row = self._table_model.create_empty_row()
self._table_model.add_table_entry(selected_row, empty_row)
def on_erase_rows(self):
"""
Make all selected rows empty.
"""
selected_rows = self._view.get_selected_rows()
empty_row = self._table_model.create_empty_row()
for row in selected_rows:
empty_row = TableModel.create_empty_row()
self._table_model.replace_table_entries([row], [empty_row])
def on_rows_removed(self, rows):
"""
Remove rows from the table
"""
self._table_model.remove_table_entries(rows)
def on_copy_rows_requested(self):
selected_rows = self._view.get_selected_rows()
self._clipboard = []
for row in selected_rows:
data_from_table_model = self._table_model.get_table_entry(row).to_list()
self._clipboard.append(data_from_table_model)
def on_cut_rows_requested(self):
self.on_copy_rows_requested()
rows = self._view.get_selected_rows()
self.on_rows_removed(rows)
def on_paste_rows_requested(self):
if self._clipboard:
selected_rows = self._view.get_selected_rows()
selected_rows = selected_rows if selected_rows else [self.num_rows()]
replacement_table_index_models = [TableIndexModel(*x) for x in self._clipboard]
self._table_model.replace_table_entries(selected_rows, replacement_table_index_models)
def on_manage_directories(self):
self._view.show_directory_manager()
def on_sample_geometry_view_changed(self, show_geometry):
if show_geometry:
self._view.show_geometry()
else:
self._view.hide_geometry()
def on_compatibility_unchecked(self):
self.display_warning_box('Warning', 'Are you sure you want to uncheck compatibility mode?',
'Non-compatibility mode has known issues. DO NOT USE if applying bin masking'
' to event workspaces.')
def get_row_indices(self):
"""
Gets the indices of row which are not empty.
:return: a list of row indices.
"""
row_indices_which_are_not_empty = []
number_of_rows = self._table_model.get_number_of_rows()
for row in range(number_of_rows):
if not self.is_empty_row(row):
row_indices_which_are_not_empty.append(row)
return row_indices_which_are_not_empty
def on_mask_file_add(self):
"""
We get the added mask file name and add it to the list of masks
"""
new_mask_file = self._view.get_mask_file()
if not new_mask_file:
return
new_mask_file_full_path = FileFinder.getFullPath(new_mask_file)
if not new_mask_file_full_path:
return
# Add the new mask file to state model
mask_files = self._state_model.mask_files
mask_files.append(new_mask_file)
self._state_model.mask_files = mask_files
# Make sure that the sub-presenters are up to date with this change
self._masking_table_presenter.on_update_rows()
self._settings_diagnostic_tab_presenter.on_update_rows()
self._beam_centre_presenter.on_update_rows()
def is_empty_row(self, row):
"""
Checks if a row has no entries. These rows will be ignored.
:param row: the row index
:return: True if the row is empty.
"""
return self._table_model.is_empty_row(row)
def on_save_other(self):
self.save_other_presenter = SaveOtherPresenter(parent_presenter=self)
save_other_view = SANSSaveOtherWindow.SANSSaveOtherDialog(self._view)
self.save_other_presenter.set_view(save_other_view)
self.save_other_presenter.show()
# def _validate_rows(self):
# """
# Validation of the rows. A minimal setup requires that ScatterSample is set.
# """
# # If SampleScatter is empty, then don't run the reduction.
# # We allow empty rows for now, since we cannot remove them from Python.
# number_of_rows = self._table_model.get_number_of_rows()
# for row in range(number_of_rows):
# if not self.is_empty_row(row):
# sample_scatter = self._view.get_cell(row, 0)
# if not sample_scatter:
    #                 raise RuntimeError("Row {} has no SampleScatter specified. Please correct this.".format(row))
# ------------------------------------------------------------------------------------------------------------------
# Controls
# ------------------------------------------------------------------------------------------------------------------
def disable_controls(self):
"""
Disable all input fields and buttons during the execution of the reduction.
"""
        # TODO: think about enabling and disabling some controls during reduction
pass
def enable_controls(self):
"""
Enable all input fields and buttons after the execution has completed.
"""
        # TODO: think about enabling and disabling some controls during reduction
pass
# ----------------------------------------------------------------------------------------------
# Table Model and state population
# ------------------------------------------------------------------------------------------------------------------
def _get_selected_rows(self):
selected_rows = self._view.get_selected_rows()
selected_rows = selected_rows if selected_rows else range(self._table_model.get_number_of_rows())
for row in selected_rows:
self._table_model.reset_row_state(row)
self.update_view_from_table_model()
return selected_rows
@log_times
def get_states(self, row_index=None, file_lookup=True):
"""
Gathers the state information for all rows.
:param row_index: if a single row is selected, then only this row is returned,
else all the state for all rows is returned.
:return: a list of states.
"""
# 1. Update the state model
state_model_with_view_update = self._get_state_model_with_view_update()
# 2. Update the table model
table_model = self._table_model
# 3. Go through each row and construct a state object
states, errors = None, None
if table_model and state_model_with_view_update:
states, errors = create_states(state_model_with_view_update, table_model,
self._view.instrument,
self._facility,
row_index=row_index,
file_lookup=file_lookup)
if errors:
self.sans_logger.warning("Errors in getting states...")
for _, v in errors.items():
self.sans_logger.warning("{}".format(v))
return states, errors
def get_state_for_row(self, row_index, file_lookup=True):
"""
Creates the state for a particular row.
:param row_index: the row index
:return: a state if the index is valid and there is a state else None
"""
states, errors = self.get_states(row_index=[row_index], file_lookup=file_lookup)
if states is None:
self.sans_logger.warning(
"There does not seem to be data for a row {}.".format(row_index))
return None
if row_index in list(states.keys()):
if states:
return states[row_index]
return None
def _update_view_from_state_model(self):
self._set_on_view("instrument")
# Front tab view
self._set_on_view("zero_error_free")
self._set_on_view("save_types")
self._set_on_view("compatibility_mode")
self._set_on_view("merge_scale")
self._set_on_view("merge_shift")
self._set_on_view("merge_scale_fit")
self._set_on_view("merge_shift_fit")
self._set_on_view("merge_q_range_start")
self._set_on_view("merge_q_range_stop")
self._set_on_view("merge_max")
self._set_on_view("merge_min")
# Settings tab view
self._set_on_view("reduction_dimensionality")
self._set_on_view("reduction_mode")
self._set_on_view("event_slices")
self._set_on_view("event_binning")
self._set_on_view("merge_mask")
self._set_on_view("wavelength_step_type")
self._set_on_view("wavelength_min")
self._set_on_view("wavelength_max")
self._set_on_view("wavelength_step")
self._set_on_view("absolute_scale")
self._set_on_view("z_offset")
# Adjustment tab
self._set_on_view("normalization_incident_monitor")
self._set_on_view("normalization_interpolate")
self._set_on_view("transmission_incident_monitor")
self._set_on_view("transmission_interpolate")
self._set_on_view("transmission_roi_files")
self._set_on_view("transmission_mask_files")
self._set_on_view("transmission_radius")
self._set_on_view("transmission_monitor")
self._set_on_view("transmission_mn_shift")
self._set_on_view_transmission_fit()
self._set_on_view("pixel_adjustment_det_1")
self._set_on_view("pixel_adjustment_det_2")
self._set_on_view("wavelength_adjustment_det_1")
self._set_on_view("wavelength_adjustment_det_2")
# Q tab
self._set_on_view_q_rebin_string()
self._set_on_view("q_xy_max")
self._set_on_view("q_xy_step")
self._set_on_view("q_xy_step_type")
self._set_on_view("gravity_on_off")
self._set_on_view("gravity_extra_length")
self._set_on_view("use_q_resolution")
self._set_on_view_q_resolution_aperture()
self._set_on_view("q_resolution_delta_r")
self._set_on_view("q_resolution_collimation_length")
self._set_on_view("q_resolution_moderator_file")
self._set_on_view("r_cut")
self._set_on_view("w_cut")
# Mask
self._set_on_view("phi_limit_min")
self._set_on_view("phi_limit_max")
self._set_on_view("phi_limit_use_mirror")
self._set_on_view("radius_limit_min")
self._set_on_view("radius_limit_max")
def _set_on_view_transmission_fit_sample_settings(self):
# Set transmission_sample_use_fit
fit_type = self._state_model.transmission_sample_fit_type
use_fit = fit_type is not FitType.NoFit
self._view.transmission_sample_use_fit = use_fit
# Set the polynomial order for sample
polynomial_order = self._state_model.transmission_sample_polynomial_order if fit_type is FitType.Polynomial else 2 # noqa
self._view.transmission_sample_polynomial_order = polynomial_order
# Set the fit type for the sample
fit_type = fit_type if fit_type is not FitType.NoFit else FitType.Linear
self._view.transmission_sample_fit_type = fit_type
# Set the wavelength
wavelength_min = self._state_model.transmission_sample_wavelength_min
wavelength_max = self._state_model.transmission_sample_wavelength_max
if wavelength_min and wavelength_max:
self._view.transmission_sample_use_wavelength = True
self._view.transmission_sample_wavelength_min = wavelength_min
self._view.transmission_sample_wavelength_max = wavelength_max
def _set_on_view_transmission_fit(self):
# Steps for adding the transmission fit to the view
# 1. Check if individual settings exist. If so then set the view to separate, else set them to both
# 2. Apply the settings
separate_settings = self._state_model.has_transmission_fit_got_separate_settings_for_sample_and_can()
self._view.set_fit_selection(use_separate=separate_settings)
if separate_settings:
self._set_on_view_transmission_fit_sample_settings()
            # Set transmission_can_use_fit
            fit_type_can = self._state_model.transmission_can_fit_type
            use_can_fit = fit_type_can is not FitType.NoFit
            self._view.transmission_can_use_fit = use_can_fit
            # Set the polynomial order for can
            polynomial_order_can = self._state_model.transmission_can_polynomial_order if fit_type_can is FitType.Polynomial else 2  # noqa
            self._view.transmission_can_polynomial_order = polynomial_order_can
            # Set the fit type for the can
            fit_type_can = fit_type_can if fit_type_can is not FitType.NoFit else FitType.Linear
            self._view.transmission_can_fit_type = fit_type_can
# Set the wavelength
wavelength_min = self._state_model.transmission_can_wavelength_min
wavelength_max = self._state_model.transmission_can_wavelength_max
if wavelength_min and wavelength_max:
self._view.transmission_can_use_wavelength = True
self._view.transmission_can_wavelength_min = wavelength_min
self._view.transmission_can_wavelength_max = wavelength_max
else:
self._set_on_view_transmission_fit_sample_settings()
def _set_on_view_q_resolution_aperture(self):
self._set_on_view("q_resolution_source_a")
self._set_on_view("q_resolution_sample_a")
self._set_on_view("q_resolution_source_h")
self._set_on_view("q_resolution_sample_h")
self._set_on_view("q_resolution_source_w")
self._set_on_view("q_resolution_sample_w")
# If we have h1, h2, w1, and w2 selected then we want to select the rectangular aperture.
is_rectangular = self._state_model.q_resolution_source_h and self._state_model.q_resolution_sample_h and \
self._state_model.q_resolution_source_w and self._state_model.q_resolution_sample_w # noqa
self._view.set_q_resolution_shape_to_rectangular(is_rectangular)
def _set_on_view_q_rebin_string(self):
"""
Maps the q_1d_rebin_string of the model to the q_1d_step and q_1d_step_type property of the view.
"""
rebin_string = self._state_model.q_1d_rebin_string
# Extract the min, max and step and step type from the rebin string
elements = rebin_string.split(",")
        # If we have exactly three elements (min, step, max) then set the individual min/step/max fields on the view
if len(elements) == 3:
step_element = float(elements[1])
step = abs(step_element)
step_type = RangeStepType.Lin if step_element >= 0 else RangeStepType.Log
# Set on the view
self._view.q_1d_min_or_rebin_string = float(elements[0])
self._view.q_1d_max = float(elements[2])
self._view.q_1d_step = step
self._view.q_1d_step_type = step_type
else:
# Set the rebin string
self._view.q_1d_min_or_rebin_string = rebin_string
self._view.q_1d_step_type = self._view.VARIABLE
def _set_on_view(self, attribute_name):
attribute = getattr(self._state_model, attribute_name)
if attribute or isinstance(attribute,
bool): # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
setattr(self._view, attribute_name, attribute)
def _set_on_view_with_view(self, attribute_name, view):
attribute = getattr(self._state_model, attribute_name)
if attribute or isinstance(attribute,
bool): # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
setattr(view, attribute_name, attribute)
def _get_state_model_with_view_update(self):
"""
Goes through all sub presenters and update the state model based on the views.
Note that at the moment we have set up the view and the model such that the name of a property must be the same
        in the view and the model. This can be easily changed, but it also provides good cohesion.
"""
state_model = copy.deepcopy(self._state_model)
# If we don't have a state model then return None
if state_model is None:
return state_model
# Run tab view
self._set_on_state_model("zero_error_free", state_model)
self._set_on_state_model("save_types", state_model)
self._set_on_state_model("compatibility_mode", state_model)
self._set_on_state_model("merge_scale", state_model)
self._set_on_state_model("merge_shift", state_model)
self._set_on_state_model("merge_scale_fit", state_model)
self._set_on_state_model("merge_shift_fit", state_model)
self._set_on_state_model("merge_q_range_start", state_model)
self._set_on_state_model("merge_q_range_stop", state_model)
self._set_on_state_model("merge_mask", state_model)
self._set_on_state_model("merge_max", state_model)
self._set_on_state_model("merge_min", state_model)
# Settings tab
self._set_on_state_model("reduction_dimensionality", state_model)
self._set_on_state_model("reduction_mode", state_model)
self._set_on_state_model("event_slices", state_model)
self._set_on_state_model("event_binning", state_model)
self._set_on_state_model("wavelength_step_type", state_model)
self._set_on_state_model("wavelength_min", state_model)
self._set_on_state_model("wavelength_max", state_model)
self._set_on_state_model("wavelength_step", state_model)
self._set_on_state_model("wavelength_range", state_model)
self._set_on_state_model("absolute_scale", state_model)
self._set_on_state_model("z_offset", state_model)
# Adjustment tab
self._set_on_state_model("normalization_incident_monitor", state_model)
self._set_on_state_model("normalization_interpolate", state_model)
self._set_on_state_model("transmission_incident_monitor", state_model)
self._set_on_state_model("transmission_interpolate", state_model)
self._set_on_state_model("transmission_roi_files", state_model)
self._set_on_state_model("transmission_mask_files", state_model)
self._set_on_state_model("transmission_radius", state_model)
self._set_on_state_model("transmission_monitor", state_model)
self._set_on_state_model("transmission_mn_shift", state_model)
self._set_on_state_model_transmission_fit(state_model)
self._set_on_state_model("pixel_adjustment_det_1", state_model)
self._set_on_state_model("pixel_adjustment_det_2", state_model)
self._set_on_state_model("wavelength_adjustment_det_1", state_model)
self._set_on_state_model("wavelength_adjustment_det_2", state_model)
# Q tab
self._set_on_state_model_q_1d_rebin_string(state_model)
self._set_on_state_model("q_xy_max", state_model)
self._set_on_state_model("q_xy_step", state_model)
self._set_on_state_model("q_xy_step_type", state_model)
self._set_on_state_model("gravity_on_off", state_model)
self._set_on_state_model("gravity_extra_length", state_model)
self._set_on_state_model("use_q_resolution", state_model)
self._set_on_state_model("q_resolution_source_a", state_model)
self._set_on_state_model("q_resolution_sample_a", state_model)
self._set_on_state_model("q_resolution_source_h", state_model)
self._set_on_state_model("q_resolution_sample_h", state_model)
self._set_on_state_model("q_resolution_source_w", state_model)
self._set_on_state_model("q_resolution_sample_w", state_model)
self._set_on_state_model("q_resolution_delta_r", state_model)
self._set_on_state_model("q_resolution_collimation_length", state_model)
self._set_on_state_model("q_resolution_moderator_file", state_model)
self._set_on_state_model("r_cut", state_model)
self._set_on_state_model("w_cut", state_model)
# Mask
self._set_on_state_model("phi_limit_min", state_model)
self._set_on_state_model("phi_limit_max", state_model)
self._set_on_state_model("phi_limit_use_mirror", state_model)
self._set_on_state_model("radius_limit_min", state_model)
self._set_on_state_model("radius_limit_max", state_model)
# Beam Centre
self._beam_centre_presenter.set_on_state_model("lab_pos_1", state_model)
self._beam_centre_presenter.set_on_state_model("lab_pos_2", state_model)
return state_model
def _set_on_state_model_transmission_fit(self, state_model):
# Behaviour depends on the selection of the fit
if self._view.use_same_transmission_fit_setting_for_sample_and_can():
use_fit = self._view.transmission_sample_use_fit
fit_type = self._view.transmission_sample_fit_type
polynomial_order = self._view.transmission_sample_polynomial_order
state_model.transmission_sample_fit_type = fit_type if use_fit else FitType.NoFit
state_model.transmission_can_fit_type = fit_type if use_fit else FitType.NoFit
state_model.transmission_sample_polynomial_order = polynomial_order
state_model.transmission_can_polynomial_order = polynomial_order
# Wavelength settings
if self._view.transmission_sample_use_wavelength:
wavelength_min = self._view.transmission_sample_wavelength_min
wavelength_max = self._view.transmission_sample_wavelength_max
state_model.transmission_sample_wavelength_min = wavelength_min
state_model.transmission_sample_wavelength_max = wavelength_max
state_model.transmission_can_wavelength_min = wavelength_min
state_model.transmission_can_wavelength_max = wavelength_max
else:
# Sample
use_fit_sample = self._view.transmission_sample_use_fit
fit_type_sample = self._view.transmission_sample_fit_type
polynomial_order_sample = self._view.transmission_sample_polynomial_order
state_model.transmission_sample_fit_type = fit_type_sample if use_fit_sample else FitType.NoFit
state_model.transmission_sample_polynomial_order = polynomial_order_sample
# Wavelength settings
if self._view.transmission_sample_use_wavelength:
wavelength_min = self._view.transmission_sample_wavelength_min
wavelength_max = self._view.transmission_sample_wavelength_max
state_model.transmission_sample_wavelength_min = wavelength_min
state_model.transmission_sample_wavelength_max = wavelength_max
# Can
use_fit_can = self._view.transmission_can_use_fit
fit_type_can = self._view.transmission_can_fit_type
polynomial_order_can = self._view.transmission_can_polynomial_order
state_model.transmission_can_fit_type = fit_type_can if use_fit_can else FitType.NoFit
state_model.transmission_can_polynomial_order = polynomial_order_can
# Wavelength settings
if self._view.transmission_can_use_wavelength:
wavelength_min = self._view.transmission_can_wavelength_min
wavelength_max = self._view.transmission_can_wavelength_max
state_model.transmission_can_wavelength_min = wavelength_min
state_model.transmission_can_wavelength_max = wavelength_max
def _set_on_state_model_q_1d_rebin_string(self, state_model):
q_1d_step_type = self._view.q_1d_step_type
# If we are dealing with a simple rebin string then the step type is None
if self._view.q_1d_step_type is None:
state_model.q_1d_rebin_string = self._view.q_1d_min_or_rebin_string
else:
q_1d_min = self._view.q_1d_min_or_rebin_string
q_1d_max = self._view.q_1d_max
q_1d_step = self._view.q_1d_step
if q_1d_min and q_1d_max and q_1d_step and q_1d_step_type:
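                # Builds a Mantid-style rebin string "min,step,max" where a negative step
                # denotes logarithmic binning, e.g. 0.01, 0.02 (Log), 1.0 -> "0.01,-0.02,1.0"
                # (the numbers here are purely illustrative)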
q_1d_rebin_string = str(q_1d_min) + ","
q_1d_step_type_factor = -1. if q_1d_step_type is RangeStepType.Log else 1.
q_1d_rebin_string += str(q_1d_step_type_factor * q_1d_step) + ","
q_1d_rebin_string += str(q_1d_max)
state_model.q_1d_rebin_string = q_1d_rebin_string
def _set_on_state_model(self, attribute_name, state_model):
attribute = getattr(self._view, attribute_name)
if attribute is not None and attribute != '':
setattr(state_model, attribute_name, attribute)
def get_cell_value(self, row, column):
return self._view.get_cell(row=row, column=self.table_index[column], convert_to=str)
def _export_table(self, filewriter, rows):
"""
Take the current table model, and create a comma delimited csv file
:param filewriter: File object to be written to
:param rows: list of indices for non-empty rows
:return: Nothing
"""
for row in rows:
table_row = self._table_model.get_table_entry(row).to_batch_list()
batch_file_row = self._create_batch_entry_from_row(table_row)
filewriter.writerow(batch_file_row)
@staticmethod
def _create_batch_entry_from_row(row):
batch_file_keywords = ["sample_sans",
"output_as",
"sample_trans",
"sample_direct_beam",
"can_sans",
"can_trans",
"can_direct_beam",
"user_file"]
loop_range = min(len(row), len(batch_file_keywords))
new_row = [''] * (2 * loop_range)
for i in range(loop_range):
key = batch_file_keywords[i]
value = row[i]
new_row[2*i] = key
new_row[2*i + 1] = value
return new_row
# ------------------------------------------------------------------------------------------------------------------
# Settings
# ------------------------------------------------------------------------------------------------------------------
def _setup_instrument_specific_settings(self, instrument=None):
if not instrument:
instrument = self._view.instrument
if instrument == SANSInstrument.NoInstrument:
self._view.disable_process_buttons()
else:
instrument_string = get_string_for_gui_from_instrument(instrument)
ConfigService["default.instrument"] = instrument_string
self._view.enable_process_buttons()
self._view.set_instrument_settings(instrument)
self._beam_centre_presenter.on_update_instrument(instrument)
self._workspace_diagnostic_presenter.set_instrument_settings(instrument)
| gpl-3.0 | 6,618,090,460,761,512,000 | 43.602941 | 159 | 0.598417 | false | 3.948678 | false | false | false |
pytorch/fairseq | tests/test_iterators.py | 1 | 5433 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from fairseq.data import iterators
class TestIterators(unittest.TestCase):
def test_counting_iterator_index(self, ref=None, itr=None):
# Test the indexing functionality of CountingIterator
if ref is None:
assert itr is None
ref = list(range(10))
itr = iterators.CountingIterator(ref)
else:
assert len(ref) == 10
assert itr is not None
self.assertTrue(itr.has_next())
self.assertEqual(itr.n, 0)
self.assertEqual(next(itr), ref[0])
self.assertEqual(itr.n, 1)
self.assertEqual(next(itr), ref[1])
self.assertEqual(itr.n, 2)
itr.skip(3)
self.assertEqual(itr.n, 5)
self.assertEqual(next(itr), ref[5])
itr.skip(2)
self.assertEqual(itr.n, 8)
self.assertEqual(list(itr), [ref[8], ref[9]])
self.assertFalse(itr.has_next())
def test_counting_iterator_length_mismatch(self):
ref = list(range(10))
# When the underlying iterable is longer than the CountingIterator,
# the remaining items in the iterable should be ignored
itr = iterators.CountingIterator(ref, total=8)
self.assertEqual(list(itr), ref[:8])
# When the underlying iterable is shorter than the CountingIterator,
# raise an IndexError when the underlying iterable is exhausted
itr = iterators.CountingIterator(ref, total=12)
self.assertRaises(IndexError, list, itr)
def test_counting_iterator_take(self):
# Test the "take" method of CountingIterator
ref = list(range(10))
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
def test_grouped_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.GroupedIterator(x, 1)
self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
itr = iterators.GroupedIterator(x, 4)
self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
itr = iterators.GroupedIterator(x, 5)
self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
        # test the GroupedIterator also works correctly as a CountingIterator
x = list(range(30))
ref = list(iterators.GroupedIterator(x, 3))
itr = iterators.GroupedIterator(x, 3)
self.test_counting_iterator_index(ref, itr)
def test_sharded_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0)
self.assertEqual(list(itr), x)
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0)
self.assertEqual(list(itr), [0, 2, 4, 6, 8])
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1)
self.assertEqual(list(itr), [1, 3, 5, 7, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.assertEqual(list(itr), [0, 3, 6, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1)
self.assertEqual(list(itr), [1, 4, 7, None])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2)
self.assertEqual(list(itr), [2, 5, 8, None])
# test CountingIterator functionality
x = list(range(30))
ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0))
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.test_counting_iterator_index(ref, itr)
def test_counting_iterator_buffered_iterator_take(self):
ref = list(range(10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(buffered_itr), 5)
self.assertEqual(len(list(iter(buffered_itr))), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
ref = list(range(4, 10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr, start=4)
itr.take(5)
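        # take(5) with start=4 means only one element remains to be consumed from
        # the underlying buffered iterator, as the assertions below check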
self.assertEqual(len(itr), 5)
self.assertEqual(len(buffered_itr), 1)
self.assertEqual(next(itr), ref[0])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
if __name__ == "__main__":
unittest.main()
| mit | 3,728,939,549,714,096,600 | 38.656934 | 87 | 0.617891 | false | 3.341328 | true | false | false |
newmediamedicine/indivo_server_1_0 | indivo/views/reports/healthactionplan.py | 1 | 1903 | """
Indivo Views -- HealthActionPlan Message
"""
from django.http import HttpResponseBadRequest, HttpResponse
from indivo.lib.view_decorators import marsloader, DEFAULT_ORDERBY
from indivo.lib.query import FactQuery, DATE, STRING, NUMBER
from indivo.models import HealthActionPlan
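# Maps query-string filter names to (model field, data type) pairs consumed by FactQuery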
HEALTHACTIONPLAN_FILTERS = {
'name' : ('name', STRING),
'name_type' : ('name_type', STRING),
'name_value' : ('name_value', STRING),
'name_abbrev' : ('name_abbrerv', STRING),
'planType' : ('planType', STRING),
'plannedBy' : ('plannedBy', STRING),
'datePlanned' : ('datePlanned', DATE),
'dateExpires' : ('dateExpires', DATE),
'indication' : ('indication', STRING),
'instructions' : ('instructions', STRING),
'system' : ('system', STRING),
'system_type' : ('system_type', STRING),
'system_value' : ('system_value', STRING),
'system_abbrev' : ('system_abbrerv', STRING),
DEFAULT_ORDERBY : ('created_at', DATE)
}
HEALTHACTIONPLAN_TEMPLATE = 'reports/healthactionplan.xml'
def healthactionplan_list(*args, **kwargs):
"""For 1:1 mapping of URLs to views. Calls _healthactionplan_list"""
return _healthactionplan_list(*args, **kwargs)
def carenet_healthactionplan_list(*args, **kwargs):
"""For 1:1 mapping of URLs to views. Calls _healthactionplan_list"""
return _healthactionplan_list(*args, **kwargs)
@marsloader(query_api_support=True)
def _healthactionplan_list(request, group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record=None, carenet=None):
q = FactQuery(HealthActionPlan, HEALTHACTIONPLAN_FILTERS,
group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record, carenet)
try:
return q.render(HEALTHACTIONPLAN_TEMPLATE)
except ValueError as e:
return HttpResponseBadRequest(str(e))
| gpl-3.0 | 4,931,510,306,626,359,000 | 36.313725 | 71 | 0.676826 | false | 3.374113 | false | false | false |
texastribune/the-dp | tx_highered/thecb_importer/load_admissions.py | 1 | 5753 | #! /usr/bin/env python
#
# DEPRECATED in favor of just using the IPEDS data to avoid having to deal with
# scraping PDFs. We can't automate this, and it's costing us too much time to
# wrangle with this data source.
#
# The admissions data is loaded from PDF reports on the THECB website.
#
# Use pdftohtml to preprocess:
#
# find . -name "*.pdf" -exec sh -c 'pdftohtml -i -noframes -stdout "$1" > "$1.html"' -- {} \;
#
import glob
import HTMLParser
import os
import re
import sys
from collections import defaultdict
from decimal import Decimal
from pprint import pprint
from tx_highered.models import PublicAdmissions
from tx_highered.thecb_importer.utils import (InstitutionFuzzyMatcher,
create_or_update)
class Node(object):
html_parser = HTMLParser.HTMLParser()
def __init__(self, line):
self.data = line.strip().replace('<br>', '')
self.is_empty = False
self.is_number = False
self.is_institution = False
self.is_page_break = False
self.is_row_header = False
unescaped_data = self.html_parser.unescape(self.data)
# Mark nodes we don't care about as empty
if not self.data or 'BODY>' in self.data or 'HTML>' in self.data:
self.is_empty = True
# HR elements signify page breaks
elif self.data == '<hr>':
self.is_page_break = True
# Sometimes multiple numbers appear in the same textbox.
# We only need the last one since we only care about totals.
elif re.match(r'^[\d,]+(\s[\d,]+)*$', self.data):
self.is_number = True
last_number = self.data.split()[-1].replace(',', '')
self.data = int(last_number)
# Institutions are the only non-numeric uppercase lines
elif unescaped_data.upper() == unescaped_data:
self.is_institution = True
self.data = unescaped_data
elif self.data in ('Total Texas', 'Top 10%',
'Enrolled, other Texas public university'):
self.is_row_header = True
def __repr__(self):
return u'<Node: %r>' % self.data
class Parser(object):
def __init__(self, path):
self.path = path
self.data = defaultdict(dict)
# Parse year from path name
name = os.path.basename(path).replace('.pdf.html', '')
self.year = int(name.split('_')[1])
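        # e.g. a report named 'ADM_2012_report.pdf.html' yields year 2012
        # (the exact THECB file-name pattern is assumed here)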
# Store parser state
self.cache = []
self.in_body = False
self.institution = None
self.expected_field = None
def feed(self, line):
node = Node(line)
# print node
# The body begins after the first page break
if node.is_page_break:
self.in_body = True
self.institution = None
return
# Skip everything before the body
if not self.in_body:
return
# Return if the node is empty
if node.is_empty:
return
# Expect data after seeing an institution
if node.is_institution:
self.institution = node.data
# If we reach the end of a row and expect data, the last field
# of the row contains the value for the expected field.
if node.is_row_header and self.expected_field and self.cache:
institution_data = self.data[self.institution]
institution_data[self.expected_field] = self.cache[-1].data
self.expected_field = None
self.cache = []
# Cache numbers until finding an expected value
elif node.is_number:
self.cache.append(node)
# Set expected field from the row header
if not self.institution:
return
if node.data == 'Total Applicants':
self.expected_field = 'applied'
elif node.data == 'Total Accepted':
self.expected_field = 'accepted'
elif node.data == 'Total Enrolled':
self.expected_field = 'enrolled'
def parse(self):
for line in open(self.path):
self.feed(line)
def derive_rate(numerator, denominator):
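    """Return numerator/denominator as a percentage rounded to 2 decimals,
    or None when the rate would be undefined or greater than 100%."""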
if denominator == 0 or numerator > denominator:
return None
else:
return round(100.0 * numerator / denominator, 2)
def main(root):
for path in glob.glob(os.path.join(root, '*.pdf.html')):
parser = Parser(path)
parser.parse()
matcher = InstitutionFuzzyMatcher()
for institution, data in parser.data.iteritems():
attrs = dict(institution=institution, year=parser.year, **data)
# Derive acceptance and enrollment rates
acceptance_rate = derive_rate(data['accepted'], data['applied'])
enrollment_rate = derive_rate(data['enrolled'], data['accepted'])
# Create or update institution admissions for this year
institution = matcher.match(institution)
defaults = {
'year_type': 'fall',
'number_of_applicants': data['applied'],
'number_admitted': data['accepted'],
'number_admitted_who_enrolled': data['enrolled'],
'percent_of_applicants_admitted': acceptance_rate,
'percent_of_admitted_who_enrolled': enrollment_rate
}
obj, row_count = create_or_update(PublicAdmissions.objects,
institution=institution, year=parser.year,
defaults=defaults)
if obj:
print 'created %s %d admissions...' % (
institution.name, parser.year)
else:
print 'updated %s %d admissions...' % (
institution.name, parser.year)
if __name__ == '__main__':
main(sys.argv[1])
| apache-2.0 | -5,821,048,449,118,489,000 | 32.063218 | 93 | 0.585781 | false | 4.062853 | false | false | false |
jmargraf/agrippa | inputgen/inputgen.py | 1 | 2548 | #!/usr/bin/env python
from __future__ import print_function
import sys, getopt
import mol_io as io
import react
def main(argv):
# Initial Input
FileName = " "
pro1Name = " "
pro2Name = " "
GeoType = "mol"
Template = "template.txt"
Code = "orca"
Reaction = False
xyz = []
atom = []
charge = 0
spin = 1
# python inputgen.py -i [input] --mol --orca --temp [template.txt]
    try:
        opts,args = getopt.getopt(argv,"hi:",['mol','xyz','temp=','orca','pro1=','pro2=','react'])
    except getopt.GetoptError:
        print("inputgen.py -i [input] --mol --orca --temp [template.txt]")
        sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print("inputgen.py -i [input] {--mol --orca --temp [template.txt]}")
print(" -i : input name (w/o .mol/.xyz)")
print(" --mol : read .mol file (default)")
print(" --xyz : read .xyz file")
print(" --orca : write orca input file")
print(" --temp : read from specific template file (default is template.txt)")
print(" --react : generate input for reaction")
print(" --pro1 : name of first product")
print(" --pro2 : name of second product")
print(" -h : print this help")
print("")
sys.exit()
elif opt == "-i":
FileName = arg
elif opt == "--mol":
GeoType = "mol"
elif opt == "--temp":
Template = arg
elif opt == "--xyz":
GeoType = "xyz"
elif opt == "--react":
Reaction = True
elif opt == "--pro1":
pro1Name = arg
elif opt == "--pro2":
pro2Name = arg
if not Reaction: # Standard input from single geo
# open input file
if GeoType == "mol":
xyz,atom,charge,spin = io.ReadMol(FileName)
elif GeoType == "xyz":
xyz,atom,charge,spin = io.ReadXYZ(FileName)
if Reaction: # Generate reaction geometry first
xyz,atom,charge,spin = react.GenReaction(FileName,pro1Name,pro2Name)
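        # NOTE: reaction mode currently stops here, before the template is read
        # and the input file is written below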
exit()
print(charge)
print(spin)
print(atom)
print(xyz)
# read template
with open(Template) as temp:
keywords = temp.read().splitlines()
print(keywords)
# write input
if Code == "orca":
io.OrcaIn(FileName,keywords,atom,xyz,charge,spin)
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | -3,495,754,512,695,815,700 | 27 | 98 | 0.506672 | false | 3.758112 | false | false | false |
troopa81/Qats | resources/generateBindings/generateBindings.py | 1 | 18794 | #!/usr/bin/python
# -*- coding: utf-8 -*-
############################################################################
##
## Copyright (C) 2015 Cabieces Julien
## Contact: https://github.com/troopa81/Qats
##
## This file is part of Qats.
##
## Qats is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Qats is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Qats. If not, see <http://www.gnu.org/licenses/>.
##
############################################################################
import sys
import re
import os
import subprocess
import pprint
import CppHeaderParser
# pprint.PrettyPrinter(depth=6).pprint( self.classInherits )
def remove( content, startRe, endRe ):
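    """Repeatedly strip spans that start at a literal `startRe` match and end at
    the first `endRe` regex match that follows (used to drop preprocessor blocks
    the header parser cannot handle)."""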
# BUG PARSER
while(True):
index = content.find( startRe )
if ( index == -1 ):
return content
end = re.search(endRe, content[index:]).end()
content = content[:index] + content[index+end+1:]
class BindingGenerator:
def __init__(self):
self.globalEnums = {}
self.f = None
self.cppClass = None
self.hasPublicConstructor = False
self.isAbstract = False
self.classInherits = {}
self.inherited = "";
self.notGeneratedMethods = { "QObject" : [ "connect", "disconnect", "connect_functor", "find_children", "find_child" ] }
self.notGeneratedClass = [ "QObjectData", "QObjectUserData", "QSignalBlocker", "QWidgetData" ]
def findEnumFromValue( self, enumValueName ):
for (className, enums) in self.globalEnums.iteritems():
for enum in enums:
for enumValue in enum['values']:
if enumValue['name'] == enumValueName:
return ( className, enum )
return (None,None)
def findEnum( self, enumName ):
for (className, enums) in self.globalEnums.iteritems():
for enum in enums:
if 'name' in enum and enum['name'] == enumName:
return ( className, enum )
return (None,None)
def treatType( self, typeName ):
# BUG
typeName = re.sub( r"__", r"::", typeName)
if ( typeName == "ButtonRole" ):
print( "typeName=" + typeName )
#pprint.PrettyPrinter(depth=6).pprint( self.globalEnums )
enumClassName, enum = self.findEnum( typeName );
if enumClassName != None and enum != None:
return (enumClassName + "::" + typeName);
elif typeName in self.cppClass[ 'typedefs' ][ 'public' ]:
return self.cppClass['name'] + "::" + typeName
else:
return typeName
# return true if current class inherits from QObject
def inheritsQObject(self):
parentClass = self.cppClass[ 'name' ];
while parentClass in self.classInherits:
parentClass = self.classInherits[ parentClass ];
return parentClass == "QObject";
def generateScriptConstructor(self):
        # script constructor only if the class is not abstract and has a public constructor
if self.isAbstract or not self.hasPublicConstructor:
return None
constructorName = "script" + self.cppClass[ 'name' ] + "Constructor";
self.f.write("inline QScriptValue " + constructorName + "(QScriptContext *context, QScriptEngine *engine)\n");
self.f.write("{\n");
self.f.write("Q_UNUSED(context);\n");
# TODO manage parameters for constructor
#for method in self.cppClass[ 'methods' ]['public']:
# if method[ 'constructor' ]:
# print( "coucou" )
# for iParameter in range( 0, len(method['parameters'])) :
# print( "type=" + method['parameters'][ iParameter ]['type'] );
if self.inheritsQObject():
# TODO set the parent correctly. QWidget take QWidget as parent not QObject, so this need a cast
# self.f.write("QObject *parent = context->argument(0).toQObject();\n");
self.f.write( self.cppClass['name'] + " *object = new " + self.cppClass['name'] + "(0);\n");
self.f.write("return engine->newQObject(object, QScriptEngine::ScriptOwnership);\n");
else:
self.f.write( self.cppClass['name'] + " object;\n" );
self.f.write( "return engine->newVariant( QVariant( object ) );");
self.f.write("}\n\n");
return constructorName
def generateEngineRegistration( self, scriptConstructorName ):
self.f.write("static void registerToScriptEngine(QScriptEngine* engine)\n")
self.f.write("{\n")
for strType in [ self.cppClass[ 'name' ], self.cppClass[ 'name' ] + "*" ]:
self.f.write("engine->setDefaultPrototype(qMetaTypeId<");
self.f.write( strType );
self.f.write( ">(), engine->newQObject(new " + self.cppClass['name'] + "Prototype(engine)));\n" )
self.f.write("\n")
# script constructor only if class is not abstract
if scriptConstructorName :
self.f.write("QScriptValue ctor = engine->newFunction(" + scriptConstructorName + ");\n");
if self.inheritsQObject():
self.f.write("QScriptValue metaObject = engine->newQMetaObject(&" + self.cppClass[ 'name' ] + "::staticMetaObject" );
if scriptConstructorName :
self.f.write( ", ctor" );
self.f.write( ");\n");
# even if class is abstract we need an instance in order to access specific enum
self.f.write("engine->globalObject().setProperty(\"" + self.cppClass['name'] + "\", "
+ ( "metaObject" if self.inheritsQObject() else "ctor" ) + ");\n");
self.f.write("}\n")
self.f.write("\n")
def parseCppHeader( self, cppHeader, outputDir ):
for className in cppHeader.classes.keys():
self.cppClass = cppHeader.classes[ className ];
# Do not generate all classes...
if className in self.notGeneratedClass:
continue;
protoClassName = className + "Prototype"
# compute if class is abstract or not
self.isAbstract = False
for methods in self.cppClass[ 'methods' ].values():
for method in methods:
if method[ 'pure_virtual' ] :
self.isAbstract = True
break
self.hasPublicConstructor = False
for method in self.cppClass[ 'methods' ]['public']:
if method['constructor']:
self.hasPublicConstructor = True
break
print( "Generate " + protoClassName + "..." );
self.globalEnums[ className ] = self.cppClass[ 'enums' ][ 'public' ];
self.f = open( os.path.join( outputDir, protoClassName + ".h" ),'w')
## write licence
self.f.write("/****************************************************************************\n");
self.f.write("**\n");
self.f.write("** Copyright (C) 2015 Cabieces Julien\n");
self.f.write("** Contact: https://github.com/troopa81/Qats\n");
self.f.write("**\n");
self.f.write("** This file is part of Qats.\n");
self.f.write("**\n");
self.f.write("** Qats is free software: you can redistribute it and/or modify\n");
self.f.write("** it under the terms of the GNU Lesser General Public License as published by\n");
self.f.write("** the Free Software Foundation, either version 3 of the License, or\n");
self.f.write("** (at your option) any later version.\n");
self.f.write("**\n");
self.f.write("** Qats is distributed in the hope that it will be useful,\n");
self.f.write("** but WITHOUT ANY WARRANTY; without even the implied warranty of\n");
self.f.write("** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
self.f.write("** GNU Lesser General Public License for more details.\n");
self.f.write("**\n");
self.f.write("** You should have received a copy of the GNU Lesser General Public License\n");
self.f.write("** along with Qats. If not, see <http://www.gnu.org/licenses/>.\n");
self.f.write("**\n");
self.f.write("****************************************************************************/\n");
self.f.write("\n");
self.f.write("#ifndef _" + protoClassName.upper() + "_\n");
self.f.write("#define _" + protoClassName.upper() + "_\n");
# get possible inheritance
self.inherited = "";
for inheritedClass in self.cppClass['inherits']:
if inheritedClass['access'] == "public":
                    # not supposed to happen with Qt
                    # happens only for QWidget (keep only the first inherited class, which is QObject)
if self.inherited != "":
print( "Error : multiple inheritance, take first inherited class" );
else:
self.inherited = inheritedClass['class'] + "Prototype"
self.classInherits[ className ] = inheritedClass['class'];
self.f.write("\n")
self.f.write("#include <QObject>\n")
self.f.write("#include <QScriptable>\n")
self.f.write("#include <QScriptValue>\n")
self.f.write("#include <QScriptEngine>\n")
self.f.write("#include <" + className + ">\n")
self.f.write("\n")
if self.inherited != "":
self.f.write("#include \"" + self.inherited + ".h\"\n");
self.f.write("\n")
scriptConstructorName = self.generateScriptConstructor();
self.f.write("namespace qats\n")
self.f.write("{\n")
self.f.write("\n")
self.f.write("class " + protoClassName );
self.f.write( " : public " + ( self.inherited if self.inherited != "" else " QObject, public QScriptable") )
self.f.write( "\n" );
self.f.write("{\n")
self.f.write("Q_OBJECT\n")
self.f.write("\n")
self.f.write("public:\n")
self.f.write("\n")
self.generateEngineRegistration( scriptConstructorName );
self.f.write(protoClassName + "(QObject* parent = 0):"+ (self.inherited if self.inherited != "" else "QObject") + "(parent){}\n")
self.f.write("public slots:\n")
self.f.write("\n")
# public methods ...
for method in self.cppClass[ 'methods' ][ 'public' ]:
isStatic = method['static']
# do not treat constructor and destructor
notGeneratedMethods = self.notGeneratedMethods[ self.cppClass[ 'name' ] ] if self.cppClass['name'] in self.notGeneratedMethods else []
if method['constructor'] or method['destructor'] or method['name'].startswith( "operator" ) or method[ 'name' ] in notGeneratedMethods or '<' in method['name']:
continue
returnType = method['rtnType']
# BUG : remove static from type
if isStatic:
returnType = returnType[ returnType.find( "static") + 7:]
# compute return type
returnType = self.treatType( returnType )
self.f.write(returnType + " " + method['name'] + "(" )
# remove weird case of default value as a type (see QWidget::grab)
parameters = []
for iParameter in range( 0, len(method['parameters'])) :
if "(" not in method['parameters'][ iParameter ]['type']:
parameters.append( method['parameters'][ iParameter ] )
elif iParameter > 0 :
del parameters[ iParameter-1 ][ 'defaultValue' ]
for iParam in range(0, len(parameters)):
parameter = parameters[iParam]
paramType = self.treatType( parameter['type'] )
# bug in parser
if ( parameter['name'] == "&" ):
paramType += "&"
parameter['name'] = ""
if ( parameter['name'] == "" ):
parameter['name'] = "param" + str(iParam)
self.f.write(paramType + " " + parameter['name'])
# default value if any
if "defaultValue" in parameter :
enumClassName, enum = self.findEnumFromValue( parameter['defaultValue'] )
self.f.write(" = ");
if enumClassName != None and enum != None:
self.f.write( enumClassName + "::" + parameter['defaultValue'] );
else:
self.f.write(self.treatType( parameter['defaultValue'] ) )
if ( iParam < len(parameters)-1 ):
self.f.write(",")
self.f.write(")\n")
self.f.write("{\n")
if not isStatic:
self.f.write(className + " *object = qscriptvalue_cast<" + className + "*>(thisObject());\n")
if ( returnType != "void" ):
self.f.write("return ")
if isStatic:
self.f.write( className + "::" )
else:
self.f.write( "object->" )
self.f.write( method['name'] + "(" );
# method parameters in call ...
for iParam in range(0, len(parameters)):
self.f.write(parameters[iParam]['name'])
if ( iParam < len(parameters)-1 ):
self.f.write(",")
self.f.write( ");\n" );
self.f.write("}\n")
self.f.write("};\n")
self.f.write("}\n")
self.f.write("\n")
if className not in [ "QWidget" ]:
self.f.write("Q_DECLARE_METATYPE(" + className + "*)\n")
self.f.write("\n")
self.f.write("#endif\n");
f.close()
########################### main
if len(sys.argv) != 3:
print("[Usage] generateBindings <qt_include_dir> <output_dir>")
sys.exit(0)
qtDir = sys.argv[1]
outputDir = sys.argv[2]
# assume output dir is created
if not os.path.exists( outputDir ):
os.makedirs( outputDir )
try:
#"qtbase/src/corelib/kernel/qmetaobject.h"
qtFiles = [
"QtCore/qobject.h",
"QtWidgets/qwidget.h",
# "QtWidgets/qdialog.h",
# "QtWidgets/qmessagebox.h"
"QtWidgets/qabstractslider.h",
"QtWidgets/qslider.h",
# "qtbase/src/widgets/widgets/qlineedit.h",
# "qtbase/src/widgets/kernel/qapplication.h",
# "qtbase/src/widgets/itemviews/qtreeview.h",
# "qtbase/src/widgets/itemviews/qabstractitemview.h",
# "qtbase/src/corelib/itemmodels/qabstractitemmodel.h",
# "qtbase/src/widgets/widgets/qtoolbar.h"
# "qtbase/src/corelib/tools/qrect.h"
# "qtbase/src/corelib/tools/qpoint.h"
#"qtbase/src/corelib/itemmodels/qitemselectionmodel.h"
#"qtbase/src/widgets/widgets/qabstractscrollarea.h"
#"qtbase/src/corelib/kernel/qcoreapplication.h"
#"qtbase/src/gui/kernel/qevent.h"
# "qtbase/src/corelib/tools/qelapsedtimer.h"
# "qtbase/src/corelib/kernel/qtimer.h",
# "qtbase/src/widgets/widgets/qmenu.h",
#"qtbase/src/widgets/widgets/qcombobox.h",
#"qtbase/src/widgets/widgets/qscrollbar.h",
#"qtbase/src/widgets/widgets/qframe.h",
#"qtbase/src/widgets/widgets/qframe.h",
#"qtbase/src/widgets/kernel/qaction.h",
#"qtbase/src/corelib/io/qiodevice.h",
#"qtbase/src/corelib/io/qfiledevice.h",
#"qtbase/src/corelib/io/qfile.h"
#"qtbase/src/widgets/widgets/qabstractbutton.h"
]
generator = BindingGenerator()
for qtFile in qtFiles:
sourceFileName = os.path.join( qtDir, qtFile )
tmpPreprocessorFileName = "/tmp/tmpPreprocessorFile.h"
# generate file without include
scriptDir = os.path.abspath(os.path.dirname(__file__));
subprocess.call([ os.path.join( scriptDir, 'prepareFile.sh' ), sourceFileName])
sourceFile = open(tmpPreprocessorFileName,'r')
sourceContent = sourceFile.read();
sourceFile.close()
sourceContent = re.sub( r"(Q_PROPERTY.*)", r"\1;", sourceContent)
# BUG PARSER
sourceContent = sourceContent.replace( "Q_SLOTS", "" );
sourceContent = re.sub( r"(Q_PROPERTY.*)", r"\1;", sourceContent)
sourceContent = re.sub( r"::", r"__", sourceContent)
sourceContent = re.sub( r"static Q_DECL_DEPRECATED.*;", r"", sourceContent )
sourceContent = re.sub( r"Q_.*_EXPORT", r"", sourceContent )
sourceContent = re.sub( r"Q_DECLARE_FLAGS\((.*),(.*)\)", r"typedef QFlags<\2> \1;", sourceContent )
notdefs = [ "Q_COMPILER_DECLTYPE",
"(Q_COMPILER_DECLTYPE",
"QT_KEYPAD_NAVIGATION", "Q_OS_WINCE", "Q_WS_X11", "Q_WS_MAC",
"Q_QDOC", "QT_NO_QOBJECT"
]
for notdef in notdefs:
sourceContent = remove( sourceContent, "#ifdef " + notdef, "#endif" )
sourceContent = remove( sourceContent, "#ifndef " + notdef, "#endif" )
sourceContent = remove( sourceContent, "#if defined " + notdef, "#endif" )
# BUG PARSER
sourceContent = remove( sourceContent, "Q_SIGNALS", "public|protected|private|}" )
sourceContent = remove( sourceContent, "#if QT_DEPRECATED_SINCE", "#endif" )
sourceContent = remove( sourceContent, "Q_STATIC_ASSERT_X", ";" )
tmpFileName = "/tmp/tmpFile.h"
f = open(tmpFileName,'w')
f.write( sourceContent )
f.close()
cppHeader = CppHeaderParser.CppHeader( tmpFileName )
generator.parseCppHeader( cppHeader, outputDir )
except CppHeaderParser.CppParseError as e:
print(e)
sys.exit(1)
| lgpl-3.0 | 7,611,667,928,726,940,000 | 38.817797 | 176 | 0.543791 | false | 3.989387 | false | false | false |
ncullen93/pyBN | pyBN/classes/clustergraph.py | 1 | 5833 | """
******************
ClusterGraph Class
******************
This is a class for creating/manipulating Cluster Graphs,
and performing inference over them - currently the only
supported algorithm is Loopy Belief Propagation. Still,
the class structure is in place for easy addition of
any algorithms relying on the Cluster Graph framework.
NOTE: A cluster graph is a generalization of the clique tree
data structure - to generate a clique tree, you first generate
a cluster graph, then simply calculate a maximum spanning tree.
In other words, a clique tree can be considered as a special
type of cluster graph.
"""
__author__ = """Nicholas Cullen <[email protected]>"""
import copy
import numpy as np
import pandas as pd
import networkx as nx
from pyBN.classes.cliquetree import Clique
# Factor and Factorization are used below but were not imported here; the
# module paths are assumed from the pyBN package layout and may need adjusting.
from pyBN.classes.factor import Factor
from pyBN.classes.factorization import Factorization
class ClusterGraph(object):
"""
ClusterGraph Class
"""
def __init__(self, bn):
"""
Initialize a ClusterGraph object
"""
        self.BN = bn
self.V = {} # key = cluster index, value = Cluster objects
self.E = []
self.G = None
        self.initialize_graph()
self.beliefs = {} # dict where key = cluster idx, value = belief cpt
def initialize_graph(self):
"""
Initialize the structure of the cluster graph.
"""
# generate graph structure
self.bethe()
# initialize beliefs
for clique in self.V.values():
clique.compute_psi()
# initialize messages to 1
self.initialize_messages()
def bethe(self):
"""
Generate Bethe cluster graph structure.
"""
self.V = {}
self.E = []
factorization = Factorization(self.BN)
prior_dict = {}
for factor in factorization.f_list:
# if factor is just a prior (i.e. already added as rv)
if len(factor.scope) == 1:
#self.V[len(self.V)] = Clique(scope=factor.scope)
#self.V[len(self.V)-1].factors = [factor]
prior_dict[factor.var] = factor
if len(factor.scope) > 1:
self.V[len(self.V)] = Clique(scope=factor.scope)
self.V[len(self.V)-1].factors = [factor] # assign the factor
sep_len = len(self.V)
# First, add all individual random variables
for rv in self.BN.V:
# if rv is a prior, don't add it
if rv in prior_dict.keys():
factor = prior_dict[rv]
self.V[len(self.V)] = Clique(scope=factor.scope)
self.V[len(self.V)-1].factors = [factor]
else:
self.V[len(self.V)] = Clique(scope={rv})
# create a new initial factor since it wont have one
new_factor = Factor(BN=self.BN, var=rv, init_to_one=True)
self.V[len(self.V)-1].factors = [new_factor]
for i in range(sep_len):
for j in range(sep_len,len(self.V)):
if self.V[j].scope.issubset(self.V[i].scope):
self.E.append((i,j))
new_G = nx.Graph()
new_G.add_edges_from(self.E)
self.G = new_G
def initialize_messages(self):
"""
For each edge (i-j) in the ClusterGraph,
set delta_(i-j) = 1 and
set delta_(j-i) = 1.
(i.e. send a message from each parent to every child where the
message is a df = 1)
"""
for cluster in self.V:
for neighbor in self.G.neighbors(cluster):
self.V[cluster].send_initial_message(self.V[neighbor])
def collect_beliefs(self):
self.beliefs = {}
for cluster in self.V:
self.V[cluster].collect_beliefs()
#print('Belief ' , cluster , ' : \n', self.V[cluster].belief.cpt)
self.beliefs[cluster] = self.V[cluster].belief
def loopy_belief_propagation(self, target, evidence, max_iter=100):
"""
This is Message Passing (Loopy Belief Propagation) over a cluster graph.
It is Sum-Product Belief Propagation in a cluster graph as shown in
Koller p.397
Notes:
1. Definitely a problem due to normalization (prob vals way too small)
2. Need to check the scope w.r.t. messages.. all clusters should not
be accumulating rv's in their scope over the course of the algorithm.
"""
# 1: Moralize the graph
# 2: Triangluate
# 3: Build a clique tree using max spanning
# 4: Propagation of probabilities using message passing
# creates clique tree and assigns factors, thus satisfying steps 1-3
cgraph = copy.copy(self)
G = cgraph.G
edge_visit_dict = dict([(i,0) for i in cgraph.E])
iteration = 0
while not cgraph.is_calibrated():
if iteration == max_iter:
break
if iteration % 50 == 0:
print('Iteration: ' , iteration)
for cluster in cgraph.V.values():
cluster.collect_beliefs()
# select an edge
e_idx = np.random.randint(0,len(cgraph.E))
edge_select = cgraph.E[e_idx]
p_idx = np.random.randint(0,2)
parent_edge = edge_select[p_idx]
child_edge = edge_select[np.abs(p_idx-1)]
print(parent_edge , child_edge)
# send a message along that edge
cgraph.V[parent_edge].send_message(cgraph.V[child_edge])
iteration += 1
print('Now Collecting Beliefs..')
self.collect_beliefs()
self.BN.ctree = self
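# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The loader name 'read_bn' and the network/variable names below are
# assumptions about the rest of the pyBN package -- adapt them to the real API.
#
#     from pyBN import read_bn
#     bn = read_bn('cancer.bif')          # any discrete Bayesian network
#     cg = ClusterGraph(bn)               # builds the Bethe cluster graph,
#                                         # computes psi and initial messages
#     cg.loopy_belief_propagation(target='Cancer', evidence={'Smoker': 1})
#     cg.collect_beliefs()                # cg.beliefs: cluster index -> belief
# ---------------------------------------------------------------------------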
| mit | -6,601,447,194,951,848,000 | 32.720238 | 84 | 0.548946 | false | 3.873174 | false | false | false |
lfairchild/PmagPy | programs/qqunf.py | 2 | 1916 | #!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
"""
NAME
qqunf.py
DESCRIPTION
makes qq plot from input data against uniform distribution
SYNTAX
qqunf.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
"""
fmt,plot='svg',0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
elif '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
input=f.readlines()
Data=[]
for line in input:
line.replace('\n','')
if '\t' in line: # read in the data from standard input
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
Data.append(float(rec[0]))
#
if len(Data) >=10:
QQ={'unf1':1}
pmagplotlib.plot_init(QQ['unf1'],5,5)
pmagplotlib.plot_qq_unf(QQ['unf1'],Data,'QQ-Uniform') # make plot
else:
print('you need N> 10')
sys.exit()
pmagplotlib.draw_figs(QQ)
files={}
for key in list(QQ.keys()):
files[key]=key+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
        titles['unf1']='Q-Q Plot Against a Uniform Distribution'
        QQ = pmagplotlib.add_borders(QQ,titles,black,purple)
pmagplotlib.save_plots(QQ,files)
elif plot==1:
files['qq']=file+'.'+fmt
pmagplotlib.save_plots(QQ,files)
else:
ans=input(" S[a]ve to save plot, [q]uit without saving: ")
if ans=="a": pmagplotlib.save_plots(QQ,files)
if __name__ == "__main__":
main()
| bsd-3-clause | -5,633,176,725,005,005,000 | 25.985915 | 74 | 0.566806 | false | 3.421429 | false | false | false |
kellerberrin/OSM-QSAR | OSMBase.py | 1 | 6127 | # MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
from __future__ import absolute_import, division, print_function, unicode_literals
from OSMModelData import OSMModelData
# ============================================================================
# A register of classifier models implemented as a dictionary
# ============================================================================
__modelClassRegistry__ = {} # global dictionary of classification models.
# ================================================================================
# A meta class to automatically register model classes with __modelClassRegistry__
# ================================================================================
class ModelMetaClass(type):
def __new__(cls, class_name, bases, attrs):
class_obj = super(ModelMetaClass, cls).__new__(cls, class_name, bases, attrs)
__modelClassRegistry__[class_name] = class_obj
return class_obj
# ============================================================================
# Utility functions to enumerate registered classifier model classes
# ============================================================================
def get_model_class(class_name):
class_obj = None
if class_name in __modelClassRegistry__:
class_obj = __modelClassRegistry__[class_name]
return class_obj
def get_model_instance(class_name, *args):
return get_model_class(class_name)(*args)
def get_model_method(class_name, method_name):
method = None
class_obj = get_model_class(class_name)
if class_obj is not None:
if method_name in class_obj.__dict__:
method = class_obj.__dict__[method_name]
return method
# Returns a list of model instances (only models with postfix defined).
def get_model_instances(args, log):
model_instances = []
for class_name in __modelClassRegistry__:
extfn = get_model_method(class_name, "model_postfix")
if extfn is not None:
instance = get_model_instance(class_name, args, log)
model_instances.append(instance)
return model_instances
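# Illustrative sketch (added; not part of the original file): a concrete model
# registers itself simply by declaring ModelMetaClass as its metaclass -- the
# class name, postfix and method bodies below are hypothetical, real examples
# live in OSMTemplate.py and OSMSequential.py.
#
#     class OSMExampleModel(OSMBaseModel):
#         __metaclass__ = ModelMetaClass         # added to the registry on definition
#         def model_postfix(self): return "ex"   # lets get_model_instances() include it
#         def model_name(self): return "Example Model"
#         def model_define(self): return None    # build and return the underlying model
#         def model_train(self): pass            # fit the model to self.data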
# ============================================================================
# This is a virtual base classification model that is inherited by
# OSM classification models using "from OSMBaseModel import OSMBaseModel".
# See example code in "OSMTemplate.py" and "OSMSequential.py"
# ============================================================================
class OSMBaseModel(object):
def __init__(self, args, log):
# Shallow copies of the runtime environment.
self.log = log
self.args = args
#####################################################################################
#
# Local member functions that call virtual member functions defined
# elsewhere in the object hierarchy. All functions prefixed "model_" are virtual.
#
#####################################################################################
# Perform the classification, this is the mainline function.
def classify(self, data):
self.initialize(data)
self.model_write()
def initialize(self, data):
self.raw_data = data #The entire dataset for recursive models.
self.data = OSMModelData(self.args, self.log, self, data) # create a "model-centric" view of the data.
if self.args.shuffle >= 0:
self.data.stratified_crossval(self.args.shuffle) # shuffle the test and training data if flag set.
self.model = self.create_model()
self.log.info("Begin Training %s Model", self.model_name())
self.model_train()
self.log.info("End Training %s Model", self.model_name())
self.model_classification_results()
self.model_training_summary()
def create_model(self):
if self.args.loadFilename != "noload":
model = self.model_read()
else:
self.log.info("+++++++ Creating %s Model +++++++", self.model_name())
model = self.model_define()
return model
def model_arguments(self): return self.arguments
#####################################################################################
#
# Virtual member functions redefined elsewhere in the object hierarchy
#
#####################################################################################
def model_is_regression(self): return False # re-defined in OSMRegression
def model_is_classifier(self): return False # re-defined in OSMClassification
def model_is_unsupervised(self): return False # re-defined in OSMUnsupervised
# Default for any model without graphics functions.
def model_graphics(self): pass
# Redefine these if model I/O is defined.
def model_write(self): pass
def model_read(self): return self.model_define()
def model_epochs(self): return 0
def model_evaluate(self, data): return []
def model_training_summary(self): pass
def model_analytics(self, data): return None
| mit | 7,722,974,059,027,454,000 | 35.470238 | 111 | 0.579729 | false | 4.66997 | false | false | false |
wolfiex/DSMACC-testing | dsmacc/run/mpiout.py | 1 | 7574 | from uf90 import readfun
from mpi4py import MPI
import os,sys,time,re
import numpy as np
comm = MPI.COMM_WORLD
rank = comm.rank # The process ID (integer 0-3 for 4-process run)
#universe_size=comm.Get_attr(MPI.UNIVERSE_SIZE)
try:
soft=int(MPI.INFO_ENV.get("soft"))
except:
print('cant find MPI soft, using 2 cores')
soft = 2
#maxprocs= int(MPI.INFO_ENV.get("maxprocs"))
#if sys.argv[1] != 'ignore':
'''
try:
ncores = int(os.popen('echo $NCPUS').read())
except:
sys.exit('MPI_DSMACC:Use a Queue')
'''
ncpus = soft# int(comm.Get_attr(MPI.UNIVERSE_SIZE)) #int(os.popen('echo $NCPUS').read())
print(('ncpu rank', ncpus , rank , soft))
if ncpus <2 :
ncpus = 2
#sys.exit('MPI_DSMACC needs more cores: Use a Queue')
if ncpus > 130:
sys.exit('I dont believe you are running DSMACC on %s cores, use a queue'%ncpus)
#if (not os.path.exists('./model') & runsaved==0): sys.exit('No model file found. Please run "make kpp" followed by "make"')
#filename = sys.argv[1]
obs=False
groups = None
debug=None #'for broadcast'
savelist = ['spec','rate','flux','vdot','jacsp']
for i in sys.argv[1:]:
if i=='--obs':
if rank==0:
obs = int(tuple(open('include.obs'))[0].strip().replace('!obs:',''))
print('observations being used, number of obs: ',int(obs))
elif i == '--spinup':
obs = -1
print('Spinup period active')
if '.h5' in i :
filename = i.strip()
if '--debug' in i:
debug = True
#print ('dsfds',__file__,os.popen('pwd').read())
try:
if rank == 0:
###read args
extend = True
rewind = False
print("\033]0; running dsmacc... \007")
#### jacheader ###
import h5py
hf = h5py.File(filename, 'a')
ids = ''.join( reversed(list(open('model_Parameters.f90').readlines() ) )).replace(' ','')
ids = re.findall('ind_([\w\d]+)=(\d+)',ids)
ids = dict(([key,value] for value,key in ids))
jacfile = ''.join( open('model_Jacobian.f90').readlines() ).replace(' ','')
edges = re.findall('JVS\(\d+\)=Jac_FULL\((\d+),(\d+)\)\\n*JVS\(\d+\)',jacfile)
edges = ['->'.join([ids[i[1]],ids[i[0]]]) for i in edges]
print('edges:',len(edges))
### end jacheader ###
if not debug:
os.system(' touch temp.txt && rm temp.txt')
debug = '>>temp.txt'
head= hf.attrs['ictime'] + '\n' + '!'.join(['%15s'%i.decode('utf-8') for i in hf['icspecs']])+ '\n' + '!'.join(['%15s'%i for i in hf['icconst']])
############################################
###hf.attrs['ictime']=1000
##################### DEL
print('duration' , hf.attrs['ictime'])
#print (np.array(head))
np.savetxt('Init_cons.dat', hf['icruns'], fmt='%15e', delimiter='!', newline='\n', header= head,comments='')
#print(os.popen('less Init_cons.dat').read())
groups = [[int(item.attrs['id']),item.name] for item in list(hf.values()) if isinstance(item, h5py.Group)]
sys.stdout.flush()
comm.Barrier()
#print ('barrier')
debug = comm.bcast(debug,root=0)
groups = comm.bcast(groups,root=0)
obs = comm.bcast(obs,root=0)
lgroups = len(groups)
#sys.stdout.flush()
#print ('barrier:bcast')
comm.Barrier()
n=rank-1
if rank>0:
while n < lgroups:
g = groups[n]
#set the model
model='model'
if '-' in g[1]:
if runsaved: model='save/exec/%s/model'%(g[1].split('-')[-1])
else: description = g[1].split('-')[0]
#run cmd
version = os.popen('./%s 0 0 --version'%(model)).read()
run ='./%s %d %d %s'%(model,int(g[0]),obs,debug)
print('\n'+ run, ' of version ' , version) ;
##the actual run
start = time.strftime("%s");os.system(run)
wall = int(time.strftime("%s")) - int(start)
#return data
data = {'wall':wall,'group':g[1],'vers':version.strip(),'id':g[0]}
comm.isend(data, 0,tag=n)
#next task
n+=(ncpus-1)
else:
for i in range(lgroups):
print('Progress: %02d '%((float(i)/lgroups)*100.))
req = comm.recv(source=MPI.ANY_SOURCE,tag=MPI.ANY_TAG)
#req.Wait()
g = hf[req['group']]
print('Finished' , req, '. Cleaning and Saving.')
g.attrs['version'] = req['vers']
g.attrs['wall']= req['wall']
for dataset in savelist:
data = readfun('Outputs/%s.%s'%(req['id'],dataset))
if data[1].shape[0] == 0:
print(( 'no values found, skipping: ', dataset))
continue
if dataset == 'jacsp':
dataarr = ['TIME']
dataarr.extend(edges)
elif dataset == 'vdot':
dataarr = [ids[str(i+1)] for i in range(len(data[1][1]))]
else:
dataarr = data[0].split(',')
                print(data[1].shape,len(dataarr),dataset)
                # mask out result columns that are all zero
mask = np.array(data[1].sum(axis=0))
if dataset == 'spec':
mask[:12] = 1.
elif dataset == 'rate':
#only save reaction which contain species
match = re.compile(r'\b[\d\.]*(\w+)\b')
fltr=set(fltr)
keep = [len(set(match.findall(i))-fltr)==0 for i in dataarr]
try: mask *= np.array(keep)
except:None
mask = np.where(mask)
fltr = np.array(dataarr)[mask]
g.attrs[dataset + 'head'] = ','.join(fltr)
data[1] = np.squeeze(data[1][...,mask],axis = 1)
print(data[1].shape,dataset)
try: g[dataset]
except:extend=False
if not extend :
g.create_dataset(dataset, data=data[1] , chunks=True,maxshape=(None,None))
else:
print('already saved')
#print g[dataset]
#g[dataset] = g[dataset].extend(data[1]) ### if exists extend this
#use lines below
#g[dataset].resize((g[dataset].shape[0] + data[1].shape[0]),axis=0)
#g[dataset][-data[1].shape[0]:] = data[1]
### move status bar to here !!!
#print g[dataset]
#print req,g.items()
sys.stderr.flush()
## Catch Everything Up!
sys.stdout.flush()
comm.Barrier()
if rank ==0 :
print("\033]0; Simulation Finished \007")
hf.close()
print('written' , filename)
except Exception as e:
#if rank ==0 :
# hf.close()
print('Failed run',e)
import traceback
sys.stdout.flush()
traceback.print_exc()
comm.Abort()
| gpl-3.0 | -392,247,076,921,217,150 | 26.146953 | 153 | 0.460523 | false | 3.673133 | false | false | false |
biggles-plot/biggles | biggles/libplot/tex2libplot.py | 2 | 11761 | #
# $Id: tex2libplot.py,v 1.5 2002/08/18 22:04:07 mrnolta Exp $
#
# Copyright (C) 2000 Mike Nolta <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# This is just a quick and dirty converter from simple TeX strings
# to libplot Hershey font strings. Basically just a lookup table.
#
import re
import string
class TeXLexer(object):
re_control_sequence = re.compile(r"^\\[a-zA-Z]+[ ]?|^\\[^a-zA-Z][ ]?")
def __init__(self, str):
self.str = str
self.len = len(str)
self.pos = 0
self.token_stack = []
def get_token(self):
if self.pos == self.len:
return None
if len(self.token_stack) > 0:
return self.token_stack.pop()
str = self.str[self.pos:]
m = self.re_control_sequence.match(str)
if m is not None:
token = m.group()
self.pos = self.pos + len(token)
# consume trailing space
if len(token) > 2 and token[-1] == ' ':
token = token[:-1]
else:
token = str[0]
self.pos = self.pos + 1
return token
def put_token(self, token):
self.token_stack.append(token)
def peek(self):
token = self.get_token()
self.put_token(token)
return token
_common_token_dict = {
r'\\': '\\',
r'\$': r'$',
r'\%': r'%',
r'\#': r'#',
r'\&': r'&',
# r'\~' : r'~',
r'\{': r'{',
r'\}': r'}',
r'\_': r'_',
# r'\^' : r'^',
r'~': r' ',
r'\/': r'\r^',
# special letters (p52)
# r'\oe' : r'',
# r'\OE' : r'',
r'\ae': r'\ae',
r'\AE': r'\AE',
r'\aa': r'\oa',
r'\AA': r'\oA',
r'\o': r'\/o',
r'\O': r'\/O',
# r'\l' : r'',
# r'\L' : r'',
r'\ss': r'\ss',
# ignore stray brackets
r'{': r'',
r'}': r'',
}
_text_token_dict = {
## punctuation (p52)
r'\`': r'\`',
r"\'": r"\'",
r'\^': r'\^',
r'\"': r'\:',
r'\~': r'\~',
r'\c': r'\,',
# non-math symbols (p438)
r'\S': r'\sc',
r'\P': r'\ps',
r'\dag': r'\dg',
r'\ddag': r'\dd',
}
_math_token_dict = {
r'*': r'\**',
# spacing
# r' ' : r'',
r'\ ': r' ',
r'\quad': r'\r1', # 1 em
r'\qquad': r'\r1\r1', # 2 em
r'\,': r'\r6', # 3/18 em
# r'\>' : r'', # 4/18 em
# r'\;' : r'', # 5/18 em
r'\!': r'\l6', # -1/6 em
# lowercase greek
r'\alpha': r'\*a',
r'\beta': r'\*b',
r'\gamma': r'\*g',
r'\delta': r'\*d',
r'\epsilon': r'\*e',
# r'\varepsilon' : r'',
r'\zeta': r'\*z',
r'\eta': r'\*y',
r'\theta': r'\*h',
r'\vartheta': r'\+h',
r'\iota': r'\*i',
r'\kappa': r'\*k',
r'\lambda': r'\*l',
r'\mu': r'\*m',
r'\nu': r'\*n',
r'\xi': r'\*c',
r'\pi': r'\*p',
# r'\varpi' : r'',
r'\rho': r'\*r',
# r'\varrho' : r'',
r'\sigma': r'\*s',
r'\varsigma': r'\ts',
r'\tau': r'\*t',
r'\upsilon': r'\*u',
r'\phi': r'\*f',
r'\varphi': r'\+f',
r'\chi': r'\*x',
r'\psi': r'\*q',
r'\omega': r'\*w',
# uppercase greek
r'\Alpha': r'\*A',
r'\Beta': r'\*B',
r'\Gamma': r'\*G',
r'\Delta': r'\*D',
r'\Epsilon': r'\*E',
r'\Zeta': r'\*Z',
r'\Eta': r'\*Y',
r'\Theta': r'\*H',
r'\Iota': r'\*I',
r'\Kappa': r'\*K',
r'\Lambda': r'\*L',
r'\Mu': r'\*M',
r'\Nu': r'\*N',
r'\Xi': r'\*C',
r'\Pi': r'\*P',
r'\Rho': r'\*R',
r'\Sigma': r'\*S',
r'\Tau': r'\*T',
r'\Upsilon': r'\*U',
r'\Phi': r'\*F',
r'\Chi': r'\*X',
r'\Psi': r'\*Q',
r'\Omega': r'\*W',
# miscellaneous
r'\aleph': r'\Ah',
r'\hbar': r'\hb',
r'\ell': r'\#H0662',
r'\wp': r'\wp',
r'\Re': r'\Re',
r'\Im': r'\Im',
r'\partial': r'\pd',
r'\infty': r'\if',
r'\prime': r'\fm',
r'\emptyset': r'\es',
r'\nabla': r'\gr',
r'\surd': r'\sr',
# r'\top' : r'',
# r'\bot' : r'',
r'\|': r'\||',
r'\angle': r'\/_',
# r'\triangle' : r'',
r'\backslash': r'\\',
r'\forall': r'\fa',
r'\exists': r'\te',
r'\neg': r'\no',
# r'\flat' : r'',
# r'\natural' : r'',
# r'\sharp' : r'',
r'\clubsuit': r'\CL',
r'\diamondsuit': r'\DI',
r'\heartsuit': r'\HE',
r'\spadesuit': r'\SP',
r'\int': r'\is',
# binary operations
r'\pm': r'\+-',
r'\mp': r'\-+',
# r'\setminus' : r'',
r'\cdot': r'\md',
r'\times': r'\mu',
r'\ast': r'\**',
# r'\star' : r'',
# r'\diamond' : r'',
# r'\circ' : r'',
r'\bullet': r'\bu',
r'\div': r'\di',
r'\cap': r'\ca',
r'\cup': r'\cu',
# r'\uplus' : r'',
# r'\sqcap' : r'',
# r'\sqcup' : r'',
# r'\triangleleft' : r'',
# r'\triangleright' : r'',
# r'\wr' : r'',
# r'\bigcirc' : r'',
# r'\bigtriangleup' : r'',
# r'\bigtriangledown' : r'',
# r'\vee' : r'',
# r'\wedge' : r'',
r'\oplus': r'\c+',
# r'\ominus' : r'',
r'\otimes': r'\c*',
# r'\oslash' : r'',
r'\odot': r'\SO',
r'\dagger': r'\dg',
r'\ddagger': r'\dd',
# r'\amalg' : r'',
# relations
r'\leq': r'\<=',
# r'\prec' : r'',
# r'\preceq' : r'',
r'\ll': r'<<',
r'\subset': r'\SB',
# r'\subseteq' : r'',
# r'\sqsubseteq' : r'',
r'\in': r'\mo',
# r'\vdash' : r'',
# r'\smile' : r'',
# r'\frown' : r'',
r'\geq': r'\>=',
# r'\succ' : r'',
# r'\succeq' : r'',
r'\gg': r'>>',
r'\supset': r'\SS',
# r'\supseteq' : r'',
# r'\sqsupseteq' : r'',
# r'\ni' : r'',
# r'\dashv' : r'',
r'\mid': r'|',
r'\parallel': r'\||',
r'\equiv': r'\==',
r'\sim': r'\ap',
r'\simeq': r'\~-',
# r'\asymp' : r'',
r'\approx': r'\~~',
r'\cong': r'\=~',
# r'\bowtie' : r'',
r'\propto': r'\pt',
# r'\models' : r'',
# r'\doteq' : r'',
r'\perp': r'\pp',
# arrows
r'\leftarrow': r'\<-',
r'\Leftarrow': r'\lA',
r'\rightarrow': r'\->',
r'\Rightarrow': r'\rA',
r'\leftrightarrow': r'\<>',
r'\Leftrightarrow': r'\hA',
# r'\mapsto' : r'',
# r'\hookleftarrow' : r'',
# r'\leftharpoonup' : r'',
# r'\leftharpoondown' : r'',
# r'\rightleftharpoons' : r'',
# ...
r'\uparrow': r'\ua',
r'\Uparrow': r'\uA',
r'\downarrow': r'\da',
r'\Downarrow': r'\dA',
# r'\updownarrow' : r'',
# r'\Updownarrow' : r'',
# r'\nearrow' : r'',
# r'\searrow' : r'',
# r'\swarrow' : r'',
# r'\nwarrow' : r'',
# openings
r'\lbrack': r'[',
r'\lbrace': r'{',
r'\langle': r'\la',
# r'\lfloor' : r'',
# r'\lceil' : r'',
# closings
r'\rbrack': r']',
r'\rbrace': r'}',
r'\rangle': r'\ra',
# r'\rfloor' : r'',
# r'\rceil' : r'',
# alternate names
r'\ne': r'\!=',
r'\neq': r'\!=',
r'\le': r'\<=',
r'\ge': r'\>=',
r'\to': r'\->',
r'\gets': r'\<-',
# r'\owns' : r'',
r'\land': r'\AN',
r'\lor': r'\OR',
r'\lnot': r'\no',
r'\vert': r'|',
r'\Vert': r'\||',
# extensions
r'\degree': r'\de',
r'\deg': r'\de',
r'\degr': r'\de',
r'\arcdeg': r'\de',
}
def map_text_token(token):
    if token in _text_token_dict:
return _text_token_dict[token]
else:
return _common_token_dict.get(token, token)
def map_math_token(token):
    if token in _math_token_dict:
return _math_token_dict[token]
else:
return _common_token_dict.get(token, token)
def math_group(lexer):
output = ''
bracketmode = 0
while 1:
token = lexer.get_token()
if token is None:
break
if token == '{':
bracketmode = 1
elif token == '}':
break
else:
output = output + map_math_token(token)
if not bracketmode:
break
return output
font_code = [r'\f0', r'\f1', r'\f2', r'\f3']
def tex2libplot(str):
output = ''
mathmode = 0
font_stack = []
font = 1
lexer = TeXLexer(str)
while 1:
token = lexer.get_token()
if token is None:
break
append = ''
if token == '$':
mathmode = not mathmode
elif token == '{':
font_stack.append(font)
elif token == '}':
old_font = font_stack.pop()
if old_font != font:
font = old_font
append = font_code[font]
elif token == r'\rm':
font = 1
append = font_code[font]
elif token == r'\it':
font = 2
append = font_code[font]
elif token == r'\bf':
font = 3
append = font_code[font]
elif not mathmode:
append = map_text_token(token)
elif token == '_':
append = r'\sb' + math_group(lexer) + r'\eb'
if lexer.peek() == '^':
append = r'\mk' + append + r'\rt'
elif token == '^':
append = r'\sp' + math_group(lexer) + r'\ep'
if lexer.peek() == '_':
append = r'\mk' + append + r'\rt'
else:
append = map_math_token(token)
output = output + append
return output
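# Example (illustrative): tex2libplot(r'$\alpha$ vs $\beta$') returns the
# Hershey string  \*a vs \*b  -- Greek letters inside $...$ are mapped through
# _math_token_dict, while ordinary text passes through _text_token_dict /
# _common_token_dict unchanged.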
| gpl-2.0 | -7,160,043,684,407,754,000 | 26.287703 | 74 | 0.357453 | false | 3.170936 | false | false | false |
uq-eresearch/uqam | location/models.py | 1 | 8060 | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import slugify
from exceptions import IllegalMove, SameLevelMove, WrongLevelMove
slug_length = 50
class LocationBase(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField(help_text='Unique identifier. May be used in URLs.', max_length=slug_length)
description = models.CharField(max_length=255, blank=True)
gn_name = models.CharField(max_length=100,
help_text="GeoNames Name", blank=True)
gn_id = models.CharField(max_length=20,
help_text="GeoNames ID", blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
ordering = ['name']
abstract = True
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)[:slug_length]
super(LocationBase, self).save(*args, **kwargs)
def get_kml_coordinates(self):
return "%s,%s,0" % (self.longitude, self.latitude)
@models.permalink
def get_absolute_url(self):
#import ipdb; ipdb.set_trace()
contenttype = ContentType.objects.get_for_model(self).model
return ('view_geoloc', [str(contenttype), str(self.id)])
def get_parents(self):
if hasattr(self, 'parent'):
parent = self.parent
return parent.get_parents() + [parent]
else:
return []
def moveto_parent(self, new_parent):
self._validate_move(new_parent)
return self._perform_move(new_parent)
def _validate_move(self, new_parent):
if not hasattr(self, 'parent'):
# Top level of tree, cannot move
raise IllegalMove()
if type(self) == type(new_parent):
# Parent cannot be of same type
raise SameLevelMove
parent_field = self._meta.get_field_by_name('parent')[0]
req_parent_type = parent_field.rel.to
if req_parent_type != type(new_parent):
# new_parent is wrong type for this class
raise WrongLevelMove
def _perform_move(self, new_parent):
# Check for conflicting children and merge if they exist
if hasattr(new_parent, 'children') and \
new_parent.children.filter(slug=self.slug):
to_merge = new_parent.children.get(slug=self.slug)
return self.merge(to_merge, self)
else:
# Simple move
self.parent = new_parent
self.save()
# Update museumobjects
field_changes = calc_field_changes(self)
self.museumobject_set.update(**field_changes)
return self
@staticmethod
def merge(target, old):
if hasattr(old, 'children'):
# Deal with all the children of old
targets_children = [child.slug for child in target.children.all()]
for child in old.children.all():
if child.slug in targets_children:
# Need to merge
match = target.children.get(slug=child.slug)
LocationBase.merge(match, child)
else:
# Simply move child
child.parent = target
child.save()
changes = calc_field_changes(target)
child.museumobject_set.update(**changes)
# now that old has no children
# Actually merge the two
changes = calc_field_changes(target)
old.museumobject_set.update(**changes)
if old.museumobject_set.exists():
raise Exception
else:
old.delete()
return target
def find_mo_field_name(element):
return element._meta.concrete_model.museumobject_set.\
related.field.name
def calc_field_changes(element):
"""
    Walk up the tree of geo-locations, finding the new parents.
    These will be set onto all the museumobjects.
"""
fieldname = find_mo_field_name(element)
field_changes = {fieldname: element.id}
if hasattr(element, 'parent'):
field_changes.update(
calc_field_changes(element.parent))
return field_changes
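# Example (illustrative): for a Locality whose ancestry is
#   GlobalRegion > Country > StateProvince > RegionDistrict > Locality,
# calc_field_changes(locality) walks up via .parent and returns roughly
#   {'locality': locality.id, 'region_district': district.id,
#    'state_province': state.id, 'country': country.id, 'global_region': region.id}
# The exact keys come from find_mo_field_name() and depend on the field names
# declared on the MuseumObject model, which is not shown in this file.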
class GlobalRegion(LocationBase):
icon_path = models.CharField(max_length=255, blank=True,
help_text="Relative path to icon")
icon_title = models.CharField(max_length=255, blank=True,
help_text="Icon title, displayed on browse page")
class Meta(LocationBase.Meta):
pass
class Country(LocationBase):
parent = models.ForeignKey(GlobalRegion, related_name='children',
verbose_name='Global region', on_delete=models.PROTECT)
class Meta(LocationBase.Meta):
verbose_name_plural = 'countries'
unique_together = ('parent', 'slug')
class StateProvince(LocationBase):
parent = models.ForeignKey(Country, related_name='children',
verbose_name='Country', on_delete=models.PROTECT)
class Meta(LocationBase.Meta):
unique_together = ('parent', 'slug')
class RegionDistrict(LocationBase):
parent = models.ForeignKey(StateProvince, related_name='children',
verbose_name='State/province', on_delete=models.PROTECT)
class Meta(LocationBase.Meta):
unique_together = ('parent', 'slug')
class Locality(LocationBase):
parent = models.ForeignKey(RegionDistrict, related_name='children',
verbose_name='Region/district', on_delete=models.PROTECT)
class Meta(LocationBase.Meta):
verbose_name_plural = 'localities'
unique_together = ('parent', 'slug')
class Place(models.Model):
country = models.CharField(max_length=30, blank=True)
region = models.CharField(max_length=40, blank=True)
australian_state = models.CharField(max_length=20, blank=True)
name = models.CharField(max_length=150)
is_corrected = models.BooleanField(default=False,
help_text="Has someone manually"
"moved the marker to it's correct location.")
gn_name = models.CharField(max_length=100,
help_text="GeoNames Name", blank=True)
gn_id = models.CharField(max_length=20,
help_text="GeoNames ID", blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
ordering = ["id"]
def __unicode__(self):
return ' > '.join([self.country, self.region, self.name])
@models.permalink
def get_absolute_url(self):
return ('place_detail', [str(self.id)])
def get_geonames_url(self):
if self.gn_id:
return "http://www.geonames.org/%s" % self.gn_id
else:
return False
def get_kml_coordinates(self):
return "%s,%s,0" % (self.longitude, self.latitude)
def geocode_net(self, force=False):
"""
Lookup the latitude and longitude of this place with GeoNames
Place must be saved after use. Set `force` to re-lookup the location.
Can take a few seconds to return, since this uses a network request.
"""
if self.gn_id and not force:
return
from utils import geocoders
geonames = geocoders.GeoNamesWithId()
place, geonameId, (lat, lng) = geonames.geocode('%s, %s' %
(self.name, self.country,),
exactly_one=False)[0]
self.gn_name = place
self.gn_id = geonameId
self.latitude = lat
self.longitude = lng
@staticmethod
def autocomplete_search_fields():
return ("country__icontains", "region__icontains",
"australian_state__icontains", "name__icontains")
class Region(models.Model):
name = models.CharField(max_length=60, unique=True)
description = models.CharField(max_length=200)
def __unicode__(self):
return self.name
| bsd-3-clause | -4,995,136,529,847,650,000 | 31.764228 | 104 | 0.617122 | false | 3.992075 | false | false | false |
phobson/bokeh | examples/models/data_tables_server.py | 1 | 6342 | from __future__ import print_function
from bokeh.client import push_session
from bokeh.document import Document
from bokeh.models import (
ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid,
Circle, HoverTool, BoxSelectTool
)
from bokeh.models.widgets import (
Select, DataTable, TableColumn, StringFormatter,
NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor)
from bokeh.models.layouts import Row, Column, WidgetBox
from bokeh.sampledata.autompg2 import autompg2 as mpg
class DataTables(object):
def __init__(self):
self.document = Document()
self.manufacturer_filter = None
self.model_filter = None
self.transmission_filter = None
self.drive_filter = None
self.class_filter = None
self.source = ColumnDataSource()
self.update_data()
self.document.add_root((self.create()))
self.session = push_session(self.document)
def create(self):
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
manufacturer_select = Select(title="Manufacturer:", value="All", options=["All"] + manufacturers)
manufacturer_select.on_change('value', self.on_manufacturer_change)
model_select = Select(title="Model:", value="All", options=["All"] + models)
model_select.on_change('value', self.on_model_change)
transmission_select = Select(title="Transmission:", value="All", options=["All"] + transmissions)
transmission_select.on_change('value', self.on_transmission_change)
drive_select = Select(title="Drive:", value="All", options=["All"] + drives)
drive_select.on_change('value', self.on_drive_change)
class_select = Select(title="Class:", value="All", options=["All"] + classes)
class_select.on_change('value', self.on_class_change)
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=self.source, columns=columns, editable=True, width=1300)
plot = Plot(title=None, x_range= DataRange1d(), y_range=DataRange1d(), plot_width=1000, plot_height=300)
# Set up x & y axis
plot.add_layout(LinearAxis(), 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# Add Glyphs
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = plot.add_glyph(self.source, cty_glyph)
hwy = plot.add_glyph(self.source, hwy_glyph)
# Add the tools
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(renderers=[cty, hwy], dimensions=['width'])
plot.add_tools(cty_hover_tool, hwy_hover_tool, select_tool)
controls = WidgetBox(manufacturer_select, model_select, transmission_select, drive_select, class_select)
top_panel = Row(controls, plot)
layout = Column(top_panel, data_table)
return layout
def on_manufacturer_change(self, attr, _, value):
self.manufacturer_filter = None if value == "All" else value
self.update_data()
def on_model_change(self, attr, _, value):
self.model_filter = None if value == "All" else value
self.update_data()
def on_transmission_change(self, attr, _, value):
self.transmission_filter = None if value == "All" else value
self.update_data()
def on_drive_change(self, attr, _, value):
self.drive_filter = None if value == "All" else value
self.update_data()
def on_class_change(self, attr, _, value):
self.class_filter = None if value == "All" else value
self.update_data()
def update_data(self):
df = mpg
if self.manufacturer_filter:
df = df[df["manufacturer"] == self.manufacturer_filter]
if self.model_filter:
df = df[df["model"] == self.model_filter]
if self.transmission_filter:
df = df[df["trans"] == self.transmission_filter]
if self.drive_filter:
df = df[df["drv"] == self.drive_filter]
if self.class_filter:
df = df[df["class"] == self.class_filter]
self.source.data = ColumnDataSource.from_df(df)
def run(self, do_view=False, poll_interval=0.5):
if do_view:
self.session.show()
self.session.loop_until_closed()
if __name__ == "__main__":
data_tables = DataTables()
data_tables.run(True)
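# Note (added; assumes a Bokeh server is available locally): start the server
# with `bokeh serve`, then run `python data_tables_server.py`. push_session()
# pushes this Document to the server, and session.show()/loop_until_closed()
# keep the app alive so widget callbacks from the browser are handled here.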
| bsd-3-clause | -2,652,749,595,042,619,000 | 43.978723 | 158 | 0.611479 | false | 3.743802 | false | false | false |
adamfisk/littleshoot-client | server/appengine/littleshoot/amazonDevPay.py | 1 | 9629 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.utils.decorators import decorator_from_middleware
from facebook.djangofb import FacebookMiddleware
import facebook.djangofb as facebook
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseRedirect
from django.template import RequestContext
import decorators
import logging
import os
import util
import models
import files
import uuid
import amazonDevPayClient
from google.appengine.ext.db import djangoforms
from google.appengine.ext import db
from registration.forms import RegistrationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.conf import settings
from django.http import HttpResponseRedirect
import httplib
devpay_client = amazonDevPayClient.AmazonDevPayClient(settings.AWS_ACCESS_KEY_ID,
settings.AWS_SECRET_ACCESS_KEY,
settings.AWS_PRODUCT_TOKEN)
@decorator_from_middleware(FacebookMiddleware)
#@facebook.require_login()
@facebook.require_login(next="http://www.littleshoot.org/publish")
def freeForm(request):
fbId = str(request.facebook.uid)
logging.info("Facebook ID: %s", fbId)
policyFile = devpay_client.policy(settings.BASE_URI)
policyFileSignature = devpay_client.signedPolicy(policyFile)
#if userToken is None:
return render_to_response('freeUploadForm.html',
{'base64_policy_file' : policyFile,
'policy_file_signature' : policyFileSignature,
'aws_access_key_id' : settings.AWS_ACCESS_KEY_ID,
'fbId' : fbId,
'baseUrl' : settings.BASE_URI},
context_instance=RequestContext(request))
#@decorator_from_middleware(FacebookMiddleware)
#@facebook.require_login()
def uploadSuccess(request):
logging.info('Handling upload success: %s', request.REQUEST.items())
bucket = request.REQUEST.get('bucket')
key = request.REQUEST.get('key')
etag = request.REQUEST.get('etag')
baseUri = bucket + '.s3.amazonaws.com'
conn = httplib.HTTPConnection(baseUri)
conn.request("HEAD", "/" + key)
res = conn.getresponse()
if res.status != 200:
# We're responding to a callback from Amazon here, so we don't need
# to write anything intelligible.
logging.info("Unexpected response from S3: %s", res.status)
return HttpResponse()
logging.info("Status, Reason: %s, %s", res.status, res.reason)
logging.info("Headers: %s", res.getheaders())
size = res.getheader("Content-Length")
logging.info("Content-Length: %s", size)
requestCopy = request.GET.copy()
requestCopy.update({'size': size})
# See: http://developer.amazonwebservices.com/connect/entry.jspa?externalID=1963&categoryID=117
uri = "urn:etag:" + etag
requestCopy.update({'uri': uri})
requestCopy.update({'etag': etag})
# Here's the response:
# Got redirect from Amazon with [(u'etag', u'"2b63da5eb9f0e5d5a76ce4c34315843d"'), (u'fbId', u'1014879'), (u'bucket', u'littleshoot_test'), (u'key', u'user/1014879/files/build.bash'), (u'title', u'build.bash')]
return files.publishFileBase(requestCopy, False)
#return HttpResponse('Got redirect from Amazon with %s' % (request.REQUEST.items()))
#return HttpResponseRedirect('uploadMapped.html')
@decorator_from_middleware(FacebookMiddleware)
@facebook.require_login()
def uploadForm(request):
#if request.user.is_authenticated():
#logging.info('User is authenticated!')
#logging.info('User is %s', request.user.username)
fbId = str(request.facebook.uid)
logging.info("Facebook ID: ", fbId)
try:
userToken = request.user.amazonDevPayUserToken
except AttributeError, e:
logging.error("An exception was caught: " + str(e))
policyFile = devpay_client.policy()
policyFileSignature = devpay_client.signedPolicy(policyFile)
#if userToken is None:
return render_to_response('purchaseDevPay.html',
{'base64_policy_file' : policyFile,
'policy_file_signature' : policyFileSignature,
'aws_access_key_id' : settings.AWS_ACCESS_KEY_ID,
'fbId' : fbId,
'baseUri' : settings.BASE_URI},
context_instance=RequestContext(request))
"""
else:
devPayPolicyFile = devpay_client.devPayPolicy(userToken)
devPayPolicyFileSignature = devpay_client.signedPolicy(devPayPolicyFile)
logging.info('Straight sig: %s', policyFileSignature)
logging.info('DevPay sig: %s', devPayPolicyFileSignature)
return render_to_response('publisherUploadForm.html',
{'base64_policy_file' : policyFile,
'policy_file_signature' : policyFileSignature,
'devpay_base64_policy_file' : devPayPolicyFile,
'devpay_policy_file_signature' : devPayPolicyFileSignature,
'aws_access_key_id' : settings.AWS_ACCESS_KEY_ID},
context_instance=RequestContext(request))
else:
logging.info("User is not authenticated!!")
loginForm = AuthenticationForm(request)
registrationForm = RegistrationForm()
logging.info("Rendering loginOrRegister")
return render_to_response('customRegistration/loginOrRegister.html',
{'loginForm' : loginForm,
'registrationForm': registrationForm },
context_instance=RequestContext(request))
"""
"""
Documentation from Amazon:
Once the application has the activation key and product code, it looks
up the product token associated with the product code. The application
then makes a signed request to the License Service action
ActivateHostedProduct. The request must include the product token for
the customer and the customer's activation key. The response includes
the user token for the customer.
"""
@decorators.activationKeyRequired
@decorators.productCodeRequired
def activate(request):
logging.info("Handling DevPay activate request: %s", request.REQUEST.items())
logging.info("Host is: %s", request.get_host())
logging.info('Cookies on DevPay callback: %s', request.COOKIES)
facebookId = request.COOKIES.get('facebookId')
logging.info('Facebook ID: %s', facebookId)
#logging.info(request.META['SERVER_NAME'])
activationKey = request.REQUEST.get('ActivationKey')
# We only have a single product for now, so we don't need to look anything
# up based on the product code.
#productCode = request.REQUEST.get('ProductCode')
response = devpay_client.activateHostedProduct(activationKey)
#logging.info("Activated hosted product response: %s", dir(response))
result = response.activateHostedProductResult
#logging.info("Activated hosted product result: %s", dir(result))
userToken = result.userToken
persistentIdentifier = result.persistentIdentifier
logging.info('User token: %s', userToken)
logging.info('Persistent Identifier: %s', persistentIdentifier)
urlBase = "http://" + request.get_host() + "/amazonDevPay"
if request.user.is_authenticated():
logging.info('User is authenticated!')
logging.info('User is %s', request.user.username)
request.user.amazonDevPayUserToken = userToken
request.user.amazonDevPayPersistentIdentifier = persistentIdentifier
# We also need to create a bucket for the user.
request.user.put()
# We redirect to a page that will get rid of the separate Amazon frame.
#return HttpResponse('Activation Successful!')
# We just use the server name in case we're running from a staging
# server, for example.
finalUrl = urlBase + "Purchase";
return render_to_response('frameBuster.html',
{'frameUrl' : finalUrl})
else:
finalUrl = urlBase + "Error";
return render_to_response('frameBuster.html',
{'frameUrl' : finalUrl})
@decorator_from_middleware(FacebookMiddleware)
@facebook.require_login()
#@facebook.require_login(next="http://www.littleshoot.org/publish")
def listS3Files(request):
logging.info("Handling listS3Files request: %s", request.REQUEST.items())
logging.info('Cookies on list files: %s', request.COOKIES)
fbId = str(request.facebook.uid)
json = devpay_client.listS3FilesForId(fbId)
return HttpResponse(json, mimetype='application/json; charset=utf-8')
@decorator_from_middleware(FacebookMiddleware)
@facebook.require_login()
#@facebook.require_login(next="http://www.littleshoot.org/publish")
def listS3FilesForId(request):
userId = request.REQUEST.get('userId')
json = devpay_client.listS3FilesForId(userId)
return HttpResponse(json, mimetype='application/json; charset=utf-8') | gpl-2.0 | 3,223,197,400,000,495,000 | 41.8 | 214 | 0.652404 | false | 4.210319 | false | false | false |
kennethlyn/elbe | elbepack/commands/setcdrom.py | 2 | 1471 | #!/usr/bin/env python
#
# ELBE - Debian Based Embedded Rootfilesystem Builder
# Copyright (C) 2013 Linutronix GmbH
#
# This file is part of ELBE.
#
# ELBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ELBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ELBE. If not, see <http://www.gnu.org/licenses/>.
import sys
from elbepack.treeutils import etree
from optparse import OptionParser
def run_command( argv ):
oparser = OptionParser( usage="usage: %prog setcdrom <xmlfile> <cdrom>")
(opt,args) = oparser.parse_args(argv)
if len(args) != 2:
print "Wrong number of arguments"
oparser.print_help()
sys.exit(20)
try:
xml = etree( args[0] )
except:
print "Error reading xml file!"
sys.exit(20)
mirror = xml.node("project/mirror")
mirror.clear()
cdrom = mirror.ensure_child("cdrom")
cdrom.set_text( args[1] )
try:
xml.write( args[0] )
except:
print "Unable to write new xml file"
sys.exit(20)
| gpl-3.0 | -3,333,298,611,405,832,000 | 25.745455 | 76 | 0.67777 | false | 3.650124 | false | false | false |
fbarreir/panda-server | pandaserver/test/datasetCallbackListener.py | 2 | 7517 | import os
import re
import sys
import ssl
import time
import signal
import socket
import commands
import optparse
import datetime
import cPickle as pickle
import stomp
from dq2.common import log as logging
from config import panda_config
from brokerage.SiteMapper import SiteMapper
from dataservice import DataServiceUtils
from dataservice.DDMHandler import DDMHandler
import yaml
import logging
logging.basicConfig(level = logging.DEBUG)
# logger
from pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('datasetCallbackListener')
# keep PID
pidFile = '%s/dataset_callback_listener.pid' % panda_config.logdir
# overall timeout value
overallTimeout = 60 * 59
# expiration time
expirationTime = datetime.datetime.utcnow() + datetime.timedelta(minutes=overallTimeout)
# kill whole process
def catch_sig(sig, frame):
try:
os.remove(pidFile)
except:
pass
# kill
_logger.debug('terminating ...')
commands.getoutput('kill -9 -- -%s' % os.getpgrp())
# exit
sys.exit(0)
# callback listener
class DatasetCallbackListener(stomp.ConnectionListener):
def __init__(self,conn,tb,sm,subscription_id):
# connection
self.conn = conn
# task buffer
self.taskBuffer = tb
# site mapper
self.siteMapper = sm
# subscription ID
self.subscription_id = subscription_id
def on_error(self,headers,body):
_logger.error("on_error : %s" % headers['message'])
def on_disconnected(self,headers,body):
_logger.error("on_disconnected : %s" % headers['message'])
def on_message(self, headers, message):
try:
dsn = 'UNKNOWN'
# send ack
id = headers['message-id']
#self.conn.ack(id,self.subscription_id)
# convert message form str to dict
messageDict = yaml.load(message)
# check event type
if not messageDict['event_type'] in ['datasetlock_ok']:
_logger.debug('%s skip' % messageDict['event_type'])
return
_logger.debug('%s start' % messageDict['event_type'])
messageObj = messageDict['payload']
# only for _dis or _sub
dsn = messageObj['name']
if (re.search('_dis\d+$',dsn) == None) and (re.search('_sub\d+$',dsn) == None):
_logger.debug('%s is not _dis or _sub dataset, skip' % dsn)
return
# take action
scope = messageObj['scope']
site = messageObj['rse']
_logger.debug('%s site=%s type=%s' % (dsn, site, messageDict['event_type']))
thr = DDMHandler(self.taskBuffer,None,site,dsn,scope)
thr.start()
thr.join()
_logger.debug('done %s' % dsn)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("on_message : %s %s" % (errtype,errvalue))
# main
def main(backGround=False):
_logger.debug('starting ...')
# register signal handler
signal.signal(signal.SIGINT, catch_sig)
signal.signal(signal.SIGHUP, catch_sig)
signal.signal(signal.SIGTERM,catch_sig)
signal.signal(signal.SIGALRM,catch_sig)
signal.alarm(overallTimeout)
# forking
pid = os.fork()
if pid != 0:
# watch child process
os.wait()
time.sleep(1)
else:
# main loop
from taskbuffer.TaskBuffer import taskBuffer
# check certificate
certName = '%s/pandasv1_usercert.pem' %panda_config.certdir
keyName = '%s/pandasv1_userkey.pem' %panda_config.certdir
#certName = '/etc/grid-security/hostcert.pem'
_logger.debug('checking certificate {0}'.format(certName))
certOK,certMsg = DataServiceUtils.checkCertificate(certName)
if not certOK:
_logger.error('bad certificate : {0}'.format(certMsg))
# initialize cx_Oracle using dummy connection
from taskbuffer.Initializer import initializer
initializer.init()
# instantiate TB
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# ActiveMQ params
queue = '/queue/Consumer.panda.rucio.events'
ssl_opts = {'use_ssl' : True,
'ssl_version' : ssl.PROTOCOL_TLSv1,
'ssl_cert_file' : certName,
'ssl_key_file' : keyName}
# resolve multiple brokers
brokerList = socket.gethostbyname_ex('atlas-mb.cern.ch')[-1]
# set listener
connList = []
for tmpBroker in brokerList:
try:
clientid = 'PANDA-' + socket.getfqdn() + '-' + tmpBroker
subscription_id = 'panda-server-consumer'
_logger.debug('setting listener %s to broker %s' % (clientid, tmpBroker))
conn = stomp.Connection(host_and_ports = [(tmpBroker, 61023)], **ssl_opts)
connList.append(conn)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("failed to connect to %s : %s %s" % (tmpBroker,errtype,errvalue))
catch_sig(None,None)
while True:
for conn in connList:
try:
if not conn.is_connected():
conn.set_listener('DatasetCallbackListener', DatasetCallbackListener(conn,taskBuffer,siteMapper,
subscription_id))
conn.start()
conn.connect(headers = {'client-id': clientid})
conn.subscribe(destination=queue, id=subscription_id, ack='auto')
_logger.debug('listener %s is up and running' % clientid)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("failed to set listener on %s : %s %s" % (tmpBroker,errtype,errvalue))
catch_sig(None,None)
time.sleep(5)
# entry
if __name__ == "__main__":
optP = optparse.OptionParser(conflict_handler="resolve")
options,args = optP.parse_args()
try:
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(seconds=overallTimeout-180)
# get process list
scriptName = sys.argv[0]
out = commands.getoutput('env TZ=UTC ps axo user,pid,lstart,args | grep %s' % scriptName)
for line in out.split('\n'):
items = line.split()
# owned process
if not items[0] in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
continue
# look for python
if re.search('python',line) == None:
continue
# PID
pid = items[1]
# start time
timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# kill old process
if startTime < timeLimit:
_logger.debug("old process : %s %s" % (pid,startTime))
_logger.debug(line)
commands.getoutput('kill -9 %s' % pid)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("kill process : %s %s" % (errtype,errvalue))
# main loop
main()
| apache-2.0 | -3,565,029,283,701,124,600 | 35.31401 | 120 | 0.572303 | false | 3.880743 | true | false | false |
anibali/pyshowoff | pyshowoff/__init__.py | 1 | 8149 | import requests
from requests.auth import HTTPBasicAuth
import json
from requests_futures.sessions import FuturesSession
from .promise import Promise
class Client:
def __init__(self, base_url, api_key_id=None, api_key_secret=None, disable_async=False):
self.base_url = base_url
if api_key_id is not None:
self.auth = HTTPBasicAuth(api_key_id, api_key_secret)
else:
self.auth = None
if disable_async:
self.session = requests.Session()
else:
self.session = FuturesSession()
self.session.headers.update({
'content-type': 'application/json'
})
def request(self, method, path, data=None):
if data is not None:
data = json.dumps(data)
res = self.session.request(method, self.base_url + path, data=data, auth=self.auth)
def raise_on_error_response(res):
res.raise_for_status()
return res
return Promise.resolve(res).then(raise_on_error_response)
def add_notebook(self, title):
data = {
'data': {
'type': 'notebooks',
'attributes': {'title': title},
},
}
promise = self.request('post', '/api/v2/notebooks', data)
return promise.then(lambda res: Notebook(self, res.json()['data']['id']))
class Notebook:
def __init__(self, client, notebook_id):
self.client = client
self.id = notebook_id
def update(self, title=None, pinned=None, progress=None):
"""Updates the attributes of this notebook.
Args:
title (str): Notebook title
pinned (bool): Set to True to protect notebook against deletion
progress (float): Notebook progress (from 0.0 to 1.0)
"""
attrs = {}
if title is not None:
attrs['title'] = title
if pinned is not None:
attrs['pinned'] = pinned
if progress is not None:
attrs['progress'] = progress
data = {
'data': {
'id': self.id,
'type': 'notebooks',
'attributes': attrs,
},
}
self.client.request('patch', '/api/v2/notebooks/' + self.id, data)
def set_title(self, title):
self.update(title=title)
def set_pinned(self, pinned):
self.update(pinned=pinned)
def set_progress(self, progress):
self.update(progress=progress)
def add_tag(self, name):
data = {
'data': {
'type': 'tags',
'attributes': {'name': name},
'relationships': {
'notebook': {
'data': {'type': 'notebooks', 'id': self.id},
},
},
},
}
promise = self.client.request('post', '/api/v2/tags', data)
        return promise.then(lambda res: Tag(self.client, res.json()['data']['id']))
def add_frame(self, title, bounds=None):
data = {
'data': {
'type': 'frames',
'attributes': {'title': title},
'relationships': {
'notebook': {
'data': {'type': 'notebooks', 'id': self.id},
},
},
},
}
if bounds is not None:
data['data']['attributes'].update(bounds)
promise = self.client.request('post', '/api/v2/frames', data)
return promise.then(lambda res: Frame(self.client, res.json()['data']['id']))
class Tag:
def __init__(self, client, tag_id):
self.client = client
self.id = tag_id
class Frame:
def __init__(self, client, frame_id):
self.client = client
self.id = frame_id
def update(self, title=None, type=None, content=None):
attrs = {}
if title is not None:
attrs['title'] = title
if type is not None:
attrs['type'] = type
if content is not None:
attrs['content'] = content
data = {
'data': {
'id': self.id,
'type': 'frames',
'attributes': attrs,
},
}
self.client.request('patch', '/api/v2/frames/' + self.id, data)
def set_title(self, title):
self.update(title=title)
def set_content(self, type, content):
self.update(type=type, content=content)
def vega(self, spec):
self.set_content('vega', {'body': spec})
def vegalite(self, spec):
self.set_content('vegalite', {'body': spec})
def plotly(self, fig):
self.set_content('plotly', fig)
def text(self, message):
self.set_content('text', {'body': message})
def html(self, html):
self.set_content('html', {'body': html})
def progress(self, current_value, max_value):
percentage = min(100 * current_value / max_value, 100)
html = """<div class="progress">
<div class="progress-bar" role="progressbar"
aria-valuenow="{percentage:0.2f}" aria-valuemin="0" aria-valuemax="100"
style="width: {percentage:0.2f}%; min-width: 40px;"
>
{percentage:0.2f}%
</div>
</div>""".format(percentage=percentage)
self.html(html)
def line_graph(self, xss, yss, series_names=None, x_title=None, y_title=None,
y_axis_min=None, y_axis_max=None):
if not isinstance(xss[0], list):
xss = [xss] * len(yss)
show_legend = True
if series_names is None:
show_legend = False
series_names = ['series_{:03d}'.format(i) for i in range(len(xss))]
min_x = float('inf')
max_x = -float('inf')
min_y = float('inf')
max_y = -float('inf')
tables = []
marks = []
for i, xs in enumerate(xss):
marks.append({
'type': 'line',
'from': {'data': 'table_{:03d}'.format(i)},
'properties': {
'enter': {
'x': {'scale': 'x', 'field': 'x'},
'y': {'scale': 'y', 'field': 'y'},
'stroke': {'scale': 'c', 'value': series_names[i]},
}
},
})
points = []
for j, x in enumerate(xs):
y = yss[i][j]
min_x = min(x, min_x)
max_x = max(x, max_x)
min_y = min(y, min_y)
max_y = max(y, max_y)
points.append({'x': x, 'y': y})
tables.append(points)
data = []
for i, table in enumerate(tables):
data.append({
'name': 'table_{:03d}'.format(i),
'values': table
})
spec = {
'width': 370,
'height': 250,
'data': data,
'scales': [
{
'name': 'x',
'type': 'linear',
'range': 'width',
'domainMin': min_x,
'domainMax': max_x,
'nice': True,
'zero': False,
}, {
'name': 'y',
'type': 'linear',
'range': 'height',
'domainMin': y_axis_min or min_y,
'domainMax': y_axis_max or max_y,
'nice': True,
'zero': False,
}, {
'name': 'c',
'type': 'ordinal',
'range': 'category10',
'domain': series_names,
}
],
'axes': [
{'type': 'x', 'scale': 'x', 'title': x_title},
{'type': 'y', 'scale': 'y', 'title': y_title, 'grid': True},
],
'marks': marks,
}
if show_legend:
spec['legends'] = [{'fill': 'c'}]
self.vega(spec)
| apache-2.0 | -9,011,978,454,624,519,000 | 29.867424 | 92 | 0.454412 | false | 3.957746 | false | false | false |
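A minimal usage sketch for the pyshowoff client defined in the record above, pieced together only from the methods it exposes (Client, add_notebook, Notebook.add_frame, Frame.line_graph) and the Promise.then() chaining used inside the module. The server URL, API keys and titles are placeholders, and the callback style assumes each .then() callback receives the previous step's result, as the internal chaining suggests; this is not an official pyshowoff example.

# Hypothetical usage of the Client/Notebook/Frame API above; all literals are placeholders.
from pyshowoff import Client

client = Client('http://localhost:3000',
                api_key_id='KEY_ID', api_key_secret='KEY_SECRET',
                disable_async=True)  # plain requests.Session instead of request futures

def on_frame(frame):
    # yss is a list of series; a flat xss is replicated internally to match.
    frame.line_graph([0, 1, 2, 3], [[1.00, 0.62, 0.45, 0.39]],
                     series_names=['train loss'],
                     x_title='epoch', y_title='loss')

def on_notebook(notebook):
    notebook.set_progress(0.25)
    notebook.add_frame('loss curve').then(on_frame)

client.add_notebook('Training run 42').then(on_notebook)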
DXCanas/kolibri | kolibri/core/deviceadmin/tests/test_dbrestore.py | 1 | 8164 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
import tempfile
import pytest
from django.conf import settings
from django.core.management import call_command
from django.test.utils import override_settings
from mock import patch
import kolibri
from kolibri.core.auth.constants.collection_kinds import FACILITY
from kolibri.core.deviceadmin.management.commands.dbrestore import CommandError
from kolibri.core.deviceadmin.utils import dbbackup
from kolibri.core.deviceadmin.utils import dbrestore
from kolibri.core.deviceadmin.utils import default_backup_folder
from kolibri.core.deviceadmin.utils import get_dtm_from_backup_name
from kolibri.core.deviceadmin.utils import IncompatibleDatabase
from kolibri.core.deviceadmin.utils import search_latest
from kolibri.utils.server import NotRunning
from kolibri.utils.server import STATUS_UNKNOWN
MOCK_DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ":memory:",
'OPTIONS': {
'timeout': 100,
}
}
}
MOCK_DATABASES_FILE = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(tempfile.mkdtemp(), "test{}.db".format(random.randint(0, 100000))),
'OPTIONS': {
'timeout': 100,
}
}
}
def is_sqlite_settings():
"""
This does not work during pytest collection, needs to be called while
executing tests!
"""
return 'sqlite3' in settings.DATABASES['default']['ENGINE']
def mock_status_not_running():
raise NotRunning(STATUS_UNKNOWN)
def test_latest():
with pytest.raises(RuntimeError):
call_command("dbrestore", latest=True)
def test_illegal_command():
with pytest.raises(CommandError):
call_command("dbrestore", latest=True, dump_file="wup wup")
def test_no_restore_from_no_file():
with pytest.raises(CommandError):
call_command("dbrestore", dump_file="does not exist")
def test_active_kolibri():
"""
Tests that we cannot restore while kolibri is active
"""
with patch(
"kolibri.utils.server.get_status",
return_value=(12345, "http://127.0.0.1", 1234)
) as gs:
with pytest.raises(SystemExit):
call_command("dbrestore", latest=True)
gs.assert_called_once()
def test_inactive_kolibri():
"""
    Tests that a restore is attempted when kolibri is not running (it fails here only because no backups exist)
"""
with patch(
"kolibri.utils.server.get_status",
side_effect=mock_status_not_running
) as gs:
        # Since there are no backups available during a test, this should fail!
with pytest.raises(RuntimeError):
call_command("dbrestore", latest=True)
gs.assert_called_once()
def test_not_sqlite():
if is_sqlite_settings():
return
with pytest.raises(IncompatibleDatabase):
dbrestore("/doesnt/matter.file")
def test_fail_on_unknown_file():
with pytest.raises(ValueError):
get_dtm_from_backup_name("this-file-has-no-time")
@pytest.mark.django_db
@pytest.mark.filterwarnings('ignore:Overriding setting DATABASES')
def test_restore_from_latest():
"""
    Tests restoring the most recent backup file into a fresh in-memory database
"""
if not is_sqlite_settings():
return
with patch(
"kolibri.utils.server.get_status",
side_effect=mock_status_not_running
):
# Create something special in the database!
from kolibri.core.auth.models import Facility
Facility.objects.create(name="test latest", kind=FACILITY)
# Create a backup file from the current test database
call_command("dbbackup")
        # Also add in a file with an old time stamp to ensure it's ignored
sql = "syntax error;"
fbroken = "db-v{}_2015-08-02_00-00-00.dump".format(kolibri.__version__)
open(os.path.join(default_backup_folder(), fbroken), "w").write(sql)
# Add an unparsable file name
fbroken = "db-v{}_.dump".format(kolibri.__version__)
open(os.path.join(default_backup_folder(), fbroken), "w").write(sql)
# Restore it into a new test database setting
with override_settings(DATABASES=MOCK_DATABASES):
from django import db
# Destroy current connections and create new ones:
db.connections.close_all()
db.connections = db.ConnectionHandler()
call_command("dbrestore", latest=True)
# Test that the user has been restored!
assert Facility.objects.filter(name="test latest", kind=FACILITY).count() == 1
@pytest.mark.django_db
@pytest.mark.filterwarnings('ignore:Overriding setting DATABASES')
def test_restore_from_file_to_memory():
"""
Restores from a file dump to a database stored in memory and reads contents
from the new database.
"""
if not is_sqlite_settings():
return
with patch(
"kolibri.utils.server.get_status",
side_effect=mock_status_not_running
):
# Create something special in the database!
from kolibri.core.auth.models import Facility
Facility.objects.create(name="test file", kind=FACILITY)
# Create a backup file from the current test database
dest_folder = tempfile.mkdtemp()
backup = dbbackup(kolibri.__version__, dest_folder=dest_folder)
# Restore it into a new test database setting
with override_settings(DATABASES=MOCK_DATABASES):
from django import db
# Destroy current connections and create new ones:
db.connections.close_all()
db.connections = db.ConnectionHandler()
call_command("dbrestore", dump_file=backup)
# Test that the user has been restored!
assert Facility.objects.filter(name="test file", kind=FACILITY).count() == 1
@pytest.mark.django_db
@pytest.mark.filterwarnings('ignore:Overriding setting DATABASES')
def test_restore_from_file_to_file():
"""
Restores from a file dump to a database stored in a file and reads contents
from the new database.
"""
if not is_sqlite_settings():
return
with patch(
"kolibri.utils.server.get_status",
side_effect=mock_status_not_running
):
# Create something special in the database!
from kolibri.core.auth.models import Facility
Facility.objects.create(name="test file", kind=FACILITY)
# Create a backup file from the current test database
dest_folder = tempfile.mkdtemp()
# Purposefully destroy the connection pointer, which is the default
# state of an unopened connection
from django import db
db.connections['default'].connection = None
backup = dbbackup(kolibri.__version__, dest_folder=dest_folder)
# Restore it into a new test database setting
with override_settings(DATABASES=MOCK_DATABASES_FILE):
# Destroy current connections and create new ones:
db.connections.close_all()
db.connections = db.ConnectionHandler()
# Purposefully destroy the connection pointer, which is the default
# state of an unopened connection
db.connections['default'].connection = None
call_command("dbrestore", dump_file=backup)
# Test that the user has been restored!
assert Facility.objects.filter(name="test file", kind=FACILITY).count() == 1
def test_search_latest():
search_root = tempfile.mkdtemp()
major_version = ".".join(map(str, kolibri.VERSION[:2]))
files = [
"db-v{}_2015-08-02_00-00-00.dump".format(kolibri.__version__),
"db-v{}_2016-08-02_00-00-00.dump".format(kolibri.__version__),
"db-v{}_2017-07-02_00-00-00.dump".format(major_version),
"db-v{}_2017-08-02_00-00-00.dump".format(kolibri.__version__),
]
latest = files[-1]
for f in files:
open(os.path.join(search_root, f), "w").write("")
__, search_fname = os.path.split(search_latest(search_root, major_version))
assert search_fname == latest
| mit | -4,469,586,616,310,145,000 | 32.459016 | 96 | 0.657031 | false | 3.874703 | true | false | false |
doctaphred/projecteuler | projecteuler/__main__.py | 1 | 1036 | # -*- coding: utf-8 -*-
import sys
from .problem import Problem
def main():
problems = list(Problem.discover())
if not problems:
print('Did not find any problems!')
sys.exit(1)
num_problems = len(problems)
if num_problems == 1:
print('1 problem attempted')
else:
print(num_problems, 'problems attempted')
for i, problem in enumerate(problems, start=1):
print()
print('{}/{}: Solving problem {}...'
.format(i, num_problems, problem.number))
problem.solve()
print('Answer:', problem.answer)
print(problem)
print()
total_seconds = sum(problem.time.total_seconds() for problem in problems)
print(total_seconds, 'seconds total')
num_correct = sum(problem.correct for problem in problems)
print('{}/{} correct'.format(num_correct, num_problems))
if num_correct == num_problems:
print('You win!')
else:
print('FAILURE')
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,461,986,150,749,626,000 | 23.093023 | 77 | 0.586873 | false | 3.969349 | false | false | false |
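The runner above only touches a handful of attributes on each problem object; the real Problem base class lives in projecteuler/problem.py and is not part of this record. The stand-in below merely restates that implied interface — every name is inferred from the attribute accesses in main(), and the discover()/solve() bodies are invented for illustration.

# Hypothetical stand-in showing the interface main() relies on; not the real Problem class.
import datetime

class ExampleProblem:
    number = 1                      # printed as "problem {number}"
    answer = None                   # read after solve()
    correct = False                 # summed to report "{n}/{total} correct"
    time = datetime.timedelta(0)    # .total_seconds() is summed for the total runtime

    @classmethod
    def discover(cls):
        # main() iterates whatever this returns
        return [cls()]

    def solve(self):
        start = datetime.datetime.now()
        self.answer = 233168        # e.g. Project Euler problem 1
        self.correct = True
        self.time = datetime.datetime.now() - start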
ESS-LLP/erpnext | erpnext/stock/reorder_item.py | 8 | 7231 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
import erpnext
from frappe.utils import flt, nowdate, add_days, cint
from frappe import _
def reorder_item():
""" Reorder item if stock reaches reorder level"""
# if initial setup not completed, return
if not (frappe.db.a_row_exists("Company") and frappe.db.a_row_exists("Fiscal Year")):
return
if cint(frappe.db.get_value('Stock Settings', None, 'auto_indent')):
return _reorder_item()
def _reorder_item():
material_requests = {"Purchase": {}, "Transfer": {}, "Material Issue": {}, "Manufacture": {}}
warehouse_company = frappe._dict(frappe.db.sql("""select name, company from `tabWarehouse`
where disabled=0"""))
default_company = (erpnext.get_default_company() or
frappe.db.sql("""select name from tabCompany limit 1""")[0][0])
items_to_consider = frappe.db.sql_list("""select name from `tabItem` item
where is_stock_item=1 and has_variants=0
and disabled=0
and (end_of_life is null or end_of_life='0000-00-00' or end_of_life > %(today)s)
and (exists (select name from `tabItem Reorder` ir where ir.parent=item.name)
or (variant_of is not null and variant_of != ''
and exists (select name from `tabItem Reorder` ir where ir.parent=item.variant_of))
)""",
{"today": nowdate()})
if not items_to_consider:
return
item_warehouse_projected_qty = get_item_warehouse_projected_qty(items_to_consider)
def add_to_material_request(item_code, warehouse, reorder_level, reorder_qty, material_request_type, warehouse_group=None):
if warehouse not in warehouse_company:
# a disabled warehouse
return
reorder_level = flt(reorder_level)
reorder_qty = flt(reorder_qty)
# projected_qty will be 0 if Bin does not exist
if warehouse_group:
projected_qty = flt(item_warehouse_projected_qty.get(item_code, {}).get(warehouse_group))
else:
projected_qty = flt(item_warehouse_projected_qty.get(item_code, {}).get(warehouse))
if (reorder_level or reorder_qty) and projected_qty < reorder_level:
deficiency = reorder_level - projected_qty
if deficiency > reorder_qty:
reorder_qty = deficiency
company = warehouse_company.get(warehouse) or default_company
material_requests[material_request_type].setdefault(company, []).append({
"item_code": item_code,
"warehouse": warehouse,
"reorder_qty": reorder_qty
})
for item_code in items_to_consider:
item = frappe.get_doc("Item", item_code)
if item.variant_of and not item.get("reorder_levels"):
item.update_template_tables()
if item.get("reorder_levels"):
for d in item.get("reorder_levels"):
add_to_material_request(item_code, d.warehouse, d.warehouse_reorder_level,
d.warehouse_reorder_qty, d.material_request_type, warehouse_group=d.warehouse_group)
if material_requests:
return create_material_request(material_requests)
def get_item_warehouse_projected_qty(items_to_consider):
item_warehouse_projected_qty = {}
for item_code, warehouse, projected_qty in frappe.db.sql("""select item_code, warehouse, projected_qty
from tabBin where item_code in ({0})
and (warehouse != "" and warehouse is not null)"""\
.format(", ".join(["%s"] * len(items_to_consider))), items_to_consider):
if item_code not in item_warehouse_projected_qty:
item_warehouse_projected_qty.setdefault(item_code, {})
if warehouse not in item_warehouse_projected_qty.get(item_code):
item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)
warehouse_doc = frappe.get_doc("Warehouse", warehouse)
while warehouse_doc.parent_warehouse:
if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):
item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(projected_qty)
else:
item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)
warehouse_doc = frappe.get_doc("Warehouse", warehouse_doc.parent_warehouse)
return item_warehouse_projected_qty
def create_material_request(material_requests):
""" Create indent on reaching reorder level """
mr_list = []
exceptions_list = []
def _log_exception():
if frappe.local.message_log:
exceptions_list.extend(frappe.local.message_log)
frappe.local.message_log = []
else:
exceptions_list.append(frappe.get_traceback())
for request_type in material_requests:
for company in material_requests[request_type]:
try:
items = material_requests[request_type][company]
if not items:
continue
mr = frappe.new_doc("Material Request")
mr.update({
"company": company,
"transaction_date": nowdate(),
"material_request_type": "Material Transfer" if request_type=="Transfer" else request_type
})
for d in items:
d = frappe._dict(d)
item = frappe.get_doc("Item", d.item_code)
uom = item.stock_uom
conversion_factor = 1.0
if request_type == 'Purchase':
uom = item.purchase_uom or item.stock_uom
if uom != item.stock_uom:
conversion_factor = frappe.db.get_value("UOM Conversion Detail",
{'parent': item.name, 'uom': uom}, 'conversion_factor') or 1.0
mr.append("items", {
"doctype": "Material Request Item",
"item_code": d.item_code,
"schedule_date": add_days(nowdate(),cint(item.lead_time_days)),
"qty": d.reorder_qty / conversion_factor,
"uom": uom,
"stock_uom": item.stock_uom,
"warehouse": d.warehouse,
"item_name": item.item_name,
"description": item.description,
"item_group": item.item_group,
"brand": item.brand,
})
schedule_dates = [d.schedule_date for d in mr.items]
mr.schedule_date = max(schedule_dates or [nowdate()])
mr.insert()
mr.submit()
mr_list.append(mr)
except:
_log_exception()
if mr_list:
if getattr(frappe.local, "reorder_email_notify", None) is None:
frappe.local.reorder_email_notify = cint(frappe.db.get_value('Stock Settings', None,
'reorder_email_notify'))
if(frappe.local.reorder_email_notify):
send_email_notification(mr_list)
if exceptions_list:
notify_errors(exceptions_list)
return mr_list
def send_email_notification(mr_list):
""" Notify user about auto creation of indent"""
email_list = frappe.db.sql_list("""select distinct r.parent
from `tabHas Role` r, tabUser p
where p.name = r.parent and p.enabled = 1 and p.docstatus < 2
and r.role in ('Purchase Manager','Stock Manager')
and p.name not in ('Administrator', 'All', 'Guest')""")
msg = frappe.render_template("templates/emails/reorder_item.html", {
"mr_list": mr_list
})
frappe.sendmail(recipients=email_list,
subject=_('Auto Material Requests Generated'), message = msg)
def notify_errors(exceptions_list):
subject = "[Important] [ERPNext] Auto Reorder Errors"
content = """Dear System Manager,
An error occurred for certain Items while creating Material Requests based on Re-order level.
Please rectify these issues:
---
<pre>
%s
</pre>
---
Regards,
Administrator""" % ("\n\n".join(exceptions_list),)
from frappe.email import sendmail_to_system_managers
sendmail_to_system_managers(subject, content)
| gpl-3.0 | 2,448,168,449,020,155,400 | 33.108491 | 124 | 0.698105 | false | 3.238245 | false | false | false |
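The heart of add_to_material_request() above is a simple rule: create a request only when the projected stock falls below the reorder level, and request at least the shortfall but never less than the configured reorder quantity. A stand-alone restatement of that arithmetic, for illustration only — this is not ERPNext code:

def requested_qty(projected_qty, reorder_level, reorder_qty):
    # Mirrors the decision in add_to_material_request(); 0 means "no request needed".
    if not (reorder_level or reorder_qty) or projected_qty >= reorder_level:
        return 0
    deficiency = reorder_level - projected_qty
    return max(reorder_qty, deficiency)

# Projected 4, level 10, default qty 5 -> the shortfall of 6 wins.
assert requested_qty(4, 10, 5) == 6
# Projected 9, level 10, default qty 5 -> the default quantity wins.
assert requested_qty(9, 10, 5) == 5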
SymbiFlow/symbiflow-arch-defs | utils/print_place_delay_matrix.py | 1 | 1063 | """ Print override delta delay placement model in human readable format. """
import argparse
import capnp
import os.path
# Remove magic import hook.
capnp.remove_import_hook()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--schema_path',
help='Path to override delta delay placement model schema',
required=True
)
parser.add_argument('--place_delay_matrix', required=True)
args = parser.parse_args()
place_delay_model = capnp.load(
os.path.join(args.schema_path, 'place_delay_model.capnp')
)
with open(args.place_delay_matrix, 'rb') as f:
delay_model = place_delay_model.VprOverrideDelayModel.read(f)
x_dim = delay_model.delays.dims[0]
y_dim = delay_model.delays.dims[1]
itr = iter(delay_model.delays.data)
for x in range(x_dim):
row = []
for y in range(y_dim):
value = next(itr)
row.append(str(value.value.value))
print(','.join(row))
if __name__ == "__main__":
main()
| isc | -641,335,990,025,011,500 | 24.309524 | 76 | 0.624647 | false | 3.462541 | false | false | false |
Eigenlabs/EigenD | plg_language/database.py | 1 | 3196 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
from pi import atom,database,utils,logic,container,node,action,const,async,rpc,paths
from pibelcanto import lexicon
from plg_language import noun,verb,macro,imperative
class Database(database.Database):
def __init__(self):
self.__primitives = {}
database.Database.__init__(self)
self.add_module(noun)
self.add_module(verb)
self.add_module(imperative)
self.add_lexicon(lexicon.lexicon)
# widget manager for updating widget names if they change
self.__widget_manager = None
def lookup_primitive(self,klass):
return self.__primitives.get(klass)
def add_module(self,module):
logic.Engine.add_module(self,module)
self.add_primitives(module)
def add_primitives(self, module):
a = dict([ (k[10:],getattr(module,k)) for k in dir(module) if k.startswith('primitive_') ])
self.__primitives.update(a)
def find_all_descendants(self, ids):
s = frozenset()
for id in ids:
d = self.find_descendants(id)
if len(d)==0:
s = s.union(frozenset([id]))
else:
s = s.union(self.find_all_descendants(d))
return s
def object_changed(self,proxy,parts):
# on agent or atom name change, updates the osc widgets and Stage tabs
id=proxy.id()
if 'name' in parts or 'ordinal' in parts:
# build osc name
name_str = '_'.join(proxy.names())
ordinal = proxy.ordinal()
if ordinal!=0:
name_str += '_'+str(ordinal)
# id change set is this object, plus any children
# add subsystems that are not agents to prevent changes to rigs
# from including the agents they contain
agents = self.__propcache.get_idset('agent')
changed_nodes = set(self.find_joined_slaves(id)).difference(agents)
changed_nodes.add(id)
changed_nodes_frozenset = self.find_all_descendants(frozenset(changed_nodes))
#for changed_node in changed_nodes_frozenset:
# print changed_node, self.find_full_desc(changed_node)
if self.__widget_manager is not None:
self.__widget_manager.check_widget_name_updates(changed_nodes_frozenset)
def set_widget_manager(self, widget_manager):
self.__widget_manager = widget_manager
| gpl-3.0 | 3,107,174,016,394,879,500 | 34.511111 | 99 | 0.62766 | false | 3.970186 | false | false | false |
akhilraj95/xpense | xpense2/flock/action.py | 1 | 6240 | import secret
from models import User,Currency,Expense,Bill
import urllib2,json
import urllib2
import urllib
from docx import Document
import time
from docx.shared import Inches
import os
from datetime import timedelta
from django.core.files import File
#FlockOS
from pyflock import FlockClient, verify_event_token
from pyflock import Message, SendAs, Attachment, Views, WidgetView, HtmlView, ImageView, Image, Download, Button, OpenWidgetAction, OpenBrowserAction, SendToAppAction
def appInstall(pjson):
try:
userId = pjson['userId']
token = pjson['token']
u = User(userId = userId, token=token)
u.save()
except:
raise
def appUninstall(pjson):
try:
userId = pjson['userId']
User.objects.get(userId=userId).delete()
except:
raise
def sendMessage(chat_id,userId,message):
try:
user = User.objects.get(userId=userId)
flock_client = FlockClient(token=user.token, app_id=secret.getAppID)
send_as_xpense = SendAs(name='Xpense', profile_image='https://pbs.twimg.com/profile_images/1788506913/HAL-MC2_400x400.png')
send_as_message = Message(to=chat_id,text=message,send_as=send_as_xpense)
flock_client.send_chat(send_as_message)
except:
raise
def total(expense_list,trackObj):
try:
currency_list = Currency.objects.all()
total = {}
for curr in currency_list:
total[curr.abbr] = 0
for expense in expense_list:
total[expense.currency.abbr]+=expense.amount
total = {key: value for key, value in total.items()
                 if value != 0}
target_curr = str(trackObj.budget_currency.abbr)
target_value = 0
perc_spent = 0
if target_curr in total.keys():
target_value = total[target_curr]
#check if there is a budget
if trackObj.budget != 0:
#converting all currencies to budget currency
target_value = 0
symbols = total.keys()
rates = getconversionrates(target_curr,symbols)
target_value = 0
for key, value in total.items():
if key==target_curr:
target_value+=value
else:
target_value+= round(value/rates[key],2)
perc_spent = round((target_value/trackObj.budget)*100,2)
return total,target_value,perc_spent
except:
raise
def getconversionrates(to_curr,from_curr_list):
if to_curr in from_curr_list:
from_curr_list.remove(to_curr)
from_curr_list_str = ','.join(from_curr_list)
API_string = 'http://api.fixer.io/latest?base='+to_curr+'&symbols='+from_curr_list_str
response = urllib2.urlopen(API_string).read()
pjson = json.loads(response)
return pjson['rates']
def fetchMessagePictures(group_id,token,uids):
data = [('chat',str(group_id)),('token',str(token)),('uids',uids)]
url = 'https://api.flock.co/v1/chat.fetchMessages'
req = urllib2.Request(url, headers={'Content-Type' : 'application/x-www-form-urlencoded'})
result = urllib2.urlopen(req, urllib.urlencode(data))
content = result.read()
content = json.loads(content)
src_list = []
for data in content:
for attachment in data['attachments']:
for fil in attachment['downloads']:
if str(fil['mime']) in ['image/jpeg','image/png']:
src_list.append(fil['src'])
return src_list
def report(track,userId):
document = Document()
document.add_heading('Expense Report - '+ str(track.name))
status = '\nPurpose : '+str(track.purpose)+'\n'
now = time.strftime("%c")
status = status + 'Report date: '+str(now)+'\n'
expense_list = Expense.objects.filter(track = track)
user = User.objects.get(userId=userId)
flock_client = FlockClient(token=user.token, app_id=secret.getAppID)
pjson = flock_client.get_user_info()
utc = str(pjson['timezone'])
hours = int(utc[1:3])
minutes = int(utc[4:6])
if(utc[0]=='+'):
for expense in expense_list:
expense.timestamp += timedelta(hours=hours,minutes=minutes)
else:
for expense in expense_list:
expense.timestamp -= timedelta(hours=hours,minutes=minutes)
total_expense_by_curr,converted_total,perc_spent = total(expense_list,track)
if track.budget != 0:
status = status + 'Budget: '+str(track.budget_currency.abbr)+' '+str(track.budget)+'\n'
status = status + 'Total spent: '+str(track.budget_currency.abbr)+' '+ str(converted_total)+'\n'
status = status + 'Spending: '+str(perc_spent)+'%'+'\n'
status = status + 'Spending per currency:\n'
for key,value in total_expense_by_curr.items():
status = status+' - '+str(key)+' '+str(value)+'\n'
paragraph = document.add_paragraph(status)
#table
table = document.add_table(rows=1, cols=4)
heading_cells = table.rows[0].cells
heading_cells[0].text = 'Purpose'
heading_cells[1].text = 'Paid By'
heading_cells[2].text = 'Time'
heading_cells[3].text = 'Amount'
for expense in expense_list:
cells = table.add_row().cells
cells[0].text = expense.purpose
cells[1].text = expense.paidby
cells[2].text = str(expense.timestamp)
cells[3].text = expense.currency.abbr +' '+str(expense.amount)
filename = 'media/'+str(track.chat.chat_id[2:])+'_'+str(track.id)+'.docx'
download_bills(expense_list,document)
document.save(filename)
django_file = File(open(filename,'r'))
print (django_file.name)
return django_file
def download_bills(expense_list,document):
url_list = []
for expense in expense_list:
ul = Bill.objects.filter(expense = expense)
for u in ul:
url_list.append(str(u.url))
print url_list
if(len(url_list)):
document.add_page_break()
file_list = []
for url in url_list:
f = open(url[33:],'wb')
f.write(urllib.urlopen(url).read())
f.close()
file_list.append(url[33:])
for fil in file_list:
document.add_picture(fil,width=Inches(6.0))
os.remove(fil)
| mit | 6,812,528,329,307,556,000 | 32.913043 | 166 | 0.620833 | false | 3.476323 | false | false | false |
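getconversionrates() above asks fixer.io for rates with the budget currency as the base, which is why total() converts each foreign sub-total by dividing by its rate. A small, self-contained illustration of that arithmetic with made-up numbers (no network call; not part of the original app):

# Illustration of how total() combines per-currency sums with base-currency rates.
budget_currency = 'CHF'
per_currency_totals = {'CHF': 120.0, 'USD': 50.0, 'EUR': 30.0}   # invented amounts
rates = {'USD': 0.98, 'EUR': 0.93}                               # invented base=CHF rates

total_in_budget_currency = 0.0
for currency, amount in per_currency_totals.items():
    if currency == budget_currency:
        total_in_budget_currency += amount
    else:
        total_in_budget_currency += round(amount / rates[currency], 2)

print(total_in_budget_currency)   # 120.00 + 51.02 + 32.26 = 203.28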
chromium/chromium | tools/android/dependency_analysis/print_package_dependencies.py | 7 | 4391 | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command-line tool for printing package-level dependencies."""
import argparse
import graph
import print_dependencies_helper
import serialization
def print_package_dependencies_for_edge(begin, end):
"""Prints dependencies for an edge in the package graph.
Since these are package edges, we also print the class dependency edges
comprising the printed package edge.
"""
if begin == end:
return
print(f'\t{begin.name} -> {end.name}')
class_deps = begin.get_class_dependencies_in_outbound_edge(end)
print(f'\t{len(class_deps)} class edge(s) comprising the dependency:')
for begin_class, end_class in graph.sorted_edges_by_name(class_deps):
print(f'\t\t{begin_class.class_name} -> {end_class.class_name}')
def print_package_dependencies_for_key(package_graph, key, ignore_subpackages):
"""Prints dependencies for a valid key into the package graph.
Since we store self-edges for the package graph
but they aren't relevant in this case, we skip them.
"""
node = package_graph.get_node_by_key(key)
inbound_without_self = [other for other in node.inbound if other != node]
print(f'{len(inbound_without_self)} inbound dependency(ies) '
f'for {node.name}:')
for inbound_dep in graph.sorted_nodes_by_name(inbound_without_self):
if ignore_subpackages and inbound_dep.name.startswith(node.name):
continue
print_package_dependencies_for_edge(inbound_dep, node)
outbound_without_self = [other for other in node.outbound if other != node]
print(f'{len(outbound_without_self)} outbound dependency(ies) '
f'for {node.name}:')
for outbound_dep in graph.sorted_nodes_by_name(outbound_without_self):
if ignore_subpackages and outbound_dep.name.startswith(node.name):
continue
print_package_dependencies_for_edge(node, outbound_dep)
def main():
"""Prints package-level dependencies for an input package."""
arg_parser = argparse.ArgumentParser(
description='Given a JSON dependency graph, output the package-level '
'dependencies for a given package and the '
'class dependencies comprising those dependencies')
required_arg_group = arg_parser.add_argument_group('required arguments')
required_arg_group.add_argument(
'-f',
'--file',
required=True,
help='Path to the JSON file containing the dependency graph. '
'See the README on how to generate this file.')
required_arg_group.add_argument(
'-p',
'--package',
required=True,
help='Case-insensitive name of the package to print dependencies for. '
'Matches names of the form ...input, for example '
'`browser` matches `org.chromium.browser`.')
optional_arg_group = arg_parser.add_argument_group('optional arguments')
optional_arg_group.add_argument(
'-s',
'--ignore-subpackages',
action='store_true',
help='If present, this tool will ignore dependencies between the '
'given package and subpackages. For example, if given '
'browser.customtabs, it won\'t print a dependency between '
'browser.customtabs and browser.customtabs.content.')
arguments = arg_parser.parse_args()
_, package_graph, _ = serialization.load_class_and_package_graphs_from_file(
arguments.file)
package_graph_keys = [node.name for node in package_graph.nodes]
valid_keys = print_dependencies_helper.get_valid_package_keys_matching(
package_graph_keys, arguments.package)
if len(valid_keys) == 0:
print(f'No package found by the name {arguments.package}.')
elif len(valid_keys) > 1:
print(f'Multiple valid keys found for the name {arguments.package}, '
'please disambiguate between one of the following options:')
for valid_key in valid_keys:
print(f'\t{valid_key}')
else:
print(f'Printing package dependencies for {valid_keys[0]}:')
print_package_dependencies_for_key(package_graph, valid_keys[0],
arguments.ignore_subpackages)
if __name__ == '__main__':
main()
| bsd-3-clause | 2,967,168,041,341,277,000 | 41.221154 | 80 | 0.670462 | false | 3.948741 | false | false | false |
glibersat/brewpi-service | brewblox_service/scheduler.py | 2 | 5233 | """
Background task scheduling.
"""
import asyncio
from contextlib import suppress
from typing import Any, Coroutine, Set
from aiohttp import web
from brewblox_service import features
CLEANUP_INTERVAL_S = 300
def setup(app: web.Application):
features.add(app, TaskScheduler(app))
def get_scheduler(app: web.Application) -> 'TaskScheduler':
return features.get(app, TaskScheduler)
async def create_task(app: web.Application,
coro: Coroutine,
*args, **kwargs
) -> asyncio.Task:
"""
Convenience function for calling `TaskScheduler.create(coro)`
This will use the default `TaskScheduler` to create a new background task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def start(app):
await scheduler.create_task(app, current_time(interval=2))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).create(coro, *args, **kwargs)
async def cancel_task(app: web.Application,
task: asyncio.Task,
*args, **kwargs
) -> Any:
"""
Convenience function for calling `TaskScheduler.cancel(task)`
This will use the default `TaskScheduler` to cancel the given task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def stop_after(app, task, duration):
await asyncio.sleep(duration)
await scheduler.cancel_task(app, task)
print('stopped!')
async def start(app):
# Start first task
task = await scheduler.create_task(app, current_time(interval=2))
# Start second task to stop the first
await scheduler.create_task(app, stop_after(app, task, duration=10))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).cancel(task, *args, **kwargs)
class TaskScheduler(features.ServiceFeature):
def __init__(self, app: web.Application):
super().__init__(app)
self._tasks: Set[asyncio.Task] = set()
async def startup(self, *_):
await self.create(self._cleanup())
async def shutdown(self, *_):
[task.cancel() for task in self._tasks]
await asyncio.wait(self._tasks)
self._tasks.clear()
async def _cleanup(self):
"""
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
"""
while True:
await asyncio.sleep(CLEANUP_INTERVAL_S)
self._tasks = {t for t in self._tasks if not t.done()}
async def create(self, coro: Coroutine) -> asyncio.Task:
"""
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
"""
task = asyncio.get_event_loop().create_task(coro)
self._tasks.add(task)
return task
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
"""
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
"""
if task is None:
return
task.cancel()
with suppress(KeyError):
self._tasks.remove(task)
with suppress(Exception):
return (await task) if wait_for else None
| gpl-3.0 | -7,514,386,439,900,776,000 | 28.072222 | 100 | 0.604624 | false | 4.453617 | false | false | false |
boto/botoflow | botoflow/flow_types/workflow_type.py | 1 | 7947 | # Copyright 2016 Darjus Loktevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..data_converter import JSONDataConverter, AbstractDataConverter
from ..constants import USE_WORKER_TASK_LIST, CHILD_TERMINATE
from ..utils import str_or_NONE, snake_keys_to_camel_case
from ..context import get_context, DecisionContext, StartWorkflowContext
from ..workflow_execution import WorkflowExecution
from .base_flow_type import BaseFlowType
class WorkflowType(BaseFlowType):
_continue_as_new_keys = (('taskStartToCloseTimeout', 'task_start_to_close_timeout'),
('childPolicy', 'child_policy'),
('taskList', 'task_list'),
('taskPriority', 'task_priority'),
('executionStartToCloseTimeout', 'execution_start_to_close_timeout'),
('version', 'version'),
('input', 'input'))
DEFAULT_DATA_CONVERTER = JSONDataConverter()
def __init__(self,
version,
execution_start_to_close_timeout,
task_list=USE_WORKER_TASK_LIST,
task_priority=None,
task_start_to_close_timeout=30, # as in java flow
child_policy=CHILD_TERMINATE,
description="",
name=None,
data_converter=None,
skip_registration=False):
self.version = version
self.name = name
self.task_list = task_list
self.task_priority = task_priority
self.child_policy = child_policy
self.execution_start_to_close_timeout = execution_start_to_close_timeout
self.task_start_to_close_timeout = task_start_to_close_timeout
self.description = description
self.skip_registration = skip_registration
self.workflow_id = None
self.data_converter = data_converter
@property
def data_converter(self):
return self._data_converter
@data_converter.setter
def data_converter(self, converter):
if converter is None: # set the default
self._data_converter = self.DEFAULT_DATA_CONVERTER
return
if isinstance(converter, AbstractDataConverter):
self._data_converter = converter
return
raise TypeError("Converter {0!r} must be a subclass of {1}"
.format(converter, AbstractDataConverter.__name__))
# noinspection PyShadowingBuiltins
def to_decision_dict(self, input, workflow_id=None, worker_task_list=None, domain=None):
task_list = self.task_list
if task_list == USE_WORKER_TASK_LIST:
task_list = worker_task_list
serialized_input = self.data_converter.dumps(input)
decision_dict = {
'workflowType': {'version': self.version, 'name': self.name},
'taskList': {'name': str_or_NONE(task_list)},
'childPolicy': str_or_NONE(self.child_policy),
'executionStartToCloseTimeout': str_or_NONE(
self.execution_start_to_close_timeout),
'taskStartToCloseTimeout': str_or_NONE(
self.task_start_to_close_timeout),
'input': serialized_input}
if self.task_priority is not None:
decision_dict['taskPriority'] = str_or_NONE(self.task_priority)
# for child workflows
if workflow_id is not None and self.workflow_id is None:
decision_dict['workflowId'] = workflow_id
if domain is not None:
decision_dict['domain'] = domain
# apply any overrides
context = get_context()
_decision_dict = {}
_decision_dict.update(decision_dict)
_decision_dict.update(snake_keys_to_camel_case(context._workflow_options_overrides))
return _decision_dict
# noinspection PyShadowingBuiltins
def to_continue_as_new_dict(self, input, worker_task_list):
decision_dict = self.to_decision_dict(
input, worker_task_list=worker_task_list)
continue_as_new_dict = {}
for key, continue_as_new_key in self._continue_as_new_keys:
try:
continue_as_new_dict[continue_as_new_key] = decision_dict[key]
except KeyError:
pass
return continue_as_new_dict
def to_registration_options_dict(self, domain, worker_task_list):
if self.skip_registration:
return None
task_list = self.task_list
if task_list == USE_WORKER_TASK_LIST:
task_list = worker_task_list
registration_options = {
'domain': domain,
'version': self.version,
'name': self.name,
'defaultTaskList': {'name': str_or_NONE(task_list)},
'defaultChildPolicy': str_or_NONE(self.child_policy),
'defaultExecutionStartToCloseTimeout': str_or_NONE(
self.execution_start_to_close_timeout),
'defaultTaskStartToCloseTimeout': str_or_NONE(
self.task_start_to_close_timeout),
'description': str_or_NONE(self.description)
}
if self.task_priority is not None:
registration_options['defaultTaskPriority'] = str_or_NONE(self.task_priority)
return registration_options
def _reset_name(self, name, force=False):
# generate workflow name
if self.name is None or force:
self.name = name
def __call__(self, __class_and_instance, *args, **kwargs):
_class, _instance = __class_and_instance
context = get_context()
if isinstance(context, StartWorkflowContext):
workflow_id, run_id = context.worker._start_workflow_execution(
self, *args, **kwargs)
# create an instance with our new workflow execution info
workflow_instance = _class(WorkflowExecution(workflow_id, run_id))
workflow_instance._data_converter = self.data_converter
return workflow_instance
elif isinstance(context, DecisionContext):
if context.decider.execution_started:
if context._workflow_instance == _instance:
continue_as_new_dict = self.to_continue_as_new_dict(
[args, kwargs], context.decider.task_list)
return context.decider._continue_as_new_workflow_execution(
**continue_as_new_dict)
else:
# create an instance with our new workflow execution info
# but don't set the workflow_id and run_id as we don't yet
# know them
workflow_instance = _class(WorkflowExecution(None,
None))
workflow_instance._data_converter = self.data_converter
future = context.decider._handle_start_child_workflow_execution(
self, workflow_instance, [args, kwargs])
return future
else:
raise NotImplementedError("Unsupported context")
def __hash__(self):
return hash("{0}{1}".format(self.name, self.version))
def __repr__(self):
return "<{} (name={}, version={})>".format(self.__class__.__name__,
self.name, self.version)
| apache-2.0 | 6,293,975,457,061,321,000 | 40.176166 | 98 | 0.594564 | false | 4.337882 | false | false | false |
Xeratec/py-general | fractal_qt4_mpl.py | 1 | 12284 | #!/usr/bin/python
'''
@file fractal_qt4_mpl.py
@author Philip Wiese
@date 12 Okt 2016
@brief Displays Mandelbrot Set with PyQt4 and Matplotlib
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.pyplot import *
from matplotlib.widgets import RectangleSelector
from numpy import log10
from fractal_qt4_mpl_lib import mandelbrot
# from gtk._gtk import Alignment  # stray, unused import; not needed for this PyQt4 script
###### Config #######
re_min = 0.385
re_max = 0.395
im_min = 0.135
im_max = 0.145
max_betr = 2
max_iter = 100
res = 400 # X Resolution
cont = True # Show continual color
norm = True # Normalize Values
######################
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Mandelbrot Set')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
#
# Initialize textbox values
#
self.textbox_re_min.setText(str(re_min))
self.textbox_re_max.setText(str(re_max))
self.textbox_im_min.setText(str(im_min))
self.textbox_im_max.setText(str(im_max))
self.textbox_max_iter.setText(str(max_iter))
#
# Render mandelbrot set
#
self.setMinimumWidth(620)
self.resize(620, 460)
self.draw()
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices))
if path:
self.canvas.print_figure(path)
self.statusBar().showMessage('Saved to %s' % path, 2000)
#
# Display infos about application
#
def on_about(self):
msg = """Mandelbrot Set Generator:
### Features ###
* Click left mouse button and drag to zoom
        * Enter custom values for ReMin, ReMax, ImMin and ImMax
* Show or hide the grid
* Save the plot to a file using the File menu
* De-/activate continuous color spectrum
* De-/activate normalized values
### Used Libraries ###
* PyQt4
* Matplotlib
### Author ###
Made by Philip Wiese
[email protected]
16. Oktober 2016
"""
QMessageBox.about(self, "About the demo", msg.strip())
#
# Show mouse position in statusbar
#
def statusbar_coord(self, event):
        # Show mouse coordinates in the statusbar
if event.inaxes is not None:
text = "Re(c): % .5f, Im(c) % .5f" % (event.xdata, event.ydata)
self.coord_text.setText(text)
#
# Calculates mandelbrot set and updates mpl plot
#
def draw(self):
""" Redraws the figure
"""
        # Grab values from textboxes
re_min = float(unicode(self.textbox_re_min.text()))
re_max = float(unicode(self.textbox_re_max.text()))
im_min = float(unicode(self.textbox_im_min.text()))
im_max = float(unicode(self.textbox_im_max.text()))
max_iter = int(unicode(self.textbox_max_iter.text()))
        # Grab values from checkboxes
self.axes.grid(self.grid_cb.isChecked())
cont = self.cont_cb.isChecked()
norm = self.norm_cb.isChecked()
# Calculate mandelbrot set
self.fractal = mandelbrot(re_min, re_max, im_min, im_max, max_betr, max_iter, res, cont)
# Normalize Values
if norm:
self.fractal.data[self.fractal.data > 0] -= self.fractal.min
# Show calculation time in statusbar
self.status_text.setText("Calculation Time: %0.3fs" % self.fractal.calc_time)
# Load data to mpl plot
self.axes.imshow(self.fractal.data.T, origin="lower left", cmap='jet', extent=[re_min, re_max, im_min, im_max])
self.axes.set_xlabel("Re(c)", labelpad=20)
self.axes.set_ylabel("Im(c)")
# Show/hide grid
if self.grid_cb.isChecked():
self.axes.grid(linewidth=1, linestyle='-')
# Align layout and redraw plot
self.canvas.draw_idle()
#self.fig.tight_layout()
def line_select_callback(self, eclick, erelease):
# eclick and erelease are the press and release events
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
# Zoom with left mouse click
if eclick.button == 1:
# Check for valid coordinates
if (x1 != None and y2 != None and x1 != None and y1 != None):
self.xmin = min(x1, x2)
self.xmax = max(x1, x2)
self.ymin = min(y1, y2)
self.ymax = max(y1, y2)
# Save array with relative values
self.xy = [self.xmax - self.xmin, self.ymax - self.ymin]
# Calculate precision in decimal digits
for v in self.xy:
if v <= 1:
self.decimals = round(log10(1 / v)) + 2
# Round values with calculated precision
re_min = round(self.xmin, int(self.decimals))
re_max = round(self.xmax, int(self.decimals))
im_min = round(self.ymin, int(self.decimals))
im_max = round(self.ymax, int(self.decimals))
                # Update textbox values
self.textbox_re_min.setText(str(re_min))
self.textbox_re_max.setText(str(re_max))
self.textbox_im_min.setText(str(im_min))
self.textbox_im_max.setText(str(im_max))
# Calculate and draw new mandelbrot set
self.draw()
# Zoom with right mouse click
if eclick.button == 3:
            # Grab values from textboxes
re_min = float(unicode(self.textbox_re_min.text()))
re_max = float(unicode(self.textbox_re_max.text()))
im_min = float(unicode(self.textbox_im_min.text()))
im_max = float(unicode(self.textbox_im_max.text()))
self.xy = [ re_max - re_min, im_max - im_min]
# Calculate new values
re_min = re_min - self.xy[0] / 2
re_max = re_max + self.xy[0] / 2
im_min = im_min - self.xy[1] / 2
im_max = im_max + self.xy[1] / 2
# Calculate precision in decimal digits
for v in self.xy:
if v <= 1:
self.decimals = round(log10(1 / v)) + 2
# Round values with calculated precision
re_min = round(re_min, int(self.decimals))
re_max = round(re_max, int(self.decimals))
im_min = round(im_min, int(self.decimals))
im_max = round(im_max, int(self.decimals))
            # Update textbox values
self.textbox_re_min.setText(str(re_min))
self.textbox_re_max.setText(str(re_max))
self.textbox_im_min.setText(str(im_min))
self.textbox_im_max.setText(str(im_max))
# Calculate and draw new mandelbrot set
self.draw()
def create_main_frame(self):
self.main_frame = QWidget()
self.main_frame.setMinimumHeight(280)
# Create the Figure and FigCanvas objects
self.fig = Figure((5,10), tight_layout=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
        # Add subplot to figure and apply formatting
self.axes = self.fig.add_subplot(111)
self.axes.ticklabel_format(style='sci', scilimits=(0,0), axis='both')
# Create zoom event handler
self.RS = RectangleSelector(self.axes, self.line_select_callback,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
spancoords='data')
# Other GUI controls
self.textbox_re_min = QLineEdit()
self.textbox_re_min_text = QLabel("ReMin: ")
self.textbox_re_min.setMinimumWidth(55)
self.textbox_re_max = QLineEdit()
self.textbox_re_max_text = QLabel("ReMax: ")
self.textbox_re_max.setMinimumWidth(55)
self.textbox_im_min = QLineEdit()
self.textbox_im_min_text = QLabel("ImMin: ")
self.textbox_im_min.setMinimumWidth(55)
self.textbox_im_max = QLineEdit()
self.textbox_im_max_text = QLabel("ImMax: ")
self.textbox_im_max.setMinimumWidth(55)
self.textbox_max_iter = QLineEdit()
self.textbox_max_iter_text = QLabel("Max Iterration: ")
self.textbox_max_iter.setMinimumWidth(55)
self.grid_cb = QCheckBox("Show Grid")
self.grid_cb.setChecked(False)
self.cont_cb = QCheckBox("Continuous Coloring")
self.cont_cb.setChecked(True)
self.norm_cb = QCheckBox("Normalize Values")
self.norm_cb.setChecked(True)
self.draw_button = QPushButton("Calculate && Draw")
self.connect(self.draw_button, SIGNAL('clicked()'), self.draw)
#
# Layout with box sizers
#
hbox = QHBoxLayout()
grid = QGridLayout()
hbox.addWidget(self.canvas, 3)
self.canvas.setCursor(Qt.CrossCursor)
hbox.addLayout(grid,1)
grid.setRowStretch(1,1)
grid.addWidget(self.textbox_re_min , 0,1)
grid.addWidget(self.textbox_re_min_text , 0,0)
grid.addWidget(self.textbox_re_max , 1,1)
grid.addWidget(self.textbox_re_max_text , 1,0)
grid.addWidget(self.textbox_im_min , 2,1)
grid.addWidget(self.textbox_im_min_text , 2,0)
grid.addWidget(self.textbox_im_max , 3,1)
grid.addWidget(self.textbox_im_max_text , 3,0)
grid.addWidget(self.textbox_max_iter , 5,1)
grid.addWidget(self.textbox_max_iter_text , 5,0)
grid.addWidget(self.grid_cb , 6,0,1,2)
grid.addWidget(self.cont_cb , 7,0,1,2)
grid.addWidget(self.norm_cb , 8,0,1,2)
grid.addWidget(self.draw_button , 9,0,1,2)
grid.addWidget(QLabel(""), 10,0,2,2)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("Ready")
self.coord_text = QLabel("Re(c): % 7f, Im(c) % 7f" % (0, 0))
self.canvas.mpl_connect("motion_notify_event", self.statusbar_coord)
self.statusBar().addWidget(self.status_text, 1)
self.statusBar().addWidget(self.coord_text, -1)
def create_menu(self):
# -- Menu Structure --
# File
# Save plot (Ctrl+S)
# Quit (Ctrl+Q)
# Help
# About (F1)
#
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",
shortcut="Ctrl+S", slot=self.save_plot,
tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the application')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action(self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
if __name__ == '__main__':
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
| mit | 8,328,964,092,956,857,000 | 32.84022 | 119 | 0.572289 | false | 3.51373 | false | false | false |
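The GUI above imports mandelbrot() from fractal_qt4_mpl_lib, which is not included in this record. Judging only from the attributes the GUI touches (a .data array indexed [x, y], .min, .calc_time) and the call signature (re/im bounds, escape magnitude max_betr, iteration cap, x-resolution, continuous-coloring flag), an assumed-compatible helper could look like the sketch below. It is a stand-in written for illustration, not the original library.

# Assumed-compatible stand-in for fractal_qt4_mpl_lib.mandelbrot(); names and
# behaviour are inferred from how AppForm.draw() uses the result.
import time
import numpy as np

class _Result(object):
    def __init__(self, data, calc_time):
        self.data = data                      # data[x, y]; 0 where the point never escaped
        escaped = data[data > 0]
        self.min = escaped.min() if escaped.size else 0.0
        self.calc_time = calc_time

def mandelbrot(re_min, re_max, im_min, im_max, max_betr, max_iter, res, cont):
    start = time.time()
    # Assumption: the y-resolution follows the aspect ratio of the requested window.
    y_res = max(1, int(round(res * (im_max - im_min) / (re_max - re_min))))
    re = np.linspace(re_min, re_max, res)
    im = np.linspace(im_min, im_max, y_res)
    c = re[:, None] + 1j * im[None, :]        # shape (res, y_res) so data.T plots correctly
    z = np.zeros_like(c)
    data = np.zeros(c.shape, dtype=float)
    for n in range(1, max_iter + 1):
        active = np.abs(z) <= max_betr        # points that have not escaped yet
        z[active] = z[active] ** 2 + c[active]
        just_escaped = active & (np.abs(z) > max_betr)
        if cont:
            # smooth ("continuous") escape-time colouring
            data[just_escaped] = n + 1 - np.log(np.log(np.abs(z[just_escaped]))) / np.log(2)
        else:
            data[just_escaped] = n
    return _Result(data, time.time() - start)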
michelesr/coding-events | web/processors/user.py | 1 | 1859 | from django.contrib.auth.models import User
from django_countries import countries
def get_user(user_id):
user = User.objects.get(id=user_id)
return user
def get_user_profile(user_id):
user = User.objects.get(id=user_id)
return user.profile
def get_ambassadors(country_code=None):
ambassadors = []
all_ambassadors = User.objects.filter(
groups__name='ambassadors').order_by('date_joined')
for ambassador in all_ambassadors:
if country_code:
if ambassador.profile.country == country_code:
ambassadors.append(ambassador.profile)
else:
ambassadors.append(ambassador.profile)
return ambassadors
def get_ambassadors_for_countries():
ambassadors = get_ambassadors()
countries_ambassadors = []
# list countries minus two CUSTOM_COUNTRY_ENTRIES
for code, name in list(countries)[2:]:
readable_name = unicode(name)
country_ambassadors = [ambassador for ambassador in ambassadors if ambassador.country == code]
# load main ambassadors
main_ambassadors = [ambassador for ambassador in country_ambassadors if ambassador.is_main_contact]
# exclude main ambassadors
supporting_ambassadors = [ambassador for ambassador in country_ambassadors if not ambassador.is_main_contact]
countries_ambassadors.append(
(code, readable_name, supporting_ambassadors, main_ambassadors))
countries_ambassadors.sort()
return countries_ambassadors
def get_ambassadors_for_country(country):
ambassadors = User.objects.filter(
groups__name='ambassadors',
userprofile__country=country)
return ambassadors
def update_user_email(user_id, new_email):
user = User.objects.get(id=user_id)
user.email = new_email
user.save(update_fields=["email"])
return user
| mit | 5,778,873,849,347,322,000 | 32.196429 | 117 | 0.693921 | false | 3.392336 | false | false | false |
rxncon/rxncon | rxncon/input/quick/quick.py | 1 | 2873 | """Module containing the class Quick, a text-based format to read a rxncon system."""
import re
from typing import List, Optional
from rxncon.input.shared.contingency_list import contingencies_from_contingency_list_entries, \
contingency_list_entry_from_strs, ContingencyListEntry
from rxncon.core.reaction import reaction_from_str
from rxncon.core.rxncon_system import RxnConSystem
from rxncon.core.reaction import Reaction
from rxncon.core.contingency import Contingency
from rxncon.input.shared.reaction_preprocess import split_bidirectional_reaction_str
class Quick:
def __init__(self, rxncon_str: str) -> None:
self.quick_input = rxncon_str.split('\n')
self._rxncon_system = None # type: Optional[RxnConSystem]
self._reactions = [] # type: List[Reaction]
self._contingencies = [] # type: List[Contingency]
self._contingency_list_entries = [] # type: List[ContingencyListEntry]
self._parse_str()
self._construct_contingencies()
self._construct_rxncon_system()
assert self._rxncon_system is not None
@property
def rxncon_system(self) -> RxnConSystem:
assert self._rxncon_system is not None
return self._rxncon_system
def _parse_str(self) -> None:
BOOL_REGEX = '^\<.+?\>$'
for line in self.quick_input:
reaction_string = line.split(';')[0].strip()
contingency_strings = line.split(';')[1:]
if reaction_string:
if not re.match(BOOL_REGEX, reaction_string):
self._add_reaction_from_string(reaction_string)
self._add_contingency_list_entries(contingency_strings, reaction_string)
def _add_reaction_from_string(self, reaction_str: str) -> None:
reaction_strs = split_bidirectional_reaction_str(reaction_str)
for rxn in reaction_strs:
reaction = reaction_from_str(rxn)
self._reactions.append(reaction)
def _add_contingency_list_entries(self, contingency_strs: List[str], reaction_str: str) -> None:
for cont in contingency_strs:
cont = cont.strip()
cont_type = cont.split()[0]
modifier = cont.split()[-1]
# If the verb is bidirectional, only apply the contingency to the forward direction.
reaction_strs = split_bidirectional_reaction_str(reaction_str)
entry = contingency_list_entry_from_strs(reaction_strs[0], cont_type, modifier)
self._contingency_list_entries.append(entry)
def _construct_contingencies(self) -> None:
self._contingencies = contingencies_from_contingency_list_entries(self._contingency_list_entries)
def _construct_rxncon_system(self) -> None:
self._rxncon_system = RxnConSystem(self._reactions, self._contingencies) | lgpl-3.0 | 188,836,107,975,411,780 | 43.90625 | 105 | 0.655064 | false | 3.664541 | false | false | false |
kaiocesar/easy-bank | bank/models.py | 1 | 1098 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
class Account(models.Model):
account_number = models.CharField(max_length=30)
agency_number = models.CharField(max_length=30)
created_at = models.DateTimeField(auto_now=True)
status = models.BooleanField(default="")
accountant = models.ForeignKey(
User,
on_delete=models.CASCADE
)
class Meta:
unique_together = ('accountant','account_number','agency_number')
class Cards(models.Model):
name = models.CharField(max_length=100, unique=True)
description = models.CharField(max_length=100)
flag = models.CharField(max_length=100)
status = models.BooleanField(default=True)
def save(self, *args, **kwargs):
cards = Cards.objects.filter(name=self.name)
if len(cards) == 0:
super(Cards, self).save(*args, **kwargs)
def __str__(self):
return self.name
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Card'
verbose_name_plural = 'Cards'
| mit | -607,595,879,829,265,300 | 27.153846 | 73 | 0.645719 | false | 3.69697 | false | false | false |
diN0bot/ProcrasDonate | lib/ssl_middleware.py | 1 | 1526 | __license__ = "Python"
__copyright__ = "Copyright (C) 2007, Stephen Zabel"
__author__ = "Stephen Zabel - [email protected]"
__contributors__ = "Jay Parlar - [email protected]"
"""
From django snippets:
http://www.djangosnippets.org/snippets/85/
"""
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect, get_host
SSL = 'SSL'
class SSLRedirect:
def process_view(self, request, view_func, view_args, view_kwargs):
if SSL in view_kwargs:
secure = view_kwargs[SSL]
del view_kwargs[SSL]
else:
secure = False
if not secure == self._is_secure(request):
return self._redirect(request, secure)
def _is_secure(self, request):
if request.is_secure():
return True
#Handle the Webfaction case until this gets resolved in the request.is_secure()
if 'HTTP_X_FORWARDED_SSL' in request.META:
return request.META['HTTP_X_FORWARDED_SSL'] == 'on'
return False
def _redirect(self, request, secure):
protocol = secure and "https" or "http"
newurl = "%s://%s%s" % (protocol,get_host(request),request.get_full_path())
if settings.DEBUG and request.method == 'POST':
raise RuntimeError, \
"""Django can't perform a SSL redirect while maintaining POST data.
Please structure your views so that redirects only occur during GETs."""
return HttpResponsePermanentRedirect(newurl)
| agpl-3.0 | -841,247,359,315,681,800 | 32.173913 | 87 | 0.63827 | false | 3.912821 | false | false | false |
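The middleware above reads the SSL flag out of the resolved view's keyword arguments, so the switch is made per-URL in the URLconf. A hedged example for a Django 1.x-era urls.py — the app and view names are placeholders, and MIDDLEWARE_CLASSES is assumed to include this SSLRedirect class:

# urls.py sketch: pass {'SSL': True} as extra view kwargs to force https for a view.
from django.conf.urls.defaults import patterns

urlpatterns = patterns('myapp.views',
    (r'^checkout/$', 'checkout', {'SSL': True}),    # redirected to https
    (r'^about/$',    'about',    {'SSL': False}),   # redirected to plain http
    (r'^news/$',     'news'),                       # no flag: treated as non-SSL
)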