repo_name
stringlengths
5
92
path
stringlengths
4
221
copies
stringclasses
19 values
size
stringlengths
4
6
content
stringlengths
766
896k
license
stringclasses
15 values
hash
int64
-9,223,277,421,539,062,000
9,223,102,107B
line_mean
float64
6.51
99.9
line_max
int64
32
997
alpha_frac
float64
0.25
0.96
autogenerated
bool
1 class
ratio
float64
1.5
13.6
config_test
bool
2 classes
has_no_keywords
bool
2 classes
few_assignments
bool
1 class
dc3-plaso/plaso
tests/storage/fake_storage.py
1
6205
#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for the fake storage.""" import unittest from plaso.containers import errors from plaso.containers import event_sources from plaso.containers import reports from plaso.containers import sessions from plaso.containers import tasks from plaso.lib import definitions from plaso.storage import fake_storage from plaso.storage import zip_file from tests import test_lib as shared_test_lib from tests.storage import test_lib class FakeStorageWriterTest(test_lib.StorageTestCase): """Tests for the fake storage writer object.""" def testAddAnalysisReport(self): """Tests the AddAnalysisReport function.""" session = sessions.Session() analysis_report = reports.AnalysisReport( plugin_name=u'test', text=u'test report') storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.AddAnalysisReport(analysis_report) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddAnalysisReport(analysis_report) def testAddError(self): """Tests the AddError function.""" session = sessions.Session() extraction_error = errors.ExtractionError( message=u'Test extraction error') storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.AddError(extraction_error) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddError(extraction_error) def testAddEvent(self): """Tests the AddEvent function.""" session = sessions.Session() test_events = self._CreateTestEvents() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() event = None for event in test_events: storage_writer.AddEvent(event) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddEvent(event) def testAddEventSource(self): """Tests the AddEventSource function.""" session = sessions.Session() event_source = event_sources.EventSource() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.AddEventSource(event_source) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddEventSource(event_source) def testAddEventTag(self): """Tests the AddEventTag function.""" session = sessions.Session() test_events = self._CreateTestEvents() event_tags = self._CreateTestEventTags() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() for event in test_events: storage_writer.AddEvent(event) event_tag = None for event_tag in event_tags: storage_writer.AddEventTag(event_tag) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddEventTag(event_tag) def testOpenClose(self): """Tests the Open and Close functions.""" session = sessions.Session() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.Close() storage_writer.Open() storage_writer.Close() storage_writer = fake_storage.FakeStorageWriter( session, storage_type=definitions.STORAGE_TYPE_TASK) storage_writer.Open() storage_writer.Close() storage_writer.Open() with self.assertRaises(IOError): storage_writer.Open() storage_writer.Close() with self.assertRaises(IOError): storage_writer.Close() # TODO: add test for GetEvents. # TODO: add test for GetFirstWrittenEventSource and # GetNextWrittenEventSource. 
@shared_test_lib.skipUnlessHasTestFile([u'psort_test.json.plaso']) @shared_test_lib.skipUnlessHasTestFile([u'pinfo_test.json.plaso']) def testMergeFromStorage(self): """Tests the MergeFromStorage function.""" session = sessions.Session() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() test_file = self._GetTestFilePath([u'psort_test.json.plaso']) storage_reader = zip_file.ZIPStorageFileReader(test_file) storage_writer.MergeFromStorage(storage_reader) test_file = self._GetTestFilePath([u'pinfo_test.json.plaso']) storage_reader = zip_file.ZIPStorageFileReader(test_file) storage_writer.MergeFromStorage(storage_reader) storage_writer.Close() # TODO: add test for GetNextEventSource. def testWriteSessionStartAndCompletion(self): """Tests the WriteSessionStart and WriteSessionCompletion functions.""" session = sessions.Session() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.WriteSessionStart() storage_writer.WriteSessionCompletion() storage_writer.Close() with self.assertRaises(IOError): storage_writer.WriteSessionStart() with self.assertRaises(IOError): storage_writer.WriteSessionCompletion() storage_writer = fake_storage.FakeStorageWriter( session, storage_type=definitions.STORAGE_TYPE_TASK) storage_writer.Open() with self.assertRaises(IOError): storage_writer.WriteSessionStart() with self.assertRaises(IOError): storage_writer.WriteSessionCompletion() storage_writer.Close() def testWriteTaskStartAndCompletion(self): """Tests the WriteTaskStart and WriteTaskCompletion functions.""" session = sessions.Session() task = tasks.Task(session_identifier=session.identifier) storage_writer = fake_storage.FakeStorageWriter( session, storage_type=definitions.STORAGE_TYPE_TASK, task=task) storage_writer.Open() storage_writer.WriteTaskStart() storage_writer.WriteTaskCompletion() storage_writer.Close() with self.assertRaises(IOError): storage_writer.WriteTaskStart() with self.assertRaises(IOError): storage_writer.WriteTaskCompletion() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() with self.assertRaises(IOError): storage_writer.WriteTaskStart() with self.assertRaises(IOError): storage_writer.WriteTaskCompletion() storage_writer.Close() if __name__ == '__main__': unittest.main()
apache-2.0
-6,102,286,598,320,775,000
27.204545
75
0.72361
false
4.026606
true
false
false
ella/django-ratings
django_ratings/aggregation.py
1
1768
""" This file is for aggregation records from Rating,Agg tables to Agg and TotalRate table """ import logging from datetime import datetime, timedelta from django_ratings.models import Rating, Agg, TotalRate logger = logging.getLogger('django_ratings') # aggregate ratings older than 2 years by year DELTA_TIME_YEAR = 2*365*24*60*60 # ratings older than 2 months by month DELTA_TIME_MONTH = 2*30*24*60*60 # rest of the ratings (last 2 months) aggregate daily DELTA_TIME_DAY = -24*60*60 TIMES_ALL = {DELTA_TIME_YEAR : 'year', DELTA_TIME_MONTH : 'month', DELTA_TIME_DAY : 'day'} def transfer_agg_to_totalrate(): """ Transfer aggregation data from table Agg to table TotalRate """ logger.info("transfer_agg_to_totalrate BEGIN") if TotalRate.objects.count() != 0: TotalRate.objects.all().delete() Agg.objects.agg_to_totalrate() logger.info("transfer_agg_to_totalrate END") def transfer_agg_to_agg(): """ aggregation data from table Agg to table Agg """ logger.info("transfer_agg_to_agg BEGIN") timenow = datetime.now() for t in TIMES_ALL: TIME_DELTA = t time_agg = timenow - timedelta(seconds=TIME_DELTA) Agg.objects.move_agg_to_agg(time_agg, TIMES_ALL[t]) Agg.objects.agg_assume() logger.info("transfer_agg_to_agg END") def transfer_data(): """ transfer data from table Rating to table Agg """ logger.info("transfer_data BEGIN") timenow = datetime.now() for t in sorted(TIMES_ALL.keys(), reverse=True): TIME_DELTA = t time_agg = timenow - timedelta(seconds=TIME_DELTA) Rating.objects.move_rate_to_agg(time_agg, TIMES_ALL[t]) transfer_agg_to_agg() transfer_agg_to_totalrate() logger.info("transfer_data END")
bsd-3-clause
4,587,567,461,580,501,500
28.966102
90
0.675339
false
3.292365
false
false
false
javipalanca/ojoalplato
ojoalplato/users/models.py
1
1358
# -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import from django.contrib.auth.models import AbstractUser from django.core.urlresolvers import reverse from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ USER_STATUS_CHOICES = ( (0, "active"), ) @python_2_unicode_compatible class User(AbstractUser): # First Name and Last Name do not cover name patterns # around the globe. name = models.CharField(_("Name of User"), blank=True, max_length=255) login = models.CharField(max_length=60, default="") url = models.URLField(max_length=100, blank=True) activation_key = models.CharField(max_length=60, default="0") status = models.IntegerField(default=0, choices=USER_STATUS_CHOICES) def __str__(self): return self.username def get_absolute_url(self): return reverse('users:detail', kwargs={'username': self.username}) class UserMeta(models.Model): """ Meta information about a user. """ id = models.IntegerField(primary_key=True) user = models.ForeignKey(User, related_name="meta", blank=True, null=True) key = models.CharField(max_length=255) value = models.TextField() def __unicode__(self): return u"%s: %s" % (self.key, self.value)
mit
3,922,233,845,667,998,000
30.581395
78
0.694404
false
3.720548
false
false
false
dwitvliet/CATMAID
django/applications/catmaid/control/link.py
1
6452
import json from django.http import HttpResponse from django.core.exceptions import ObjectDoesNotExist from catmaid.models import UserRole, Project, Relation, Treenode, Connector, \ TreenodeConnector, ClassInstance from catmaid.control.authentication import requires_user_role, can_edit_or_fail @requires_user_role(UserRole.Annotate) def create_link(request, project_id=None): """ Create a link, currently only a presynaptic_to or postsynaptic_to relationship between a treenode and a connector. """ from_id = int(request.POST.get('from_id', 0)) to_id = int(request.POST.get('to_id', 0)) link_type = request.POST.get('link_type', 'none') try: project = Project.objects.get(id=project_id) relation = Relation.objects.get(project=project, relation_name=link_type) from_treenode = Treenode.objects.get(id=from_id) to_connector = Connector.objects.get(id=to_id, project=project) links = TreenodeConnector.objects.filter( connector=to_id, treenode=from_id, relation=relation.id) except ObjectDoesNotExist as e: return HttpResponse(json.dumps({'error': e.message})) if links.count() > 0: return HttpResponse(json.dumps({'error': "A relation '%s' between these two elements already exists!" % link_type})) related_skeleton_count = ClassInstance.objects.filter(project=project, id=from_treenode.skeleton.id).count() if related_skeleton_count > 1: # Can never happen. What motivated this check for an error of this kind? Would imply that a treenode belongs to more than one skeleton, which was possible when skeletons owned treendoes via element_of relations rather than by the skeleton_id column. return HttpResponse(json.dumps({'error': 'Multiple rows for treenode with ID #%s found' % from_id})) elif related_skeleton_count == 0: return HttpResponse(json.dumps({'error': 'Failed to retrieve skeleton id of treenode #%s' % from_id})) if link_type == 'presynaptic_to': # Enforce only one presynaptic link presyn_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation) if (presyn_links.count() != 0): return HttpResponse(json.dumps({'error': 'Connector %s does not have zero presynaptic connections.' % to_id})) # The object returned in case of success result = {} if link_type == 'postsynaptic_to': # Warn if there is already a link from the source skeleton to the # target skeleton. This can happen and is not necessarely wrong, but # worth to double check, because it is likely a mistake. post_links_to_skeleton = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation, skeleton_id=from_treenode.skeleton_id).count() if post_links_to_skeleton == 1: result['warning'] = 'There is already one post-synaptic ' \ 'connection to the target skeleton' elif post_links_to_skeleton > 1: result['warning'] = 'There are already %s post-synaptic ' \ 'connections to the target skeleton' % post_links_to_skeleton # Enforce only synaptic links gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name='gapjunction_with') if (gapjunction_links.count() != 0): return HttpResponse(json.dumps({'error': 'Connector %s cannot have both a gap junction and a postsynaptic node.' 
% to_id})) if link_type == 'gapjunction_with': # Enforce only two gap junction links gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation) synapse_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name__endswith='synaptic_to') if (gapjunction_links.count() > 1): return HttpResponse(json.dumps({'error': 'Connector %s can only have two gap junction connections.' % to_id})) if (synapse_links.count() != 0): return HttpResponse(json.dumps({'error': 'Connector %s is part of a synapse, and gap junction can not be added.' % to_id})) # Enforce same relations across all linked connectors; only new postsynaptic links are valid if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to': return HttpResponse(json.dumps({'error': 'Cannot add %s connection to a linked connector.' % link_type})) TreenodeConnector( user=request.user, project=project, relation=relation, treenode=from_treenode, # treenode_id = from_id skeleton=from_treenode.skeleton, # treenode.skeleton_id where treenode.id = from_id connector=to_connector # connector_id = to_id ).save() result['message'] = 'success' return HttpResponse(json.dumps(result), content_type='application/json') @requires_user_role(UserRole.Annotate) def delete_link(request, project_id=None): connector_id = int(request.POST.get('connector_id', 0)) treenode_id = int(request.POST.get('treenode_id', 0)) links = TreenodeConnector.objects.filter( connector=connector_id, treenode=treenode_id) if links.count() == 0: return HttpResponse(json.dumps({'error': 'Failed to delete connector #%s from geometry domain.' % connector_id})) # Enforce same relations across all linked connectors; only removal of postsynaptic links are valid try: to_connector = Connector.objects.get(id=connector_id, project=project_id) link_type = links[0].relation.relation_name except ObjectDoesNotExist as e: return HttpResponse(json.dumps({'error': e.message})) if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to': return HttpResponse(json.dumps({'error': 'Cannot remove %s connection to a linked connector.' % link_type})) # Could be done by filtering above when obtaining the links, # but then one cannot distinguish between the link not existing # and the user_id not matching or not being superuser. can_edit_or_fail(request.user, links[0].id, 'treenode_connector') links[0].delete() return HttpResponse(json.dumps({'result': 'Removed treenode to connector link'}))
gpl-3.0
-8,270,022,877,487,769,000
51.032258
257
0.688314
false
3.83591
false
false
false
manankalra/Twitter-Sentiment-Analysis
main/sentiment/tweepy_demo/tweep.py
1
1099
#!/usr/bin/env python """ tweepy(Twitter API) demo """ __author__ = "Manan Kalra" __email__ = "[email protected]" from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener import time # Add your own consumer_key = "" consumer_secret = "" access_token = "" access_token_secret = "" class listener(StreamListener): def on_data(self, raw_data): try: # print(raw_data) tweet = raw_data.split(",\"text\":")[1].split(",\"source\"")[0] print(tweet) save_time = str(time.time()) + "::" + tweet save_file = open('tweetDB.csv', 'a') save_file.write(save_time) save_file.write("\n") save_file.close() return True except BaseException: print("Failed") def on_error(self, status_code): print(status_code) auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) twitterStream = Stream(auth, listener()) twitterStream.filter(track=["<anything: noun/verb/adverb/...>"])
mit
3,081,403,557,936,290,300
23.422222
75
0.605096
false
3.423676
false
false
false
Bladefidz/wfuzz
plugins/iterations.py
1
2703
from externals.moduleman.plugin import moduleman_plugin import itertools class piterator_void: text="void" def count(self): return self.__count def __init__(self, *i): self._dic = i self.__count = max(map(lambda x:x.count(), i)) self.it = self._dic[0] def next(self): return (self.it.next(),) def restart(self): for dic in self._dic: dic.restart() self.it = self._dic[0] def __iter__(self): self.restart() return self @moduleman_plugin("restart", "count", "next", "__iter__") class zip: name = "zip" description = "Returns an iterator that aggregates elements from each of the iterables." category = ["default"] priority = 99 def __init__(self, *i): self._dic = i self.it = itertools.izip(*self._dic) self.__count = min(map(lambda x:x.count(), i)) # Only possible match counted. def count(self): return self.__count def restart(self): for dic in self._dic: dic.restart() self.it = itertools.izip.__init__(self, *self._dic) def next(self): return self.it.next() def __iter__(self): self.restart() return self @moduleman_plugin("restart", "count", "next", "__iter__") class product: name = "product" description = "Returns an iterator cartesian product of input iterables." category = ["default"] priority = 99 def __init__(self, *i): self._dic = i self.it = itertools.product(*self._dic) self.__count = reduce(lambda x,y:x*y.count(), i[1:], i[0].count()) def restart(self): for dic in self._dic: dic.restart() self.it = itertools.product(*self._dic) def count(self): return self.__count def next(self): return self.it.next() def __iter__(self): self.restart() return self @moduleman_plugin("restart", "count", "next", "__iter__") class chain: name = "chain" description = "Returns an iterator returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted." category = ["default"] priority = 99 def count(self): return self.__count def __init__(self, *i): self.__count = sum(map(lambda x:x.count(), i)) self._dic = i self.it = itertools.chain(*i) def restart(self): for dic in self._dic: dic.restart() self.it = itertools.chain(*self._dic) def next(self): return (self.it.next(),) def __iter__(self): self.restart() return self
gpl-2.0
2,271,427,541,823,712,800
24.261682
181
0.564928
false
3.785714
false
false
false
berkmancenter/mediacloud
apps/common/src/python/mediawords/db/locks.py
1
3477
"""Constants and routines for handling advisory postgres locks.""" import mediawords.db from mediawords.util.log import create_logger from mediawords.util.perl import decode_object_from_bytes_if_needed log = create_logger(__name__) """ This package just has constants that can be passed to the first value of the postgres pg_advisory_*lock functions. If you are using an advisory lock, you should use the two key version and use a constant from this package to avoid conflicts. """ # locks to make sure we are not mining or snapshotting a topic in more than one process at a time LOCK_TYPES = { 'test-a': 10, 'test-b': 11, 'MediaWords::Job::TM::MineTopic': 12, 'MediaWords::Job::TM::SnapshotTopic': 13, 'MediaWords::TM::Media::media_normalized_urls': 14, 'MediaWords::Crawler::Engine::run_fetcher': 15, # Testing lock types 'TestPerlWorkerLock': 900, 'TestPythonWorkerLock': 901, } class McDBLocksException(Exception): """Default exception for package.""" pass def get_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int, wait: bool = False) -> bool: """Get a postgres advisory lock with the lock_type and lock_id as the two keys. Arguments: db - db handle lock_type - must be in LOCK_TYPES dict above lock_id - id for the particular lock within the type wait - if true, block while waiting for the lock, else return false if the lock is not available Returns: True if the lock is available """ lock_type = str(decode_object_from_bytes_if_needed(lock_type)) if isinstance(lock_id, bytes): lock_id = decode_object_from_bytes_if_needed(lock_id) lock_id = int(lock_id) if isinstance(wait, bytes): wait = decode_object_from_bytes_if_needed(wait) wait = bool(wait) log.debug("trying for lock: %s, %d" % (lock_type, lock_id)) if lock_type not in LOCK_TYPES: raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type) lock_type_id = LOCK_TYPES[lock_type] if wait: db.query("select pg_advisory_lock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id}) return True else: r = db.query("select pg_try_advisory_lock(%(a)s, %(b)s) as locked", {'a': lock_type_id, 'b': lock_id}).hash() return r['locked'] def release_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int) -> None: """Release the postgres advisory lock if it is held.""" lock_type = str(decode_object_from_bytes_if_needed(lock_type)) if isinstance(lock_id, bytes): lock_id = decode_object_from_bytes_if_needed(lock_id) lock_id = int(lock_id) if lock_type not in LOCK_TYPES: raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type) lock_type_id = LOCK_TYPES[lock_type] db.query("select pg_advisory_unlock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id}) def list_session_locks(db: mediawords.db.DatabaseHandler, lock_type: str) -> list: """Return a list of all locked ids for the given lock_type.""" lock_type = str(decode_object_from_bytes_if_needed(lock_type)) if lock_type not in LOCK_TYPES: raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type) lock_type_id = LOCK_TYPES[lock_type] # noinspection SqlResolve return db.query( "select objid from pg_locks where locktype = 'advisory' and classid = %(a)s", {'a': lock_type_id}).flat()
agpl-3.0
1,828,699,678,270,009,600
33.425743
117
0.667242
false
3.261726
false
false
false
QTek/QRadio
tramatego/src/tramatego/transforms/ipv4_to_score.py
1
1161
#!/usr/bin/env python from canari.maltego.utils import debug, progress from canari.framework import configure #, superuser from canari.maltego.entities import IPv4Address, Phrase from common.launchers import get_qradio_data __author__ = 'Zappus' __copyright__ = 'Copyright 2016, TramaTego Project' __credits__ = [] __license__ = 'GPL' __version__ = '0.1' __maintainer__ = 'Zappus' __email__ = '[email protected]' __status__ = 'Development' __all__ = [ 'dotransform', #'onterminate' # comment out this line if you don't need this function. ] #@superuser @configure( label='IPv4 to Score', description='Converts IPv4 into Score using QRadio.', uuids=[ 'TramaTego.v1.IPv4ToScore' ], inputs=[ ( 'TramaTego', IPv4Address ) ], debug=True ) def dotransform(request, response, config): command = "--ipv4_to_score " + request.value qradio_output = get_qradio_data(command, 3) for entry in qradio_output: response += Phrase(entry) return response def onterminate(): """ TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable """ pass
apache-2.0
7,825,119,063,382,835,000
24.822222
115
0.676141
false
3.345821
false
false
false
coinkite/connectrum
connectrum/findall.py
1
4527
#!/usr/bin/env python3 # # import bottom, random, time, asyncio from .svr_info import ServerInfo import logging logger = logging.getLogger('connectrum') class IrcListener(bottom.Client): def __init__(self, irc_nickname=None, irc_password=None, ssl=True): self.my_nick = irc_nickname or 'XC%d' % random.randint(1E11, 1E12) self.password = irc_password or None self.results = {} # by hostname self.servers = set() self.all_done = asyncio.Event() super(IrcListener, self).__init__(host='irc.freenode.net', port=6697 if ssl else 6667, ssl=ssl) # setup event handling self.on('CLIENT_CONNECT', self.connected) self.on('PING', self.keepalive) self.on('JOIN', self.joined) self.on('RPL_NAMREPLY', self.got_users) self.on('RPL_WHOREPLY', self.got_who_reply) self.on("client_disconnect", self.reconnect) self.on('RPL_ENDOFNAMES', self.got_end_of_names) async def collect_data(self): # start it process self.loop.create_task(self.connect()) # wait until done await self.all_done.wait() # return the results return self.results def connected(self, **kwargs): logger.debug("Connected") self.send('NICK', nick=self.my_nick) self.send('USER', user=self.my_nick, realname='Connectrum Client') # long delay here as it does an failing Ident probe (10 seconds min) self.send('JOIN', channel='#electrum') #self.send('WHO', mask='E_*') def keepalive(self, message, **kwargs): self.send('PONG', message=message) async def joined(self, nick=None, **kwargs): # happens when we or someone else joins the channel # seem to take 10 seconds or longer for me to join logger.debug('Joined: %r' % kwargs) if nick != self.my_nick: await self.add_server(nick) async def got_who_reply(self, nick=None, real_name=None, **kws): ''' Server replied to one of our WHO requests, with details. ''' #logger.debug('who reply: %r' % kws) nick = nick[2:] if nick[0:2] == 'E_' else nick host, ports = real_name.split(' ', 1) self.servers.remove(nick) logger.debug("Found: '%s' at %s with port list: %s",nick, host, ports) self.results[host.lower()] = ServerInfo(nick, host, ports) if not self.servers: self.all_done.set() async def got_users(self, users=[], **kws): # After successful join to channel, we are given a list of # users on the channel. Happens a few times for busy channels. logger.debug('Got %d (more) users in channel', len(users)) for nick in users: await self.add_server(nick) async def add_server(self, nick): # ignore everyone but electrum servers if nick.startswith('E_'): self.servers.add(nick[2:]) async def who_worker(self): # Fetch details on each Electrum server nick we see logger.debug('who task starts') copy = self.servers.copy() for nn in copy: logger.debug('do WHO for: ' + nn) self.send('WHO', mask='E_'+nn) logger.debug('who task done') def got_end_of_names(self, *a, **k): logger.debug('Got all the user names') assert self.servers, "No one on channel!" # ask for details on all of those users self.loop.create_task(self.who_worker()) async def reconnect(self, **kwargs): # Trigger an event that may cascade to a client_connect. # Don't continue until a client_connect occurs, which may be never. logger.warn("Disconnected (will reconnect)") # Note that we're not in a coroutine, so we don't have access # to await and asyncio.sleep time.sleep(3) # After this line we won't necessarily be connected. 
# We've simply scheduled the connect to happen in the future self.loop.create_task(self.connect()) logger.debug("Reconnect scheduled.") if __name__ == '__main__': import logging logging.getLogger('bottom').setLevel(logging.DEBUG) logging.getLogger('connectrum').setLevel(logging.DEBUG) logging.getLogger('asyncio').setLevel(logging.DEBUG) bot = IrcListener(ssl=False) bot.loop.set_debug(True) fut = bot.collect_data() #bot.loop.create_task(bot.connect()) rv = bot.loop.run_until_complete(fut) print(rv)
mit
8,429,156,360,593,355,000
31.106383
103
0.610559
false
3.69551
false
false
false
zstyblik/infernal-twin
sql_insert.py
1
3025
import MySQLdb import db_connect_creds from datetime import datetime username, password = db_connect_creds.read_creds() cxn = MySQLdb.connect('localhost', user=username, passwd=password) date = datetime.now() cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless') cxn.commit() cxn.close() cxn = MySQLdb.connect(db='InfernalWireless') cur = cxn.cursor() current_project_id = 0 #~ cxn = MySQLdb.connect('localhost','root',"") #~ #~ date = datetime.now() #~ #~ #~ cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless') #~ #~ cxn.commit() #~ cxn.close() #~ #~ cxn = MySQLdb.connect(db='InfernalWireless') #~ #~ cur = cxn.cursor() #~ #~ current_project_id = 0 def create_project_table(): ##############3333 THIS IS GOING TO CRAETE A TABLE FOR PROJECT #~ cur.execute("CREATE TABLE mytable (id AUTO_INCREMENT") PROJECT_TITLE = '''CREATE TABLE IF NOT EXISTS Projects ( ProjectId MEDIUMINT NOT NULL AUTO_INCREMENT, ProjectName TEXT, PRIMARY KEY (ProjectId), AuditorName TEXT, TargetName TEXT, date TEXT)''' cur.execute(PROJECT_TITLE) create_project_table() def project_details(projectname, Authors_name, TargetName, date): PROJECT_DETAILS = 'INSERT INTO Projects (ProjectName, AuditorName, TargetName, date) VALUES ("%s","%s","%s","%s")'%(projectname, Authors_name, TargetName, date) cur.execute(PROJECT_DETAILS) current_project_id_tmp = cur.lastrowid current_project_id = current_project_id_tmp print "report is generated" return current_project_id_tmp def create_report_table(): ##############3333 THIS IS GOING TO CRAETE A TABLE FOR PROJECT report_table = '''CREATE TABLE IF NOT EXISTS Reports (findingID MEDIUMINT NOT NULL AUTO_INCREMENT, finding_name TEXT, phase TEXT, PRIMARY KEY (findingID), risk_level TEXT, risk_category TEXT, Findings_detail TEXT, Notes TEXT, Project_fk_Id MEDIUMINT, FOREIGN KEY (Project_fk_Id) REFERENCES Projects (ProjectId))''' cur.execute(report_table) create_report_table() def create_report(self, finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id): ########## THIS IS GOING TO INSERT DATA INTO FINDINGS TABLE pID = current_project_id REPORT_DETAILS = 'INSERT INTO Reports (finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id) VALUES ("%s","%s","%s","%s","%s","%s","%s")'%( finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id) cur.execute(REPORT_DETAILS) print pID def print_hello(test_data): print test_data ################ DB POPULATE DATABASE ########### #~ prID = project_details('test','est','23s','12/12/12') #~ #~ create_report('Title of the finding','Choose a phase','Choose a category','Choose risk level','Enter the findings details','Notes on the findings',int(prID)) ################################################################### DUMMY DATABASE QUERIES ############## #~ print type(prID) cur.close() cxn.commit() cxn.close() print "DB has been updated"
gpl-3.0
-3,604,638,107,569,597,000
25.077586
315
0.676694
false
3.238758
false
false
false
ksteinfe/decodes
src/decodes/core/dc_mesh.py
1
6004
from decodes.core import * from . import dc_base, dc_vec, dc_point, dc_has_pts #here we may only import modules that have been loaded before this one. see core/__init__.py for proper order if VERBOSE_FS: print("mesh.py loaded") import copy, collections class Mesh(HasPts): """ a very simple mesh class """ subclass_attr = [] # this list of props is unset any time this HasPts object changes def __init__(self, vertices=None, faces=None, basis=None): """ Mesh Constructor. :param vertices: The vertices of the mesh. :type vertices: [Point] :param faces: List of ordered faces. :type faces: [int] :param basis: The (optional) basis of the mesh. :type basis: Basis :result: Mesh object. :rtype: Mesh :: pts=[ Point(0,0,0), Point(0,1,0), Point(1,1,0), Point(1,0,0), Point(0,0,1), Point(0,1,1), Point(1,1,1), Point(1,0,1), ] quad_faces=[[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]] quadmesh=Mesh(pts,quad_faces) """ super(Mesh,self).__init__(vertices,basis) #HasPts constructor handles initalization of verts and basis self._faces = [] if (faces is None) else faces @property def faces(self): """ Returns a list of mesh faces. :result: List of mesh faces. :rtype: list """ return self._faces def add_face(self,a,b,c,d=-1): """ Adds a face to the mesh. :param a,b,c,d: Face to be added to the list of faces. :type a,b,c,d: int. :result: Modifies list of faces. :rtype: None :: quadmesh.add_face(4,5,6,7) """ #TODO: add lists of faces just the same if max(a,b,c,d) < len(self.pts): if (d>=0) : self._faces.append([a,b,c,d]) else: self._faces.append([a,b,c]) def face_pts(self,index): """ Returns the points of a given face. :param index: Face's index :type index: int :returns: Vertices. :rtype: Point :: quadmesh.face_pts(0) """ return [self.pts[i] for i in self.faces[index]] def face_centroid(self,index): """ Returns the centroids of individual mesh faces. :param index: Index of a face. :type index: int :returns: The centroid of a face. :rtype: Point :: quadmesh.face_centroid(0) """ return Point.centroid(self.face_pts(index)) def face_normal(self,index): """ Returns the normal vector of a face. :param index: Index of a face. :type index: int :returns: Normal vector. :rtype: Vec :: quadmesh.face_normal(0) """ verts = self.face_pts(index) if len(verts) == 3 : return Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[2])).normalized() else : v0 = Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[3])).normalized() v1 = Vec(verts[2],verts[3]).cross(Vec(verts[2],verts[1])).normalized() return Vec.bisector(v0,v1).normalized() def __repr__(self): return "msh[{0}v,{1}f]".format(len(self._verts),len(self._faces)) @staticmethod def explode(msh): """ Explodes a mesh into individual faces. :param msh: Mesh to explode. :type msh: Mesh :returns: List of meshes. :type: [Mesh] :: Mesh.explode(quadmesh) """ exploded_meshes = [] for face in msh.faces: pts = [msh.pts[v] for v in face] nface = [0,1,2] if len(face)==3 else [0,1,2,3] exploded_meshes.append(Mesh(pts,[nface])) return exploded_meshes def to_pt_graph(self): """ Returns a Graph representation of the mesh points by index. :returns: A Graph of point indexes. :rtype: Graph :: quadmesh.to_pt_graph() """ graph = Graph() for index in range(len(self.pts)): for face in self.faces: for px in face: if index in face and index!=px: graph.add_edge(index, px) return graph def to_face_graph(self, val=1): """ Returns a Graph representation of the mesh faces by index. :param val: number of coincident points for neighborness. :type val: int :returns: A Graph of face indexes. 
:rtype: Graph :: quadmesh.to_face_graph(2) """ from decodes.extensions.graph import Graph graph = Graph() graph.naked_nodes = [] for f1 in range(len(self.faces)): for f2 in range(len(self.faces)): if f1 != f2: count = 0 for index in self.faces[f2]: if index in self.faces[f1]: count+=1 if count >= val: graph.add_edge(f1,f2) if len(graph.edges[f1]) < len(self.faces[f1]): if f1 not in graph.naked_nodes: graph.naked_nodes.append(f1) return graph
gpl-3.0
8,685,016,591,844,526,000
30.276042
164
0.460693
false
4.067751
false
false
false
kmiller96/Shipping-Containers-Software
lib/core.py
1
8600
# AUTHOR: Kale Miller # DESCRIPTION: The 'main brain' of the program is held in here. # 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576 # 6572616c207665727920736d616c6c20706f737369626c65207461736b732e # DEVELOPMENT LOG: # 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class. # 12/12/16: Tweaked the IDGenerator class to help remove dependancy. # 13/12/16: Fleshed out the NewHoldingBay class. # 15/12/16: Added methods to add auxilary labels. Added method to generate information label. Small bug fixes. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ import os, time import numpy as np from lib import containers CONTAINER_CLASSES = [ containers.BasicContainer, containers.HeavyContainer, containers.RefrigeratedContainer, containers.LiquidContainer, containers.ExplosivesContainer, containers.ToxicContainer, containers.ChemicalContainer ] CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical'] SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C'] TAG_APPLICATION_TIME = 0.2 PRINTALL_TIME = 1 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def processshipfile(filename, path): """Processes the csv file that the ship supplies.""" def _deletenewline(string): """Deletes the \n symbol from a string if it exists.""" try: truncatedstring = string[:string.index('\n')] except ValueError: truncatedstring = string finally: return truncatedstring try: home = os.getcwd() os.chdir(path) except WindowsError: # Would this hold true on all machines? raise NameError, "The path specified does not exist." rawfile = open(filename, 'r') arylines = rawfile.readlines() basematrix = map(lambda x: _deletenewline(x).split(','), arylines) numpyarray = np.array(basematrix) return numpyarray class IDGenerator: """Controls the assignment of id tags on the containers.""" # TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists. def __init__(self): """Initialise the id generator.""" self._COUNTERS = [0] * len(CONTAINER_TYPES) return def _findindex(self, container): """Determines the index in the lists the class should use.""" return CONTAINER_TYPES.index(container) def _serialcode(self, index): """Fetches the serial code for a supplied index.""" return SERIAL_CODES[index] def _counter(self, index): """Fetches the counter for a specific serial type and increments it by one.""" self._COUNTERS[index] += 1 return self._COUNTERS[index] def newid(self, containertype): """Generates a new id.""" ii = self._findindex(containertype) idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5) return idtag class NewHoldingBay: """Creates a new holding bay for the containers. Thus it contains all of the information about the containers along with the methods controlling unloading and loading them.""" def __init__(self): self._path = os.getcwd() self.idgenerator = IDGenerator() self.containerlist = list() self._iOnship = 0 self._iLoaded = 0 self._iHolding = 0 return None def _createcontainer(self, containerstr, parameters): """Creates a new container class based off the first column of the CSV.""" # TODO: Fix this method up to catch more and print useful error messages. 
if not isinstance(containerstr, str): raise TypeError, "The parameter passed must be a string." elif len(containerstr) == 1: try: ii = SERIAL_CODES.index(containerstr) except ValueError: raise Exception("Bad input.") # TODO: Fix this area up. elif len(containerstr) != 1: try: ii = CONTAINER_TYPES.index(containerstr) except ValueError: raise Exception("Bad input.") idtag = self.idgenerator.newid(CONTAINER_TYPES[ii]) return CONTAINER_CLASSES[ii](idtag, *parameters) def defineship(self, file): """Pass in the CSV file of the ship in order to unload it.""" shipdata = processshipfile(file, self._path) shipdata = shipdata[1::] # Throw out the headers. for line in shipdata: newcontainer = self._createcontainer(line[0], (line[1], line[3])) self.containerlist.append(newcontainer) self._iOnship += 1 def printcontainer(self, serial): """Prints the information about a specific container.""" for container in self.containerlist: if container.id() == serial: container.information() return None else: continue raise NameError, "Unable to find container with serial code %s" % serial return -1 def printallinformation(self): """Prints the information of all the containers.""" for container in self.containerlist: container.information() time.sleep(PRINTALL_TIME) return None def unloadall(self, debug=False): """Unloads all of the containers from the ship.""" for container in self.containerlist: container.unload(debug=debug) self._iHolding += 1 self._iOnship -= 1 return None def loadall(self, debug=False): """Loads all of the containers into trucks and trains.""" # TODO: Proper loading locations. ii = 1 for container in self.containerlist: container.load('Truck ' + str(ii).zfill(3), debug=debug) self._iHolding -= 1 self._iLoaded += 1 ii += 1 return None def printauditedload(self): """Prints information about the holding bay at this time.""" iOnship = 0; iLoaded = 0; iHolding = 0 iContainercount = [0] * len(CONTAINER_TYPES) for container in self.containerlist: try: ii = CONTAINER_TYPES.index(container._type) iContainercount[ii] += 1 except ValueError: raise NameError, "One (or more) containers don't have a valid type." # Print the appropriate information. print "----------------------------------------------------------------------" print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3) print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3) print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3) print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3) print "" print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3) for ii in xrange(len(CONTAINER_TYPES)): if iContainercount[ii] == 0: continue print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3) print "----------------------------------------------------------------------" return None def addidtags(self, debug=False): """Applys appropriate serial numbers to all of the containers.""" for container in self.containerlist: print "Applying id tag to container %s" % container.id() if not debug: time.sleep(TAG_APPLICATION_TIME) container.addidtag() return None def applyauxilarylabels(self): """Applys the labels that should go on containers about their contents and handling.""" for container in self.containerlist: print "Adding labels to container %s" % container.id() container.addauxilarylabels() return None # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mit
3,282,703,609,942,576,000
39.148325
120
0.570698
false
4.232283
false
false
false
amerlyq/airy
vim/res/ycm_extra_conf.py
1
5213
# SEE: CACHE/bundle/YouCompleteMe/cpp/ycm/.ycm_extra_conf.py import os import ycm_core # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. flags = [ '-Wall', '-Wextra', '-Werror', '-Wc++98-compat', '-Wno-long-long', '-Wno-variadic-macros', '-fexceptions', '-DNDEBUG', # You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM # source code needs it. #'-DUSE_CLANG_COMPLETER', # THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which # language to use when compiling headers. So it will guess. Badly. So C++ # headers will be compiled as C headers. You don't want that so ALWAYS specify # a "-std=<something>". # For a C project, you would set this to something like 'c99' instead of # 'c++11'. '-std=c++11', # ...and the same thing goes for the magic -x option which specifies the # language that the files to be compiled are written in. This is mostly # relevant for c++ headers. # For a C project, you would set this to 'c' instead of 'c++'. '-x', 'c++', '-isystem', '../BoostParts', # This path will only work on OS X, but extra paths that don't exist are not harmful '-isystem', '/System/Library/Frameworks/Python.framework/Headers', '-isystem', '../llvm/include', '-isystem', '../llvm/tools/clang/include', '-I', '.', '-I', './ClangCompleter', '-isystem', './tests/gmock/gtest', '-isystem', './tests/gmock/gtest/include', '-isystem', './tests/gmock', '-isystem', './tests/gmock/include', '-isystem', '/usr/include', '-isystem', '/usr/local/include', '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1', '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include', ] # Set this to the absolute path to the folder (NOT the file!) containing the # compile_commands.json file to use that instead of 'flags'. See here for # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. Notice that YCM itself uses that approach. compilation_database_folder = os.path.abspath( '~/aura/pdrm/gerrit/build' ) if os.path.exists( compilation_database_folder ): database = ycm_core.CompilationDatabase( compilation_database_folder ) else: database = None SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ] def DirectoryOfThisScript(): return os.path.dirname( os.path.abspath( __file__ ) ) def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): if not working_directory: return list( flags ) new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith( '/' ): new_flag = os.path.join( working_directory, flag ) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith( path_flag ): path = flag[ len( path_flag ): ] new_flag = path_flag + os.path.join( working_directory, path ) break if new_flag: new_flags.append( new_flag ) return new_flags def IsHeaderFile( filename ): extension = os.path.splitext( filename )[ 1 ] return extension in [ '.h', '.hxx', '.hpp', '.hh' ] def GetCompilationInfoForFile( filename ): # The compilation_commands.json file generated by CMake does not have entries # for header files. 
So we do our best by asking the db for flags for a # corresponding source file, if any. If one exists, the flags for that file # should be good enough. if IsHeaderFile( filename ): basename = os.path.splitext( filename )[ 0 ] for extension in SOURCE_EXTENSIONS: replacement_file = basename + extension if os.path.exists( replacement_file ): compilation_info = database.GetCompilationInfoForFile( replacement_file ) if compilation_info.compiler_flags_: return compilation_info return None return database.GetCompilationInfoForFile( filename ) def FlagsForFile( filename, **kwargs ): if database: # Bear in mind that compilation_info.compiler_flags_ does NOT return a # python list, but a "list-like" StringVec object compilation_info = GetCompilationInfoForFile( filename ) if not compilation_info: return None final_flags = MakeRelativePathsInFlagsAbsolute( compilation_info.compiler_flags_, compilation_info.compiler_working_dir_ ) # NOTE: This is just for YouCompleteMe; it's highly likely that your project # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT. #try: # final_flags.remove( '-stdlib=libc++' ) #except ValueError: # pass else: relative_to = DirectoryOfThisScript() final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) return { 'flags': final_flags, 'do_cache': True }
mit
1,835,712,547,090,115,600
36.235714
115
0.689047
false
3.479973
true
false
false
sangwonl/stage34
webapp/api/handlers/stage.py
1
6612
from django.views import View from django.conf import settings from datetime import datetime from api.helpers.mixins import AuthRequiredMixin from api.helpers.http.jsend import JSENDSuccess, JSENDError from api.models.resources import Membership, Stage from libs.utils.model_ext import model_to_dict from worker.tasks.deployment import ( task_provision_stage, task_change_stage_status, task_delete_stage, task_refresh_stage ) import pytz import os import json import jwt SERIALIZE_FIELDS = [ 'id', 'title', 'endpoint', 'status', 'repo', 'default_branch', 'branch', 'created_at' ] class StageRootHandler(AuthRequiredMixin, View): def get(self, request, *args, **kwargs): org = Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') stages_qs = Stage.objects.filter(org=org) stages = [model_to_dict(s, fields=SERIALIZE_FIELDS) for s in stages_qs] return JSENDSuccess(status_code=200, data=stages) def post(self, request, *args, **kwargs): json_body = json.loads(request.body) title = json_body.get('title') repo = json_body.get('repo') branch= json_body.get('branch') default_branch= json_body.get('default_branch') run_on_create = json_body.get('run_on_create', False) if not (title and repo and default_branch and branch): return JSENDError(status_code=400, msg='invalid stage info') org = Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') stage = Stage.objects.create( org=org, title=title, repo=repo, default_branch=default_branch, branch=branch ) github_access_key = request.user.jwt_payload.get('access_token') task_provision_stage.apply_async(args=[github_access_key, stage.id, repo, branch, run_on_create]) stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS) return JSENDSuccess(status_code=200, data=stage_dict) class StageDetailHandler(AuthRequiredMixin, View): def get_stage(self, org, stage_id): try: stage = Stage.objects.get(org=org, id=stage_id) except Stage.DoesNotExist: return None return stage def get(self, request, stage_id, *args, **kwargs): org = Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') stage = self.get_stage(org, stage_id) if not stage: return JSENDError(status_code=404, msg='stage not found') stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS) return JSENDSuccess(status_code=200, data=stage_dict) def put(self, request, stage_id, *args, **kwargs): json_body = json.loads(request.body) new_status = json_body.get('status') if not new_status or new_status not in ('running', 'paused'): return JSENDError(status_code=400, msg='invalid stage status') org = Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') stage = self.get_stage(org, stage_id) if not stage: return JSENDError(status_code=404, msg='stage not found') cur_status = stage.status if cur_status != new_status: github_access_key = request.user.jwt_payload.get('access_token') task_change_stage_status.apply_async(args=[github_access_key, stage_id, new_status]) new_status = 'changing' stage.title = json_body.get('title', stage.title) stage.repo = json_body.get('repo', stage.repo) stage.default_branch = json_body.get('default_branch', stage.default_branch) stage.branch = json_body.get('branch', stage.branch) stage.status = new_status stage.save() stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS) return JSENDSuccess(status_code=204) def delete(self, request, stage_id, *args, **kwargs): org = 
Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') stage = self.get_stage(org, stage_id) if not stage: return JSENDError(status_code=404, msg='stage not found') stage.status = 'deleting' stage.save() github_access_key = request.user.jwt_payload.get('access_token') task_delete_stage.apply_async(args=[github_access_key, stage_id]) return JSENDSuccess(status_code=204) class StageLogHandler(AuthRequiredMixin, View): def get_log_path(self, stage_id): return os.path.join(settings.STAGE_REPO_HOME, stage_id, 'output.log') def get(self, request, stage_id, *args, **kwargs): org = Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') log_path = self.get_log_path(stage_id) if not os.path.exists(log_path): return JSENDError(status_code=404, msg='log file not found') log_msgs = [] with open(log_path, 'rt') as f: log_msg = f.read() log_msgs = [l for l in log_msg.split('\n') if l] ts = os.path.getmtime(log_path) tz = pytz.timezone(settings.TIME_ZONE) dt = datetime.fromtimestamp(ts, tz=tz) log_data = {'log_messages': log_msgs, 'log_time': dt.isoformat()} return JSENDSuccess(status_code=200, data=log_data) class StageRefreshHandler(AuthRequiredMixin, View): def get_stage(self, org, stage_id): try: stage = Stage.objects.get(org=org, id=stage_id) except Stage.DoesNotExist: return None return stage def post(self, request, stage_id, *args, **kwargs): org = Membership.get_org_of_user(request.user) if not org: return JSENDError(status_code=400, msg='org not found') stage = self.get_stage(org, stage_id) if not stage: return JSENDError(status_code=404, msg='stage not found') github_access_key = request.user.jwt_payload.get('access_token') task_refresh_stage.apply_async(args=[github_access_key, stage_id]) stage.status = 'changing' stage.save() stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS) return JSENDSuccess(status_code=204)
mit
3,603,858,691,656,666,600
33.082474
105
0.628403
false
3.552929
false
false
false
globaltoken/globaltoken
test/functional/test_framework/authproxy.py
1
7759
# Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to globaltokend. AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal import http.client import json import logging import socket import time import urllib.parse HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error): try: errmsg = '%(message)s (%(code)i)' % rpc_error except (KeyError, TypeError): errmsg = '' super().__init__(errmsg) self.error = rpc_error def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") class AuthServiceProxy(): __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) port = 80 if self.__url.port is None else self.__url.port user = None if self.__url.username is None else self.__url.username.encode('utf8') passwd = None if self.__url.password is None else self.__url.password.encode('utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) if connection: # Callables re-use the connection of the original proxy self.__conn = connection elif self.__url.scheme == 'https': self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout) else: self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): # Python internal stuff raise AttributeError if self._service_name is not None: name = "%s.%s" % (self._service_name, name) return AuthServiceProxy(self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): ''' Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
''' headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} try: self.__conn.request(method, path, postdata, headers) return self._get_response() except http.client.BadStatusLine as e: if e.line == "''": # if connection was closed, try again self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name, json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) if args and argsn: raise ValueError('Cannot handle both named and positional arguments') return {'version': '1.1', 'method': self._service_name, 'params': args or argsn, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) response = self._request('POST', self.__url.path, postdata.encode('utf-8')) if response['error'] is not None: raise JSONRPCException(response['error']) elif 'result' not in response: raise JSONRPCException({ 'code': -343, 'message': 'missing JSON-RPC result'}) else: return response['result'] def batch(self, rpc_call_list): postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) return self._request('POST', self.__url.path, postdata.encode('utf-8')) def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout as e: raise JSONRPCException({ 'code': -344, 'message': '%r RPC took longer than %f seconds. Consider ' 'using larger timeout for calls that take ' 'longer to return.' % (self._service_name, self.__conn.timeout)}) if http_response is None: raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException({ 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) responsedata = http_response.read().decode('utf8') response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) else: log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) return response def __truediv__(self, relative_uri): return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
mit
-3,351,838,090,374,952,400
42.105556
155
0.621601
false
4.087987
false
false
false
jtomasek/tuskar-ui-1
tuskar_ui/infrastructure/resource_management/resource_classes/workflows.py
1
12384
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import workflows from tuskar_ui import api as tuskar import tuskar_ui.workflows import re from tuskar_ui.infrastructure. \ resource_management.resource_classes.tables import FlavorTemplatesTable from tuskar_ui.infrastructure. \ resource_management.resource_classes.tables import RacksTable class ResourceClassInfoAndFlavorsAction(workflows.Action): name = forms.CharField(max_length=255, label=_("Class Name"), help_text="", required=True) service_type = forms.ChoiceField(label=_('Class Type'), required=True, choices=[('', ''), ('compute', ('Compute')), ('not_compute', ('Non Compute')), ], widget=forms.Select( attrs={'class': 'switchable'}) ) image = forms.ChoiceField(label=_('Provisioning Image'), required=True, choices=[('compute-img', ('overcloud-compute'))], widget=forms.Select( attrs={'class': 'switchable'}) ) def clean(self): cleaned_data = super(ResourceClassInfoAndFlavorsAction, self).clean() name = cleaned_data.get('name') resource_class_id = self.initial.get('resource_class_id', None) try: resource_classes = tuskar.ResourceClass.list(self.request) except Exception: resource_classes = [] msg = _('Unable to get resource class list') exceptions.check_message(["Connection", "refused"], msg) raise for resource_class in resource_classes: if resource_class.name == name and \ resource_class_id != resource_class.id: raise forms.ValidationError( _('The name "%s" is already used by' ' another resource class.') % name ) return cleaned_data class Meta: name = _("Class Settings") help_text = _("From here you can fill the class " "settings and add flavors to class.") class CreateResourceClassInfoAndFlavors(tuskar_ui.workflows.TableStep): table_classes = (FlavorTemplatesTable,) action_class = ResourceClassInfoAndFlavorsAction template_name = 'infrastructure/resource_management/resource_classes/'\ '_resource_class_info_and_flavors_step.html' contributes = ("name", "service_type", "flavors_object_ids", 'max_vms') def contribute(self, data, context): request = self.workflow.request if data: context["flavors_object_ids"] =\ request.POST.getlist("flavors_object_ids") # todo: lsmola django can't parse dictionaruy from POST # this should be rewritten to django formset context["max_vms"] = {} for index, value in request.POST.items(): match = re.match( '^(flavors_object_ids__max_vms__(.*?))$', index) if match: context["max_vms"][match.groups()[1]] = value context.update(data) return context def get_flavors_data(self): try: resource_class_id = self.workflow.context.get("resource_class_id") if resource_class_id: resource_class = tuskar.ResourceClass.get( self.workflow.request, resource_class_id) # TODO(lsmola ugly interface, rewrite) self._tables['flavors'].active_multi_select_values = \ resource_class.flavortemplates_ids all_flavors = resource_class.all_flavors else: 
all_flavors = tuskar.FlavorTemplate.list( self.workflow.request) except Exception: all_flavors = [] exceptions.handle(self.workflow.request, _('Unable to retrieve resource flavors list.')) return all_flavors class RacksAction(workflows.Action): class Meta: name = _("Racks") class CreateRacks(tuskar_ui.workflows.TableStep): table_classes = (RacksTable,) action_class = RacksAction contributes = ("racks_object_ids") template_name = 'infrastructure/resource_management/'\ 'resource_classes/_racks_step.html' def contribute(self, data, context): request = self.workflow.request context["racks_object_ids"] =\ request.POST.getlist("racks_object_ids") context.update(data) return context def get_racks_data(self): try: resource_class_id = self.workflow.context.get("resource_class_id") if resource_class_id: resource_class = tuskar.ResourceClass.get( self.workflow.request, resource_class_id) # TODO(lsmola ugly interface, rewrite) self._tables['racks'].active_multi_select_values = \ resource_class.racks_ids racks = \ resource_class.all_racks else: racks = \ tuskar.Rack.list(self.workflow.request, True) except Exception: racks = [] exceptions.handle(self.workflow.request, _('Unable to retrieve racks list.')) return racks class ResourceClassWorkflowMixin: # FIXME active tabs coflict # When on page with tabs, the workflow with more steps is used, # there is a conflict of active tabs and it always shows the # first tab after an action. So I explicitly specify to what # tab it should redirect after action, until the coflict will # be fixed in Horizon. def get_index_url(self): """This url is used both as success and failure url""" return "%s?tab=resource_management_tabs__resource_classes_tab" %\ reverse("horizon:infrastructure:resource_management:index") def get_success_url(self): return self.get_index_url() def get_failure_url(self): return self.get_index_url() def format_status_message(self, message): name = self.context.get('name') return message % name def _get_flavors(self, request, data): flavors = [] flavor_ids = data.get('flavors_object_ids') or [] max_vms = data.get('max_vms') resource_class_name = data['name'] for template_id in flavor_ids: template = tuskar.FlavorTemplate.get(request, template_id) capacities = [] for c in template.capacities: capacities.append({'name': c.name, 'value': str(c.value), 'unit': c.unit}) # FIXME: tuskar uses resource-class-name prefix for flavors, # e.g. 
m1.large, we add rc name to the template name: flavor_name = "%s.%s" % (resource_class_name, template.name) flavors.append({'name': flavor_name, 'max_vms': max_vms.get(template.id, None), 'capacities': capacities}) return flavors def _add_racks(self, request, data, resource_class): ids_to_add = data.get('racks_object_ids') or [] resource_class.set_racks(request, ids_to_add) class CreateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow): default_steps = (CreateResourceClassInfoAndFlavors, CreateRacks) slug = "create_resource_class" name = _("Create Class") finalize_button_name = _("Create Class") success_message = _('Created class "%s".') failure_message = _('Unable to create class "%s".') def _create_resource_class_info(self, request, data): try: flavors = self._get_flavors(request, data) return tuskar.ResourceClass.create( request, name=data['name'], service_type=data['service_type'], flavors=flavors) except Exception: redirect = self.get_failure_url() exceptions.handle(request, _('Unable to create resource class.'), redirect=redirect) return None def handle(self, request, data): resource_class = self._create_resource_class_info(request, data) self._add_racks(request, data, resource_class) return True class UpdateResourceClassInfoAndFlavors(CreateResourceClassInfoAndFlavors): depends_on = ("resource_class_id",) class UpdateRacks(CreateRacks): depends_on = ("resource_class_id",) class UpdateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow): default_steps = (UpdateResourceClassInfoAndFlavors, UpdateRacks) slug = "update_resource_class" name = _("Update Class") finalize_button_name = _("Update Class") success_message = _('Updated class "%s".') failure_message = _('Unable to update class "%s".') def _update_resource_class_info(self, request, data): try: flavors = self._get_flavors(request, data) return tuskar.ResourceClass.update( request, data['resource_class_id'], name=data['name'], service_type=data['service_type'], flavors=flavors) except Exception: redirect = self.get_failure_url() exceptions.handle(request, _('Unable to create resource class.'), redirect=redirect) return None def handle(self, request, data): resource_class = self._update_resource_class_info(request, data) self._add_racks(request, data, resource_class) return True class DetailUpdateWorkflow(UpdateResourceClass): def get_index_url(self): """This url is used both as success and failure url""" url = "horizon:infrastructure:resource_management:resource_classes:"\ "detail" return "%s?tab=resource_class_details__overview" % ( reverse(url, args=(self.context["resource_class_id"]))) class UpdateRacksWorkflow(UpdateResourceClass): def get_index_url(self): """This url is used both as success and failure url""" url = "horizon:infrastructure:resource_management:resource_classes:"\ "detail" return "%s?tab=resource_class_details__racks" % ( reverse(url, args=(self.context["resource_class_id"]))) class UpdateFlavorsWorkflow(UpdateResourceClass): def get_index_url(self): """This url is used both as success and failure url""" url = "horizon:infrastructure:resource_management:resource_classes:"\ "detail" return "%s?tab=resource_class_details__flavors" % ( reverse(url, args=(self.context["resource_class_id"])))
apache-2.0
-6,819,238,934,805,473,000
37.222222
79
0.566053
false
4.488583
false
false
false
forman/dectree
examples/intertidal_flat_classif/intertidal_flat_classif.py
1
12362
from numba import jit, jitclass, float64 import numpy as np @jit(nopython=True) def _B1_LT_085(x): # B1.LT_085: lt(0.85) if 0.0 == 0.0: return 1.0 if x < 0.85 else 0.0 x1 = 0.85 - 0.0 x2 = 0.85 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B1_GT_1(x): # B1.GT_1: gt(1.0) if 0.0 == 0.0: return 1.0 if x > 1.0 else 0.0 x1 = 1.0 - 0.0 x2 = 1.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B2_GT_0(x): # B2.GT_0: gt(0.0) if 0.0 == 0.0: return 1.0 if x > 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B3_LT_005(x): # B3.LT_005: lt(0.05) if 0.0 == 0.0: return 1.0 if x < 0.05 else 0.0 x1 = 0.05 - 0.0 x2 = 0.05 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B3_LT_01(x): # B3.LT_01: lt(0.1) if 0.0 == 0.0: return 1.0 if x < 0.1 else 0.0 x1 = 0.1 - 0.0 x2 = 0.1 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B3_LT_015(x): # B3.LT_015: lt(0.15) if 0.0 == 0.0: return 1.0 if x < 0.15 else 0.0 x1 = 0.15 - 0.0 x2 = 0.15 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B3_LT_02(x): # B3.LT_02: lt(0.2) if 0.0 == 0.0: return 1.0 if x < 0.2 else 0.0 x1 = 0.2 - 0.0 x2 = 0.2 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B4_NODATA(x): # B4.NODATA: eq(0.0) if 0.0 == 0.0: return 1.0 if x == 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 x3 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) if x <= x3: return 1.0 - (x - x2) / (x3 - x2) return 0.0 @jit(nopython=True) def _B5_LT_01(x): # B5.LT_01: lt(0.1) if 0.0 == 0.0: return 1.0 if x < 0.1 else 0.0 x1 = 0.1 - 0.0 x2 = 0.1 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B7_LT_05(x): # B7.LT_05: lt(0.5) if 0.0 == 0.0: return 1.0 if x < 0.5 else 0.0 x1 = 0.5 - 0.0 x2 = 0.5 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B8_GT_0(x): # B8.GT_0: gt(0.0) if 0.0 == 0.0: return 1.0 if x > 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_LT_009(x): # B8.LT_009: lt(0.09) if 0.0 == 0.0: return 1.0 if x < 0.09 else 0.0 x1 = 0.09 - 0.0 x2 = 0.09 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B8_GT_033(x): # B8.GT_033: gt(0.33) if 0.0 == 0.0: return 1.0 if x > 0.33 else 0.0 x1 = 0.33 - 0.0 x2 = 0.33 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_GT_035(x): # B8.GT_035: gt(0.35) if 0.0 == 0.0: return 1.0 if x > 0.35 else 0.0 x1 = 0.35 - 0.0 x2 = 0.35 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_GT_04(x): # B8.GT_04: gt(0.4) if 0.0 == 0.0: return 1.0 if x > 0.4 else 0.0 x1 = 0.4 - 0.0 x2 = 0.4 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_GT_045(x): # B8.GT_045: gt(0.45) if 0.0 == 0.0: return 1.0 if x > 0.45 else 0.0 x1 = 0.45 - 0.0 x2 = 0.45 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def 
_B8_LT_085(x): # B8.LT_085: lt(0.85) if 0.0 == 0.0: return 1.0 if x < 0.85 else 0.0 x1 = 0.85 - 0.0 x2 = 0.85 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B16_GT_0(x): # B16.GT_0: gt(0.0) if 0.0 == 0.0: return 1.0 if x > 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B19_GT_015(x): # B19.GT_015: gt(0.15) if 0.0 == 0.0: return 1.0 if x > 0.15 else 0.0 x1 = 0.15 - 0.0 x2 = 0.15 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _BSum_GT_011(x): # BSum.GT_011: gt(0.11) if 0.0 == 0.0: return 1.0 if x > 0.11 else 0.0 x1 = 0.11 - 0.0 x2 = 0.11 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _BSum_GT_013(x): # BSum.GT_013: gt(0.13) if 0.0 == 0.0: return 1.0 if x > 0.13 else 0.0 x1 = 0.13 - 0.0 x2 = 0.13 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _BSum_GT_016(x): # BSum.GT_016: gt(0.16) if 0.0 == 0.0: return 1.0 if x > 0.16 else 0.0 x1 = 0.16 - 0.0 x2 = 0.16 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _Class_FALSE(x): # Class.FALSE: false() return 0.0 @jit(nopython=True) def _Class_TRUE(x): # Class.TRUE: true() return 1.0 _InputsSpec = [ ("b1", float64[:]), ("b2", float64[:]), ("b3", float64[:]), ("b4", float64[:]), ("b5", float64[:]), ("b6", float64[:]), ("b7", float64[:]), ("b8", float64[:]), ("b12", float64[:]), ("b13", float64[:]), ("b14", float64[:]), ("b15", float64[:]), ("b16", float64[:]), ("b19", float64[:]), ("b100", float64[:]), ("bsum", float64[:]), ] @jitclass(_InputsSpec) class Inputs: def __init__(self, size: int): self.b1 = np.zeros(size, dtype=np.float64) self.b2 = np.zeros(size, dtype=np.float64) self.b3 = np.zeros(size, dtype=np.float64) self.b4 = np.zeros(size, dtype=np.float64) self.b5 = np.zeros(size, dtype=np.float64) self.b6 = np.zeros(size, dtype=np.float64) self.b7 = np.zeros(size, dtype=np.float64) self.b8 = np.zeros(size, dtype=np.float64) self.b12 = np.zeros(size, dtype=np.float64) self.b13 = np.zeros(size, dtype=np.float64) self.b14 = np.zeros(size, dtype=np.float64) self.b15 = np.zeros(size, dtype=np.float64) self.b16 = np.zeros(size, dtype=np.float64) self.b19 = np.zeros(size, dtype=np.float64) self.b100 = np.zeros(size, dtype=np.float64) self.bsum = np.zeros(size, dtype=np.float64) _OutputsSpec = [ ("nodata", float64[:]), ("Wasser", float64[:]), ("Schill", float64[:]), ("Muschel", float64[:]), ("dense2", float64[:]), ("dense1", float64[:]), ("Strand", float64[:]), ("Sand", float64[:]), ("Misch", float64[:]), ("Misch2", float64[:]), ("Schlick", float64[:]), ("schlick_t", float64[:]), ("Wasser2", float64[:]), ] @jitclass(_OutputsSpec) class Outputs: def __init__(self, size: int): self.nodata = np.zeros(size, dtype=np.float64) self.Wasser = np.zeros(size, dtype=np.float64) self.Schill = np.zeros(size, dtype=np.float64) self.Muschel = np.zeros(size, dtype=np.float64) self.dense2 = np.zeros(size, dtype=np.float64) self.dense1 = np.zeros(size, dtype=np.float64) self.Strand = np.zeros(size, dtype=np.float64) self.Sand = np.zeros(size, dtype=np.float64) self.Misch = np.zeros(size, dtype=np.float64) self.Misch2 = np.zeros(size, dtype=np.float64) self.Schlick = np.zeros(size, dtype=np.float64) self.schlick_t = np.zeros(size, dtype=np.float64) self.Wasser2 = np.zeros(size, dtype=np.float64) 
@jit(nopython=True) def apply_rules(inputs: Inputs, outputs: Outputs): for i in range(len(outputs.nodata)): t0 = 1.0 # if b4 is NODATA: t1 = min(t0, _B4_NODATA(inputs.b4[i])) # nodata = TRUE outputs.nodata[i] = t1 # else: t1 = min(t0, 1.0 - t1) # if (b8 is GT_033 and b1 is LT_085) or b8 is LT_009: t2 = min(t1, max(min(_B8_GT_033(inputs.b8[i]), _B1_LT_085(inputs.b1[i])), _B8_LT_009(inputs.b8[i]))) # if b5 is LT_01: t3 = min(t2, _B5_LT_01(inputs.b5[i])) # Wasser = TRUE outputs.Wasser[i] = t3 # else: t3 = min(t2, 1.0 - t3) # if (b19 is GT_015 and (b8 is GT_04 and b8 is LT_085) and b7 is LT_05) or (b8 is GT_04 and bsum is GT_011) or (b8 is GT_035 and bsum is GT_016): t4 = min(t3, max(max(min(min(_B19_GT_015(inputs.b19[i]), min(_B8_GT_04(inputs.b8[i]), _B8_LT_085(inputs.b8[i]))), _B7_LT_05(inputs.b7[i])), min(_B8_GT_04(inputs.b8[i]), _BSum_GT_011(inputs.bsum[i]))), min(_B8_GT_035(inputs.b8[i]), _BSum_GT_016(inputs.bsum[i])))) # if bsum is GT_013: t5 = min(t4, _BSum_GT_013(inputs.bsum[i])) # Schill = TRUE outputs.Schill[i] = t5 # else: t5 = min(t4, 1.0 - t5) # Muschel = TRUE outputs.Muschel[i] = t5 # else: t4 = min(t3, 1.0 - t4) # if b8 is GT_045: t5 = min(t4, _B8_GT_045(inputs.b8[i])) # dense2 = TRUE outputs.dense2[i] = t5 # else: t5 = min(t4, 1.0 - t5) # dense1 = TRUE outputs.dense1[i] = t5 # else: t2 = min(t1, 1.0 - t2) # if b1 is GT_1: t3 = min(t2, _B1_GT_1(inputs.b1[i])) # Strand = TRUE outputs.Strand[i] = t3 # else: t3 = min(t2, 1.0 - t3) # if b3 is LT_005: t4 = min(t3, _B3_LT_005(inputs.b3[i])) # Sand = TRUE outputs.Sand[i] = t4 # else: t4 = min(t3, 1.0 - t4) # if b3 is LT_01 and b8 is GT_0: t5 = min(t4, min(_B3_LT_01(inputs.b3[i]), _B8_GT_0(inputs.b8[i]))) # Misch = TRUE outputs.Misch[i] = t5 # else: t5 = min(t4, 1.0 - t5) # if b3 is LT_015 and b8 is GT_0: t6 = min(t5, min(_B3_LT_015(inputs.b3[i]), _B8_GT_0(inputs.b8[i]))) # Misch2 = TRUE outputs.Misch2[i] = t6 # else: t6 = min(t5, 1.0 - t6) # if b3 is LT_02 and b2 is GT_0 and b8 is GT_0: t7 = min(t6, min(min(_B3_LT_02(inputs.b3[i]), _B2_GT_0(inputs.b2[i])), _B8_GT_0(inputs.b8[i]))) # Schlick = TRUE outputs.Schlick[i] = t7 # else: t7 = min(t6, 1.0 - t7) # if b16 is GT_0 and b8 is GT_0: t8 = min(t7, min(_B16_GT_0(inputs.b16[i]), _B8_GT_0(inputs.b8[i]))) # schlick_t = TRUE outputs.schlick_t[i] = t8 # else: t8 = min(t7, 1.0 - t8) # Wasser2 = TRUE outputs.Wasser2[i] = t8
mit
-7,789,529,066,740,844,000
24.647303
270
0.44928
false
2.405526
false
false
false
isabellemao/Hello-World
python/Junior2015CCCJ4.py
1
1278
#Problem J4: Arrival Time

departure_time = input()
split_departure = list(departure_time) #The time of departure, split into a list.

#Split the list
departure_hour = split_departure[0:2]
departure_minute = split_departure[3:5]

#Change the split list to integers.
departure_hour = int("".join(departure_hour))
departure_minute = int("".join(departure_minute))

#The start and end of the rush hours
rh_start_1 = 7
rh_end_1 = 10
rh_start_2 = 15
rh_end_2 = 19

#Set the current time
hour = departure_hour
minute = departure_minute

#For the 120 minutes it usually takes Fiona to commute
for counter in range(1, 121):
    #If it's currently rush hour
    if hour >= rh_start_1 and hour < rh_end_1 or hour >= rh_start_2 and hour < rh_end_2:
        #Twice as slow if rush hour
        minute += 2
    else:
        #Normal speed if normal time
        minute += 1
    if minute >= 60:
        minute = 0
        #Reset hour
        hour += 1
        if hour == 24:
            hour = 0

#Add fake zeroes if required.
if hour < 10:
    hour = str(hour)
    hour = "0" + hour
else:
    hour = str(hour)
if minute < 10:
    minute = str(minute)
    minute = "0" + minute
else:
    minute = str(minute)

#Make a valid output.
output = hour , ":" , minute
output = "".join(output)
print(output)
apache-2.0
-2,193,184,907,640,587,300
22.666667
88
0.640063
false
3.124694
false
false
false
robosafe/testbench_vRAL_hydro
bert2_simulator/sim_step_monitors/assertion_monitor_manager.py
1
2830
#!/usr/bin/env python
"""
Assertion Monitor Manager

Created by David Western, June 2015.
"""

from coverage import coverage
import imp
import rospkg
import rospy
from std_msgs.msg import UInt64
from std_srvs.srv import Empty
import sys


class AMM:

    def __init__(self,AM_list_file,trace_label):

        # Read list of assertion monitors to run (from file?):
        rospack = rospkg.RosPack()
        path = rospack.get_path('bert2_simulator')
        path = path+'/sim_step_monitors/'
        print("--- Assertion monitors to run:")
        self.AM_names = [line.rstrip('\n') for line in open(path+AM_list_file)]
        print(self.AM_names)

        # Instantiate assertion monitors:
        self.AMs = []  # Initialise empty list of AMs.
        for idx, class_name in enumerate(self.AM_names):
            print(class_name)
            print path+class_name+'.py'
            module = imp.load_source(class_name, path+class_name+'.py')
            #module = __import__(path+class_name) # N.B. These two lines imply that we
            class_ = getattr(module, class_name)  # require the AM to be defined in a
                                                  # file with the same name as the class.
            self.AMs.append(class_(trace_label))

            # Check AM has the mandatory attributes:
            mand_attrs = ['step']
            for attr in mand_attrs:
                if not hasattr(self.AMs[idx],attr):
                    rospy.logerr("Assertion monitor specification '%s' does not define the attribute \
                                 '%s', which is required by AMM (assertion_monitor_manager.py). \
                                 Does %s inherite from an assertion monitor base class?",
                                 self.AMs[idx].__name__, attr, self.AMs[idx].__name__)

        # Get service
        self.unpause_gazebo = rospy.ServiceProxy('gazebo/unpause_physics',Empty)

        # Subscriber to triggers, which come on each sim step:
        rospy.Subscriber("AM_trigger", UInt64, self.trigger_AMs)

    def trigger_AMs(self,data):
        iteration = data.data
        sim_time = rospy.get_time()

        # Step all assertion monitors:
        for idx, AM in enumerate(self.AMs):
            AM.step(iteration,sim_time)

        # Release gazebo now we've finished the checks for this step:
        #print "unpausing"
        #self.unpause_gazebo() # Problem: This line prevents Gazebo's pause button from working (unless you
                               # get a lucky click).


if __name__ == '__main__':
    try:
        if len(sys.argv) < 3:
            print("usage: rosrun [package_name] assertion_monitor_manager.py AM_list_file.txt report_file_name")
        else:
            rospy.init_node('AMM')
            AMMInst = AMM(sys.argv[1],sys.argv[2])
            rospy.spin()
    except rospy.ROSInterruptException: #to stop the code when pressing Ctr+c
        pass
gpl-3.0
1,416,007,924,697,365,000
31.906977
109
0.602473
false
3.723684
false
false
false
eayunstack/eayunstack-upgrade
ansible/library/keystone_v2_endpoint.py
1
9178
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, Kevin Carter <[email protected]> # # Copyright 2014, Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on Jimmy Tang's implementation DOCUMENTATION = """ --- module: keystone_v2_endpoint short_description: - Manage OpenStack Identity (keystone) v2 endpoint. description: - Manage OpenStack Identity (keystone) v2 endpoint. endpoints. options: token: description: - The token to be uses in case the password is not specified required: true default: None endpoint: description: - The keystone url for authentication required: true service_name: description: - Name of the service. required: true default: None region_name: description: - Name of the region. required: true default: None service_type: description: - Type of service. required: true default: None endpoint_dict: description: - Dict of endpoint urls to add to keystone for a service required: true default: None type: dict state: description: - Ensuring the endpoint is either present, absent. - It always ensures endpoint is updated to latest url. required: False default: 'present' requirements: [ python-keystoneclient ] """ EXAMPLES = """ # Create an endpoint - keystone_v2_endpoint: region_name: "RegionOne" service_name: "glance" service_type: "image" endpoint: "http://127.0.0.1:5000/v2.0/" token: "ChangeMe" endpoint_dict: publicurl: "http://127.0.0.1:9292" adminurl: "http://127.0.0.1:9292" internalurl: "http://127.0.0.1:9292" """ try: from keystoneclient.v2_0 import client except ImportError: keystoneclient_found = False else: keystoneclient_found = True class ManageKeystoneV2Endpoint(object): def __init__(self, module): """Manage Keystone via Ansible.""" self.state_change = False self.keystone = None # Load AnsibleModule self.module = module @staticmethod def _facts(facts): """Return a dict for our Ansible facts. :param facts: ``dict`` Dict with data to return """ return {'keystone_facts': facts} def failure(self, error, rc, msg): """Return a Failure when running an Ansible command. :param error: ``str`` Error that occurred. :param rc: ``int`` Return code while executing an Ansible command. :param msg: ``str`` Message to report. """ self.module.fail_json(msg=msg, rc=rc, err=error) def _authenticate(self): """Return a keystone client object.""" endpoint = self.module.params.get('endpoint') token = self.module.params.get('token') if token is None: self.failure( error='Missing Auth Token', rc=2, msg='Auto token is required!' ) if token: self.keystone = client.Client( endpoint=endpoint, token=token ) def _get_service(self, name, srv_type=None): for entry in self.keystone.services.list(): if srv_type is not None: if entry.type == srv_type and name == entry.name: return entry elif entry.name == name: return entry else: return None def _get_endpoint(self, region, service_id): """ Getting endpoints per complete definition Returns the endpoint details for an endpoint matching region, service id. 
:param service_id: service to which the endpoint belongs :param region: geographic location of the endpoint """ for entry in self.keystone.endpoints.list(): check = [ entry.region == region, entry.service_id == service_id, ] if all(check): return entry else: return None def _compare_endpoint_info(self, endpoint, endpoint_dict): """ Compare existed endpoint with module parameters Return True if public, admin, internal urls are all the same. :param endpoint: endpoint existed :param endpoint_dict: endpoint info passed in """ check = [ endpoint.adminurl == endpoint_dict.get('adminurl'), endpoint.publicurl == endpoint_dict.get('publicurl'), endpoint.internalurl == endpoint_dict.get('internalurl') ] if all(check): return True else: return False def ensure_endpoint(self): """Ensures the deletion/modification/addition of endpoints within Keystone. Returns the endpoint ID on a successful run. """ self._authenticate() service_name = self.module.params.get('service_name') service_type = self.module.params.get('service_type') region = self.module.params.get('region_name') endpoint_dict = self.module.params.get('endpoint_dict') state = self.module.params.get('state') endpoint_dict = { 'adminurl': endpoint_dict.get('adminurl', ''), 'publicurl': endpoint_dict.get('publicurl', ''), 'internalurl': endpoint_dict.get('internalurl', '') } service = self._get_service(name=service_name, srv_type=service_type) if service is None: self.failure( error='service [ %s ] was not found.' % service_name, rc=2, msg='Service was not found, does it exist?' ) existed_endpoint = self._get_endpoint( region=region, service_id=service.id, ) delete_existed = False if state == 'present': ''' Creating an endpoint (if it does not exist) or creating a new one, and then deleting the existing endpoint that matches the service type, name, and region. ''' if existed_endpoint: if not self._compare_endpoint_info(existed_endpoint, endpoint_dict): delete_existed = True else: endpoint = existed_endpoint if (not existed_endpoint or delete_existed): self.state_change = True endpoint = self.keystone.endpoints.create( region=region, service_id=service.id, **endpoint_dict ) elif state == 'absent': if existed_endpoint is not None: self.state_change = True delete_existed = True if delete_existed: result = self.keystone.endpoints.delete(existed_endpoint.id) if result[0].status_code != 204: self.module.fail() if state != 'absent': facts = self._facts(endpoint.to_dict()) else: facts = self._facts({}) self.module.exit_json( changed=self.state_change, ansible_facts=facts ) # TODO(evrardjp): Deprecate state=update in Q. def main(): module = AnsibleModule( argument_spec=dict( token=dict( required=True ), endpoint=dict( required=True, ), region_name=dict( required=True ), service_name=dict( required=True ), service_type=dict( required=True ), endpoint_dict=dict( required=True, type='dict' ), state=dict( choices=['present', 'absent'], required=False, default='present' ) ), supports_check_mode=False, ) km = ManageKeystoneV2Endpoint(module=module) if not keystoneclient_found: km.failure( error='python-keystoneclient is missing', rc=2, msg='keystone client was not importable, is it installed?' ) facts = km.ensure_endpoint() # import module snippets from ansible.module_utils.basic import * # NOQA if __name__ == '__main__': main()
apache-2.0
-101,433,040,338,243,950
28.322684
78
0.552953
false
4.541316
false
false
false
Kivvix/stage-LPC
compareSrc/searchSDSSdata.py
1
4221
#!/usr/bin/env python # -*- coding: utf-8 -*- import time import os import glob from config import * import data.calexp import data.src ## @def attributs # @brief attributs which we select in SDSS DB and src fits file attributs = 'objid,run,camcol,field,ra,dec,u,g,r,i,z' ## Calexp treatment ## def coordCalexp( fitsNum , calexpFits , first=True ): coordMin, coordMax = data.calexp.coord( calexpFits , first ) if ( first ): return coordMin else: return coordMax def savCalexp( coordMin , coordMax , fitsNum ): global attributs , PATH_OUTPUT calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum ) data.calexp.write( calexpLines , attributs , fitsNum , PATH_OUTPUT , True ) def calexp( fitsNum , calexpFits , first=True ): """ find and write calexp data (id,ra,dec,mag) :param fitsNum: number of fits file (``rrrrrr-bc-ffff``) :param calexpFits: name of calexp fits file :param first: take all the picture or less 128 first pixels :type fitsNum: string :type calexpFits: string :type first: boolean """ global attributs , PATH_OUTPUT coordMin, coordMax = data.calexp.coord( calexpFits , first ) calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum ) data.calexp.write( calexpLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first ) ## Src treatment ## def src( fitsNum , srcFits , first=True ): """ find and write src data (id,ra,dec,mag) :param fitsNum: number of fits file (``rrrrrr-bc-ffff``) :param srcFits: name of src fits file :param first: take all the picture or less 128 first pixels :type fitsNum: string :type srcFits: string :type first: boolean """ global attributs , PATH_OUTPUT srcCoord,srcMag = data.src.coord( srcFits , fitsNum , first ) srcLines = data.src.map( srcCoord , srcMag ) data.src.write( srcLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first ) def analyCol( runNum , c ): """ function threaded calling research of data :param runNum_c: tupe with run number and column of the CCD (1-6) :type runNum_c: tuple of string """ global b , PATH_DATA , PWD print " " + str(c) + " ", # data of each pair of fits files first = True for fits in glob.glob( c + "/" + b + "/calexp/calexp*.fits" ): fitsNum = fits[18:32] ## @def calexpFits # @brief path and name of calexp fits file calexpFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/calexp/calexp-" + fitsNum + ".fits" ## @def srcFits # @brief path and name of src fits file #srcFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/src/src-" + fitsNum + ".fits" #calexp( fitsNum , calexpFits , first ) if ( first ): coordMin = coordCalexp( fitsNum , calexpFits , first ) else: coordMax = coordCalexp( fitsNum , calexpFits , first ) #src( fitsNum , srcFits , first ) first = False savCalexp( coordMin , coordMax , "%06d" % int(runNum) + "-" + b + c ) def analyRun( runNum ): global b , PWD , PATH_DATA , PATH_OUTPUT , attributs print "run : " + str(runNum ) + " : ", os.chdir( PATH_DATA + "/" + runNum ) columns = glob.glob( "*" ) for c in columns : analyCol( runNum , c ) if __name__ == '__main__': os.chdir( PATH_DATA ) runs = glob.glob( "*" ) #runs = ( 7158, 7112, 5924, 5566, 6421, 7057, 6430, 4895, 5895, 6474, 6383, 7038, 5642, 6409, 6513, 6501, 6552, 2650, 6559, 6355, 7177, 7121, 3465, 7170, 7051, 6283, 6458, 5853, 6484, 5765, 2708, 5786, 4253, 6934, 6508, 2662, 6518, 6584, 4188, 6976, 7202, 7173, 4153, 5820, 2649, 7140, 6330, 3388, 7117, 6504, 6314, 4128, 6596, 6564, 5807, 6367, 6373, 5622, 5882, 7034, 7136, 6577, 6600, 2768, 3437, 4927, 6414, 3434, 5813, 7084, 4858, 7124, 6982, 4917, 4192, 5898, 6479, 4868, 7106, 
7195, 5744, 3360, 4198, 6963, 6533, 4933, 5603, 3384, 7155, 5619, 4207, 4849, 5582, 7024, 1755, 5709, 5781, 5770, 7145, 5754, 5646, 5800, 5759, 6287, 6568, 7054, 4203, 5776, 6433, 4247, 5823, 5052, 3325, 5836, 5590, 6580, 7161, 2728, 4145, 5633, 6461, 6555, 6955, 4874, 5792, 5918, 6425, 6377, 4263, 5878, 6441, 6447, 7080, 5905, 5713, 6618, 6537, 5637, 6402, 6530, 7047, 6524, 7101, 6293 ) for r in runs : analyRun( r ) print " " time.sleep(60)
mit
-5,985,041,647,614,164,000
33.040323
875
0.644871
false
2.594345
false
false
false
xaled/wunderous-analytics
wunderous/drive.py
1
5688
import os import sys import httplib2 from oauth2client.file import Storage from apiclient import discovery from oauth2client.client import OAuth2WebServerFlow from wunderous.config import config OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive' SHEETS_OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/drive.file https://www.googleapis.com/auth/spreadsheets https://www.googleapis.com/auth/spreadsheets.readonly' REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob' CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'credentials.json') SHEETS_CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'sheets_credentials.json') # CONFIG_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "wunderous.config.json") sheet_service = None drive_service = None # def load_configs(): # client_secret = config['client_secret'] # client_id = config['client_id'] # return client_id, client_secret def init_drive_service(): global drive_service if drive_service: return drive_service storage = Storage(CREDS_FILE) credentials = storage.get() if credentials is None: # Run through the OAuth flow and retrieve credentials # client_id, client_secret = load_configs() flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI) authorize_url = flow.step1_get_authorize_url() print('Go to the following link in your browser: ' + authorize_url) code = input('Enter verification code: ').strip() credentials = flow.step2_exchange(code) storage.put(credentials) # Create an httplib2.Http object and authorize it with our credentials http = httplib2.Http() http = credentials.authorize(http) drive_service = discovery.build('drive', 'v2', http=http) return drive_service def init_sheet_service(): global sheet_service if sheet_service: return sheet_service storage = Storage(SHEETS_CREDS_FILE) credentials = storage.get() if credentials is None: # Run through the OAuth flow and retrieve credentials # client_id, client_secret = load_configs() flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI) authorize_url = flow.step1_get_authorize_url() print('Go to the following link in your browser: ' + authorize_url) code = input('Enter verification code: ').strip() credentials = flow.step2_exchange(code) storage.put(credentials) # Create an httplib2.Http object and authorize it with our credentials http = httplib2.Http() http = credentials.authorize(http) sheet_service = discovery.build('sheets', 'v4', http=http) return sheet_service def list_files(service): page_token = None while True: param = {} if page_token: param['pageToken'] = page_token files = service.files().list(**param).execute() for item in files['items']: yield item page_token = files.get('nextPageToken') if not page_token: break def _download_file(drive_service, download_url, outfile): resp, content = drive_service._http.request(download_url) if resp.status == 200: with open(outfile, 'wb') as f: f.write(content) print("OK") return else: raise Exception("ERROR downloading %s, response code is not 200!" 
% outfile) def download_file(outfile, fileid): drive_service = init_drive_service() for item in list_files(drive_service): if fileid == item.get('id'): if 'downloadUrl' in item: _download_file(drive_service, item['downloadUrl'], outfile) return else: raise Exception("No download link is found for file: %s" % item['title']) raise Exception("No file with id: %s is found " % fileid) def get_sheet_metadata(spreadsheet_id): sheet_service = init_sheet_service() sheet_metadata = sheet_service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() return sheet_metadata def get_sheet_values(spreadsheet_id, range_): sheet_service = init_sheet_service() request = sheet_service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_, valueRenderOption='FORMATTED_VALUE', dateTimeRenderOption='SERIAL_NUMBER') response = request.execute() return response def get_sheet_value(spreadsheet_id, range_): response = get_sheet_values(spreadsheet_id, range_) try: return response['values'][0][0] except: return '' def update_sheet_values(spreadsheet_id, range_, values): sheet_service = init_sheet_service() body = {'values': values} result = sheet_service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=range_, body=body, valueInputOption='USER_ENTERED').execute() return result.get('updatedCells') def append_sheet_values(spreadsheet_id, range_, values): sheet_service = init_sheet_service() body = {'values': values} result = sheet_service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_, body=body, valueInputOption='USER_ENTERED').execute() return result.get('updates').get('updatedCells')
mit
7,538,950,005,650,568,000
37.174497
249
0.651371
false
3.93361
true
false
false
Goamaral/SCC
inputWindow.py
1
31922
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'inputWindow.ui' # # Created by: PyQt4 UI code generator 4.11.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(708, 428) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8("centralwidget")) self.verticalLayout_4 = QtGui.QVBoxLayout(self.centralwidget) self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4")) self.List = QtGui.QVBoxLayout() self.List.setObjectName(_fromUtf8("List")) self.listItem_3 = QtGui.QWidget(self.centralwidget) self.listItem_3.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_3.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_3.setObjectName(_fromUtf8("listItem_3")) self.horizontalLayout_5 = QtGui.QHBoxLayout(self.listItem_3) self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5")) self.nameLabel_3 = QtGui.QLabel(self.listItem_3) self.nameLabel_3.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_3.setMaximumSize(QtCore.QSize(75, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_3.setFont(font) self.nameLabel_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_3.setObjectName(_fromUtf8("nameLabel_3")) self.horizontalLayout_5.addWidget(self.nameLabel_3) self.nameLabel_27 = QtGui.QLabel(self.listItem_3) self.nameLabel_27.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_27.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_27.setFont(font) self.nameLabel_27.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_27.setObjectName(_fromUtf8("nameLabel_27")) self.horizontalLayout_5.addWidget(self.nameLabel_27) self.mediaChegadaA = QtGui.QLineEdit(self.listItem_3) self.mediaChegadaA.setMinimumSize(QtCore.QSize(50, 25)) self.mediaChegadaA.setMaximumSize(QtCore.QSize(50, 25)) self.mediaChegadaA.setText(_fromUtf8("")) self.mediaChegadaA.setObjectName(_fromUtf8("mediaChegadaA")) self.horizontalLayout_5.addWidget(self.mediaChegadaA) spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_5.addItem(spacerItem) self.List.addWidget(self.listItem_3) self.listItem_6 = QtGui.QWidget(self.centralwidget) self.listItem_6.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_6.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_6.setObjectName(_fromUtf8("listItem_6")) self.horizontalLayout_7 = QtGui.QHBoxLayout(self.listItem_6) self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7")) self.nameLabel_7 = QtGui.QLabel(self.listItem_6) self.nameLabel_7.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_7.setMaximumSize(QtCore.QSize(75, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_7.setFont(font) self.nameLabel_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_7.setObjectName(_fromUtf8("nameLabel_7")) 
self.horizontalLayout_7.addWidget(self.nameLabel_7) self.nameLabel_8 = QtGui.QLabel(self.listItem_6) self.nameLabel_8.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_8.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_8.setFont(font) self.nameLabel_8.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_8.setObjectName(_fromUtf8("nameLabel_8")) self.horizontalLayout_7.addWidget(self.nameLabel_8) self.mediaPerfuracaoA = QtGui.QLineEdit(self.listItem_6) self.mediaPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25)) self.mediaPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25)) self.mediaPerfuracaoA.setText(_fromUtf8("")) self.mediaPerfuracaoA.setObjectName(_fromUtf8("mediaPerfuracaoA")) self.horizontalLayout_7.addWidget(self.mediaPerfuracaoA) self.nameLabel_9 = QtGui.QLabel(self.listItem_6) self.nameLabel_9.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_9.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_9.setFont(font) self.nameLabel_9.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_9.setObjectName(_fromUtf8("nameLabel_9")) self.horizontalLayout_7.addWidget(self.nameLabel_9) self.desvioPerfuracaoA = QtGui.QLineEdit(self.listItem_6) self.desvioPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25)) self.desvioPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25)) self.desvioPerfuracaoA.setText(_fromUtf8("")) self.desvioPerfuracaoA.setObjectName(_fromUtf8("desvioPerfuracaoA")) self.horizontalLayout_7.addWidget(self.desvioPerfuracaoA) self.nameLabel_10 = QtGui.QLabel(self.listItem_6) self.nameLabel_10.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_10.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_10.setFont(font) self.nameLabel_10.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_10.setObjectName(_fromUtf8("nameLabel_10")) self.horizontalLayout_7.addWidget(self.nameLabel_10) self.nMaquinasPerfuracaoA = QtGui.QLineEdit(self.listItem_6) self.nMaquinasPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25)) self.nMaquinasPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25)) self.nMaquinasPerfuracaoA.setText(_fromUtf8("")) self.nMaquinasPerfuracaoA.setObjectName(_fromUtf8("nMaquinasPerfuracaoA")) self.horizontalLayout_7.addWidget(self.nMaquinasPerfuracaoA) spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_7.addItem(spacerItem1) self.List.addWidget(self.listItem_6) self.listItem_7 = QtGui.QWidget(self.centralwidget) self.listItem_7.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_7.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_7.setObjectName(_fromUtf8("listItem_7")) self.horizontalLayout_8 = QtGui.QHBoxLayout(self.listItem_7) self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8")) self.nameLabel_11 = QtGui.QLabel(self.listItem_7) self.nameLabel_11.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_11.setMaximumSize(QtCore.QSize(75, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_11.setFont(font) self.nameLabel_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_11.setObjectName(_fromUtf8("nameLabel_11")) self.horizontalLayout_8.addWidget(self.nameLabel_11) self.nameLabel_12 = QtGui.QLabel(self.listItem_7) self.nameLabel_12.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_12.setMaximumSize(QtCore.QSize(100, 16777215)) font = 
QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_12.setFont(font) self.nameLabel_12.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_12.setObjectName(_fromUtf8("nameLabel_12")) self.horizontalLayout_8.addWidget(self.nameLabel_12) self.mediaPolimentoA = QtGui.QLineEdit(self.listItem_7) self.mediaPolimentoA.setMinimumSize(QtCore.QSize(50, 25)) self.mediaPolimentoA.setMaximumSize(QtCore.QSize(50, 25)) self.mediaPolimentoA.setText(_fromUtf8("")) self.mediaPolimentoA.setObjectName(_fromUtf8("mediaPolimentoA")) self.horizontalLayout_8.addWidget(self.mediaPolimentoA) self.nameLabel_13 = QtGui.QLabel(self.listItem_7) self.nameLabel_13.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_13.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_13.setFont(font) self.nameLabel_13.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_13.setObjectName(_fromUtf8("nameLabel_13")) self.horizontalLayout_8.addWidget(self.nameLabel_13) self.desvioPolimentoA = QtGui.QLineEdit(self.listItem_7) self.desvioPolimentoA.setMinimumSize(QtCore.QSize(50, 25)) self.desvioPolimentoA.setMaximumSize(QtCore.QSize(50, 25)) self.desvioPolimentoA.setText(_fromUtf8("")) self.desvioPolimentoA.setObjectName(_fromUtf8("desvioPolimentoA")) self.horizontalLayout_8.addWidget(self.desvioPolimentoA) self.nameLabel_14 = QtGui.QLabel(self.listItem_7) self.nameLabel_14.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_14.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_14.setFont(font) self.nameLabel_14.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_14.setObjectName(_fromUtf8("nameLabel_14")) self.horizontalLayout_8.addWidget(self.nameLabel_14) self.nMaquinasPolimentoA = QtGui.QLineEdit(self.listItem_7) self.nMaquinasPolimentoA.setMinimumSize(QtCore.QSize(50, 25)) self.nMaquinasPolimentoA.setMaximumSize(QtCore.QSize(50, 25)) self.nMaquinasPolimentoA.setText(_fromUtf8("")) self.nMaquinasPolimentoA.setObjectName(_fromUtf8("nMaquinasPolimentoA")) self.horizontalLayout_8.addWidget(self.nMaquinasPolimentoA) spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_8.addItem(spacerItem2) self.List.addWidget(self.listItem_7) self.line_2 = QtGui.QFrame(self.centralwidget) self.line_2.setMinimumSize(QtCore.QSize(5, 0)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.line_2.setFont(font) self.line_2.setFrameShape(QtGui.QFrame.HLine) self.line_2.setFrameShadow(QtGui.QFrame.Sunken) self.line_2.setObjectName(_fromUtf8("line_2")) self.List.addWidget(self.line_2) self.listItem_4 = QtGui.QWidget(self.centralwidget) self.listItem_4.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_4.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_4.setObjectName(_fromUtf8("listItem_4")) self.horizontalLayout_6 = QtGui.QHBoxLayout(self.listItem_4) self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6")) self.nameLabel_4 = QtGui.QLabel(self.listItem_4) self.nameLabel_4.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_4.setMaximumSize(QtCore.QSize(75, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_4.setFont(font) self.nameLabel_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_4.setObjectName(_fromUtf8("nameLabel_4")) self.horizontalLayout_6.addWidget(self.nameLabel_4) self.nameLabel_31 = QtGui.QLabel(self.listItem_4) 
self.nameLabel_31.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_31.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_31.setFont(font) self.nameLabel_31.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_31.setObjectName(_fromUtf8("nameLabel_31")) self.horizontalLayout_6.addWidget(self.nameLabel_31) self.mediaChegadaB = QtGui.QLineEdit(self.listItem_4) self.mediaChegadaB.setMinimumSize(QtCore.QSize(50, 25)) self.mediaChegadaB.setMaximumSize(QtCore.QSize(50, 25)) self.mediaChegadaB.setText(_fromUtf8("")) self.mediaChegadaB.setObjectName(_fromUtf8("mediaChegadaB")) self.horizontalLayout_6.addWidget(self.mediaChegadaB) spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_6.addItem(spacerItem3) self.List.addWidget(self.listItem_4) self.listItem_9 = QtGui.QWidget(self.centralwidget) self.listItem_9.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_9.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_9.setObjectName(_fromUtf8("listItem_9")) self.horizontalLayout_13 = QtGui.QHBoxLayout(self.listItem_9) self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13")) self.nameLabel_36 = QtGui.QLabel(self.listItem_9) self.nameLabel_36.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_36.setMaximumSize(QtCore.QSize(75, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_36.setFont(font) self.nameLabel_36.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_36.setObjectName(_fromUtf8("nameLabel_36")) self.horizontalLayout_13.addWidget(self.nameLabel_36) self.nameLabel_37 = QtGui.QLabel(self.listItem_9) self.nameLabel_37.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_37.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_37.setFont(font) self.nameLabel_37.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_37.setObjectName(_fromUtf8("nameLabel_37")) self.horizontalLayout_13.addWidget(self.nameLabel_37) self.mediaPerfuracaoB = QtGui.QLineEdit(self.listItem_9) self.mediaPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25)) self.mediaPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25)) self.mediaPerfuracaoB.setText(_fromUtf8("")) self.mediaPerfuracaoB.setObjectName(_fromUtf8("mediaPerfuracaoB")) self.horizontalLayout_13.addWidget(self.mediaPerfuracaoB) self.nameLabel_38 = QtGui.QLabel(self.listItem_9) self.nameLabel_38.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_38.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_38.setFont(font) self.nameLabel_38.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_38.setObjectName(_fromUtf8("nameLabel_38")) self.horizontalLayout_13.addWidget(self.nameLabel_38) self.desvioPerfuracaoB = QtGui.QLineEdit(self.listItem_9) self.desvioPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25)) self.desvioPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25)) self.desvioPerfuracaoB.setText(_fromUtf8("")) self.desvioPerfuracaoB.setObjectName(_fromUtf8("desvioPerfuracaoB")) self.horizontalLayout_13.addWidget(self.desvioPerfuracaoB) self.nameLabel_39 = QtGui.QLabel(self.listItem_9) self.nameLabel_39.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_39.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_39.setFont(font) 
self.nameLabel_39.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_39.setObjectName(_fromUtf8("nameLabel_39")) self.horizontalLayout_13.addWidget(self.nameLabel_39) self.nMaquinasPerfuracaoB = QtGui.QLineEdit(self.listItem_9) self.nMaquinasPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25)) self.nMaquinasPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25)) self.nMaquinasPerfuracaoB.setText(_fromUtf8("")) self.nMaquinasPerfuracaoB.setObjectName(_fromUtf8("nMaquinasPerfuracaoB")) self.horizontalLayout_13.addWidget(self.nMaquinasPerfuracaoB) spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_13.addItem(spacerItem4) self.List.addWidget(self.listItem_9) self.listItem_8 = QtGui.QWidget(self.centralwidget) self.listItem_8.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_8.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_8.setObjectName(_fromUtf8("listItem_8")) self.horizontalLayout_10 = QtGui.QHBoxLayout(self.listItem_8) self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10")) self.nameLabel_19 = QtGui.QLabel(self.listItem_8) self.nameLabel_19.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_19.setMaximumSize(QtCore.QSize(75, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_19.setFont(font) self.nameLabel_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_19.setObjectName(_fromUtf8("nameLabel_19")) self.horizontalLayout_10.addWidget(self.nameLabel_19) self.nameLabel_20 = QtGui.QLabel(self.listItem_8) self.nameLabel_20.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_20.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_20.setFont(font) self.nameLabel_20.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_20.setObjectName(_fromUtf8("nameLabel_20")) self.horizontalLayout_10.addWidget(self.nameLabel_20) self.mediaPolimentoB = QtGui.QLineEdit(self.listItem_8) self.mediaPolimentoB.setMinimumSize(QtCore.QSize(50, 25)) self.mediaPolimentoB.setMaximumSize(QtCore.QSize(50, 25)) self.mediaPolimentoB.setText(_fromUtf8("")) self.mediaPolimentoB.setObjectName(_fromUtf8("mediaPolimentoB")) self.horizontalLayout_10.addWidget(self.mediaPolimentoB) self.nameLabel_21 = QtGui.QLabel(self.listItem_8) self.nameLabel_21.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_21.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_21.setFont(font) self.nameLabel_21.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_21.setObjectName(_fromUtf8("nameLabel_21")) self.horizontalLayout_10.addWidget(self.nameLabel_21) self.desvioPolimentoB = QtGui.QLineEdit(self.listItem_8) self.desvioPolimentoB.setMinimumSize(QtCore.QSize(50, 25)) self.desvioPolimentoB.setMaximumSize(QtCore.QSize(50, 25)) self.desvioPolimentoB.setText(_fromUtf8("")) self.desvioPolimentoB.setObjectName(_fromUtf8("desvioPolimentoB")) self.horizontalLayout_10.addWidget(self.desvioPolimentoB) self.nameLabel_22 = QtGui.QLabel(self.listItem_8) self.nameLabel_22.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_22.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_22.setFont(font) self.nameLabel_22.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_22.setObjectName(_fromUtf8("nameLabel_22")) self.horizontalLayout_10.addWidget(self.nameLabel_22) self.nMaquinasPolimentoB = 
QtGui.QLineEdit(self.listItem_8) self.nMaquinasPolimentoB.setMinimumSize(QtCore.QSize(50, 25)) self.nMaquinasPolimentoB.setMaximumSize(QtCore.QSize(50, 25)) self.nMaquinasPolimentoB.setText(_fromUtf8("")) self.nMaquinasPolimentoB.setObjectName(_fromUtf8("nMaquinasPolimentoB")) self.horizontalLayout_10.addWidget(self.nMaquinasPolimentoB) spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_10.addItem(spacerItem5) self.List.addWidget(self.listItem_8) self.line = QtGui.QFrame(self.centralwidget) self.line.setMinimumSize(QtCore.QSize(0, 5)) self.line.setFrameShape(QtGui.QFrame.HLine) self.line.setFrameShadow(QtGui.QFrame.Sunken) self.line.setObjectName(_fromUtf8("line")) self.List.addWidget(self.line) self.listItem_11 = QtGui.QWidget(self.centralwidget) self.listItem_11.setMinimumSize(QtCore.QSize(0, 0)) self.listItem_11.setMaximumSize(QtCore.QSize(10000, 100)) self.listItem_11.setObjectName(_fromUtf8("listItem_11")) self.horizontalLayout_12 = QtGui.QHBoxLayout(self.listItem_11) self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12")) self.nameLabel_23 = QtGui.QLabel(self.listItem_11) self.nameLabel_23.setMinimumSize(QtCore.QSize(125, 0)) self.nameLabel_23.setMaximumSize(QtCore.QSize(125, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_23.setFont(font) self.nameLabel_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_23.setObjectName(_fromUtf8("nameLabel_23")) self.horizontalLayout_12.addWidget(self.nameLabel_23) self.nameLabel_24 = QtGui.QLabel(self.listItem_11) self.nameLabel_24.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_24.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_24.setFont(font) self.nameLabel_24.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_24.setObjectName(_fromUtf8("nameLabel_24")) self.horizontalLayout_12.addWidget(self.nameLabel_24) self.mediaEnvernizamento = QtGui.QLineEdit(self.listItem_11) self.mediaEnvernizamento.setMinimumSize(QtCore.QSize(50, 25)) self.mediaEnvernizamento.setMaximumSize(QtCore.QSize(50, 25)) self.mediaEnvernizamento.setText(_fromUtf8("")) self.mediaEnvernizamento.setObjectName(_fromUtf8("mediaEnvernizamento")) self.horizontalLayout_12.addWidget(self.mediaEnvernizamento) self.nameLabel_25 = QtGui.QLabel(self.listItem_11) self.nameLabel_25.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_25.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_25.setFont(font) self.nameLabel_25.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_25.setObjectName(_fromUtf8("nameLabel_25")) self.horizontalLayout_12.addWidget(self.nameLabel_25) self.desvioEnvernizamento = QtGui.QLineEdit(self.listItem_11) self.desvioEnvernizamento.setMinimumSize(QtCore.QSize(50, 25)) self.desvioEnvernizamento.setMaximumSize(QtCore.QSize(50, 25)) self.desvioEnvernizamento.setText(_fromUtf8("")) self.desvioEnvernizamento.setObjectName(_fromUtf8("desvioEnvernizamento")) self.horizontalLayout_12.addWidget(self.desvioEnvernizamento) self.nameLabel_26 = QtGui.QLabel(self.listItem_11) self.nameLabel_26.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_26.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.nameLabel_26.setFont(font) self.nameLabel_26.setAlignment(QtCore.Qt.AlignCenter) 
self.nameLabel_26.setObjectName(_fromUtf8("nameLabel_26")) self.horizontalLayout_12.addWidget(self.nameLabel_26) self.nMaquinasEnvernizamento = QtGui.QLineEdit(self.listItem_11) self.nMaquinasEnvernizamento.setMinimumSize(QtCore.QSize(50, 25)) self.nMaquinasEnvernizamento.setMaximumSize(QtCore.QSize(50, 25)) self.nMaquinasEnvernizamento.setText(_fromUtf8("")) self.nMaquinasEnvernizamento.setObjectName(_fromUtf8("nMaquinasEnvernizamento")) self.horizontalLayout_12.addWidget(self.nMaquinasEnvernizamento) spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_12.addItem(spacerItem6) self.List.addWidget(self.listItem_11) self.verticalLayout_4.addLayout(self.List) self.footer = QtGui.QWidget(self.centralwidget) self.footer.setMaximumSize(QtCore.QSize(100000, 50)) self.footer.setObjectName(_fromUtf8("footer")) self.horizontalLayout = QtGui.QHBoxLayout(self.footer) self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.nameLabel_30 = QtGui.QLabel(self.footer) self.nameLabel_30.setMinimumSize(QtCore.QSize(130, 0)) self.nameLabel_30.setMaximumSize(QtCore.QSize(130, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_30.setFont(font) self.nameLabel_30.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.nameLabel_30.setObjectName(_fromUtf8("nameLabel_30")) self.horizontalLayout.addWidget(self.nameLabel_30) self.tipoLimite = QtGui.QComboBox(self.footer) self.tipoLimite.setMinimumSize(QtCore.QSize(125, 0)) self.tipoLimite.setMaximumSize(QtCore.QSize(125, 16777215)) self.tipoLimite.setObjectName(_fromUtf8("tipoLimite")) self.tipoLimite.addItem(_fromUtf8("")) self.tipoLimite.addItem(_fromUtf8("")) self.horizontalLayout.addWidget(self.tipoLimite) self.nameLabel_28 = QtGui.QLabel(self.footer) self.nameLabel_28.setMinimumSize(QtCore.QSize(50, 0)) self.nameLabel_28.setMaximumSize(QtCore.QSize(50, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_28.setFont(font) self.nameLabel_28.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_28.setObjectName(_fromUtf8("nameLabel_28")) self.horizontalLayout.addWidget(self.nameLabel_28) self.valorLimite = QtGui.QLineEdit(self.footer) self.valorLimite.setMinimumSize(QtCore.QSize(75, 25)) self.valorLimite.setMaximumSize(QtCore.QSize(75, 25)) self.valorLimite.setText(_fromUtf8("")) self.valorLimite.setObjectName(_fromUtf8("valorLimite")) self.horizontalLayout.addWidget(self.valorLimite) spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem7) self.nameLabel_29 = QtGui.QLabel(self.footer) self.nameLabel_29.setMinimumSize(QtCore.QSize(100, 0)) self.nameLabel_29.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.nameLabel_29.setFont(font) self.nameLabel_29.setAlignment(QtCore.Qt.AlignCenter) self.nameLabel_29.setObjectName(_fromUtf8("nameLabel_29")) self.horizontalLayout.addWidget(self.nameLabel_29) self.nRepeticoes = QtGui.QLineEdit(self.footer) self.nRepeticoes.setMinimumSize(QtCore.QSize(50, 25)) self.nRepeticoes.setMaximumSize(QtCore.QSize(50, 25)) self.nRepeticoes.setText(_fromUtf8("")) self.nRepeticoes.setObjectName(_fromUtf8("nRepeticoes")) self.horizontalLayout.addWidget(self.nRepeticoes) self.botaoSimular = QtGui.QPushButton(self.footer) self.botaoSimular.setMinimumSize(QtCore.QSize(100, 25)) self.botaoSimular.setMaximumSize(QtCore.QSize(100, 25)) font = 
QtGui.QFont() font.setBold(True) font.setWeight(75) self.botaoSimular.setFont(font) self.botaoSimular.setLayoutDirection(QtCore.Qt.RightToLeft) self.botaoSimular.setAutoFillBackground(False) self.botaoSimular.setStyleSheet(_fromUtf8("")) self.botaoSimular.setFlat(False) self.botaoSimular.setObjectName(_fromUtf8("botaoSimular")) self.horizontalLayout.addWidget(self.botaoSimular) self.verticalLayout_4.addWidget(self.footer) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate("MainWindow", "Descriçao da simulaçao", None)) self.nameLabel_3.setText(_translate("MainWindow", "Peças grandes (A)", None)) self.nameLabel_27.setText(_translate("MainWindow", "Media chegada", None)) self.nameLabel_7.setText(_translate("MainWindow", "Perfuraçao", None)) self.nameLabel_8.setText(_translate("MainWindow", "Media", None)) self.nameLabel_9.setText(_translate("MainWindow", "Desvio padrao", None)) self.nameLabel_10.setText(_translate("MainWindow", "Nº maquinas", None)) self.nameLabel_11.setText(_translate("MainWindow", "Polimento", None)) self.nameLabel_12.setText(_translate("MainWindow", "Media", None)) self.nameLabel_13.setText(_translate("MainWindow", "Desvio padrao", None)) self.nameLabel_14.setText(_translate("MainWindow", "Nº maquinas", None)) self.nameLabel_4.setText(_translate("MainWindow", "Peças grandes (B)", None)) self.nameLabel_31.setText(_translate("MainWindow", "Media chegada", None)) self.nameLabel_36.setText(_translate("MainWindow", "Perfuraçao", None)) self.nameLabel_37.setText(_translate("MainWindow", "Media", None)) self.nameLabel_38.setText(_translate("MainWindow", "Desvio padrao", None)) self.nameLabel_39.setText(_translate("MainWindow", "Nº maquinas", None)) self.nameLabel_19.setText(_translate("MainWindow", "Polimento", None)) self.nameLabel_20.setText(_translate("MainWindow", "Media", None)) self.nameLabel_21.setText(_translate("MainWindow", "Desvio padrao", None)) self.nameLabel_22.setText(_translate("MainWindow", "Nº maquinas", None)) self.nameLabel_23.setText(_translate("MainWindow", "Envernizamento", None)) self.nameLabel_24.setText(_translate("MainWindow", "Media", None)) self.nameLabel_25.setText(_translate("MainWindow", "Desvio padrao", None)) self.nameLabel_26.setText(_translate("MainWindow", "Nº maquinas", None)) self.nameLabel_30.setText(_translate("MainWindow", "Limites da simulacao", None)) self.tipoLimite.setItemText(0, _translate("MainWindow", "Tempo simulacao", None)) self.tipoLimite.setItemText(1, _translate("MainWindow", "Nº Clientes", None)) self.nameLabel_28.setText(_translate("MainWindow", "Valor", None)) self.nameLabel_29.setText(_translate("MainWindow", "Nº Repeticoes", None)) self.botaoSimular.setText(_translate("MainWindow", "Simular", None))
mit
-5,630,341,833,755,718,000
55.576241
105
0.701934
false
3.376614
false
false
false
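The record above is pyuic-style generated layout code: it only builds widgets and labels, and is normally paired with a small hand-written entry point. The sketch below shows the usual wiring pattern for such generated PyQt4 code; the module and class names (simulador_ui, Ui_MainWindow) are assumptions, since the excerpt does not show the generated class header.

# Hedged sketch: typical PyQt4 wiring for pyuic-generated UI code like the
# record above. Module/class names are assumed, not taken from the record.
import sys
from PyQt4 import QtGui

from simulador_ui import Ui_MainWindow  # hypothetical module name


class MainWindow(QtGui.QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)  # builds the widgets defined by the generated code
        # Generated widgets are then reachable as attributes, e.g.
        # self.ui.nRepeticoes.text() or self.ui.botaoSimular.clicked.connect(...)


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())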
toinbis/369old
src/web369/conf/base.py
1
2325
from pkg_resources import resource_filename

DEBUG = False
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', '[email protected]'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'web369',
        'USER': 'root',
        'PASSWORD': '',
    }
}

TIME_ZONE = 'Europe/Vilnius'
LANGUAGE_CODE = 'lt'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

STATIC_URL = '/static/'
STATIC_ROOT = resource_filename('web369', '../../var/htdocs/static')
STATICFILES_DIRS = (
    resource_filename('web369', 'static'),
)

MEDIA_URL = '/media/'
MEDIA_ROOT = resource_filename('web369', '../../var/htdocs/media')

ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'

SECRET_KEY = 'SBX*YTL!cANetM&uFTf6R5Je(@PX3!rtgo)kgwNT'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'web369.urls.default'

TEMPLATE_DIRS = (
    resource_filename('web369', 'templates'),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    # 'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    # 'south',
    'web369',
)

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/tmp/django_cache',
        'TIMEOUT': 60,
        'OPTIONS': {
            'MAX_ENTRIES': 1000
        }
    }
}

# Word count will be updated when new documents are scrapped:
LIVE_WORD_COUNT = True
bsd-3-clause
-2,580,828,059,716,364,300
23.734043
73
0.667097
false
3.31669
false
true
false
smallyear/linuxLearn
salt/salt/client/ssh/state.py
1
6047
# -*- coding: utf-8 -*- ''' Create ssh executor system ''' from __future__ import absolute_import # Import python libs import os import tarfile import tempfile import json import shutil from contextlib import closing # Import salt libs import salt.client.ssh.shell import salt.client.ssh import salt.utils import salt.utils.thin import salt.utils.url import salt.roster import salt.state import salt.loader import salt.minion class SSHState(salt.state.State): ''' Create a State object which wraps the SSH functions for state operations ''' def __init__(self, opts, pillar=None, wrapper=None): self.wrapper = wrapper super(SSHState, self).__init__(opts, pillar) def load_modules(self, data=None, proxy=None): ''' Load up the modules for remote compilation via ssh ''' self.functions = self.wrapper self.utils = salt.loader.utils(self.opts) locals_ = salt.loader.minion_mods(self.opts, utils=self.utils) self.states = salt.loader.states(self.opts, locals_, self.utils) self.rend = salt.loader.render(self.opts, self.functions) def check_refresh(self, data, ret): ''' Stub out check_refresh ''' return def module_refresh(self): ''' Module refresh is not needed, stub it out ''' return class SSHHighState(salt.state.BaseHighState): ''' Used to compile the highstate on the master ''' stack = [] def __init__(self, opts, pillar=None, wrapper=None, fsclient=None): self.client = fsclient salt.state.BaseHighState.__init__(self, opts) self.state = SSHState(opts, pillar, wrapper) self.matcher = salt.minion.Matcher(self.opts) def load_dynamic(self, matches): ''' Stub out load_dynamic ''' return def lowstate_file_refs(chunks, extras=''): ''' Create a list of file ref objects to reconcile ''' refs = {} for chunk in chunks: if not isinstance(chunk, dict): continue saltenv = 'base' crefs = [] for state in chunk: if state == '__env__': saltenv = chunk[state] elif state.startswith('__'): continue crefs.extend(salt_refs(chunk[state])) if crefs: if saltenv not in refs: refs[saltenv] = [] refs[saltenv].append(crefs) if extras: extra_refs = extras.split(',') if extra_refs: for env in refs: for x in extra_refs: refs[env].append([x]) return refs def salt_refs(data, ret=None): ''' Pull salt file references out of the states ''' proto = 'salt://' if ret is None: ret = [] if isinstance(data, str): if data.startswith(proto) and data not in ret: ret.append(data) if isinstance(data, list): for comp in data: salt_refs(comp, ret) if isinstance(data, dict): for comp in data: salt_refs(data[comp], ret) return ret def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None): ''' Generate the execution package from the saltenv file refs and a low state data structure ''' gendir = tempfile.mkdtemp() trans_tar = salt.utils.mkstemp() lowfn = os.path.join(gendir, 'lowstate.json') pillarfn = os.path.join(gendir, 'pillar.json') sync_refs = [ [salt.utils.url.create('_modules')], [salt.utils.url.create('_states')], [salt.utils.url.create('_grains')], [salt.utils.url.create('_renderers')], [salt.utils.url.create('_returners')], [salt.utils.url.create('_output')], [salt.utils.url.create('_utils')], ] with salt.utils.fopen(lowfn, 'w+') as fp_: fp_.write(json.dumps(chunks)) if pillar: with salt.utils.fopen(pillarfn, 'w+') as fp_: fp_.write(json.dumps(pillar)) cachedir = os.path.join('salt-ssh', id_) for saltenv in file_refs: file_refs[saltenv].extend(sync_refs) env_root = os.path.join(gendir, saltenv) if not os.path.isdir(env_root): os.makedirs(env_root) for ref in file_refs[saltenv]: for name in ref: short = salt.utils.url.parse(name)[0] path 
= file_client.cache_file(name, saltenv, cachedir=cachedir) if path: tgt = os.path.join(env_root, short) tgt_dir = os.path.dirname(tgt) if not os.path.isdir(tgt_dir): os.makedirs(tgt_dir) shutil.copy(path, tgt) continue files = file_client.cache_dir(name, saltenv, cachedir=cachedir) if files: for filename in files: fn = filename[filename.find(short) + len(short):] if fn.startswith('/'): fn = fn.strip('/') tgt = os.path.join( env_root, short, fn, ) tgt_dir = os.path.dirname(tgt) if not os.path.isdir(tgt_dir): os.makedirs(tgt_dir) shutil.copy(filename, tgt) continue try: # cwd may not exist if it was removed but salt was run from it cwd = os.getcwd() except OSError: cwd = None os.chdir(gendir) with closing(tarfile.open(trans_tar, 'w:gz')) as tfp: for root, dirs, files in os.walk(gendir): for name in files: full = os.path.join(root, name) tfp.add(full[len(gendir):].lstrip(os.sep)) if cwd: os.chdir(cwd) shutil.rmtree(gendir) return trans_tar
apache-2.0
7,802,137,446,918,748,000
30.331606
79
0.539937
false
3.952288
false
false
false
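The salt_refs() function in the record above recursively collects every string that starts with 'salt://' from a low-state data structure, and lowstate_file_refs() groups those references by saltenv. The standalone sketch below reproduces that reference-walking idea on a made-up chunk, so the traversal can be run without Salt installed; the toy data is illustrative only.

# Standalone sketch of the reference-walking idea behind salt_refs() above:
# recursively collect any string that starts with 'salt://'.
def collect_salt_refs(data, found=None):
    proto = 'salt://'
    if found is None:
        found = []
    if isinstance(data, str):
        if data.startswith(proto) and data not in found:
            found.append(data)
    elif isinstance(data, list):
        for item in data:
            collect_salt_refs(item, found)
    elif isinstance(data, dict):
        for value in data.values():
            collect_salt_refs(value, found)
    return found


chunk = {
    '__env__': 'base',
    'file': [{'source': 'salt://httpd/httpd.conf'}, {'user': 'root'}],
    'pkg': ['installed'],
}
print(collect_salt_refs(chunk))  # ['salt://httpd/httpd.conf']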
rapidpro/chatpro
chatpro/rooms/models.py
1
2494
from __future__ import absolute_import, unicode_literals

from chatpro.profiles.tasks import sync_org_contacts
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _


class Room(models.Model):
    """
    Corresponds to a RapidPro contact group
    """
    uuid = models.CharField(max_length=36, unique=True)

    org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='rooms')

    name = models.CharField(verbose_name=_("Name"), max_length=128, blank=True,
                            help_text=_("Name of this room"))

    users = models.ManyToManyField(User, verbose_name=_("Users"), related_name='rooms',
                                   help_text=_("Users who can chat in this room"))

    managers = models.ManyToManyField(User, verbose_name=_("Managers"), related_name='manage_rooms',
                                      help_text=_("Users who can manage contacts in this room"))

    is_active = models.BooleanField(default=True, help_text="Whether this room is active")

    @classmethod
    def create(cls, org, name, uuid):
        return cls.objects.create(org=org, name=name, uuid=uuid)

    @classmethod
    def get_all(cls, org):
        return cls.objects.filter(org=org, is_active=True)

    @classmethod
    def update_room_groups(cls, org, group_uuids):
        """
        Updates an org's chat rooms based on the selected group UUIDs
        """
        # de-activate rooms not included
        org.rooms.exclude(uuid__in=group_uuids).update(is_active=False)

        # fetch group details
        groups = org.get_temba_client().get_groups()
        group_names = {group.uuid: group.name for group in groups}

        for group_uuid in group_uuids:
            existing = org.rooms.filter(uuid=group_uuid).first()
            if existing:
                existing.name = group_names[group_uuid]
                existing.is_active = True
                existing.save()
            else:
                cls.create(org, group_names[group_uuid], group_uuid)

        sync_org_contacts.delay(org.id)

    def get_contacts(self):
        return self.contacts.filter(is_active=True)

    def get_users(self):
        return self.users.filter(is_active=True).select_related('profile')

    def get_managers(self):
        return self.managers.filter(is_active=True).select_related('profile')

    def __unicode__(self):
        return self.name
bsd-3-clause
-7,098,435,995,584,484,000
34.628571
100
0.631917
false
4.048701
false
false
false
hkemmel/tal
affichage.py
1
2209
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:34:25 2017

@author: manfred.madelaine
"""

import time


def affStart():
    msg1 = "*** Binvenue dans i-Opinion ou Opinion Way ***"
    msg2 = "Le logiciel d'analyse et de classification des revues cinématographiques !"
    listMsg = []
    listMsg.append("")
    listMsg.append(msg1)
    listMsg.append("")
    listMsg.append(msg2)
    listMsg.append("")
    print(affBox(listMsg, 1, 1, len(msg2)))
    delai()


def affEnd():
    msg1 = "*** Opinion Way vous remercie de votre viste, à bientôt ! ***"
    msg = []
    msg.append(msg1)
    box = affBox(msg, 1, 1, len(msg1)-1)
    print(box)


def affMessage(msg):
    deb = "\n\t--- "
    fin = " ---\n\n"
    print(deb + msg + fin)
    delai()


def delai():
    time.sleep(0.8)


"""
Affiche un message dans une boite
    msg : message à afficher
    x : décalage horizontal
    y : décalage vertical
    L : largeur de la boite
"""
def affBox(msg, x, y, L):
    box = ""

    #décalage vertical
    box += multChaine("\n", y)

    indiceLine = 0

    #gestion d'une ligne
    for txt in msg:

        #bord suppérieur
        if(indiceLine == 0):
            #décalage horizontal
            box += "\n" + multChaine("\t", x)
            box += multChaine("-", L+3)

        #décalage horizontal
        box += "\n" + multChaine("\t", x)

        esp = ""
        mult = 1

        #message
        if(len(txt) < L ):
            esp = " "
            mult = (L - len(txt)) / 2

        box += "| " + multChaine(esp, mult) + txt + multChaine(esp, mult) + " |"

        #bord inférieur
        if(indiceLine == len(msg) - 1 ):
            #décalage horizontal
            box += "\n" + multChaine("\t", x)
            box += multChaine("-", L+3)

        indiceLine += 1

    box+="\n"
    return(box)


def affErr():
    affMessage("Votre réponse est incorrecte !")


def multChaine(chaine, mult):
    i = 0
    msg = ""
    while i < mult:
        msg += chaine
        i += 1
    return msg
gpl-3.0
181,424,775,493,336,640
19.342593
87
0.474954
false
3.132668
false
false
false
LuizGsa21/p4-conference-central
models.py
1
7226
#!/usr/bin/env python """models.py Udacity conference server-side Python App Engine data & ProtoRPC models $Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $ created/forked from conferences.py by wesc on 2014 may 24 """ __author__ = '[email protected] (Wesley Chun)' import httplib import endpoints from protorpc import messages from google.appengine.ext import ndb import datetime class ConflictException(endpoints.ServiceException): """ConflictException -- exception mapped to HTTP 409 response""" http_status = httplib.CONFLICT class StringMessage(messages.Message): """StringMessage-- outbound (single) string message""" data = messages.StringField(1, required=True) class BooleanMessage(messages.Message): """BooleanMessage-- outbound Boolean value message""" data = messages.BooleanField(1) class TeeShirtSize(messages.Enum): """TeeShirtSize -- t-shirt size enumeration value""" NOT_SPECIFIED = 1 XS_M = 2 XS_W = 3 S_M = 4 S_W = 5 M_M = 6 M_W = 7 L_M = 8 L_W = 9 XL_M = 10 XL_W = 11 XXL_M = 12 XXL_W = 13 XXXL_M = 14 XXXL_W = 15 class Profile(ndb.Model): """Profile -- User profile object""" displayName = ndb.StringProperty(default='') mainEmail = ndb.StringProperty() teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED') conferenceKeysToAttend = ndb.KeyProperty(kind='Conference', repeated=True) wishList = ndb.KeyProperty(kind='Session', repeated=True) def toForm(self): form = ProfileForm( displayName=self.displayName, mainEmail=self.mainEmail, teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize), conferenceKeysToAttend=[key.urlsafe() for key in self.conferenceKeysToAttend] ) form.check_initialized() return form def toMiniForm(self): form = ProfileMiniForm( displayName=self.displayName, teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize) ) form.check_initialized() return form class ProfileMiniForm(messages.Message): """ProfileMiniForm -- update Profile form message""" displayName = messages.StringField(1) teeShirtSize = messages.EnumField('TeeShirtSize', 2) class ProfileForm(messages.Message): """ProfileForm -- Profile outbound form message""" displayName = messages.StringField(1) mainEmail = messages.StringField(2) teeShirtSize = messages.EnumField('TeeShirtSize', 3) conferenceKeysToAttend = messages.StringField(4, repeated=True) class Conference(ndb.Model): """Conference -- Conference object""" required_fields_schema = ('name', 'organizerUserId', 'startDate', 'endDate') name = ndb.StringProperty(required=True) description = ndb.StringProperty() organizerUserId = ndb.StringProperty(required=True) topics = ndb.StringProperty(repeated=True) city = ndb.StringProperty() startDate = ndb.DateProperty(required=True) month = ndb.IntegerProperty() endDate = ndb.DateProperty(required=True) maxAttendees = ndb.IntegerProperty() seatsAvailable = ndb.IntegerProperty() @property def sessions(self): return Session.query(ancestor=self.key) def toForm(self, display_name=''): form = ConferenceForm( websafeKey=self.key.urlsafe(), name=self.name, description=self.description, organizerUserId=self.organizerUserId, topics=self.topics, city=self.city, startDate=self.startDate.strftime('%Y-%m-%d'), month=self.month, endDate=self.endDate.strftime('%Y-%m-%d'), maxAttendees=self.maxAttendees, seatsAvailable=self.seatsAvailable, organizerDisplayName=display_name ) form.check_initialized() return form class ConferenceForm(messages.Message): """ConferenceForm -- Conference outbound form message""" name = messages.StringField(1) description = messages.StringField(2) organizerUserId = messages.StringField(3) topics = 
messages.StringField(4, repeated=True) city = messages.StringField(5) startDate = messages.StringField(6) # DateTimeField() month = messages.IntegerField(7) maxAttendees = messages.IntegerField(8) seatsAvailable = messages.IntegerField(9) endDate = messages.StringField(10) # DateTimeField() websafeKey = messages.StringField(11) organizerDisplayName = messages.StringField(12) class ConferenceForms(messages.Message): """ConferenceForms -- multiple Conference outbound form message""" items = messages.MessageField(ConferenceForm, 1, repeated=True) class ConferenceQueryForm(messages.Message): """ConferenceQueryForm -- Conference query inbound form message""" field = messages.StringField(1) operator = messages.StringField(2) value = messages.StringField(3) class ConferenceQueryForms(messages.Message): """ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message""" filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True) class Speaker(ndb.Model): """Speaker -- Speaker object""" name = ndb.StringProperty(required=True) class Session(ndb.Model): """Session -- Session object""" required_fields_schema = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime') name = ndb.StringProperty(required=True) highlights = ndb.StringProperty() speaker = ndb.StructuredProperty(modelclass=Speaker, required=True) duration = ndb.IntegerProperty(required=True) typeOfSession = ndb.StringProperty(required=True) date = ndb.DateProperty(required=True) startTime = ndb.TimeProperty(required=True) def toForm(self): form = SessionForm( websafeKey=self.key.urlsafe(), name=self.name, highlights=self.highlights, speaker=self.speaker.name, duration=self.duration, typeOfSession=self.typeOfSession, date=self.date.strftime('%Y-%m-%d'), startTime=self.startTime.strftime('%H:%M') ) form.check_initialized() return form class SessionForm(messages.Message): """SessionForm -- Session outbound form message""" websafeKey = messages.StringField(1) name = messages.StringField(2) highlights = messages.StringField(3) speaker = messages.StringField(4) duration = messages.IntegerField(5) typeOfSession = messages.StringField(6) date = messages.StringField(7) startTime = messages.StringField(8) class SessionForms(messages.Message): """SessionForm -- multiple SessionForm outbound form message""" items = messages.MessageField(SessionForm, 1, repeated=True) class SessionQueryForm(messages.Message): """SessionQueryForm -- Session query inbound form message""" field = messages.StringField(1) operator = messages.StringField(2) value = messages.StringField(3) class SessionQueryForms(messages.Message): """SessionQueryForms -- multiple SessionQueryForm inbound form message""" filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
apache-2.0
4,306,855,950,322,396,700
31.696833
98
0.687517
false
3.887036
false
false
false
PyBossa/pybossa
pybossa/default_settings.py
1
4813
# -*- coding: utf8 -*- # This file is part of PYBOSSA. # # Copyright (C) 2015 Scifabric LTD. # # PYBOSSA is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PYBOSSA is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>. DEBUG = False # webserver host and port HOST = '0.0.0.0' PORT = 5000 SECRET = 'foobar' SECRET_KEY = 'my-session-secret' ITSDANGEROUSKEY = 'its-dangerous-key' ## project configuration BRAND = 'PYBOSSA' TITLE = 'PYBOSSA' COPYRIGHT = 'Set Your Institution' DESCRIPTION = 'Set the description in your config' TERMSOFUSE = 'http://okfn.org/terms-of-use/' DATAUSE = 'http://opendatacommons.org/licenses/by/' LOGO = '' DEFAULT_LOCALE = 'en' LOCALES = [('en', 'English'), ('es', u'Español'), ('it', 'Italiano'), ('fr', u'Français'), ('ja', u'日本語'), ('el', u'ελληνικά')] ## Default THEME THEME = 'default' ## Default number of apps per page APPS_PER_PAGE = 20 ## Default allowed extensions ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip'] UPLOAD_METHOD = 'local' ## Default number of users shown in the leaderboard LEADERBOARD = 20 ## Default configuration for debug toolbar ENABLE_DEBUG_TOOLBAR = False # Cache default key prefix REDIS_SENTINEL = [('localhost', 26379)] REDIS_MASTER = 'mymaster' REDIS_DB = 0 REDIS_KEYPREFIX = 'pybossa_cache' ## Default cache timeouts # Project cache AVATAR_TIMEOUT = 30 * 24 * 60 * 60 APP_TIMEOUT = 15 * 60 REGISTERED_USERS_TIMEOUT = 15 * 60 ANON_USERS_TIMEOUT = 5 * 60 * 60 STATS_FRONTPAGE_TIMEOUT = APP_TIMEOUT STATS_APP_TIMEOUT = 12 * 60 * 60 STATS_DRAFT_TIMEOUT = 24 * 60 * 60 N_APPS_PER_CATEGORY_TIMEOUT = 60 * 60 BROWSE_TASKS_TIMEOUT = 3 * 60 * 60 # Category cache CATEGORY_TIMEOUT = 24 * 60 * 60 # User cache USER_TIMEOUT = 15 * 60 USER_TOP_TIMEOUT = 24 * 60 * 60 USER_TOTAL_TIMEOUT = 24 * 60 * 60 # Project Presenters PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"] # Default Google Docs spreadsheet template tasks URLs TEMPLATE_TASKS = { 'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing", 'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing", 'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing", 'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing", 'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"} # Rate limits default values LIMIT = 300 PER = 15 * 60 # Expiration time for password protected project cookies PASSWD_COOKIE_TIMEOUT = 60 * 30 # Expiration time for account confirmation / password recovery links ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60 # Rate limits default values LIMIT = 300 PER = 15 * 60 # Disable new account confirmation (via email) ACCOUNT_CONFIRMATION_DISABLED = True # Send emails weekly update every WEEKLY_UPDATE_STATS = 'Sunday' # Enable Server Sent Events SSE = False # Pro user features. 
False will make the feature available to all regular users, # while True will make it available only to pro users PRO_FEATURES = { 'auditlog': True, 'webhooks': True, 'updated_exports': True, 'notify_blog_updates': True, 'project_weekly_report': True, 'autoimporter': True, 'better_stats': True } CORS_RESOURCES = {r"/api/*": {"origins": "*", "allow_headers": ['Content-Type', 'Authorization'], "max_age": 21600 }} FAILED_JOBS_RETRIES = 3 FAILED_JOBS_MAILS = 7 FULLTEXTSEARCH_LANGUAGE = 'english' STRICT_SLASHES = True # Background jobs default time outs MINUTE = 60 TIMEOUT = 10 * MINUTE # OneSignal GCM Sender ID # DO NOT MODIFY THIS GCM_SENDER_ID = "482941778795" # Unpublish inactive projects UNPUBLISH_PROJECTS = True # TTL for ZIP files of personal data TTL_ZIP_SEC_FILES = 3 # Default cryptopan key CRYPTOPAN_KEY = '32-char-str-for-AES-key-and-pad.' # Instruct PYBOSSA to generate absolute paths or not for avatars AVATAR_ABSOLUTE = True # Spam accounts to avoid SPAM = []
agpl-3.0
167,789,960,090,039,200
28.429448
116
0.689806
false
3.051527
false
false
false
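The REDIS_SENTINEL, REDIS_MASTER and REDIS_DB values in the record above describe a Redis Sentinel deployment. The sketch below shows how such settings are commonly consumed with the redis-py Sentinel client; it is a generic illustration, not necessarily PYBOSSA's own wiring.

# Hedged sketch: consuming REDIS_SENTINEL / REDIS_MASTER / REDIS_DB style
# settings with redis-py. Illustrative only; requires a running Sentinel.
from redis.sentinel import Sentinel

REDIS_SENTINEL = [('localhost', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0

sentinel = Sentinel(REDIS_SENTINEL, socket_timeout=0.5)
master = sentinel.master_for(REDIS_MASTER, db=REDIS_DB)   # read-write connection
replica = sentinel.slave_for(REDIS_MASTER, db=REDIS_DB)   # read-only connection

master.set('pybossa_cache:example', 'value', ex=60)
print(replica.get('pybossa_cache:example'))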
Naoto-Imamachi/MIRAGE
scripts/module/preparation/phastcons_score_list.py
1
3683
#!usr/bin/env python import sys import re import shelve from parameter.common_parameters import common_parameters import utils.setting_utils as utils utils.now_time("phastcons_score_list script starting...") p = utils.Bunch(common_parameters) def main(): utils.now_time("Input_file: " + p.phastcons_score_list_db_input) utils.now_time("Reference_file: " + p.phastcons_score_list_reference) utils.now_time("Output_file: " + p.phastcons_score_list_db_output) output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db' output_merge_shelve = shelve.open(output_merge) #for x in ['chr21']: for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']: ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc... ref_file = open(ref_s,'r') input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db' output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db' input_shelve = shelve.open(input_s) output_shelve = shelve.open(output_s) score_list_dict = {} for line in ref_file: line = line.rstrip() data = line.split("\t") chrom = data[0] if not chrom == x: continue strand = data[5] if len(data) >= 12: #12bed format exon_block = data[10].split(',') exon_block.pop() #Remove the last item '' exon_st = data[11].split(',') exon_st.pop() #Remove the last item '' name = data[3] score_list_dict[name] = [] for y in range(len(exon_block)): st = int(data[1]) + int(exon_st[y]) ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y]) length = ed - st for z in range(length): score = input_shelve[str(st)] score_list_dict[name].append(score) st += 1 if strand == '-': rev_score = score_list_dict[name][::-1] score_list_dict[name] = rev_score elif len(data) >= 3: #6bed format st = int(data[1]) ed = int(data[2]) length = ed - st name = data[3] score_list_dict[name] = [] for z in range(length): score = input_shelve[str(st)] score_list_dict[name].append(score) st += 1 if strand == '-': rev_score = score_list_dict[name][::-1] score_list_dict[name] = rev_score else: print('ERROR: Your BED format file have less than three column.') print ('BED format file need to have at least three column [chr, st, ed]...') sys.exit(1) output_shelve.update(score_list_dict) output_merge_shelve.update(score_list_dict) input_shelve.close() output_shelve.close() utils.now_time("phastcons_score_list script was successfully finished!!") output_merge_shelve.close() if __name__ == '__main__': main()
mit
-2,538,876,580,107,515,400
41.329412
203
0.524572
false
3.435634
false
false
false
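The script in the record above walks BED intervals and builds, per feature, a list of per-base phastCons scores taken from a shelve keyed by genomic position, reversing the list for minus-strand features. The self-contained sketch below reproduces that per-exon collection step with toy data standing in for the shelve and the BED record.

# Minimal sketch of the per-base score collection done in the record above.
# Toy data: a fake per-position score store and one 12-column-BED-like record.
scores = {str(pos): pos * 0.01 for pos in range(100, 130)}  # stand-in for the shelve

chrom_start = 100
exon_starts = [0, 20]   # offsets relative to chrom_start (BED blockStarts)
exon_sizes = [5, 5]     # BED blockSizes
strand = '-'

score_list = []
for offset, size in zip(exon_starts, exon_sizes):
    start = chrom_start + offset
    for pos in range(start, start + size):
        score_list.append(scores[str(pos)])

if strand == '-':
    score_list = score_list[::-1]  # report scores 5'->3' on the minus strand

print(score_list)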
DerekK88/PICwriter
picwriter/components/stripslotconverter.py
1
9317
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import gdspy import picwriter.toolkit as tk class StripSlotConverter(tk.Component): """Strip-to-Slot Side Converter Cell class. Adiabatically transforms a strip to a slot waveguide mode, with two sections. Section 1 introduces a narrow waveguide alongside the input strip waveguide and gradually lowers the gap between the strip waveguide and narrow side waveguide. Section 2 gradually converts the widths of the two waveguides until they are equal to the slot rail widths. Args: * **wgt_input** (WaveguideTemplate): WaveguideTemplate object for the input waveguide (should be either of type `strip` or `slot`). * **wgt_output** (WaveguideTemplate): WaveguideTemplate object for the output waveguide (should be either of type `strip` or `slot`, opposite of the input type). * **length1** (float): Length of section 1 that gradually changes the distance between the two waveguides. * **length2** (float): Length of section 2 that gradually changes the widths of the two waveguides until equal to the slot waveguide rail widths. * **start_rail_width** (float): Width of the narrow waveguide appearing next to the strip waveguide. * **end_strip_width** (float): Width of the strip waveguide at the end of `length1` and before `length2` * **d** (float): Distance between the outer edge of the strip waveguide and the start of the slot waveguide rail. Keyword Args: * **input_strip** (Boolean): If `True`, sets the input port to be the strip waveguide side. If `False`, slot waveguide is on the input. Defaults to `None`, in which case the input port waveguide template is used to choose. * **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0). * **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians) Members: * **portlist** (dict): Dictionary with the relevant port information Portlist format: * portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'} * portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'} Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*. 'Direction' points *towards* the waveguide that will connect to it. Note: The waveguide and cladding layer/datatype are taken from the `wgt_slot` by default. """ def __init__( self, wgt_input, wgt_output, length1, length2, start_rail_width, end_strip_width, d, input_strip=None, port=(0, 0), direction="EAST", ): tk.Component.__init__(self, "StripSlotConverter", locals()) self.portlist = {} if (not isinstance(input_strip, bool)) and (input_strip != None): raise ValueError( "Invalid input provided for `input_strip`. Please specify a boolean." 
) if input_strip == None: # Auto-detect based on wgt_input self.input_strip = ( wgt_input.wg_type == "strip" or wgt_input.wg_type == "swg" ) else: # User-override self.input_strip = input_strip if self.input_strip: self.wgt_strip = wgt_input self.wgt_slot = wgt_output else: self.wgt_strip = wgt_output self.wgt_slot = wgt_input self.wg_spec = { "layer": wgt_output.wg_layer, "datatype": wgt_output.wg_datatype, } self.clad_spec = { "layer": wgt_output.clad_layer, "datatype": wgt_output.clad_datatype, } self.length1 = length1 self.length2 = length2 self.d = d self.start_rail_width = start_rail_width self.end_strip_width = end_strip_width self.port = port self.direction = direction self.__build_cell() self.__build_ports() """ Translate & rotate the ports corresponding to this specific component object """ self._auto_transform_() def __build_cell(self): # Sequentially build all the geometric shapes using polygons # Add strip waveguide taper for region 1 x0, y0 = (0, 0) pts = [ (x0, y0 - self.wgt_strip.wg_width / 2.0), (x0, y0 + self.wgt_strip.wg_width / 2.0), ( x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width, ), (x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0), ] strip1 = gdspy.Polygon( pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype ) # Add the thin side waveguide for region 1 pts = [ (x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d), (x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d + self.start_rail_width), ( x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width + self.wgt_slot.slot + self.start_rail_width, ), ( x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width + self.wgt_slot.slot, ), ] thin_strip = gdspy.Polygon( pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype ) # Add the bottom rail for region 2 pts = [ ( x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width, ), (x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0), (x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0), ( x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0 + self.wgt_slot.rail, ), ] rail1 = gdspy.Polygon( pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype ) # Add the top rail for region 2 pts = [ ( x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width + self.wgt_slot.slot + self.start_rail_width, ), ( x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width + self.wgt_slot.slot, ), ( x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0 - self.wgt_slot.rail, ), (x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0), ] rail2 = gdspy.Polygon( pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype ) # Add a cladding polygon pts = [ (x0, y0 + self.wgt_strip.clad_width + self.wgt_strip.wg_width / 2.0), ( x0 + self.length1 + self.length2, y0 + self.wgt_slot.clad_width + self.wgt_slot.wg_width / 2.0, ), ( x0 + self.length1 + self.length2, y0 - self.wgt_slot.clad_width - self.wgt_slot.wg_width / 2.0, ), (x0, y0 - self.wgt_strip.clad_width - self.wgt_strip.wg_width / 2.0), ] clad = gdspy.Polygon( pts, layer=self.wgt_strip.clad_layer, datatype=self.wgt_strip.clad_datatype ) self.add(strip1) self.add(thin_strip) self.add(rail1) self.add(rail2) self.add(clad) def __build_ports(self): # Portlist format: # example: example: {'port':(x_position, y_position), 'direction': 'NORTH'} self.portlist["input"] = {"port": (0, 0), "direction": "WEST"} 
self.portlist["output"] = { "port": (self.length1 + self.length2, 0), "direction": "EAST", } if __name__ == "__main__": from . import * top = gdspy.Cell("top") wgt_strip = WaveguideTemplate(bend_radius=50, wg_type="strip", wg_width=0.7) wgt_slot = WaveguideTemplate(bend_radius=50, wg_type="slot", wg_width=0.7, slot=0.2) wg1 = Waveguide([(0, 0), (100, 0)], wgt_strip) tk.add(top, wg1) ssc = StripSlotConverter( wgt_strip, wgt_slot, length1=15.0, length2=15.0, start_rail_width=0.1, end_strip_width=0.4, d=1.0, **wg1.portlist["output"] ) tk.add(top, ssc) (x1, y1) = ssc.portlist["output"]["port"] wg2 = Waveguide([(x1, y1), (x1 + 100, y1)], wgt_slot) tk.add(top, wg2) gdspy.LayoutViewer(cells=top) # gdspy.write_gds('StripSlotConverter.gds', unit=1.0e-6, precision=1.0e-9)
mit
7,310,835,208,231,276,000
36.268
396
0.545347
false
3.359899
false
false
false
dzamie/weasyl
weasyl/blocktag.py
1
4024
# blocktag.py from error import PostgresError import define as d import profile import searchtag from libweasyl import ratings from weasyl.cache import region # For blocked tags, `rating` refers to the lowest rating for which that tag is # blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas # (X, Y, 30) would block tag Y for only adult ratings. def check(userid, submitid=None, charid=None, journalid=None): """ Returns True if the submission, character, or journal contains a search tag that the user has blocked, else False. """ if not userid: return False if submitid: map_table = "searchmapsubmit" content_table = "submission" id_field = "submitid" target = submitid elif charid: map_table = "searchmapchar" content_table = "character" id_field = "charid" target = charid else: map_table = "searchmapjournal" content_table = "journal" id_field = "journalid" target = journalid query = """ SELECT EXISTS ( SELECT 0 FROM {map_table} searchmap INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field} WHERE searchmap.targetid = %(id)s AND content.userid != %(user)s AND searchmap.tagid IN ( SELECT blocktag.tagid FROM blocktag WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block """.format(map_table=map_table, content_table=content_table, id_field=id_field) return d.engine.execute(query, id=target, user=userid).first().block def check_list(rating, tags, blocked_tags): return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags) def suggest(userid, target): if not target: return [] return d.execute("SELECT title FROM searchtag" " WHERE title LIKE '%s%%' AND tagid NOT IN (SELECT tagid FROM blocktag WHERE userid = %i)" " ORDER BY title LIMIT 10", [target, userid], options="within") def select(userid): return [{ "title": i[0], "rating": i[1], } for i in d.execute("SELECT st.title, bt.rating FROM searchtag st " " INNER JOIN blocktag bt ON st.tagid = bt.tagid" " WHERE bt.userid = %i" " ORDER BY st.title", [userid])] @region.cache_on_arguments() @d.record_timing def cached_select(userid): return select(userid) def insert(userid, tagid=None, title=None, rating=None): if rating not in ratings.CODE_MAP: rating = ratings.GENERAL.code profile.check_user_rating_allowed(userid, rating) if tagid: tag = int(tagid) try: d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating) except PostgresError: return elif title: tag_name = d.get_search_tag(title) try: d.engine.execute(""" INSERT INTO blocktag (userid, tagid, rating) VALUES ( %(user)s, (SELECT tagid FROM searchtag WHERE title = %(tag_name)s), %(rating)s ) """, user=userid, tag_name=tag_name, rating=rating) except PostgresError: try: tag = searchtag.create(title) except PostgresError: return d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating) cached_select.invalidate(userid) def remove(userid, tagid=None, title=None): if tagid: d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, %i)", [userid, tagid]) elif title: d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, (SELECT tagid FROM searchtag WHERE title = '%s'))", [userid, d.get_search_tag(title)]) cached_select.invalidate(userid)
apache-2.0
-8,802,983,478,453,803,000
30.193798
120
0.587227
false
3.850718
false
false
false
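The header comment in the record above defines a blocked tag's rating as the lowest rating at which the tag is hidden, and check_list() applies that rule in pure Python. The demo below exercises it with toy data, using the rating codes mentioned in that same comment (10 blocks at every rating, 30 blocks only adult-rated content).

# Demo of the check_list() rule from the record above.
def check_list(rating, tags, blocked_tags):
    return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags)


blocked = [
    {'title': 'spiders', 'rating': 10},  # blocked at every rating
    {'title': 'gore', 'rating': 30},     # blocked only for adult-rated content
]

print(check_list(10, {'spiders', 'cats'}, blocked))  # True  - spiders blocked everywhere
print(check_list(10, {'gore'}, blocked))             # False - gore only blocked at rating >= 30
print(check_list(30, {'gore'}, blocked))             # True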
EndyKaufman/django-postgres-angularjs-blog
app/manager/migrations/0006_properties.py
1
1170
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-24 14:05
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('manager', '0005_add_fields_and_set_defaults'),
    ]

    operations = [
        migrations.CreateModel(
            name='Properties',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=512, unique=True)),
                ('value', models.TextField(blank=True, null=True)),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
                ('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
                ('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
mit
-7,519,078,480,483,792,000
39.344828
150
0.62906
false
4.020619
false
false
false
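The migration in the record above creates a Properties table. For readability, the sketch below reconstructs the model definition such a migration would roughly correspond to; the project's actual app/manager/models.py is not part of this record, so treat the class as an approximation derived from the migration's field list.

# Rough model implied by the CreateModel operation above (sketch, not the real models.py).
from django.conf import settings
from django.db import models


class Properties(models.Model):
    name = models.TextField(max_length=512, unique=True)
    value = models.TextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')
    updated = models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')
    created_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
    )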
bert9bert/statsmodels
statsmodels/tsa/statespace/kalman_filter.py
2
86079
""" State Space Representation and Kalman Filter Author: Chad Fulton License: Simplified-BSD """ from __future__ import division, absolute_import, print_function from warnings import warn import numpy as np from .representation import OptionWrapper, Representation, FrozenRepresentation from .tools import (validate_vector_shape, validate_matrix_shape, reorder_missing_matrix, reorder_missing_vector) from . import tools from statsmodels.tools.sm_exceptions import ValueWarning # Define constants FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4 FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6 FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7 FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3 FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4 FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5 FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2 FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3 INVERT_UNIVARIATE = 0x01 SOLVE_LU = 0x02 INVERT_LU = 0x04 SOLVE_CHOLESKY = 0x08 INVERT_CHOLESKY = 0x10 STABILITY_FORCE_SYMMETRY = 0x01 MEMORY_STORE_ALL = 0 MEMORY_NO_FORECAST = 0x01 MEMORY_NO_PREDICTED = 0x02 MEMORY_NO_FILTERED = 0x04 MEMORY_NO_LIKELIHOOD = 0x08 MEMORY_NO_GAIN = 0x10 MEMORY_NO_SMOOTHING = 0x20 MEMORY_NO_STD_FORECAST = 0x40 MEMORY_CONSERVE = ( MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED | MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING | MEMORY_NO_STD_FORECAST ) TIMING_INIT_PREDICTED = 0 TIMING_INIT_FILTERED = 1 class KalmanFilter(Representation): r""" State space representation of a time series process, with Kalman filter Parameters ---------- k_endog : array_like or integer The observed time-series process :math:`y` if array like or the number of variables in the process if an integer. k_states : int The dimension of the unobserved state process. k_posdef : int, optional The dimension of a guaranteed positive definite covariance matrix describing the shocks in the measurement equation. Must be less than or equal to `k_states`. Default is `k_states`. loglikelihood_burn : int, optional The number of initial periods during which the loglikelihood is not recorded. Default is 0. tolerance : float, optional The tolerance at which the Kalman filter determines convergence to steady-state. Default is 1e-19. results_class : class, optional Default results class to use to save filtering output. Default is `FilterResults`. If specified, class must extend from `FilterResults`. **kwargs Keyword arguments may be used to provide values for the filter, inversion, and stability methods. See `set_filter_method`, `set_inversion_method`, and `set_stability_method`. Keyword arguments may be used to provide default values for state space matrices. See `Representation` for more details. Notes ----- There are several types of options available for controlling the Kalman filter operation. All options are internally held as bitmasks, but can be manipulated by setting class attributes, which act like boolean flags. For more information, see the `set_*` class method documentation. The options are: filter_method The filtering method controls aspects of which Kalman filtering approach will be used. inversion_method The Kalman filter may contain one matrix inversion: that of the forecast error covariance matrix. The inversion method controls how and if that inverse is performed. stability_method The Kalman filter is a recursive algorithm that may in some cases suffer issues with numerical stability. The stability method controls what, if any, measures are taken to promote stability. 
conserve_memory By default, the Kalman filter computes a number of intermediate matrices at each iteration. The memory conservation options control which of those matrices are stored. filter_timing By default, the Kalman filter follows Durbin and Koopman, 2012, in initializing the filter with predicted values. Kim and Nelson, 1999, instead initialize the filter with filtered values, which is essentially just a different timing convention. The `filter_method` and `inversion_method` options intentionally allow the possibility that multiple methods will be indicated. In the case that multiple methods are selected, the underlying Kalman filter will attempt to select the optional method given the input data. For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are indicated (this is in fact the default case). In this case, if the endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE is used and inversion reduces to simple division, and if it has a larger dimension, the Cholesky decomposition along with linear solving (rather than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been set, then the Cholesky decomposition method would *always* be used, even in the case of 1-dimensional data. See Also -------- FilterResults statsmodels.tsa.statespace.representation.Representation """ filter_methods = [ 'filter_conventional', 'filter_exact_initial', 'filter_augmented', 'filter_square_root', 'filter_univariate', 'filter_collapsed', 'filter_extended', 'filter_unscented' ] filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL) """ (bool) Flag for conventional Kalman filtering. """ filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL) """ (bool) Flag for exact initial Kalman filtering. Not implemented. """ filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED) """ (bool) Flag for augmented Kalman filtering. Not implemented. """ filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT) """ (bool) Flag for square-root Kalman filtering. Not implemented. """ filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE) """ (bool) Flag for univariate filtering of multivariate observation vector. """ filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED) """ (bool) Flag for Kalman filtering with collapsed observation vector. """ filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED) """ (bool) Flag for extended Kalman filtering. Not implemented. """ filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED) """ (bool) Flag for unscented Kalman filtering. Not implemented. """ inversion_methods = [ 'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky', 'invert_cholesky' ] invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE) """ (bool) Flag for univariate inversion method (recommended). """ solve_lu = OptionWrapper('inversion_method', SOLVE_LU) """ (bool) Flag for LU and linear solver inversion method. """ invert_lu = OptionWrapper('inversion_method', INVERT_LU) """ (bool) Flag for LU inversion method. """ solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY) """ (bool) Flag for Cholesky and linear solver inversion method (recommended). """ invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY) """ (bool) Flag for Cholesky inversion method. 
""" stability_methods = ['stability_force_symmetry'] stability_force_symmetry = ( OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY) ) """ (bool) Flag for enforcing covariance matrix symmetry """ memory_options = [ 'memory_store_all', 'memory_no_forecast', 'memory_no_predicted', 'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain', 'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve' ] memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL) """ (bool) Flag for storing all intermediate results in memory (default). """ memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST) """ (bool) Flag to prevent storing forecasts. """ memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED) """ (bool) Flag to prevent storing predicted state and covariance matrices. """ memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED) """ (bool) Flag to prevent storing filtered state and covariance matrices. """ memory_no_likelihood = ( OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD) ) """ (bool) Flag to prevent storing likelihood values for each observation. """ memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN) """ (bool) Flag to prevent storing the Kalman gain matrices. """ memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING) """ (bool) Flag to prevent storing likelihood values for each observation. """ memory_no_std_forecast = ( OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST)) """ (bool) Flag to prevent storing standardized forecast errors. """ memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE) """ (bool) Flag to conserve the maximum amount of memory. """ timing_options = [ 'timing_init_predicted', 'timing_init_filtered' ] timing_init_predicted = OptionWrapper('filter_timing', TIMING_INIT_PREDICTED) """ (bool) Flag for the default timing convention (Durbin and Koopman, 2012). """ timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED) """ (bool) Flag for the alternate timing convention (Kim and Nelson, 2012). """ # Default filter options filter_method = FILTER_CONVENTIONAL """ (int) Filtering method bitmask. """ inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY """ (int) Inversion method bitmask. """ stability_method = STABILITY_FORCE_SYMMETRY """ (int) Stability method bitmask. """ conserve_memory = MEMORY_STORE_ALL """ (int) Memory conservation bitmask. """ filter_timing = TIMING_INIT_PREDICTED """ (int) Filter timing. 
""" def __init__(self, k_endog, k_states, k_posdef=None, loglikelihood_burn=0, tolerance=1e-19, results_class=None, kalman_filter_classes=None, **kwargs): super(KalmanFilter, self).__init__( k_endog, k_states, k_posdef, **kwargs ) # Setup the underlying Kalman filter storage self._kalman_filters = {} # Filter options self.loglikelihood_burn = loglikelihood_burn self.results_class = ( results_class if results_class is not None else FilterResults ) # Options self.prefix_kalman_filter_map = ( kalman_filter_classes if kalman_filter_classes is not None else tools.prefix_kalman_filter_map.copy()) self.set_filter_method(**kwargs) self.set_inversion_method(**kwargs) self.set_stability_method(**kwargs) self.set_conserve_memory(**kwargs) self.set_filter_timing(**kwargs) self.tolerance = tolerance @property def _kalman_filter(self): prefix = self.prefix if prefix in self._kalman_filters: return self._kalman_filters[prefix] return None def _initialize_filter(self, filter_method=None, inversion_method=None, stability_method=None, conserve_memory=None, tolerance=None, filter_timing=None, loglikelihood_burn=None): if filter_method is None: filter_method = self.filter_method if inversion_method is None: inversion_method = self.inversion_method if stability_method is None: stability_method = self.stability_method if conserve_memory is None: conserve_memory = self.conserve_memory if loglikelihood_burn is None: loglikelihood_burn = self.loglikelihood_burn if filter_timing is None: filter_timing = self.filter_timing if tolerance is None: tolerance = self.tolerance # Make sure we have endog if self.endog is None: raise RuntimeError('Must bind a dataset to the model before' ' filtering or smoothing.') # Initialize the representation matrices prefix, dtype, create_statespace = self._initialize_representation() # Determine if we need to (re-)create the filter # (definitely need to recreate if we recreated the _statespace object) create_filter = create_statespace or prefix not in self._kalman_filters if not create_filter: kalman_filter = self._kalman_filters[prefix] create_filter = ( not kalman_filter.conserve_memory == conserve_memory or not kalman_filter.loglikelihood_burn == loglikelihood_burn ) # If the dtype-specific _kalman_filter does not exist (or if we need # to re-create it), create it if create_filter: if prefix in self._kalman_filters: # Delete the old filter del self._kalman_filters[prefix] # Setup the filter cls = self.prefix_kalman_filter_map[prefix] self._kalman_filters[prefix] = cls( self._statespaces[prefix], filter_method, inversion_method, stability_method, conserve_memory, filter_timing, tolerance, loglikelihood_burn ) # Otherwise, update the filter parameters else: kalman_filter = self._kalman_filters[prefix] kalman_filter.set_filter_method(filter_method, False) kalman_filter.inversion_method = inversion_method kalman_filter.stability_method = stability_method kalman_filter.filter_timing = filter_timing kalman_filter.tolerance = tolerance # conserve_memory and loglikelihood_burn changes always lead to # re-created filters return prefix, dtype, create_filter, create_statespace def set_filter_method(self, filter_method=None, **kwargs): r""" Set the filtering method The filtering method controls aspects of which Kalman filtering approach will be used. Parameters ---------- filter_method : integer, optional Bitmask value to set the filter method to. See notes for details. **kwargs Keyword arguments may be used to influence the filter method by setting individual boolean flags. 
See notes for details. Notes ----- The filtering method is defined by a collection of boolean flags, and is internally stored as a bitmask. The methods available are: FILTER_CONVENTIONAL = 0x01 Conventional Kalman filter. FILTER_UNIVARIATE = 0x10 Univariate approach to Kalman filtering. Overrides conventional method if both are specified. FILTER_COLLAPSED = 0x20 Collapsed approach to Kalman filtering. Will be used *in addition* to conventional or univariate filtering. Note that only the first method is available if using a Scipy version older than 0.16. If the bitmask is set directly via the `filter_method` argument, then the full method must be provided. If keyword arguments are used to set individual boolean flags, then the lowercase of the method must be used as an argument name, and the value is the desired value of the boolean flag (True or False). Note that the filter method may also be specified by directly modifying the class attributes which are defined similarly to the keyword arguments. The default filtering method is FILTER_CONVENTIONAL. Examples -------- >>> mod = sm.tsa.statespace.SARIMAX(range(10)) >>> mod.ssm.filter_method 1 >>> mod.ssm.filter_conventional True >>> mod.ssm.filter_univariate = True >>> mod.ssm.filter_method 17 >>> mod.ssm.set_filter_method(filter_univariate=False, ... filter_collapsed=True) >>> mod.ssm.filter_method 33 >>> mod.ssm.set_filter_method(filter_method=1) >>> mod.ssm.filter_conventional True >>> mod.ssm.filter_univariate False >>> mod.ssm.filter_collapsed False >>> mod.ssm.filter_univariate = True >>> mod.ssm.filter_method 17 """ if filter_method is not None: self.filter_method = filter_method for name in KalmanFilter.filter_methods: if name in kwargs: setattr(self, name, kwargs[name]) if self._compatibility_mode and not self.filter_method == 1: raise NotImplementedError('Only conventional Kalman filtering' ' is available. Consider updating' ' dependencies for more options.') def set_inversion_method(self, inversion_method=None, **kwargs): r""" Set the inversion method The Kalman filter may contain one matrix inversion: that of the forecast error covariance matrix. The inversion method controls how and if that inverse is performed. Parameters ---------- inversion_method : integer, optional Bitmask value to set the inversion method to. See notes for details. **kwargs Keyword arguments may be used to influence the inversion method by setting individual boolean flags. See notes for details. Notes ----- The inversion method is defined by a collection of boolean flags, and is internally stored as a bitmask. The methods available are: INVERT_UNIVARIATE = 0x01 If the endogenous time series is univariate, then inversion can be performed by simple division. If this flag is set and the time series is univariate, then division will always be used even if other flags are also set. SOLVE_LU = 0x02 Use an LU decomposition along with a linear solver (rather than ever actually inverting the matrix). INVERT_LU = 0x04 Use an LU decomposition along with typical matrix inversion. SOLVE_CHOLESKY = 0x08 Use a Cholesky decomposition along with a linear solver. INVERT_CHOLESKY = 0x10 Use an Cholesky decomposition along with typical matrix inversion. If the bitmask is set directly via the `inversion_method` argument, then the full method must be provided. If keyword arguments are used to set individual boolean flags, then the lowercase of the method must be used as an argument name, and the value is the desired value of the boolean flag (True or False). 
Note that the inversion method may also be specified by directly modifying the class attributes which are defined similarly to the keyword arguments. The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY` Several things to keep in mind are: - If the filtering method is specified to be univariate, then simple division is always used regardless of the dimension of the endogenous time series. - Cholesky decomposition is about twice as fast as LU decomposition, but it requires that the matrix be positive definite. While this should generally be true, it may not be in every case. - Using a linear solver rather than true matrix inversion is generally faster and is numerically more stable. Examples -------- >>> mod = sm.tsa.statespace.SARIMAX(range(10)) >>> mod.ssm.inversion_method 1 >>> mod.ssm.solve_cholesky True >>> mod.ssm.invert_univariate True >>> mod.ssm.invert_lu False >>> mod.ssm.invert_univariate = False >>> mod.ssm.inversion_method 8 >>> mod.ssm.set_inversion_method(solve_cholesky=False, ... invert_cholesky=True) >>> mod.ssm.inversion_method 16 """ if inversion_method is not None: self.inversion_method = inversion_method for name in KalmanFilter.inversion_methods: if name in kwargs: setattr(self, name, kwargs[name]) def set_stability_method(self, stability_method=None, **kwargs): r""" Set the numerical stability method The Kalman filter is a recursive algorithm that may in some cases suffer issues with numerical stability. The stability method controls what, if any, measures are taken to promote stability. Parameters ---------- stability_method : integer, optional Bitmask value to set the stability method to. See notes for details. **kwargs Keyword arguments may be used to influence the stability method by setting individual boolean flags. See notes for details. Notes ----- The stability method is defined by a collection of boolean flags, and is internally stored as a bitmask. The methods available are: STABILITY_FORCE_SYMMETRY = 0x01 If this flag is set, symmetry of the predicted state covariance matrix is enforced at each iteration of the filter, where each element is set to the average of the corresponding elements in the upper and lower triangle. If the bitmask is set directly via the `stability_method` argument, then the full method must be provided. If keyword arguments are used to set individual boolean flags, then the lowercase of the method must be used as an argument name, and the value is the desired value of the boolean flag (True or False). Note that the stability method may also be specified by directly modifying the class attributes which are defined similarly to the keyword arguments. The default stability method is `STABILITY_FORCE_SYMMETRY` Examples -------- >>> mod = sm.tsa.statespace.SARIMAX(range(10)) >>> mod.ssm.stability_method 1 >>> mod.ssm.stability_force_symmetry True >>> mod.ssm.stability_force_symmetry = False >>> mod.ssm.stability_method 0 """ if stability_method is not None: self.stability_method = stability_method for name in KalmanFilter.stability_methods: if name in kwargs: setattr(self, name, kwargs[name]) def set_conserve_memory(self, conserve_memory=None, **kwargs): r""" Set the memory conservation method By default, the Kalman filter computes a number of intermediate matrices at each iteration. The memory conservation options control which of those matrices are stored. Parameters ---------- conserve_memory : integer, optional Bitmask value to set the memory conservation method to. See notes for details. 
**kwargs Keyword arguments may be used to influence the memory conservation method by setting individual boolean flags. See notes for details. Notes ----- The memory conservation method is defined by a collection of boolean flags, and is internally stored as a bitmask. The methods available are: MEMORY_STORE_ALL = 0 Store all intermediate matrices. This is the default value. MEMORY_NO_FORECAST = 0x01 Do not store the forecast, forecast error, or forecast error covariance matrices. If this option is used, the `predict` method from the results class is unavailable. MEMORY_NO_PREDICTED = 0x02 Do not store the predicted state or predicted state covariance matrices. MEMORY_NO_FILTERED = 0x04 Do not store the filtered state or filtered state covariance matrices. MEMORY_NO_LIKELIHOOD = 0x08 Do not store the vector of loglikelihood values for each observation. Only the sum of the loglikelihood values is stored. MEMORY_NO_GAIN = 0x10 Do not store the Kalman gain matrices. MEMORY_NO_SMOOTHING = 0x20 Do not store temporary variables related to Klaman smoothing. If this option is used, smoothing is unavailable. MEMORY_NO_SMOOTHING = 0x20 Do not store standardized forecast errors. MEMORY_CONSERVE Do not store any intermediate matrices. Note that if using a Scipy version less than 0.16, the options MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST have no effect. If the bitmask is set directly via the `conserve_memory` argument, then the full method must be provided. If keyword arguments are used to set individual boolean flags, then the lowercase of the method must be used as an argument name, and the value is the desired value of the boolean flag (True or False). Note that the memory conservation method may also be specified by directly modifying the class attributes which are defined similarly to the keyword arguments. The default memory conservation method is `MEMORY_STORE_ALL`, so that all intermediate matrices are stored. Examples -------- >>> mod = sm.tsa.statespace.SARIMAX(range(10)) >>> mod.ssm..conserve_memory 0 >>> mod.ssm.memory_no_predicted False >>> mod.ssm.memory_no_predicted = True >>> mod.ssm.conserve_memory 2 >>> mod.ssm.set_conserve_memory(memory_no_filtered=True, ... memory_no_forecast=True) >>> mod.ssm.conserve_memory 7 """ if conserve_memory is not None: self.conserve_memory = conserve_memory for name in KalmanFilter.memory_options: if name in kwargs: setattr(self, name, kwargs[name]) def set_filter_timing(self, alternate_timing=None, **kwargs): r""" Set the filter timing convention By default, the Kalman filter follows Durbin and Koopman, 2012, in initializing the filter with predicted values. Kim and Nelson, 1999, instead initialize the filter with filtered values, which is essentially just a different timing convention. Parameters ---------- alternate_timing : integer, optional Whether or not to use the alternate timing convention. Default is unspecified. **kwargs Keyword arguments may be used to influence the memory conservation method by setting individual boolean flags. See notes for details. """ if alternate_timing is not None: self.filter_timing = int(alternate_timing) if 'timing_init_predicted' in kwargs: self.filter_timing = int(not kwargs['timing_init_predicted']) if 'timing_init_filtered' in kwargs: self.filter_timing = int(kwargs['timing_init_filtered']) if (self._compatibility_mode and self.filter_timing == TIMING_INIT_FILTERED): raise NotImplementedError('Only "predicted" Kalman filter' ' timing is available. 
Consider' ' updating dependencies for more' ' options.') def _filter(self, filter_method=None, inversion_method=None, stability_method=None, conserve_memory=None, filter_timing=None, tolerance=None, loglikelihood_burn=None, complex_step=False): # Initialize the filter prefix, dtype, create_filter, create_statespace = ( self._initialize_filter( filter_method, inversion_method, stability_method, conserve_memory, filter_timing, tolerance, loglikelihood_burn ) ) kfilter = self._kalman_filters[prefix] # Initialize the state self._initialize_state(prefix=prefix, complex_step=complex_step) # Run the filter kfilter() tmp = np.array(kfilter.loglikelihood) tmp2 = np.array(kfilter.predicted_state) return kfilter def filter(self, filter_method=None, inversion_method=None, stability_method=None, conserve_memory=None, filter_timing=None, tolerance=None, loglikelihood_burn=None, complex_step=False): r""" Apply the Kalman filter to the statespace model. Parameters ---------- filter_method : int, optional Determines which Kalman filter to use. Default is conventional. inversion_method : int, optional Determines which inversion technique to use. Default is by Cholesky decomposition. stability_method : int, optional Determines which numerical stability techniques to use. Default is to enforce symmetry of the predicted state covariance matrix. conserve_memory : int, optional Determines what output from the filter to store. Default is to store everything. filter_timing : int, optional Determines the timing convention of the filter. Default is that from Durbin and Koopman (2012), in which the filter is initialized with predicted values. tolerance : float, optional The tolerance at which the Kalman filter determines convergence to steady-state. Default is 1e-19. loglikelihood_burn : int, optional The number of initial periods during which the loglikelihood is not recorded. Default is 0. Notes ----- This function by default does not compute variables required for smoothing. """ if conserve_memory is None: conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING # Run the filter kfilter = self._filter( filter_method, inversion_method, stability_method, conserve_memory, filter_timing, tolerance, loglikelihood_burn, complex_step) tmp = np.array(kfilter.loglikelihood) # Create the results object results = self.results_class(self) results.update_representation(self) results.update_filter(kfilter) return results def loglike(self, **kwargs): r""" Calculate the loglikelihood associated with the statespace model. Parameters ---------- **kwargs Additional keyword arguments to pass to the Kalman filter. See `KalmanFilter.filter` for more details. Returns ------- loglike : float The joint loglikelihood. """ if self.memory_no_likelihood: raise RuntimeError('Cannot compute loglikelihood if' ' MEMORY_NO_LIKELIHOOD option is selected.') kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD kfilter = self._filter(**kwargs) loglikelihood_burn = kwargs.get('loglikelihood_burn', self.loglikelihood_burn) return np.sum(kfilter.loglikelihood[loglikelihood_burn:]) def loglikeobs(self, **kwargs): r""" Calculate the loglikelihood for each observation associated with the statespace model. Parameters ---------- **kwargs Additional keyword arguments to pass to the Kalman filter. See `KalmanFilter.filter` for more details. Notes ----- If `loglikelihood_burn` is positive, then the entries in the returned loglikelihood vector are set to be zero for those initial time periods. 
Returns ------- loglike : array of float Array of loglikelihood values for each observation. """ if self.memory_no_likelihood: raise RuntimeError('Cannot compute loglikelihood if' ' MEMORY_NO_LIKELIHOOD option is selected.') kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD kfilter = self._filter(**kwargs) llf_obs = np.array(kfilter.loglikelihood, copy=True) # Set any burned observations to have zero likelihood loglikelihood_burn = kwargs.get('loglikelihood_burn', self.loglikelihood_burn) llf_obs[:loglikelihood_burn] = 0 return llf_obs def simulate(self, nsimulations, measurement_shocks=None, state_shocks=None, initial_state=None): r""" Simulate a new time series following the state space model Parameters ---------- nsimulations : int The number of observations to simulate. If the model is time-invariant this can be any number. If the model is time-varying, then this number must be less than or equal to the number measurement_shocks : array_like, optional If specified, these are the shocks to the measurement equation, :math:`\varepsilon_t`. If unspecified, these are automatically generated using a pseudo-random number generator. If specified, must be shaped `nsimulations` x `k_endog`, where `k_endog` is the same as in the state space model. state_shocks : array_like, optional If specified, these are the shocks to the state equation, :math:`\eta_t`. If unspecified, these are automatically generated using a pseudo-random number generator. If specified, must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the same as in the state space model. initial_state : array_like, optional If specified, this is the state vector at time zero, which should be shaped (`k_states` x 1), where `k_states` is the same as in the state space model. If unspecified, but the model has been initialized, then that initialization is used. If unspecified and the model has not been initialized, then a vector of zeros is used. Note that this is not included in the returned `simulated_states` array. Returns ------- simulated_obs : array An (nsimulations x k_endog) array of simulated observations. simulated_states : array An (nsimulations x k_states) array of simulated states. """ time_invariant = self.time_invariant # Check for valid number of simulations if not time_invariant and nsimulations > self.nobs: raise ValueError('In a time-varying model, cannot create more' ' simulations than there are observations.') # Check / generate measurement shocks if measurement_shocks is not None: measurement_shocks = np.array(measurement_shocks) if measurement_shocks.ndim == 0: measurement_shocks = measurement_shocks[np.newaxis, np.newaxis] elif measurement_shocks.ndim == 1: measurement_shocks = measurement_shocks[:, np.newaxis] if not measurement_shocks.shape == (nsimulations, self.k_endog): raise ValueError('Invalid shape of provided measurement' ' shocks. Required (%d, %d)' % (nsimulations, self.k_endog)) elif self.shapes['obs_cov'][-1] == 1: measurement_shocks = np.random.multivariate_normal( mean=np.zeros(self.k_endog), cov=self['obs_cov'], size=nsimulations) # Check / generate state shocks if state_shocks is not None: state_shocks = np.array(state_shocks) if state_shocks.ndim == 0: state_shocks = state_shocks[np.newaxis, np.newaxis] elif state_shocks.ndim == 1: state_shocks = state_shocks[:, np.newaxis] if not state_shocks.shape == (nsimulations, self.k_posdef): raise ValueError('Invalid shape of provided state shocks.' ' Required (%d, %d).' 
% (nsimulations, self.k_posdef)) elif self.shapes['state_cov'][-1] == 1: state_shocks = np.random.multivariate_normal( mean=np.zeros(self.k_posdef), cov=self['state_cov'], size=nsimulations) # Get the initial states if initial_state is not None: initial_state = np.array(initial_state) if initial_state.ndim == 0: initial_state = initial_state[np.newaxis] elif (initial_state.ndim > 1 and not initial_state.shape == (self.k_states, 1)): raise ValueError('Invalid shape of provided initial state' ' vector. Required (%d, 1)' % self.k_states) elif self.initialization == 'known': initial_state = np.random.multivariate_normal( self._initial_state, self._initial_state_cov) elif self.initialization == 'stationary': from scipy.linalg import solve_discrete_lyapunov # (I - T)^{-1} c = x => (I - T) x = c initial_state_mean = np.linalg.solve( np.eye(self.k_states) - self['transition', :, :, 0], self['state_intercept', :, 0]) R = self['selection', :, :, 0] Q = self['state_cov', :, :, 0] selected_state_cov = R.dot(Q).dot(R.T) initial_state_cov = solve_discrete_lyapunov( self['transition', :, :, 0], selected_state_cov) initial_state = np.random.multivariate_normal( initial_state_mean, initial_state_cov) elif self.initialization == 'approximate_diffuse': initial_state = np.zeros(self.k_states) else: initial_state = np.zeros(self.k_states) return self._simulate(nsimulations, measurement_shocks, state_shocks, initial_state) def _simulate(self, nsimulations, measurement_shocks, state_shocks, initial_state): time_invariant = self.time_invariant # Holding variables for the simulations simulated_obs = np.zeros((nsimulations, self.k_endog), dtype=self.dtype) simulated_states = np.zeros((nsimulations+1, self.k_states), dtype=self.dtype) simulated_states[0] = initial_state # Perform iterations to create the new time series obs_intercept_t = 0 design_t = 0 state_intercept_t = 0 transition_t = 0 selection_t = 0 for t in range(nsimulations): # Get the current shocks (this accomodates time-varying matrices) if measurement_shocks is None: measurement_shock = np.random.multivariate_normal( mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t]) else: measurement_shock = measurement_shocks[t] if state_shocks is None: state_shock = np.random.multivariate_normal( mean=np.zeros(self.k_posdef), cov=self['state_cov', :, :, t]) else: state_shock = state_shocks[t] # Get current-iteration matrices if not time_invariant: obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t design_t = 0 if self.design.shape[-1] == 1 else t state_intercept_t = ( 0 if self.state_intercept.shape[-1] == 1 else t) transition_t = 0 if self.transition.shape[-1] == 1 else t selection_t = 0 if self.selection.shape[-1] == 1 else t obs_intercept = self['obs_intercept', :, obs_intercept_t] design = self['design', :, :, design_t] state_intercept = self['state_intercept', :, state_intercept_t] transition = self['transition', :, :, transition_t] selection = self['selection', :, :, selection_t] # Iterate the measurement equation simulated_obs[t] = ( obs_intercept + np.dot(design, simulated_states[t]) + measurement_shock) # Iterate the state equation simulated_states[t+1] = ( state_intercept + np.dot(transition, simulated_states[t]) + np.dot(selection, state_shock)) return simulated_obs, simulated_states[:-1] def impulse_responses(self, steps=10, impulse=0, orthogonalized=False, cumulative=False, **kwargs): r""" Impulse response function Parameters ---------- steps : int, optional The number of steps for which impulse responses are calculated. 
Default is 10. Note that the initial impulse is not counted as a step, so if `steps=1`, the output will have 2 entries. impulse : int or array_like If an integer, the state innovation to pulse; must be between 0 and `k_posdef-1` where `k_posdef` is the same as in the state space model. Alternatively, a custom impulse vector may be provided; must be a column vector with shape `(k_posdef, 1)`. orthogonalized : boolean, optional Whether or not to perform impulse using orthogonalized innovations. Note that this will also affect custum `impulse` vectors. Default is False. cumulative : boolean, optional Whether or not to return cumulative impulse responses. Default is False. **kwargs If the model is time-varying and `steps` is greater than the number of observations, any of the state space representation matrices that are time-varying must have updated values provided for the out-of-sample steps. For example, if `design` is a time-varying component, `nobs` is 10, and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be provided with the new design matrix values. Returns ------- impulse_responses : array Responses for each endogenous variable due to the impulse given by the `impulse` argument. A (steps + 1 x k_endog) array. Notes ----- Intercepts in the measurement and state equation are ignored when calculating impulse responses. """ # Since the first step is the impulse itself, we actually want steps+1 steps += 1 # Check for what kind of impulse we want if type(impulse) == int: if impulse >= self.k_posdef or impulse < 0: raise ValueError('Invalid value for `impulse`. Must be the' ' index of one of the state innovations.') # Create the (non-orthogonalized) impulse vector idx = impulse impulse = np.zeros(self.k_posdef) impulse[idx] = 1 else: impulse = np.array(impulse) if impulse.ndim > 1: impulse = np.squeeze(impulse) if not impulse.shape == (self.k_posdef,): raise ValueError('Invalid impulse vector. 
Must be shaped' ' (%d,)' % self.k_posdef) # Orthogonalize the impulses, if requested, using Cholesky on the # first state covariance matrix if orthogonalized: state_chol = np.linalg.cholesky(self.state_cov[:, :, 0]) impulse = np.dot(state_chol, impulse) # If we have a time-invariant system, we can solve for the IRF directly if self.time_invariant: # Get the state space matrices design = self.design[:, :, 0] transition = self.transition[:, :, 0] selection = self.selection[:, :, 0] # Holding arrays irf = np.zeros((steps, self.k_endog), dtype=self.dtype) states = np.zeros((steps, self.k_states), dtype=self.dtype) # First iteration states[0] = np.dot(selection, impulse) irf[0] = np.dot(design, states[0]) # Iterations for t in range(1, steps): states[t] = np.dot(transition, states[t-1]) irf[t] = np.dot(design, states[t]) # Otherwise, create a new model else: # Get the basic model components representation = {} for name, shape in self.shapes.items(): if name in ['obs', 'obs_intercept', 'state_intercept']: continue representation[name] = getattr(self, name) # Allow additional specification warning = ('Model has time-invariant %s matrix, so the %s' ' argument to `irf` has been ignored.') exception = ('Impulse response functions for models with' ' time-varying %s matrix requires an updated' ' time-varying matrix for any periods beyond those in' ' the original model.') for name, shape in self.shapes.items(): if name in ['obs', 'obs_intercept', 'state_intercept']: continue if representation[name].shape[-1] == 1: if name in kwargs: warn(warning % (name, name), ValueWarning) elif name not in kwargs: raise ValueError(exception % name) else: mat = np.asarray(kwargs[name]) validate_matrix_shape(name, mat.shape, shape[0], shape[1], steps) if mat.ndim < 3 or not mat.shape[2] == steps: raise ValueError(exception % name) representation[name] = np.c_[representation[name], mat] # Setup the new statespace representation model_kwargs = { 'filter_method': self.filter_method, 'inversion_method': self.inversion_method, 'stability_method': self.stability_method, 'conserve_memory': self.conserve_memory, 'tolerance': self.tolerance, 'loglikelihood_burn': self.loglikelihood_burn } model_kwargs.update(representation) model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states, self.k_posdef, **model_kwargs) model.initialize_approximate_diffuse() model._initialize_filter() model._initialize_state() # Get the impulse response function via simulation of the state # space model, but with other shocks set to zero # Since simulate returns the zero-th period, we need to simulate # steps + 1 periods and exclude the zero-th observation. steps += 1 measurement_shocks = np.zeros((steps, self.k_endog)) state_shocks = np.zeros((steps, self.k_posdef)) state_shocks[0] = impulse irf, _ = model.simulate( steps, measurement_shocks=measurement_shocks, state_shocks=state_shocks) irf = irf[1:] # Get the cumulative response if requested if cumulative: irf = np.cumsum(irf, axis=0) return irf class FilterResults(FrozenRepresentation): """ Results from applying the Kalman filter to a state space model. Parameters ---------- model : Representation A Statespace representation Attributes ---------- nobs : int Number of observations. k_endog : int The dimension of the observation series. k_states : int The dimension of the unobserved state process. k_posdef : int The dimension of a guaranteed positive definite covariance matrix describing the shocks in the measurement equation. 
dtype : dtype Datatype of representation matrices prefix : str BLAS prefix of representation matrices shapes : dictionary of name,tuple A dictionary recording the shapes of each of the representation matrices as tuples. endog : array The observation vector. design : array The design matrix, :math:`Z`. obs_intercept : array The intercept for the observation equation, :math:`d`. obs_cov : array The covariance matrix for the observation equation :math:`H`. transition : array The transition matrix, :math:`T`. state_intercept : array The intercept for the transition equation, :math:`c`. selection : array The selection matrix, :math:`R`. state_cov : array The covariance matrix for the state equation :math:`Q`. missing : array of bool An array of the same size as `endog`, filled with boolean values that are True if the corresponding entry in `endog` is NaN and False otherwise. nmissing : array of int An array of size `nobs`, where the ith entry is the number (between 0 and `k_endog`) of NaNs in the ith row of the `endog` array. time_invariant : bool Whether or not the representation matrices are time-invariant initialization : str Kalman filter initialization method. initial_state : array_like The state vector used to initialize the Kalman filter. initial_state_cov : array_like The state covariance matrix used to initialize the Kalman filter. filter_method : int Bitmask representing the Kalman filtering method inversion_method : int Bitmask representing the method used to invert the forecast error covariance matrix. stability_method : int Bitmask representing the methods used to promote numerical stability in the Kalman filter recursions. conserve_memory : int Bitmask representing the selected memory conservation method. filter_timing : int Whether or not to use the alternate timing convention. tolerance : float The tolerance at which the Kalman filter determines convergence to steady-state. loglikelihood_burn : int The number of initial periods during which the loglikelihood is not recorded. converged : bool Whether or not the Kalman filter converged. period_converged : int The time period in which the Kalman filter converged. filtered_state : array The filtered state vector at each time period. filtered_state_cov : array The filtered state covariance matrix at each time period. predicted_state : array The predicted state vector at each time period. predicted_state_cov : array The predicted state covariance matrix at each time period. kalman_gain : array The Kalman gain at each time period. forecasts : array The one-step-ahead forecasts of observations at each time period. forecasts_error : array The forecast errors at each time period. forecasts_error_cov : array The forecast error covariance matrices at each time period. llf_obs : array The loglikelihood values at each time period. 
""" _filter_attributes = [ 'filter_method', 'inversion_method', 'stability_method', 'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn', 'converged', 'period_converged', 'filtered_state', 'filtered_state_cov', 'predicted_state', 'predicted_state_cov', 'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts', 'forecasts_error', 'forecasts_error_cov', 'llf_obs', 'collapsed_forecasts', 'collapsed_forecasts_error', 'collapsed_forecasts_error_cov', ] _filter_options = ( KalmanFilter.filter_methods + KalmanFilter.stability_methods + KalmanFilter.inversion_methods + KalmanFilter.memory_options ) _attributes = FrozenRepresentation._model_attributes + _filter_attributes def __init__(self, model): super(FilterResults, self).__init__(model) # Setup caches for uninitialized objects self._kalman_gain = None self._standardized_forecasts_error = None def update_representation(self, model, only_options=False): """ Update the results to match a given model Parameters ---------- model : Representation The model object from which to take the updated values. only_options : boolean, optional If set to true, only the filter options are updated, and the state space representation is not updated. Default is False. Notes ----- This method is rarely required except for internal usage. """ if not only_options: super(FilterResults, self).update_representation(model) # Save the options as boolean variables for name in self._filter_options: setattr(self, name, getattr(model, name, None)) def update_filter(self, kalman_filter): """ Update the filter results Parameters ---------- kalman_filter : KalmanFilter The model object from which to take the updated values. Notes ----- This method is rarely required except for internal usage. """ # State initialization self.initial_state = np.array( kalman_filter.model.initial_state, copy=True ) self.initial_state_cov = np.array( kalman_filter.model.initial_state_cov, copy=True ) # Save Kalman filter parameters self.filter_method = kalman_filter.filter_method self.inversion_method = kalman_filter.inversion_method self.stability_method = kalman_filter.stability_method self.conserve_memory = kalman_filter.conserve_memory self.filter_timing = kalman_filter.filter_timing self.tolerance = kalman_filter.tolerance self.loglikelihood_burn = kalman_filter.loglikelihood_burn # Save Kalman filter output self.converged = bool(kalman_filter.converged) self.period_converged = kalman_filter.period_converged self.filtered_state = np.array(kalman_filter.filtered_state, copy=True) self.filtered_state_cov = np.array( kalman_filter.filtered_state_cov, copy=True ) self.predicted_state = np.array( kalman_filter.predicted_state, copy=True ) self.predicted_state_cov = np.array( kalman_filter.predicted_state_cov, copy=True ) # Reset caches has_missing = np.sum(self.nmissing) > 0 if not self._compatibility_mode and not (self.memory_no_std_forecast or self.invert_lu or self.solve_lu or self.filter_collapsed): if has_missing: self._standardized_forecasts_error = np.array( reorder_missing_vector( kalman_filter.standardized_forecast_error, self.missing, prefix=self.prefix)) else: self._standardized_forecasts_error = np.array( kalman_filter.standardized_forecast_error, copy=True) else: self._standardized_forecasts_error = None if not self._compatibility_mode: # In the partially missing data case, all entries will # be in the upper left submatrix rather than the correct placement # Re-ordering does not make sense in the collapsed case. 
if has_missing and (not self.memory_no_gain and not self.filter_collapsed): self._kalman_gain = np.array(reorder_missing_matrix( kalman_filter.kalman_gain, self.missing, reorder_cols=True, prefix=self.prefix)) self.tmp1 = np.array(reorder_missing_matrix( kalman_filter.tmp1, self.missing, reorder_cols=True, prefix=self.prefix)) self.tmp2 = np.array(reorder_missing_vector( kalman_filter.tmp2, self.missing, prefix=self.prefix)) self.tmp3 = np.array(reorder_missing_matrix( kalman_filter.tmp3, self.missing, reorder_rows=True, prefix=self.prefix)) self.tmp4 = np.array(reorder_missing_matrix( kalman_filter.tmp4, self.missing, reorder_cols=True, reorder_rows=True, prefix=self.prefix)) else: self._kalman_gain = np.array( kalman_filter.kalman_gain, copy=True) self.tmp1 = np.array(kalman_filter.tmp1, copy=True) self.tmp2 = np.array(kalman_filter.tmp2, copy=True) self.tmp3 = np.array(kalman_filter.tmp3, copy=True) self.tmp4 = np.array(kalman_filter.tmp4, copy=True) else: self._kalman_gain = None # Note: use forecasts rather than forecast, so as not to interfer # with the `forecast` methods in subclasses self.forecasts = np.array(kalman_filter.forecast, copy=True) self.forecasts_error = np.array( kalman_filter.forecast_error, copy=True ) self.forecasts_error_cov = np.array( kalman_filter.forecast_error_cov, copy=True ) self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True) # If there was missing data, save the original values from the Kalman # filter output, since below will set the values corresponding to # the missing observations to nans. self.missing_forecasts = None self.missing_forecasts_error = None self.missing_forecasts_error_cov = None if np.sum(self.nmissing) > 0: # Copy the provided arrays (which are as the Kalman filter dataset) # into new variables self.missing_forecasts = np.copy(self.forecasts) self.missing_forecasts_error = np.copy(self.forecasts_error) self.missing_forecasts_error_cov = ( np.copy(self.forecasts_error_cov) ) # Save the collapsed values self.collapsed_forecasts = None self.collapsed_forecasts_error = None self.collapsed_forecasts_error_cov = None if self.filter_collapsed: # Copy the provided arrays (which are from the collapsed dataset) # into new variables self.collapsed_forecasts = self.forecasts[:self.k_states, :] self.collapsed_forecasts_error = ( self.forecasts_error[:self.k_states, :] ) self.collapsed_forecasts_error_cov = ( self.forecasts_error_cov[:self.k_states, :self.k_states, :] ) # Recreate the original arrays (which should be from the original # dataset) in the appropriate dimension self.forecasts = np.zeros((self.k_endog, self.nobs)) self.forecasts_error = np.zeros((self.k_endog, self.nobs)) self.forecasts_error_cov = ( np.zeros((self.k_endog, self.k_endog, self.nobs)) ) # Fill in missing values in the forecast, forecast error, and # forecast error covariance matrix (this is required due to how the # Kalman filter implements observations that are either partly or # completely missing) # Construct the predictions, forecasts if not (self.memory_no_forecast or self.memory_no_predicted): for t in range(self.nobs): design_t = 0 if self.design.shape[2] == 1 else t obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t # For completely missing observations, the Kalman filter will # produce forecasts, but forecast errors and the forecast # error covariance matrix will be zeros - make them nan to # improve clarity of results. 
if self.nmissing[t] > 0: mask = ~self.missing[:, t].astype(bool) # We can recover forecasts # For partially missing observations, the Kalman filter # will produce all elements (forecasts, forecast errors, # forecast error covariance matrices) as usual, but their # dimension will only be equal to the number of non-missing # elements, and their location in memory will be in the # first blocks (e.g. for the forecasts_error, the first # k_endog - nmissing[t] columns will be filled in), # regardless of which endogenous variables they refer to # (i.e. the non- missing endogenous variables for that # observation). Furthermore, the forecast error covariance # matrix is only valid for those elements. What is done is # to set all elements to nan for these observations so that # they are flagged as missing. The variables # missing_forecasts, etc. then provide the forecasts, etc. # provided by the Kalman filter, from which the data can be # retrieved if desired. self.forecasts[:, t] = np.dot( self.design[:, :, design_t], self.predicted_state[:, t] ) + self.obs_intercept[:, obs_intercept_t] self.forecasts_error[:, t] = np.nan self.forecasts_error[mask, t] = ( self.endog[mask, t] - self.forecasts[mask, t]) self.forecasts_error_cov[:, :, t] = np.dot( np.dot(self.design[:, :, design_t], self.predicted_state_cov[:, :, t]), self.design[:, :, design_t].T ) + self.obs_cov[:, :, obs_cov_t] # In the collapsed case, everything just needs to be rebuilt # for the original observed data, since the Kalman filter # produced these values for the collapsed data. elif self.filter_collapsed: self.forecasts[:, t] = np.dot( self.design[:, :, design_t], self.predicted_state[:, t] ) + self.obs_intercept[:, obs_intercept_t] self.forecasts_error[:, t] = ( self.endog[:, t] - self.forecasts[:, t] ) self.forecasts_error_cov[:, :, t] = np.dot( np.dot(self.design[:, :, design_t], self.predicted_state_cov[:, :, t]), self.design[:, :, design_t].T ) + self.obs_cov[:, :, obs_cov_t] @property def kalman_gain(self): """ Kalman gain matrices """ if self._kalman_gain is None: # k x n self._kalman_gain = np.zeros( (self.k_states, self.k_endog, self.nobs), dtype=self.dtype) for t in range(self.nobs): # In the case of entirely missing observations, let the Kalman # gain be zeros. if self.nmissing[t] == self.k_endog: continue design_t = 0 if self.design.shape[2] == 1 else t transition_t = 0 if self.transition.shape[2] == 1 else t if self.nmissing[t] == 0: self._kalman_gain[:, :, t] = np.dot( np.dot( self.transition[:, :, transition_t], self.predicted_state_cov[:, :, t] ), np.dot( np.transpose(self.design[:, :, design_t]), np.linalg.inv(self.forecasts_error_cov[:, :, t]) ) ) else: mask = ~self.missing[:, t].astype(bool) F = self.forecasts_error_cov[np.ix_(mask, mask, [t])] self._kalman_gain[:, mask, t] = np.dot( np.dot( self.transition[:, :, transition_t], self.predicted_state_cov[:, :, t] ), np.dot( np.transpose(self.design[mask, :, design_t]), np.linalg.inv(F[:, :, 0]) ) ) return self._kalman_gain @property def standardized_forecasts_error(self): """ Standardized forecast errors Notes ----- The forecast errors produced by the Kalman filter are .. math:: v_t \sim N(0, F_t) Hypothesis tests are usually applied to the standardized residuals .. math:: v_t^s = B_t v_t \sim N(0, I) where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then :math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t` and :math:`L_t` are lower triangular. 
Finally, :math:`B_t v_t \sim N(0, B_t F_t B_t')` and :math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`. Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or :math:`L_t v_t^s = v_t`; the latter equation is the form required to use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is lower triangular, we can use a triangular solver (?TRTRS). """ if self._standardized_forecasts_error is None: if self.k_endog == 1: self._standardized_forecasts_error = ( self.forecasts_error / self.forecasts_error_cov[0, 0, :]**0.5) else: from scipy import linalg self._standardized_forecasts_error = np.zeros( self.forecasts_error.shape, dtype=self.dtype) for t in range(self.forecasts_error_cov.shape[2]): if self.nmissing[t] > 0: self._standardized_forecasts_error[:, t] = np.nan if self.nmissing[t] < self.k_endog: mask = ~self.missing[:, t].astype(bool) F = self.forecasts_error_cov[np.ix_(mask, mask, [t])] upper, _ = linalg.cho_factor(F[:, :, 0]) self._standardized_forecasts_error[mask, t] = ( linalg.solve_triangular( upper, self.forecasts_error[mask, t], trans=1)) return self._standardized_forecasts_error def predict(self, start=None, end=None, dynamic=None, **kwargs): r""" In-sample and out-of-sample prediction for state space models generally Parameters ---------- start : int, optional Zero-indexed observation number at which to start forecasting, i.e., the first forecast will be at start. end : int, optional Zero-indexed observation number at which to end forecasting, i.e., the last forecast will be at end. dynamic : int, optional Offset relative to `start` at which to begin dynamic prediction. Prior to this observation, true endogenous values will be used for prediction; starting with this observation and continuing through the end of prediction, forecasted endogenous values will be used instead. **kwargs If the prediction range is outside of the sample range, any of the state space representation matrices that are time-varying must have updated values provided for the out-of-sample range. For example, of `obs_intercept` is a time-varying component and the prediction range extends 10 periods beyond the end of the sample, a (`k_endog` x 10) matrix must be provided with the new intercept values. Returns ------- results : PredictionResults A PredictionResults object. Notes ----- All prediction is performed by applying the deterministic part of the measurement equation using the predicted state variables. Out-of-sample prediction first applies the Kalman filter to missing data for the number of periods desired to obtain the predicted states. """ # Cannot predict if we do not have appropriate arrays if self.memory_no_forecast or self.memory_no_predicted: raise ValueError('Predict is not possible if memory conservation' ' has been used to avoid storing forecasts or' ' predicted values.') # Get the start and the end of the entire prediction range if start is None: start = 0 elif start < 0: raise ValueError('Cannot predict values previous to the sample.') if end is None: end = self.nobs # Prediction and forecasting is performed by iterating the Kalman # Kalman filter through the entire range [0, end] # Then, everything is returned corresponding to the range [start, end]. 
# In order to perform the calculations, the range is separately split # up into the following categories: # - static: (in-sample) the Kalman filter is run as usual # - dynamic: (in-sample) the Kalman filter is run, but on missing data # - forecast: (out-of-sample) the Kalman filter is run, but on missing # data # Short-circuit if end is before start if end <= start: raise ValueError('End of prediction must be after start.') # Get the number of forecasts to make after the end of the sample nforecast = max(0, end - self.nobs) # Get the number of dynamic prediction periods # If `dynamic=True`, then assume that we want to begin dynamic # prediction at the start of the sample prediction. if dynamic is True: dynamic = 0 # If `dynamic=False`, then assume we want no dynamic prediction if dynamic is False: dynamic = None ndynamic = 0 if dynamic is not None: # Replace the relative dynamic offset with an absolute offset dynamic = start + dynamic # Validate the `dynamic` parameter if dynamic < 0: raise ValueError('Dynamic prediction cannot begin prior to the' ' first observation in the sample.') elif dynamic > end: warn('Dynamic prediction specified to begin after the end of' ' prediction, and so has no effect.', ValueWarning) dynamic = None elif dynamic > self.nobs: warn('Dynamic prediction specified to begin during' ' out-of-sample forecasting period, and so has no' ' effect.', ValueWarning) dynamic = None # Get the total size of the desired dynamic forecasting component # Note: the first `dynamic` periods of prediction are actually # *not* dynamic, because dynamic prediction begins at observation # `dynamic`. if dynamic is not None: ndynamic = max(0, min(end, self.nobs) - dynamic) # Get the number of in-sample static predictions nstatic = min(end, self.nobs) if dynamic is None else dynamic # Construct the design and observation intercept and covariance # matrices for start-npadded:end. If not time-varying in the original # model, then they will be copied over if none are provided in # `kwargs`. Otherwise additional matrices must be provided in `kwargs`. representation = {} for name, shape in self.shapes.items(): if name == 'obs': continue representation[name] = getattr(self, name) # Update the matrices from kwargs for forecasts warning = ('Model has time-invariant %s matrix, so the %s' ' argument to `predict` has been ignored.') exception = ('Forecasting for models with time-varying %s matrix' ' requires an updated time-varying matrix for the' ' period to be forecasted.') if nforecast > 0: for name, shape in self.shapes.items(): if name == 'obs': continue if representation[name].shape[-1] == 1: if name in kwargs: warn(warning % (name, name), ValueWarning) elif name not in kwargs: raise ValueError(exception % name) else: mat = np.asarray(kwargs[name]) if len(shape) == 2: validate_vector_shape(name, mat.shape, shape[0], nforecast) if mat.ndim < 2 or not mat.shape[1] == nforecast: raise ValueError(exception % name) representation[name] = np.c_[representation[name], mat] else: validate_matrix_shape(name, mat.shape, shape[0], shape[1], nforecast) if mat.ndim < 3 or not mat.shape[2] == nforecast: raise ValueError(exception % name) representation[name] = np.c_[representation[name], mat] # Update the matrices from kwargs for dynamic prediction in the case # that `end` is less than `nobs` and `dynamic` is less than `end`. In # this case, any time-varying matrices in the default `representation` # will be too long, causing an error to be thrown below in the # KalmanFilter(...) 
construction call, because the endog has length # nstatic + ndynamic + nforecast, whereas the time-varying matrices # from `representation` have length nobs. if ndynamic > 0 and end < self.nobs: for name, shape in self.shapes.items(): if not name == 'obs' and representation[name].shape[-1] > 1: representation[name] = representation[name][..., :end] # Construct the predicted state and covariance matrix for each time # period depending on whether that time period corresponds to # one-step-ahead prediction, dynamic prediction, or out-of-sample # forecasting. # If we only have simple prediction, then we can use the already saved # Kalman filter output if ndynamic == 0 and nforecast == 0: results = self else: # Construct the new endogenous array. endog = np.empty((self.k_endog, ndynamic + nforecast)) endog.fill(np.nan) endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog]) # Setup the new statespace representation model_kwargs = { 'filter_method': self.filter_method, 'inversion_method': self.inversion_method, 'stability_method': self.stability_method, 'conserve_memory': self.conserve_memory, 'filter_timing': self.filter_timing, 'tolerance': self.tolerance, 'loglikelihood_burn': self.loglikelihood_burn } model_kwargs.update(representation) model = KalmanFilter( endog, self.k_states, self.k_posdef, **model_kwargs ) model.initialize_known( self.initial_state, self.initial_state_cov ) model._initialize_filter() model._initialize_state() results = self._predict(nstatic, ndynamic, nforecast, model) return PredictionResults(results, start, end, nstatic, ndynamic, nforecast) def _predict(self, nstatic, ndynamic, nforecast, model): # Note: this doesn't use self, and can either be a static method or # moved outside the class altogether. # Get the underlying filter kfilter = model._kalman_filter # Save this (which shares memory with the memoryview on which the # Kalman filter will be operating) so that we can replace actual data # with predicted data during dynamic forecasting endog = model._representations[model.prefix]['obs'] for t in range(kfilter.model.nobs): # Run the Kalman filter for the first `nstatic` periods (for # which dynamic computation will not be performed) if t < nstatic: next(kfilter) # Perform dynamic prediction elif t < nstatic + ndynamic: design_t = 0 if model.design.shape[2] == 1 else t obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t # Unconditional value is the intercept (often zeros) endog[:, t] = model.obs_intercept[:, obs_intercept_t] # If t > 0, then we can condition the forecast on the state if t > 0: # Predict endog[:, t] given `predicted_state` calculated in # previous iteration (i.e. t-1) endog[:, t] += np.dot( model.design[:, :, design_t], kfilter.predicted_state[:, t] ) # Advance Kalman filter next(kfilter) # Perform any (one-step-ahead) forecasting else: next(kfilter) # Return the predicted state and predicted state covariance matrices results = FilterResults(model) results.update_representation(model) results.update_filter(kfilter) return results class PredictionResults(FilterResults): r""" Results of in-sample and out-of-sample prediction for state space models generally Parameters ---------- results : FilterResults Output from filtering, corresponding to the prediction desired start : int Zero-indexed observation number at which to start forecasting, i.e., the first forecast will be at start. end : int Zero-indexed observation number at which to end forecasting, i.e., the last forecast will be at end. 
nstatic : int Number of in-sample static predictions (these are always the first elements of the prediction output). ndynamic : int Number of in-sample dynamic predictions (these always follow the static predictions directly, and are directly followed by the forecasts). nforecast : int Number of out-of-sample forecasts (these always follow the dynamic predictions directly). Attributes ---------- npredictions : int Number of observations in the predicted series; this is not necessarily the same as the number of observations in the original model from which prediction was performed. start : int Zero-indexed observation number at which to start prediction, i.e., the first predict will be at `start`; this is relative to the original model from which prediction was performed. end : int Zero-indexed observation number at which to end prediction, i.e., the last predict will be at `end`; this is relative to the original model from which prediction was performed. nstatic : int Number of in-sample static predictions. ndynamic : int Number of in-sample dynamic predictions. nforecast : int Number of out-of-sample forecasts. endog : array The observation vector. design : array The design matrix, :math:`Z`. obs_intercept : array The intercept for the observation equation, :math:`d`. obs_cov : array The covariance matrix for the observation equation :math:`H`. transition : array The transition matrix, :math:`T`. state_intercept : array The intercept for the transition equation, :math:`c`. selection : array The selection matrix, :math:`R`. state_cov : array The covariance matrix for the state equation :math:`Q`. filtered_state : array The filtered state vector at each time period. filtered_state_cov : array The filtered state covariance matrix at each time period. predicted_state : array The predicted state vector at each time period. predicted_state_cov : array The predicted state covariance matrix at each time period. forecasts : array The one-step-ahead forecasts of observations at each time period. forecasts_error : array The forecast errors at each time period. forecasts_error_cov : array The forecast error covariance matrices at each time period. Notes ----- The provided ranges must be conformable, meaning that it must be that `end - start == nstatic + ndynamic + nforecast`. This class is essentially a view to the FilterResults object, but returning the appropriate ranges for everything. """ representation_attributes = [ 'endog', 'design', 'obs_intercept', 'obs_cov', 'transition', 'state_intercept', 'selection', 'state_cov' ] filter_attributes = [ 'filtered_state', 'filtered_state_cov', 'predicted_state', 'predicted_state_cov', 'forecasts', 'forecasts_error', 'forecasts_error_cov' ] def __init__(self, results, start, end, nstatic, ndynamic, nforecast): # Save the filter results object self.results = results # Save prediction ranges self.npredictions = end - start self.start = start self.end = end self.nstatic = nstatic self.ndynamic = ndynamic self.nforecast = nforecast def __getattr__(self, attr): """ Provide access to the representation and filtered output in the appropriate range (`start` - `end`). 
""" # Prevent infinite recursive lookups if attr[0] == '_': raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)) _attr = '_' + attr # Cache the attribute if not hasattr(self, _attr): if attr == 'endog' or attr in self.filter_attributes: # Get a copy value = getattr(self.results, attr).copy() # Subset to the correct time frame value = value[..., self.start:self.end] elif attr in self.representation_attributes: value = getattr(self.results, attr).copy() # If a time-invariant matrix, return it. Otherwise, subset to # the correct period. if value.shape[-1] == 1: value = value[..., 0] else: value = value[..., self.start:self.end] else: raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)) setattr(self, _attr, value) return getattr(self, _attr)
bsd-3-clause
3,883,965,358,522,059,000
41.340876
79
0.594628
false
4.440953
false
false
false
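The KalmanFilter docstrings in the record above describe how filter, inversion, stability, and memory options are stored as bitmasks of boolean flags. The following is a minimal illustrative sketch of that mechanism, not statsmodels code: the constant values are taken from the docstrings, while the set_flag() helper is an assumption introduced only for illustration.

# Illustrative sketch of the bitmask option mechanism described above.
FILTER_CONVENTIONAL = 0x01
FILTER_UNIVARIATE = 0x10
FILTER_COLLAPSED = 0x20

def set_flag(method, flag, value):
    """Return `method` with the bit for `flag` switched on or off."""
    return method | flag if value else method & ~flag

method = FILTER_CONVENTIONAL                         # 1, the default
method = set_flag(method, FILTER_UNIVARIATE, True)   # 1 | 16 = 17
method = set_flag(method, FILTER_UNIVARIATE, False)  # back to 1
method = set_flag(method, FILTER_COLLAPSED, True)    # 1 | 32 = 33
assert method == 33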
rjw57/cubbie
migrations/versions/316bb58e84f_add_user_identities.py
1
1110
"""add user_identities Revision ID: 316bb58e84f Revises: 38c8ec357e0 Create Date: 2015-03-11 01:40:12.157458 """ # revision identifiers, used by Alembic. revision = '316bb58e84f' down_revision = '38c8ec357e0' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('user_identities', sa.Column('id', sa.Integer(), nullable=False), sa.Column('provider', sa.Text(), nullable=False), sa.Column('provider_user_id', sa.Text(), nullable=False), sa.Column('user_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index('idx_user_identities_provider_provider_id', 'user_identities', ['provider', 'provider_user_id'], unique=False) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_index('idx_user_identities_provider_provider_id', table_name='user_identities') op.drop_table('user_identities') ### end Alembic commands ###
mit
7,870,348,524,913,182,000
30.714286
130
0.684685
false
3.313433
false
false
false
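The migration above creates a user_identities table keyed by (provider, provider_user_id) with a foreign key to users. As a hedged sketch of how that table might be queried once the migration has been applied: the database URL, provider name, and provider user id below are placeholder assumptions, and SQLAlchemy is assumed to be available since the migration itself depends on it.

# Hedged sketch only: the sqlite URL and provider values are made up, and the
# query assumes the add_user_identities migration above has been applied.
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///cubbie.db")  # placeholder connection URL
with engine.connect() as conn:
    row = conn.execute(
        text("SELECT user_id FROM user_identities "
             "WHERE provider = :provider AND provider_user_id = :puid"),
        {"provider": "google", "puid": "1234567890"},
    ).fetchone()
    user_id = row[0] if row is not None else None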
cwgreene/Nanostructure-Simulator
utils/plot_trajectories.py
1
1140
import os import sys import re import pylab def parse_trajectory_line(line): trajectory = [] for x,y in re.findall("\(([0-9.]+), ([0-9.]+)\)",line): trajectory.append((float(x),float(y))) return trajectory def generate_trajectories(file): #get rid of the first two lines file.readline() file.readline() #parse each line for line in file: yield parse_trajectory_line(line) def open_trajectory_file(n): for filename in os.listdir("results"): if re.match(str(n)+"traj",filename): return open("results/"+filename) raise IOError("File not found") def display_trajectories(n): input ="" file = open_trajectory_file(n) trajectory_gen = generate_trajectories(file) trajectory = trajectory_gen.next() interactive = True i = 0 while input != 'q': first = map(lambda x: x[0],trajectory) second = map(lambda x: x[1],trajectory) pylab.plot(first,second) if interactive: input = raw_input() if input == "go": i += 1 interactive=False if i %100 == 0: print i raw_input() try: trajectory=trajectory_gen.next() except: print "Done" break if __name__=="__main__": display_trajectories(sys.argv[1])
mit
-5,705,216,141,029,246,000
20.923077
56
0.669298
false
2.900763
false
false
false
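The parsing in plot_trajectories.py above hinges on a single regular expression that pulls "(x, y)" pairs out of each line of a results file. A small worked example of that pattern follows; the sample line is made up, but the regex is the one used in parse_trajectory_line().

# Worked example of the trajectory-line format parsed above.
import re

line = "traj 0: (0.0, 1.5) (0.25, 1.75) (0.5, 2.0)"
pairs = [(float(x), float(y))
         for x, y in re.findall(r"\(([0-9.]+), ([0-9.]+)\)", line)]
assert pairs == [(0.0, 1.5), (0.25, 1.75), (0.5, 2.0)]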
jonathansick/androcmd
scripts/phat_baseline_test.py
1
3612
#!/usr/bin/env python # encoding: utf-8 """ Grid computation of dust attenuation for old vs. young stellar populations. 2015-05-12 - Created by Jonathan Sick """ import argparse from androcmd.phatpipeline import PhatCatalog from androcmd.baselineexp import SolarZPipeline, ThreeZPipeline def main(): args = parse_args() if args.pipeline == 'solarz': # Use the single-Z solar pipeline Pipeline = SolarZPipeline elif args.pipeline == 'threez': # Use the three-metallicity track pipeline Pipeline = ThreeZPipeline isoc = dict(isoc_kind='parsec_CAF09_v1.2S', photsys_version='yang') pipeline = Pipeline(brick=23, root_dir=args.model_name, isoc_args=isoc) if args.fit is not None: dataset = PhatCatalog(args.brick) pipeline.fit(args.fit, [args.fit], dataset) if args.plot_hess is not None: from androcmd.baselineexp import plot_fit_hess_grid dataset = PhatCatalog(args.brick) plot_fit_hess_grid(args.plot_hess, pipeline, dataset) if args.plot_diff is not None: from androcmd.baselineexp import plot_diff_hess_grid dataset = PhatCatalog(args.brick) plot_diff_hess_grid(args.plot_diff, pipeline, dataset) if args.plot_sfh is not None: from androcmd.baselineexp import sfh_comparison_plot dataset = PhatCatalog(args.brick) sfh_comparison_plot(args.plot_sfh, pipeline, dataset) if args.plot_zsfh is not None: from androcmd.baselineexp import plot_sfh_metallicity_trends dataset = PhatCatalog(args.brick) for fit_key in args.plot_zsfh: plot_path = "{model}_b{brick:d}_zsfh_{key}".format( model=args.model_name, brick=args.brick, key=fit_key) plot_sfh_metallicity_trends(plot_path, pipeline, dataset, fit_key) if args.chi_table is not None: from androcmd.baselineexp import tabulate_fit_chi dataset = PhatCatalog(args.brick) tabulate_fit_chi(args.chi_table, pipeline, dataset) if args.plot_isoc is not None: from androcmd.baselineexp import plot_isocs, plot_isocs_lewis dataset = PhatCatalog(args.brick) plot_isocs(args.plot_isoc, pipeline, dataset) plot_isocs_lewis(args.plot_isoc + '_lewis', pipeline, dataset) if args.plot_lock is not None: from androcmd.baselineexp import plot_lockfile plot_lockfile(args.plot_lock, pipeline) def parse_args(): parser = argparse.ArgumentParser( description="Model a brick with differential old/young dust.") parser.add_argument('model_name') parser.add_argument('brick', type=int) parser.add_argument('--fit', choices=['lewis', 'acs_rgb', 'acs_all', 'oir_all', 'ir_rgb'], default=None) parser.add_argument('--pipeline', choices=['solarz', 'threez'], default='solarz') parser.add_argument('--plot-hess', default=None) parser.add_argument('--plot-diff', default=None) parser.add_argument('--plot-sfh', default=None) parser.add_argument('--chi-table', default=None) parser.add_argument('--plot-zsfh', nargs='*', default=None, choices=['lewis', 'acs_rgb', 'acs_all', 'oir_all', 'ir_rgb']) parser.add_argument('--plot-isoc', default=None) parser.add_argument('--plot-lock', default=None) return parser.parse_args() if __name__ == '__main__': main()
mit
-6,061,255,170,724,512,000
35.484848
78
0.623477
false
3.544652
false
false
false
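For illustration, here is one way the argparse interface defined in parse_args() above could be exercised. The model name, brick number, and plot path are placeholder values, and parse_args() is assumed to be importable from the script; this is a sketch, not part of the repository.

# Hypothetical invocation of the script's CLI; all values are placeholders.
import sys

sys.argv = ["phat_baseline_test.py", "threez_run1", "23",
            "--pipeline", "threez", "--fit", "lewis",
            "--plot-hess", "hess_grid"]
args = parse_args()  # parse_args() as defined in the script above
assert args.brick == 23 and args.pipeline == "threez" and args.fit == "lewis"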
lhellebr/spacewalk
backend/server/rhnLib.py
1
8211
# # Copyright (c) 2008--2015 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os import hashlib import string import base64 import posixpath from spacewalk.common.rhnLib import parseRPMName from spacewalk.common.rhnLog import log_debug from spacewalk.common.rhnException import rhnFault # architecture work from rhnMapping import check_package_arch def computeSignature(*fields): # Init the hash m = hashlib.new('sha256') for i in fields: # use str(i) since some of the fields may be non-string m.update(str(i)) return base64.encodestring(m.digest()).rstrip() # 'n_n-n-v.v.v-r_r.r:e.ARCH.rpm' ---> [n,v,r,e,a] def parseRPMFilename(pkgFilename): """ IN: Package Name: xxx-yyy-ver.ver.ver-rel.rel_rel:e.ARCH.rpm (string) Understood rules: o Name can have nearly any char, but end in a - (well seperated by). Any character; may include - as well. o Version cannot have a -, but ends in one. o Release should be an actual number, and can't have any -'s. o Release can include the Epoch, e.g.: 2:4 (4 is the epoch) o Epoch: Can include anything except a - and the : seperator??? XXX: Is epoch info above correct? OUT: [n,e,v,r, arch]. """ if type(pkgFilename) != type(''): raise rhnFault(21, str(pkgFilename)) # Invalid arg. pkgFilename = os.path.basename(pkgFilename) # Check that this is a package NAME (with arch.rpm) and strip # that crap off. pkg = string.split(pkgFilename, '.') # 'rpm' at end? if string.lower(pkg[-1]) not in ['rpm', 'deb']: raise rhnFault(21, 'neither an rpm nor a deb package name: %s' % pkgFilename) # Valid architecture next? if check_package_arch(pkg[-2]) is None: raise rhnFault(21, 'Incompatible architecture found: %s' % pkg[-2]) _arch = pkg[-2] # Nuke that arch.rpm. pkg = string.join(pkg[:-2], '.') ret = list(parseRPMName(pkg)) if ret: ret.append(_arch) return ret # XXX TBD where to place this function - it has to be accessible from several # places def normalize_server_arch(arch): log_debug(4, 'server arch', arch) if arch is None: return '' arch = str(arch) if '-' in arch: # Already normalized return arch # Fix the arch if need be suffix = '-redhat-linux' arch = arch + suffix return arch class InvalidAction(Exception): """ An error class to signal when we can not handle an action """ pass class EmptyAction(Exception): """ An error class that signals that we encountered an internal error trying to handle an action through no fault of the client """ pass class ShadowAction(Exception): """ An error class for actions that should not get to the client """ pass def transpose_to_hash(arr, column_names): """ Handy function to transpose an array from row-based to column-based, with named columns. 
""" result = [] for c in column_names: result.append([]) colnum = len(column_names) for r in arr: if len(r) != colnum: raise Exception( "Mismatching number of columns: expected %s, got %s; %s" % ( colnum, len(r), r)) for i in range(len(r)): result[i].append(r[i]) # Now build the hash labeled with the column names rh = {} for i in range(len(column_names)): rh[column_names[i]] = result[i] return rh def get_package_path(nevra, org_id, source=0, prepend="", omit_epoch=None, package_type='rpm', checksum_type=None, checksum=None): """ Computes a package path, optionally prepending a prefix The path will look like <prefix>/<org_id>/checksum[:3]/n/e:v-r/a/checksum/n-v-r.a.rpm if not omit_epoch <prefix>/<org_id>/checksum[:3]/n/v-r/a/checksum/n-v-r.a.rpm if omit_epoch """ name, epoch, version, release, pkgarch = nevra # dirarch and pkgarch are special-cased for source rpms if source: dirarch = 'SRPMS' else: dirarch = pkgarch if org_id in ['', None]: org = "NULL" else: org = org_id if not omit_epoch and epoch not in [None, '']: version = str(epoch) + ':' + version # normpath sanitizes the path (removing duplicated / and such) template = os.path.normpath(prepend + "/%s/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s") return template % (org, checksum[:3], name, version, release, dirarch, checksum, name, nevra[2], release, pkgarch, package_type) # bug #161989 # It seems that our software was written specifically for rpms in far too many # ways. Here's a little bit of a hack function that will return the package path # (as in from get_package_path) but without the filename appended. # This enables us to append an arbitrary file name that is not restricted to the # form: name-version-release.arch.type def get_package_path_without_package_name(nevra, org_id, prepend="", checksum_type=None, checksum=None): """return a package path without the package name appended""" return os.path.dirname(get_package_path(nevra, org_id, prepend=prepend, checksum_type=checksum_type, checksum=checksum)) class CallableObj: """ Generic callable object """ def __init__(self, name, func): self.func = func self.name = name def __call__(self, *args, **kwargs): return self.func(self.name, *args, **kwargs) def make_evr(nvre, source=False): """ IN: 'e:name-version-release' or 'name-version-release:e' OUT: {'name':name, 'version':version, 'release':release, 'epoch':epoch } """ if ":" in nvre: nvr, epoch = nvre.rsplit(":", 1) if "-" in epoch: nvr, epoch = epoch, nvr else: nvr, epoch = nvre, "" nvr_parts = nvr.rsplit("-", 2) if len(nvr_parts) != 3: raise rhnFault(err_code=21, err_text="NVRE is missing name, version, or release.") result = dict(zip(["name", "version", "release"], nvr_parts)) result["epoch"] = epoch if source and result["release"].endswith(".src"): result["release"] = result["release"][:-4] return result def _is_secure_path(path): path = posixpath.normpath(path) return not (path.startswith('/') or path.startswith('../')) def get_crash_path(org_id, system_id, crash): """For a given org_id, system_id and crash, return relative path to a crash directory.""" path = os.path.join('systems', org_id, system_id, 'crashes', crash) if _is_secure_path(path): return path else: return None def get_crashfile_path(org_id, system_id, crash, filename): """For a given org_id, system_id, crash and filename, return relative path to a crash file.""" path = os.path.join(get_crash_path(org_id, system_id, crash), filename) if _is_secure_path(path): return path else: return None def get_action_path(org_id, system_id, action_id): """For a given org_id, 
system_id, and action_id, return relative path to a store directory.""" path = os.path.join('systems', str(org_id), str(system_id), 'actions', str(action_id)) if _is_secure_path(path): return path def get_actionfile_path(org_id, system_id, action_id, filename): """For a given org_id, system_id, action_id, and file, return relative path to a file.""" path = os.path.join(get_action_path(org_id, system_id, action_id), str(filename)) if _is_secure_path(path): return path
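# A small usage sketch (not part of the original module): transpose_to_hash()
# and make_evr() are pure helpers, so they can be exercised directly.
if __name__ == '__main__':
    print(transpose_to_hash([(1, 'a'), (2, 'b')], ['id', 'label']))
    # {'id': [1, 2], 'label': ['a', 'b']}
    print(make_evr('0:bash-4.2.46-31'))
    # {'epoch': '0', 'name': 'bash', 'version': '4.2.46', 'release': '31'}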
gpl-2.0
8,887,653,560,759,876,000
30.580769
98
0.629765
false
3.543807
false
false
false
m-r-hunt/invaders
enemies.py
1
6646
# Invaders # Copyright (C) 2013 Maximilian Hunt # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import os, random, pygame, projectiles, score_counter class EnemySprite(pygame.sprite.Sprite): # Class for one enemy invader. def __init__(self, image, position, bullet_group): # image: relative path to an image pygame can load # position: (x, y) coordinates on screen # bullet_group: pygame.sprite.Group to put fired bullets in pygame.sprite.Sprite.__init__(self) self.image = pygame.image.load(image) self.position = position self.rect = self.image.get_rect() self.rect.center = position self.bullet_group = bullet_group def update(self, dv, score, collisions): # Update this enemy. Should be called once per frame. # dv: (x, y) vector for movement this frame # score: a Score to increment on death # collisions: a dictionary of collisions, possibly containing this object # Handle any collisions given if self in collisions: death = False for bullet in collisions[self]: if (bullet.origin != self): bullet.kill() death = True if (death == True): score.increment() self.kill() # Update position self.position = (self.position[0] + dv[0], self.position[1] + dv[1]) self.rect.center = self.position def y(self): # Return height (y coordinate). return self.position[1] def fire(self): # (Possibly) fire a bullet down. if (random.randrange(100) < 2): bounds = (0-100, 800+100, 0-100, 600+100) bullet = projectiles.Bullet(os.path.join("Resources", "Enemy Bullet.png"), self.position, (0, 5), bounds, self) self.bullet_group.add(bullet) class EnemyColumn(pygame.sprite.Group): # Class for one column in a formation of enemies. # Exists so we can easily fire only the lowest enemy in each column # Remembers its own x coordinate, everything else happens inside the actual enemies def __init__(self, x_position): # x_position: integer x coordinate pygame.sprite.Group.__init__(self) self.x_position = x_position def update(self, dv, score, collisions): # Update this column. Should be called once per frame. # dv: (x, y) vector for movement this frame # score: a Score to pass to contained EnemySprites # collisions: a dictionary of collisions to pass to contained EnemySprites # Return (x, y), x of this column and y of lowest contained Sprite. self.x_position += dv[0] # Update contained sprites for i in self.sprites(): i.update(dv, score, collisions) # Compute biggest y, ask that EnemySprite to fire. max_y = 0 if (len(self) != 0): for i in self.sprites(): if (i.y() > max_y): max_y = i.y() bottom_enemy = i bottom_enemy.fire() return self.x_position, max_y class EnemyFormation(pygame.sprite.Group): # Class for a whole formation of enemies. # Contains both EnemyColumns and EnemySprites # Magic numbers: Base speed stepped horizontally or vertically each frame. 
H_STEP = 2 V_STEP = 10 def __init__(self, topleft, layout, bounds, bullet_group): pygame.sprite.Group.__init__(self) self.columns = [] columns, rows = layout # Generate all the enemies and columns. for i in range(0, columns): column_x = topleft[0] + i*64 enemy_column = EnemyColumn(topleft[0] + i*64) for j in range(0, rows): new_enemy = EnemySprite(os.path.join("resources", "Enemy.png"), (column_x, topleft[1] + j*64), bullet_group) enemy_column.add(new_enemy) self.add(new_enemy) self.columns.append(enemy_column) # Direction: +1 for right, -1 for left (i.e. +-ve x direction) self.current_direction = +1 self.left_bound, self.right_bound, self.bottom_bound = bounds self.total = columns * rows def update(self, score, collisions): # Update this formation. Should be called once per frame. # score: a Score to pass to contained EnemyColumns # collisions: a dictionary of collisions to pass to contained EnemyColumns # Returns (bool, bool). First is True if this formation is still in a good state, False if it needs resetting. # Second is True if this is because it's now empty, False if it has reached the bottom of the screen. direction_change = too_low = False # Compute factor to move faster when we have fewer remaining members. scale = int(float(self.total)/float(len(self))) # Update columns for i in self.columns: x, y = i.update((scale*self.current_direction*self.H_STEP, 0), score, collisions) # Remove empty columns if (len(i.sprites()) == 0): self.columns.remove(i) # Notice if we've gone too low elif (y > self.bottom_bound): too_low = True # Remember to change direction when we reach screen edges elif (x < self.left_bound or x > self.right_bound): direction_change = True # Indicate we're empty if (len(self.columns) == 0): return False, True # Indicate we reached the bottom of the screen. elif too_low: return False, False # Drop down and change direction elif direction_change: self.current_direction *= -1 for i in self.columns: i.update((scale*self.current_direction*self.H_STEP, self.V_STEP), score, []) # If we made it here, everything's fine. return True, True
gpl-2.0
-4,189,515,760,736,269,300
41.06962
124
0.614354
false
3.941874
false
false
false
2Minutes/davos-dev
davos/core/utils.py
1
7692
import re import sys import os import os.path as osp from fnmatch import fnmatch from pytd.gui.dialogs import promptDialog from pytd.util.logutils import logMsg from pytd.util.sysutils import importModule, toStr, inDevMode, getCaller from pytd.util.fsutils import pathSplitDirs, pathResolve, pathNorm, pathJoin from pytd.util.fsutils import jsonRead, jsonWrite, isDirStat, parseDirContent from pytd.util.strutils import padded _VERS_SPLIT_REXP = re.compile(r'-(v[0-9]+)') def getConfigModule(sProjectName): try: sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config") sConfigModule = sConfPkg + '.' + sProjectName modobj = importModule(sConfigModule) except ImportError: raise ImportError("No config module named '{}'".format(sConfigModule)) return modobj def versionFromName(sFileName): vers = _VERS_SPLIT_REXP.findall(sFileName) return int(vers[-1].strip('v')) if vers else None def mkVersionSuffix(v): if not isinstance(v, int): raise TypeError("argument must be of type <int>. Got {}.".format(type(v))) return "".join(('-v', padded(v))) def findVersionFields(s): return _VERS_SPLIT_REXP.findall(s) def promptForComment(**kwargs): sComment = "" bOk = False result = promptDialog(title='Please...', message='Leave a comment: ', button=['OK', 'Cancel'], defaultButton='OK', cancelButton='Cancel', dismissString='Cancel', scrollableField=True, **kwargs) if result == 'Cancel': logMsg("Cancelled !" , warning=True) elif result == 'OK': sComment = promptDialog(query=True, text=True) bOk = True return sComment, bOk def projectNameFromPath(p): sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config") pkg = importModule(sConfPkg) sPkgDirPath = os.path.dirname(pkg.__file__) sDirList = pathSplitDirs(p) for sFilename in os.listdir(sPkgDirPath): bIgnored = False for sPatrn in ("__*", ".*", "*.pyc"): if fnmatch(sFilename, sPatrn): bIgnored = True break if bIgnored: continue sModName = os.path.splitext(sFilename)[0] m = importModule(sConfPkg + '.' 
+ sModName) sProjDir = m.project.dir_name if sProjDir in sDirList: return sModName return "" def splitStep(sTaskName): return sTaskName.rsplit("|", 1) if ("|" in sTaskName) else ("", sTaskName) def damasServerPort(): return os.getenv("DAMAS_DEV_PORT", "8443") if inDevMode() else "8443" def loadPrefs(): global DAVOS_PREFS try: p = pathResolve(r"%USERPROFILE%\davos_prefs.json") DAVOS_PREFS = jsonRead(p) except EnvironmentError: DAVOS_PREFS = {} return DAVOS_PREFS def savePrefs(): global DAVOS_PREFS if DAVOS_PREFS: p = pathResolve(r"%USERPROFILE%\davos_prefs.json") jsonWrite(p, DAVOS_PREFS) def setPref(in_sKey, value): global DAVOS_PREFS if "|" not in in_sKey: DAVOS_PREFS[in_sKey] = value return sKeyList = in_sKey.split("|") iLastKey = len(sKeyList) - 1 currPrefs = DAVOS_PREFS sPrevKey = "" prevPrefs = None for i, sKey in enumerate(sKeyList): if not isinstance(currPrefs, dict): prevPrefs[sPrevKey] = {} currPrefs = prevPrefs[sPrevKey] if i == iLastKey: currPrefs[sKey] = value return if sKey not in currPrefs: currPrefs[sKey] = {} prevPrefs = currPrefs sPrevKey = sKey currPrefs = currPrefs[sKey] def getPref(in_sKey, default=None): global DAVOS_PREFS if "|" not in in_sKey: return DAVOS_PREFS.get(in_sKey, default) sKeyList = in_sKey.split("|") iLastKey = len(sKeyList) - 1 currPrefs = DAVOS_PREFS for i, sKey in enumerate(sKeyList): if not isinstance(currPrefs, dict): k = "|".join(sKeyList[:(i + 1)]) logMsg("Not a pref dictionary: '{}'.".format(k), warning=True) return default if i == iLastKey: return currPrefs.get(sKey, default) if sKey in currPrefs: currPrefs = currPrefs[sKey] else: logMsg("No such pref: '{}'.".format(in_sKey), warning=True) return default _ICON_DIR_PATH = "" def mkIconPath(sRelPath): global _ICON_DIR_PATH if (not _ICON_DIR_PATH) or (not osp.exists(_ICON_DIR_PATH)): p = sys.modules["davos"].__file__ p = osp.abspath(osp.join(osp.dirname(p), "..", "resources", "icon")) _ICON_DIR_PATH = p return pathJoin(_ICON_DIR_PATH, sRelPath) def writePackContent(sPackDirPath, dirStat=None): sPackDirPath = pathNorm(sPackDirPath) if not dirStat: dirStat = os.stat(sPackDirPath) sJsonPath = mkPackFilePath(sPackDirPath) iMtime = 0 if not osp.exists(sJsonPath): iMtime = dirStat.st_mtime iAtime = dirStat.st_atime try: open(sJsonPath, 'a+b').close() # create json file so it is listed by parseDirContent() dirContent = parseDirContent(sPackDirPath) jsonWrite(sJsonPath, dirContent, sort_keys=True) finally: if iMtime: os.utime(sPackDirPath, (iAtime, iMtime)) return dirContent def readPackContent(sPackDirPath, fail=True): try: dirContent = jsonRead(mkPackFilePath(sPackDirPath)) except EnvironmentError as e: if fail: raise logMsg(toStr(e), warning=True) dirContent = parseDirContent(sPackDirPath) return dirContent def mkPackFilePath(sPackDirPath): return pathJoin(sPackDirPath, "_package.json") _ISPACK_REXP = re.compile(r".+_pkg[^/\w].+", re.I) def assertPack(p, dirStat=None): if not dirStat: dirStat = os.stat(pathNorm(p)) if isPack(p, fail=True, dirStat=dirStat): return dirStat return None def belowPack(p): p = pathNorm(p) if os.environ["IN_SEB_MODE"]: return True if _belowPack(p) else _belowOldPack(p) else: return _belowPack(p) def isPack(p, fail=False, dirStat=None): p = pathNorm(p) if os.environ["IN_SEB_MODE"]: bPackPath = True if _isPack(p) else _isOldPack(p) else: bPackPath = _isPack(p) if not bPackPath: if fail: sMsg = ("Directory NOT a package (should start with 'pkg_' or 'lyr_'): '{}'." 
.format(osp.basename(p))) raise EnvironmentError(sMsg) else: return False if dirStat and not isDirStat(dirStat): if fail: raise EnvironmentError("Package path NOT a directory: '{}'".format(p)) else: return False return True def _belowPack(p): p = osp.dirname(p) for sDirName in pathSplitDirs(p): if _isPack(sDirName): return True return False def _isPack(p): sBaseName = osp.basename(p) if "/" in p else p if "_" not in sBaseName: return False sPrefix = sBaseName.split("_", 1)[0] if not sPrefix: return False return (sPrefix.lower() + "_") in ("pkg_", "lyr_") def _belowOldPack(p): p = osp.dirname(p) if "_pkg/" in p.lower(): return True if _ISPACK_REXP.match(p): return True return False def _isOldPack(p): sName = osp.basename(p) if sName.lower().endswith("_pkg"): return True if _ISPACK_REXP.match(sName): return True return False
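# Hedged usage sketch (not part of the original module): the version-string
# helpers only depend on the module-level regex, so they are easy to check.
if __name__ == '__main__':
    print(versionFromName('sq010_sh020_anim-v012.ma'))   # 12
    print(findVersionFields('charA-v003_layout-v007'))   # ['v003', 'v007']
    print(splitStep('modeling|high_res'))                # ('modeling', 'high_res')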
gpl-3.0
-7,329,362,235,480,504,000
23.341772
94
0.597634
false
3.366302
true
false
false
solarsail/aerosol-tools
clustatlib/clucsv.py
1
3752
import numpy as np import os import os.path class csvbuilder: def __init__(self, cs): self.cs = cs if not os.path.isdir('csv'): os.mkdir('csv') def month_type_csv(self, site = None): label = 'all' if site == None else site values, percentages = self.cs.month_type_stat(site) header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)]) header = "month," + header all = [] for i in range(len(values)): all.append(values[i]) all.append(percentages[i]) mat = np.matrix(all) mat = mat.transpose().tolist() content = [] for i in range(12): content.append("%d,%s" % (i+1, ','.join([str(field) for field in mat[i]]))) content = '\n'.join(content) with open("csv/month_type_%s.csv" % label, 'w') as outfile: outfile.write('\n'.join((header, content))) def year_type_csv(self, start_year, end_year, site = None): label = 'all' if site == None else site values, percentages = self.cs.year_type_stat(start_year, end_year, site) header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)]) header = "year," + header all = [] for i in range(len(values)): all.append(values[i]) all.append(percentages[i]) mat = np.matrix(all) mat = mat.transpose().tolist() content = [] for i in range(start_year, end_year+1): content.append("%d,%s" % (i, ','.join([str(field) for field in mat[i-start_year]]))) content = '\n'.join(content) with open("csv/year_type_%s.csv" % label, 'w') as outfile: outfile.write('\n'.join((header, content))) def type_csv(self): header = "type,count,percentage%" all = self.cs.type_stat() content = '\n'.join([','.join([str(field) for field in row]) for row in all]) with open("csv/type_count.csv", 'w') as outfile: outfile.write('\n'.join((header, content))) def site_type_csv(self): all, types = self.cs.site_type_stat() header = ",".join(["type{},%".format(t) for t in range(1, types+1)]) header = "site," + header content = '\n'.join([','.join([str(field) for field in row]) for row in all]) with open("csv/site_type_count.csv", 'w') as outfile: outfile.write('\n'.join((header, content))) def type_stat_csv(self): header = "type,refr440,refr675,refr870,refr1020,refi440,refi675,refi870,refi1020,volmedianradf,stddevf,volconf,volmedianradc,stddevc,volconc,ssa675,ssa870,ssa1020,asy440,asy675,asy870,sphericity" list1 = self.cs.type_means() list2 = self.cs.type_stddev() l = [] for i in range(len(list1)): l.append(list1[i]) stddevline = list(list2[i]) stddevline[0] = "stddev" l.append(stddevline) content = '\n'.join([','.join([str(field) for field in row]) for row in l]) with open("csv/type_stat.csv", 'w') as outfile: outfile.write('\n'.join((header, content))) def distances_csv(self): clus, dist_mat = self.cs.all_distances() header = "," + ",".join([str(cid) for cid in clus]) lines = [] first = 1 cur = 0 for clu in clus: lines.append(str(clu) + ',' * first + ','.join(str(d) for d in dist_mat[cur:cur+len(clus)-first+1])) cur += len(clus) - first + 1 first += 1 content = '\n'.join(lines) with open("csv/distance_stat.csv", 'w') as outfile: outfile.write('\n'.join((header, content)))
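# Hedged usage sketch (not part of the original module): ``cs`` is expected to
# expose the various *_stat methods; a minimal stand-in for the type_csv()
# path could look like this.
if __name__ == '__main__':
    class _FakeStats(object):
        def type_stat(self):
            return [(1, 120, 40.0), (2, 180, 60.0)]

    builder = csvbuilder(_FakeStats())
    builder.type_csv()   # writes csv/type_count.csv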
gpl-3.0
-5,397,643,248,379,671,000
41.647727
203
0.539179
false
3.398551
false
false
false
hawkeyexp/plugin.video.netflix
resources/lib/services/nfsession/session/base.py
1
2055
# -*- coding: utf-8 -*- """ Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix) Copyright (C) 2018 Caphm (original implementation module) Copyright (C) 2019 Stefano Gottardo - @CastagnaIT Initialize the netflix session SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. """ from __future__ import absolute_import, division, unicode_literals import resources.lib.common as common from resources.lib.database.db_utils import TABLE_SESSION from resources.lib.globals import G from resources.lib.utils.logging import LOG class SessionBase(object): """Initialize the netflix session""" session = None """The requests.session object to handle communication to Netflix""" verify_ssl = True """Use SSL verification when performing requests""" # Functions from derived classes to allow perform particular operations in parent classes external_func_activate_profile = None # (set by nfsession_op.py) def __init__(self): self.verify_ssl = bool(G.ADDON.getSettingBool('ssl_verification')) self._init_session() def _init_session(self): """Initialize the session to use for all future connections""" try: self.session.close() LOG.info('Session closed') except AttributeError: pass from requests import session self.session = session() self.session.max_redirects = 10 # Too much redirects should means some problem self.session.headers.update({ 'User-Agent': common.get_user_agent(enable_android_mediaflag_fix=True), 'Accept-Encoding': 'gzip, deflate, br', 'Host': 'www.netflix.com' }) LOG.info('Initialized new session') @property def auth_url(self): """Access rights to make HTTP requests on an endpoint""" return G.LOCAL_DB.get_value('auth_url', table=TABLE_SESSION) @auth_url.setter def auth_url(self, value): G.LOCAL_DB.set_value('auth_url', value, TABLE_SESSION)
mit
8,894,260,233,918,521,000
33.830508
93
0.66618
false
4.021526
false
false
false
playerNaN/NaNPyGameEngine
engine.py
1
5921
import pygame import sys import os from collections import namedtuple import time import resourcemanager ColorList = namedtuple("ColorList", "black white red green blue") colors = ColorList((0,0,0),(0xFF,0xFF,0xFF),(0xFF,0,0),(0,0xFF,0),(0,0,0xFF)) PyListener = namedtuple("PyListener", "condition effect") PyEventListener = namedtuple("PyEventListener","events condition effect") class Pyengine: def __init__(self,size): pygame.init() self.__size = size self.__fps = 60 self.__bg = colors.white self.__fg = colors.black self.__on_update = [] self.__on_draw = [] self.__keys_down = {} self.__listeners = [] self.__event_handlers = [] self.__mouse_down = {} self.__display = None self.__screen_centerX = size[0]/2 self.__scaleX = 1.0 self.__scaleY = 1.0 self.__screen_centerY = size[1]/2 self.__clock = pygame.time.Clock() self.__buffer_surface = None self.__resource_manager = resourcemanager.ResourceManager() self.__animators = {} def add_animator(self,name,animator): self.__animators[name] = animator def remove_animator(self,name): del self.__animators[name] def get_animator(self,name): return self.__animators[name] def set_scale_x(self,x): self.__scaleX = x def get_scale_x(self): return self.__scaleX def set_scale_y(self,y): self.__scaleY = y def get_scale_y(self): return self.__scaleY def set_scale(self,s): self.__scaleX = s[0] self.__scaleY = s[1] def get_scale(self): return (self.__scaleX,self.__scaleY) def set_fg(self,fg): self.__fg = fg def get_fg(self): return self.__fg def set_bg(self,bg): self.__bg = bg def get_bg(self): return self.__bg def get_display(self): return self.__display() def set_screen_center_x(self,x): self.__screen_centerX = x def get_screen_center_x(self): return self.__screen_centerX def set_screen_center_y(self,y): self.__screen_centerY = y def get_screen_center_y(self): return self.__screen_centerY def set_screen_center(self,pos): self.__screen_centerX = pos[0] self.__screen_centerY = pos[1] def get_screen_center(self): return (self.__screen_centerX,self.__screen_centerY) def get_buffer_surface(self): return self.__buffer_surface def get_resource_manager(self): return self.__resource_manager def update_all_animators(self): ms = self.__clock.get_time() for i in self.__animators: self.__animators[i].update(ms) def draw_all_animators(self): for i in self.__animators: self.draw_image(self.__animators[i].get_current_image(),self.__animators[i].get_position()) def handle_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT: self.exit() elif event.type == pygame.KEYDOWN: self.__keys_down[event.key] = True elif event.type == pygame.KEYUP: self.__keys_down[event.key] = False elif event.type == pygame.MOUSEBUTTONDOWN: self.__mouse_down = True elif event.type == pygame.MOUSEBUTTONUP: self.__mouse_down = False for handler in self.__event_handlers: if event.type in handler.events and handler.condition(self,event): handler.effect(self,event) def draw_image(self,name,pos): self.__buffer_surface.blit(self.__resource_manager.get_image(name),pos) def is_key_down(self,key): if not key in self.__keys_down: return False return self.__keys_down[key] def is_mouse_button_down(self,button): if not button in self.__mouse_down: return False return self.__mouse_down[button] def run(self): screen = pygame.display.set_mode(self.__size) self.__display = screen oldTime = time.time() while True: spf = 1.0 / self.__fps self.handle_events() self.update() self.draw(screen) self.__clock.tick(self.__fps) def exit(self): pygame.display.quit() pygame.quit() sys.exit() def update(self): 
        self.update_all_animators()
        for l in self.__listeners:
            if l.condition(self):
                l.effect(self)

    def draw(self,display):
        self.__buffer_surface = pygame.Surface(display.get_size())
        display.fill(colors.red)
        self.__buffer_surface.fill(self.__bg)
        for od in self.__on_draw:
            od(self,self.__buffer_surface)
        self.draw_all_animators()
        src_size = (self.__size[0]/self.__scaleX,self.__size[1]/self.__scaleY)
        top = self.__screen_centerY - src_size[1] / 2
        left = self.__screen_centerX - src_size[0] / 2
        cropped = pygame.Surface(src_size)
        cropped.blit(self.__buffer_surface,(0,0),(left,top,src_size[0],src_size[1]))
        cropped = pygame.transform.scale(cropped,self.__size)
        display.blit(cropped,(0,0))
        pygame.display.update((0,0,self.__size[0],self.__size[1]))

    def add_draw_listener(self,f):
        self.__on_draw.append(f)

    def add_listener(self,condition,effect):
        self.__listeners.append(PyListener(condition,effect))

    def add_on_update(self,effect):
        # Fixed: the original called self.__add_listener, which name mangling
        # turns into a non-existent attribute; the public add_listener() is
        # what was intended here.
        self.add_listener(lambda s:True,effect)

    def add_event_listener(self,events,condition,effect):
        self.__event_handlers.append(PyEventListener(events,condition,effect))

    def set_fps(self,fps):
        self.__fps = fps

    def get_fps(self):
        return self.__fps
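# Hedged usage sketch (not part of the original module): a minimal game loop
# with a single custom draw callback; requires a working pygame display.
if __name__ == '__main__':
    engine = Pyengine((800, 600))
    engine.set_fps(30)
    engine.add_draw_listener(
        lambda eng, surface: pygame.draw.circle(surface, colors.red, (400, 300), 40))
    engine.run()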
unlicense
-5,850,025,484,649,098,000
32.647727
103
0.575578
false
3.657196
false
false
false
muminoff/savollar
savollar/pipelines.py
1
2093
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don"t forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
from elasticsearch import Elasticsearch
from uuid import uuid1
from savollar.models import SavolModel


class ElasticSearchIndexPipeline(object):

    def process_item(self, item, spider):
        es = Elasticsearch([
            {"host": settings["ELASTICSEARCH_HOST"]},
        ])
        valid = True
        for data in item:
            if not data:
                raise DropItem("Missing %s of item from %s" %(data, item["link"]))
        if valid:
            es.index(
                index=settings["ELASTICSEARCH_INDEX"],
                doc_type="info",
                id=str(uuid1()),
                body=dict(item)
            )
            log.msg("Item indexed to ElasticSearch database %s:%s" %
                    (settings["ELASTICSEARCH_HOST"], settings["ELASTICSEARCH_PORT"]),
                    level=log.DEBUG, spider=spider)
        return item


class CassandraExportPipleline(object):

    def process_item(self, item, spider):
        valid = True
        for data in item:
            if not data:
                raise DropItem("Missing %s of item from %s" %(data, item["link"]))
        if valid:
            model = SavolModel()
            model.title = item["title"]
            model.question = item["question"]
            model.answer = item["answer"]
            model.author = item["author"]
            model.permalink = item["permalink"]
            model.year = int(item["year"])
            model.month = int(item["month"])
            model.date = int(item["date"])
            model.tags = item["title"].split()
            model.save()
            log.msg("Item exported to Cassandra database %s/%s" %
                    (settings["CASSANDRA_HOST"], settings["CASSANDRA_KEYSPACE"]),
                    level=log.DEBUG, spider=spider)
        return item
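# Hedged configuration sketch (not part of this module): both pipelines are
# enabled from the project's settings.py; the host/index values below are
# examples, not values taken from this repository.
#
#   ITEM_PIPELINES = {
#       'savollar.pipelines.ElasticSearchIndexPipeline': 300,
#       'savollar.pipelines.CassandraExportPipleline': 400,
#   }
#   ELASTICSEARCH_HOST = 'localhost'
#   ELASTICSEARCH_PORT = 9200
#   ELASTICSEARCH_INDEX = 'savollar'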
apache-2.0
5,381,455,535,540,653,000
33.883333
85
0.565695
false
4.161034
false
false
false
shanot/imp
modules/rmf/examples/link.py
2
1236
## \example rmf/link.py
# This example is like module/rmf/pdb.py except that instead of creating a
# new hierarchy from the rmf file, it simply links the existing hierarchy
# to the file. This mechanism can be used for loading multiple
# conformations for scoring or other analysis without having to set up
# restraints and things each time.

from __future__ import print_function
import IMP.atom
import IMP.rmf
import RMF
import sys

IMP.setup_from_argv(sys.argv, "link")

m = IMP.Model()

# Create a new IMP.atom.Hierarchy from the contents of the pdb file
h = IMP.atom.read_pdb(IMP.rmf.get_example_path("simple.pdb"), m)

tfn = "link.rmf"

print("File name is", tfn)

# open the file, clearing any existing contents
rh = RMF.create_rmf_file(tfn)

# add the hierarchy to the file
IMP.rmf.add_hierarchies(rh, [h])

# add the current configuration to the file as frame 0
IMP.rmf.save_frame(rh)

# close the file
del rh

# reopen it, don't clear the file when opening it
rh = RMF.open_rmf_file_read_only(tfn)

# link to the existing pdb hierarchy
IMP.rmf.link_hierarchies(rh, [h])

# load the same coordinates in, ok, that is not very exciting
IMP.rmf.load_frame(rh, RMF.FrameID(0))

print("Try running rmf_display or rmf_show on", tfn)
gpl-3.0
2,193,594,142,939,475,700
25.869565
74
0.7411
false
3
false
false
false
karesansui/karesansui
bin/restart_network.py
1
4392
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of Karesansui. # # Copyright (C) 2012 HDE, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import os import sys import logging from optparse import OptionParser from ksscommand import KssCommand, KssCommandException, KssCommandOptException import __cmd__ try: import karesansui from karesansui import __version__ from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException from karesansui.lib.const import NETWORK_IFCONFIG_COMMAND, NETWORK_BRCTL_COMMAND from karesansui.lib.utils import load_locale from karesansui.lib.utils import execute_command except ImportError, e: print >>sys.stderr, "[Error] some packages not found. - %s" % e sys.exit(1) _ = load_locale() usage = '%prog [options]' def getopts(): optp = OptionParser(usage=usage, version=__version__) optp.add_option('-n', '--name', dest='name', help=_('Network name')) optp.add_option('-f', '--force', dest='force', action="store_true", help=_('Do everything to bring up network')) return optp.parse_args() def chkopts(opts): if not opts.name: raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name') class RestartNetwork(KssCommand): def process(self): (opts, args) = getopts() chkopts(opts) self.up_progress(10) conn = KaresansuiVirtConnection(readonly=False) try: active_networks = conn.list_active_network() inactive_networks = conn.list_inactive_network() if not (opts.name in active_networks or opts.name in inactive_networks): raise KssCommandException('Could not find the specified network. - net=%s' % (opts.name)) self.up_progress(10) try: conn.stop_network(opts.name) except KaresansuiVirtException, e: if opt.force is not True: raise KssCommandException('Could not stop the specified network. - net=%s' % (opts.name)) self.up_progress(20) try: conn.start_network(opts.name) except KaresansuiVirtException, e: if opts.force is not True: raise KssCommandException('Could not start the specified network. - net=%s' % (opts.name)) # try to bring down existing bridge kvn = conn.search_kvn_networks(opts.name)[0] try: bridge_name = kvn.get_info()['bridge']['name'] except KeyError: pass ret, res = execute_command([NETWORK_IFCONFIG_COMMAND, bridge_name, 'down']) ret, res = execute_command([NETWORK_BRCTL_COMMAND, 'delbr', bridge_name]) # try again conn.start_network(opts.name) self.up_progress(10) if not (opts.name in conn.list_active_network()): raise KssCommandException('Failed to start network. 
- net=%s' % (opts.name)) self.logger.info('Restarted network. - net=%s' % (opts.name)) print >>sys.stdout, _('Restarted network. - net=%s') % (opts.name) return True finally: conn.close() if __name__ == "__main__": target = RestartNetwork() sys.exit(target.run())
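# Hedged invocation sketch (not part of the original script): the command is
# driven entirely by getopts() above, e.g.
#
#   restart_network.py -n default --force
#
# which stops and restarts the libvirt network named 'default', tearing the
# bridge down by hand if the plain stop/start sequence fails.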
mit
2,224,646,137,374,195,000
36.538462
116
0.645492
false
3.981868
false
false
false
qnzhou/ThingiverseCrawler
thingiverse_crawler.py
1
9320
#!//usr/bin/env python import argparse import datetime import os import os.path import requests import re import time import urllib import urlparse from subprocess import check_call def utc_mktime(utc_tuple): """Returns number of seconds elapsed since epoch Note that no timezone are taken into consideration. utc tuple must be: (year, month, day, hour, minute, second) """ if len(utc_tuple) == 6: utc_tuple += (0, 0, 0) return time.mktime(utc_tuple) - time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0)) def datetime_to_timestamp(dt): """Converts a datetime object to UTC timestamp""" return int(utc_mktime(dt.timetuple())) def parse_thing_ids(text): pattern = "thing:(\d{5,7})" matched = re.findall(pattern, text) return [int(val) for val in matched] def parse_file_ids(text): pattern = "download:(\d{5,7})" matched = re.findall(pattern, text) return [int(val) for val in matched] known_licenses = [ ("Creative Commons - Attribution", re.compile("http://creativecommons.org/licenses/by/\d(.\d)?/")), ("Creative Commons - Attribution - Share Alike", re.compile("http://creativecommons.org/licenses/by-sa/\d(.\d)?/")), ("Creative Commons - Attribution - No Derivatives", re.compile("http://creativecommons.org/licenses/by-nd/\d(.\d)?/")), ("Creative Commons - Attribution - Non-Commercial", re.compile("http://creativecommons.org/licenses/by-nc/\d(.\d)?/")), ("Attribution - Non-Commercial - Share Alike", re.compile("http://creativecommons.org/licenses/by-nc-sa/\d(.\d)?/")), ("Attribution - Non-Commercial - No Derivatives", re.compile("http://creativecommons.org/licenses/by-nc-nd/\d(.\d)?/")), ("Creative Commons - Public Domain Dedication", re.compile("http://creativecommons.org/publicdomain/zero/\d(.\d)?/")), ("GNU - GPL", re.compile("http://creativecommons.org/licenses/GPL/\d(.\d)?/")), ("GNU - LGPL", re.compile("http://creativecommons.org/licenses/LGPL/\d(.\d)?/")), ("BSD License", re.compile("http://creativecommons.org/licenses/BSD/")), ("Nokia", re.compile("http://www.developer.nokia.com/Terms_and_conditions/3d-printing.xhtml")), ("Public Domain", re.compile("http://creativecommons.org/licenses/publicdomain/")), ] def parse_license(text): for name, pattern in known_licenses: if pattern.search(text): return name return "unknown_license" def crawl_thing_ids(N, end_date=None): """ This method extract N things that were uploaded to thingiverse.com before end_date. If end_date is None, use today's date. """ baseurl = "http://www.thingiverse.com/search/recent/things/page:{}?q=&start_date=&stop_date={}&search_mode=advanced&description=&username=&tags=&license=" end_date = datetime_to_timestamp(end_date) thing_ids = set() for i in range(N/12 + 1): url = baseurl.format(i, end_date) r = requests.get(url) assert(r.status_code==200) thing_ids.update(parse_thing_ids(r.text)) if len(thing_ids) > N: break # Sleep a bit to avoid being mistaken as DoS. 
time.sleep(0.5) return thing_ids def crawl_things(N, output_dir, term=None, category=None, source=None, organize=False): #baseurl = "http://www.thingiverse.com/newest/page:{}" #baseurl = "http://www.thingiverse.com/explore/popular/page:{}" key = None if term is None: assert(source is not None); url_prefix= "http://www.thingiverse.com/explore/{}/".format(source); if category is None: baseurl = url_prefix + "page:{}" else: baseurl = url_prefix + urllib.quote_plus(category) + "/page:{}" key = category else: baseurl = "http://www.thingiverse.com/search/page:{}?type=things&q=" + urllib.quote_plus(term) key = term thing_ids = set() file_ids = set() records = [] num_files = 0 page = 0 previous_path = '' while True: url = baseurl.format(page+1) contents = get_url(url) page += 1 # If the previous url ends up being the same as the old one, we should stop as there are no more pages current_path = urlparse.urlparse(contents.url).path if previous_path == current_path: return records else: previous_path = current_path for thing_id in parse_thing_ids(contents.text): if thing_id in thing_ids: continue print("thing id: {}".format(thing_id)) thing_ids.add(thing_id) license, thing_files = get_thing(thing_id) for file_id in thing_files: if file_id in file_ids: continue file_ids.add(file_id) print(" file id: {}".format(file_id)) result = download_file(file_id, thing_id, output_dir, organize) if result is None: continue filename, link = result if filename is not None: records.append((thing_id, file_id, filename, license, link)) if N is not None and len(records) >= N: return records # Sleep a bit to avoid being mistaken as DoS. time.sleep(0.5) save_records(records, key) def get_thing(thing_id): base_url = "http://www.thingiverse.com/{}:{}" file_ids = [] url = base_url.format("thing", thing_id) contents = get_url(url).text license = parse_license(contents) return license, parse_file_ids(contents) def get_url(url, time_out=600): r = requests.get(url) sleep_time = 1.0 while r.status_code != 200: print("sleep {}s".format(sleep_time)) print(url) time.sleep(sleep_time) r = requests.get(url) sleep_time += 2 if (sleep_time > time_out): # We have sleeped for over 10 minutes, the page probably does # not exist. 
break if r.status_code != 200: print("failed to retrieve {}".format(url)) else: return r # return r.text def get_download_link(file_id): base_url = "https://www.thingiverse.com/{}:{}" url = base_url.format("download", file_id) r = requests.head(url) link = r.headers.get("Location", None) if link is not None: __, ext = os.path.splitext(link) if ext.lower() not in [".stl", ".obj", ".ply", ".off"]: return None return link def download_file(file_id, thing_id, output_dir, organize): link = get_download_link(file_id) if link is None: return None __, ext = os.path.splitext(link) output_file = "{}{}".format(file_id, ext.lower()) if organize: output_file = os.path.join(str(thing_id), output_file) output_file = os.path.join(output_dir, output_file) command = "wget -q --tries=20 --waitretry 20 -O {} {}".format(output_file, link) #check_call(command.split()) return output_file, link def save_records(records, key=None): # Enforce kebab case file name output_name = re.sub('(\w) (\w)', r'\1-\2', key).lower()+"-" if key else "" output_name += "summary" with open(output_name+".csv", 'w') as fout: fout.write("thing_id, file_id, file, license, link\n") for entry in records: fout.write(",".join([str(val) for val in entry]) + "\n") def parse_args(): parser = argparse.ArgumentParser( description="Crawl data from thingiverse", epilog="Written by Qingnan Zhou <qnzhou at gmail dot com> Modified by Mike Gleason") parser.add_argument("--output-dir", "-o", help="output directories", default=".") parser.add_argument("--number", "-n", type=int, help="how many files to crawl", default=None) group = parser.add_mutually_exclusive_group() group.add_argument("--search-term", "-s", type=str, default=None, help="term to search for") group.add_argument("--category", "-c", type=str, default=None, help="catergory to search for") parser.add_argument('--organize', dest='organized', default=False, action='store_true', help="organize files by their main category") parser.add_argument("--source", choices=("newest", "featured", "popular", "verified", "made-things", "derivatives", "customizable", "random-things", "firehose"), default="featured"); return parser def main(): parser = parse_args() args = parser.parse_args() if args.number is None and (args.search_term is None and args.category is None): parser.error('Number or Search/Category Term required') output_dir = args.output_dir number = args.number records = crawl_things( args.number, output_dir, args.search_term, args.category, args.source, args.organized) if args.search_term: save_records(records, args.search_term) elif args.category: save_records(records, args.category) else: save_records(records) if __name__ == "__main__": main()
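# Hedged usage sketch (not wired into main()): the parsing helpers above are
# pure string functions and can be sanity-checked in isolation, e.g.
#
#   parse_thing_ids('href="/thing:123456"')            # [123456]
#   parse_file_ids('href="/download:234567"')          # [234567]
#   parse_license('http://creativecommons.org/licenses/by-sa/3.0/')
#                              # 'Creative Commons - Attribution - Share Alike'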
mit
-6,989,201,447,378,768,000
33.64684
158
0.593777
false
3.534319
false
false
false
PyBossa/pybossa
pybossa/model/counter.py
2
1787
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.

from sqlalchemy import Integer
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.dialects.postgresql import TIMESTAMP

from pybossa.core import db
from pybossa.model import DomainObject, make_timestamp


class Counter(db.Model, DomainObject):
    '''A Counter lists the number of task runs for a given Task.'''

    __tablename__ = 'counter'

    #: Counter.ID
    id = Column(Integer, primary_key=True)
    #: UTC timestamp when the counter was created.
    created = Column(TIMESTAMP, default=make_timestamp)
    #: Project.ID that this counter is associated with.
    project_id = Column(Integer, ForeignKey('project.id', ondelete='CASCADE'),
                        nullable=False)
    #: Task.ID that this counter is associated with.
    task_id = Column(Integer, ForeignKey('task.id', ondelete='CASCADE'),
                     nullable=False)
    #: Number of task_runs for this task.
    n_task_runs = Column(Integer, default=0, nullable=False)
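# Hedged usage sketch (not part of the original model; assumes an initialized
# PYBOSSA application and database session):
#
#   counter = Counter(project_id=project.id, task_id=task.id, n_task_runs=0)
#   db.session.add(counter)
#   db.session.commit()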
agpl-3.0
-4,575,830,606,666,470,400
39.613636
77
0.684947
false
4.244656
false
false
false
gogogo/gogogo-hk
gogogo/models/property.py
1
3233
from google.appengine.ext import db from django import forms from django.utils.translation import ugettext_lazy as _ class TransitTypeProperty(db.IntegerProperty): """ Transit Type Property - Storage of transit type """ def __init__ (self,*args,**kwargs): kwargs["choices"] = range(0,8) db.IntegerProperty.__init__(self,*args,**kwargs) def validate(self, value): if isinstance(value,basestring): value = int(value) return super(TransitTypeProperty, self).validate(value) def get_form_field(self, **kwargs): attrs = { 'form_class': forms.ChoiceField, 'choices' : TransitTypeProperty.get_choices() } attrs.update(kwargs) return super(TransitTypeProperty, self).get_form_field(**attrs) def get_choices(): ret = [ (i,TransitTypeProperty.get_type_name(i)) for i in range(0,8)] return ret get_choices = staticmethod(get_choices) def get_basic_type_name_list(): """ Return a list of basic type name """ ret = [TransitTypeProperty.get_type_name(i) for i in range(0,8)] return ret get_basic_type_name_list = staticmethod(get_basic_type_name_list) def get_type_name(type): if type == 0: return _("Tram, Streetcar, Light rail") elif type == 1: return _("Subway, Metro") #Any underground rail system within a metropolitan area elif type == 2: return _("Rail") #Used for intercity or long-distance travel. elif type == 3: return _("Bus") elif type == 4: return _("Ferry") elif type == 5: return _("Cable car") elif type == 6: return _("Gondola, Suspended cable car") elif type == 7: return _("Funicular") else: return "" get_type_name = staticmethod(get_type_name) class PaymentMethodProperty(db.IntegerProperty): """ Payment Method """ def __init__ (self,*args,**kwargs): kwargs["choices"] = range(0,2) if "default" not in kwargs: kwargs["default"] = 0 db.IntegerProperty.__init__(self,*args,**kwargs) def validate(self, value): if isinstance(value,basestring): value = int(value) return super(PaymentMethodProperty, self).validate(value) def get_form_field(self, **kwargs): attrs = { 'form_class': forms.ChoiceField, 'choices' : PaymentMethodProperty.get_choices() } attrs.update(kwargs) return super(PaymentMethodProperty, self).get_form_field(**attrs) def get_choices(): ret = [ (i,PaymentMethodProperty.get_type_name(i)) for i in range(0,2)] return ret get_choices = staticmethod(get_choices) def get_type_name(type): if type == 0: return _("Fare is paid on board") elif type == 1: return _("Fare must be paid before boarding") get_type_name = staticmethod(get_type_name)
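# Hedged usage sketch (not part of the original module; the Route model below
# is hypothetical): both properties plug into an App Engine db.Model like any
# other property, and the display helpers can also be used on their own.
#
#   class Route(db.Model):
#       transit_type = TransitTypeProperty()
#       payment = PaymentMethodProperty()
#
#   TransitTypeProperty.get_type_name(3)      # "Bus"
#   PaymentMethodProperty.get_type_name(1)    # "Fare must be paid before boarding"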
agpl-3.0
2,126,706,793,343,167,200
30.086538
93
0.554903
false
4.102792
false
false
false
Saevon/webdnd
shared/utils/debug_toolbars.py
1
1502
import django
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
import sys


class VersionDebugPanel(DebugPanel):
    '''
    Panel that displays the Django version.
    '''
    name = 'Version'
    has_content = True

    def nav_title(self):
        return _('Versions')

    def nav_subtitle(self):
        return 'Django %s' % django.get_version()

    def url(self):
        return ''

    def title(self):
        return _('Versions')

    def content(self):
        versions = {}
        versions['Web D&D'] = settings.VERSION
        versions['Syncrae'] = settings.SYNCRAE_VERSION

        context = self.context.copy()
        context.update({
            'versions': versions,
            'paths': sys.path,
        })

        return render_to_string('debug_toolbar/panels/versions.html', context)


class SyncraeSpyDebugPanel(DebugPanel):
    '''
    Panel that shows Syncrae Messages
    '''
    name = 'Syncrae'
    has_content = True

    def nav_title(self):
        return _('Syncrae')

    def nav_subtitle(self):
        return ''

    def url(self):
        return ''

    def title(self):
        return _('Syncrae')

    def content(self):
        return render_to_string('debug_syncrae.html', self.context)


class DividerDebugPanel(DebugPanel):

    name = 'Divider'
    has_content = False

    def nav_title(self):
        return ' '
mit
-4,841,625,695,873,232,000
18.25641
78
0.608522
false
3.973545
false
false
false
stefco/geco_data
geco_irig_plot.py
1
5662
#!/usr/bin/env python # (c) Stefan Countryman, 2016-2017 DESC="""Plot an IRIG-B signal read from stdin. Assumes that the timeseries is a sequence of newline-delimited float literals.""" FAST_CHANNEL_BITRATE = 16384 # for IRIG-B, DuoTone, etc. # THE REST OF THE IMPORTS ARE AFTER THIS IF STATEMENT. # Quits immediately on --help or -h flags to skip slow imports when you just # want to read the help documentation. if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description=DESC) # TODO: make this -i and --ifo instead of detector. parser.add_argument("--detector", help=("the detector; used in the title of the output " "plot")) parser.add_argument("-O", "--outfile", help="the filename of the generated plot") parser.add_argument("-T", "--timeseries", help="copy from stdin to stdout while reading", action="store_true") parser.add_argument("-A", "--actualtime", help=("actual time signal was recorded " "(appears in title)")) args = parser.parse_args() # Force matplotlib to not use any Xwindows backend. NECESSARY ON CLUSTER. import matplotlib matplotlib.use('Agg') import sys import time import numpy as np import matplotlib.pyplot as plt import geco_irig_decode def read_timeseries_stdin(num_lines, cat_to_stdout=False): """Read in newline-delimited numerical data from stdin; don't read more than a second worth of data. If cat_to_stdout is True, print data that has been read in back to stdout (useful for piped commands).""" timeseries = np.zeros(num_lines) line = "" i = 0 while i < num_lines: line = float(sys.stdin.readline()) timeseries[i] = line if cat_to_stdout: print(line) i += 1 return timeseries def irigb_decoded_title(timeseries, IFO=None, actual_time=None): """Get a title for an IRIG-B timeseries plot that includes the decoded time in the timeseries itself.""" # get the detector name if IFO is None: detector_suffix = "" else: detector_suffix = " at " + IFO # get the actual time of recording, if provided if actual_time is None: actual_time_str = "" else: actual_time_str = "\nActual Time: {}".format(actual_time) # add title and so on try: decoded_time = geco_irig_decode.get_date_from_timeseries(timeseries) decoded_time_str = decoded_time.strftime('%a %b %d %X %Y') except ValueError as e: decoded_time_str = "COULD NOT DECODE TIME" fmt = "One Second of IRIG-B Signal{}\nDecoded Time: {}{}" return fmt.format(detector_suffix, decoded_time_str, actual_time_str) def irigb_output_filename(outfile=None): """Get the output filename for an IRIG-B plot.""" if outfile is None: output_filename = "irigb-plot-made-at-" + str(time.time()) + ".png" else: output_filename = outfile # append .png if not already there if output_filename.split(".")[-1] != "png": output_filename += ".png" return output_filename def plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1., output_filename=None, overlay=False, linewidth=1): """Plot a timeseries and produce num_subdivs subplots that show equal-sized subdivisions of the full timeseries data to show details (good for high-bitrate timeseries). 
If you want to keep plotting data to the same figure, set 'overlay=True', and the current figure will be plotted to.""" bitrate = int(len(timeseries) / float(dt)) times = np.linspace(0, 1, num=bitrate, endpoint=False) # find max and min values in timeseries; use these to set plot boundaries yrange = timeseries.max() - timeseries.min() ymax = timeseries.max() + 0.1*yrange ymin = timeseries.min() - 0.1*yrange if not overlay: plt.figure() # print("making plot") plt.gcf().set_figwidth(7) plt.gcf().set_figheight(4+1.2*num_subdivs) # ~1.2in height per zoomed plot # plot the full second on the first row; lines should be black ('k' option). plt.subplot(num_subdivs + 1, 1, 1) plt.ylim(ymin, ymax) plt.plot(times, timeseries, 'k', linewidth=linewidth) plt.tick_params(axis='y', labelsize='small') # make num_subdivs subplots to better show the full second for i in range(num_subdivs): # print("making plot " + str(i)) plt.subplot(num_subdivs+1, 1, i+2) plt.ylim(ymin, ymax) plt.xlim(float(i)/num_subdivs, (float(i)+1)/num_subdivs) start = bitrate*i // num_subdivs end = bitrate*(i+1) // num_subdivs plt.plot(times[start:end], timeseries[start:end], 'k', linewidth=linewidth) plt.tick_params(axis='y', labelsize='small') plt.suptitle(title) plt.xlabel("Time since start of second [$s$]") # print("saving plot") plt.subplots_adjust(left=0.125, right=0.9, bottom=0.1, top=0.9, wspace=0.2, hspace=0.5) if not (output_filename is None): plt.savefig(output_filename) return plt if __name__ == '__main__': timeseries = read_timeseries_stdin(FAST_CHANNEL_BITRATE, cat_to_stdout=args.timeseries) title = irigb_decoded_title(timeseries, args.detector, args.actualtime) output_filename = irigb_output_filename(args.outfile) plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1., output_filename=output_filename)
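# Hedged usage sketch (not part of the original script): plot_with_zoomed_views()
# only needs one second of samples in a 1-D numpy array, so it can be exercised
# with synthetic data instead of a real IRIG-B recording, e.g.
#
#   fake = np.sin(2 * np.pi * 100 * np.linspace(0, 1, FAST_CHANNEL_BITRATE,
#                                               endpoint=False))
#   plot_with_zoomed_views(fake, "Synthetic one-second trace",
#                          output_filename=irigb_output_filename("demo"))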
mit
2,641,587,764,302,819,300
40.028986
80
0.628753
false
3.588086
false
false
false
TNick/pyl2extra
pyl2extra/datasets/images.py
1
13590
""" Dataset for images and related functionality. This module does not have dependencies inside pyl2extra package, so you can just copy-paste it inside your source tree. To use this dataset prepare a .csv file with targets (integers or real numbers) on first column and file paths on the second column: .. code:: 0,file1.png 1,file2.png Image file paths are relative to current directory (``os.getcwd()``). The images need not be square and can be in any format recognized by the ``Image`` module. Internally, the images are converted to RGB and are made square for you. Use it in a .yaml file like so: .. code:: dataset: &trndataset !obj:pyl2extra.datasets.images.Images { source: 'train.csv', image_size: 128 } The ``image_size`` can be skipped, in which case the size of the images is derived from first image that is provided. By default the class assumes a classification problem (targets are integers). If you need to uset it in a regression problem create it like so: .. code:: dataset: &trndataset !obj:pyl2extra.datasets.images.Images { source: 'train.csv', image_size: 128, regression: True } As the dataset simply wraps the ``DenseDesignMatrix``, parameters like ``rng`` (random number generator), ``preprocessor`` and ``fit_preprocessor`` can be used and will be passed to ``DenseDesignMatrix`` superclass. """ __authors__ = "Nicu Tofan" __copyright__ = "Copyright 2015, Nicu Tofan" __credits__ = ["Nicu Tofan"] __license__ = "3-clause BSD" __maintainer__ = "Nicu Tofan" __email__ = "[email protected]" import csv import numpy import os from PIL import Image from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix import theano class Images(DenseDesignMatrix): """ A pylearn2 dataset that loads the images from a list or csv file. Please note that - if you use this dataset and your model has a final Softmax layer you should construct it like so (YAML syntax): .. code:: !obj:pylearn2.models.mlp.Softmax { layer_name: 'y', irange: .0, n_classes: %(classes)d, binary_target_dim: 1 } where ``classes`` is the same number of classes passed to ``Images`` constructor. ``binary_target_dim`` is important and failing to set it constructs the wrong architecture, causing errors like: ValueError: VectorSpace(dim=1, dtype=float32) with total dimension 1 can't format a batch into VectorSpace(dim=X, dtype=float32) because its total dimension is X. Parameters ---------- source: OrderedDict, dict, str, tuple, list This argument provides the input images and (optionally) associated categories. The meaning of the argument depends on the data type: - if ``source`` is a string, it is interpreted to be the path towards a csv file; the file must NOT have a header, first column must contain the targets (classes or values) and second column must contain the paths for the image files; - if ``source`` is a dictionary, the keys must be the paths for image files, ``Image`` instances or numpy arrays and the values must be the classes or values (None or empty string if this instance does not provide the labels); - a tuple or list must have exactly one or two members: first one must be a list or tuple of image paths or Images or numpy arrays, while second one (optional) has the targets (classes as integers or real values). image_size: int, optional The size of the images in the final dataset. All images will be resized to be ``image_size`` x ``image_size`` pixels. 
classes: int, optional If this is a classification problem the parameter should be used to indicate the total number of classes and targets are expected to be integers in the range ``[0; classes-1]``. If this is a regression problem the parameter should be ``None`` and targets are expected to be real numbers. rng: object, optional A random number generator used for picking random \ indices into the design matrix when choosing minibatches. preprocessor: Preprocessor, optional Preprocessor to apply to images. fit_preprocessor: bool, optional Whether preprocessor can fit parameters when applied to training data. """ def __init__(self, source, image_size=None, classes=None, rng=None, preprocessor=None, fit_preprocessor=False): #: preserve original argument for future reference self.source = source #: Number of classes (None for regression) self.classes = classes # all images are loaded in ``ind`` variable ind = _init_input(source) # DenseDesignMatrix expects us to provide a numpy array # we choose to have number of examples on first axis ('b'), # then rows and columns of the image, then the channels # always 3 in our case self.axes = ('b', 0, 1, 'c') if image_size is None: dense_x = None else: dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3), dtype='uint8') categories = [] has_targets = False for i, (img, ctg) in enumerate(ind): if isinstance(img, Image.Image): img = numpy.array(img) width = img.shape[1] height = img.shape[0] largest = max(width, height) if image_size is None: # if the user did not specify an image size we determine # the size using the first image that we encounter; this is # usefull if all images are already of required size, # for example image_size = largest dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3), dtype='uint8') imgin = img # do we need to enlarge / shrink the image? elif largest != image_size: wpercent = image_size / float(largest) width = int(width * wpercent) height = int(height * wpercent) largest = max(width, height) # inefficient? could use scipy.ndimage.zoom. img_tmp = Image.fromarray(img) img_tmp = img_tmp.resize((width, height), Image.ANTIALIAS) imgin = numpy.array(img_tmp) else: imgin = img delta_x = (largest - width) / 2 delta_y = (largest - height) / 2 delta_x2 = delta_x + width delta_y2 = delta_y + height #print delta_x, delta_y, delta_x2, delta_y2, width, height dense_x[i, delta_y:delta_y2, delta_x:delta_x2, :] = imgin categories.append(ctg) if ctg != '': has_targets = True dense_x = numpy.cast[theano.config.floatX](dense_x) # if we have categories / values convert them to proper format if has_targets: if classes is None: # in regression we expect real values dense_y = numpy.empty(shape=(len(ind), 1), dtype=theano.config.floatX) for i, ctg in enumerate(categories): dense_y[i, 0] = float(ctg) else: # in classification we expect integers dense_y = numpy.empty(shape=(len(ind), 1), dtype=int) for i, ctg in enumerate(categories): dense_y[i, 0] = int(ctg) else: dense_y = None if rng is None: rng = DenseDesignMatrix._default_seed # everything else is handled by the DenseDesignMatrix superclass super(Images, self).__init__(topo_view=dense_x, y=dense_y, axes=self.axes, view_converter=None, preprocessor=preprocessor, fit_preprocessor=fit_preprocessor, X_labels=None, y_labels=classes if has_targets else None) def _init_input(source): """ Homogenize sources. 
""" if isinstance(source, basestring): # this is a csv file that we're going to read result = _load_list(_load_csv(source)) elif isinstance(source, dict): # keys are file names, values are classes result = _load_list(source.items()) elif isinstance(source, (list, tuple)): # one item lists the files, the other lists the classes if len(source) == 1: result = _load_list([(src, None) for src in source[0]]) elif len(source) == 2: if len(source[0]) == len(source[1]): result = _load_list(zip(source[0], source[1])) else: raise ValueError("Lists/tuples provded to Images class " "constructor are expected to have " "same length (%d != %d)" % (len(source[0]), len(source[1]))) else: raise ValueError("Lists/tuples provided to Images class " "constructor are expected to have one " "(images only) or two members (images" " and classes); the input has %d members." % len(source)) else: raise ValueError("Images class expects for its `source` argument " "a file path (string), a dictionary of " "file:class pairs, or a pair of lists (tuples); " "%s is not supported" % str(source.__class__)) return result def _load_csv(csv_path): """ Internal function for loading the content from a .csv file. Parameters ---------- csv_path: str The path towards the .csv file to read. Returns ------- result: list of tuples The method creates a list of tuples that should be passed to `_load_list()`. """ # we're going to accumulate files and categories here result = [] # compute absolute path of the source csv file csv_path = os.path.abspath(csv_path) with open(csv_path, 'rt') as fhand: # the reader is flexible, allowing delimiters # other than comma; quotation can also be customized csvr = csv.reader(fhand, delimiter=',', quotechar='"') # the reader will give us a list for each row of # the source file for row in csvr: # we're going to skip empty rows without warning if len(row) == 0: continue # we could skip the header here, if present; we # could even detect the column index from its # name; but we try to keep the things simple # class/value is always first, file path second result.append((row[1], row[0])) return result def _load_list(srclist): """ Internal function for loading the content from a list. Image files are converted to `numpy.ndarray`; empty classes are normalized to a string of lenghth 0. Parameters ---------- srclist: list of tuples A list of tuples, with first entry in tuple being a string, an Image or `numpy.ndarray` instances and second being classes (None for no class). Returns ------- result: list of tuples The method creates a list of tuples, with first entry in tuple being `numpy.ndarray` instances and second being targets (None for no target) - integer classes (classification) or real values (regression). """ # we're going to accumulate Images and categories here result = [] for img, cls in srclist: if isinstance(img, basestring): imgin = Image.open(img) elif isinstance(img, numpy.ndarray): imgin = Image.fromarray(img) elif isinstance(img, Image.Image): imgin = img elif Image.isImageType(img): imgin = img else: raise ValueError("Valid input for images are strings (a " "path towards a file), pil images " "and numpy arrays; %s is not supported" % str(img.__class__)) if cls is None: cls = '' imgin = imgin.convert('RGB') result.append((numpy.array(imgin), cls)) return result def one_image(image, image_size=None, classes=None, rng=None, preprocessor=None, fit_preprocessor=False): """ Convenience function that creates an Images dataset from a single image. 
Parameters ---------- image: string, image or numpy.ndarray The image to use as source. See :class:`Images` for a description of other parameters. """ return Images(source=((image,),), image_size=image_size, classes=classes, rng=rng, preprocessor=preprocessor, fit_preprocessor=fit_preprocessor)
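A minimal usage sketch for the dataset class and helper defined above, assuming the module has been copied into the project as images.py (its own docstring notes it can be copy-pasted because it has no pyl2extra-internal dependencies); the in-memory arrays, sizes and targets are invented purely for illustration, and pylearn2/theano must be installed for it to run.

# Illustrative only: ``images`` is the module above saved locally; the
# arrays stand in for real photographs, which could equally be given as
# file paths or PIL Image instances.
import numpy
from images import Images, one_image

img_a = numpy.random.randint(0, 255, size=(32, 32, 3)).astype('uint8')
img_b = numpy.random.randint(0, 255, size=(32, 32, 3)).astype('uint8')

# Classification: two classes, integer targets in [0, classes-1].
trainset = Images(source=([img_a, img_b], [0, 1]),
                  image_size=16, classes=2)
print(trainset.X.shape)
print(trainset.y.shape)

# Regression: classes=None switches the targets to real values.
regset = Images(source=([img_a, img_b], [0.5, 1.5]),
                image_size=16, classes=None)

# Convenience wrapper for a single, unlabelled image.
single = one_image(img_a, image_size=16)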
bsd-3-clause
5,339,343,264,149,101,000
36.960894
79
0.576306
false
4.471866
false
false
false
mahak/cloudify-cli
cloudify_cli/commands/users.py
1
9023
######## # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############ from .. import env from ..cli import cfy from ..table import print_data, print_single from ..utils import handle_client_error USER_COLUMNS = ['username', 'groups', 'role', 'group_system_roles', 'active', 'last_login_at', 'is_locked'] GET_DATA_COLUMNS = ['user_tenants', 'group_tenants'] NO_GET_DATA_COLUMNS = ['tenants'] USER_LABELS = {'role': 'system wide role', 'group_system_roles': 'system wide roles via groups'} def _format_user(user): user_tenants = dict( (str(tenant), str(user.user_tenants[tenant])) for tenant in user.user_tenants ) group_tenants = dict( (str(tenant), dict( (str(role), [str(group) for group in user.group_tenants[tenant][role]]) for role in user.group_tenants[tenant] )) for tenant in user.group_tenants ) user['user_tenants'] = str(user_tenants)[1:-1] user['group_tenants'] = str(group_tenants)[1:-1] return user def _format_group_system_roles(user): group_system_roles = dict( (str(role), [str(user_group) for user_group in user['group_system_roles'][role]]) for role in user['group_system_roles'] ) user['group_system_roles'] = str(group_system_roles).strip('{}') return user @cfy.group(name='users') @cfy.options.common_options def users(): """Handle Cloudify users """ if not env.is_initialized(): env.raise_uninitialized() @users.command(name='list', short_help='List users [manager only]') @cfy.options.sort_by('username') @cfy.options.descending @cfy.options.common_options @cfy.options.get_data @cfy.options.search @cfy.options.pagination_offset @cfy.options.pagination_size @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def list(sort_by, descending, get_data, search, pagination_offset, pagination_size, logger, client): """List all users """ logger.info('Listing all users...') users_list = client.users.list( sort=sort_by, is_descending=descending, _get_data=get_data, _search=search, _offset=pagination_offset, _size=pagination_size ) total = users_list.metadata.pagination.total # copy list columns = [] + USER_COLUMNS users_list = [_format_group_system_roles(user) for user in users_list] if get_data: users_list = [_format_user(user) for user in users_list] columns += GET_DATA_COLUMNS else: columns += NO_GET_DATA_COLUMNS print_data(columns, users_list, 'Users:', labels=USER_LABELS) logger.info('Showing {0} of {1} users'.format(len(users_list), total)) @users.command(name='create', short_help='Create a user [manager only]') @cfy.argument('username', callback=cfy.validate_name) @cfy.options.common_options @cfy.options.security_role @cfy.options.password @cfy.options.tenant_name(required=False) @cfy.options.user_tenant_role(required=False, options_flags=['-l', '--user-tenant-role']) @cfy.assert_manager_active() @cfy.pass_client(use_tenant_in_header=False) @cfy.pass_logger def create(username, security_role, password, tenant_name, user_tenant_role, logger, client): """Create a new user on the manager `USERNAME` is the username of the user """ 
client.users.create(username, password, security_role) logger.info('User `{0}` created with `{1}` security role'.format( username, security_role)) if tenant_name and user_tenant_role: client.tenants.add_user(username, tenant_name, user_tenant_role) logger.info( 'User `{0}` added successfully to tenant `{1}` with `{2}` role' .format(username, tenant_name, user_tenant_role)) @users.command(name='set-password', short_help='Set a new password for a user [manager only]') @cfy.argument('username', callback=cfy.validate_name) @cfy.options.password @cfy.options.common_options @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def set_password(username, password, logger, client): """Set a new password for a user `USERNAME` is the username of the user """ logger.info('Setting new password for user {0}...'.format(username)) client.users.set_password(username, password) logger.info('New password set') @users.command(name='set-role', short_help='Set a new role for a user [manager only]') @cfy.argument('username', callback=cfy.validate_name) @cfy.options.security_role @cfy.options.common_options @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def set_role(username, security_role, logger, client): """Set a new role for a user `USERNAME` is the username of the user """ logger.info('Setting new role for user {0}...'.format(username)) client.users.set_role(username, security_role) logger.info('New role `{0}` set'.format(security_role)) @users.command(name='get', short_help='Get details for a single user [manager only]') @cfy.argument( 'username', callback=cfy.validate_name, default=env.get_username()) @cfy.options.common_options @cfy.options.get_data @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def get(username, get_data, logger, client): """Get details for a single user `USERNAME` is the username of the user. 
(default: current user) """ logger.info('Getting info for user `{0}`...'.format(username)) if username == env.get_username(): user_details = client.users.get_self(_get_data=get_data) else: user_details = client.users.get(username, _get_data=get_data) # copy list columns = [] + USER_COLUMNS if get_data: _format_user(user_details) columns += GET_DATA_COLUMNS else: columns += NO_GET_DATA_COLUMNS print_single(columns, user_details, 'Requested user info:', labels=USER_LABELS) @users.command(name='delete', short_help='Delete a user [manager only]') @cfy.argument('username', callback=cfy.validate_name) @cfy.options.common_options @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def delete(username, logger, client): """Delete a user `USERNAME` is the username of the user """ logger.info('Deleting user `{0}`...'.format(username)) client.users.delete(username) logger.info('User removed') @users.command(name='activate', short_help='Make an inactive user active [manager only]') @cfy.argument('username') @cfy.options.common_options @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def activate(username, logger, client): """Activate a user `USERNAME` is the username of the user """ graceful_msg = 'User `{0}` is already active'.format(username) logger.info('Activating user `{0}`...'.format(username)) with handle_client_error(409, graceful_msg, logger): client.users.activate(username) logger.info('User activated') @users.command(name='deactivate', short_help='Make an active user inactive [manager only]') @cfy.argument('username') @cfy.options.common_options @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def deactivate(username, logger, client): """Deactivate a user `USERNAME` is the username of the user """ graceful_msg = 'User `{0}` is already inactive'.format(username) logger.info('Deactivating user `{0}`...'.format(username)) with handle_client_error(409, graceful_msg, logger): client.users.deactivate(username) logger.info('User deactivated') @users.command(name='unlock', short_help='Unlock a locked user [manager only]') @cfy.argument('username') @cfy.options.common_options @cfy.assert_manager_active() @cfy.pass_client() @cfy.pass_logger def unlock(username, logger, client): """Unlock a locked user `USERNAME` is the username of the user """ graceful_msg = 'User `{0}` is already unlocked'.format(username) logger.info('Unlocking user `{0}`...'.format(username)) with handle_client_error(409, graceful_msg, logger): client.users.unlock(username) logger.info('User unlocked')
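To make the two formatting helpers at the top of this module concrete, here is a hedged sketch meant to be run alongside them; FakeUser is a hypothetical stand-in for the cloudify-rest-client user object (which allows both attribute and item access), and every tenant, group and role name below is invented for illustration.

# Hypothetical stand-in for the REST client's user object; only the
# access patterns used by _format_user / _format_group_system_roles
# are reproduced here.
class FakeUser(dict):
    def __getattr__(self, name):
        return self[name]


user = FakeUser({
    'username': 'alice',
    'user_tenants': {'default_tenant': 'user'},
    'group_tenants': {'default_tenant': {'viewer': ['devs']}},
    'group_system_roles': {'sys_admin': ['ops']},
})

user = _format_group_system_roles(user)
user = _format_user(user)

print(user['group_system_roles'])  # 'sys_admin': ['ops']
print(user['user_tenants'])        # 'default_tenant': 'user'
print(user['group_tenants'])       # 'default_tenant': {'viewer': ['devs']}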
apache-2.0
-221,964,532,561,751,600
30.883392
78
0.654217
false
3.5
false
false
false
diefenbach/django-lfs
lfs/marketing/models.py
1
1821
# django imports from django.db import models from django.utils.translation import ugettext_lazy as _, ugettext # lfs imports from lfs.catalog.models import Product from lfs.order.models import Order class Topseller(models.Model): """Selected products are in any case among topsellers. """ product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product")) position = models.PositiveSmallIntegerField(_(u"Position"), default=1) class Meta: ordering = ["position"] app_label = 'marketing' def __str__(self): return u"%s (%s)" % (self.product.name, self.position) class ProductSales(models.Model): """Stores totals sales per product. """ product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product")) sales = models.IntegerField(_(u"sales"), default=0) class Meta: app_label = 'marketing' class FeaturedProduct(models.Model): """Featured products are manually selected by the shop owner """ product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product")) position = models.PositiveSmallIntegerField(_(u"Position"), default=1) active = models.BooleanField(_(u"Active"), default=True) class Meta: ordering = ["position"] app_label = 'marketing' def __str__(self): return u"%s (%s)" % (self.product.name, self.position) class OrderRatingMail(models.Model): """Saves whether and when a rating mail has been send for an order. """ order = models.ForeignKey(Order, models.CASCADE, verbose_name=_(u"Order")) send_date = models.DateTimeField(auto_now=True) def __str__(self): return u"%s (%s)" % (self.order.id, self.send_date.strftime(ugettext('DATE_FORMAT'))) class Meta: app_label = 'marketing'
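A hedged ORM sketch for the models above; it assumes a configured django-lfs project with at least one Product in the catalog, and the positions and quantities are arbitrary.

from lfs.catalog.models import Product
from lfs.marketing.models import FeaturedProduct, ProductSales, Topseller

product = Product.objects.first()

# Pin the product near the top of the topseller list regardless of sales.
Topseller.objects.create(product=product, position=1)

# Accumulate sales totals for the same product.
sales, _ = ProductSales.objects.get_or_create(product=product)
sales.sales += 3
sales.save()

# Featured products come back ordered by position (see Meta.ordering).
for featured in FeaturedProduct.objects.filter(active=True):
    print(featured)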
bsd-3-clause
-2,391,349,366,419,636,700
29.864407
93
0.667216
false
3.891026
false
false
false
benregn/itu-courses
itu/pipelines.py
1
1027
import pymongo

from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log


class MongoDBPipeline(object):
    def __init__(self):
        connection = pymongo.Connection(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT'])
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        valid = True
        for data in item:
            # iterating an item yields its field names, so the value has to
            # be looked up explicitly before checking that it is not empty;
            # any other validation could be added here as well
            if not item.get(data):
                valid = False
                raise DropItem(
                    "Missing %s course from %s" % (data, item['url']))
        if valid:
            self.collection.insert(dict(item))
            log.msg("Item written to MongoDB database %s/%s" %
                    (settings['MONGODB_DB'], settings['MONGODB_COLLECTION']),
                    level=log.DEBUG, spider=spider)
        return item
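The pipeline above only reads its configuration from the Scrapy settings object, so enabling it is a matter of project settings; the concrete host, database and collection names below are assumptions, and on newer Scrapy versions ITEM_PIPELINES is a dict mapping the class path to a priority rather than a list.

# settings.py (sketch) -- values read by MongoDBPipeline above
ITEM_PIPELINES = ['itu.pipelines.MongoDBPipeline']

MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'itu'
MONGODB_COLLECTION = 'courses'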
mit
7,761,366,228,736,481,000
33.233333
77
0.581305
false
4.426724
false
false
false
robertnishihara/ray
streaming/python/tests/test_word_count.py
1
1689
import os import ray from ray.streaming import StreamingContext def test_word_count(): ray.init(_load_code_from_local=True) ctx = StreamingContext.Builder() \ .build() ctx.read_text_file(__file__) \ .set_parallelism(1) \ .flat_map(lambda x: x.split()) \ .map(lambda x: (x, 1)) \ .key_by(lambda x: x[0]) \ .reduce(lambda old_value, new_value: (old_value[0], old_value[1] + new_value[1])) \ .filter(lambda x: "ray" not in x) \ .sink(lambda x: print("result", x)) ctx.submit("word_count") import time time.sleep(3) ray.shutdown() def test_simple_word_count(): ray.init(_load_code_from_local=True) ctx = StreamingContext.Builder() \ .build() sink_file = "/tmp/ray_streaming_test_simple_word_count.txt" if os.path.exists(sink_file): os.remove(sink_file) def sink_func(x): with open(sink_file, "a") as f: line = "{}:{},".format(x[0], x[1]) print("sink_func", line) f.write(line) ctx.from_values("a", "b", "c") \ .set_parallelism(1) \ .flat_map(lambda x: [x, x]) \ .map(lambda x: (x, 1)) \ .key_by(lambda x: x[0]) \ .reduce(lambda old_value, new_value: (old_value[0], old_value[1] + new_value[1])) \ .sink(sink_func) ctx.submit("word_count") import time time.sleep(3) ray.shutdown() with open(sink_file, "r") as f: result = f.read() assert "a:2" in result assert "b:2" in result assert "c:2" in result if __name__ == "__main__": test_word_count() test_simple_word_count()
apache-2.0
-5,655,306,037,818,127,000
27.15
63
0.536412
false
3.087751
false
false
false
letolab/airy
airy/utils/cache.py
1
9676
""" This module contains helper functions for controlling caching. It does so by managing the "Vary" header of responses. It includes functions to patch the header of response objects directly and decorators that change functions to do that header-patching themselves. For information on the Vary header, see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44 Essentially, the "Vary" HTTP header defines which headers a cache should take into account when building its cache key. Requests with the same path but different header content for headers named in "Vary" need to get different cache keys to prevent delivery of wrong content. An example: i18n middleware would need to distinguish caches by the "Accept-language" header. """ import re import time from airy.core.conf import settings from airy.core.cache import get_cache from airy.utils.encoding import smart_str, iri_to_uri from airy.utils.http import http_date from airy.utils.hashcompat import md5_constructor from airy.utils.translation import get_language from airy.http import HttpRequest cc_delim_re = re.compile(r'\s*,\s*') def patch_cache_control(response, **kwargs): """ This function patches the Cache-Control header by adding all keyword arguments to it. The transformation is as follows: * All keyword parameter names are turned to lowercase, and underscores are converted to hyphens. * If the value of a parameter is True (exactly True, not just a true value), only the parameter name is added to the header. * All other parameters are added with their value, after applying str() to it. """ def dictitem(s): t = s.split('=', 1) if len(t) > 1: return (t[0].lower(), t[1]) else: return (t[0].lower(), True) def dictvalue(t): if t[1] is True: return t[0] else: return t[0] + '=' + smart_str(t[1]) if response.has_header('Cache-Control'): cc = cc_delim_re.split(response['Cache-Control']) cc = dict([dictitem(el) for el in cc]) else: cc = {} # If there's already a max-age header but we're being asked to set a new # max-age, use the minimum of the two ages. In practice this happens when # a decorator and a piece of middleware both operate on a given view. if 'max-age' in cc and 'max_age' in kwargs: kwargs['max_age'] = min(cc['max-age'], kwargs['max_age']) # Allow overriding private caching and vice versa if 'private' in cc and 'public' in kwargs: del cc['private'] elif 'public' in cc and 'private' in kwargs: del cc['public'] for (k, v) in kwargs.items(): cc[k.replace('_', '-')] = v cc = ', '.join([dictvalue(el) for el in cc.items()]) response['Cache-Control'] = cc def get_max_age(response): """ Returns the max-age from the response Cache-Control header as an integer (or ``None`` if it wasn't found or wasn't an integer. """ if not response.has_header('Cache-Control'): return cc = dict([_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control'])]) if 'max-age' in cc: try: return int(cc['max-age']) except (ValueError, TypeError): pass def patch_response_headers(response, cache_timeout=None): """ Adds some useful headers to the given HttpResponse object: ETag, Last-Modified, Expires and Cache-Control Each header is only added if it isn't already set. cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used by default. 
""" if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS if cache_timeout < 0: cache_timeout = 0 # Can't have max-age negative if settings.USE_ETAGS and not response.has_header('ETag'): response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest() if not response.has_header('Last-Modified'): response['Last-Modified'] = http_date() if not response.has_header('Expires'): response['Expires'] = http_date(time.time() + cache_timeout) patch_cache_control(response, max_age=cache_timeout) def add_never_cache_headers(response): """ Adds headers to a response to indicate that a page should never be cached. """ patch_response_headers(response, cache_timeout=-1) def patch_vary_headers(response, newheaders): """ Adds (or updates) the "Vary" header in the given HttpResponse object. newheaders is a list of header names that should be in "Vary". Existing headers in "Vary" aren't removed. """ # Note that we need to keep the original order intact, because cache # implementations may rely on the order of the Vary contents in, say, # computing an MD5 hash. if response.has_header('Vary'): vary_headers = cc_delim_re.split(response['Vary']) else: vary_headers = [] # Use .lower() here so we treat headers as case-insensitive. existing_headers = set([header.lower() for header in vary_headers]) additional_headers = [newheader for newheader in newheaders if newheader.lower() not in existing_headers] response['Vary'] = ', '.join(vary_headers + additional_headers) def has_vary_header(response, header_query): """ Checks to see if the response has a given header name in its Vary header. """ if not response.has_header('Vary'): return False vary_headers = cc_delim_re.split(response['Vary']) existing_headers = set([header.lower() for header in vary_headers]) return header_query.lower() in existing_headers def _i18n_cache_key_suffix(request, cache_key): """If enabled, returns the cache key ending with a locale.""" if settings.USE_I18N: # first check if LocaleMiddleware or another middleware added # LANGUAGE_CODE to request, then fall back to the active language # which in turn can also fall back to settings.LANGUAGE_CODE cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language()) return cache_key def _generate_cache_key(request, method, headerlist, key_prefix): """Returns a cache key from the headers given in the header list.""" ctx = md5_constructor() for header in headerlist: value = request.META.get(header, None) if value is not None: ctx.update(value) path = md5_constructor(iri_to_uri(request.get_full_path())) cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % ( key_prefix, request.method, path.hexdigest(), ctx.hexdigest()) return _i18n_cache_key_suffix(request, cache_key) def _generate_cache_header_key(key_prefix, request): """Returns a cache key for the header cache.""" path = md5_constructor(iri_to_uri(request.get_full_path())) cache_key = 'views.decorators.cache.cache_header.%s.%s' % ( key_prefix, path.hexdigest()) return _i18n_cache_key_suffix(request, cache_key) def get_cache_key(request, key_prefix=None, method='GET', cache=None): """ Returns a cache key based on the request path and query. It can be used in the request phase because it pulls the list of headers to take into account from the global path registry and uses those to build a cache key to check against. If there is no headerlist stored, the page needs to be rebuilt, so this function returns None. 
""" if key_prefix is None: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX cache_key = _generate_cache_header_key(key_prefix, request) if cache is None: cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS) headerlist = cache.get(cache_key, None) if headerlist is not None: return _generate_cache_key(request, method, headerlist, key_prefix) else: return None def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None): """ Learns what headers to take into account for some request path from the response object. It stores those headers in a global path registry so that later access to that path will know what headers to take into account without building the response object itself. The headers are named in the Vary header of the response, but we want to prevent response generation. The list of headers to use for cache key generation is stored in the same cache as the pages themselves. If the cache ages some data out of the cache, this just means that we have to build the response once to get at the Vary header and so at the list of headers to use for the cache key. """ if key_prefix is None: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS cache_key = _generate_cache_header_key(key_prefix, request) if cache is None: cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS) if response.has_header('Vary'): headerlist = ['HTTP_'+header.upper().replace('-', '_') for header in cc_delim_re.split(response['Vary'])] cache.set(cache_key, headerlist, cache_timeout) return _generate_cache_key(request, request.method, headerlist, key_prefix) else: # if there is no Vary header, we still need a cache key # for the request.get_full_path() cache.set(cache_key, [], cache_timeout) return _generate_cache_key(request, request.method, [], key_prefix) def _to_tuple(s): t = s.split('=',1) if len(t) == 2: return t[0].lower(), t[1] return t[0].lower(), True
bsd-2-clause
-6,054,072,846,640,393,000
39.655462
88
0.671042
false
3.816963
false
false
false
QQuick/Transcrypt
transcrypt/modules/org/transcrypt/autotester/__init__.py
1
12645
# First run a test from the command prompt, generating an HTML file. # The output of the test is stored in a DIV. # Also the script is automatically included in the HTML file. # Loading the HTML file will run the script. # This will compare the output of the script running in the browswer to the output in the DIV. # If those two match, the test reports OK, else it reports failure. from org.transcrypt.stubs.browser import __main__, __envir__, __pragma__ from org.transcrypt.autotester.html import HTMLGenerator, DataConverter, JSTesterUI, itemsAreEqual # Don't import __envir__ from __base__ since it will overwrite __buildin__.__envir__ in the browser # Import from stubs will be skipped in the browser # ... The ice is a bit thin here __pragma__ ('nokwargs') import itertools def getFileLocation(ancestor): """ This function needs to crawl up the stack and find out where the ancestor caller of this function was in the source code of either the python or javascript, depending on environment. @param ancestor the ancestor of this function that we want to capture file information about. @return string indicating the file position and line number """ if __envir__.executor_name == __envir__.transpiler_name: # js s = None __pragma__('js', '{}', ''' var e = new Error(); if ( ! e.stack ) { console.log("MAJOR ISSUE: Browser Error lacks Stack"); } else { s = e.stack; } ''') # Now we will process the stack to find the grandparent # calling function # @note - I'm explicitly not including a 're' module # dependency here frames = None __pragma__('js', '{}', ''' var linereg = new RegExp("\\n\\r|\\n", "g"); frames = s.toString().split(linereg); ''') if ( frames is None or (len(frames) < 2)): __pragma__('js', '{}', 'console.log("Failed to Split Stack");') return("UNKNOWN:???") # @note - if the call stack in transcrypts javascript # translation changes then this index may need to change # @todo - need more work here to determine this because # this is fragile gpFrame = frames[(ancestor*2 + 1)] # This regex splits the string coming from the javascript # stacktrace so that we can connect the file and line number # runTests (http://localhost:8080/run/autotest.js:3159:8) # func URL filename lineno:colno # Group 1 = function # Group 2 & 3 = protocol and hostname # Group 4 = Path on this host (filename is at the end) # Group 5 = lineno # Group 6 = column number in file frameReg = r"([^(]*)\(?([^:]*:)\/{2,3}([^:/]*:?)([^:]*):(\d+):(\d+)" m = None __pragma__('js', '{}', ''' var r = new RegExp(frameReg); m = r.exec(gpFrame); ''') if m: filepath = m[4] # Split the filepath and take the last element # to the get filename pathParts = filepath.split("/") filename = pathParts[len(pathParts)-1] lineno = m[5] return( "{}:{}".format(filename, lineno) ) else: __pragma__('js', '{}', 'console.log("Failed to Match Frame", gpFrame);') return("UNKNOWN:???") #ELSE # Needed because Transcrypt imports are compile time __pragma__("skip") from inspect import getframeinfo, stack s = stack() caller = getframeinfo(s[ancestor][0]) # Trim the file name path so that we don't get # a lot of unnecessary content filepath = caller.filename # @todo - this is a hack - we should use os.path pathParts = filepath.split('/') filename = "/".join(pathParts[-2:]) return( "%s:%d" % (filename, caller.lineno)) __pragma__ ('noskip') class AutoTester: """ Main testing class for comparing CPython to Transcrypt. 
This class is primarily used by calling the "check" method to confirm that the result is the same in both environments and "done" when all checks for a particular module have been completed. """ def __init__ (self, symbols = []): self.symbols = symbols # refDict/testDict contains the test results # of each testlet identified by name as the key self._currTestlet = "UNKNOWN" self.testDict = {} self.refDict = {} if __envir__.executor_name == __envir__.transpiler_name: self.ui = JSTesterUI() else: self.ui = None def sortedRepr (self, any): # When using sets or dicts, use elemens or keys # of one type, in sort order def tryGetNumKey (key): if type (key) == str: # Try to interpret key as numerical, see comment with repr function in __builtins__ try: return int (key) except: try: return float (key) except: return key else: return key if type (any) == dict: return '{' + ', '.join ([ '{}: {}'.format (repr (key), repr (any [key])) for index, key in enumerate (sorted ([tryGetNumKey (key) for key in any.keys ()], key = lambda aKey: str (aKey))) ]) + '}' elif type (any) == set: if len (any): return '{' + ', '.join (sorted ([str (item) for item in list (any)])) + '}' else: return repr (any) elif type (any) == range: return repr (list (any)) else: return repr (any) __pragma__('kwargs') def check (self, *args, ancestor = 2): """ Given a set of values from either the python or transcrypt environments, we log the position of the check call in the test and representative values of the passed arguments for later comparison. """ position=getFileLocation(ancestor) # N.B. stubs.browser provides a special sorting repr item = ' '.join ([self.sortedRepr (arg) for arg in args]) if __envir__.executor_name == __envir__.transpiler_name: self.testDict[self._currTestlet].append((position,item)) else: self.refDict[self._currTestlet].append((position,item)) __pragma__('nokwargs') def expectException(self, func): """ This method attempts to call the passed method and checks to see whether an exception was generated. @return string indicating "no exception" or "exception" """ try: func() return("no exception") except Exception as exc: return("exception") def throwToError(self, func): """ This function invokes the passed function and then converts an exception to an error response so that the unit test can continue even in the case where an exception may or may not occur. """ try: return(func()) except Exception as exc: return (None, "!!!{}".format(str(exc))) def checkEval(self, func): """ Check the result of the passed function which is invoked without arguments. If this function throws an exception, that exception is caught and converted to an error with can be compared against the result. This allows the user to control for exception that may or may not be generated in the unit tests """ ret = self.throwToError(func) self.check(ret, ancestor = 3) def checkPad(self, val, count): """ This method is to help manage flow control in unit tests and keep all unit tests aligned """ for i in range(0, count): self.check(val) def _getTotalErrorCnt(self, testData, refData): """ This method determines the total number of non-matching values in the test and reference data for a particular module. 
""" errCount = 0 for i,(refPos, refItem) in enumerate(refData): try: testPos,testItem = testData[i] if not itemsAreEqual (testItem, refItem): errCount+=1 except: errCount+=1 return(errCount) def compare (self): # Load the python reference data from the hidden HTML div dc = DataConverter() self.refDict = dc.getPythonResults() totalErrors = 0 sKeys = sorted(self.refDict.keys()) for key in sKeys: refData = self.refDict[key] try: testData = self.testDict[key] if ( testData is None ): raise KeyError("No Test Data Module: {}".format(key)) except KeyError: # No Test Data found for this key - we will populate with # errors for all ref data self.ui.appendSeqRowName(key, len(refData)) for i,(refPos, refItem) in enumerate(refData): self.ui.appendTableResult(key, None, None, refPos, refItem, False) continue # know we have testData so let's determine the total number of # errors for this test module. This will allow us to both set # the num of errors in the test module header row and set the # rows to the appropriate initial collapsed/expanded state. errCount= self._getTotalErrorCnt(testData, refData) collapse = (errCount == 0) self.ui.appendSeqRowName(key, errCount) # Now we will populate the table with all the rows # of data fro the comparison for i,(refPos, refItem) in enumerate(refData): try: # This will throw if testData's length is # shorter than refData's testPos,testItem = testData[i] except: testPos = None testItem = None self.ui.appendTableResult( key, testPos, testItem, refPos, refItem, collapse ) totalErrors += errCount self.ui.setOutputStatus( totalErrors == 0 ) def _cleanName(self, name): """ Clean the passed name of characters that won't be allowed in CSS class or HTML id strings. """ # Convert testletName to replace any of the characters that # are not acceptable in a CSS class or HTML id - this is to # make our lives easier # @note - I'm SPECIFICALLY not using a regex here because the # regex engine module is still under dev and could possibly # have issues ret = name invalidChars = [ '~', '!', '@', '$', '%', '^', '&', '*', '(', ')', '+', '=', ',', '.', '/', "'", ';', ':', '"', '?', '>', '<', '[', ']', '\\', '{', '}', '|', '`', '#', " ", ] for ch in invalidChars: ret = ret.replace(ch, "_") return(ret) def run (self, testlet, testletName): testletName = self._cleanName(testletName) self._currTestlet = testletName if __envir__.executor_name == __envir__.transpiler_name: self.testDict[self._currTestlet] = [] else: self.refDict[self._currTestlet] = [] try: testlet.run (self) except Exception as exc: if ( self.ui is not None ): self.ui.setOutputStatus(False) self.ui.showException(testletName, exc) else: # Error - No UI yet, reraise specific exception to enable finding out why raise def done (self): if __envir__.executor_name == __envir__.transpiler_name: self.compare () else: fnameBase = __main__.__file__.replace ('\\', '/') hg = HTMLGenerator(fnameBase) hg.generate_html(self.refDict)
apache-2.0
-3,752,604,880,992,353,000
37.515625
129
0.535152
false
4.482453
true
false
false
walterbender/Pippy
pippy_app.py
2
59457
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # Copyright (C) 2007,2008,2009 Chris Ball, based on Collabora's # "hellomesh" demo. # # Copyright (C) 2013,14 Walter Bender # Copyright (C) 2013,14 Ignacio Rodriguez # Copyright (C) 2013 Jorge Gomez # Copyright (C) 2013,14 Sai Vineet # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """Pippy Activity: A simple Python programming activity .""" import re import os import subprocess from random import uniform import locale import json import sys from shutil import copy2 from signal import SIGTERM from gettext import gettext as _ import uuid import dbus from dbus.mainloop.glib import DBusGMainLoop from gi import require_version require_version('Gdk', '3.0') require_version('Gtk', '3.0') from gi.repository import Gdk from gi.repository import Gtk from gi.repository import GLib from gi.repository import Pango try: require_version('Vte', '2.91') except: require_version('Vte', '2.90') from gi.repository import Vte from gi.repository import GObject DBusGMainLoop(set_as_default=True) bus = dbus.SessionBus() from sugar3.datastore import datastore from sugar3.activity import activity as activity from sugar3.activity.widgets import EditToolbar from sugar3.activity.widgets import StopButton from sugar3.activity.activity import get_bundle_path from sugar3.graphics.alert import Alert from sugar3.graphics.alert import ConfirmationAlert from sugar3.graphics.alert import NotifyAlert from sugar3.graphics.icon import Icon from sugar3.graphics.objectchooser import ObjectChooser from sugar3.graphics.toggletoolbutton import ToggleToolButton from sugar3.graphics.toolbarbox import ToolbarButton from sugar3.graphics.toolbutton import ToolButton from sugar3.graphics.toolbarbox import ToolbarBox from sugar3.activity.widgets import ActivityToolbarButton from jarabe.view.customizebundle import generate_unique_id from activity import ViewSourceActivity from activity import TARGET_TYPE_TEXT from collabwrapper import CollabWrapper from filedialog import FileDialog from icondialog import IconDialog from notebook import SourceNotebook, tab_object from toolbars import DevelopViewToolbar import sound_check import logging text_buffer = None # magic prefix to use utf-8 source encoding PYTHON_PREFIX = '''#!/usr/bin/python3 # -*- coding: utf-8 -*- ''' # Force category names into Pootle DEFAULT_CATEGORIES = [_('graphics'), _('math'), _('python'), _('sound'), _('string'), _('tutorials')] _logger = logging.getLogger('pippy-activity') DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3 # -*- coding: utf-8 -*- from distutils.core import setup setup(name='{modulename}', version='1.0', py_modules=[ {filenames} ], ) """ # This is .format()'ed with the list of the file names. 
DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3 # -*- coding: utf-8 -*- from distutils.core import setup setup(name='{modulename}', version='1.0', py_modules=[ {filenames} ], ) """ # This is .format()'ed with the list of the file names. def _has_new_vte_api(): try: return (Vte.MAJOR_VERSION >= 0 and Vte.MINOR_VERSION >= 38) except: # Really old versions of Vte don't have VERSION return False def _find_object_id(activity_id, mimetype='text/x-python'): ''' Round-about way of accessing self._jobject.object_id ''' dsobjects, nobjects = datastore.find({'mime_type': [mimetype]}) for dsobject in dsobjects: if 'activity_id' in dsobject.metadata and \ dsobject.metadata['activity_id'] == activity_id: return dsobject.object_id return None class PippyActivity(ViewSourceActivity): '''Pippy Activity as specified in activity.info''' def __init__(self, handle): self._pippy_instance = self self.session_data = [] # Used to manage saving self._loaded_session = [] # Used to manage tabs self._py_file_loaded_from_journal = False self._py_object_id = None self._dialog = None sys.path.append(os.path.join(self.get_activity_root(), 'Library')) ViewSourceActivity.__init__(self, handle) self._collab = CollabWrapper(self) self._collab.message.connect(self.__message_cb) self.set_canvas(self.initialize_display()) self.after_init() self.connect("notify::active", self.__active_cb) self._collab.setup() def focus(): """ Enforce focus for the text view once. """ widget = self.get_toplevel().get_focus() textview = self._source_tabs.get_text_view() if widget is None and textview is not None: textview.grab_focus() return True return False GLib.timeout_add(100, focus) def initialize_display(self): '''Build activity toolbar with title input, share button and export buttons ''' toolbar_box = ToolbarBox() activity_button = ActivityToolbarButton(self) toolbar_box.toolbar.insert(activity_button, 0) self.set_toolbar_box(toolbar_box) activity_button.show() toolbar_box.show() activity_toolbar = activity_button.page separator = Gtk.SeparatorToolItem() activity_toolbar.insert(separator, -1) separator.show() button = ToolButton('pippy-import-doc') button.set_tooltip(_('Import Python file to new tab')) button.connect('clicked', self._import_py_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-export-doc') button.set_tooltip(_('Export as Pippy document')) button.connect('clicked', self._export_document_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-export-library') button.set_tooltip(_('Save this file to the Pippy library')) button.connect('clicked', self._save_as_library) activity_toolbar.insert(button, -1) if not self._library_writable(): button.set_sensitive(False) button.show() button = ToolButton('pippy-export-example') button.set_tooltip(_('Export as new Pippy example')) button.connect('clicked', self._export_example_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-create-bundle') button.set_tooltip(_('Create a Sugar activity bundle')) button.connect('clicked', self._create_bundle_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-create-distutils') # TRANS: A distutils package is used to distribute Python modules button.set_tooltip(_('Export as a distutils package')) button.connect('clicked', self._export_distutils_cb) activity_toolbar.insert(button, -1) button.show() self._edit_toolbar = EditToolbar() button = ToolbarButton() button.set_page(self._edit_toolbar) button.props.icon_name = 'toolbar-edit' 
button.props.label = _('Edit') self.get_toolbar_box().toolbar.insert(button, -1) button.show() self._edit_toolbar.show() self._edit_toolbar.undo.connect('clicked', self.__undobutton_cb) self._edit_toolbar.redo.connect('clicked', self.__redobutton_cb) self._edit_toolbar.copy.connect('clicked', self.__copybutton_cb) self._edit_toolbar.paste.connect('clicked', self.__pastebutton_cb) view_btn = ToolbarButton() view_toolbar = DevelopViewToolbar(self) view_btn.props.page = view_toolbar view_btn.props.icon_name = 'toolbar-view' view_btn.props.label = _('View') view_toolbar.connect('font-size-changed', self._font_size_changed_cb) self.get_toolbar_box().toolbar.insert(view_btn, -1) self.view_toolbar = view_toolbar view_toolbar.show() actions_toolbar = self.get_toolbar_box().toolbar self._toggle_output = ToggleToolButton('tray-show') self._toggle_output.set_tooltip(_('Show output panel')) self._toggle_output.connect('toggled', self._toggle_output_cb) actions_toolbar.insert(self._toggle_output, -1) self._toggle_output.show() self._inverted_colors = ToggleToolButton(icon_name='dark-theme') self._inverted_colors.set_tooltip(_('Inverted Colors')) self._inverted_colors.set_accelerator('<Ctrl><Shift>I') self._inverted_colors.connect( 'toggled', self.__inverted_colors_toggled_cb) actions_toolbar.insert(self._inverted_colors, -1) self._inverted_colors.show() icons_path = os.path.join(get_bundle_path(), 'icons') icon_bw = Gtk.Image() icon_bw.set_from_file(os.path.join(icons_path, 'run_bw.svg')) icon_bw.show() icon_color = Gtk.Image() icon_color.set_from_file(os.path.join(icons_path, 'run_color.svg')) icon_color.show() button = ToolButton(label=_('Run!')) button.props.accelerator = _('<alt>r') button.set_icon_widget(icon_bw) button.set_tooltip(_('Run!')) button.connect('clicked', self._flash_cb, dict({'bw': icon_bw, 'color': icon_color})) button.connect('clicked', self._go_button_cb) actions_toolbar.insert(button, -1) button.show() icon_bw = Gtk.Image() icon_bw.set_from_file(os.path.join(icons_path, 'stopit_bw.svg')) icon_bw.show() icon_color = Gtk.Image() icon_color.set_from_file(os.path.join(icons_path, 'stopit_color.svg')) icon_color.show() button = ToolButton(label=_('Stop')) button.props.accelerator = _('<alt>s') button.set_icon_widget(icon_bw) button.connect('clicked', self._flash_cb, dict({'bw': icon_bw, 'color': icon_color})) button.connect('clicked', self._stop_button_cb) button.set_tooltip(_('Stop')) actions_toolbar.insert(button, -1) button.show() icon_bw = Gtk.Image() icon_bw.set_from_file(os.path.join(icons_path, 'eraser_bw.svg')) icon_bw.show() icon_color = Gtk.Image() icon_color.set_from_file(os.path.join(icons_path, 'eraser_color.svg')) icon_color.show() button = ToolButton(label=_('Clear output panel')) button.props.accelerator = _('<alt>c') button.set_icon_widget(icon_bw) button.connect('clicked', self._clear_button_cb) button.connect('clicked', self._flash_cb, dict({'bw': icon_bw, 'color': icon_color})) button.set_tooltip(_('Clear output panel')) actions_toolbar.insert(button, -1) button.show() activity_toolbar.show() separator = Gtk.SeparatorToolItem() self.get_toolbar_box().toolbar.insert(separator, -1) separator.show() button = ToolButton('pippy-openoff') button.set_tooltip(_('Open an example')) button.connect('clicked', self._load_example_cb) self.get_toolbar_box().toolbar.insert(button, -1) button.show() separator = Gtk.SeparatorToolItem() separator.props.draw = False separator.set_expand(True) self.get_toolbar_box().toolbar.insert(separator, -1) separator.show() stop = 
StopButton(self) self.get_toolbar_box().toolbar.insert(stop, -1) stop.show() vpane = Gtk.Paned.new(orientation=Gtk.Orientation.VERTICAL) vpane.set_position(400) # setting initial position self.paths = [] try: if sound_check.finddir(): TAMTAM_AVAILABLE = True else: TAMTAM_AVAILABLE = False except sound_check.SoundLibraryNotFoundError: TAMTAM_AVAILABLE = False data_path = os.path.join(get_bundle_path(), 'data') # get default language from locale locale_lang = locale.getdefaultlocale()[0] if locale_lang is None: lang = 'en' else: lang = locale_lang.split('_')[0] _logger.debug(locale.getdefaultlocale()) _logger.debug(lang) # construct the path for both lang_path = os.path.join(data_path, lang) en_lang_path = os.path.join(data_path, 'en') # get all folders in lang examples all_folders = [] if os.path.exists(lang_path): for d in sorted(os.listdir(lang_path)): all_folders.append(d) # get all folders in English examples for d in sorted(os.listdir(en_lang_path)): # check if folder isn't already in list if d not in all_folders: all_folders.append(d) for folder in all_folders: # Skip sound folders if TAMTAM is not installed if folder == 'sound' and not TAMTAM_AVAILABLE: continue direntry = {} # check if dir exists in pref language, if exists, add it if os.path.exists(os.path.join(lang_path, folder)): direntry = { 'name': _(folder.capitalize()), 'path': os.path.join(lang_path, folder) + '/'} # if not try to see if it's in default English path elif os.path.exists(os.path.join(en_lang_path, folder)): direntry = { 'name': _(folder.capitalize()), 'path': os.path.join(en_lang_path, folder) + '/'} self.paths.append([direntry['name'], direntry['path']]) # Adding local examples data_path = os.path.join(get_bundle_path(), 'data') self.paths.append([_('My examples'), data_path]) self._source_tabs = SourceNotebook(self, self._collab) self._source_tabs.connect('tab-added', self._add_source_cb) self._source_tabs.connect('tab-renamed', self._rename_source_cb) self._source_tabs.connect('tab-closed', self._close_source_cb) if self._loaded_session: for name, content, path in self._loaded_session: self._source_tabs.add_tab(name, content, path) else: self.session_data.append(None) self._source_tabs.add_tab() # New instance, ergo empty tab vpane.add1(self._source_tabs) self._source_tabs.show() self._outbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL) self._vte = Vte.Terminal() self._vte.set_encoding('utf-8') self._vte.set_size(30, 5) self._vte.set_scrollback_lines(-1) self._vte_set_colors('#000000', '#E7E7E7') self._child_exited_handler = None self._vte.connect('child_exited', self._child_exited_cb) self._vte.connect('drag_data_received', self._vte_drop_cb) self._outbox.pack_start(self._vte, True, True, 0) outsb = Gtk.Scrollbar(orientation=Gtk.Orientation.VERTICAL) outsb.set_adjustment(self._vte.get_vadjustment()) outsb.show() self._outbox.pack_start(outsb, False, False, 0) self._load_config() vpane.add2(self._outbox) self._outbox.show() vpane.show() return vpane def _vte_set_colors(self, bg, fg): # XXX support both Vte APIs if _has_new_vte_api(): foreground = Gdk.RGBA() foreground.parse(bg) background = Gdk.RGBA() background.parse(fg) else: foreground = Gdk.color_parse(bg) background = Gdk.color_parse(fg) self._vte.set_colors(foreground, background, []) def after_init(self): self._outbox.hide() def _font_size_changed_cb(self, widget, size): self._source_tabs.set_font_size(size) self._vte.set_font( Pango.FontDescription('Monospace {}'.format(size))) def _store_config(self): font_size = 
self._source_tabs.get_font_size() _config_file_path = os.path.join( activity.get_activity_root(), 'data', 'config.json') with open(_config_file_path, "w") as f: f.write(json.dumps(font_size)) def _load_config(self): _config_file_path = os.path.join( activity.get_activity_root(), 'data', 'config.json') if not os.path.isfile(_config_file_path): return with open(_config_file_path, "r") as f: font_size = json.loads(f.read()) self.view_toolbar.set_font_size(font_size) self._vte.set_font( Pango.FontDescription('Monospace {}'.format(font_size))) def __active_cb(self, widget, event): _logger.debug('__active_cb %r', self.props.active) if self.props.active: self.resume() else: self.pause() def do_visibility_notify_event(self, event): _logger.debug('do_visibility_notify_event %r', event.get_state()) if event.get_state() == Gdk.VisibilityState.FULLY_OBSCURED: self.pause() else: self.resume() def pause(self): # FIXME: We had resume, but no pause? pass def resume(self): if self._dialog is not None: self._dialog.set_keep_above(True) def _toggle_output_cb(self, button): shown = button.get_active() if shown: self._outbox.show_all() self._toggle_output.set_tooltip(_('Hide output panel')) self._toggle_output.set_icon_name('tray-hide') else: self._outbox.hide() self._toggle_output.set_tooltip(_('Show output panel')) self._toggle_output.set_icon_name('tray-show') def __inverted_colors_toggled_cb(self, button): if button.props.active: self._vte_set_colors('#E7E7E7', '#000000') self._source_tabs.set_dark() button.set_icon_name('light-theme') button.set_tooltip(_('Normal Colors')) else: self._vte_set_colors('#000000', '#E7E7E7') self._source_tabs.set_light() button.set_icon_name('dark-theme') button.set_tooltip(_('Inverted Colors')) def _load_example_cb(self, widget): widget.set_icon_name('pippy-openon') self._dialog = FileDialog(self.paths, self, widget) self._dialog.show() self._dialog.run() path = self._dialog.get_path() if path: self._select_func_cb(path) def _add_source_cb(self, button, force=False, editor_id=None): if self._collab._leader or force: if editor_id is None: editor_id = str(uuid.uuid1()) self._source_tabs.add_tab(editor_id=editor_id) self.session_data.append(None) self._source_tabs.get_nth_page(-1).show_all() self._source_tabs.get_text_view().grab_focus() if self._collab._leader: self._collab.post(dict( action='add-source', editor_id=editor_id)) else: # The leader must do it first so that they can set # up the text buffer self._collab.post(dict(action='add-source-request')) # Check if dark mode enabled, apply it if self._inverted_colors.props.active: self._source_tabs.set_dark() def _rename_source_cb(self, notebook, page, name): _logger.debug('_rename_source_cb %r %r' % (page, name)) self._collab.post(dict(action='rename-source', page=page, name=name)) def _close_source_cb(self, notebook, page): _logger.debug('_close_source_cb %r' % (page)) self._collab.post(dict(action='close-source', page=page)) def __message_cb(self, collab, buddy, msg): action = msg.get('action') if action == 'add-source-request' and self._collab._leader: self._add_source_cb(None, force=True) elif action == 'add-source': self._add_source_cb( None, force=True, editor_id=msg.get('editor_id')) elif action == 'rename-source': page = msg.get('page') name = msg.get('name') _logger.debug('__message_cb rename-source %r %r' % (page, name)) self._source_tabs.rename_tab(page, name) elif action == 'close-source': page = msg.get('page') _logger.debug('__message_cb close-source %r' % (page)) self._source_tabs.close_tab(page) def 
_vte_drop_cb(self, widget, context, x, y, selection, targetType, time): if targetType == TARGET_TYPE_TEXT: self._vte.feed_child(selection.data) def get_data(self): return self._source_tabs.get_all_data() def set_data(self, data): # Remove initial new/blank thing self.session_data = [] self._loaded_session = [] try: self._source_tabs.remove_page(0) tab_object.pop(0) self._source_tabs.last_tab = 0 except IndexError: pass list_ = list(zip(*data)) for name, code, path, modified, editor_id in list_: self._source_tabs.add_tab( label=name, editor_id=editor_id) self.session_data.append(None) # maybe? def _selection_cb(self, value): self.save() _logger.debug('clicked! %s' % value['path']) _file = open(value['path'], 'r') lines = _file.readlines() self._add_source_cb(None) text_buffer = self._source_tabs.get_text_buffer() text_buffer.set_text(''.join(lines)) text_buffer.set_modified(False) self._pippy_instance.metadata['title'] = value['name'] self._stop_button_cb(None) self._reset_vte() self._source_tabs.set_current_label(value['name']) self._source_tabs.set_current_path(value['path']) self._source_tabs.get_text_view().grab_focus() def _select_func_cb(self, path): values = {} values['name'] = os.path.basename(path) values['path'] = path self._selection_cb(values) def _timer_cb(self, button, icons): button.set_icon_widget(icons['bw']) button.show_all() return False def _flash_cb(self, button, icons): button.set_icon_widget(icons['color']) button.show_all() GObject.timeout_add(400, self._timer_cb, button, icons) def _clear_button_cb(self, button): self.save() self._stop_button_cb(None) self._reset_vte() self._source_tabs.get_text_view().grab_focus() def _write_all_buffers(self, tmp_dir): data = self._source_tabs.get_all_data() zipdata = list(zip(data[0], data[1])) for name, content in zipdata: name = self._source_tabs.purify_name(name) with open(os.path.join(tmp_dir, name), 'w') as f: # Write utf-8 coding prefix if there's not one already if re.match(r'coding[:=]\s*([-\w.]+)', '\n'.join(content.splitlines()[:2])) is None: f.write(PYTHON_PREFIX) f.write(content) def _reset_vte(self): self._vte.grab_focus() self._vte.feed(b'\x1B[H\x1B[J\x1B[0;39m') def __undobutton_cb(self, butston): text_buffer = self._source_tabs.get_text_buffer() if text_buffer.can_undo(): text_buffer.undo() def __redobutton_cb(self, button): text_buffer = self._source_tabs.get_text_buffer() if text_buffer.can_redo(): text_buffer.redo() def __copybutton_cb(self, button): text_buffer = self._source_tabs.get_text_buffer() if self._vte.get_has_selection(): self._vte.copy_clipboard() elif text_buffer.get_has_selection(): clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) text_buffer.copy_clipboard(clipboard) def __pastebutton_cb(self, button): text_buffer = self._source_tabs.get_text_buffer() clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) text_buffer.paste_clipboard(clipboard, None, True) def _go_button_cb(self, button): self._stop_button_cb(button) # Try stopping old code first. self._reset_vte() # FIXME: We're losing an odd race here # Gtk.main_iteration(block=False) if self._toggle_output.get_active() is False: self._outbox.show_all() self._toggle_output.set_active(True) pippy_tmp_dir = '%s/tmp/' % self.get_activity_root() self._write_all_buffers(pippy_tmp_dir) current_file = os.path.join( pippy_tmp_dir, self._source_tabs.get_current_file_name()) # Write activity.py here too, to support pippy-based activities. 
copy2('%s/activity.py' % get_bundle_path(), '%s/tmp/activity.py' % self.get_activity_root()) # XXX Support both Vte APIs if _has_new_vte_api(): vte_run = self._vte.spawn_sync else: vte_run = self._vte.fork_command_full self._pid = vte_run( Vte.PtyFlags.DEFAULT, get_bundle_path(), ['/bin/sh', '-c', 'python3 %s; sleep 1' % current_file, 'PYTHONPATH=%s/library:%s' % (get_bundle_path(), os.getenv('PYTHONPATH', ''))], ['PYTHONPATH=%s/library:%s' % (get_bundle_path(), os.getenv('PYTHONPATH', ''))], GLib.SpawnFlags.DO_NOT_REAP_CHILD, None, None,) def _stop_button_cb(self, button): try: if self._pid is not None: os.kill(self._pid[1], SIGTERM) except: pass # Process must already be dead. def _library_writable(self): return os.access(os.path.join(get_bundle_path(), 'library'), os.W_OK) def _save_as_library(self, button): library_dir = os.path.join(get_bundle_path(), 'library') file_name = self._source_tabs.get_current_file_name() text_buffer = self._source_tabs.get_text_buffer() content = text_buffer.get_text( *text_buffer.get_bounds(), include_hidden_chars=True) if not os.path.isdir(library_dir): os.mkdir(library_dir) with open(os.path.join(library_dir, file_name), 'w') as f: f.write(content) success = True if success: alert = NotifyAlert(5) alert.props.title = _('Python File added to Library') IMPORT_MESSAGE = _('The file you selected has been added' ' to the library. Use "import {importname}"' ' to import the library for using.') alert.props.msg = IMPORT_MESSAGE.format(importname=file_name[:-3]) alert.connect('response', self._remove_alert_cb) self.add_alert(alert) def _export_document_cb(self, __): self.copy() alert = NotifyAlert() alert.props.title = _('Saved') alert.props.msg = _('The document has been saved to journal.') alert.connect('response', lambda x, i: self.remove_alert(x)) self.add_alert(alert) def _remove_alert_cb(self, alert, response_id): self.remove_alert(alert) def _import_py_cb(self, button): chooser = ObjectChooser() result = chooser.run() if result is Gtk.ResponseType.ACCEPT: dsitem = chooser.get_selected_object() if dsitem.metadata['mime_type'] != 'text/x-python': alert = NotifyAlert(5) alert.props.title = _('Error importing Python file') alert.props.msg = _('The file you selected is not a ' 'Python file.') alert.connect('response', self._remove_alert_cb) self.add_alert(alert) elif dsitem.object_id in self.session_data: alert = NotifyAlert(5) alert.props.title = _('Error importing Python file') alert.props.msg = _('The file you selected is already ' 'open') alert.connect('response', self._remove_alert_cb) self.add_alert(alert) else: name = dsitem.metadata['title'] file_path = dsitem.get_file_path() content = open(file_path, 'r').read() self._source_tabs.add_tab(name, content, None) self._source_tabs.set_current_label(name) self.session_data.append(dsitem.object_id) _logger.debug('after import py: %r' % self.session_data) chooser.destroy() def _create_bundle_cb(self, button): from shutil import rmtree from tempfile import mkdtemp # Get the name of this pippy program. 
title = self._pippy_instance.metadata['title'].replace('.py', '') title = title.replace('-', '') if title == 'Pippy Activity': alert = Alert() alert.props.title = _('Save as Activity Error') alert.props.msg = _('Please give your activity a meaningful name ' 'before attempting to save it as an activity.') ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return alert_icon = Alert() ok_icon = Icon(icon_name='dialog-ok') alert_icon.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert_icon.props.title = _('Activity icon') alert_icon.props.msg = _('Please select an activity icon.') self._stop_button_cb(None) # try stopping old code first. self._reset_vte() self._outbox.show_all() self._vte.feed(_("Creating activity bundle...").encode()) self._vte.feed(b'\r\n') TMPDIR = 'instance' app_temp = mkdtemp('.activity', 'Pippy', os.path.join(self.get_activity_root(), TMPDIR)) sourcefile = os.path.join(app_temp, 'xyzzy.py') # invoke ourself to build the activity bundle. _logger.debug('writing out source file: %s' % sourcefile) def internal_callback(window=None, event=None): icon = '%s/activity/activity-default.svg' % (get_bundle_path()) if window: icon = window.get_icon() self._stop_button_cb(None) # Try stopping old code first. self._reset_vte() self._vte.feed(_('Creating activity bundle...').encode()) self._vte.feed(b'\r\n') TMPDIR = 'instance' app_temp = mkdtemp('.activity', 'Pippy', os.path.join(self.get_activity_root(), TMPDIR)) sourcefile = os.path.join(app_temp, 'xyzzy.py') # Invoke ourself to build the activity bundle. _logger.debug('writing out source file: %s' % sourcefile) # Write out application code self._write_text_buffer(sourcefile) try: # FIXME: vte invocation was raising errors. # Switched to subprocss output = subprocess.check_output( ['/usr/bin/python3', '%s/pippy_app.py' % get_bundle_path(), '-p', '%s/library' % get_bundle_path(), '-d', app_temp, title, sourcefile, icon]) self._vte.feed(output) self._vte.feed(b'\r\n') self._bundle_cb(title, app_temp) except subprocess.CalledProcessError: rmtree(app_temp, ignore_errors=True) # clean up! 
self._vte.feed(_('Save as Activity Error').encode()) self._vte.feed(b'\r\n') raise def _alert_response(alert, response_id): self.remove_alert(alert) def _dialog(): dialog = IconDialog() dialog.connect('destroy', internal_callback) GObject.idle_add(_dialog) alert_icon.connect('response', _alert_response) self.add_alert(alert_icon) def _write_text_buffer(self, filename): text_buffer = self._source_tabs.get_text_buffer() start, end = text_buffer.get_bounds() text = text_buffer.get_text(start, end, True) with open(filename, 'w') as f: # Write utf-8 coding prefix if there's not one already if re.match(r'coding[:=]\s*([-\w.]+)', '\n'.join(text.splitlines()[:2])) is None: f.write(PYTHON_PREFIX) for line in text: f.write(line) def _export_distutils_cb(self, button): app_temp = os.path.join(self.get_activity_root(), 'instance') data = self._source_tabs.get_all_data() for filename, content in zip(data[0], data[1]): fileobj = open(os.path.join(app_temp, filename), 'w') fileobj.write(content) fileobj.close() filenames = ','.join([("'" + name[:-3] + "'") for name in data[0]]) title = self._pippy_instance.metadata['title'] if title is _('Pippy Activity'): alert = Alert() alert.props.title = _('Save as distutils package error') alert.props.msg = _('Please give your activity a meaningful ' 'name before attempting to save it ' 'as an distutils package.') ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return found = next(( name for name in data[0] if name != self._source_tabs.purify_name(name)), None) if found is not None: example = self._source_tabs.purify_name(found) alert = Alert() alert.props.title = _('Save as distutils package error') alert.props.msg = _('Please give your source files a proper ' 'name, for example "%s", before attempting to ' 'save it as an distutils package.') % example ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return setup_script = DISTUTILS_SETUP_SCRIPT.format(modulename=title, filenames=filenames) setupfile = open(os.path.join(app_temp, 'setup.py'), 'w') setupfile.write(setup_script) setupfile.close() os.chdir(app_temp) subprocess.check_output( ['/usr/bin/python3', os.path.join(app_temp, 'setup.py'), 'sdist', '-v']) # Hand off to journal os.chmod(app_temp, 0o777) jobject = datastore.create() metadata = { 'title': '%s distutils bundle' % title, 'title_set_by_user': '1', 'mime_type': 'application/x-gzip', } for k, v in list(metadata.items()): # The dict.update method is missing =( jobject.metadata[k] = v tarname = 'dist/{modulename}-1.0.tar.gz'.format(modulename=title) jobject.file_path = os.path.join(app_temp, tarname) datastore.write(jobject) def _export_example_cb(self, button): # Get the name of this pippy program. title = self._pippy_instance.metadata['title'] if title == _('Pippy Activity'): alert = Alert() alert.props.title = _('Save as Example Error') alert.props.msg = \ _('Please give your activity a meaningful ' 'name before attempting to save it as an example.') ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return self._stop_button_cb(None) # Try stopping old code first. 
self._reset_vte() self._vte.feed(_('Creating example...').encode()) self._vte.feed(b'\r\n') local_data = os.path.join(os.environ['SUGAR_ACTIVITY_ROOT'], 'data') local_file = os.path.join(local_data, title) if os.path.exists(local_file): alert = ConfirmationAlert() alert.props.title = _('Save as Example Warning') alert.props.msg = _('This example already exists. ' 'Do you want to overwrite it?') alert.connect('response', self._confirmation_alert_cb, local_file) self.add_alert(alert) else: self.write_file(local_file) self._reset_vte() self._vte.feed(_('Saved as example.').encode()) self._vte.feed(b'\r\n') self._add_to_example_list(local_file) def _child_exited_cb(self, *args): '''Called whenever a child exits. If there's a handler, run it.''' h, self._child_exited_handler = self._child_exited_handler, None if h is not None: h() def _bundle_cb(self, title, app_temp): '''Called when we're done building a bundle for a source file.''' from sugar3 import profile from shutil import rmtree try: # Find the .xo file: were we successful? bundle_file = [f for f in os.listdir(app_temp) if f.endswith('.xo')] if len(bundle_file) != 1: _logger.debug("Couldn't find bundle: %s" % str(bundle_file)) self._vte.feed(b'\r\n') self._vte.feed(_('Error saving activity to journal.').encode()) self._vte.feed(b'\r\n') return # Something went wrong. # Hand off to journal os.chmod(app_temp, 0o755) jobject = datastore.create() metadata = { 'title': '%s Bundle' % title, 'title_set_by_user': '1', 'buddies': '', 'preview': '', 'icon-color': profile.get_color().to_string(), 'mime_type': 'application/vnd.olpc-sugar', } for k, v in list(metadata.items()): # The dict.update method is missing =( jobject.metadata[k] = v jobject.file_path = os.path.join(app_temp, bundle_file[0]) datastore.write(jobject) self._vte.feed(b'\r\n') self._vte.feed(_('Activity saved to journal.').encode()) self._vte.feed(b'\r\n') self.journal_show_object(jobject.object_id) jobject.destroy() finally: rmtree(app_temp, ignore_errors=True) # clean up! 
def _dismiss_alert_cb(self, alert, response_id): self.remove_alert(alert) def _confirmation_alert_cb(self, alert, response_id, local_file): # Callback for conf alert self.remove_alert(alert) if response_id is Gtk.ResponseType.OK: self.write_file(local_file) self._reset_vte() self._vte.feed(_('Saved as example.').encode()) self._vte.feed(b'\r\n') else: self._reset_vte() def _add_to_example_list(self, local_file): entry = {'name': _(os.path.basename(local_file)), 'path': local_file} _iter = self.model.insert_before(self.example_iter, None) self.model.set_value(_iter, 0, entry) self.model.set_value(_iter, 1, entry['name']) def is_example(self, path): if path is None: return False for name in self.paths: if path.startswith(name[1]): return True return False def _get_pippy_object_id(self): ''' We need the object_id of this pippy instance to save in the .py file metadata''' if self._pippy_instance == self: return _find_object_id(self.metadata['activity_id'], mimetype='application/json') else: return self._pippy_instance.get_object_id() def write_file(self, file_path): pippy_id = self._get_pippy_object_id() data = self._source_tabs.get_all_data() zipped_data = list(zip(*data)) session_list = [] app_temp = os.path.join(self.get_activity_root(), 'instance') tmpfile = os.path.join(app_temp, 'pippy-tempfile-storing.py') if not self.session_data: self.session_data.append(None) for zipdata, content in zip(zipped_data, self.session_data): _logger.debug('Session data %r', content) name, python_code, path, modified, editor_id = zipdata if content is not None and content == self._py_object_id: _logger.debug('saving to self') self.metadata['title'] = name self.metadata['mime_type'] = 'text/x-python' if pippy_id is not None: self.metadata['pippy_instance'] = pippy_id __file = open(file_path, 'w') __file.write(python_code) __file.close() session_list.append([name, content]) elif content is not None and content[0] != '/': _logger.debug('Saving an existing dsobject') dsobject = datastore.get(content) dsobject.metadata['title'] = name dsobject.metadata['mime_type'] = 'text/x-python' if pippy_id is not None: dsobject.metadata['pippy_instance'] = pippy_id __file = open(tmpfile, 'w') __file.write(python_code) __file.close() dsobject.set_file_path(tmpfile) datastore.write(dsobject) session_list.append([name, dsobject.object_id]) elif modified: _logger.debug('Creating new dsobj for modified code') if len(python_code) > 0: dsobject = datastore.create() dsobject.metadata['title'] = name dsobject.metadata['mime_type'] = 'text/x-python' if pippy_id is not None: dsobject.metadata['pippy_instance'] = pippy_id __file = open(tmpfile, 'w') __file.write(python_code) __file.close() dsobject.set_file_path(tmpfile) datastore.write(dsobject) session_list.append([name, dsobject.object_id]) # If there are multiple Nones, we need to find # the correct one. if content is None and \ self.session_data.count(None) > 1: i = zipped_data.index(zipdata) else: i = self.session_data.index(content) self.session_data[i] = dsobject.object_id elif content is not None or path is not None: _logger.debug('Saving reference to sample file') if path is None: # Should not happen, but just in case... _logger.error('path is None.') session_list.append([name, content]) else: session_list.append([name, path]) else: # Should not happen, but just in case... _logger.debug('Nothing to save in tab? 
%s %s %s %s' % (str(name), str(python_code), str(path), str(content))) self._pippy_instance.metadata['mime_type'] = 'application/json' pippy_data = json.dumps(session_list) # Override file path if we created a new Pippy instance if self._py_file_loaded_from_journal: file_path = os.path.join(app_temp, 'pippy-temp-instance-data') _file = open(file_path, 'w') _file.write(pippy_data) _file.close() if self._py_file_loaded_from_journal: _logger.debug('setting pippy instance file_path to %s' % file_path) self._pippy_instance.set_file_path(file_path) datastore.write(self._pippy_instance) self._store_config() def read_file(self, file_path): # Either we are opening Python code or a list of objects # stored (json-encoded) in a Pippy instance, or a shared # session. # Remove initial new/blank thing self.session_data = [] self._loaded_session = [] try: self._source_tabs.remove_page(0) tab_object.pop(0) self._source_tabs.last_tab = 0 except IndexError: pass if self.metadata['mime_type'] == 'text/x-python': _logger.debug('Loading Python code') # Opening some Python code directly try: text = open(file_path).read() except: alert = NotifyAlert(10) alert.props.title = _('Error') alert.props.msg = _('Error reading data.') def _remove_alert(alert, response_id): self.remove_alert(alert) alert.connect("response", _remove_alert) self.add_alert(alert) return self._py_file_loaded_from_journal = True # Discard the '#!/usr/bin/python3' and 'coding: utf-8' lines, # if present python_code = re.sub(r'^' + re.escape(PYTHON_PREFIX), '', text) name = self.metadata['title'] self._loaded_session.append([name, python_code, None]) # Since we loaded Python code, we need to create (or # restore) a Pippy instance if 'pippy_instance' in self.metadata: _logger.debug('found a pippy instance: %s' % self.metadata['pippy_instance']) try: self._pippy_instance = datastore.get( self.metadata['pippy_instance']) except: _logger.debug('Cannot find old Pippy instance: %s') self._pippy_instance = None if self._pippy_instance in [self, None]: self._pippy_instance = datastore.create() self._pippy_instance.metadata['title'] = self.metadata['title'] self._pippy_instance.metadata['mime_type'] = 'application/json' self._pippy_instance.metadata['activity'] = 'org.laptop.Pippy' datastore.write(self._pippy_instance) self.metadata['pippy_instance'] = \ self._pippy_instance.get_object_id() _logger.debug('get_object_id %s' % self.metadata['pippy_instance']) # We need the Pippy file path so we can read the session data file_path = self._pippy_instance.get_file_path() # Finally, add this Python object to the session data self._py_object_id = _find_object_id(self.metadata['activity_id']) self.session_data.append(self._py_object_id) _logger.debug('session_data: %s' % self.session_data) if self.metadata['mime_type'] == 'application/json' or \ self._pippy_instance != self: # Reading file list from Pippy instance _logger.debug('Loading Pippy instance') if len(file_path) == 0: return data = json.loads(open(file_path).read()) for name, content in data: # content is either a datastore id or the path to some # sample code if content is not None and content[0] == '/': # a path try: python_code = open(content).read() except: _logger.error('Could not open %s; skipping' % content) path = content elif content != self._py_object_id: try: dsobject = datastore.get(content) if 'mime_type' not in dsobject.metadata: _logger.error( 'Warning: %s missing mime_type' % content) elif dsobject.metadata['mime_type'] != 'text/x-python': _logger.error( 'Warning: %s has unexpected 
mime_type %s' % (content, dsobject.metadata['mime_type'])) except: # Could be that the item has subsequently been # deleted from the datastore, so we skip it. _logger.error('Could not open %s; skipping' % content) continue try: python_code = open(dsobject.get_file_path()).read() except: # Malformed bundle? _logger.error('Could not open %s; skipping' % dsobject.get_file_path()) continue path = None # Queue up the creation of the tabs... # And add this content to the session data if content not in self.session_data: self.session_data.append(content) self._loaded_session.append([name, python_code, path]) # Create tabs from the datastore, else add a blank tab if self._loaded_session: for name, content, path in self._loaded_session: self._source_tabs.add_tab(name, content, path) else: self._source_tabs.add_tab() # TEMPLATES AND INLINE FILES ACTIVITY_INFO_TEMPLATE = ''' [Activity] name = %(title)s bundle_id = %(bundle_id)s exec = sugar-activity3 %(class)s icon = activity-icon activity_version = %(version)d mime_types = %(mime_types)s show_launcher = yes %(extra_info)s ''' PIPPY_ICON = """<?xml version="1.0" ?><!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd' [ <!ENTITY stroke_color "#010101"> <!ENTITY fill_color "#FFFFFF"> ]> <svg enable-background="new 0 0 55 55" height="55px" version="1.1" viewBox="0 0 55 55" width="55px" x="0px" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" y="0px"><g display="block" id="activity-pippy"> <path d="M28.497,48.507 c5.988,0,14.88-2.838,14.88-11.185 c0-9.285-7.743-10.143-10.954-11.083 c-3.549-0.799-5.913-1.914-6.055-3.455 c-0.243-2.642,1.158-3.671,3.946-3.671 c0,0,6.632,3.664,12.266,0.74 c1.588-0.823,4.432-4.668,4.432-7.32 c0-2.653-9.181-5.719-11.967-5.719 c-2.788,0-5.159,3.847-5.159,3.847 c-5.574,0-11.149,5.306-11.149,10.612 c0,5.305,5.333,9.455,11.707,10.612 c2.963,0.469,5.441,2.22,4.878,5.438 c-0.457,2.613-2.995,5.306-8.361,5.306 c-4.252,0-13.3-0.219-14.745-4.079 c-0.929-2.486,0.168-5.205,1.562-5.205l-0.027-0.16 c-1.42-0.158-5.548,0.16-5.548,5.465 C8.202,45.452,17.347,48.507,28.497,48.507z" fill="&fill_color;" stroke="&stroke_color;" stroke-linecap="round" stroke-linejoin="round" stroke-width="3.5"/> <path d="M42.579,19.854c-2.623-0.287-6.611-2-7.467-5.022" fill="none" stroke="&stroke_color;" stroke-linecap="round" stroke-width="3"/> <circle cx="35.805" cy="10.96" fill="&stroke_color;" r="1.676"/> </g></svg><!-- " --> """ # ACTIVITY META-INFORMATION # this is used by Pippy to generate a bundle for itself. def pippy_activity_version(): '''Returns the version number of the generated activity bundle.''' return 39 def pippy_activity_extra_files(): '''Returns a map of 'extra' files which should be included in the generated activity bundle.''' # Cheat here and generate the map from the fs contents. extra = {} bp = get_bundle_path() for d in ['po', 'data', 'post']: # everybody gets library for root, dirs, files in os.walk(os.path.join(bp, d)): for name in files: fn = os.path.join(root, name).replace(bp + '/', '') extra[fn] = open(os.path.join(root, name), 'r').read() return extra def pippy_activity_news(): '''Return the NEWS file for this activity.''' # Cheat again. 
return open(os.path.join(get_bundle_path(), 'NEWS')).read() def pippy_activity_icon(): '''Return an SVG document specifying the icon for this activity.''' return PIPPY_ICON def pippy_activity_class(): '''Return the class which should be started to run this activity.''' return 'pippy_app.PippyActivity' def pippy_activity_bundle_id(): '''Return the bundle_id for the generated activity.''' return 'org.laptop.Pippy' def pippy_activity_mime_types(): '''Return the mime types handled by the generated activity, as a list.''' return ['text/x-python'] def pippy_activity_extra_info(): return ''' license = GPLv2+ update_url = http://activities.sugarlabs.org ''' # ACTIVITY BUNDLER def main(): '''Create a bundle from a pippy-style source file''' from optparse import OptionParser from pyclbr import readmodule_ex from tempfile import mkdtemp from shutil import copytree, copy2, rmtree from sugar3.activity import bundlebuilder parser = OptionParser(usage='%prog [options] [title] [sourcefile] [icon]') parser.add_option('-d', '--dir', dest='dir', default='.', metavar='DIR', help='Put generated bundle in the specified directory.') parser.add_option('-p', '--pythonpath', dest='path', action='append', default=[], metavar='DIR', help='Append directory to python search path.') (options, args) = parser.parse_args() if len(args) < 3: parser.error('The title, sourcefile and icon arguments are required.') title = args[0] sourcefile = args[1] icon_path = args[2] pytitle = re.sub(r'[^A-Za-z0-9_]', '', title) if re.match(r'[0-9]', pytitle) is not None: pytitle = '_' + pytitle # first character cannot be numeric # First take a gander at the source file and see if it's got extra info # for us. sourcedir, basename = os.path.split(sourcefile) if not sourcedir: sourcedir = '.' module, ext = os.path.splitext(basename) f = open(icon_path, 'r') icon = f.read() f.close() # Things we look for: bundle_info = { 'version': 1, 'extra_files': {}, 'news': 'No news.', 'icon': icon, 'class': 'activity.VteActivity', 'bundle_id': ('org.sugarlabs.pippy.%s%d' % (generate_unique_id(), int(round(uniform(1000, 9999), 0)))), 'mime_types': '', 'extra_info': '', } # Are any of these things in the module? try_import = False info = readmodule_ex(module, [sourcedir] + options.path) for func in list(bundle_info.keys()): p_a_func = 'pippy_activity_%s' % func if p_a_func in info: try_import = True if try_import: # Yes, let's try to execute them to get better info about our bundle oldpath = list(sys.path) sys.path[0:0] = [sourcedir] + options.path modobj = __import__(module) for func in list(bundle_info.keys()): p_a_func = 'pippy_activity_%s' % func if p_a_func in modobj.__dict__: bundle_info[func] = modobj.__dict__[p_a_func]() sys.path = oldpath # Okay! We've done the hard part. Now let's build a bundle. # Create a new temp dir in which to create the bundle. app_temp = mkdtemp('.activity', 'Pippy') # Hope TMPDIR is set correctly! bundle = get_bundle_path() try: copytree('%s/library' % bundle, '%s/library' % app_temp) copy2('%s/activity.py' % bundle, '%s/activity.py' % app_temp) # create activity.info file. bundle_info['title'] = title bundle_info['pytitle'] = pytitle # put 'extra' files in place. extra_files = { 'activity/activity.info': ACTIVITY_INFO_TEMPLATE % bundle_info, 'activity/activity-icon.svg': bundle_info['icon'], 'NEWS': bundle_info['news'], } extra_files.update(bundle_info['extra_files']) for path, contents in list(extra_files.items()): # safety first! assert '..' 
not in path dirname, filename = os.path.split(path) dirname = os.path.join(app_temp, dirname) if not os.path.exists(dirname): os.makedirs(dirname) with open(os.path.join(dirname, filename), 'w') as f: f.write(contents) # Put script into $app_temp/pippy_app.py copy2(sourcefile, '%s/pippy_app.py' % app_temp) # Invoke bundle builder olddir = os.getcwd() oldargv = sys.argv os.chdir(app_temp) sys.argv = ['setup.py', 'dist_xo'] print('\r\nStarting bundlebuilder\r\n') bundlebuilder.start() sys.argv = oldargv os.chdir(olddir) # Move to destination directory. src = '%s/dist/%s-%d.xo' % (app_temp, pytitle, bundle_info['version']) dst = '%s/%s-%d.xo' % (options.dir, pytitle, bundle_info['version']) if not os.path.exists(src): print('Cannot find %s\r\n' % (src)) else: copy2(src, dst) finally: rmtree(app_temp, ignore_errors=True) print('Finally\r\n') if __name__ == '__main__': from gettext import gettext as _ if False: # Change this to True to test within Pippy sys.argv = sys.argv + ['-d', '/tmp', 'Pippy', '/home/olpc/pippy_app.py'] print(_('Working...')) sys.stdout.flush() main() print(_('done!')) sys.exit(0)
gpl-3.0
-1,851,500,253,610,730,000
37.860784
79
0.564677
false
3.765484
false
false
false
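
The pippy_app.py record above closes with a bundler (main()) that scans a pippy-style source file with pyclbr.readmodule_ex for optional hook functions named pippy_activity_<key> (version, news, bundle_id, and so on) and, if any are present, imports the module and calls them to fill in the bundle metadata. Below is a minimal sketch of a source file using that hook protocol; the concrete values (version number, bundle id, program body) are illustrative assumptions, not taken from the record.

# Hypothetical pippy-style program: the bundler in pippy_app.py looks for
# module-level functions named pippy_activity_<key> and calls them to
# override its default bundle_info entries before invoking bundlebuilder.

def pippy_activity_version():
    return 3                                   # assumed version number

def pippy_activity_news():
    return 'Version 3: illustrative release notes.'

def pippy_activity_bundle_id():
    return 'org.example.MyPippyProgram'        # assumed bundle id

# The rest of the file is the program itself; it gets copied into
# pippy_app.py inside the generated bundle.
print('Hello from a pippy program')
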
infrascloudy/flask-base
app/models/user.py
1
6385
from flask import current_app from flask_login import AnonymousUserMixin, UserMixin from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from itsdangerous import BadSignature, SignatureExpired from werkzeug.security import check_password_hash, generate_password_hash from app import db, login_manager class Permission: GENERAL = 0x01 ADMINISTER = 0xff class Role(db.Model): __tablename__ = 'roles' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64), unique=True) index = db.Column(db.String(64)) default = db.Column(db.Boolean, default=False, index=True) permissions = db.Column(db.Integer) users = db.relationship('User', backref='role', lazy='dynamic') @staticmethod def insert_roles(): roles = { 'User': (Permission.GENERAL, 'main', True), 'Administrator': ( Permission.ADMINISTER, 'admin', False # grants all permissions ) } for r in roles: role = Role.query.filter_by(name=r).first() if role is None: role = Role(name=r) role.permissions = roles[r][0] role.index = roles[r][1] role.default = roles[r][2] db.session.add(role) db.session.commit() def __repr__(self): return '<Role \'%s\'>' % self.name class User(UserMixin, db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) confirmed = db.Column(db.Boolean, default=False) first_name = db.Column(db.String(64), index=True) last_name = db.Column(db.String(64), index=True) email = db.Column(db.String(64), unique=True, index=True) password_hash = db.Column(db.String(128)) role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) def __init__(self, **kwargs): super(User, self).__init__(**kwargs) if self.role is None: if self.email == current_app.config['ADMIN_EMAIL']: self.role = Role.query.filter_by( permissions=Permission.ADMINISTER).first() if self.role is None: self.role = Role.query.filter_by(default=True).first() def full_name(self): return '%s %s' % (self.first_name, self.last_name) def can(self, permissions): return self.role is not None and \ (self.role.permissions & permissions) == permissions def is_admin(self): return self.can(Permission.ADMINISTER) @property def password(self): raise AttributeError('`password` is not a readable attribute') @password.setter def password(self, password): self.password_hash = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.password_hash, password) def generate_confirmation_token(self, expiration=604800): """Generate a confirmation token to email a new user.""" s = Serializer(current_app.config['SECRET_KEY'], expiration) return s.dumps({'confirm': self.id}) def generate_email_change_token(self, new_email, expiration=3600): """Generate an email change token to email an existing user.""" s = Serializer(current_app.config['SECRET_KEY'], expiration) return s.dumps({'change_email': self.id, 'new_email': new_email}) def generate_password_reset_token(self, expiration=3600): """ Generate a password reset change token to email to an existing user. 
""" s = Serializer(current_app.config['SECRET_KEY'], expiration) return s.dumps({'reset': self.id}) def confirm_account(self, token): """Verify that the provided token is for this user's id.""" s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except (BadSignature, SignatureExpired): return False if data.get('confirm') != self.id: return False self.confirmed = True db.session.add(self) db.session.commit() return True def change_email(self, token): """Verify the new email for this user.""" s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except (BadSignature, SignatureExpired): return False if data.get('change_email') != self.id: return False new_email = data.get('new_email') if new_email is None: return False if self.query.filter_by(email=new_email).first() is not None: return False self.email = new_email db.session.add(self) db.session.commit() return True def reset_password(self, token, new_password): """Verify the new password for this user.""" s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except (BadSignature, SignatureExpired): return False if data.get('reset') != self.id: return False self.password = new_password db.session.add(self) db.session.commit() return True @staticmethod def generate_fake(count=100, **kwargs): """Generate a number of fake users for testing.""" from sqlalchemy.exc import IntegrityError from random import seed, choice from faker import Faker fake = Faker() roles = Role.query.all() seed() for i in range(count): u = User( first_name=fake.first_name(), last_name=fake.last_name(), email=fake.email(), password=fake.password(), confirmed=True, role=choice(roles), **kwargs) db.session.add(u) try: db.session.commit() except IntegrityError: db.session.rollback() def __repr__(self): return '<User \'%s\'>' % self.full_name() class AnonymousUser(AnonymousUserMixin): def can(self, _): return False def is_admin(self): return False login_manager.anonymous_user = AnonymousUser @login_manager.user_loader def load_user(user_id): return User.query.get(int(user_id))
mit
1,365,562,869,490,029,800
31.576531
76
0.592482
false
4.077267
true
false
false
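
The User model in the app/models/user.py record above issues and verifies timed tokens with itsdangerous. A minimal sketch of the confirmation round trip follows; it assumes a Flask application context, that Role.insert_roles() has already been run, and that db is the same SQLAlchemy instance the record imports.

# Sketch only: create a user, generate a confirmation token, confirm it.
user = User(email='alice@example.com', password='correct horse',
            first_name='Alice', last_name='Liddell')
db.session.add(user)
db.session.commit()

token = user.generate_confirmation_token(expiration=3600)
# ...later, when the user follows the emailed link...
assert user.confirm_account(token)   # sets user.confirmed = True

An expired or tampered token makes confirm_account() return False instead, since the serializer raises SignatureExpired or BadSignature and the method catches both.
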
ME-ICA/me-ica
meica.libs/mdp/graph/graph.py
1
13012
# inspired by some code by Nathan Denny (1999) # see http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html try: # use reduce against BDFL's will even on python > 2.6 from functools import reduce except ImportError: pass class GraphException(Exception): """Base class for exception in the graph package.""" pass class GraphTopologicalException(GraphException): """Exception thrown during a topological sort if the graph is cyclical.""" pass def is_sequence(x): return isinstance(x, (list, tuple)) def recursive_map(func, seq): """Apply a function recursively on a sequence and all subsequences.""" def _func(x): if is_sequence(x): return recursive_map(func, x) else: return func(x) return map(_func, seq) def recursive_reduce(func, seq, *argv): """Apply reduce(func, seq) recursively to a sequence and all its subsequences.""" def _func(x, y): if is_sequence(y): return func(x, recursive_reduce(func, y)) else: return func(x, y) return reduce(_func, seq, *argv) class GraphNode(object): """Represent a graph node and all information attached to it.""" def __init__(self, data=None): self.data = data # edges in self.ein = [] # edges out self.eout = [] def add_edge_in(self, edge): self.ein.append(edge) def add_edge_out(self, edge): self.eout.append(edge) def remove_edge_in(self, edge): self.ein.remove(edge) def remove_edge_out(self, edge): self.eout.remove(edge) def get_edges_in(self, from_ = None): """Return a copy of the list of the entering edges. If from_ is specified, return only the nodes coming from that node.""" inedges = self.ein[:] if from_: inedges = [edge for edge in inedges if edge.head == from_] return inedges def get_edges_out(self, to_ = None): """Return a copy of the list of the outgoing edges. If to_ is specified, return only the nodes going to that node.""" outedges = self.eout[:] if to_: outedges = [edge for edge in outedges if edge.tail == to_] return outedges def get_edges(self, neighbor = None): """Return a copy of all edges. If neighbor is specified, return only the edges connected to that node.""" return ( self.get_edges_in(from_=neighbor) + self.get_edges_out(to_=neighbor) ) def in_degree(self): """Return the number of entering edges.""" return len(self.ein) def out_degree(self): """Return the number of outgoing edges.""" return len(self.eout) def degree(self): """Return the number of edges.""" return self.in_degree()+self.out_degree() def in_neighbors(self): """Return the neighbors down in-edges (i.e. the parents nodes).""" return map(lambda x: x.get_head(), self.ein) def out_neighbors(self): """Return the neighbors down in-edges (i.e. 
the parents nodes).""" return map(lambda x: x.get_tail(), self.eout) def neighbors(self): return self.in_neighbors() + self.out_neighbors() class GraphEdge(object): """Represent a graph edge and all information attached to it.""" def __init__(self, head, tail, data=None): # head node self.head = head # neighbors out self.tail = tail # arbitrary data slot self.data = data def get_ends(self): """Return the tuple (head_id, tail_id).""" return (self.head, self.tail) def get_tail(self): return self.tail def get_head(self): return self.head class Graph(object): """Represent a directed graph.""" def __init__(self): # list of nodes self.nodes = [] # list of edges self.edges = [] # node functions def add_node(self, data=None): node = GraphNode(data=data) self.nodes.append(node) return node def remove_node(self, node): # the node is not in this graph if node not in self.nodes: errstr = 'This node is not part of the graph (%s)' % node raise GraphException(errstr) # remove all edges containing this node for edge in node.get_edges(): self.remove_edge(edge) # remove the node self.nodes.remove(node) # edge functions def add_edge(self, head, tail, data=None): """Add an edge going from head to tail. head : head node tail : tail node """ # create edge edge = GraphEdge(head, tail, data=data) # add edge to head and tail node head.add_edge_out(edge) tail.add_edge_in(edge) # add to the edges dictionary self.edges.append(edge) return edge def remove_edge(self, edge): head, tail = edge.get_ends() # remove from head head.remove_edge_out(edge) # remove from tail tail.remove_edge_in(edge) # remove the edge self.edges.remove(edge) ### populate functions def add_nodes(self, data): """Add many nodes at once. data -- number of nodes to add or sequence of data values, one for each new node""" if not is_sequence(data): data = [None]*data return map(self.add_node, data) def add_tree(self, tree): """Add a tree to the graph. The tree is specified with a nested list of tuple, in a LISP-like notation. The values specified in the list become the values of the single nodes. Return an equivalent nested list with the nodes instead of the values. Example: >>> a=b=c=d=e=None >>> g.add_tree( (a, b, (c, d ,e)) ) corresponds to this tree structure, with all node values set to None: a / \ b c / \ d e """ def _add_edge(root, son): self.add_edge(root, son) return root nodes = recursive_map(self.add_node, tree) recursive_reduce(_add_edge, nodes) return nodes def add_full_connectivity(self, from_nodes, to_nodes): """Add full connectivity from a group of nodes to another one. Return a list of lists of edges, one for each node in 'from_nodes'. Example: create a two-layer graph with full connectivity. >>> g = Graph() >>> layer1 = g.add_nodes(10) >>> layer2 = g.add_nodes(5) >>> g.add_full_connectivity(layer1, layer2) """ edges = [] for from_ in from_nodes: edges.append(map(lambda x: self.add_edge(from_, x), to_nodes)) return edges ###### graph algorithms def topological_sort(self): """Perform a topological sort of the nodes. 
If the graph has a cycle, throw a GraphTopologicalException with the list of successfully ordered nodes.""" # topologically sorted list of the nodes (result) topological_list = [] # queue (fifo list) of the nodes with in_degree 0 topological_queue = [] # {node: in_degree} for the remaining nodes (those with in_degree>0) remaining_indegree = {} # init queues and lists for node in self.nodes: indegree = node.in_degree() if indegree == 0: topological_queue.append(node) else: remaining_indegree[node] = indegree # remove nodes with in_degree 0 and decrease the in_degree of their sons while len(topological_queue): # remove the first node with degree 0 node = topological_queue.pop(0) topological_list.append(node) # decrease the in_degree of the sons for son in node.out_neighbors(): remaining_indegree[son] -= 1 if remaining_indegree[son] == 0: topological_queue.append(son) # if not all nodes were covered, the graph must have a cycle # raise a GraphTopographicalException if len(topological_list)!=len(self.nodes): raise GraphTopologicalException(topological_list) return topological_list ### Depth-First sort def _dfs(self, neighbors_fct, root, visit_fct=None): # core depth-first sort function # changing the neighbors function to return the sons of a node, # its parents, or both one gets normal dfs, reverse dfs, or # dfs on the equivalent undirected graph, respectively # result list containing the nodes in Depth-First order dfs_list = [] # keep track of all already visited nodes visited_nodes = { root: None } # stack (lifo) list dfs_stack = [] dfs_stack.append(root) while len(dfs_stack): # consider the next node on the stack node = dfs_stack.pop() dfs_list.append(node) # visit the node if visit_fct != None: visit_fct(node) # add all sons to the stack (if not already visited) for son in neighbors_fct(node): if son not in visited_nodes: visited_nodes[son] = None dfs_stack.append(son) return dfs_list def dfs(self, root, visit_fct=None): """Return a list of nodes in some Depth First order starting from a root node. If defined, visit_fct is applied on each visited node. The returned list does not have to contain all nodes in the graph, but only the ones reachable from the root. """ neighbors_fct = lambda node: node.out_neighbors() return self._dfs(neighbors_fct, root, visit_fct=visit_fct) def undirected_dfs(self, root, visit_fct=None): """Perform Depth First sort. This function is identical to dfs, but the sort is performed on the equivalent undirected version of the graph.""" neighbors_fct = lambda node: node.neighbors() return self._dfs(neighbors_fct, root, visit_fct=visit_fct) ### Connected components def connected_components(self): """Return a list of lists containing the nodes of all connected components of the graph.""" visited = {} def visit_fct(node, visited=visited): visited[node] = None components = [] nodes = self.nodes for node in nodes: if node in visited: continue components.append(self.undirected_dfs(node, visit_fct)) return components def is_weakly_connected(self): """Return True if the graph is weakly connected.""" return len(self.undirected_dfs(self.nodes[0]))==len(self.nodes) ### Breadth-First Sort # BFS and DFS could be generalized to one function. I leave them # distinct for clarity. 
def _bfs(self, neighbors_fct, root, visit_fct=None): # core breadth-first sort function # changing the neighbors function to return the sons of a node, # its parents, or both one gets normal bfs, reverse bfs, or # bfs on the equivalent undirected graph, respectively # result list containing the nodes in Breadth-First order bfs_list = [] # keep track of all already visited nodes visited_nodes = { root: None } # queue (fifo) list bfs_queue = [] bfs_queue.append(root) while len(bfs_queue): # consider the next node in the queue node = bfs_queue.pop(0) bfs_list.append(node) # visit the node if visit_fct != None: visit_fct(node) # add all sons to the queue (if not already visited) for son in neighbors_fct(node): if son not in visited_nodes: visited_nodes[son] = None bfs_queue.append(son) return bfs_list def bfs(self, root, visit_fct=None): """Return a list of nodes in some Breadth First order starting from a root node. If defined, visit_fct is applied on each visited node. Note the returned list does not have to contain all nodes in the graph, but only the ones reachable from the root.""" neighbors_fct = lambda node: node.out_neighbors() return self._bfs(neighbors_fct, root, visit_fct=visit_fct) def undirected_bfs(self, root, visit_fct=None): """Perform Breadth First sort. This function is identical to bfs, but the sort is performed on the equivalent undirected version of the graph.""" neighbors_fct = lambda node: node.neighbors() return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
lgpl-2.1
-8,937,105,326,309,814,000
31.448878
80
0.587919
false
4.013572
false
false
false
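
The graph.py record above defines a small directed-graph class with topological and depth/breadth-first sorts. A usage sketch follows; the import path from mdp.graph is an assumption about how the module is exposed, and since the class is written in Python 2 style (several helpers concatenate the results of map()), the sketch sticks to calls that behave the same under either interpreter.

from mdp.graph import Graph   # assumed import path

g = Graph()
a, b, c = g.add_node('a'), g.add_node('b'), g.add_node('c')
g.add_edge(a, b)
g.add_edge(a, c)
g.add_edge(b, c)

# Nodes come back parents-before-children; a cyclic graph would instead
# raise GraphTopologicalException carrying the partial ordering.
order = g.topological_sort()
print([node.data for node in order])   # ['a', 'b', 'c']
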
caffeinehit/yell
yell/backends/celery.py
1
2316
from __future__ import absolute_import from celery.task import Task from yell import Notification, notify, registry class CeleryNotificationTask(Task): """ Dispatch and run the notification. """ def run(self, name=None, backend=None, *args, **kwargs): """ The Celery task. Delivers the notification via all backends returned by :param:`backend`. """ assert name is not None, "No 'name' specified to notify" assert backend is not None, "No 'backend' specified to notify with" backends = backend().get_backends(*args, **kwargs) notify(name, backends=backends, *args, **kwargs) class CeleryNotification(Notification): """ Delivers notifications through Celery. :example: :: from yell import notify, Notification class EmailNotification(Notification): name = 'async' def notify(self, *args, **kwargs): # Deliver email class DBNotification(Notification): name = 'async' def notify(self, *args, **kwargs): # Save to database class AsyncNotification(CeleryNotification): name = 'async' notify('async', backends = [AsyncNotification], text = "This notification is routed through Celery before being sent and saved") In the above example when calling :attr:`yell.notify` will invoke ``EmailNotification`` and ``DBNotification`` once the task was delivered through Celery. """ name = None """ The name of this notification. Override in subclasses. """ def get_backends(self, *args, **kwargs): """ Return all backends the task should use to deliver notifications. By default all backends with the same :attr:`name` except for subclasses of :class:`CeleryNotifications` will be used. """ return filter(lambda cls: not issubclass(cls, self.__class__), registry.notifications[self.name]) def notify(self, *args, **kwargs): """ Dispatches the notification to Celery """ return CeleryNotificationTask.delay(name=self.name, backend=self.__class__, *args, **kwargs)
mit
-3,298,192,501,619,146,000
32.565217
105
0.603195
false
4.755647
false
false
false
CiNC0/Cartier
cartier-python-resign-linux/tests/test_versioning.py
1
1194
#!/usr/bin/env python

import os.path
import importlib
import unittest

tests_dir = os.path.abspath(os.path.dirname(__file__))
package_name = tests_dir.split(os.path.sep)[-2].replace('-', '_')
package = importlib.import_module(package_name)


class VersioningTestCase(unittest.TestCase):

    def assert_proper_attribute(self, attribute):
        try:
            assert getattr(package, attribute), (
                "{} improperly set".format(attribute))
        except AttributeError:
            assert False, "missing {}".format(attribute)

    def test_version_attribute(self):
        self.assert_proper_attribute("__version__")

        # test major, minor, and patch are numbers
        version_split = package.__version__.split(".")[:3]
        assert version_split, "__version__ is not set"
        for n in version_split:
            try:
                int(n)
            except ValueError:
                assert False, "'{}' is not an integer".format(n)

    def test_commit_attribute(self):
        self.assert_proper_attribute("__commit__")

    def test_build_attribute(self):
        self.assert_proper_attribute("__build__")


if __name__ == '__main__':
    unittest.main()
apache-2.0
-6,473,486,557,290,887,000
28.85
65
0.60804
false
4.174825
true
false
false
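
The test module in the record above derives the package name from the tests directory and asserts that the imported package exposes __version__, __commit__ and __build__. A hypothetical package __init__.py that would satisfy those tests could read as follows; all values are placeholders.

# <package_name>/__init__.py -- placeholder values only
__version__ = '1.0.3'      # major.minor.patch, each piece must parse as int
__commit__ = 'a1b2c3d'     # e.g. a short git SHA
__build__ = '42'
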
rgerkin/pyNeuroML
pyneuroml/tune/NeuroMLSimulation.py
1
5357
''' A class for running a single instance of a NeuroML model by generating a LEMS file and using pyNeuroML to run in a chosen simulator ''' import sys import time from pyneuroml import pynml from pyneuroml.lems import generate_lems_file_for_neuroml try: import pyelectro # Not used here, just for checking installation except: print('>> Note: pyelectro from https://github.com/pgleeson/pyelectro is required!') exit() try: import neurotune # Not used here, just for checking installation except: print('>> Note: neurotune from https://github.com/pgleeson/neurotune is required!') exit() class NeuroMLSimulation(object): def __init__(self, reference, neuroml_file, target, sim_time=1000, dt=0.05, simulator='jNeuroML', generate_dir = './', cleanup = True, nml_doc = None): self.sim_time = sim_time self.dt = dt self.simulator = simulator self.generate_dir = generate_dir if generate_dir.endswith('/') else generate_dir+'/' self.reference = reference self.target = target self.neuroml_file = neuroml_file self.nml_doc = nml_doc self.cleanup = cleanup self.already_run = False def show(self): """ Plot the result of the simulation once it's been intialized """ from matplotlib import pyplot as plt if self.already_run: for ref in self.volts.keys(): plt.plot(self.t, self.volts[ref], label=ref) plt.title("Simulation voltage vs time") plt.legend() plt.xlabel("Time [ms]") plt.ylabel("Voltage [mV]") else: pynml.print_comment("First you have to 'go()' the simulation.", True) plt.show() def go(self): lems_file_name = 'LEMS_%s.xml'%(self.reference) generate_lems_file_for_neuroml(self.reference, self.neuroml_file, self.target, self.sim_time, self.dt, lems_file_name = lems_file_name, target_dir = self.generate_dir, nml_doc = self.nml_doc) pynml.print_comment_v("Running a simulation of %s ms with timestep %s ms: %s"%(self.sim_time, self.dt, lems_file_name)) self.already_run = True start = time.time() if self.simulator == 'jNeuroML': results = pynml.run_lems_with_jneuroml(lems_file_name, nogui=True, load_saved_data=True, plot=False, exec_in_dir = self.generate_dir, verbose=False, cleanup=self.cleanup) elif self.simulator == 'jNeuroML_NEURON': results = pynml.run_lems_with_jneuroml_neuron(lems_file_name, nogui=True, load_saved_data=True, plot=False, exec_in_dir = self.generate_dir, verbose=False, cleanup=self.cleanup) else: pynml.print_comment_v('Unsupported simulator: %s'%self.simulator) exit() secs = time.time()-start pynml.print_comment_v("Ran simulation in %s in %f seconds (%f mins)\n\n"%(self.simulator, secs, secs/60.0)) self.t = [t*1000 for t in results['t']] self.volts = {} for key in results.keys(): if key != 't': self.volts[key] = [v*1000 for v in results[key]] if __name__ == '__main__': sim_time = 700 dt = 0.05 if len(sys.argv) == 2 and sys.argv[1] == '-net': sim = NeuroMLSimulation('TestNet', '../../examples/test_data/simplenet.nml', 'simplenet', sim_time, dt, 'jNeuroML', 'temp/') sim.go() sim.show() else: sim = NeuroMLSimulation('TestHH', '../../examples/test_data/HHCellNetwork.net.nml', 'HHCellNetwork', sim_time, dt, 'jNeuroML', 'temp') sim.go() sim.show()
lgpl-3.0
-7,017,770,894,526,758,000
31.271084
127
0.417958
false
4.563032
false
false
false
Azure/azure-sdk-for-python
sdk/powerbiembedded/azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/operation_py3.py
1
1163
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class Operation(Model):
    """Operation.

    :param name: The name of the operation being performed on this
     particular object. This name should match the action name that appears
     in RBAC / the event service.
    :type name: str
    :param display:
    :type display: ~azure.mgmt.powerbiembedded.models.Display
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'Display'},
    }

    def __init__(self, *, name: str=None, display=None, **kwargs) -> None:
        super(Operation, self).__init__(**kwargs)
        self.name = name
        self.display = display
mit
5,197,764,973,507,058,000
33.205882
78
0.574377
false
4.473077
false
false
false
Anderson0026/mapproxy
mapproxy/script/conf/app.py
1
6606
# -:- encoding: utf-8 -:- # This file is part of the MapProxy project. # Copyright (C) 2013 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import sys import os import optparse import logging import textwrap import datetime import xml.etree.ElementTree import yaml from contextlib import contextmanager from cStringIO import StringIO from .sources import sources from .layers import layers from .caches import caches from .seeds import seeds from .utils import update_config, MapProxyYAMLDumper, download_capabilities from mapproxy.config.loader import load_configuration from mapproxy.util.ext.wmsparse import parse_capabilities def setup_logging(level=logging.INFO): mapproxy_log = logging.getLogger('mapproxy') mapproxy_log.setLevel(level) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.DEBUG) formatter = logging.Formatter( "[%(asctime)s] %(name)s - %(levelname)s - %(message)s") ch.setFormatter(formatter) mapproxy_log.addHandler(ch) def write_header(f, capabilities): print >>f, '# MapProxy configuration automatically generated from:' print >>f, '# %s' % capabilities print >>f, '#' print >>f, '# NOTE: The generated configuration can be highly inefficient,' print >>f, '# especially when multiple layers and caches are requested at once.' print >>f, '# Make sure you understand the generated configuration!' 
print >>f, '#' print >>f, '# Created on %s with:' % datetime.datetime.now() print >>f, ' \\\n'.join(textwrap.wrap(' '.join(sys.argv), initial_indent='# ', subsequent_indent='# ')) print >>f, '' @contextmanager def file_or_stdout(name): if name == '-': yield sys.stdout else: with open(name, 'wb') as f: yield f def config_command(args): parser = optparse.OptionParser("usage: %prog autoconfig [options]") parser.add_option('--capabilities', help="URL or filename of WMS 1.1.1/1.3.0 capabilities document") parser.add_option('--output', help="filename for created MapProxy config [default: -]", default="-") parser.add_option('--output-seed', help="filename for created seeding config") parser.add_option('--base', help='base config to include in created MapProxy config') parser.add_option('--overwrite', help='YAML file with overwrites for the created MapProxy config') parser.add_option('--overwrite-seed', help='YAML file with overwrites for the created seeding config') parser.add_option('--force', default=False, action='store_true', help="overwrite existing files") options, args = parser.parse_args(args) if not options.capabilities: parser.print_help() print >>sys.stderr, "\nERROR: --capabilities required" return 2 if not options.output and not options.output_seed: parser.print_help() print >>sys.stderr, "\nERROR: --output and/or --output-seed required" return 2 if not options.force: if options.output and options.output != '-' and os.path.exists(options.output): print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output return 2 if options.output_seed and options.output_seed != '-' and os.path.exists(options.output_seed): print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output_seed return 2 log = logging.getLogger('mapproxy_conf_cmd') log.addHandler(logging.StreamHandler()) setup_logging(logging.WARNING) srs_grids = {} if options.base: base = load_configuration(options.base) for name, grid_conf in base.grids.iteritems(): if name.startswith('GLOBAL_'): continue srs_grids[grid_conf.tile_grid().srs.srs_code] = name cap_doc = options.capabilities if cap_doc.startswith(('http://', 'https://')): cap_doc = download_capabilities(options.capabilities).read() else: cap_doc = open(cap_doc, 'rb').read() try: cap = parse_capabilities(StringIO(cap_doc)) except (xml.etree.ElementTree.ParseError, ValueError), ex: print >>sys.stderr, ex print >>sys.stderr, cap_doc[:1000] + ('...' 
if len(cap_doc) > 1000 else '') return 3 overwrite = None if options.overwrite: with open(options.overwrite, 'rb') as f: overwrite = yaml.load(f) overwrite_seed = None if options.overwrite_seed: with open(options.overwrite_seed, 'rb') as f: overwrite_seed = yaml.load(f) conf = {} if options.base: conf['base'] = os.path.abspath(options.base) conf['services'] = {'wms': {'md': {'title': cap.metadata()['title']}}} if overwrite: conf['services'] = update_config(conf['services'], overwrite.pop('service', {})) conf['sources'] = sources(cap) if overwrite: conf['sources'] = update_config(conf['sources'], overwrite.pop('sources', {})) conf['caches'] = caches(cap, conf['sources'], srs_grids=srs_grids) if overwrite: conf['caches'] = update_config(conf['caches'], overwrite.pop('caches', {})) conf['layers'] = layers(cap, conf['caches']) if overwrite: conf['layers'] = update_config(conf['layers'], overwrite.pop('layers', {})) if overwrite: conf = update_config(conf, overwrite) seed_conf = {} seed_conf['seeds'], seed_conf['cleanups'] = seeds(cap, conf['caches']) if overwrite_seed: seed_conf = update_config(seed_conf, overwrite_seed) if options.output: with file_or_stdout(options.output) as f: write_header(f, options.capabilities) yaml.dump(conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper) if options.output_seed: with file_or_stdout(options.output_seed) as f: write_header(f, options.capabilities) yaml.dump(seed_conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper) return 0
apache-2.0
-2,084,402,879,449,501,700
34.713514
110
0.65607
false
3.879037
true
false
false
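
The autoconfig command in the mapproxy record above merges optional overwrite files into the generated configuration, popping the top-level keys 'service', 'sources', 'caches' and 'layers' before merging whatever remains. A hedged sketch of producing such an overwrite file follows; the cache name and the option keys inside it are illustrative assumptions, not values the script itself mandates.

# Write a hypothetical overwrite file for `... autoconfig --overwrite overwrite.yaml`.
import yaml

overwrite = {
    # merged into conf['services'] (note the singular 'service' key)
    'service': {'wms': {'md': {'title': 'Reviewed WMS title'}}},
    # merged into conf['caches']; 'osm_cache' is an assumed cache name
    'caches': {'osm_cache': {'format': 'image/png'}},
}

with open('overwrite.yaml', 'w') as f:
    yaml.dump(overwrite, f, default_flow_style=False)
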
SonarOpenCommunity/sonar-cxx
cxx-sensors/src/tools/clangsa_createrules.py
1
6838
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # SonarQube C++ Community Plugin (cxx plugin) # Copyright (C) 2010-2021 SonarOpenCommunity # http://github.com/SonarOpenCommunity/sonar-cxx # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 3 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # """ Simple script to generate the rules xml file for SonarQube cxx plugin from the Clang Static Analyzer checkers. The clang compiler should be available in the PATH or output of clang -cc1 -analyzer-checker-help as input file. """ from xml.dom import minidom import argparse import re import subprocess import sys import xml.etree.ElementTree as ET def CDATA(text=None): element = ET.Element('![CDATA[') element.text = text return element ET._original_serialize_xml = ET._serialize_xml def _serialize_xml(write, elem, qnames, namespaces, short_empty_elements, **kwargs): if elem.tag == '![CDATA[': write("<%s%s]]>" % (elem.tag, elem.text)) return return ET._original_serialize_xml( write, elem, qnames, namespaces, short_empty_elements, **kwargs) ET._serialize_xml = ET._serialize['xml'] = _serialize_xml def collect_checkers(clangsa_output): """ Parse clang static analyzer output. Return the list of checkers and the description. """ checkers_data = {} # Checker name and description in one line. pattern = re.compile(r'^\s\s(?P<checker_name>\S*)\s*(?P<description>.*)') checker_name = None for line in clangsa_output.splitlines(): line = line.decode(encoding='UTF-8') if re.match(r'^CHECKERS:', line) or line == '': continue elif checker_name and not re.match(r'^\s\s\S', line): # Collect description for the checker name. checkers_data[checker_name] = line.strip() checker_name = None elif re.match(r'^\s\s\S+$', line.rstrip()): # Only checker name is in the line. checker_name = line.strip() else: # Checker name and description is in one line. match = pattern.match(line.rstrip()) if match: current = match.groupdict() checkers_data[current['checker_name']] = current['description'] # Filter out debug checkers. non_debug = {k: v for k, v in checkers_data.items() if 'debug' not in k} return non_debug def main(): parser = argparse.ArgumentParser( description="""Generate the rules xml file for cxx plugin plugin from the Clang Static Analyzer checkers. https://clang-analyzer.llvm.org/""", usage='%(prog)s -o clangsa.xml') parser.add_argument('-i', '--input', dest='input_file', action='store', required=False, help="""Input file to read rules. If parameter does not exist it tries to call clang.""") parser.add_argument('-o', '--output', dest='output_file', action='store', required=True, help="""Output file to write the xml rules. 
If the file already exists it will be overwritten.""") args = parser.parse_args() clang_version = "clang version ???".encode('utf-8') if args.input_file: with open(args.input_file, 'r') as input: checker_data = collect_checkers(input.read().encode('utf-8')) else: try: clang_version = ['clang', '--version'] version_info = subprocess.run(clang_version, stdout=subprocess.PIPE, check=True).stdout except subprocess.CalledProcessError as cpe: sys.exit(cpe.returncode) # Only the first line is interesting. clang_version = version_info.splitlines()[0] try: clang_checkers = ['clang', '-cc1', '-analyzer-checker-help'] checkers_output = subprocess.run(clang_checkers, stdout=subprocess.PIPE, check=True).stdout print("Collecting clang checkers ...", end='') checker_data = collect_checkers(checkers_output) except subprocess.CalledProcessError as cpe: sys.exit(cpe.returncode) if not checker_data: print("No checkers could be processed.") sys.exit(1) print(" done.") print("Generating rules xml ...", end='') # build a tree structure rules = ET.Element("rules") comment = " C and C++ rules for Clang Static Analyzer. " \ "https://clang-analyzer.llvm.org/\n" + \ "Rules list was generated based on " + \ clang_version.decode("utf-8") + " " rules.append(ET.Comment(comment)) for checker_name, description in checker_data.items(): rule = ET.SubElement(rules, "rule") key = ET.SubElement(rule, "key") name = ET.SubElement(rule, "name") desc = ET.SubElement(rule, "description") sev = ET.SubElement(rule, "severity") c_type = ET.SubElement(rule, "type") key.text = checker_name name.text = checker_name sev.text = "MAJOR" c_type.text = "BUG" if sev.text != 'INFO': ET.SubElement(rule, 'remediationFunction').text = 'LINEAR' ET.SubElement(rule, 'remediationFunctionGapMultiplier').text = '5min' auto_tag = checker_name.split('.')[0] tag = ET.SubElement(rule, "tag") tag.text = auto_tag.lower() cdata = CDATA('\n<p>' + description.strip() + '\n</p>\n <h2>References</h2>' ' <p><a href="https://clang-analyzer.llvm.org/"' ' target="_blank">clang-analyzer.llvm.org</a></p> \n') desc.append(cdata) xmlstr = minidom.parseString( ET.tostring(rules, method='xml')).toprettyxml(indent=" ") print(" done.") with open(args.output_file, 'w') as out: out.write(xmlstr) if __name__ == '__main__': main()
lgpl-3.0
6,750,779,244,084,423,000
33.0199
81
0.580579
false
4.089713
false
false
false
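A minimal, self-contained sketch (not part of the record above) of what collect_checkers() from the rule-generator script returns. The sample text below is invented but mimics the layout of `clang -cc1 -analyzer-checker-help` output that the script's regexes expect, and it is passed as bytes because the function decodes each line itself.

sample = b"""CHECKERS:
  core.DivideZero         Check for division by zero
  debug.DumpCFG           Display Control-Flow Graphs
  unix.Malloc
    Check for memory leaks, double free, and use-after-free
"""
checkers = collect_checkers(sample)
# Debug checkers are filtered out; the rest map checker name -> description:
# {'core.DivideZero': 'Check for division by zero',
#  'unix.Malloc': 'Check for memory leaks, double free, and use-after-free'}
print(checkers)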
mikedh/trimesh
trimesh/proximity.py
1
19400
""" proximity.py --------------- Query mesh- point proximity. """ import numpy as np from . import util from .grouping import group_min from .constants import tol, log_time from .triangles import closest_point as closest_point_corresponding from .triangles import points_to_barycentric try: from scipy.spatial import cKDTree except BaseException as E: from .exceptions import closure cKDTree = closure(E) def nearby_faces(mesh, points): """ For each point find nearby faces relatively quickly. The closest point on the mesh to the queried point is guaranteed to be on one of the faces listed. Does this by finding the nearest vertex on the mesh to each point, and then returns all the faces that intersect the axis aligned bounding box centered at the queried point and extending to the nearest vertex. Parameters ---------- mesh : trimesh.Trimesh Mesh to query. points : (n, 3) float Points in space Returns ----------- candidates : (points,) int Sequence of indexes for mesh.faces """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') # an r-tree containing the axis aligned bounding box for every triangle rtree = mesh.triangles_tree # a kd-tree containing every vertex of the mesh kdtree = cKDTree(mesh.vertices[mesh.referenced_vertices]) # query the distance to the nearest vertex to get AABB of a sphere distance_vertex = kdtree.query(points)[0].reshape((-1, 1)) distance_vertex += tol.merge # axis aligned bounds bounds = np.column_stack((points - distance_vertex, points + distance_vertex)) # faces that intersect axis aligned bounding box candidates = [list(rtree.intersection(b)) for b in bounds] return candidates def closest_point_naive(mesh, points): """ Given a mesh and a list of points find the closest point on any triangle. Does this by constructing a very large intermediate array and comparing every point to every triangle. Parameters ---------- mesh : Trimesh Takes mesh to have same interfaces as `closest_point` points : (m, 3) float Points in space Returns ---------- closest : (m, 3) float Closest point on triangles for each point distance : (m,) float Distances between point and triangle triangle_id : (m,) int Index of triangle containing closest point """ # get triangles from mesh triangles = mesh.triangles.view(np.ndarray) # establish that input points are sane points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): raise ValueError('triangles shape incorrect') if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)') # create a giant tiled array of each point tiled len(triangles) times points_tiled = np.tile(points, (1, len(triangles))) on_triangle = np.array([closest_point_corresponding( triangles, i.reshape((-1, 3))) for i in points_tiled]) # distance squared distance_2 = [((i - q)**2).sum(axis=1) for i, q in zip(on_triangle, points)] triangle_id = np.array([i.argmin() for i in distance_2]) # closest cartesian point closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)]) distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5 return closest, distance, triangle_id def closest_point(mesh, points): """ Given a mesh and a list of points find the closest point on any triangle. Parameters ---------- mesh : trimesh.Trimesh Mesh to query points : (m, 3) float Points in space Returns ---------- closest : (m, 3) float Closest point on triangles for each point distance : (m,) float Distance to mesh. 
triangle_id : (m,) int Index of triangle containing closest point """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') # do a tree- based query for faces near each point candidates = nearby_faces(mesh, points) # view triangles as an ndarray so we don't have to recompute # the MD5 during all of the subsequent advanced indexing triangles = mesh.triangles.view(np.ndarray) # create the corresponding list of triangles # and query points to send to the closest_point function all_candidates = np.concatenate(candidates) num_candidates = list(map(len, candidates)) tile_idxs = np.repeat(np.arange(len(points)), num_candidates) query_point = points[tile_idxs, :] query_tri = triangles[all_candidates] # do the computation for closest point query_close = closest_point_corresponding(query_tri, query_point) query_group = np.cumsum(num_candidates)[:-1] # vectors and distances for # closest point to query point query_vector = query_point - query_close query_distance = util.diagonal_dot(query_vector, query_vector) # get best two candidate indices by arg-sorting the per-query_distances qds = np.array_split(query_distance, query_group) idxs = np.int32([qd.argsort()[:2] if len(qd) > 1 else [0, 0] for qd in qds]) idxs[1:] += query_group.reshape(-1, 1) # points, distances and triangle ids for best two candidates two_points = query_close[idxs] two_dists = query_distance[idxs] two_candidates = all_candidates[idxs] # the first candidate is the best result for unambiguous cases result_close = query_close[idxs[:, 0]] result_tid = two_candidates[:, 0] result_distance = two_dists[:, 0] # however: same closest point on two different faces # find the best one and correct triangle ids if necessary check_distance = two_dists.ptp(axis=1) < tol.merge check_magnitude = np.all(np.abs(two_dists) > tol.merge, axis=1) # mask results where corrections may be apply c_mask = np.bitwise_and(check_distance, check_magnitude) # get two face normals for the candidate points normals = mesh.face_normals[two_candidates[c_mask]] # compute normalized surface-point to query-point vectors vectors = (query_vector[idxs[c_mask]] / two_dists[c_mask].reshape(-1, 2, 1) ** 0.5) # compare enclosed angle for both face normals dots = (normals * vectors).sum(axis=2) # take the idx with the most positive angle # allows for selecting the correct candidate triangle id c_idxs = dots.argmax(axis=1) # correct triangle ids where necessary # closest point and distance remain valid result_tid[c_mask] = two_candidates[c_mask, c_idxs] result_distance[c_mask] = two_dists[c_mask, c_idxs] result_close[c_mask] = two_points[c_mask, c_idxs] # we were comparing the distance squared so # now take the square root in one vectorized operation result_distance **= .5 return result_close, result_distance, result_tid def signed_distance(mesh, points): """ Find the signed distance from a mesh to a list of points. * Points OUTSIDE the mesh will have NEGATIVE distance * Points within tol.merge of the surface will have POSITIVE distance * Points INSIDE the mesh will have POSITIVE distance Parameters ----------- mesh : trimesh.Trimesh Mesh to query. 
points : (n, 3) float Points in space Returns ---------- signed_distance : (n,) float Signed distance from point to mesh """ # make sure we have a numpy array points = np.asanyarray(points, dtype=np.float64) # find the closest point on the mesh to the queried points closest, distance, triangle_id = closest_point(mesh, points) # we only care about nonzero distances nonzero = distance > tol.merge if not nonzero.any(): return distance # For closest points that project directly in to the triangle, compute sign from # triangle normal Project each point in to the closest triangle plane nonzero = np.where(nonzero)[0] normals = mesh.face_normals[triangle_id] projection = (points[nonzero] - (normals[nonzero].T * np.einsum( "ij,ij->i", points[nonzero] - closest[nonzero], normals[nonzero])).T) # Determine if the projection lies within the closest triangle barycentric = points_to_barycentric( mesh.triangles[triangle_id[nonzero]], projection) ontriangle = ~(( (barycentric < -tol.merge) | (barycentric > 1 + tol.merge) ).any(axis=1)) # Where projection does lie in the triangle, compare vector to projection to the # triangle normal to compute sign sign = np.sign(np.einsum( "ij,ij->i", normals[nonzero[ontriangle]], points[nonzero[ontriangle]] - projection[ontriangle])) distance[nonzero[ontriangle]] *= -1.0 * sign # For all other triangles, resort to raycasting against the entire mesh inside = mesh.ray.contains_points(points[nonzero[~ontriangle]]) sign = (inside.astype(int) * 2) - 1.0 # apply sign to previously computed distance distance[nonzero[~ontriangle]] *= sign return distance class ProximityQuery(object): """ Proximity queries for the current mesh. """ def __init__(self, mesh): self._mesh = mesh @log_time def on_surface(self, points): """ Given list of points, for each point find the closest point on any triangle of the mesh. Parameters ---------- points : (m,3) float, points in space Returns ---------- closest : (m, 3) float Closest point on triangles for each point distance : (m,) float Distance to surface triangle_id : (m,) int Index of closest triangle for each point. """ return closest_point(mesh=self._mesh, points=points) def vertex(self, points): """ Given a set of points, return the closest vertex index to each point Parameters ---------- points : (n, 3) float Points in space Returns ---------- distance : (n,) float Distance from source point to vertex. vertex_id : (n,) int Index of mesh.vertices for closest vertex. """ tree = self._mesh.kdtree return tree.query(points) def signed_distance(self, points): """ Find the signed distance from a mesh to a list of points. * Points OUTSIDE the mesh will have NEGATIVE distance * Points within tol.merge of the surface will have POSITIVE distance * Points INSIDE the mesh will have POSITIVE distance Parameters ----------- points : (n, 3) float Points in space Returns ---------- signed_distance : (n,) float Signed distance from point to mesh. """ return signed_distance(self._mesh, points) def longest_ray(mesh, points, directions): """ Find the lengths of the longest rays which do not intersect the mesh cast from a list of points in the provided directions. Parameters ----------- points : (n, 3) float Points in space. directions : (n, 3) float Directions of rays. Returns ---------- signed_distance : (n,) float Length of rays. 
""" points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') directions = np.asanyarray(directions, dtype=np.float64) if not util.is_shape(directions, (-1, 3)): raise ValueError('directions must be (n,3)!') if len(points) != len(directions): raise ValueError('number of points must equal number of directions!') faces, rays, locations = mesh.ray.intersects_id(points, directions, return_locations=True, multiple_hits=True) if len(rays) > 0: distances = np.linalg.norm(locations - points[rays], axis=1) else: distances = np.array([]) # Reject intersections at distance less than tol.planar rays = rays[distances > tol.planar] distances = distances[distances > tol.planar] # Add infinite length for those with no valid intersection no_intersections = np.setdiff1d(np.arange(len(points)), rays) rays = np.concatenate((rays, no_intersections)) distances = np.concatenate((distances, np.repeat(np.inf, len(no_intersections)))) return group_min(rays, distances) def max_tangent_sphere(mesh, points, inwards=True, normals=None, threshold=1e-6, max_iter=100): """ Find the center and radius of the sphere which is tangent to the mesh at the given point and at least one more point with no non-tangential intersections with the mesh. Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016) Shrinking sphere: A parallel algorithm for computing the thickness of 3D objects, Computer-Aided Design and Applications, 13:2, 199-207, DOI: 10.1080/16864360.2015.1084186 Parameters ---------- points : (n, 3) float Points in space. inwards : bool Whether to have the sphere inside or outside the mesh. normals : (n, 3) float or None Normals of the mesh at the given points if is None computed automatically. Returns ---------- centers : (n,3) float Centers of spheres radii : (n,) float Radii of spheres """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') if normals is not None: normals = np.asanyarray(normals, dtype=np.float64) if not util.is_shape(normals, (-1, 3)): raise ValueError('normals must be (n,3)!') if len(points) != len(normals): raise ValueError('number of points must equal number of normals!') else: normals = mesh.face_normals[closest_point(mesh, points)[2]] if inwards: normals = -normals # Find initial tangent spheres distances = longest_ray(mesh, points, normals) radii = distances * 0.5 not_converged = np.ones(len(points), dtype=bool) # boolean mask # If ray is infinite, find the vertex which is furthest from our point # when projected onto the ray. I.e. find v which maximises # (v-p).n = v.n - p.n. # We use a loop rather a vectorised approach to reduce memory cost # it also seems to run faster. 
for i in np.where(np.isinf(distances))[0]: projections = np.dot(mesh.vertices - points[i], normals[i]) # If no points lie outside the tangent plane, then the radius is infinite # otherwise we have a point outside the tangent plane, take the one with maximal # projection if projections.max() < tol.planar: radii[i] = np.inf not_converged[i] = False else: vertex = mesh.vertices[projections.argmax()] radii[i] = (np.dot(vertex - points[i], vertex - points[i]) / (2 * np.dot(vertex - points[i], normals[i]))) # Compute centers centers = points + normals * np.nan_to_num(radii.reshape(-1, 1)) centers[np.isinf(radii)] = [np.nan, np.nan, np.nan] # Our iterative process terminates when the difference in sphere # radius is less than threshold*D D = np.linalg.norm(mesh.bounds[1] - mesh.bounds[0]) convergence_threshold = threshold * D n_iter = 0 while not_converged.sum() > 0 and n_iter < max_iter: n_iter += 1 n_points, n_dists, n_faces = mesh.nearest.on_surface( centers[not_converged]) # If the distance to the nearest point is the same as the distance # to the start point then we are done. done = np.abs( n_dists - np.linalg.norm( centers[not_converged] - points[not_converged], axis=1)) < tol.planar not_converged[np.where(not_converged)[0][done]] = False # Otherwise find the radius and center of the sphere tangent to the mesh # at the point and the nearest point. diff = n_points[~done] - points[not_converged] old_radii = radii[not_converged].copy() # np.einsum produces element wise dot product radii[not_converged] = (np.einsum('ij, ij->i', diff, diff) / (2 * np.einsum('ij, ij->i', diff, normals[not_converged]))) centers[not_converged] = points[not_converged] + \ normals[not_converged] * radii[not_converged].reshape(-1, 1) # If change in radius is less than threshold we have converged cvged = old_radii - radii[not_converged] < convergence_threshold not_converged[np.where(not_converged)[0][cvged]] = False return centers, radii def thickness(mesh, points, exterior=False, normals=None, method='max_sphere'): """ Find the thickness of the mesh at the given points. Parameters ---------- points : (n, 3) float Points in space exterior : bool Whether to compute the exterior thickness (a.k.a. reach) normals : (n, 3) float Normals of the mesh at the given points If is None computed automatically. method : string One of 'max_sphere' or 'ray' Returns ---------- thickness : (n,) float Thickness at given points. """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') if normals is not None: normals = np.asanyarray(normals, dtype=np.float64) if not util.is_shape(normals, (-1, 3)): raise ValueError('normals must be (n,3)!') if len(points) != len(normals): raise ValueError('number of points must equal number of normals!') else: normals = mesh.face_normals[closest_point(mesh, points)[2]] if method == 'max_sphere': centers, radius = max_tangent_sphere(mesh=mesh, points=points, inwards=not exterior, normals=normals) thickness = radius * 2 return thickness elif method == 'ray': if exterior: return longest_ray(mesh, points, normals) else: return longest_ray(mesh, points, -normals) else: raise ValueError('Invalid method, use "max_sphere" or "ray"')
mit
8,312,762,869,782,250,000
32.448276
88
0.609227
false
4.020725
false
false
false
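A hedged usage sketch for the proximity queries in trimesh/proximity.py above. This is an illustration, not part of the original file; it assumes trimesh and numpy are installed and uses trimesh.creation.icosphere only as a convenient test mesh.

import numpy as np
import trimesh

mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
points = np.array([[0.0, 0.0, 0.0],    # inside the sphere
                   [2.0, 0.0, 0.0]])   # outside the sphere

# mesh.nearest is a ProximityQuery bound to this mesh
closest, distance, triangle_id = mesh.nearest.on_surface(points)

# sign convention from the module: inside is positive, outside is negative
signed = trimesh.proximity.signed_distance(mesh, points)
print(closest, distance, signed)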
hcseob/py_spectre
py_spectre/psf.py
1
50756
# -*- coding: latin-1 -*- """ Copyright (c) 2008 Pycircuit Development Team All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: a. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. b. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. c. Neither the name of the Pycircuit nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import unittest import struct, os, re import operator import numpy # import psfasc from copy import copy from struct import unpack, pack class PSFInvalid(Exception): pass def warning(str): print "Warning: "+str def indent(str, n=2): return "\n".join([' '*n+s for s in str.split("\n")]) class PSFData(object): @classmethod def fromFile(cls, file): obj = cls() obj.deSerializeFile(file) return obj size=None def __init__(self, value=None, extarg=None): self.value = value self.extarg = extarg def setValue(self, value): self.value = value def __eq__(self, a): return self.value == a def __cmp__(self, a): return cmp(self.value, a) def __hash__(self): return hash(self.value) def deSerializeFile(self, file): pass def getSize(self): self.size def getValue(self): return self.value def __str__(self): return str(self.value) def toPSFasc(self, prec=None): return str(self) def __repr__(self): return self.value.__repr__() class PSFNumber(PSFData): def __int__(self): return self.value def __add__(self, a): return UInt32(self.value+int(a)) def __mul__(self, a): return UInt32(self.value*int(a)) def __radd__(self, a): return UInt32(self.value+int(a)) def __sub__(self, a): return UInt32(self.value-int(a)) def __rsub__(self, a): return UInt32(int(a)-self.value) def __div__(self, a): return UInt32(self.value/int(a)) def __rdiv__(self, a): return UInt32(int(a)/self.value) def __floordiv__(self, a): return UInt32(self.value//int(a)) def __rfloordiv__(self, a): return UInt32(int(a)//self.value) def __mod__(self, a): return UInt32(self.value%int(a)) class Int8(PSFNumber): size=4 def deSerializeFile(self, file, size=None): data=file.read(self.size) self.value = unpack("b",data[3])[0] class UInt8(PSFNumber): size=4 def deSerializeFile(self, file, size=None): data=file.read(self.size) self.value = unpack("B",data[3])[0] class Int32(PSFNumber): size=4 def deSerializeFile(self, file, size=None): self.value = unpack(">i",file.read(self.size))[0] class UInt32(PSFNumber): size=4 def deSerializeFile(self, file, size=None): self.value = 
unpack(">I",file.read(self.size))[0] class Int64(PSFNumber): size=8 def __int__(self): return self.value def deSerializeFile(self, file, size=None): self.value = unpack(">q",file.read(self.size))[0] class UInt64(PSFNumber): size=8 def __int__(self): return self.value def deSerializeFile(self, file, size=None): self.value = unpack(">Q",file.read(self.size))[0] class Float64(PSFNumber): size=8 def __float__(self): return float(self.value) def toPSFasc(self, prec=6): if prec: fmt=('%%#%dg'%prec) else: fmt='%#g' return fmt%self.value def deSerializeFile(self, file, size=None): self.value = unpack(">d",file.read(self.size))[0] class Float32(PSFNumber): size=4 def __float__(self): return float(self.value) def deSerializeFile(self, file, size=None): self.value = unpack(">f",file.read(self.size))[0] class ComplexFloat64(PSFNumber): size=16 def toPSFasc(self, prec=6): if prec: fmt=('%%#%dg'%prec) else: fmt='%#g' return "(" + fmt%self.value.real + " " + fmt%self.value.imag + ")" def deSerializeFile(self, file, size=None): re,im = unpack(">dd",file.read(self.size)) self.value = complex(re,im) class String(PSFData): def __str__(self): return self.value def deSerializeFile(self, file, size=None): self.len = unpack(">I",file.read(4))[0] if self.len < 0x100: self.value = file.read(self.len) # Pad to 32-bit boundary file.read((4-self.len)%4) else: raise Exception("String too long %d"%self.len) def toPSFasc(self, prec=None): return "\""+str(self.value)+"\"" class Struct(PSFData): def __init__(self, structdef, value=None): self.structdef = structdef self.value = {} if value: self.setValue(value) def __getitem__(self, key): return self.value[key] def getValue(self): return dict([(k,v.getValue()) for k,v in self.value.items()]) def setValue(self, value): assert(value != None and len(value) == len(self.structdef.children)) for element, val in zip(self.structdef.children, value): valueobj = element.getDataObj() valueobj.setValue(val) self.value[element.name] = valueobj def deSerializeFile(self, file): for element in self.structdef.children: value = element.getDataObj() value.deSerializeFile(file) self.value[element.name] = value def toPSFasc(self, prec=None): s="(\n" for element in self.structdef.children: s+=self.value[element.name].toPSFasc(prec)+"\n" s+=")" return s def __repr__(self): return "\n".join([indent(s) for s in map(repr,self.value.items())]) + "\n" class Array(PSFData): def setValue(self, value): dataclass, length = self.extarg if value != None: self.children = [dataclass(value=val) for val in value] else: self.children = [dataclass(value=None) for val in range(length)] def getValue(self): return [v.getValue() for v in self.children] def __iter__(self): return self.children.__iter__() def __tuple__(self): return tuple(self.children) def __repr__(self): return "\n".join([indent(s) for s in map(str,self.children)]) + "\n" class Chunk: """Base class for chunk""" def __init__(self, psf=None, type=None): self.psf = psf self.fileoffset=None if not hasattr(self.__class__, 'type'): self.type = type self.verbose = False self.name = "" def deSerializeFile(self, file): self.fileoffset = file.tell() type = UInt32.fromFile(file) if (self.type != None) and self.type != type: file.seek(-UInt32.size, 1) raise IncorrectChunk(type, self.type) def __repr__(self): return self.__class__.__name__ class NextSectionType(Chunk): type=1 class NextSectionSweep(Chunk): type=2 class NextSectionTrace(Chunk): type=3 class NextSectionValues(Chunk): type=4 class EndOfStructDef(Chunk): type=18 NextSectionClasses = 
[NextSectionType, NextSectionSweep, NextSectionTrace, NextSectionValues] class Property(Chunk): type=None valueclass=None def __init__(self, name=None, value=None): Chunk.__init__(self) self.name = String(name) self.value = self.valueclass(value) def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.name = String.fromFile(file) self.value = self.valueclass.fromFile(file) def toPSFasc(self, prec=9): return self.name.toPSFasc() + " " + self.value.toPSFasc(prec=prec) def __repr__(self): return self.__class__.__name__+"("+str(self.name)+","+str(self.value)+")" class PropertyString(Property): type=33 valueclass=String class PropertyUInt(Property): type=34 valueclass=UInt32 class PropertyFloat64(Property): type=35 valueclass=Float64 PropertyClasses = [PropertyString, PropertyUInt, PropertyFloat64] TYPEFLOATDOUBLE = 11 TYPEINTBYTE = 1 TYPECOMPLEXDOUBLE = 12 TYPESTRUCT = 16 TYPESTRING = 2 ## Incorrect number TYPEARRAY = 3 ## Incorrect number TYPEINTLONG = 5 class DataTypeDef(Chunk): """Class representing data type of waveform data""" type=16 ClassDict = { TYPEFLOATDOUBLE: Float64, TYPEINTBYTE: Int8, TYPECOMPLEXDOUBLE: ComplexFloat64, TYPESTRING: String, TYPEARRAY: Array, TYPEINTLONG: Int32 } PSFASCDict = { TYPEFLOATDOUBLE: "FLOAT DOUBLE", TYPEINTBYTE: "INT BYTE", TYPECOMPLEXDOUBLE: "COMPLEX DOUBLE", TYPESTRING: "STRING *", TYPEINTLONG: "INT LONG" } def __init__(self, psf, id=0, name=None, datatypeid=0, structdef=None): Chunk.__init__(self, psf, type) self.id = id self.name = name self.datatypeid = datatypeid self.structdef = structdef self.properties = [] def getDataObj(self): """Get a data object described by the DataType""" if self.datatypeid == TYPESTRUCT: return self.structdef.getDataObj() elif self.datatypeid == TYPEARRAY: return Array(extarg=(self.ClassDict[self.structdef[0]], self.structdef[1])) else: return self.ClassDict[self.datatypeid](extarg=self.structdef) def toPSFasc(self, prec=None): r=self.name.toPSFasc(prec) + " " if self.datatypeid == TYPESTRUCT: r+=self.structdef.toPSFasc(prec) elif self.datatypeid == TYPEARRAY: r+="ARRAY ( %s ) "%str(self.structdef[1])+self.PSFASCDict[self.structdef[0]] else: r+= self.PSFASCDict[self.datatypeid] if len(self.properties)>0: r+=" PROP(\n" r+="\n".join([prop.toPSFasc(prec) for prop in self.properties]) r+="\n)" return r def getDataSize(self): if self.datatypeid == TYPESTRUCT: return self.structdef.getDataSize() else: return self.ClassDict[self.datatypeid].size def deSerializeFile(self, file): start = file.tell() Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) arraytype = UInt32.fromFile(file) self.datatypeid = UInt32.fromFile(file) if arraytype != 0: self.datatypeid, self.structdef = TYPEARRAY, (UInt32.fromFile(file), self.datatypeid) if self.datatypeid == 16: self.structdef = StructDef.fromFile(file, self.psf) # Read possible property objects that belongs to the type by peeking ahead while True: oldpos = file.tell() try: prop = readChunk(self.psf, file, expectedclasses=PropertyClasses) self.properties.append(prop) except ValueError: file.seek(oldpos) break def __repr__(self): return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid, "properties":self.properties})+")" class DataTypeRef(Chunk): type=16 """Class representing link to data type""" def __init__(self, psf, type=None): Chunk.__init__(self, psf, type) self.id = None self.name = None self.datatypeid = 0 self.properties = [] def getDataObj(self): """Get a data object 
described by the DataType""" return self.psf.types.idMap[self.datatypeid].getDataObj() def toPSFasc(self, prec=None): r=self.name.toPSFasc(prec) + " " r+=self.psf.types.idMap[self.datatypeid].name.toPSFasc() if len(self.properties)>0: r+=" PROP(\n" r+="\n".join([prop.toPSFasc(prec) for prop in self.properties]) r+="\n)" return r def getDataSize(self): return self.psf.types.idMap[self.datatypeid].getDataSize() def deSerializeFile(self, file): start = file.tell() Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) self.datatypeid = UInt32.fromFile(file) assert(self.datatypeid != 0) # Read possible property objects that belongs to the type by peeking ahead while True: oldpos = file.tell() try: prop = readChunk(self.psf, file, expectedclasses=PropertyClasses) self.properties.append(prop) except ValueError: file.seek(oldpos) break def __repr__(self): return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid, "properties":self.properties})+")" class StructDef(PSFData): """Class representing struct definition""" @classmethod def fromFile(cls, file, psf): obj = cls() obj.deSerializeFile(file, psf) return obj def __init__(self): self.children = [] def getDataObj(self): return Struct(self) def getDataSize(self): return sum([child.getDataSize() for child in self.children]) def toPSFasc(self, prec=None): s="STRUCT(\n" for child in self.children: s+=child.toPSFasc(prec)+"\n" s+=")" return s def deSerializeFile(self, file, psf): while True: chunk = readChunk(psf, file, expectedclasses=[DataTypeDef, EndOfStructDef]) if isinstance(chunk, EndOfStructDef): break else: self.children.append(chunk) def __repr__(self): return self.__class__.__name__ + "(\n"+\ "\n".join(map(str,self.children))+\ ")\n" class SimpleContainer(Chunk): type = 21 def __init__(self, psf, type=None, childrenclslist=None, childrenclsignore=None): Chunk.__init__(self, psf, type) self.section = None self.children = [] self.childrenclslist = childrenclslist self.childrenclsignore = childrenclsignore self.endpos = None def getChunks(self): return self.children def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value self.children = [] while file.tell() < self.endpos: chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist+self.childrenclsignore) if chunk.__class__ in self.childrenclslist: self.children.append(chunk) # Read trailing bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def __repr__(self): s="" if self.fileoffset: s+= "0x%x"%self.fileoffset+ ":" s+= self.__class__.__name__ + "(" + str(self.type) +")" if self.endpos and self.fileoffset: s+= "size="+str(self.endpos-self.fileoffset) s+= "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n" return s class Container22(Chunk): type=22 def __init__(self, psf, type=None, n=None, childrenclslist=None): Chunk.__init__(self, psf, 22) self.section = None self.children = [] self.childrenclslist = childrenclslist self.endpos = None def getChunks(self): return self.children def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value # Save end position of Container self.children = [] while file.tell() < self.endpos: chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist) self.children.append(chunk) # Read trailing 
bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def __repr__(self): return "0x%x"%self.fileoffset +":" + self.__class__.__name__ +\ "(" + str(self.type) +")" + "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n" class ZeroPad(Chunk): type = 20 def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) size = UInt32.fromFile(file).value self.endpos = file.tell() + size file.seek(self.endpos) class HashTable(Chunk): type = 19 """Class representing offset of trace data""" def __init__(self, psf, n=None): Chunk.__init__(self, psf, type) self.children = [] self.extra=[] def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) startpos = file.tell() size = UInt32.fromFile(file) for i in range(0, size/8): id = UInt32.fromFile(file) offset = UInt32.fromFile(file) self.children.append((id, offset)) def __repr__(self): return self.__class__.__name__+"\n"+ "\n".join([" 0x%x: 0x%x"%(k,v.value) for k,v in self.children])+")" class HashTableTrace(Chunk): type = 19 """Class representing offset of trace data""" def __init__(self, psf): Chunk.__init__(self, psf, type) self.children = [] def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.size = UInt32.fromFile(file) for i in range(0, self.size.value/16): id = UInt32.fromFile(file) offset = UInt32.fromFile(file) data1 = UInt32.fromFile(file).value data2 = UInt32.fromFile(file).value self.children.append((id,offset,data1,data2)) def __repr__(self): return self.__class__.__name__+"\n"+ "\n".join([" %s: 0x%x 0x%x 0x%x"%(pack(">I",k.value),v.value,d1,d2) for k,v,d1,d2 in self.children])+")" class HashContainer(Chunk): type=21 hashclass = HashTable def __init__(self, psf, childrenclslist=None, childrenclsignore=None): Chunk.__init__(self, psf, type) self.section = None self.children = [] self.childrenclslist = childrenclslist self.childrenclsignore = childrenclsignore self.endpos = None self.hashtable = None def __len__(self): return len(self.children) def getChunks(self): return self.children def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value self.children = [] self.data = Container22(self.psf, childrenclslist=self.childrenclslist) self.data.deSerializeFile(file) self.hashtable = self.hashclass(self.psf) self.hashtable.deSerializeFile(file) # Copy children reference from data self.children = self.data.children self.section = UInt32.fromFile(file) # Read trailing bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def __repr__(self): s="" if self.fileoffset: s += "0x%x"%self.fileoffset +":" s += self.__class__.__name__ + "(" + str(self.type) +")" if self.endpos: s+=" size="+str(self.endpos-self.fileoffset) + "\n" s += "\n".join([indent(s) for s in map(str,(self.children, self.hashtable))]) + "\n" return s class HeaderSection(SimpleContainer): type=21 def __init__(self, psf, n=None): SimpleContainer.__init__(self,psf, childrenclslist=PropertyClasses, childrenclsignore=NextSectionClasses) self.properties = {} def addProperty(self, prop): """Add property to header""" self.children.append(prop) self.properties[prop.name] = prop.value def deSerializeFile(self, file): SimpleContainer.deSerializeFile(self, file) # Read header properties self.properties = {} for prop in 
self.children: self.properties[prop.name] = prop.value def toPSFasc(self, prec=None): r="HEADER\n" r+='"PSFversion" "1.00"\n' r+="\n".join([child.toPSFasc(prec) for child in self.children \ if not child.name.value[0:3].upper() == 'PSF']) return r class SweepSection(SimpleContainer): type=21 def __init__(self, psf): SimpleContainer.__init__(self, psf, childrenclslist=[DataTypeRef], childrenclsignore=NextSectionClasses) def deSerializeFile(self, file): SimpleContainer.deSerializeFile(self, file) # Read header properties self.idMap = {} for chunk in self.children: self.idMap[chunk.id] = chunk def getSweep(self, id): return self.idMap[id] def getNames(self): return tuple([str(child.name) for child in self.children]) def toPSFasc(self, prec=None): r="SWEEP\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class TypeSection(HashContainer): def __init__(self, psf): HashContainer.__init__(self, psf, childrenclslist=[DataTypeDef], childrenclsignore=NextSectionClasses) self.idMap = {} self.nameMap = {} def addType(self, type): type.id = self.psf.allocId() self.children.append(type) self.idMap[type.id] = type self.nameMap[type.name] = type def getType(self, id): return self.idMap[id] def getTypeByName(self, name): return self.nameMap[name] def deSerializeFile(self, file): HashContainer.deSerializeFile(self, file) # Read header properties self.idMap = {} for chunk in self.children: self.idMap[chunk.id] = chunk self.nameMap[chunk.name] = type def toPSFasc(self, prec=None): r="TYPE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class TraceSection(HashContainer): hashclass = HashTableTrace def __init__(self, psf): HashContainer.__init__(self, psf, childrenclslist=[GroupDef, DataTypeRef]) self.idMap = {} self.nameIndex = {} def deSerializeFile(self, file): HashContainer.deSerializeFile(self, file) self.idMap = {} for index, chunk in enumerate(self.children): self.idMap[chunk.id] = chunk if isinstance(chunk, GroupDef): self.nameIndex.update(dict([(par, (index,)+value) for par,value in chunk.getNameIndex().items()])) else: self.nameIndex[chunk.name] = (index,) def getNameIndex(self): return self.nameIndex def toPSFasc(self, prec=None): r="TRACE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r def getTraceNames(self): result = [] for trace in self.children: if isinstance(trace,GroupDef): result += trace.getNames() else: result.append(trace.name) return tuple(map(str, result)) def getTraceIndexByName(self, name): """Returns an index to the given trace name The index is hierarchical so if if the traces are divided into 2 groups the index (0,1) means child 1 of group 0 >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.traces.getTraceIndexByName("VIN") (0, 1) >>> psf=PSFReader('./test/resultdirs/parsweep2/C=1e-12,R=1e-12/psf/ac.ac') >>> psf.open() >>> psf.traces.getTraceIndexByName("net3") (0,) """ return self.nameIndex[name] class ValuesSectionNonSweep(HashContainer): type=21 def __init__(self, psf): HashContainer.__init__(self, psf, childrenclslist=[NonSweepValue]) self.idMap={} self.nameMap={} def addValue(self, value): value.id = self.psf.allocId() if not isinstance(value, NonSweepValue): raise ValueError("Value should be a NonSweepValue") self.idMap[value.id] = value self.nameMap[value.name] = value self.children.append(value) def deSerializeFile(self, file): HashContainer.deSerializeFile(self, file) for child in self.children: self.nameMap[child.name] = child def getValuePropertiesByName(self, name): return 
dict([(prop.name, prop.value) for prop in self.nameMap[name].properties]) def getValueByName(self, name): return self.nameMap[name].getValue() def getValueNames(self): return tuple([child.name for child in self.children]) def toPSFasc(self, prec=None): r="VALUE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class ValuesSectionSweep(SimpleContainer): type=21 def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value windowedsweep = self.psf.header.properties.has_key('PSF window size') if windowedsweep: el = ZeroPad(self.psf) el.deSerializeFile(file) isweep=0 while isweep < self.psf.header.properties['PSF sweep points']: if windowedsweep: value = SweepValueWindowed(self.psf) else: value = SweepValueSimple(self.psf) isweep += value.deSerializeFile(file, n=self.psf.header.properties['PSF sweep points']-isweep) self.children.append(value) self.section = UInt32.fromFile(file) # Read trailing bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def getSweepParamValues(self): return reduce(operator.__add__, [child.getSweepParamValues() for child in self.children]) def getValueNames(self): return self.psf.traces.getTraceNames() def __len__(self): return len(self.psf.traces) def getValueByName(self, name): windowedsweep = self.psf.header.properties.has_key('PSF window size') index = self.psf.traces.getTraceIndexByName(name) result = [] for child in self.children: obj=child for i in index: obj = obj.children[i] # If windowed sweep, each child will be a list of values in the window if windowedsweep: result += [v.getValue() for v in obj] else: result.append(obj.getValue()) return numpy.array(result) def toPSFasc(self, prec=None): r="VALUE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class NonSweepValue(Chunk): type=16 def __init__(self, psf, id=None, typeid=None, name=None, value=None): Chunk.__init__(self, psf, type) self.id = id self.name = name self.typeid = typeid if typeid: self.valuetype = self.psf.types.idMap[self.typeid] else: self.valuetype = None if value: self.value = value elif self.valuetype: self.value = self.valuetype.getDataObj() else: self.value = None self.properties = [] def getValue(self): return self.value.getValue() def setValue(self, value): self.value.setValue(value) def deSerializeFile(self, file): startpos = file.tell() Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) self.typeid = UInt32.fromFile(file) assert(self.typeid != 0) self.valuetype = self.psf.types.idMap[self.typeid] self.value = self.valuetype.getDataObj() self.value.deSerializeFile(file) # Read possible property objects that belongs to the type by peeking ahead while True: oldpos = file.tell() try: prop = readChunk(self.psf, file, expectedclasses=PropertyClasses) self.properties.append(prop) except ValueError: file.seek(oldpos) break def toPSFasc(self, prec=None): r = self.name.toPSFasc(prec) + " " + self.valuetype.name.toPSFasc(prec) + " " + self.value.toPSFasc(prec) if len(self.properties)>0: r+=" PROP(\n" r+="\n".join([prop.toPSFasc(prec) for prop in self.properties]) r+="\n)" return r def __repr__(self): return self.__class__.__name__+"("+str({"name":self.name, "id":"0x%x"%self.id, "typeid":"0x%x"%self.typeid, "properties":self.properties,"value":self.value})+")" class SweepValue(Chunk): """Class representing 
waveform data""" type = 16 def __init__(self, psf, type=None): Chunk.__init__(self, psf, type) self.id = None self.linktypeid = UInt32() self.datatypeid = UInt32() self.paramtype = None self.paramvalue = None self.children = [] self.properties = [] def deSerializeFile(self, file, n=None): pass def getSweepParamValues(self): pass def __len__(self): return len(self.children) def __repr__(self): return self.__class__.__name__ + "(" + str(self.paramtype.name) + "=" + str(self.paramvalue) +","+ \ "children="+str(self.children) +")\n" class SweepValueSimple(SweepValue): def deSerializeFile(self, file, n=None): Chunk.deSerializeFile(self, file) self.paramtypeid = UInt32.fromFile(file) self.paramtype = self.psf.sweeps.getSweep(self.paramtypeid) self.paramvalue = self.paramtype.getDataObj() self.paramvalue.deSerializeFile(file) for datatype in self.psf.traces.children: datatypeid = UInt32.fromFile(file) if datatypeid in (17,16): valuetypeid = UInt32.fromFile(file) if valuetypeid != datatype.id: ## Unexpected value type id found ## This is probably because of missing trace values ## Undo read of datatypeid, valuetypeid and break out of loop and file.seek(-2*UInt32.size, 1) break value = datatype.getDataObj() value.deSerializeFile(file) self.children.append(value) elif datatypeid == 15: ## End of section file.seek(-UInt32.size, 1) break else: raise Exception("Datatypeid unknown 0x%x" % datatypeid) return 1 def getSweepParamValues(self): return [self.paramvalue.getValue()] def toPSFasc(self, prec=None): r=self.paramtype.name.toPSFasc(prec) + " " +self.paramvalue.toPSFasc(prec)+"\n" r+="\n".join([valuetype.name.toPSFasc(prec) + " " + value.toPSFasc(prec) \ for valuetype, value in zip(self.psf.traces.children, self.children)]) return r class SweepValueWindowed(SweepValue): def deSerializeFile(self, file, n=None): bufferstart = file.tell() Chunk.deSerializeFile(self, file) self.paramtypeid = UInt32.fromFile(file) assert(len(self.psf.sweeps.children) == 1) self.paramtype=self.psf.sweeps.children[0] self.paramvalue = [] # Get sweep parameter values paramvaluesize = self.paramtype.getDataSize() windowsize = self.psf.header.properties['PSF window size'].value leftinwindow = (file.tell()//windowsize + 1)*windowsize - file.tell() windowlen = leftinwindow//paramvaluesize; if n > windowlen: n = windowlen for j in xrange(n): paramvalue = self.paramtype.getDataObj() paramvalue.deSerializeFile(file) if j < n: self.paramvalue.append(paramvalue) # Get trace values for trace in self.psf.traces.children: value = trace.getDataObj() value.deSerializeFile(file, count=n, windowsize=self.psf.header.properties['PSF window size'].value) self.children.append(value) # Skip trailing padding bytes padsize = int((self.psf.header.properties['PSF buffer size'] - (file.tell()-bufferstart))% \ self.psf.header.properties['PSF buffer size']) file.seek(padsize, 1) return n def getSweepParamValues(self): return [v.getValue() for v in self.paramvalue] def toPSFasc(self, prec=None): r='' for i, paramvalue in enumerate(self.paramvalue): r+=self.paramtype.name.toPSFasc(prec) + " " + paramvalue.toPSFasc(prec) + "\n" r+="\n".join([trace.name.toPSFasc(prec) + " " + value.toPSFasc(prec=prec, index=i) \ for trace,value in zip(self.psf.traces.children, self.children)]) if i < len(self.paramvalue)-1: r+="\n" return r class GroupData(PSFData): def __init__(self, groupdef): PSFData.__init__(self) self.groupdef = groupdef self.children = [] def deSerializeFile(self, file, count=None, windowsize=None): for element in self.groupdef.children: if 
count==None: value = element.getDataObj() value.deSerializeFile(file) self.children.append(value) else: valuearray=[] # If a window is used in the PSF file, the entire window is stored # and the data is aligned to the end of the window. So we need # to skip window size - data size file.seek(int(windowsize - count*element.getDataSize()), 1) for i in xrange(0,count): value = element.getDataObj() value.deSerializeFile(file) valuearray.append(value) self.children.append(valuearray) def toPSFasc(self, prec=None, index=None): if index != None: return "\n".join([v[index].toPSFasc(prec) for v in self.children]) else: return "\n".join([v.toPSFasc(prec) for v in self.children]) def getSize(self): return self.groupdef.getDataSize() def __repr__(self): return "GroupData" + "\n" + "\n".join([indent(s) for s in map(repr,self.children)]) + "\n" class GroupDef(Chunk): type=17 """Class representing group of traces""" def __init__(self, psf): Chunk.__init__(self, psf) self.children=[] self.datasize=None def getDataObj(self): return GroupData(self) def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) self.nchildren = UInt32.fromFile(file) # Read children self.children = [] self.datasize = 0 for i in range(0, self.nchildren): child = DataTypeRef(self.psf) child.deSerializeFile(file) self.children.append(child) self.datasize += child.getDataSize() def getNameIndex(self): return dict([(v.name, (i,)) for i,v in enumerate(self.children)]) def toPSFasc(self, prec=None): s=self.name.toPSFasc(prec) + " GROUP %d\n"%len(self.children) s+="\n".join([child.toPSFasc(prec) for child in self.children]) return s def getDataSize(self): return self.datasize def getNames(self): return [str(child.name) for child in self.children] def __repr__(self): return "0x%x"%self.fileoffset +":" + self.__class__.__name__+ "(id=0x%x"%self.id+", nchildren=%d"%self.nchildren+")\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n" class UnknownChunk(Exception): def __init__(self, chunktype): self.type = chunktype def __str__(self): return "Unknown chunk of type: %d"%self.type class InvalidChunk(Exception): def __init__(self, chunk): self.chunk = chunk def __str__(self): return "Invalid %s"%(self.chunk.__class__.__name__) class IncorrectChunk(Exception): def __init__(self, type, expectedtype): self.type = type self.expectedtype = expectedtype def __str__(self): return "Incorrect chunk type %d (should be %d)"%(self.type, self.expectedtype) class LastValue(Exception): pass def readChunk(psf, file, expectedclasses=None): type = UInt32.fromFile(file) file.seek(-4, 1) # Rewind one word since the type will be read again by the deSerializeFile function if expectedclasses: if not type in [cls.type for cls in expectedclasses]: raise ValueError("Unexpected type %d, not in "%type + str([cls.type for cls in expectedclasses])) for cls in expectedclasses: if type == cls.type: chunk = cls(psf) else: raise Exception("Use expectedclasses!") if type == 21: chunk = Section(psf) elif type == 20: chunk = ZeroPad(psf) elif type == 22: chunk = Container22(psf, type, n=n) elif type == 33: chunk = PropertyString(psf) elif type == 34: chunk = PropertyUInt(psf) elif type == 35: chunk = PropertyFloat64(psf) elif type == 16: chunk = DataTypeDef(psf,type) elif type == 17: chunk = GroupDef(psf) elif type == 19: chunk = HashTable(psf, n=n) elif type in (1,2,3,4): file.seek(4,1) return None else: warning("Unknown chunk %d"%type) raise UnknownChunk(type) chunk.deSerializeFile(file) 
return chunk class PSFReader(object): def __init__(self, filename=None, asc=None): self.header = None self.types = TypeSection(self) self.sweeps = None self.traces = None self.lastid = 0x1000 self.verbose = False self.filename = filename self.file = None self.values = None self.asc = asc def open(self): """Open a PSF file and read its headers. Example: Trying to open a valid psf file >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() """ if self.asc == None: self.asc = False if not self.asc: self.file = open(self.filename, "rb") if self.validate(): self.deSerializeFile(self.file) else: raise PSFInvalid("Invalid PSF file") else: newpsfobj = psfasc.parse("psfasc", open(self.filename).read()) self.header = newpsfobj.header self.types = newpsfobj.types self.sweeps = newpsfobj.sweeps self.traces = newpsfobj.traces self.values = newpsfobj.values self.lastid = newpsfobj.lastid self.verbose = newpsfobj.verbose def validate(self): """Check if the PSF file is valid. Returns True if valid, False otherwise >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.validate() True >>> psf=PSFReader('./test/psfasc/srcSweep.asc') >>> psf.validate() False """ if self.file == None: file = open(self.filename, "rb") else: file = self.file # Read Clarissa signature file.seek(-4-8,2) clarissa = file.read(8) return clarissa == "Clarissa" def getNSweepPoints(self): """Returns number of sweeps. 0 if not swept. >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getNSweepPoints() 4 """ if self.file == None: ValueError("Please open the PSF file first") return self.header.properties['PSF sweep points'] def getNSweeps(self): """Returns the number of nested sweeps >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getNSweeps() 1 """ if self.file == None: ValueError("Please open the PSF file first") return self.header.properties['PSF sweeps'] def __len__(self): return len(self.values) def getValueNames(self): """Returns a tuple of the names of the traces >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.getValueNames() >>> psf.open() >>> psf.getValueNames() ('VOUT', 'VIN', 'R0') >>> psf=PSFReader('./test/resultdirs/simple/opBegin') >>> psf.open() >>> psf.getValueNames() ('R0', 'V1', 'V0', 'E0', 'VIN', 'NET9', 'VOUT') """ if self.values: return self.values.getValueNames() def getSweepParamNames(self): return self.sweeps.getNames() def getSweepParamValues(self, dim=0): """Returns a numpy.array of sweep parameter values for sweep dimension dim. >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getSweepParamValues(0) array([ 1., 2., 3., 4.]) windowed result >>> psf=PSFReader('./test/psf/timeSweep') >>> psf.open() >>> psf.getSweepParamValues(0)[:3] array([ 0.00000000e+00, 2.00000000e-11, 5.33333333e-11]) """ return numpy.array(self.values.getSweepParamValues()) def getValuePropertiesByName(self, name): """Returns the properties associated with value >>> psf=PSFReader('./test/psf/opBegin') >>> psf.open() >>> psf.getValuePropertiesByName("XIRXRFMIXTRIM0.XM1PDAC1.XMN.MAIN")["Region"] 'subthreshold' """ return self.values.getValuePropertiesByName(name) def getValuesByName(self, name): """Returns a numpy.array of trace values for swept results and a scalar for non swept. 
Example: swept psf file >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getValuesByName("VOUT") array([-6., -4., -2., 0.]) >>> psf.getValuesByName("VIN") array([ 1., 2., 3., 4.]) swept psf with complex numbers >>> psf=PSFReader('./test/psf/frequencySweep') >>> psf.open() >>> res = psf.getValuesByName("ANT_CM") >>> len(res) 123 >>> res[:3] array([ 0.6+0.j, 0. +0.j, 0. +0.j]) swept windowed psf file >>> psf=PSFReader('./test/psf/timeSweep') >>> psf.open() >>> psf.getValuesByName("INP")[0:3] array([ 0.6 , 0.62486899, 0.66211478]) non-swept psf file >>> psf=PSFReader('./test/psf/dcOpInfo.info') >>> psf.open() >>> psf.getValuesByName("IREG21U_0.MP5.b1")['betadc'] 4.7957014499434756 swept psf file withouth groups >>> psf=PSFReader('./test/resultdirs/parsweep/C=1e-12,R=1e-12/psf/ac.ac') >>> psf.open() >>> psf.getValuesByName("net3") array([ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]) """ return self.values.getValueByName(name) def nTraces(self): """Returns number of traces >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.nTraces() 3 """ if self.file == None: ValueError("Please open the PSF file first") return self.header.properties['PSF traces'] def allocId(self): self.lastid+=1 return self.lastid-1 def info(self): s="Number of sweeps: %d\n"%self.getNSweeps() if self.getNSweeps() > 0: s+="Number of sweep points: %d\n"%self.getNSweepPoints() s+="Number of traces: %d"%self.nTraces() return s def updateHeader(self): if self.sweeps: sweeps = len(self.sweeps.children) else: sweeps=0 self.header.addProperty(PropertyUInt("PSF sweeps", sweeps)) def deSerializeFile(self, file): # Find filesize file.seek(0,2) filesize = file.tell() # Last word contains the size of the data file.seek(-4,2) datasize = UInt32.fromFile(file).value if self.verbose: print "Total data size: ",datasize # Read Clarissa signature file.seek(-4-8,2) clarissa = file.read(8) if not clarissa == "Clarissa": raise ValueError("Clarissa signature not found") # Read section index table sectionoffsets = {} file.seek(-4-8-8,2) pos = file.tell() sectionnums = [] while file.tell() >= datasize: sectionnum = UInt32.fromFile(file) sectionnums.insert(0,sectionnum.value) offset = UInt32.fromFile(file) sectionoffsets[sectionnum] = offset pos -= 8 file.seek(pos) offsets = [sectionoffsets[secnum] for secnum in sectionnums] sizes = map(operator.sub, offsets[1:]+[datasize], offsets) sectionsizes = dict(zip(sectionnums, sizes)) if self.verbose: print sectionoffsets, sectionsizes file.seek(0) self.unk1 = UInt32.fromFile(file) if self.verbose: print "First word: 0x%x"%self.unk1 # Load headers file.seek(int(sectionoffsets[0])) self.header = HeaderSection(self) self.header.deSerializeFile(file) if self.verbose: print "HEADER" print self.header if sectionoffsets.has_key(1): file.seek(int(sectionoffsets[1])) self.types.deSerializeFile(file) if self.verbose: print "TYPE" print self.types if sectionoffsets.has_key(2): file.seek(int(sectionoffsets[2])) self.sweeps = SweepSection(self) self.sweeps.deSerializeFile(file) if self.verbose: print "SWEEPS" print self.sweeps if sectionoffsets.has_key(3): file.seek(int(sectionoffsets[3])) self.traces = TraceSection(self) 
self.traces.deSerializeFile(file) if sectionoffsets.has_key(4): file.seek(int(sectionoffsets[4])) # Load data if self.sweeps: self.values = ValuesSectionSweep(self) else: self.values = ValuesSectionNonSweep(self) self.values.deSerializeFile(file) def printme(self): print "HEADER" print self.header print "TYPES" print self.types if self.sweeps: print "SWEEP" print self.sweeps if self.traces: print "TRACE" print self.traces print "VALUES" print self.values def toPSFasc(self, prec=None): """Export to PSF ascii""" sections = [self.header.toPSFasc(prec), self.types.toPSFasc(prec)] if self.sweeps: sections.append(self.sweeps.toPSFasc(prec)) if self.traces: sections.append(self.traces.toPSFasc(prec)) if self.values: sections.append(self.values.toPSFasc(prec)) r="\n".join(sections) + "\n" r+="END\n" return r def __repr__(self): return "\n".join(map(str, (self.header, self.types, self.sweeps, self.traces, self.values))) if __name__ == "__main__": import doctest doctest.testmod()
mit
7,594,775,208,493,014,000
31.022713
190
0.569805
false
3.659145
false
false
false
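A usage sketch for the PSFReader defined above, adapted from the doctests embedded in the module. Note that the code targets Python 2 (print statements, xrange); the import path is assumed from the repository layout, and the PSF file path is a placeholder taken from the doctests.

from py_spectre.psf import PSFReader

psf = PSFReader('./test/psf/srcSweep')   # placeholder path from the doctests
psf.open()
print psf.getValueNames()                # e.g. ('VOUT', 'VIN', 'R0')
print psf.getSweepParamValues(0)         # numpy array of swept parameter values
print psf.getValuesByName("VOUT")        # trace values as a numpy array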
unix-beard/matasano
set1/detect_single_character_xor/detect_single_character_xor.py
1
1514
#!/usr/bin/env python3 ################################################################################ # The matasano crypto challenges # http://cryptopals.com/sets/1/challenges/4/ # Set 1 Challenge 4 # Detect single-character XOR ################################################################################ # One of the 60-character strings in the input file has been encrypted # by single-character XOR. Find it. # Key: int=53, char='5' # Message: Now that the party is jumping # # NOTE: This implementation is strictly sequential ################################################################################ import sys import string def find_key(key, tuple_): return chr(int(tuple_[0] + tuple_[1], base=16) ^ key) def decode_with_key(key, s): decoded_msg = '' for t in zip(s[0::2], s[1::2]): decoded_msg += find_key(key, t) if len([c for c in decoded_msg if c in string.ascii_letters + ' \n']) == len(decoded_msg): print('[*] Trying the key: int: {0}, char: {1}'.format(key, chr(key))) print('Decoded message: {0}'.format(decoded_msg)) def decode(s): print('Decoding [{0}]'.format(s)) for key in range(0, 256): decode_with_key(key, s) def remove_eol(s): """Removes trailing '\n' if there is one""" return s[0:len(s) - 1] if s[len(s) - 1] == '\n' else s def main(): with open(sys.argv[1], 'r') as f: for encoded_str in f: decode(remove_eol(encoded_str)) if __name__ == '__main__': main()
mit
4,528,978,805,032,337,400
30.541667
94
0.509247
false
3.488479
false
false
false
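The matasano script above accepts a decoded string only when every character is a letter, space, or newline. Below is a hedged sketch of the same brute force written over Python 3 bytes; the helper names score_english and brute_force_single_xor are mine, not part of the challenge code.

import string

def score_english(text):
    # Fraction of characters that look like English plaintext (letters, space, newline).
    ok = sum(c in string.ascii_letters + ' \n' for c in text)
    return ok / max(len(text), 1)

def brute_force_single_xor(hex_str):
    raw = bytes.fromhex(hex_str)
    # Try every single-byte key and keep the most plausible plaintext.
    return max(
        ((key, bytes(b ^ key for b in raw)) for key in range(256)),
        key=lambda kv: score_english(kv[1].decode('latin-1')),
    )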
looker/sdk-examples
python/soft_delete_dashboard.py
1
1367
import sys
from typing import Sequence

import exceptions
from looker_sdk import client, error, models

sdk = client.setup("../looker.ini")


def main():
    """Given a dashboard title, get the ids of all dashboards with matching titles
    and move them to trash.

    $ python soft_delete_dashboard.py "An Unused Dashboard"
    """
    dashboard_title = sys.argv[1] if len(sys.argv) > 1 else ""

    if not dashboard_title:
        raise exceptions.ArgumentError("Please provide: <dashboardTitle>")

    dashboards = get_dashboards(dashboard_title)
    delete_dashboards(dashboards)


def get_dashboards(title: str) -> Sequence[models.Dashboard]:
    """Get dashboards with matching title"""
    lc_title = title.lower()
    results = sdk.search_dashboards(title=lc_title)
    if not results:
        raise exceptions.NotFoundError(f'dashboard "{title}" not found')
    assert isinstance(results, Sequence)
    return results


def delete_dashboards(dashboards: Sequence[models.Dashboard]):
    """Soft delete dashboards"""
    for dashboard in dashboards:
        try:
            assert dashboard.id
            sdk.delete_dashboard(dashboard.id)
        except error.SDKError:
            print(f"Failed to delete dashboard with id {dashboard.id}.")
        else:
            print(f'"{dashboard.title}" (id {dashboard.id}) has been moved to trash.')


main()
mit
-5,936,345,032,288,775,000
26.897959
86
0.675933
false
4.080597
false
false
false
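The Looker script above is run as `python soft_delete_dashboard.py "<dashboard title>"`. A condensed sketch of the same search-then-delete pattern follows; the looker.ini path and the example title are placeholders, and only SDK calls already used above (search_dashboards, delete_dashboard) appear.

# Sketch only: condensed version of the soft-delete flow above.
from looker_sdk import client

sdk = client.setup("../looker.ini")  # placeholder path

def soft_delete_by_title(title):
    for dashboard in sdk.search_dashboards(title=title.lower()) or []:
        sdk.delete_dashboard(dashboard.id)  # soft delete: moves it to trash
        print(f'"{dashboard.title}" (id {dashboard.id}) moved to trash')

# soft_delete_by_title("An Unused Dashboard")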
robmcmullen/peppy
peppy/major_modes/fortran_95.py
1
1742
# peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Fortran 95 programming language editing support.

Major mode for editing Fortran 95 files.

Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes.  If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""

import os

import wx
import wx.stc

from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode


class Fortran95Mode(FundamentalMode):
    """Stub major mode for editing Fortran 95 files.

    This major mode has been automatically generated and is a boilerplate/
    placeholder major mode. Enhancements to this mode are appreciated!
    """
    keyword = 'Fortran 95'
    editra_synonym = 'Fortran 95'
    stc_lexer_id = wx.stc.STC_LEX_FORTRAN
    start_line_comment = '!'
    end_line_comment = ''
    icon = 'icons/page_white.png'

    default_classprefs = (
        StrParam('extensions', 'f2k f90 f95 fpp', fullwidth=True),
        StrParam('keyword_set_0', unique_keywords[38], hidden=False, fullwidth=True),
        StrParam('keyword_set_1', unique_keywords[39], hidden=False, fullwidth=True),
        StrParam('keyword_set_2', unique_keywords[40], hidden=False, fullwidth=True),
    )


class Fortran95ModePlugin(IPeppyPlugin):
    """Plugin to register modes and user interface for Fortran 95
    """
    def getMajorModes(self):
        yield Fortran95Mode
gpl-2.0
-6,352,703,335,974,964,000
32.5
85
0.723307
false
3.629167
false
false
false
awacha/cct
cct/qtgui/devices/motor/movemotor/movemotor.py
1
4527
import logging from PyQt5 import QtWidgets, QtGui from .movemotor_ui import Ui_Form from ....core.mixins import ToolWindow from .....core.devices import Motor from .....core.instrument.privileges import PRIV_MOVEMOTORS logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class MoveMotor(QtWidgets.QWidget, Ui_Form, ToolWindow): required_privilege = PRIV_MOVEMOTORS def __init__(self, *args, **kwargs): credo = kwargs.pop('credo') self.motorname = kwargs.pop('motorname') QtWidgets.QWidget.__init__(self, *args, **kwargs) self.setupToolWindow(credo, required_devices=['Motor_' + self.motorname]) self._start_requested = False self.setupUi(self) def setupUi(self, Form): Ui_Form.setupUi(self, Form) self.motorComboBox.addItems(sorted(self.credo.motors.keys())) self.motorComboBox.currentTextChanged.connect(self.onMotorSelected) self.movePushButton.clicked.connect(self.onMove) self.motorComboBox.setCurrentIndex(self.motorComboBox.findText(self.motorname)) self.relativeCheckBox.toggled.connect(self.onRelativeChanged) self.targetDoubleSpinBox.editingFinished.connect(self.onEditingFinished) self.onMotorSelected() self.adjustSize() def onEditingFinished(self): if self.targetDoubleSpinBox.hasFocus(): self.onMove() def onRelativeChanged(self): self.onMotorPositionChange(self.motor(), self.motor().where()) if self.relativeCheckBox.isChecked(): self.targetDoubleSpinBox.setValue(0) else: self.targetDoubleSpinBox.setValue(self.motor().where()) self.adjustSize() def setIdle(self): super().setIdle() self.movePushButton.setText('Move') icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/icons/motor.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.movePushButton.setIcon(icon) self.targetDoubleSpinBox.setEnabled(True) self.motorComboBox.setEnabled(True) self.relativeCheckBox.setEnabled(True) self.movePushButton.setEnabled(True) self._start_requested = False def setBusy(self): self.movePushButton.setText('Stop') self.movePushButton.setIcon(QtGui.QIcon.fromTheme('process-stop')) self.targetDoubleSpinBox.setEnabled(False) self.motorComboBox.setEnabled(False) self.relativeCheckBox.setEnabled(False) self.movePushButton.setEnabled(True) super().setBusy() def motor(self) -> Motor: return self.credo.motors[self.motorComboBox.currentText()] def onMove(self): if self.movePushButton.text() == 'Move': self.movePushButton.setEnabled(False) self._start_requested = True if self.relativeCheckBox.isChecked(): self.motor().moverel(self.targetDoubleSpinBox.value()) else: self.motor().moveto(self.targetDoubleSpinBox.value()) else: self.movePushButton.setEnabled(False) self.motor().stop() def onMotorStart(self, motor: Motor): if self._start_requested: self.setBusy() def onMotorSelected(self): self.setWindowTitle('Move motor {}'.format(self.motorComboBox.currentText())) for d in self.required_devices: self.unrequireDevice(d) self.required_devices = ['Motor_' + self.motorComboBox.currentText()] self.requireDevice(self.required_devices[0]) motor = self.credo.motors[self.motorComboBox.currentText()] self.onMotorPositionChange(motor, motor.where()) if self.relativeCheckBox.isChecked(): self.targetDoubleSpinBox.setValue(0.0) else: self.targetDoubleSpinBox.setValue(motor.where()) def onMotorPositionChange(self, motor: Motor, newposition: float): self.positionLabel.setText('<b>{:.4f}</b>'.format(newposition)) left = motor.get_variable('softleft') right = motor.get_variable('softright') if self.relativeCheckBox.isChecked(): left -= newposition right -= newposition self.targetDoubleSpinBox.setMinimum(left) 
self.targetDoubleSpinBox.setMaximum(right) self.leftLimitLabel.setText('{:.4f}'.format(left)) self.rightLimitLabel.setText('{:.4f}'.format(right)) self.adjustSize() def onMotorStop(self, motor: Motor, targetpositionreached: bool): self.setIdle()
bsd-3-clause
2,612,978,641,138,935,300
38.365217
95
0.664237
false
4.056452
false
false
false
tracyjacks/PyMetWeather
pymetweather/pymetweather.py
1
13941
import curses from datetime import date, timedelta import locale from textwrap import fill from pymetweather.forecasts import WeatherForecast from pymetweather.get_args import get_command_line_args, get_config_args locale.setlocale(locale.LC_ALL, '') class WeatherPrinter(object): def __init__(self, forecast, screen_width): self.fcs = forecast self.cols = [ (['Time'], 5, '{$:02}:00'), (['Conditions'], 22, '{W}'), (['Precipitation', 'probability'], 15, '{Pp:>3} %'), (['Temperature', '(Feels Like)'], 14, '{T:>2} {F} °C'), (['Wind Speed', '(Gust)'], 16, '{S:>2} {G} mph'), (['Wind', 'Direction'], 12, '{D:>3}'), (['Relative', 'Humidity'], 10, '{H} %'), (['Visibility'], 12, '{V}'), (['UV', 'Index'], 7, '{U}')] self.daily_cols = [ (['Day'], 13, '{$}', '{$}'), (['Conditions'], 22, '{W}', '{W}'), (['Precipitation', 'probability'], 15, '{PPd:>3} %', '{PPn:>3} %'), (['Max day/', 'Min night', 'Temperature', '(Feels like)'], 14, '{Dm:>2} {FDm} °C', '{Nm:>2} {FNm} °C'), (['Wind Speed', '(Gust)'], 16, '{S:>2} {Gn} mph', '{S:>2} {Gm} mph'), (['Wind', 'Direction'], 12, '{D:>3}', '{D:>3}'), (['Relative', 'Humidity'], 10, '{Hn} %', '{Hm} %'), (['Visibility'], 12, '{V}', '{V}')] self.top_pad = curses.newpad(2000, 500) self.tab_pad = curses.newpad(2000, 500) self.bottom_bar = curses.newpad(1, 500) self.help_screen_pad = curses.newpad(500, 500) self.top_maxy = 0 self.tab_maxy = 0 self.tab_maxx = 0 self.screen_width = screen_width self.print_bottom_bar() self.setup_help() @staticmethod def addustr(win, text, *args): win.addstr(text.encode('utf-8'), *args) def print_help_screen(self, top_only): if not top_only: self.addustr(self.tab_pad, self.help_string) self.tab_maxy = self.help_maxy self.tab_maxx = self.help_maxx def setup_help(self): help = [ ('q', 'Quit'), ('?', 'Show this help'), ('t', "Today's weather"), ('d', 'Five day summary'), ('0', "Today's weather"), ('1', "Tomorrow's weather"), ('2', 'Weather for 2 days later'), ('3', 'Weather for 3 days later'), ('4', 'Weather for 4 days later'), ('5–9', 'UK outlook for the next month'), ('l', 'UK outlook for the next month'), ('left arrow', 'scroll left'), ('right arrow', 'scroll left'), ('up arrow', 'scroll up'), ('down arrow', 'scroll down'), ] c1width = max([len(k[0]) for k in help]) c2width = max([len(k[1]) for k in help]) self.help_string = '' for h in help: self.help_string += h[0].ljust(c1width + 1) + ' : ' + h[1] + '\n' self.help_string = self.help_string.strip('\n') self.help_maxy = len(help) - 1 self.help_maxx = c1width + c2width - 1 def print_bottom_bar(self): self.addustr( self.bottom_bar, '?: help q: quit t: today ' 'd: 5 day summary 1–4: days 1 to 4 ' 'l: longterm'.ljust(499), curses.A_REVERSE | curses.A_BOLD) def print_longer_term_weather(self): regf1 = self.fcs.reg_fcs[2]['Paragraph'] regf2 = self.fcs.reg_fcs[3]['Paragraph'] self.addustr( self.top_pad, self.wrap_text(regf1['title']), curses.A_BOLD) self.addustr(self.top_pad, '\n' + self.wrap_text(regf1['$']) + '\n\n') self.addustr( self.top_pad, self.wrap_text(regf2['title']), curses.A_BOLD) self.addustr(self.top_pad, '\n' + self.wrap_text(regf2['$'])) self.top_maxy = self.top_pad.getyx()[0] + 1 def wrap_text(self, text): return fill(text, self.screen_width) def print_hourly_top(self, n_day, day): title = 'Weather for {}, {}'.format( self.fcs.site_name, day.strftime('%A %d %B %Y')) self.addustr(self.top_pad, self.wrap_text(title) + '\n', curses.A_BOLD) regfindex = 0 regf = self.fcs.reg_fcs[0]['Paragraph'] if n_day == 0: if 'Headline' in regf[regfindex]['title']: self.addustr(self.top_pad, 
self.wrap_text(regf[regfindex]['$']) + '\n\n') regfindex += 1 if 'Today' in regf[regfindex]['title']: today_text = self.wrap_text('Today: ' + regf[regfindex]['$']) self.addustr(self.top_pad, today_text[:7], curses.A_BOLD) self.addustr(self.top_pad, today_text[7:] + '\n\n') regfindex += 1 if 'Tonight' in regf[regfindex]['title']: tonight_text = self.wrap_text(regf[regfindex]['title'] + ' ' + regf[regfindex]['$']) lent = len(regf[regfindex]['title']) self.addustr(self.top_pad, tonight_text[:lent], curses.A_BOLD) self.addustr(self.top_pad, tonight_text[lent:] + '\n\n') regfindex += 1 elif n_day == 1: for regfindex in range(len(regf)): if day.strftime('%A') in regf[regfindex]['title']: self.addustr( self.top_pad, self.wrap_text(regf[regfindex]['$']) + '\n\n') break else: regf = self.fcs.reg_fcs[1]['Paragraph'] outlook = self.wrap_text(regf['title'] + ' ' + regf['$']) lent = len(regf['title']) + 1 self.addustr(self.top_pad, '\n' + outlook[:lent], curses.A_BOLD) self.addustr(self.top_pad, outlook[lent:] + '\n\n') self.top_maxy = self.top_pad.getyx()[0] + 1 def print_hourly_tab(self, n_day, period): width_counter = 0 for c in self.cols: for i, head in enumerate(c[0]): head_text = '{:^{}}'.format(head, c[1]) self.tab_pad.move(i, width_counter) self.addustr(self.tab_pad, head_text, curses.A_BOLD) width_counter += c[1] top_row = ( self.tab_pad.getyx()[0] + max([len(c[0]) for c in self.cols]) - 1) for i, rep in enumerate(period['Rep']): width_counter = 0 for c in self.cols: cell_text = '{:^{}}'.format(c[2].format(**rep), c[1]) self.tab_pad.move(top_row + i, width_counter) self.addustr(self.tab_pad, cell_text) width_counter += c[1] self.tab_maxy = self.tab_pad.getyx()[0] self.tab_maxx = sum([c[1] for c in self.cols]) - 2 def print_hourly_weather(self, n_day, top_only=False): day = date.today() + timedelta(n_day) period = self.fcs.hourly_fcs['Period'][n_day] assert period['value'] == day.strftime('%Y-%m-%dZ') self.print_hourly_top(n_day, day) if not top_only: self.print_hourly_tab(n_day, period) def print_weather_brief(self, top_only=False): period = self.fcs.daily_fcs['Period'] width_counter = 0 for c in self.daily_cols: for i, head in enumerate(c[0]): head_text = '{:^{}}'.format(head, c[1]) self.tab_pad.move(i, width_counter) self.addustr(self.tab_pad, head_text, curses.A_BOLD) width_counter += c[1] top_row = ( self.tab_pad.getyx()[0] + max([len(c[0]) for c in self.daily_cols])) c = self.daily_cols[0] for i, rep in enumerate(period): cell_text = '{:<{}} '.format(rep['value'], c[1] - 3) self.tab_pad.move(top_row + i * 4, 0) self.addustr(self.tab_pad, cell_text) cell_text = '{:>{}} '.format( c[2].format(**rep['Rep'][0]), c[1] - 3) self.tab_pad.move(top_row + i * 4 + 1, 0) self.addustr(self.tab_pad, cell_text) cell_text = '{:>{}} '.format( c[3].format(**rep['Rep'][1]), c[1] - 3) self.tab_pad.move(top_row + i * 4 + 2, 0) self.addustr(self.tab_pad, cell_text) for i, rep in enumerate(period): rep = rep['Rep'] width_counter = self.daily_cols[0][1] for c in self.daily_cols[1:]: cell_text = '{:^{}}'.format(c[2].format(**rep[0]), c[1]) self.tab_pad.move(top_row + i * 4 + 1, width_counter) self.addustr(self.tab_pad, cell_text) cell_text = '{:^{}}'.format(c[3].format(**rep[1]), c[1]) self.tab_pad.move(top_row + i * 4 + 2, width_counter) self.addustr(self.tab_pad, cell_text) width_counter += c[1] self.tab_maxy = self.tab_pad.getyx()[0] self.tab_maxx = sum([c[1] for c in self.daily_cols]) - 2 def print_screen(self, screen, screen_width=None, top_only=False): if screen_width is not None: self.screen_width = 
screen_width self.top_pad.clear() self.top_maxy = 0 if not top_only: self.tab_maxy = 0 self.tab_maxx = 0 self.tab_pad.clear() if screen in range(0, 5): self.print_hourly_weather(screen, top_only) elif screen == 8: self.print_longer_term_weather() elif screen == 7: self.print_weather_brief(top_only) elif screen == 9: self.print_help_screen(top_only) class WeatherApp(object): key_map = { '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 8, '6': 8, '7': 8, '8': 8, '9': 9, 't': 0, 'l': 8, 'd': 7, 'b': 7, '?': 9} def __init__(self, stdscr, fcs, start_screen=0): self.stdscr = stdscr curses.curs_set(0) curses.use_default_colors() self.fcs = fcs self.scrolly = 0 self.scrollx = 0 self.maxy = 0 self.maxx = 0 self.y = self.stdscr.getmaxyx()[0] - 1 self.x = self.stdscr.getmaxyx()[1] - 1 self.printer = WeatherPrinter(self.fcs, self.x + 1) self.print_screen(start_screen) def print_resize(self): self.y = self.stdscr.getmaxyx()[0] - 1 self.x = self.stdscr.getmaxyx()[1] - 1 self.printer.print_screen(self.screen_showing, self.x + 1, True) self.maxx = max(self.printer.tab_maxx, self.x - 1) self.maxy = self.printer.tab_maxy + self.printer.top_maxy if self.y > (self.maxy - self.scrolly): self.scrolly = max(self.maxy - (self.y - 1), 0) if self.x > (self.maxx - self.scrollx): self.scrollx = max(self.maxx - (self.x - 1), 0) self.draw_screen() def print_screen(self, screen): self.screen_showing = screen self.scrolly = 0 self.scrollx = 0 self.printer.print_screen(self.screen_showing) self.maxy = self.printer.tab_maxy + self.printer.top_maxy self.maxx = max(self.printer.tab_maxx, self.x - 1) self.draw_screen() def draw_screen(self): self.stdscr.clear() self.stdscr.refresh() top_y = self.printer.top_maxy try: assert self.y == self.stdscr.getmaxyx()[0] - 1 assert self.x == self.stdscr.getmaxyx()[1] - 1 except AssertionError: self.print_resize() return self.printer.top_pad.noutrefresh( self.scrolly, 0, 0, 0, min(top_y, self.y), self.x) if self.y - (top_y - self.scrolly) > 1: self.printer.tab_pad.noutrefresh( max(0, self.scrolly - top_y), self.scrollx, top_y - self.scrolly, 0, self.y, self.x) self.printer.bottom_bar.noutrefresh( 0, 0, self.y, 0, self.y, self.x) try: assert self.y == self.stdscr.getmaxyx()[0] - 1 assert self.x == self.stdscr.getmaxyx()[1] - 1 except AssertionError: self.print_resize() return with open('/tmp/log', 'a') as f: f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format( self.maxy, self.y, self.scrolly, self.maxx, self.x, self.scrollx)) curses.doupdate() def main_loop(self): while True: c = self.stdscr.getkey() if c == 'q': return elif c in self.key_map and self.screen_showing != self.key_map[c]: self.print_screen(self.key_map[c]) elif c == 'KEY_RESIZE': self.print_resize() elif c == 'KEY_DOWN': if self.scrolly + self.y - 1 < self.maxy: self.scrolly += 1 self.draw_screen() elif c == 'KEY_UP' and self.scrolly != 0: self.scrolly -= 1 self.draw_screen() elif c == 'KEY_LEFT' and self.scrollx != 0: self.scrollx -= 1 self.draw_screen() elif c == 'KEY_RIGHT': if self.scrollx + self.x - 1 < self.maxx: self.scrollx += 1 self.draw_screen() def run_curses_app(screen, fcs): wap = WeatherApp(screen, fcs) wap.main_loop() def run_app(args): fcs = WeatherForecast(args['api_key'], args['location'], args['datadir']) if args['quiet_update']: fcs.load(True) return fcs.load(args['dont_update']) curses.wrapper(run_curses_app, fcs) def main(): args = get_config_args() args.update(get_command_line_args()) run_app(args)
gpl-2.0
-7,420,689,951,984,828,000
35.413613
79
0.492955
false
3.184524
false
false
false
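pymetweather above draws into curses pads and scrolls them by changing the offsets passed to noutrefresh. A minimal, self-contained pad-scrolling sketch is below; it is a simplification under assumed terminal behaviour, not the application's own logic.

import curses

def show_scrollable(stdscr, lines):
    # Build a pad at least as large as the screen, write one line per row,
    # then let the arrow keys change the top row passed to pad.refresh().
    maxy, maxx = stdscr.getmaxyx()
    pad = curses.newpad(max(len(lines), maxy) + 1, max(200, maxx))
    for i, line in enumerate(lines):
        pad.addstr(i, 0, line[:199])
    top = 0
    while True:
        pad.refresh(top, 0, 0, 0, maxy - 1, maxx - 1)
        key = stdscr.getkey()
        if key == 'q':
            return
        if key == 'KEY_DOWN' and top + maxy < len(lines):
            top += 1
        elif key == 'KEY_UP' and top > 0:
            top -= 1

# curses.wrapper(show_scrollable, ['line %d' % i for i in range(100)])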
jcarva/digital_image_processing_assignments
spatial_domain/python/task1_6.py
1
1722
# coding=UTF-8
# 1.6. Thresholding applied to Y, with threshold m and two options: a) m
# chosen by the user; b) m = mean value of the Y band;

import numpy as np
import utils
import color


def main():
    image = utils.load_image('lenna.png')
    yiq_image = color.rgb2yiq(image)
    grayscale_image = yiq_image[:, :, 2]  # Y

    threshold_value = 255 * 0.2
    mean_value = np.mean(grayscale_image)

    threshold_user_image = _segment(grayscale_image, threshold_value)
    original_threshold_user_image = np.copy(yiq_image)
    original_threshold_user_image[:, :, 2] = threshold_user_image
    original_threshold_user_image = color.yiq2rgb(original_threshold_user_image)

    threshold_mean_image = _segment(grayscale_image, mean_value)
    original_threshold_mean_image = np.copy(yiq_image)
    original_threshold_mean_image[:, :, 2] = threshold_mean_image
    original_threshold_mean_image = color.yiq2rgb(original_threshold_mean_image)

    utils.display_single_image('Original Image', image)
    utils.display_single_image('YIQ Image', yiq_image)
    utils.display_single_image('Y Channel', grayscale_image)
    utils.display_single_image('Y Threshold (User ' + str(threshold_value) + ')', threshold_user_image)
    utils.display_single_image('Back to Original (User ' + str(threshold_value) + ')', original_threshold_user_image)
    utils.display_single_image('Y Threshold (Mean ' + str(mean_value) + ')', threshold_mean_image)
    utils.display_single_image('Back to Original (Mean ' + str(mean_value) + ')', original_threshold_mean_image)

    utils.wait_key_and_destroy_windows()


def _segment(image, m):
    output = (image >= m) * 255
    return output


if __name__ == "__main__":
    main()
gpl-3.0
-4,424,867,651,343,764,500
34.770833
117
0.689977
false
3.171904
false
false
false
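The _segment helper above is a one-line NumPy comparison. A standalone illustration of both thresholding options on a synthetic array, with the image replaced by random data:

import numpy as np

y = np.random.rand(4, 4) * 255            # stand-in for the Y channel
fixed = (y >= 255 * 0.2) * 255            # option (a): user-chosen threshold m
mean_based = (y >= np.mean(y)) * 255      # option (b): m = mean of the Y band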
PXke/invenio
invenio/legacy/websubmit/functions/Create_Modify_Interface.py
1
12922
## This file is part of Invenio. ## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ This is the Create_Modify_Interface function (along with its helpers). It is used by WebSubmit for the "Modify Bibliographic Information" action. """ __revision__ = "$Id$" import os import re import time import pprint from invenio.legacy.dbquery import run_sql from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError from invenio.legacy.websubmit.functions.Retrieve_Data import Get_Field from invenio.ext.logging import register_exception def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""): """Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory. Delete the text file after having read-in its value. This function is called on the reload of the modify-record page. This way, the field in question can be populated with the value last entered by the user (before reload), instead of always being populated with the value still found in the DB. """ fld_val = "" if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK): fp = open( "%s/%s" % (cur_dir, fld), "r" ) fld_val = fp.read() fp.close() try: os.unlink("%s/%s"%(cur_dir, fld)) except OSError: # Cannot unlink file - ignore, let WebSubmit main handle this pass fld_val = fld_val.strip() return fld_val def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid): """Read a field's value from the record stored in the DB. This function is called when the Create_Modify_Interface function is called for the first time when modifying a given record, and field values must be retrieved from the database. """ fld_val = "" if fieldcode != "": for next_field_code in [x.strip() for x in fieldcode.split(",")]: fld_val += "%s\n" % Get_Field(next_field_code, recid) fld_val = fld_val.rstrip('\n') return fld_val def Create_Modify_Interface_transform_date(fld_val): """Accept a field's value as a string. If the value is a date in one of the following formats: DD Mon YYYY (e.g. 23 Apr 2005) YYYY-MM-DD (e.g. 2005-04-23) ...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005). """ if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None: try: fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y")) except (ValueError, TypeError): # bad date format: pass elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None: try: fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d")) except (ValueError,TypeError): # bad date format: pass return fld_val def Create_Modify_Interface(parameters, curdir, form, user_info=None): """ Create an interface for the modification of a document, based on the fields that the user has chosen to modify. 
This avoids having to redefine a submission page for the modifications, but rely on the elements already defined for the initial submission i.e. SBI action (The only page that needs to be built for the modification is the page letting the user specify a document to modify). This function should be added at step 1 of your modification workflow, after the functions that retrieves report number and record id (Get_Report_Number, Get_Recid). Functions at step 2 are the one executed upon successful submission of the form. Create_Modify_Interface expects the following parameters: * "fieldnameMBI" - the name of a text file in the submission working directory that contains a list of the names of the WebSubmit fields to include in the Modification interface. These field names are separated by"\n" or "+". Given the list of WebSubmit fields to be included in the modification interface, the values for each field are retrieved for the given record (by way of each WebSubmit field being configured with a MARC Code in the WebSubmit database). An HTML FORM is then created. This form allows a user to modify certain field values for a record. The file referenced by 'fieldnameMBI' is usually generated from a multiple select form field): users can then select one or several fields to modify Note that the function will display WebSubmit Response elements, but will not be able to set an initial value: this must be done by the Response element iteself. Additionally the function creates an internal field named 'Create_Modify_Interface_DONE' on the interface, that can be retrieved in curdir after the form has been submitted. This flag is an indicator for the function that displayed values should not be retrieved from the database, but from the submitted values (in case the page is reloaded). You can also rely on this value when building your WebSubmit Response element in order to retrieve value either from the record, or from the submission directory. 
""" global sysno,rn t = "" # variables declaration fieldname = parameters['fieldnameMBI'] # Path of file containing fields to modify the_globals = { 'doctype' : doctype, 'action' : action, 'act' : action, ## for backward compatibility 'step' : step, 'access' : access, 'ln' : ln, 'curdir' : curdir, 'uid' : user_info['uid'], 'uid_email' : user_info['email'], 'rn' : rn, 'last_step' : last_step, 'action_score' : action_score, '__websubmit_in_jail__' : True, 'form': form, 'sysno': sysno, 'user_info' : user_info, '__builtins__' : globals()['__builtins__'], 'Request_Print': Request_Print } if os.path.exists("%s/%s" % (curdir, fieldname)): fp = open( "%s/%s" % (curdir, fieldname), "r" ) fieldstext = fp.read() fp.close() fieldstext = re.sub("\+","\n", fieldstext) fields = fieldstext.split("\n") else: res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,)) if len(res) == 1: fields = res[0][0].replace(" ", "") fields = re.findall("<optionvalue=.*>", fields) regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""") fields = [regexp.search(x) for x in fields] fields = [x.group("value") for x in fields if x is not None] fields = [x for x in fields if x not in ("Select", "select")] else: raise InvenioWebSubmitFunctionError("cannot find fields to modify") #output some text t = t+"<CENTER bgcolor=\"white\">The document <B>%s</B> has been found in the database.</CENTER><br />Please modify the following fields:<br />Then press the 'END' button at the bottom of the page<br />\n" % rn for field in fields: subfield = "" value = "" marccode = "" text = "" # retrieve and display the modification text t = t + "<FONT color=\"darkblue\">\n" res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,)) if len(res)>0: t = t + "<small>%s</small> </FONT>\n" % res[0][0] # retrieve the marc code associated with the field res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,)) if len(res) > 0: marccode = res[0][0] # then retrieve the previous value of the field if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")): # Page has been reloaded - get field value from text file on server, not from DB record value = Create_Modify_Interface_getfieldval_fromfile(curdir, field) else: # First call to page - get field value from DB record value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno) # If field is a date value, transform date into format DD/MM/YYYY: value = Create_Modify_Interface_transform_date(value) res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,)) if len(res) > 0: element_type = res[0][3] numcols = res[0][6] numrows = res[0][5] size = res[0][4] maxlength = res[0][7] val = res[0][8] fidesc = res[0][9] if element_type == "T": text = "<TEXTAREA name=\"%s\" rows=%s cols=%s wrap>%s</TEXTAREA>" % (field, numrows, numcols, value) elif element_type == "F": text = "<INPUT TYPE=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength) elif element_type == "I": value = re.sub("[\n\r\t]+", "", value) text = "<INPUT name=\"%s\" size=%s value=\"%s\"> " % (field, size, val) text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value) elif element_type == "H": text = "<INPUT type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val) text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value) elif element_type == "S": values = re.split("[\n\r]+", value) text = fidesc if re.search("%s\[\]" % field, fidesc): multipletext = "[]" else: multipletext = 
"" if len(values) > 0 and not(len(values) == 1 and values[0] == ""): text += "<SCRIPT>\n" text += "var i = 0;\n" text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext) text += "max = el.length;\n" for val in values: text += "var found = 0;\n" text += "var i=0;\n" text += "while (i != max) {\n" text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % (val, val) text += " el.options[i].selected = true;\n" text += " found = 1;\n" text += " }\n" text += " i=i+1;\n" text += "}\n" #text += "if (found == 0) {\n" #text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n" #text += "}\n" text += "</SCRIPT>\n" elif element_type == "D": text = fidesc elif element_type == "R": try: co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec") ## Note this exec is safe WRT global variable because the ## Create_Modify_Interface has already been parsed by ## execfile within a protected environment. the_globals['text'] = '' exec co in the_globals text = the_globals['text'] except: msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals())) register_exception(req=None, alert_admin=True, prefix=msg) raise InvenioWebSubmitFunctionError(msg) else: text = "%s: unknown field type" % field t = t + "<small>%s</small>" % text # output our flag field t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />' # output some more text t = t + "<br /><br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"END\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>" return t
gpl-2.0
429,905,731,939,165,250
46.682657
252
0.580019
false
3.946854
false
false
false
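Create_Modify_Interface_transform_date above normalizes two date spellings into DD/MM/YYYY. The strptime/strftime round trip it relies on looks like this in plain Python, outside Invenio:

import time

print(time.strftime("%d/%m/%Y", time.strptime("23 Apr 2005", "%d %b %Y")))  # 23/04/2005
print(time.strftime("%d/%m/%Y", time.strptime("2005-04-23", "%Y-%m-%d")))   # 23/04/2005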
quonb/atom-generator
atom_generator/video.py
1
2028
import re class YouTube(object): def __init__(self, url=None): self._video_id = self._extract_id(url) def __call__(self, url=False): if url is None or url: self._video_id = self._extract_id(url) return self._video_id def _extract_id(self, url=None): """Extract youtube video ID Based on `youtube_dl` code """ if not url: return None YOUTUBE_URL = r"""^ (?: (?:https?://)? # http(s):// (optional) (?:(?:(?: (?:\w+\.)?youtube(?:-nocookie)?\.com/| tube\.majestyc\.net/| youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: (?:(?:v|embed|e)/)| # v/ or embed/ or e/ (?: # or the v= param in all its forms (?: (?:watch|movie)(?:_popup)?(?:\.php)? )? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx) v= ) ))| youtu\.be/ # just youtu.be/xxxx ) )? # all until now is optional -> you can pass the naked ID ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?(1).+)? # if we found the ID, everything can follow $""" video_id = re.match(YOUTUBE_URL, str(url), re.VERBOSE) return video_id and video_id.group(1) def thumbnail(self): return self._video_id and "http://i.ytimg.com/vi/%s/0.jpg" % self._video_id def video(self): return self._video_id and "http://www.youtube.com/watch?v=%s" % self._video_id
apache-2.0
-9,203,827,087,365,975,000
37.264151
97
0.446746
false
3.840909
false
false
false
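A usage sketch for the YouTube helper above; the video id below is only a well-known public example, and any 11-character id would behave the same way.

yt = YouTube("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
print(yt())             # 'dQw4w9WgXcQ'
print(yt.thumbnail())   # 'http://i.ytimg.com/vi/dQw4w9WgXcQ/0.jpg'
print(yt.video())       # 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'
print(YouTube("https://youtu.be/dQw4w9WgXcQ")())  # short links match too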
nikolhm/Pokus
knownpaths.py
1
9583
import ctypes, sys from ctypes import windll, wintypes from uuid import UUID class GUID(ctypes.Structure): # [1] _fields_ = [ ("Data1", wintypes.DWORD), ("Data2", wintypes.WORD), ("Data3", wintypes.WORD), ("Data4", wintypes.BYTE * 8) ] def __init__(self, uuid_): ctypes.Structure.__init__(self) self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest = uuid_.fields for i in range(2, 8): self.Data4[i] = rest>>(8 - i - 1)*8 & 0xff class FOLDERID: # [2] AccountPictures = UUID('{008ca0b1-55b4-4c56-b8a8-4de4b299d3be}') AdminTools = UUID('{724EF170-A42D-4FEF-9F26-B60E846FBA4F}') ApplicationShortcuts = UUID('{A3918781-E5F2-4890-B3D9-A7E54332328C}') CameraRoll = UUID('{AB5FB87B-7CE2-4F83-915D-550846C9537B}') CDBurning = UUID('{9E52AB10-F80D-49DF-ACB8-4330F5687855}') CommonAdminTools = UUID('{D0384E7D-BAC3-4797-8F14-CBA229B392B5}') CommonOEMLinks = UUID('{C1BAE2D0-10DF-4334-BEDD-7AA20B227A9D}') CommonPrograms = UUID('{0139D44E-6AFE-49F2-8690-3DAFCAE6FFB8}') CommonStartMenu = UUID('{A4115719-D62E-491D-AA7C-E74B8BE3B067}') CommonStartup = UUID('{82A5EA35-D9CD-47C5-9629-E15D2F714E6E}') CommonTemplates = UUID('{B94237E7-57AC-4347-9151-B08C6C32D1F7}') Contacts = UUID('{56784854-C6CB-462b-8169-88E350ACB882}') Cookies = UUID('{2B0F765D-C0E9-4171-908E-08A611B84FF6}') Desktop = UUID('{B4BFCC3A-DB2C-424C-B029-7FE99A87C641}') DeviceMetadataStore = UUID('{5CE4A5E9-E4EB-479D-B89F-130C02886155}') Documents = UUID('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}') DocumentsLibrary = UUID('{7B0DB17D-9CD2-4A93-9733-46CC89022E7C}') Downloads = UUID('{374DE290-123F-4565-9164-39C4925E467B}') Favorites = UUID('{1777F761-68AD-4D8A-87BD-30B759FA33DD}') Fonts = UUID('{FD228CB7-AE11-4AE3-864C-16F3910AB8FE}') GameTasks = UUID('{054FAE61-4DD8-4787-80B6-090220C4B700}') History = UUID('{D9DC8A3B-B784-432E-A781-5A1130A75963}') ImplicitAppShortcuts = UUID('{BCB5256F-79F6-4CEE-B725-DC34E402FD46}') InternetCache = UUID('{352481E8-33BE-4251-BA85-6007CAEDCF9D}') Libraries = UUID('{1B3EA5DC-B587-4786-B4EF-BD1DC332AEAE}') Links = UUID('{bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968}') LocalAppData = UUID('{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}') LocalAppDataLow = UUID('{A520A1A4-1780-4FF6-BD18-167343C5AF16}') LocalizedResourcesDir = UUID('{2A00375E-224C-49DE-B8D1-440DF7EF3DDC}') Music = UUID('{4BD8D571-6D19-48D3-BE97-422220080E43}') MusicLibrary = UUID('{2112AB0A-C86A-4FFE-A368-0DE96E47012E}') NetHood = UUID('{C5ABBF53-E17F-4121-8900-86626FC2C973}') OriginalImages = UUID('{2C36C0AA-5812-4b87-BFD0-4CD0DFB19B39}') PhotoAlbums = UUID('{69D2CF90-FC33-4FB7-9A0C-EBB0F0FCB43C}') PicturesLibrary = UUID('{A990AE9F-A03B-4E80-94BC-9912D7504104}') Pictures = UUID('{33E28130-4E1E-4676-835A-98395C3BC3BB}') Playlists = UUID('{DE92C1C7-837F-4F69-A3BB-86E631204A23}') PrintHood = UUID('{9274BD8D-CFD1-41C3-B35E-B13F55A758F4}') Profile = UUID('{5E6C858F-0E22-4760-9AFE-EA3317B67173}') ProgramData = UUID('{62AB5D82-FDC1-4DC3-A9DD-070D1D495D97}') ProgramFiles = UUID('{905e63b6-c1bf-494e-b29c-65b732d3d21a}') ProgramFilesX64 = UUID('{6D809377-6AF0-444b-8957-A3773F02200E}') ProgramFilesX86 = UUID('{7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E}') ProgramFilesCommon = UUID('{F7F1ED05-9F6D-47A2-AAAE-29D317C6F066}') ProgramFilesCommonX64 = UUID('{6365D5A7-0F0D-45E5-87F6-0DA56B6A4F7D}') ProgramFilesCommonX86 = UUID('{DE974D24-D9C6-4D3E-BF91-F4455120B917}') Programs = UUID('{A77F5D77-2E2B-44C3-A6A2-ABA601054A51}') Public = UUID('{DFDF76A2-C82A-4D63-906A-5644AC457385}') PublicDesktop = UUID('{C4AA340D-F20F-4863-AFEF-F87EF2E6BA25}') PublicDocuments = 
UUID('{ED4824AF-DCE4-45A8-81E2-FC7965083634}') PublicDownloads = UUID('{3D644C9B-1FB8-4f30-9B45-F670235F79C0}') PublicGameTasks = UUID('{DEBF2536-E1A8-4c59-B6A2-414586476AEA}') PublicLibraries = UUID('{48DAF80B-E6CF-4F4E-B800-0E69D84EE384}') PublicMusic = UUID('{3214FAB5-9757-4298-BB61-92A9DEAA44FF}') PublicPictures = UUID('{B6EBFB86-6907-413C-9AF7-4FC2ABF07CC5}') PublicRingtones = UUID('{E555AB60-153B-4D17-9F04-A5FE99FC15EC}') PublicUserTiles = UUID('{0482af6c-08f1-4c34-8c90-e17ec98b1e17}') PublicVideos = UUID('{2400183A-6185-49FB-A2D8-4A392A602BA3}') QuickLaunch = UUID('{52a4f021-7b75-48a9-9f6b-4b87a210bc8f}') Recent = UUID('{AE50C081-EBD2-438A-8655-8A092E34987A}') RecordedTVLibrary = UUID('{1A6FDBA2-F42D-4358-A798-B74D745926C5}') ResourceDir = UUID('{8AD10C31-2ADB-4296-A8F7-E4701232C972}') Ringtones = UUID('{C870044B-F49E-4126-A9C3-B52A1FF411E8}') RoamingAppData = UUID('{3EB685DB-65F9-4CF6-A03A-E3EF65729F3D}') RoamedTileImages = UUID('{AAA8D5A5-F1D6-4259-BAA8-78E7EF60835E}') RoamingTiles = UUID('{00BCFC5A-ED94-4e48-96A1-3F6217F21990}') SampleMusic = UUID('{B250C668-F57D-4EE1-A63C-290EE7D1AA1F}') SamplePictures = UUID('{C4900540-2379-4C75-844B-64E6FAF8716B}') SamplePlaylists = UUID('{15CA69B3-30EE-49C1-ACE1-6B5EC372AFB5}') SampleVideos = UUID('{859EAD94-2E85-48AD-A71A-0969CB56A6CD}') SavedGames = UUID('{4C5C32FF-BB9D-43b0-B5B4-2D72E54EAAA4}') SavedSearches = UUID('{7d1d3a04-debb-4115-95cf-2f29da2920da}') Screenshots = UUID('{b7bede81-df94-4682-a7d8-57a52620b86f}') SearchHistory = UUID('{0D4C3DB6-03A3-462F-A0E6-08924C41B5D4}') SearchTemplates = UUID('{7E636BFE-DFA9-4D5E-B456-D7B39851D8A9}') SendTo = UUID('{8983036C-27C0-404B-8F08-102D10DCFD74}') SidebarDefaultParts = UUID('{7B396E54-9EC5-4300-BE0A-2482EBAE1A26}') SidebarParts = UUID('{A75D362E-50FC-4fb7-AC2C-A8BEAA314493}') SkyDrive = UUID('{A52BBA46-E9E1-435f-B3D9-28DAA648C0F6}') SkyDriveCameraRoll = UUID('{767E6811-49CB-4273-87C2-20F355E1085B}') SkyDriveDocuments = UUID('{24D89E24-2F19-4534-9DDE-6A6671FBB8FE}') SkyDrivePictures = UUID('{339719B5-8C47-4894-94C2-D8F77ADD44A6}') StartMenu = UUID('{625B53C3-AB48-4EC1-BA1F-A1EF4146FC19}') Startup = UUID('{B97D20BB-F46A-4C97-BA10-5E3608430854}') System = UUID('{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}') SystemX86 = UUID('{D65231B0-B2F1-4857-A4CE-A8E7C6EA7D27}') Templates = UUID('{A63293E8-664E-48DB-A079-DF759E0509F7}') UserPinned = UUID('{9E3995AB-1F9C-4F13-B827-48B24B6C7174}') UserProfiles = UUID('{0762D272-C50A-4BB0-A382-697DCD729B80}') UserProgramFiles = UUID('{5CD7AEE2-2219-4A67-B85D-6C9CE15660CB}') UserProgramFilesCommon = UUID('{BCBD3057-CA5C-4622-B42D-BC56DB0AE516}') Videos = UUID('{18989B1D-99B5-455B-841C-AB7C74E4DDFC}') VideosLibrary = UUID('{491E922F-5643-4AF4-A7EB-4E7A138D8174}') Windows = UUID('{F38BF404-1D43-42F2-9305-67DE0B28FC23}') class UserHandle: # [3] current = wintypes.HANDLE(0) common = wintypes.HANDLE(-1) _CoTaskMemFree = windll.ole32.CoTaskMemFree # [4] _CoTaskMemFree.restype= None _CoTaskMemFree.argtypes = [ctypes.c_void_p] _SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath # [5] [3] _SHGetKnownFolderPath.argtypes = [ ctypes.POINTER(GUID), wintypes.DWORD, wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p) ] class PathNotFoundException(Exception): pass def get_path(folderid, user_handle=UserHandle.common): fid = GUID(folderid) pPath = ctypes.c_wchar_p() S_OK = 0 if _SHGetKnownFolderPath(ctypes.byref(fid), 0, user_handle, ctypes.byref(pPath)) != S_OK: raise PathNotFoundException() path = pPath.value _CoTaskMemFree(pPath) return path if __name__ == '__main__': if 
len(sys.argv) < 2 or sys.argv[1] in ['-?', '/?']: print('python knownpaths.py FOLDERID {current|common}') sys.exit(0) try: folderid = getattr(FOLDERID, sys.argv[1]) except AttributeError: print('Unknown folder id "%s"' % sys.argv[1], file=sys.stderr) sys.exit(1) try: if len(sys.argv) == 2: print(get_path(folderid)) else: print(get_path(folderid, getattr(UserHandle, sys.argv[2]))) except PathNotFoundException: print('Folder not found "%s"' % ' '.join(sys.argv[1:]), file=sys.stderr) sys.exit(1) # [1] http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx # [2] http://msdn.microsoft.com/en-us/library/windows/desktop/dd378457.aspx # [3] http://msdn.microsoft.com/en-us/library/windows/desktop/bb762188.aspx # [4] http://msdn.microsoft.com/en-us/library/windows/desktop/ms680722.aspx # [5] http://www.themacaque.com/?p=954
mit
-3,393,526,364,773,057,500
57.432927
93
0.627883
false
2.45844
false
false
false
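A usage sketch for knownpaths.py above, mirroring its own __main__ block. Windows only; a missing folder raises PathNotFoundException, and the module name in the import is an assumption.

# Windows-only sketch, assuming the file above is importable as knownpaths.
import knownpaths as kp

print(kp.get_path(kp.FOLDERID.Documents))                            # default/common handle
print(kp.get_path(kp.FOLDERID.LocalAppData, kp.UserHandle.current))  # current user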
tanghaibao/jcvi
jcvi/projects/vanilla.py
1
11915
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Plotting scripts for the vanilla genome paper. """ import logging import sys from jcvi.apps.base import ActionDispatcher, OptionParser from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.base import get_number from jcvi.formats.bed import Bed from jcvi.graphics.base import normalize_axes, panel_labels, plt, savefig from jcvi.graphics.glyph import TextCircle from jcvi.graphics.synteny import Synteny, draw_gene_legend def main(): actions = ( # Chromosome painting since WGD ("ancestral", "paint 14 chromosomes following alpha WGD (requires data)"), # main figures in text ("ploidy", "plot vanilla synteny (requires data)"), # Composite phylogeny - tree and ks ("phylogeny", "create a composite figure with tree and ks"), ("tree", "create a separate figure with tree"), ("ks", "create a separate figure with ks"), # Composite synteny - wgd and microsynteny ("synteny", "create a composite figure with wgd and microsynteny"), ("wgd", "create separate figures with wgd"), ("microsynteny", "create separate figures with microsynteny"), ) p = ActionDispatcher(actions) p.dispatch(globals()) def phylogeny(args): """ %prog phylogeny treefile ks.layout Create a composite figure with (A) tree and (B) ks. """ from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree p = OptionParser(phylogeny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x12") (datafile, layoutfile) = args logging.debug("Load tree file `{0}`".format(datafile)) t, hpd = parse_tree(datafile) fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) ax1 = fig.add_axes([0, 0.4, 1, 0.6]) ax2 = fig.add_axes([0.12, 0.065, 0.8, 0.3]) margin, rmargin = 0.1, 0.2 # Left and right margin leafinfo = LeafInfoFile("leafinfo.csv").cache wgdinfo = WGDInfoFile("wgdinfo.csv").cache outgroup = "ginkgo" # Panel A draw_tree( ax1, t, hpd=hpd, margin=margin, rmargin=rmargin, supportcolor=None, internal=False, outgroup=outgroup, reroot=False, leafinfo=leafinfo, wgdinfo=wgdinfo, geoscale=True, ) from jcvi.apps.ks import Layout, KsPlot, KsFile # Panel B ks_min = 0.0 ks_max = 3.0 bins = 60 fill = False layout = Layout(layoutfile) print(layout, file=sys.stderr) kp = KsPlot(ax2, ks_max, bins, legendp="upper right") for lo in layout: data = KsFile(lo.ksfile) data = [x.ng_ks for x in data] data = [x for x in data if ks_min <= x <= ks_max] kp.add_data( data, lo.components, label=lo.label, color=lo.color, marker=lo.marker, fill=fill, fitted=False, kde=True, ) kp.draw(filename=None) normalize_axes([root, ax1]) labels = ((0.05, 0.95, "A"), (0.05, 0.4, "B")) panel_labels(root, labels) image_name = "phylogeny.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def tree(args): """ %prog tree treefile Create a tree figure. 
""" from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree p = OptionParser(tree.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x8") (datafile,) = args logging.debug("Load tree file `{0}`".format(datafile)) t, hpd = parse_tree(datafile) fig = plt.figure(1, (iopts.w, iopts.h)) ax1 = fig.add_axes([0, 0, 1, 1]) margin, rmargin = 0.1, 0.2 # Left and right margin leafinfo = LeafInfoFile("leafinfo.csv").cache wgdinfo = WGDInfoFile("wgdinfo.csv").cache outgroup = "ginkgo" # Panel A draw_tree( ax1, t, hpd=hpd, margin=margin, rmargin=rmargin, supportcolor=None, internal=False, outgroup=outgroup, reroot=False, leafinfo=leafinfo, wgdinfo=wgdinfo, geoscale=True, ) normalize_axes([ax1]) image_name = "tree.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def ks(args): """ %prog ks ks.layout Create a ks figure. """ p = OptionParser(ks.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x4") (layoutfile,) = args from jcvi.apps.ks import Layout, KsPlot, KsFile fig = plt.figure(1, (iopts.w, iopts.h)) ax2 = fig.add_axes([0.12, 0.12, 0.8, 0.8]) # Panel B ks_min = 0.0 ks_max = 3.0 bins = 60 fill = False layout = Layout(layoutfile) print(layout, file=sys.stderr) kp = KsPlot(ax2, ks_max, bins, legendp="upper right") for lo in layout: data = KsFile(lo.ksfile) data = [x.ng_ks for x in data] data = [x for x in data if ks_min <= x <= ks_max] kp.add_data( data, lo.components, label=lo.label, color=lo.color, marker=lo.marker, fill=fill, fitted=False, kde=True, ) kp.draw(filename=None) image_name = "ks.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def synteny(args): """ %prog synteny vplanifoliaA_blocks.bed vplanifoliaA.sizes \ b1.blocks all.bed b1.layout Create a composite figure with (A) wgd and (B) microsynteny. """ from jcvi.graphics.chromosome import draw_chromosomes p = OptionParser(synteny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x12") (bedfile, sizesfile, blocksfile, allbedfile, blockslayout) = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) ax1 = fig.add_axes([0, 0.5, 1, 0.5]) ax2 = fig.add_axes([0.02, 0, 0.98, 0.5]) # Panel A title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$" draw_chromosomes( ax1, bedfile, sizes=sizesfile, iopts=iopts, mergedist=200000, winsize=50000, imagemap=False, gauge=True, legend=False, title=title, ) # Panel B draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout) normalize_axes([root, ax1, ax2]) labels = ((0.05, 0.95, "A"), (0.05, 0.5, "B")) panel_labels(root, labels) image_name = "synteny.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def wgd(args): """ %prog wgd vplanifoliaA_blocks.bed vplanifoliaA.sizes Create a wgd figure. """ from jcvi.graphics.chromosome import draw_chromosomes p = OptionParser(synteny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x5") (bedfile, sizesfile) = args fig = plt.figure(1, (iopts.w, iopts.h)) ax1 = fig.add_axes([0, 0, 1, 1]) title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$" draw_chromosomes( ax1, bedfile, sizes=sizesfile, iopts=iopts, mergedist=200000, winsize=50000, imagemap=False, gauge=True, legend=False, title=title, ) normalize_axes([ax1]) image_name = "wgd.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def microsynteny(args): """ %prog microsynteny b1.blocks all.bed b1.layout Create a microsynteny figure. 
""" p = OptionParser(synteny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x6") (blocksfile, allbedfile, blockslayout) = args fig = plt.figure(1, (iopts.w, iopts.h)) ax2 = fig.add_axes([0, 0, 1, 1]) draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout) normalize_axes([ax2]) image_name = "microsynteny.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def ancestral(args): """ %prog ancestral vplanifoliaA.vplanifoliaA.anchors > vplanifoliaA_blocks.bed Paint 14 chromosomes following alpha WGD. """ p = OptionParser(ancestral.__doc__) p.set_beds() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (anchorsfile,) = args qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) # We focus on the following chromosome pairs target_pairs = { (1, 1), (1, 6), (1, 8), (1, 13), (2, 4), (3, 12), (3, 14), (5, 6), (5, 8), (7, 9), (7, 11), (9, 10), (10, 11), } def get_target(achr, bchr): if "chr" not in achr and "chr" not in bchr: return None achr, bchr = get_number(achr), get_number(bchr) if achr > bchr: achr, bchr = bchr, achr if (achr, bchr) in target_pairs: return achr, bchr return None def build_bedline(astart, aend, target_pair): # target_name = "{:02d}-{:02d}".format(*target_pair) target_name = [str(x) for x in target_pair if x in (1, 2, 3, 5, 7, 10)][0] return "\t".join( str(x) for x in (astart.seqid, astart.start, aend.end, target_name) ) # Iterate through the blocks, store any regions that has hits to one of the # target_pairs ac = AnchorFile(anchorsfile) blocks = ac.blocks outbed = Bed() for i, block in enumerate(blocks): a, b, scores = zip(*block) a = [qorder[x] for x in a] b = [sorder[x] for x in b] astart, aend = min(a)[1], max(a)[1] bstart, bend = min(b)[1], max(b)[1] # Now convert to BED lines with new accn achr, bchr = astart.seqid, bstart.seqid target = get_target(achr, bchr) if target is None: continue outbed.add(build_bedline(astart, aend, target)) outbed.add(build_bedline(bstart, bend, target)) outbed.print_to_file(sorted=True) def ploidy(args): """ %prog ploidy b1.blocks all.bed b1.layout Build a figure that illustrates the WGD history of the vanilla genome. """ p = OptionParser(ploidy.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x6") if len(args) != 3: sys.exit(not p.print_help()) blocksfile, bedfile, blockslayout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) draw_ploidy(fig, root, blocksfile, bedfile, blockslayout) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "vanilla-karyotype" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts) def draw_ploidy(fig, root, blocksfile, bedfile, blockslayout): switchidsfile = "switch.ids" Synteny( fig, root, blocksfile, bedfile, blockslayout, scalebar=True, switch=switchidsfile, ) # Legend showing the orientation of the genes draw_gene_legend(root, 0.2, 0.3, 0.53) # WGD labels radius = 0.025 tau_color = "#bebada" alpha_color = "#bc80bd" label_color = "k" pad = 0.05 for y in (0.74 + 1.5 * pad, 0.26 - 1.5 * pad): TextCircle( root, 0.25, y, r"$\alpha^{O}$", radius=radius, fc=alpha_color, color=label_color, fontweight="bold", ) TextCircle( root, 0.75, y, r"$\alpha^{O}$", radius=radius, fc=alpha_color, color=label_color, fontweight="bold", ) for y in (0.74 + 3 * pad, 0.26 - 3 * pad): TextCircle( root, 0.5, y, r"$\tau$", radius=radius, fc=tau_color, color=label_color ) if __name__ == "__main__": main()
bsd-2-clause
6,120,270,252,504,517,000
25.07221
83
0.573059
false
3.071668
false
false
false
cwacek/python-jsonschema-objects
python_jsonschema_objects/wrapper_types.py
1
11522
import collections import logging import six from python_jsonschema_objects import util from python_jsonschema_objects.validators import registry, ValidationError from python_jsonschema_objects.util import lazy_format as fmt logger = logging.getLogger(__name__) class ArrayWrapper(collections.abc.MutableSequence): """A wrapper for array-like structures. This implements all of the array like behavior that one would want, with a dirty-tracking mechanism to avoid constant validation costs. """ @property def strict(self): return getattr(self, "_strict_", False) def __len__(self): return len(self.data) def mark_or_revalidate(self): if self.strict: self.validate() else: self._dirty = True def __delitem__(self, index): self.data.pop(index) self.mark_or_revalidate() def insert(self, index, value): self.data.insert(index, value) self.mark_or_revalidate() def __setitem__(self, index, value): self.data[index] = value self.mark_or_revalidate() def __getitem__(self, idx): return self.typed_elems[idx] def __eq__(self, other): if isinstance(other, ArrayWrapper): return self.for_json() == other.for_json() else: return self.for_json() == other def __init__(self, ary): """Initialize a wrapper for the array Args: ary: (list-like, or ArrayWrapper) """ """ Marks whether or not the underlying array has been modified """ self._dirty = True """ Holds a typed copy of the array """ self._typed = None if isinstance(ary, (list, tuple, collections.abc.Sequence)): self.data = ary else: raise TypeError("Invalid value given to array validator: {0}".format(ary)) logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary)) @property def typed_elems(self): logger.debug(fmt("Accessing typed_elems of ArrayWrapper {} ", self)) if self._typed is None or self._dirty is True: self.validate() return self._typed def __repr__(self): return "<%s=%s>" % (self.__class__.__name__, str(self.data)) @classmethod def from_json(cls, jsonmsg): import json msg = json.loads(jsonmsg) obj = cls(msg) obj.validate() return obj def serialize(self): enc = util.ProtocolJSONEncoder() return enc.encode(self.typed_elems) def for_json(self): from python_jsonschema_objects import classbuilder out = [] for item in self.typed_elems: if isinstance( item, (classbuilder.ProtocolBase, classbuilder.LiteralValue, ArrayWrapper), ): out.append(item.for_json()) else: out.append(item) return out def validate(self): if self.strict or self._dirty: self.validate_items() self.validate_length() self.validate_uniqueness() return True def validate_uniqueness(self): if getattr(self, "uniqueItems", False) is True: testset = set(repr(item) for item in self.data) if len(testset) != len(self.data): raise ValidationError( "{0} has duplicate elements, but uniqueness required".format( self.data ) ) def validate_length(self): if getattr(self, "minItems", None) is not None: if len(self.data) < self.minItems: raise ValidationError( "{1} has too few elements. Wanted {0}.".format( self.minItems, self.data ) ) if getattr(self, "maxItems", None) is not None: if len(self.data) > self.maxItems: raise ValidationError( "{1} has too many elements. Wanted {0}.".format( self.maxItems, self.data ) ) def validate_items(self): """Validates the items in the backing array, including performing type validation. 
Sets the _typed property and clears the dirty flag as a side effect Returns: The typed array """ logger.debug(fmt("Validating {}", self)) from python_jsonschema_objects import classbuilder if self.__itemtype__ is None: return type_checks = self.__itemtype__ if not isinstance(type_checks, (tuple, list)): # we were given items = {'type': 'blah'} ; thus ensure the type for all data. type_checks = [type_checks] * len(self.data) elif len(type_checks) > len(self.data): raise ValidationError( "{1} does not have sufficient elements to validate against {0}".format( self.__itemtype__, self.data ) ) typed_elems = [] for elem, typ in zip(self.data, type_checks): if isinstance(typ, dict): for param, paramval in six.iteritems(typ): validator = registry(param) if validator is not None: validator(paramval, elem, typ) typed_elems.append(elem) elif util.safe_issubclass(typ, classbuilder.LiteralValue): val = typ(elem) val.validate() typed_elems.append(val) elif util.safe_issubclass(typ, classbuilder.ProtocolBase): if not isinstance(elem, typ): try: if isinstance( elem, (six.string_types, six.integer_types, float) ): val = typ(elem) else: val = typ(**util.coerce_for_expansion(elem)) except TypeError as e: raise ValidationError( "'{0}' is not a valid value for '{1}': {2}".format( elem, typ, e ) ) else: val = elem val.validate() typed_elems.append(val) elif util.safe_issubclass(typ, ArrayWrapper): val = typ(elem) val.validate() typed_elems.append(val) elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)): try: if isinstance(elem, (six.string_types, six.integer_types, float)): val = typ(elem) else: val = typ(**util.coerce_for_expansion(elem)) except TypeError as e: raise ValidationError( "'{0}' is not a valid value for '{1}': {2}".format(elem, typ, e) ) else: val.validate() typed_elems.append(val) self._dirty = False self._typed = typed_elems return typed_elems @staticmethod def create(name, item_constraint=None, **addl_constraints): """Create an array validator based on the passed in constraints. If item_constraint is a tuple, it is assumed that tuple validation is being performed. If it is a class or dictionary, list validation will be performed. Classes are assumed to be subclasses of ProtocolBase, while dictionaries are expected to be basic types ('string', 'number', ...). addl_constraints is expected to be key-value pairs of any of the other constraints permitted by JSON Schema v4. 
""" logger.debug( fmt( "Constructing ArrayValidator with {} and {}", item_constraint, addl_constraints, ) ) from python_jsonschema_objects import classbuilder klassbuilder = addl_constraints.pop( "classbuilder", None ) # type: python_jsonschema_objects.classbuilder.ClassBuilder props = {} if item_constraint is not None: if isinstance(item_constraint, (tuple, list)): for i, elem in enumerate(item_constraint): isdict = isinstance(elem, (dict,)) isklass = isinstance(elem, type) and util.safe_issubclass( elem, (classbuilder.ProtocolBase, classbuilder.LiteralValue) ) if not any([isdict, isklass]): raise TypeError( "Item constraint (position {0}) is not a schema".format(i) ) elif isinstance( item_constraint, (classbuilder.TypeProxy, classbuilder.TypeRef) ): pass elif util.safe_issubclass(item_constraint, ArrayWrapper): pass else: isdict = isinstance(item_constraint, (dict,)) isklass = isinstance(item_constraint, type) and util.safe_issubclass( item_constraint, (classbuilder.ProtocolBase, classbuilder.LiteralValue), ) if not any([isdict, isklass]): raise TypeError("Item constraint is not a schema") if isdict and "$ref" in item_constraint: if klassbuilder is None: raise TypeError( "Cannot resolve {0} without classbuilder".format( item_constraint["$ref"] ) ) item_constraint = klassbuilder.resolve_type( item_constraint["$ref"], name ) elif isdict and item_constraint.get("type") == "array": # We need to create a sub-array validator. item_constraint = ArrayWrapper.create( name + "#sub", item_constraint=item_constraint["items"], addl_constraints=item_constraint, ) elif isdict and "oneOf" in item_constraint: # We need to create a TypeProxy validator uri = "{0}_{1}".format(name, "<anonymous_list_type>") type_array = klassbuilder.construct_objects( item_constraint["oneOf"], uri ) item_constraint = classbuilder.TypeProxy(type_array) elif isdict and item_constraint.get("type") == "object": """ We need to create a ProtocolBase object for this anonymous definition""" uri = "{0}_{1}".format(name, "<anonymous_list_type>") item_constraint = klassbuilder.construct(uri, item_constraint) props["__itemtype__"] = item_constraint strict = addl_constraints.pop("strict", False) props["_strict_"] = strict props.update(addl_constraints) validator = type(str(name), (ArrayWrapper,), props) return validator
mit
6,283,899,650,825,311,000
34.343558
96
0.518486
false
4.837112
false
false
false
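The create() docstring above distinguishes tuple validation (a list of schemas, one per position) from list validation (a single schema or class applied to every element). A minimal sketch of the list-validation case, assuming ArrayWrapper is importable from python_jsonschema_objects.wrapper_types and that the generated wrapper class is constructed directly from a plain Python list:

    from python_jsonschema_objects.wrapper_types import ArrayWrapper
    from python_jsonschema_objects.validators import ValidationError

    # Every element must satisfy the same basic-type schema ("list validation").
    StringList = ArrayWrapper.create("StringList", item_constraint={"type": "string"})

    good = StringList(["a", "b", "c"])   # assumed constructor: wrap a raw list
    good.validate()                      # each element passes the per-item type check

    bad = StringList(["a", 1])
    try:
        bad.validate()                   # the integer fails the item schema
    except ValidationError:
        pass                             # expected

Passing a tuple of schemas as item_constraint instead would select positional (tuple) validation, as handled by the first branch inside create().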
franciscogmm/FinancialAnalysisUsingNLPandMachineLearning
SentimentAnalysis - Polarity - Domain Specific Lexicon.py
1
2667
import csv
import pandas as pd
import nltk
from nltk import FreqDist, ngrams
from nltk.corpus import stopwords
import string
from os import listdir
from os.path import isfile, join

def ngram_list(file, n):
    f = open(file, 'rU')
    raw = f.read()
    raw = raw.replace('\n', ' ')
    #raw = raw.decode('utf8')
    #raw = raw.decode("utf-8", 'ignore')
    ngramz = ngrams(raw.split(), n)
    return ngramz

def IsNotNull(value):
    return value is not None and len(value) > 0

mypath = '/Users/francis/Documents/FORDHAM/2nd Term/Text Analytics/' # path where files are located
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]

# load the positive and negative lexicons
dict_p = []
f = open('positive.txt', 'r')
for line in f:
    t = line.strip().lower()
    if IsNotNull(t):
        dict_p.append(t)
f.close()

dict_n = []
f = open('negative.txt', 'r')
for line in f:
    t = line.strip().lower()
    if IsNotNull(t):
        dict_n.append(t)
f.close()

totallist = []
rowlist = []
qa = 0
qb = 0
counti = 0
for i in onlyfiles:
    if i.endswith('.txt'):
        # get code
        j = i.replace('.txt', '')
        # string filename
        file = mypath + str(i)
        print i
        f = open(file, 'rU')
        raw = f.read()
        #print type(raw)
        raw = [w.translate(None, string.punctuation) for w in raw]
        raw = ''.join(raw)
        raw = raw.replace('\n', '')
        raw = raw.replace(' ', '')
        #print raw
        qa = 0
        qb = 0
        for word in dict_p:
            if word in raw:
                qa += 1
        for word in dict_n:
            if word in raw:
                qb += 1
        qc = qa - qb
        if qc > 0:
            sentiment = 'POSITIVE'
        elif qc == 0:
            sentiment = 'NEUTRAL'
        else:
            sentiment = 'NEGATIVE'
        rowlist.append(i)
        rowlist.append(qa)
        rowlist.append(qb)
        rowlist.append(qc)
        rowlist.append(sentiment)
        print counti
        counti += 1
        totallist.append(rowlist)
        rowlist = []
    else:
        pass

labels = ('file', 'P', 'N', 'NET', 'SENTIMENT')
df = pd.DataFrame.from_records(totallist, columns = labels)
df.to_csv('oursentiment.csv', index = False)

#print dict_p
# allbigrams.append(ngram_list(file,2))
# print i + ' BIGRAM - OK'
# alltrigrams.append(ngram_list(file,3))
# print i + ' TRIGRAM - OK'
# allfourgrams.append(ngram_list(file,4))
# print i + ' FOURGRAM - OK'
# allfivegrams.append(ngram_list(file,5))
# print i + ' TRIGRAM - OK'
# allsixgrams.append(ngram_list(file,6))
# print i + ' SIXGRAM - OK'
# allsevengrams.append(ngram_list(file,7))
# print i + ' SEVENGRAM - OK'
# alleightgrams.append(ngram_list(file,8))
# print i + ' EIGHTGRAM - OK'
mit
7,485,374,827,431,947,000
21.420168
98
0.578178
false
2.886364
false
false
false
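The script above implements lexicon-based polarity scoring: count hits from a positive word list and a negative word list over each document, then label the document by the sign of the difference. A compact sketch of that scoring step (the function name and arguments are illustrative, not taken from the original file):

    def polarity(text, positive_words, negative_words):
        # Substring membership, matching the original script's matching behaviour.
        pos_hits = sum(1 for w in positive_words if w in text)
        neg_hits = sum(1 for w in negative_words if w in text)
        net = pos_hits - neg_hits
        if net > 0:
            label = 'POSITIVE'
        elif net == 0:
            label = 'NEUTRAL'
        else:
            label = 'NEGATIVE'
        return pos_hits, neg_hits, net, label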
sassoftware/mint
mint/django_rest/rbuilder/querysets/views/v1/views.py
1
8001
#!/usr/bin/python # # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from django import http from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from mint.django_rest.deco import return_xml, requires, access, xObjRequires from mint.django_rest.rbuilder import service # from mint.django_rest.rbuilder.querysets import models from mint.django_rest.rbuilder.rbac.rbacauth import rbac, manual_rbac from mint.django_rest.rbuilder.errors import PermissionDenied from mint.django_rest.rbuilder.rbac.manager.rbacmanager import \ READSET, MODSETDEF def rbac_can_read_queryset(view, request, query_set_id, *args, **kwargs): obj = view.mgr.getQuerySet(query_set_id) if obj.is_public: # existance of querysets like "All Systems", etc, are not stealthed. # but may vary in size depending on the user accessing them's permissions # (ReadMember) on their contents. return True user = view.mgr.getSessionInfo().user[0] ok = view.mgr.userHasRbacPermission(user, obj, READSET) return ok def rbac_can_write_queryset(view, request, query_set_id, *args, **kwargs): obj = view.mgr.getQuerySet(query_set_id) user = view.mgr.getSessionInfo().user[0] return view.mgr.userHasRbacPermission(user, obj, MODSETDEF) class BaseQuerySetService(service.BaseService): pass class QuerySetsService(BaseQuerySetService): # rbac is handled semimanually for this function -- show only # querysets that we have permission to see # but don't use full rbac code, because that is implemented using querysets # and is too meta. @access.authenticated @return_xml def rest_GET(self, request): user = request._authUser querysets = self.mgr.getQuerySets() return self.mgr.filterRbacQuerysets(user, querysets, request) # not used above, but still needed by load_from_href and other # functions def get(self): return self.mgr.getQuerySets() @access.admin @requires('query_set', load=True, save=True) @return_xml def rest_POST(self, request, query_set): return self.mgr.addQuerySet(query_set, request._authUser) class QuerySetService(BaseQuerySetService): # rbac is handled semimanually for this function -- show only # querysets that we have permission to see # but don't use full rbac code, because that is implemented using querysets # and is too meta. 
@rbac(manual_rbac) @return_xml def rest_GET(self, request, query_set_id): user = request._authUser queryset = self.mgr.getQuerySet(query_set_id) if not queryset.is_public and not self.mgr.userHasRbacPermission( user, queryset, READSET, request ): raise PermissionDenied() return queryset # not used above, but still needed by load_from_href and other # functions def get(self, query_set_id): return self.mgr.getQuerySet(query_set_id) @access.admin @requires('query_set') @return_xml def rest_PUT(self, request, query_set_id, query_set): oldQuerySet = self.mgr.getQuerySet(query_set_id) if oldQuerySet.pk != query_set.pk: raise PermissionDenied(msg='Attempting to reassign ID') return self.mgr.updateQuerySet(query_set, request._authUser) @access.admin def rest_DELETE(self, request, query_set_id): querySet = self.mgr.getQuerySet(query_set_id) self.mgr.deleteQuerySet(querySet) response = http.HttpResponse(status=204) return response class QuerySetAllResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): return self.mgr.getQuerySetAllResult(query_set_id, for_user=request._authUser) class QuerySetUniverseResultService(BaseQuerySetService): '''the parent queryset of all objects of a given type''' @access.authenticated @return_xml def rest_GET(self, request, query_set_id): self.mgr.getQuerySetUniverseSet(query_set_id) url = reverse('QuerySetAllResult', args=[query_set_id]) return HttpResponseRedirect(url) class QuerySetChosenResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): return self.mgr.getQuerySetChosenResult(query_set_id, for_user=request._authUser) @rbac(rbac_can_write_queryset) # TODO: source fromc onstant somewhere @requires(['systems', 'users', 'images', 'targets', 'project_branch_stages', 'projects', 'grants', 'roles']) @return_xml def rest_PUT(self, request, query_set_id, *args, **kwargs): resources = kwargs.items()[0][1] return self.mgr.addQuerySetChosen(query_set_id, resources, request._authUser) @rbac(rbac_can_write_queryset) # TODO: source fromc onstant somewhere @requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role']) @return_xml def rest_POST(self, request, query_set_id, *args, **kwargs): resource = kwargs.items()[0][1] self.mgr.updateQuerySetChosen(query_set_id, resource, request._authUser) return resource @rbac(rbac_can_write_queryset) # TODO: source fromc onstant somewhere @requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role']) @return_xml def rest_DELETE(self, request, query_set_id, *args, **kwargs): resource = kwargs.items()[0][1] return self.mgr.deleteQuerySetChosen(query_set_id, resource, request._authUser) class QuerySetFilteredResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): return self.mgr.getQuerySetFilteredResult(query_set_id, for_user=request._authUser) class QuerySetChildResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): if rbac_can_read_queryset(self, request, query_set_id): return self.mgr.getQuerySetChildResult(query_set_id) else: return self.mgr.getQuerySetChildResult(query_set_id, for_user=request._authUser) # this is not expected to be our final API for removing child members # but serves as a temporary one in case someone needs it. 
Deleting # the queryset is not an option to clear it out because associated # grants would be purged. @rbac(rbac_can_write_queryset) @requires('query_set') @return_xml def rest_DELETE(self, request, query_set_id, query_set): return self.mgr.deleteQuerySetChild(query_set_id, query_set, for_user=request._authUser) class QuerySetJobsService(BaseQuerySetService): # no way to list running jobs at the moment # since all jobs run immediately @rbac(rbac_can_read_queryset) @xObjRequires('job') def rest_POST(self, request, query_set_id, job): '''launch a job on this queryset''' queryset = self.mgr.getQuerySet(query_set_id) self.mgr.scheduleQuerySetJobAction( queryset, job ) return http.HttpResponse(status=200) class QuerySetFilterDescriptorService(BaseQuerySetService): # @access.authenticated @return_xml def rest_GET(self, request, query_set_id=None): return self.mgr.getQuerySetFilterDescriptor(query_set_id)
apache-2.0
-5,749,486,631,042,150,000
36.56338
122
0.699038
false
3.678621
false
false
false
Anonymike/pasta-bot
plugins/google_broken.py
1
3457
import random from util import hook, http, text, database, web import re def api_get(kind, query): """Use the RESTful Google Search API""" url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \ 'v=1.0&safe=off' return http.get_json(url % kind, q=query) @hook.command('search') @hook.command('g') @hook.command def google(inp,db=None,chan=None): """google <query> -- Returns first google search result for <query>.""" trimlength = database.get(db,'channels','trimlength','chan',chan) if not trimlength: trimlength = 9999 parsed = api_get('web', inp) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'No results found.' result = parsed['responseData']['results'][0] title = http.unescape(result['titleNoFormatting']) content = http.unescape(result['content']) if not content: content = "No description available." else: content = http.html.fromstring(content.replace('\n', '')).text_content() return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content) # @hook.command('image') @hook.command('gis') @hook.command('gi') @hook.command('image') @hook.command def googleimage(inp): """gis <query> -- Returns first Google Image result for <query>.""" parsed = api_get('images', inp) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'no images found' return random.choice(parsed['responseData']['results'][:10])['unescapedUrl'] @hook.command def gcalc(inp): "gcalc <term> -- Calculate <term> with Google Calc." soup = http.get_soup('http://www.google.com/search', q=inp) result = soup.find('span', {'class': 'cwcot'}) formula = soup.find('span', {'class': 'cwclet'}) if not result: return "Could not calculate '{}'".format(inp) return u"{} {}".format(formula.contents[0].strip(),result.contents[0].strip()) @hook.regex(r'^\>(.*\.(gif|GIF|jpg|JPG|jpeg|JPEG|png|PNG|tiff|TIFF|bmp|BMP))\s?(\d+)?') @hook.command def implying(inp): """>laughing girls.gif <num> -- Returns first Google Image result for <query>.""" try: search = inp.group(1) except: search = inp try: num = int(inp.group(3)) except: num = 0 if 'http' in search: return parsed = api_get('images', search) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'no images found' try: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][num]['unescapedUrl']) except: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][0]['unescapedUrl']) #return random.choice(parsed['responseData']['results'][:10])['unescapedUrl'] @hook.command('nym') @hook.command('littleanon') @hook.command('gfy') @hook.command def lmgtfy(inp, bot=None): "lmgtfy [phrase] - Posts a google link for the specified phrase" link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp) try: return web.isgd(link) except (web.ShortenError, http.HTTPError): return link
gpl-3.0
2,788,865,380,336,183,000
33.919192
119
0.639283
false
3.429563
false
false
false
Himon-SYNCRAFT/taskplus
tests/core/actions/test_get_task_status_details.py
1
3408
from unittest import mock from taskplus.core.actions import (GetTaskStatusDetailsAction, GetTaskStatusDetailsRequest) from taskplus.core.domain import TaskStatus from taskplus.core.shared.response import ResponseFailure def test_get_status_details_action(): status = mock.Mock() status = TaskStatus(name='new', id=1) statuses_repo = mock.Mock() statuses_repo.one.return_value = status request = GetTaskStatusDetailsRequest(status.id) action = GetTaskStatusDetailsAction(statuses_repo) response = action.execute(request) assert bool(response) is True statuses_repo.one.assert_called_once_with(status.id) assert response.value == status def test_get_status_details_action_with_hooks(): status = mock.Mock() status = TaskStatus(name='new', id=1) statuses_repo = mock.Mock() statuses_repo.one.return_value = status request = GetTaskStatusDetailsRequest(status.id) action = GetTaskStatusDetailsAction(statuses_repo) before = mock.MagicMock() after = mock.MagicMock() action.add_before_execution_hook(before) action.add_after_execution_hook(after) response = action.execute(request) assert before.called assert after.called assert bool(response) is True statuses_repo.one.assert_called_once_with(status.id) assert response.value == status def test_get_status_details_action_handles_bad_request(): status = mock.Mock() status = TaskStatus(name='new', id=1) statuses_repo = mock.Mock() statuses_repo.one.return_value = status request = GetTaskStatusDetailsRequest(status_id=None) action = GetTaskStatusDetailsAction(statuses_repo) response = action.execute(request) assert bool(response) is False assert not statuses_repo.one.called assert response.value == { 'type': ResponseFailure.PARAMETER_ERROR, 'message': 'status_id: is required' } def test_get_status_details_action_handles_generic_error(): error_message = 'Error!!!' statuses_repo = mock.Mock() statuses_repo.one.side_effect = Exception(error_message) request = GetTaskStatusDetailsRequest(status_id=1) action = GetTaskStatusDetailsAction(statuses_repo) response = action.execute(request) assert bool(response) is False statuses_repo.one.assert_called_once_with(1) assert response.value == { 'type': ResponseFailure.SYSTEM_ERROR, 'message': 'Exception: {}'.format(error_message) } def test_get_status_details_request(): status_id = 1 request = GetTaskStatusDetailsRequest(status_id) assert request.is_valid() assert request.status_id == status_id def test_get_status_details_request_without_id(): status_id = None request = GetTaskStatusDetailsRequest(status_id) assert not request.is_valid() assert request.status_id == status_id assert len(request.errors) == 1 error = request.errors[0] assert error.parameter == 'status_id' assert error.message == 'is required' def test_get_status_details_bad_request(): status_id = 'asd' request = GetTaskStatusDetailsRequest(status_id) assert not request.is_valid() assert request.status_id == status_id assert len(request.errors) == 1 error = request.errors[0] assert error.parameter == 'status_id' assert error.message == 'expected int, got str(asd)'
bsd-3-clause
-939,071,211,444,209,800
29.159292
63
0.701585
false
3.774086
true
false
false
codeforamerica/comport
migrations/versions/0d78d545906f_.py
1
1135
"""Add 'is_public' flags for datasets Revision ID: 0d78d545906f Revises: 6d30846080b2 Create Date: 2016-06-27 15:30:14.415519 """ # revision identifiers, used by Alembic. revision = '0d78d545906f' down_revision = '6d30846080b2' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('departments', sa.Column('is_public_assaults_on_officers', sa.Boolean(), server_default=sa.true(), nullable=False)) op.add_column('departments', sa.Column('is_public_citizen_complaints', sa.Boolean(), server_default=sa.true(), nullable=False)) op.add_column('departments', sa.Column('is_public_officer_involved_shootings', sa.Boolean(), server_default=sa.true(), nullable=False)) op.add_column('departments', sa.Column('is_public_use_of_force_incidents', sa.Boolean(), server_default=sa.true(), nullable=False)) def downgrade(): op.drop_column('departments', 'is_public_use_of_force_incidents') op.drop_column('departments', 'is_public_officer_involved_shootings') op.drop_column('departments', 'is_public_citizen_complaints') op.drop_column('departments', 'is_public_assaults_on_officers')
bsd-3-clause
-5,945,847,998,224,271,000
39.535714
139
0.732159
false
2.902813
false
false
false
madgik/exareme
Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/vtable/dummycoding.py
1
2450
import setpath
import functions
import json
import collections  # needed by convert(), which checks collections.Mapping / Iterable

registered = True

def convert(data):
    if isinstance(data, basestring):
        return str(data)
    elif isinstance(data, collections.Mapping):
        return dict(map(convert, data.iteritems()))
    elif isinstance(data, collections.Iterable):
        return type(data)(map(convert, data))
    else:
        return data

class dummycoding(functions.vtable.vtbase.VT):
    def VTiter(self, *parsedArgs, **envars):
        largs, dictargs = self.full_parse(parsedArgs)

        if 'query' not in dictargs:
            raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
        query = dictargs['query']

        if 'metadata' not in dictargs:
            raise functions.OperatorError(__name__.rsplit('.')[-1], "No metadata ")
        metadata = json.loads(dictargs['metadata'])

        cur = envars['db'].cursor()
        c = cur.execute(query)
        schema = cur.getdescriptionsafe()

        no = 0
        for myrow in c:
            first_tuple = []
            schema1 = []
            for item in xrange(len(schema)):
                if schema[item][0] in metadata:
                    # expand a categorical column into one 0/1 column per category
                    vals = metadata[schema[item][0]].split(',')
                    vals.sort()
                    for v in vals:
                        newv = str(schema[item][0]) + '(' + str(v) + ')'
                        schema1.append(newv)
                        if myrow[item] == v:
                            first_tuple.append(1)
                        else:
                            first_tuple.append(0)
                else:
                    # print 'no', schema[item][0]
                    newv = str(schema[item][0])
                    schema1.append(newv)
                    first_tuple.append(myrow[item])
            if no == 0:
                # print tuple((x,) for x in schema1)
                yield tuple((x,) for x in schema1)
                no = no + 1
            # print str(first_tuple)
            yield tuple(first_tuple,)

def Source():
    return functions.vtable.vtbase.VTGenerator(dummycoding)

if not ('.' in __name__):
    """
    This is needed to be able to test the function, put it at the end of every
    new function you create
    """
    import sys
    import setpath
    from functions import *
    testfunction()
    if __name__ == "__main__":
        reload(sys)
        sys.setdefaultencoding('utf-8')
        import doctest
        doctest.testmod()
mit
-3,340,337,105,526,376,000
29.259259
88
0.517959
false
4.3058
false
false
false
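The virtual table above performs dummy (one-hot) coding: every column named in the metadata argument is expanded into one 0/1 column per category, emitted as column(value), while other columns pass through unchanged. An equivalent standalone sketch using pandas (illustrative only; the original operator streams rows through madis rather than building a DataFrame):

    import pandas as pd

    def dummy_code(df, metadata):
        # metadata maps a column name to a comma-separated string of its categories,
        # mirroring the JSON argument accepted by the operator above.
        for col, values in metadata.items():
            for v in sorted(values.split(',')):
                df['%s(%s)' % (col, v)] = (df[col] == v).astype(int)
            df = df.drop(columns=[col])
        return df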
gnarula/eden_deployment
modules/s3db/msg.py
1
88933
# -*- coding: utf-8 -*- """ Sahana Eden Messaging Model @copyright: 2009-2014 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3ChannelModel", "S3MessageModel", "S3MessageAttachmentModel", "S3EmailModel", "S3FacebookModel", "S3MCommonsModel", "S3ParsingModel", "S3RSSModel", "S3SMSModel", "S3SMSOutboundModel", "S3TropoModel", "S3TwilioModel", "S3TwitterModel", "S3TwitterSearchModel", "S3XFormsModel", "S3BaseStationModel", ) from gluon import * from gluon.storage import Storage from ..s3 import * # Compact JSON encoding SEPARATORS = (",", ":") # ============================================================================= class S3ChannelModel(S3Model): """ Messaging Channels - all Inbound & Outbound channels for messages are instances of this super-entity """ names = ("msg_channel", "msg_channel_limit", "msg_channel_status", "msg_channel_id", "msg_channel_enable", "msg_channel_disable", "msg_channel_enable_interactive", "msg_channel_disable_interactive", "msg_channel_onaccept", ) def model(self): T = current.T db = current.db define_table = self.define_table #---------------------------------------------------------------------- # Super entity: msg_channel # channel_types = Storage(msg_email_channel = T("Email (Inbound)"), msg_facebook_channel = T("Facebook"), msg_mcommons_channel = T("Mobile Commons (Inbound)"), msg_rss_channel = T("RSS Feed"), msg_sms_modem_channel = T("SMS Modem"), msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"), msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"), msg_tropo_channel = T("Tropo"), msg_twilio_channel = T("Twilio (Inbound)"), msg_twitter_channel = T("Twitter"), ) tablename = "msg_channel" self.super_entity(tablename, "channel_id", channel_types, Field("name", #label = T("Name"), ), Field("description", #label = T("Description"), ), Field("enabled", "boolean", default = True, #label = T("Enabled?") #represent = s3_yes_no_represent, ), # @ToDo: Indicate whether channel can be used for Inbound or Outbound #Field("inbound", "boolean", # label = T("Inbound?")), #Field("outbound", "boolean", # label = T("Outbound?")), ) # @todo: make lazy_table table = db[tablename] table.instance_type.readable = True # Reusable Field channel_id = S3ReusableField("channel_id", "reference %s" % tablename, label = T("Channel"), ondelete = "SET NULL", represent = S3Represent(lookup=tablename), requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_channel.id")), ) self.add_components(tablename, msg_channel_status = "channel_id", ) # 
--------------------------------------------------------------------- # Channel Limit # Used to limit the number of emails sent from the system # - works by simply recording an entry for the timestamp to be checked against # # - currently just used by msg.send_email() # tablename = "msg_channel_limit" define_table(tablename, # @ToDo: Make it per-channel #channel_id(), *s3_timestamp()) # --------------------------------------------------------------------- # Channel Status # Used to record errors encountered in the Channel # tablename = "msg_channel_status" define_table(tablename, channel_id(), Field("status", #label = T("Status") #represent = s3_yes_no_represent, ), *s3_meta_fields()) # --------------------------------------------------------------------- return dict(msg_channel_id = channel_id, msg_channel_enable = self.channel_enable, msg_channel_disable = self.channel_disable, msg_channel_enable_interactive = self.channel_enable_interactive, msg_channel_disable_interactive = self.channel_disable_interactive, msg_channel_onaccept = self.channel_onaccept, msg_channel_poll = self.channel_poll, ) # ------------------------------------------------------------------------- @staticmethod def channel_enable(tablename, channel_id): """ Enable a Channel - Schedule a Poll for new messages - Enable all associated Parsers CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.table(tablename) record = db(table.channel_id == channel_id).select(table.id, # needed for update_record table.enabled, limitby=(0, 1), ).first() if not record.enabled: # Flag it as enabled # Update Instance record.update_record(enabled = True) # Update Super s3db.update_super(table, record) # Enable all Parser tasks on this channel ptable = s3db.msg_parser query = (ptable.channel_id == channel_id) & \ (ptable.deleted == False) parsers = db(query).select(ptable.id) for parser in parsers: s3db.msg_parser_enable(parser.id) # Do we have an existing Task? 
ttable = db.scheduler_task args = '["%s", %s]' % (tablename, channel_id) query = ((ttable.function_name == "msg_poll") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: return "Channel already enabled" else: current.s3task.schedule_task("msg_poll", args = [tablename, channel_id], period = 300, # seconds timeout = 300, # seconds repeats = 0 # unlimited ) return "Channel enabled" # ------------------------------------------------------------------------- @staticmethod def channel_enable_interactive(r, **attr): """ Enable a Channel - Schedule a Poll for new messages S3Method for interactive requests """ tablename = r.tablename result = current.s3db.msg_channel_enable(tablename, r.record.channel_id) current.session.confirmation = result fn = tablename.split("_", 1)[1] redirect(URL(f=fn)) # ------------------------------------------------------------------------- @staticmethod def channel_disable(tablename, channel_id): """ Disable a Channel - Remove schedule for Polling for new messages - Disable all associated Parsers CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.table(tablename) record = db(table.channel_id == channel_id).select(table.id, # needed for update_record table.enabled, limitby=(0, 1), ).first() if record.enabled: # Flag it as disabled # Update Instance record.update_record(enabled = False) # Update Super s3db.update_super(table, record) # Disable all Parser tasks on this channel ptable = s3db.msg_parser parsers = db(ptable.channel_id == channel_id).select(ptable.id) for parser in parsers: s3db.msg_parser_disable(parser.id) # Do we have an existing Task? ttable = db.scheduler_task args = '["%s", %s]' % (tablename, channel_id) query = ((ttable.function_name == "msg_poll") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: # Disable all db(query).update(status="STOPPED") return "Channel disabled" else: return "Channel already disabled" # -------------------------------------------------------------------------- @staticmethod def channel_disable_interactive(r, **attr): """ Disable a Channel - Remove schedule for Polling for new messages S3Method for interactive requests """ tablename = r.tablename result = current.s3db.msg_channel_disable(tablename, r.record.channel_id) current.session.confirmation = result fn = tablename.split("_", 1)[1] redirect(URL(f=fn)) # ------------------------------------------------------------------------- @staticmethod def channel_onaccept(form): """ Process the Enabled Flag """ if form.record: # Update form # process of changed if form.record.enabled and not form.vars.enabled: current.s3db.msg_channel_disable(form.table._tablename, form.vars.channel_id) elif form.vars.enabled and not form.record.enabled: current.s3db.msg_channel_enable(form.table._tablename, form.vars.channel_id) else: # Create form # Process only if enabled if form.vars.enabled: current.s3db.msg_channel_enable(form.table._tablename, form.vars.channel_id) # ------------------------------------------------------------------------- @staticmethod def channel_poll(r, **attr): """ Poll a Channel for new messages S3Method for interactive requests """ tablename = r.tablename current.s3task.async("msg_poll", args=[tablename, r.record.channel_id]) current.session.confirmation = \ current.T("The poll request has been 
submitted, so new messages should appear shortly - refresh to see them") if tablename == "msg_email_channel": fn = "email_inbox" elif tablename == "msg_mcommons_channel": fn = "sms_inbox" elif tablename == "msg_rss_channel": fn = "rss" elif tablename == "msg_twilio_channel": fn = "sms_inbox" elif tablename == "msg_twitter_channel": fn = "twitter_inbox" else: return "Unsupported channel: %s" % tablename redirect(URL(f=fn)) # ============================================================================= class S3MessageModel(S3Model): """ Messages """ names = ("msg_message", "msg_message_id", "msg_message_represent", "msg_outbox", ) def model(self): T = current.T db = current.db UNKNOWN_OPT = current.messages.UNKNOWN_OPT configure = self.configure define_table = self.define_table # Message priority msg_priority_opts = {3 : T("High"), 2 : T("Medium"), 1 : T("Low"), } # --------------------------------------------------------------------- # Message Super Entity - all Inbound & Outbound Messages # message_types = Storage(msg_email = T("Email"), msg_facebook = T("Facebook"), msg_rss = T("RSS"), msg_sms = T("SMS"), msg_twitter = T("Twitter"), msg_twitter_result = T("Twitter Search Results"), ) tablename = "msg_message" self.super_entity(tablename, "message_id", message_types, # Knowing which Channel Incoming Messages # came in on allows correlation to Outbound # messages (campaign_message, deployment_alert, etc) self.msg_channel_id(), s3_datetime(default="now"), Field("body", "text", label = T("Message"), ), Field("from_address", label = T("From"), ), Field("to_address", label = T("To"), ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or \ [T("Out")])[0], ), ) # @todo: make lazy_table table = db[tablename] table.instance_type.readable = True table.instance_type.writable = True configure(tablename, list_fields = ["instance_type", "from_address", "to_address", "body", "inbound", ], ) # Reusable Field message_represent = S3Represent(lookup=tablename, fields=["body"]) message_id = S3ReusableField("message_id", "reference %s" % tablename, ondelete = "RESTRICT", represent = message_represent, requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_message.id")), ) self.add_components(tablename, msg_attachment = "message_id", deploy_response = "message_id", ) # --------------------------------------------------------------------- # Outbound Messages # # Show only the supported messaging methods MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS # Maximum number of retries to send a message MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries() # Valid message outbox statuses MSG_STATUS_OPTS = {1 : T("Unsent"), 2 : T("Sent"), 3 : T("Draft"), 4 : T("Invalid"), 5 : T("Failed"), } opt_msg_status = S3ReusableField("status", "integer", notnull=True, requires = IS_IN_SET(MSG_STATUS_OPTS, zero=None), default = 1, label = T("Status"), represent = lambda opt: \ MSG_STATUS_OPTS.get(opt, UNKNOWN_OPT)) # Outbox - needs to be separate to Message since a single message # sent needs different outbox entries for each recipient tablename = "msg_outbox" define_table(tablename, # FK not instance message_id(), # Person/Group to send the message out to: self.super_link("pe_id", "pr_pentity"), # If set used instead of picking up from pe_id: Field("address"), Field("contact_method", length=32, default = "EMAIL", label = T("Contact Method"), represent = lambda opt: \ MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT), requires = 
IS_IN_SET(MSG_CONTACT_OPTS, zero=None), ), opt_msg_status(), # Used to loop through a PE to get it's members Field("system_generated", "boolean", default = False, ), # Give up if we can't send after MAX_RETRIES Field("retries", "integer", default = MAX_SEND_RETRIES, readable = False, writable = False, ), *s3_meta_fields()) configure(tablename, list_fields = ["id", "message_id", "pe_id", "status", ], orderby = "msg_outbox.created_on desc", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) return dict(msg_message_id = message_id, msg_message_represent = message_represent, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Return safe defaults in case the model has been deactivated. """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False) return dict(msg_message_id = lambda **attr: dummy("message_id"), ) # ============================================================================= class S3MessageAttachmentModel(S3Model): """ Message Attachments - link table between msg_message & doc_document """ names = ("msg_attachment",) def model(self): # --------------------------------------------------------------------- # tablename = "msg_attachment" self.define_table(tablename, # FK not instance self.msg_message_id(), self.doc_document_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) return dict() # ============================================================================= class S3EmailModel(S3ChannelModel): """ Email InBound Channels Outbound Email is currently handled via deployment_settings InBox/OutBox """ names = ("msg_email_channel", "msg_email", ) def model(self): T = current.T configure = self.configure define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # Email Inbound Channels # tablename = "msg_email_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("server"), Field("protocol", requires = IS_IN_SET(["imap", "pop3"], zero=None), ), Field("use_ssl", "boolean"), Field("port", "integer"), Field("username"), Field("password", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), # Set true to delete messages from the remote # inbox after fetching them. 
Field("delete_from_server", "boolean"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "email_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "email_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "email_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Email Messages: InBox & Outbox # sender = current.deployment_settings.get_mail_sender() tablename = "msg_email" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now"), Field("subject", length=78, # RFC 2822 label = T("Subject"), ), Field("body", "text", label = T("Message"), ), Field("from_address", #notnull=True, default = sender, label = T("Sender"), requires = IS_EMAIL(), ), Field("to_address", label = T("To"), requires = IS_EMAIL(), ), Field("raw", "text", label = T("Message Source"), readable = False, writable = False, ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or [T("Out")])[0], ), *s3_meta_fields()) configure(tablename, orderby = "msg_email.date desc", super_entity = "msg_message", ) # Components self.add_components(tablename, # Used to link to custom tab deploy_response_select_mission: deploy_mission = {"name": "select", "link": "deploy_response", "joinby": "message_id", "key": "mission_id", "autodelete": False, }, ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3FacebookModel(S3ChannelModel): """ Facebook Channels InBox/OutBox https://developers.facebook.com/docs/graph-api """ names = ("msg_facebook_channel", "msg_facebook", "msg_facebook_login", ) def model(self): T = current.T configure = self.configure define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # Facebook Channels # tablename = "msg_facebook_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("login", "boolean", default = False, label = T("Use for Login?"), represent = s3_yes_no_represent, ), Field("app_id", "bigint", requires = IS_INT_IN_RANGE(0, +1e16) ), Field("app_secret", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), # Optional Field("page_id", "bigint", requires = IS_INT_IN_RANGE(0, +1e16) ), Field("page_access_token"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_facebook_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "facebook_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "facebook_channel", method = "disable", action = self.msg_channel_disable_interactive) #set_method("msg", "facebook_channel", # method = "poll", # action = self.msg_channel_poll) # --------------------------------------------------------------------- # Facebook Messages: InBox & Outbox # tablename = "msg_facebook" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now"), Field("body", "text", label = 
T("Message"), ), # @ToDo: Are from_address / to_address relevant in Facebook? Field("from_address", #notnull=True, #default = sender, label = T("Sender"), ), Field("to_address", label = T("To"), ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or [T("Out")])[0], ), *s3_meta_fields()) configure(tablename, orderby = "msg_facebook.date desc", super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict(msg_facebook_login = self.msg_facebook_login, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Safe defaults for model-global names if module is disabled """ return dict(msg_facebook_login = lambda: False, ) # ------------------------------------------------------------------------- @staticmethod def msg_facebook_channel_onaccept(form): if form.vars.login: # Ensure only a single account used for Login current.db(current.s3db.msg_facebook_channel.id != form.vars.id).update(login = False) # Normal onaccept processing S3ChannelModel.channel_onaccept(form) # ------------------------------------------------------------------------- @staticmethod def msg_facebook_login(): table = current.s3db.msg_facebook_channel query = (table.login == True) & \ (table.deleted == False) c = current.db(query).select(table.app_id, table.app_secret, limitby=(0, 1) ).first() return c # ============================================================================= class S3MCommonsModel(S3ChannelModel): """ Mobile Commons Inbound SMS Settings - Outbound can use Web API """ names = ("msg_mcommons_channel",) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- tablename = "msg_mcommons_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("campaign_id", length=128, unique=True, requires = IS_NOT_EMPTY(), ), Field("url", default = \ "https://secure.mcommons.com/api/messages", requires = IS_URL() ), Field("username", requires = IS_NOT_EMPTY(), ), Field("password", "password", readable = False, requires = IS_NOT_EMPTY(), ), Field("query"), Field("timestmp", "datetime", writable = False, ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "mcommons_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "mcommons_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "mcommons_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3ParsingModel(S3Model): """ Message Parsing Model """ names = ("msg_parser", "msg_parsing_status", "msg_session", "msg_keyword", "msg_sender", "msg_parser_enabled", "msg_parser_enable", "msg_parser_disable", "msg_parser_enable_interactive", "msg_parser_disable_interactive", ) def model(self): T = current.T define_table = self.define_table set_method = self.set_method channel_id = self.msg_channel_id message_id = self.msg_message_id # 
--------------------------------------------------------------------- # Link between Message Channels and Parsers in parser.py # tablename = "msg_parser" define_table(tablename, # Source channel_id(ondelete = "CASCADE"), Field("function_name", label = T("Parser"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_parser_onaccept, ) set_method("msg", "parser", method = "enable", action = self.parser_enable_interactive) set_method("msg", "parser", method = "disable", action = self.parser_disable_interactive) set_method("msg", "parser", method = "parse", action = self.parser_parse) # --------------------------------------------------------------------- # Message parsing status # - component to core msg_message table # tablename = "msg_parsing_status" define_table(tablename, # Component, not Instance message_id(ondelete = "CASCADE"), # Source channel_id(ondelete = "CASCADE"), Field("is_parsed", "boolean", default = False, label = T("Parsing Status"), represent = lambda parsed: \ (parsed and [T("Parsed")] or \ [T("Not Parsed")])[0], ), message_id("reply_id", label = T("Reply"), ondelete = "CASCADE", ), *s3_meta_fields()) # --------------------------------------------------------------------- # Login sessions for Message Parsing # - links a from_address with a login until expiry # tablename = "msg_session" define_table(tablename, Field("from_address"), Field("email"), Field("created_datetime", "datetime", default = current.request.utcnow, ), Field("expiration_time", "integer"), Field("is_expired", "boolean", default = False, ), *s3_meta_fields()) # --------------------------------------------------------------------- # Keywords for Message Parsing # tablename = "msg_keyword" define_table(tablename, Field("keyword", label = T("Keyword"), ), # @ToDo: Move this to a link table self.event_incident_type_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Senders for Message Parsing # - whitelist / blacklist / prioritise # tablename = "msg_sender" define_table(tablename, Field("sender", label = T("Sender"), ), # @ToDo: Make pe_id work for this #self.super_link("pe_id", "pr_pentity"), Field("priority", "integer", label = T("Priority"), ), *s3_meta_fields()) # --------------------------------------------------------------------- return dict(msg_parser_enabled = self.parser_enabled, msg_parser_enable = self.parser_enable, msg_parser_disable = self.parser_disable, ) # ----------------------------------------------------------------------------- @staticmethod def parser_parse(r, **attr): """ Parse unparsed messages S3Method for interactive requests """ record = r.record current.s3task.async("msg_parse", args=[record.channel_id, record.function_name]) current.session.confirmation = \ current.T("The parse request has been submitted") redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def parser_enabled(channel_id): """ Helper function to see if there is a Parser connected to a Channel - used to determine whether to populate the msg_parsing_status table """ table = current.s3db.msg_parser record = current.db(table.channel_id == channel_id).select(table.enabled, limitby=(0, 1), ).first() if record and record.enabled: return True else: return False # ------------------------------------------------------------------------- @staticmethod def parser_enable(id): """ Enable a Parser - 
Connect a Parser to a Channel CLI API for shell scripts & to be called by S3Method @ToDo: Ensure only 1 Parser is connected to any Channel at a time """ db = current.db s3db = current.s3db table = s3db.msg_parser record = db(table.id == id).select(table.id, # needed for update_record table.enabled, table.channel_id, table.function_name, limitby=(0, 1), ).first() if not record.enabled: # Flag it as enabled record.update_record(enabled = True) channel_id = record.channel_id function_name = record.function_name # Do we have an existing Task? ttable = db.scheduler_task args = '[%s, "%s"]' % (channel_id, function_name) query = ((ttable.function_name == "msg_parse") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: return "Parser already enabled" else: current.s3task.schedule_task("msg_parse", args = [channel_id, function_name], period = 300, # seconds timeout = 300, # seconds repeats = 0 # unlimited ) return "Parser enabled" # ------------------------------------------------------------------------- @staticmethod def parser_enable_interactive(r, **attr): """ Enable a Parser - Connect a Parser to a Channel S3Method for interactive requests """ result = current.s3db.msg_parser_enable(r.id) current.session.confirmation = result redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def parser_disable(id): """ Disable a Parser - Disconnect a Parser from a Channel CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.msg_parser record = db(table.id == id).select(table.id, # needed for update_record table.enabled, table.channel_id, table.function_name, limitby=(0, 1), ).first() if record.enabled: # Flag it as disabled record.update_record(enabled = False) # Do we have an existing Task? 
ttable = db.scheduler_task args = '[%s, "%s"]' % (record.channel_id, record.function_name) query = ((ttable.function_name == "msg_parse") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: # Disable all db(query).update(status="STOPPED") return "Parser disabled" else: return "Parser already disabled" # ------------------------------------------------------------------------- @staticmethod def parser_disable_interactive(r, **attr): """ Disable a Parser - Disconnect a Parser from a Channel S3Method for interactive requests """ result = current.s3db.msg_parser_disable(r.id) current.session.confirmation = result redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def msg_parser_onaccept(form): """ Process the Enabled Flag """ if form.record: # Update form # process of changed if form.record.enabled and not form.vars.enabled: current.s3db.msg_parser_disable(form.vars.id) elif form.vars.enabled and not form.record.enabled: current.s3db.msg_parser_enable(form.vars.id) else: # Create form # Process only if enabled if form.vars.enabled: current.s3db.msg_parser_enable(form.vars.id) # ============================================================================= class S3RSSModel(S3ChannelModel): """ RSS channel """ names = ("msg_rss_channel", "msg_rss", ) def model(self): T = current.T define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # RSS Settings for an account # tablename = "msg_rss_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name", length=255, unique=True, label = T("Name"), ), Field("description", label = T("Description"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("url", label = T("URL"), requires = IS_URL(), ), s3_datetime(label = T("Last Polled"), writable = False, ), Field("etag", label = T("ETag"), writable = False ), *s3_meta_fields()) self.configure(tablename, list_fields = ["name", "description", "enabled", "url", "date", "channel_status.status", ], onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "rss_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "rss_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "rss_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # RSS Feed Posts # tablename = "msg_rss" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default="now", label = T("Published on"), ), Field("title", label = T("Title"), ), Field("body", "text", label = T("Content"), ), Field("from_address", label = T("Link"), ), # http://pythonhosted.org/feedparser/reference-feed-author_detail.html Field("author", label = T("Author"), ), # http://pythonhosted.org/feedparser/reference-entry-tags.html Field("tags", "list:string", label = T("Tags"), ), self.gis_location_id(), # Just present for Super Entity Field("inbound", "boolean", default = True, readable = False, writable = False, ), *s3_meta_fields()) self.configure(tablename, deduplicate = self.msg_rss_duplicate, list_fields = ["channel_id", "title", "from_address", "date", "body" 
], super_entity = current.s3db.msg_message, ) # --------------------------------------------------------------------- return dict() # --------------------------------------------------------------------- @staticmethod def msg_rss_duplicate(item): """ Import item deduplication, match by link (from_address) @param item: the S3ImportItem instance """ if item.tablename == "msg_rss": table = item.table from_address = item.data.get("from_address") query = (table.from_address == from_address) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # ============================================================================= class S3SMSModel(S3Model): """ SMS: Short Message Service These can be received through a number of different gateways - MCommons - Modem (@ToDo: Restore this) - Tropo - Twilio """ names = ("msg_sms",) def model(self): #T = current.T user = current.auth.user if user and user.organisation_id: # SMS Messages need to be tagged to their org so that they can be sent through the correct gateway default = user.organisation_id else: default = None # --------------------------------------------------------------------- # SMS Messages: InBox & Outbox # tablename = "msg_sms" self.define_table(tablename, # Instance self.super_link("message_id", "msg_message"), self.msg_channel_id(), self.org_organisation_id(default = default), s3_datetime(default="now"), Field("body", "text", # Allow multi-part SMS #length = 160, #label = T("Message"), ), Field("from_address", #label = T("Sender"), ), Field("to_address", #label = T("To"), ), Field("inbound", "boolean", default = False, #represent = lambda direction: \ # (direction and [T("In")] or \ # [T("Out")])[0], #label = T("Direction")), ), # Used e.g. 
for Clickatell Field("remote_id", #label = T("Remote ID"), ), *s3_meta_fields()) self.configure(tablename, super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3SMSOutboundModel(S3Model): """ SMS: Short Message Service - Outbound Channels These can be sent through a number of different gateways - Modem - SMTP - Tropo - Web API (inc Clickatell, MCommons, mVaayoo) """ names = ("msg_sms_outbound_gateway", "msg_sms_modem_channel", "msg_sms_smtp_channel", "msg_sms_webapi_channel", ) def model(self): #T = current.T configure = self.configure define_table = self.define_table # --------------------------------------------------------------------- # SMS Outbound Gateway # - select which gateway is in active use for which Organisation/Branch # tablename = "msg_sms_outbound_gateway" define_table(tablename, self.msg_channel_id( requires = IS_ONE_OF(current.db, "msg_channel.channel_id", S3Represent(lookup="msg_channel"), instance_types = ("msg_sms_modem_channel", "msg_sms_webapi_channel", "msg_sms_smtp_channel", ), sort = True, ), ), #Field("outgoing_sms_handler", length=32, # requires = IS_IN_SET(current.msg.GATEWAY_OPTS, # zero = None), # ), # Allow selection of different gateways based on Organisation/Branch self.org_organisation_id(), # @ToDo: Allow selection of different gateways based on destination Location #self.gis_location_id(), # @ToDo: Allow addition of relevant country code (currently in deployment_settings) #Field("default_country_code", "integer", # default = 44), *s3_meta_fields()) # --------------------------------------------------------------------- # SMS Modem Channel # tablename = "msg_sms_modem_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("modem_port"), Field("modem_baud", "integer", default = 115200, ), Field("enabled", "boolean", default = True, ), Field("max_length", "integer", default = 160, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- # SMS via SMTP Channel # tablename = "msg_sms_smtp_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("address", length=64, requires = IS_NOT_EMPTY(), ), Field("subject", length=64), Field("enabled", "boolean", default = True, ), Field("max_length", "integer", default = 160, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- # Settings for Web API services # # @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options # + Advanced mode for raw access to real fields # tablename = "msg_sms_webapi_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("url", default = "https://api.clickatell.com/http/sendmsg", # Clickatell #default = "https://secure.mcommons.com/api/send_message", # Mobile Commons requires = IS_URL(), ), Field("parameters", default = "user=yourusername&password=yourpassword&api_id=yourapiid", # Clickatell #default = "campaign_id=yourid", # Mobile Commons ), Field("message_variable", "string", default = "text", # Clickatell #default = "body", # Mobile Commons requires = IS_NOT_EMPTY(), ), Field("to_variable", "string", 
default = "to", # Clickatell #default = "phone_number", # Mobile Commons requires = IS_NOT_EMPTY(), ), Field("max_length", "integer", default = 480, # Clickatell concat 3 ), # If using HTTP Auth (e.g. Mobile Commons) Field("username"), Field("password"), Field("enabled", "boolean", default = True, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3TropoModel(S3Model): """ Tropo can be used to send & receive SMS, Twitter & XMPP https://www.tropo.com """ names = ("msg_tropo_channel", "msg_tropo_scratch", ) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Tropo Channels # tablename = "msg_tropo_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("token_messaging"), #Field("token_voice"), *s3_meta_fields()) self.configure(tablename, super_entity = "msg_channel", ) set_method("msg", "tropo_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "tropo_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "tropo_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Tropo Scratch pad for outbound messaging # tablename = "msg_tropo_scratch" define_table(tablename, Field("row_id", "integer"), Field("message_id", "integer"), Field("recipient"), Field("message"), Field("network"), ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3TwilioModel(S3ChannelModel): """ Twilio Inbound SMS channel """ names = ("msg_twilio_channel", "msg_twilio_sid", ) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twilio Channels # tablename = "msg_twilio_channel" define_table(tablename, # Instance self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("account_name", length=255, unique=True), Field("url", default = \ "https://api.twilio.com/2010-04-01/Accounts" ), Field("account_sid", length=64, requires = IS_NOT_EMPTY(), ), Field("auth_token", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "twilio_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "twilio_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "twilio_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Twilio Message extensions # - store message sid to know which ones we've already downloaded # tablename = "msg_twilio_sid" define_table(tablename, # Component not Instance self.msg_message_id(ondelete = "CASCADE"), Field("sid"), 
*s3_meta_fields()) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3TwitterModel(S3Model): names = ("msg_twitter_channel", "msg_twitter", ) def model(self): T = current.T db = current.db configure = self.configure define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twitter Channel # tablename = "msg_twitter_channel" define_table(tablename, #Instance self.super_link("channel_id", "msg_channel"), # @ToDo: Allow different Twitter accounts for different PEs (Orgs / Teams) #self.pr_pe_id(), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("twitter_account"), Field("consumer_key", "password"), Field("consumer_secret", "password"), Field("access_token", "password"), Field("access_token_secret", "password"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_channel_onaccept, #onvalidation = self.twitter_channel_onvalidation super_entity = "msg_channel", ) set_method("msg", "twitter_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "twitter_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "twitter_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Twitter Messages: InBox & Outbox # tablename = "msg_twitter" define_table(tablename, # Instance self.super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now", label = T("Posted on"), ), Field("body", length=140, label = T("Message"), ), Field("from_address", #notnull=True, label = T("From"), represent = self.twitter_represent, requires = IS_NOT_EMPTY(), ), Field("to_address", label = T("To"), represent = self.twitter_represent, ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or \ [T("Out")])[0], ), Field("msg_id", # Twitter Message ID readable = False, writable = False, ), *s3_meta_fields()) configure(tablename, list_fields = ["id", #"priority", #"category", "body", "from_address", "date", #"location_id", ], #orderby = ~table.priority, super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict() # ------------------------------------------------------------------------- @staticmethod def twitter_represent(nickname, show_link=True): """ Represent a Twitter account """ if not nickname: return current.messages["NONE"] db = current.db s3db = current.s3db table = s3db.pr_contact query = (table.contact_method == "TWITTER") & \ (table.value == nickname) row = db(query).select(table.pe_id, limitby=(0, 1)).first() if row: repr = s3db.pr_pentity_represent(row.pe_id) if show_link: # Assume person ptable = s3db.pr_person row = db(ptable.pe_id == row.pe_id).select(ptable.id, limitby=(0, 1)).first() if row: link = URL(c="pr", f="person", args=[row.id]) return A(repr, _href=link) return repr else: return nickname # ------------------------------------------------------------------------- @staticmethod def twitter_channel_onvalidation(form): """ Complete oauth: take tokens from session + pin from form, and do the 2nd API call to Twitter """ T = current.T session = current.session settings = current.deployment_settings.msg 
s3 = session.s3 vars = form.vars if vars.pin and s3.twitter_request_key and s3.twitter_request_secret: try: import tweepy except: raise HTTP(501, body=T("Can't import tweepy")) oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key, settings.twitter_oauth_consumer_secret) oauth.set_request_token(s3.twitter_request_key, s3.twitter_request_secret) try: oauth.get_access_token(vars.pin) vars.oauth_key = oauth.access_token.key vars.oauth_secret = oauth.access_token.secret twitter = tweepy.API(oauth) vars.twitter_account = twitter.me().screen_name vars.pin = "" # we won't need it anymore return except tweepy.TweepError: session.error = T("Settings were reset because authenticating with Twitter failed") # Either user asked to reset, or error - clear everything for k in ["oauth_key", "oauth_secret", "twitter_account"]: vars[k] = None for k in ["twitter_request_key", "twitter_request_secret"]: s3[k] = "" # ============================================================================= class S3TwitterSearchModel(S3ChannelModel): """ Twitter Searches - results can be fed to KeyGraph https://dev.twitter.com/docs/api/1.1/get/search/tweets """ names = ("msg_twitter_search", "msg_twitter_result", ) def model(self): T = current.T db = current.db configure = self.configure define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twitter Search Query # tablename = "msg_twitter_search" define_table(tablename, Field("keywords", "text", label = T("Keywords"), ), # @ToDo: Allow setting a Point & Radius for filtering by geocode #self.gis_location_id(), Field("lang", # Set in controller #default = current.response.s3.language, label = T("Language"), ), Field("count", "integer", default = 100, label = T("# Results per query"), ), Field("include_entities", "boolean", default = False, label = T("Include Entity Information?"), represent = s3_yes_no_represent, comment = DIV(_class="tooltip", _title="%s|%s" % (T("Entity Information"), T("This is required if analyzing with KeyGraph."))), ), # @ToDo: Rename or even move to Component Table Field("is_processed", "boolean", default = False, label = T("Processed with KeyGraph?"), represent = s3_yes_no_represent, ), Field("is_searched", "boolean", default = False, label = T("Searched?"), represent = s3_yes_no_represent, ), *s3_meta_fields()) configure(tablename, list_fields = ["keywords", "lang", "count", #"include_entities", ], ) # Reusable Query ID represent = S3Represent(lookup=tablename, fields=["keywords"]) search_id = S3ReusableField("search_id", "reference %s" % tablename, label = T("Search Query"), ondelete = "CASCADE", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_twitter_search.id") ), ) set_method("msg", "twitter_search", method = "poll", action = self.twitter_search_poll) set_method("msg", "twitter_search", method = "keygraph", action = self.twitter_keygraph) set_method("msg", "twitter_result", method = "timeline", action = self.twitter_timeline) # --------------------------------------------------------------------- # Twitter Search Results # # @ToDo: Store the places mentioned in the Tweet as linked Locations # tablename = "msg_twitter_result" define_table(tablename, # Instance self.super_link("message_id", "msg_message"), # Just present for Super Entity #self.msg_channel_id(), search_id(), s3_datetime(default="now", label = T("Tweeted on"), ), Field("tweet_id", label = T("Tweet ID")), Field("lang", label = T("Language")), Field("from_address", label = 
T("Tweeted by")), Field("body", label = T("Tweet")), # @ToDo: Populate from Parser #Field("category", # writable = False, # label = T("Category"), # ), #Field("priority", "integer", # writable = False, # label = T("Priority"), # ), self.gis_location_id(), # Just present for Super Entity #Field("inbound", "boolean", # default = True, # readable = False, # writable = False, # ), *s3_meta_fields()) configure(tablename, list_fields = [#"category", #"priority", "body", "from_address", "date", "location_id", ], #orderby=~table.priority, super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict() # ----------------------------------------------------------------------------- @staticmethod def twitter_search_poll(r, **attr): """ Perform a Search of Twitter S3Method for interactive requests """ id = r.id tablename = r.tablename current.s3task.async("msg_twitter_search", args=[id]) current.session.confirmation = \ current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them") # Filter results to this Search redirect(URL(f="twitter_result", vars={"~.search_id": id})) # ----------------------------------------------------------------------------- @staticmethod def twitter_keygraph(r, **attr): """ Prcoess Search Results with KeyGraph S3Method for interactive requests """ tablename = r.tablename current.s3task.async("msg_process_keygraph", args=[r.id]) current.session.confirmation = \ current.T("The search results are now being processed with KeyGraph") # @ToDo: Link to KeyGraph results redirect(URL(f="twitter_result")) # ============================================================================= @staticmethod def twitter_timeline(r, **attr): """ Display the Tweets on a Simile Timeline http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline """ if r.representation == "html" and r.name == "twitter_result": response = current.response s3 = response.s3 appname = r.application # Add core Simile Code s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname) # Add our control script if s3.debug: s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname) else: s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname) # Add our data # @ToDo: Make this the initial data & then collect extra via REST with a stylesheet # add in JS using S3.timeline.eventSource.addMany(events) where events is a [] if r.record: # Single record rows = [r.record] else: # Multiple records # @ToDo: Load all records & sort to closest in time # http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d rows = r.resource.select(["date", "body"], limit=2000, as_rows=True) data = {"dateTimeFormat": "iso8601", } now = r.utcnow tl_start = tl_end = now events = [] import re for row in rows: # Dates start = row.date or "" if start: if start < tl_start: tl_start = start if start > tl_end: tl_end = start start = start.isoformat() title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body)) if len(title) > 30: title = title[:30] events.append({"start": start, "title": title, "description": row.body, }) data["events"] = events data = json.dumps(data, separators=SEPARATORS) code = "".join(( '''S3.timeline.data=''', data, ''' S3.timeline.tl_start="''', tl_start.isoformat(), '''" S3.timeline.tl_end="''', tl_end.isoformat(), '''" S3.timeline.now="''', now.isoformat(), '''" ''')) # Control our 
code in static/scripts/S3/s3.timeline.js s3.js_global.append(code) # Create the DIV item = DIV(_id="s3timeline", _class="s3-timeline") output = dict(item=item) # Maintain RHeader for consistency if attr.get("rheader"): rheader = attr["rheader"](r) if rheader: output["rheader"] = rheader output["title"] = current.T("Twitter Timeline") response.view = "timeline.html" return output else: r.error(405, current.ERROR.BAD_METHOD) # ============================================================================= class S3XFormsModel(S3Model): """ XForms are used by the ODK Collect mobile client http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android """ names = ("msg_xforms_store",) def model(self): #T = current.T # --------------------------------------------------------------------- # SMS store for persistence and scratch pad for combining incoming xform chunks tablename = "msg_xforms_store" self.define_table(tablename, Field("sender", length=20), Field("fileno", "integer"), Field("totalno", "integer"), Field("partno", "integer"), Field("message", length=160) ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3BaseStationModel(S3Model): """ Base Stations (Cell Towers) are a type of Site @ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain - see RadioMobile """ names = ("msg_basestation",) def model(self): T = current.T define_table = self.define_table # --------------------------------------------------------------------- # Base Stations (Cell Towers) # tablename = "msg_basestation" define_table(tablename, self.super_link("site_id", "org_site"), Field("name", notnull=True, length=64, # Mayon Compatibility label = T("Name"), ), Field("code", length=10, # Mayon compatibility label = T("Code"), # Deployments that don't wants site codes can hide them #readable = False, #writable = False, # @ToDo: Deployment Setting to add validator to make these unique ), self.org_organisation_id( label = T("Operator"), #widget=S3OrganisationAutocompleteWidget(default_from_profile=True), requires = self.org_organisation_requires(required=True, updateable=True), ), self.gis_location_id(), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_BASE = T("Create Base Station") current.response.s3.crud_strings[tablename] = Storage( label_create=T("Create Base Station"), title_display=T("Base Station Details"), title_list=T("Base Stations"), title_update=T("Edit Base Station"), title_upload=T("Import Base Stations"), title_map=T("Map of Base Stations"), label_list_button=T("List Base Stations"), label_delete_button=T("Delete Base Station"), msg_record_created=T("Base Station added"), msg_record_modified=T("Base Station updated"), msg_record_deleted=T("Base Station deleted"), msg_list_empty=T("No Base Stations currently registered")) self.configure(tablename, deduplicate = self.msg_basestation_duplicate, super_entity = "org_site", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # --------------------------------------------------------------------- @staticmethod def msg_basestation_duplicate(item): """ Import item deduplication, match by name (Adding location_id doesn't seem to be a good idea) @param item: the S3ImportItem instance """ if item.tablename == "msg_basestation": table = item.table name = "name" in item.data and item.data.name query = (table.name.lower() == name.lower()) #location_id = 
None # if "location_id" in item.data: # location_id = item.data.location_id ## This doesn't find deleted records: # query = query & (table.location_id == location_id) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() # if duplicate is None and location_id: ## Search for deleted basestations with this name # query = (table.name.lower() == name.lower()) & \ # (table.deleted == True) # row = db(query).select(table.id, table.deleted_fk, # limitby=(0, 1)).first() # if row: # fkeys = json.loads(row.deleted_fk) # if "location_id" in fkeys and \ # str(fkeys["location_id"]) == str(location_id): # duplicate = row if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # END =========================================================================
mit
2,607,987,622,534,597,000
37.903325
141
0.390586
false
5.521045
false
false
false
cdubz/babybuddy
reports/graphs/feeding_amounts.py
1
1422
# -*- coding: utf-8 -*-
from django.utils import timezone
from django.utils.translation import gettext as _

import plotly.offline as plotly
import plotly.graph_objs as go

from reports import utils


def feeding_amounts(instances):
    """
    Create a graph showing daily feeding amounts over time.
    :param instances: a QuerySet of Feeding instances.
    :returns: a tuple of the graph's html and javascript.
    """
    totals = {}
    for instance in instances:
        end = timezone.localtime(instance.end)
        date = end.date()
        if date not in totals.keys():
            totals[date] = 0
        totals[date] += instance.amount or 0

    amounts = [round(amount, 2) for amount in totals.values()]
    trace = go.Bar(
        name=_('Total feeding amount'),
        x=list(totals.keys()),
        y=amounts,
        hoverinfo='text',
        textposition='outside',
        text=amounts
    )

    layout_args = utils.default_graph_layout_options()
    layout_args['title'] = _('<b>Total Feeding Amounts</b>')
    layout_args['xaxis']['title'] = _('Date')
    layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
    layout_args['yaxis']['title'] = _('Feeding amount')

    fig = go.Figure({
        'data': [trace],
        'layout': go.Layout(**layout_args)
    })
    output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
    return utils.split_graph_output(output)
bsd-2-clause
-5,724,312,530,632,887,000
29.913043
72
0.627286
false
3.802139
false
false
false
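Editor's note: a minimal sketch of how the feeding_amounts helper in the record above might be wired into a Django view. The Feeding model location, the filter on amount, and the template name are assumptions for illustration only and are not taken from the repository record.

# Hypothetical view wiring for feeding_amounts (illustrative sketch).
from django.shortcuts import render

from core.models import Feeding                       # assumed model location
from reports.graphs.feeding_amounts import feeding_amounts


def feeding_amounts_report(request):
    # Only feedings with a recorded amount contribute to the daily totals.
    instances = Feeding.objects.filter(amount__isnull=False).order_by('end')
    # split_graph_output() returns the plot's html div and its javascript.
    html, javascript = feeding_amounts(instances)
    return render(request, 'reports/feeding_amounts.html',
                  {'html': html, 'javascript': javascript})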
geraldinepascal/FROGS
tools/phyloseq_beta_diversity/phyloseq_beta_diversity.py
1
7336
#!/usr/bin/env python3 # # Copyright (C) 2018 INRA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __author__ = 'Ta Thi Ngan & Maria Bernard INRA - SIGENAE' __copyright__ = 'Copyright (C) 2017 INRA' __license__ = 'GNU General Public License' __version__ = '3.2.3' __email__ = '[email protected]' __status__ = 'prod' import os import sys import argparse CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) FROGS_DIR="" if CURRENT_DIR.endswith("phyloseq_beta_diversity"): FROGS_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR)) else: FROGS_DIR = os.path.dirname(CURRENT_DIR) # PATH BIN_DIR = os.path.abspath(os.path.join(FROGS_DIR, "libexec")) os.environ['PATH'] = BIN_DIR + os.pathsep + os.environ['PATH'] APP_DIR = os.path.abspath(os.path.join(FROGS_DIR, "app")) os.environ['PATH'] = APP_DIR + os.pathsep + os.environ['PATH'] # PYTHONPATH LIB_DIR = os.path.abspath(os.path.join(FROGS_DIR, "lib")) sys.path.append(LIB_DIR) if os.getenv('PYTHONPATH') is None: os.environ['PYTHONPATH'] = LIB_DIR else: os.environ['PYTHONPATH'] = LIB_DIR + os.pathsep + os.environ['PYTHONPATH'] # LIBR LIBR_DIR = os.path.join(LIB_DIR,"external-lib") from frogsUtils import * ################################################################################################################################################## # # COMMAND LINES # ################################################################################################################################################## class Rscript(Cmd): """ @summary: Launch Rmarkdown script to present the data beta diversity with phyloseq. @see: http://rmarkdown.rstudio.com/ https://joey711.github.io/phyloseq/ @return: html file containing the plots beta divesity distance matrix tsv file(s) """ def __init__(self, html, phyloseq, varExp, methods, outdir, rmd_stderr): """ @param html: [str] path to store resulting html file. @param phyloseq: [str] path to phyloseq object in RData file, the result of FROGS Phyloseq Import Data. @param varExp: [str] Experiment variable to split plot. @param methods: [str] one or more of beta diversity method. @param outdir: [str] The path to store resulting beta diversity distance matrix. @param rmd_stderr: [str] Path to temporary Rmarkdown stderr output file """ rmd = os.path.join(CURRENT_DIR, "phyloseq_beta_diversity.Rmd") Cmd.__init__( self, 'Rscript', 'Run 1 code Rmarkdown', '-e "rmarkdown::render(' + "'" + rmd + "',knit_root_dir='" + outdir + "',output_file='" + html + \ "', params=list(phyloseq='" + phyloseq + "', varExp='" + varExp + "', methods='" + methods + "', libdir ='" + LIBR_DIR + "'), intermediates_dir='" + os.path.dirname(html) + "')" + '" 2> ' + rmd_stderr, "-e '(sessionInfo()[[1]][13])[[1]][1]; paste(\"Rmarkdown version: \",packageVersion(\"rmarkdown\")) ; library(phyloseq); paste(\"Phyloseq version: \",packageVersion(\"phyloseq\"))'") def get_version(self): """ @summary: Returns the program version number. 
@return: [str] Version number if this is possible, otherwise this method return 'unknown'. """ return Cmd.get_version(self, 'stdout') ################################################################################################################################################## # # MAIN # ################################################################################################################################################## if __name__ == "__main__": # Manage parameters parser = argparse.ArgumentParser( description='To present the data beta diversity with phyloseq.') parser.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." ) parser.add_argument( '--version', action='version', version=__version__ ) parser.add_argument('-v', '--varExp', type=str, required=True, default=None, help='The experiment variable you want to analyse.') parser.add_argument('-m', '--distance-methods', required=True, type=str, default='bray,cc,unifrac,wunifrac', help='Comma separated values beta diversity methods available in Phyloseq (see https://www.bioconductor.org/packages/devel/bioc/manuals/phyloseq/man/phyloseq.pdf). [Default: %(default)s].') # Inputs group_input = parser.add_argument_group( 'Inputs' ) group_input.add_argument('-r','--rdata', required=True, default=None, help="The path of RData file containing a phyloseq object-the result of FROGS Phyloseq Import Data" ) # output group_output = parser.add_argument_group( 'Outputs' ) group_output.add_argument('--matrix-outdir', required=True, action="store", type=str, help="Path to output matrix file") group_output.add_argument('-o','--html', default='phyloseq_beta_diversity.nb.html', help="The HTML file containing the graphs. [Default: %(default)s]" ) group_output.add_argument( '-l', '--log-file', default=sys.stdout, help='This output file will contain several informations on executed commands.') args = parser.parse_args() prevent_shell_injections(args) Logger.static_write(args.log_file, "## Application\nSoftware :" + sys.argv[0] + " (version : " + str(__version__) + ")\nCommand : " + " ".join(sys.argv) + "\n\n") # check parameter list_distance=["unifrac","wunifrac","bray","cc","dpcoa","jsd","manhattan","euclidean","canberra","kulczynski","jaccard","gower","altGower","morisita","horn","mountford","raup","binomial","chao","cao","wt","-1","c","wb","rt","I","e","t","me","j","sor","m","-2","co","g","-3","l","19","hk","rlb","sim","gl","z","maximum","binary","minkowski","ANY"] methods = args.distance_methods.strip() if not args.distance_methods.strip()[-1]=="," else args.distance_methods.strip()[:-1] for method in methods.split(","): if method not in list_distance: raise_exception( Exception( '\n\n#ERROR : Your method "'+str(method)+'", name is not correct !!! Please make sure that it is in the list:'+str(list_distance)+"\n\n")) # Process outdir = os.path.abspath(args.matrix_outdir) if not os.path.exists(outdir): os.makedirs(outdir) phyloseq=os.path.abspath(args.rdata) html=os.path.abspath(args.html) try: tmpFiles = TmpFiles(os.path.dirname(html)) rmd_stderr = tmpFiles.add("rmarkdown.stderr") Rscript(html, phyloseq, args.varExp, methods, outdir, rmd_stderr).submit( args.log_file ) finally : if not args.debug: tmpFiles.deleteAll()
gpl-3.0
-1,608,606,170,957,709,000
54.157895
350
0.598555
false
3.626298
false
false
false
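Editor's note: an illustrative invocation of the phyloseq_beta_diversity.py tool from the record above, driven from Python via subprocess. The flag names follow the script's argparse definitions; the RData file, variable name, and output paths are placeholders, not values from the repository.

# Sketch of a command-line invocation of the FROGS beta-diversity tool.
import subprocess

subprocess.run(
    [
        "python3", "phyloseq_beta_diversity.py",
        "--rdata", "phyloseq_import.Rdata",          # result of FROGS Phyloseq Import Data
        "--varExp", "EnvType",                       # experiment variable to split plots on
        "--distance-methods", "bray,cc,unifrac,wunifrac",
        "--matrix-outdir", "beta_matrices",
        "--html", "phyloseq_beta_diversity.nb.html",
    ],
    check=True,
)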
Detailscool/YHSpider
JiraStoryMaker/JiraStoryMaker2.py
1
5993
#!/usr/bin/python # -*- coding:utf-8 -*- # JiraStoryMaker.py # Created by Henry on 2018/4/9 # Description : from selenium import webdriver from selenium.webdriver.support.ui import Select from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By import os import json import time import sys reload(sys) sys.setdefaultencoding('utf-8') def create_story(**kwargs): summary_text = kwargs.get('summary_text', None) work_time_text = kwargs.get('work_time_text', None) REQ = kwargs.get('REQ', None) isFirst = kwargs.get('isFirst', False) time.sleep(1) new_button = driver.find_element_by_css_selector('#create_link') new_button.click() WebDriverWait(driver, 10000).until( EC.presence_of_element_located((By.CSS_SELECTOR, 'span.drop-menu')) ) drop_menus = driver.find_elements_by_css_selector('span.drop-menu') if isFirst: project = drop_menus[0] project.click() data_suggestions = driver.find_element_by_id('project-options').get_attribute('data-suggestions') items = json.loads(data_suggestions) # print items if isinstance(items, list) and items and isinstance(items[0], dict) and isinstance(items[0]['items'], list) and items[0]['items'] and isinstance(items[0]['items'][0], dict) and items[0]['items'][0]['label']: select_group = items[0]['items'][0]['label'] if u'IOSZHIBO' not in select_group: groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if 'IOSZHIBO' in a.text] # print '\ngroups:', groups if groups: groups[0].click() print 'click' time.sleep(0.5) else: project.click() story_type = driver.find_element_by_id('issuetype-single-select') story_type.click() story_type_groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if u'故事'==a.text] if story_type_groups: story_type_groups[0].click() time.sleep(0.5) drop_menus = driver.find_elements_by_css_selector('span.drop-menu') if len(drop_menus) < 5: time.sleep(10) print '出错啦' sys.exit(1) test_type = Select(driver.find_element_by_id('customfield_10200')) test_type.select_by_value('10202') time.sleep(0.5) requirement = Select(driver.find_element_by_id('customfield_10101')) requirement.select_by_value('10101') time.sleep(0.5) summary = driver.find_element_by_id('summary') summary.send_keys(unicode(summary_text)) time.sleep(0.5) work_time = driver.find_element_by_id('customfield_10833') work_time.send_keys(work_time_text) time.sleep(0.5) sprint = drop_menus[5] sprint.click() sprint_groups = [] while not sprint_groups: time.sleep(0.5) sprint_groups = [a for a in driver.find_elements_by_css_selector('li a') if group in a.text and u'在用' in a.text] sprint_groups[0].click() time.sleep(0.5) # time.sleep(15) # code = driver.find_element_by_id('customfield_10503-3') # code.click() if REQ: question = driver.find_element_by_css_selector('#issuelinks-issues-multi-select textarea') question.send_keys(unicode(REQ)) time.sleep(0.5) items = driver.find_elements_by_css_selector('li.menu-item') if items and len(items) > 1: relationship_item = items[1] relationship_item.click() time.sleep(0.5) dev_person = driver.find_element_by_css_selector('#customfield_10300_container textarea') if dev_person and login_token.split('-'): dev_person.send_keys(login_token.split('-')[0]) time.sleep(0.5) tester_person = driver.find_element_by_css_selector('#customfield_10400_container textarea') if tester_person and tester: tester_person.send_keys(tester) time.sleep(0.5) submit = driver.find_element_by_id('create-issue-submit') 
submit.click() WebDriverWait(driver, 10000).until( EC.element_to_be_clickable((By.XPATH, '//*[@id="aui-flag-container"]/div/div/a')) ) story = driver.find_element_by_xpath('//*[@id="aui-flag-container"]/div/div/a') story_href = story.get_attribute('href') print summary_text, ': ', story_href # print '已建: ', summary_text, ', 时长, :', work_time_text, '天' driver.refresh() if __name__ == '__main__': login_token = sys.argv[1] file_path = sys.argv[2] tester = sys.argv[3] if not os.path.exists(file_path): print '出错啦' sys.exit(1) else: with open(file_path, 'r') as f: lines = f.readlines() f.close() if '-' not in login_token: print '出错啦' sys.exit(1) elif len(login_token.split('-')[-1]) != 32: print '出错啦' sys.exit(1) chrome_options = webdriver.ChromeOptions() # chrome_options.add_argument('--headless') driver = webdriver.Chrome(chrome_options=chrome_options) url = '' + login_token print url driver.get(url) # print driver.get_cookies() group = u'iOS直播服务组' for idx, line in enumerate(lines): if ',' in line and ',' not in line: words = line.encode('utf-8').strip().split(',') elif ',' in line and ',' not in line: words = line.encode('utf-8').strip().split(',') else: words = [] if len(words) == 2: create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), isFirst=(idx==0)) elif len(words) == 3: create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), REQ=words[2].strip(), isFirst=(idx==0)) driver.close()
mit
-496,559,804,413,257,400
31.78453
215
0.607281
false
3.294281
true
false
false
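Editor's note: the expected input format for JiraStoryMaker2.py above is only implied by its line-parsing code, so here is a small sketch that writes a sample stories file and shows the invocation. The summaries, the REQ issue key, and the token/tester placeholders are illustrative assumptions, not values from the repository.

# Sketch of an input file for JiraStoryMaker2.py and how the script is called.
sample_lines = [
    "Implement login screen,2",            # summary, work days
    "Fix crash on startup,1,REQ-1234",     # summary, work days, linked REQ issue
]
with open("stories.txt", "w") as f:
    f.write("\n".join(sample_lines) + "\n")

# python JiraStoryMaker2.py <username>-<32-char-token> stories.txt <tester-name>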
sdgathman/pymilter
testsample.py
1
5060
import unittest import Milter import sample import template import mime import zipfile from Milter.test import TestBase from Milter.testctx import TestCtx class TestMilter(TestBase,sample.sampleMilter): def __init__(self): TestBase.__init__(self) sample.sampleMilter.__init__(self) class BMSMilterTestCase(unittest.TestCase): def setUp(self): self.zf = zipfile.ZipFile('test/virus.zip','r') self.zf.setpassword(b'denatured') def tearDown(self): self.zf.close() self.zf = None def testTemplate(self,fname='test2'): ctx = TestCtx() Milter.factory = template.myMilter ctx._setsymval('{auth_authen}','batman') ctx._setsymval('{auth_type}','batcomputer') ctx._setsymval('j','mailhost') count = 10 while count > 0: rc = ctx._connect(helo='milter-template.example.org') self.assertEquals(rc,Milter.CONTINUE) with open('test/'+fname,'rb') as fp: rc = ctx._feedFile(fp) milter = ctx.getpriv() self.assertFalse(ctx._bodyreplaced,"Message body replaced") ctx._close() count -= 1 def testHeader(self,fname='utf8'): ctx = TestCtx() Milter.factory = sample.sampleMilter ctx._setsymval('{auth_authen}','batman') ctx._setsymval('{auth_type}','batcomputer') ctx._setsymval('j','mailhost') rc = ctx._connect() self.assertEquals(rc,Milter.CONTINUE) with open('test/'+fname,'rb') as fp: rc = ctx._feedFile(fp) milter = ctx.getpriv() self.assertFalse(ctx._bodyreplaced,"Message body replaced") fp = ctx._body with open('test/'+fname+".tstout","wb") as ofp: ofp.write(fp.getvalue()) ctx._close() def testCtx(self,fname='virus1'): ctx = TestCtx() Milter.factory = sample.sampleMilter ctx._setsymval('{auth_authen}','batman') ctx._setsymval('{auth_type}','batcomputer') ctx._setsymval('j','mailhost') rc = ctx._connect() self.assertTrue(rc == Milter.CONTINUE) with self.zf.open(fname) as fp: rc = ctx._feedFile(fp) milter = ctx.getpriv() # self.assertTrue(milter.user == 'batman',"getsymval failed: "+ # "%s != %s"%(milter.user,'batman')) self.assertEquals(milter.user,'batman') self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed") self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(ctx._bodyreplaced,"Message body not replaced") fp = ctx._body with open('test/'+fname+".tstout","wb") as f: f.write(fp.getvalue()) #self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read()) fp.seek(0) msg = mime.message_from_file(fp) s = msg.get_payload(1).get_payload() milter.log(s) ctx._close() def testDefang(self,fname='virus1'): milter = TestMilter() milter.setsymval('{auth_authen}','batman') milter.setsymval('{auth_type}','batcomputer') milter.setsymval('j','mailhost') rc = milter.connect() self.assertTrue(rc == Milter.CONTINUE) with self.zf.open(fname) as fp: rc = milter.feedFile(fp) self.assertTrue(milter.user == 'batman',"getsymval failed") # setsymlist not working in TestBase #self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed") self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(milter._bodyreplaced,"Message body not replaced") fp = milter._body with open('test/'+fname+".tstout","wb") as f: f.write(fp.getvalue()) #self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read()) fp.seek(0) msg = mime.message_from_file(fp) s = msg.get_payload(1).get_payload() milter.log(s) milter.close() def testParse(self,fname='spam7'): milter = TestMilter() milter.connect('somehost') rc = milter.feedMsg(fname) self.assertTrue(rc == Milter.ACCEPT) self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.") fp = milter._body with open('test/'+fname+".tstout","wb") as f: f.write(fp.getvalue()) milter.close() def 
testDefang2(self): milter = TestMilter() milter.connect('somehost') rc = milter.feedMsg('samp1') self.assertTrue(rc == Milter.ACCEPT) self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.") with self.zf.open("virus3") as fp: rc = milter.feedFile(fp) self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(milter._bodyreplaced,"Message body not replaced") fp = milter._body with open("test/virus3.tstout","wb") as f: f.write(fp.getvalue()) #self.assertTrue(fp.getvalue() == open("test/virus3.out","r").read()) with self.zf.open("virus6") as fp: rc = milter.feedFile(fp) self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(milter._bodyreplaced,"Message body not replaced") self.assertTrue(milter._headerschanged,"Message headers not adjusted") fp = milter._body with open("test/virus6.tstout","wb") as f: f.write(fp.getvalue()) milter.close() def suite(): return unittest.makeSuite(BMSMilterTestCase,'test') if __name__ == '__main__': unittest.main()
gpl-2.0
4,102,225,718,785,301,500
33.189189
77
0.651383
false
3.227041
true
false
false
StuartGordonReid/Comp-Finance
Optimizers/Solution.py
1
1512
__author__ = 'Stuart Gordon Reid'
__email__ = '[email protected]'
__website__ = 'http://www.stuartreid.co.za'

"""
File description
"""


class Solution(object):
    solution = []

    def __init__(self, solution, problem):
        """
        Abstract initialization method for a solution to some optimization function
        :param solution: a numpy array (much faster than lists)
        """
        self.solution = solution
        self.problem = problem
        return

    def __len__(self):
        """
        Overload of the len operator for the Solution class
        :rtype: int
        """
        return len(self.solution)

    def update(self, solution):
        """
        This method is used for updating a solution
        """
        self.solution = solution

    def get(self):
        """
        This method is used to retrieve the numpy array for direct manipulation
        """
        return self.solution

    def evaluate(self):
        return self.problem.evaluate(self.solution)

    def __gt__(self, other):
        assert isinstance(other, Solution)
        # Compare the optimization direction by value, not identity.
        if self.problem.optimization == "min":
            return self.evaluate() < other.evaluate()
        elif self.problem.optimization == "max":
            return self.evaluate() > other.evaluate()

    def deep_copy(self):
        copy = Solution(None, self.problem)
        copy.solution = []
        for i in range(len(self.solution)):
            copy.solution.append(self.solution[i])
        return copy
lgpl-3.0
2,597,317,269,718,818,000
25.526316
83
0.587963
false
4.295455
false
false
false
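Editor's note: a minimal, self-contained usage sketch for the Solution wrapper in the record above. The SphereProblem stand-in and the import path (derived from the record's Optimizers/Solution.py path) are assumptions for illustration, not part of the repository.

# Sketch: wrapping candidate vectors in Solution and comparing them.
import numpy

from Optimizers.Solution import Solution       # assumed import path


class SphereProblem(object):
    optimization = "min"                       # Solution.__gt__ reads this flag

    def evaluate(self, solution):
        # Fitness of a candidate: sum of squared components.
        return float(numpy.sum(numpy.square(solution)))


problem = SphereProblem()
a = Solution(numpy.array([0.5, -0.25, 1.0]), problem)
b = Solution(numpy.array([2.0, 2.0, 2.0]), problem)

print(a.evaluate(), b.evaluate())              # 1.3125 12.0
print(a > b)                                   # True: lower fitness wins when minimizing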
ModoUnreal/PyWeather
setup.py
1
99122
''' _______ | \ \ / @@@; | \ \ / `#....@ | | \ / ,;@.....;,; | | \ / @..@........@` PyWeather Setup | | \ / .............@ version 0.6.3 beta | / \ / .............@ (c) 2017-2018 - o355 |_______/ | @...........#` | | .+@@++++@#; | | @ ; , | | : ' . | | @ # .` | | @ # .` ''' # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys if sys.version_info < (3, 0, 0): print("You'll need Python 3 to run PyWeather.", "Press enter to exit.") input() sys.exit() elif (sys.version_info > (3, 0, 0) and sys.version_info < (3, 5, 0)): print("You have a Python version between 3.0 and 3.4.", "While PyWeather will work, you may experience a few quirks.", "Try updating to Python 3.6, as it works more reliably.", "Please take note of this in PyWeather.","", sep="\n") elif sys.version_info >= (3, 7, 0): print("You have a Python version of 3.7 and greater.", "Please note that PyWeather 0.6.2 beta is NOT certified to work with", "Python 3.7. Python 3.6 and below should work just fine.", sep="\n") import configparser import traceback import subprocess import logging import os import urllib # Now force the writing of the versioninfo file during setup, this should prevent issues # in the event I forget to gitignore the file. try: open('updater//versioninfo.txt', 'w').close() with open("updater//versioninfo.txt", 'a') as out: out.write("0.6.3 beta") out.close() except: print("Couldn't write the versioninfo file. 
This may cause issues with PyWeather down the road.") config = configparser.ConfigParser() config.read('storage//config.ini') def configprovision(): try: config.add_section("GEOCODER API") except configparser.DuplicateSectionError: print("Failed to add the Geocoder API section.") try: config.add_section("FAVORITE LOCATIONS") except configparser.DuplicateSectionError: print("Failed to add the favorite locations section.") try: config.add_section("PREVIOUS LOCATIONS") except configparser.DuplicateSectionError: print("Failed to add the previous locations section") try: config.add_section("HURRICANE") except configparser.DuplicateSectionError: print("Failed to add the hurricane section.") try: config.add_section("FIRSTINPUT") except configparser.DuplicateSectionError: print("Failed to add the firstinput section.") try: config.add_section('SUMMARY') except configparser.DuplicateSectionError: print("Failed to add the summary section.") try: config.add_section('VERBOSITY') except configparser.DuplicateSectionError: print("Failed to add the verbosity section.") try: config.add_section('TRACEBACK') except configparser.DuplicateSectionError: print("Failed to add the traceback section.") try: config.add_section('UI') except configparser.DuplicateSectionError: print("Failed to add the UI section.") try: config.add_section('PREFETCH') except configparser.DuplicateSectionError: print("Failed to add the prefetch section.") try: config.add_section('UPDATER') except configparser.DuplicateSectionError: print("Failed to add the updater section.") try: config.add_section('KEYBACKUP') except configparser.DuplicateSectionError: print("Failed to add the keybackup section.") try: config.add_section('PYWEATHER BOOT') except configparser.DuplicateSectionError: print("Failed to add the PyWeather Boot section.") try: config.add_section('USER') except configparser.DuplicateSectionError: print("Failed to add the user section.") try: config.add_section('CACHE') except configparser.DuplicateSectionError: print("Failed to add the cache section.") try: config.add_section('RADAR GUI') except configparser.DuplicateSectionError: print("Failed to add the Radar GUI section.") try: config.add_section('GEOCODER') except configparser.DuplicateSectionError: print("Failed to add the Geocoder section.") config['SUMMARY']['sundata_summary'] = 'False' config['SUMMARY']['almanac_summary'] = 'False' config['SUMMARY']['showalertsonsummary'] = 'True' config['SUMMARY']['showtideonsummary'] = 'False' config['SUMMARY']['showyesterdayonsummary'] = 'False' config['VERBOSITY']['verbosity'] = 'False' config['VERBOSITY']['json_verbosity'] = 'False' config['VERBOSITY']['setup_verbosity'] = 'False' config['VERBOSITY']['setup_jsonverbosity'] = 'False' config['VERBOSITY']['updater_verbosity'] = 'False' config['VERBOSITY']['updater_jsonverbosity'] = 'False' config['VERBOSITY']['keybackup_verbosity'] = 'False' config['VERBOSITY']['configdefault_verbosity'] = 'False' config['TRACEBACK']['tracebacks'] = 'False' config['TRACEBACK']['setup_tracebacks'] = 'False' config['TRACEBACK']['updater_tracebacks'] = 'False' config['TRACEBACK']['configdefault_tracebacks'] = 'False' config['UI']['show_entertocontinue'] = 'True' config['UI']['detailedinfoloops'] = '6' config['UI']['forecast_detailedinfoloops'] = '5' config['UI']['show_completediterations'] = 'False' config['UI']['alerts_usiterations'] = '1' config['UI']['alerts_euiterations'] = '2' config['UI']['extratools_enabled'] = 'False' config['PREFETCH']['10dayfetch_atboot'] = 'False' 
config['PREFETCH']['yesterdaydata_atboot'] = 'False' config['UPDATER']['autocheckforupdates'] = 'False' config['UPDATER']['show_updaterreleasetag'] = 'False' config['KEYBACKUP']['savedirectory'] = 'backup//' config['PYWEATHER BOOT']['validateapikey'] = 'True' config['UPDATER']['showReleaseNotes'] = 'True' config['UPDATER']['showReleaseNotes_uptodate'] = 'False' config['UPDATER']['showNewVersionReleaseDate'] = 'True' config['USER']['configprovisioned'] = 'True' config['CACHE']['enabled'] = 'True' config['CACHE']['alerts_cachedtime'] = '5' config['CACHE']['current_cachedtime'] = '10' config['CACHE']['threedayhourly_cachedtime'] = '60' config['CACHE']['tendayhourly_cachedtime'] = '60' config['CACHE']['forecast_cachedtime'] = '60' config['CACHE']['almanac_cachedtime'] = '240' config['CACHE']['sundata_cachedtime'] = '480' config['CACHE']['tide_cachedtime'] = '480' config['CACHE']['hurricane_cachedtime'] = '180' config['CACHE']['yesterday_cachedtime'] = '720' config['RADAR GUI']['radar_imagesize'] = 'normal' config['RADAR GUI']['bypassconfirmation'] = 'False' config['GEOCODER']['scheme'] = 'https' config['GEOCODER API']['customkey_enabled'] = 'False' config['GEOCODER API']['customkey'] = 'None' config['PREFETCH']['hurricanedata_atboot'] = 'False' config['FIRSTINPUT']['geoipservice_enabled'] = 'False' config['FIRSTINPUT']['allow_pwsqueries'] = 'True' config['HURRICANE']['enablenearestcity'] = 'False' config['HURRICANE']['enablenearestcity_forecast'] = 'False' config['HURRICANE']['api_username'] = 'pyweather_proj' config['HURRICANE']['nearestcitysize'] = 'medium' config['FAVORITE LOCATIONS']['enabled'] = 'True' config['FAVORITE LOCATIONS']['favloc1'] = 'None' config['FAVORITE LOCATIONS']['favloc2'] = 'None' config['FAVORITE LOCATIONS']['favloc3'] = 'None' config['FAVORITE LOCATIONS']['favloc4'] = 'None' config['FAVORITE LOCATIONS']['favloc5'] = 'None' config['FAVORITE LOCATIONS']['favloc1_data'] = 'None' config['FAVORITE LOCATIONS']['favloc2_data'] = 'None' config['FAVORITE LOCATIONS']['favloc3_data'] = 'None' config['FAVORITE LOCATIONS']['favloc4_data'] = 'None' config['FAVORITE LOCATIONS']['favloc5_data'] = 'None' config['PREVIOUS LOCATIONS']['enabled'] = 'True' config['PREVIOUS LOCATIONS']['prevloc1'] = 'None' config['PREVIOUS LOCATIONS']['prevloc2'] = 'None' config['PREVIOUS LOCATIONS']['prevloc3'] = 'None' config['PREVIOUS LOCATIONS']['prevloc4'] = 'None' config['PREVIOUS LOCATIONS']['prevloc5'] = 'None' config['PREVIOUS LOCATIONS']['prevloc1_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc2_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc3_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc4_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc5_data'] = 'None' try: with open('storage//config.ini', 'w') as configfile: config.write(configfile) except: print("Hmmf...an odd error occurred. A full traceback will be", "printed below. Please report this issue on GitHub", "(github.com/o355/pyweather), as that would be greatly appreciated", "for trying to fix the bug that you just encountered!", sep="\n") traceback.print_exc() # Giving users choice, unlike Microsoft. print("Would you like to continue using PyWeather with an unprovisioned config?", "It's highly recommended you don't continue, as you may encounter", "unexpected errors and issues with using PyWeather. Yes or No.", sep="\n") provisionfailed_continue = input("Input here: ").lower() if provisionfailed_continue == "yes": print("Continuing with PyWeather Setup. Please remember, you may encounter", "unexpected errors and issues. 
You can always retry provisioning your config", "by using the configsetup.py script in the storage folder.", sep="\n") elif provisionfailed_continue == "no": print("Stopping PyWeather Setup. You can retry to provision your config by using", "the configsetup.py script in the storage folder.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Couldn't understand your input. By default, PyWeather Setup is stopping.", "You can retry to provision your config by using the configsetup.py script", "in the storage folder. Press enter to exit.", sep="\n") input() sys.exit() # See if the config is "provisioned". If it isn't, a KeyError will occur, # because it's not created. Here, we set up the config to defaults if it's not # provisioned. try: configprovisioned = config.getboolean('USER', 'configprovisioned') except: print("Your config likely isn't provisioned. Would you like to provision your config?", "It's highly recommended you provision your config. If you decide not to,", "you may run into issues using PyWeather.", "Yes or No.", sep="\n") provisionconfig = input("Input here: ").lower() if provisionconfig == "yes": print("Provisioning your config.") configprovision() print("Config file provisioned successfully! Moving on with PyWeather setup...") elif provisionconfig == "no": print("Not provisioning your config. You may encounter unexpected errors", "and issues when using PyWeather, however.", sep="\n") else: print("Couldn't understand your input. By default, I'm going to provision", "your config. Beginning now...", sep="\n") configprovision() print("Config file provisioned successfully! Moving on with PyWeather setup...") try: verbosity = config.getboolean('VERBOSITY', 'setup_verbosity') jsonVerbosity = config.getboolean('VERBOSITY', 'setup_jsonverbosity') tracebacksEnabled = config.getboolean('TRACEBACK', 'setup_tracebacks') except: print("Couldn't load your config file. Make sure there aren't any typos", "in the config, and that the config file is accessible.", "Setting config variables to their defaults.", "Here's the full traceback, in case you need it.", sep="\n") traceback.print_exc() verbosity = False jsonVerbosity = False tracebacksEnabled = False def printException(): if tracebacksEnabled == True: print("Here's the full traceback (for error reporting):") traceback.print_exc() def printException_loggerwarn(): if verbosity == True: logger.warning("Oh snap! We ran into a non-critical error. Here's the traceback.") traceback.print_exc() logger = logging.getLogger(name='pyweather_setup_0.6.2beta') logger.setLevel(logging.DEBUG) logformat = '%(asctime)s | %(levelname)s | %(message)s' logging.basicConfig(format=logformat) if verbosity == True: logger.setLevel(logging.DEBUG) elif tracebacksEnabled == True: logger.setLevel(logging.ERROR) else: logger.setLevel(logging.CRITICAL) logger.debug("Listing configuration options:") logger.debug("verbosity: %s ; jsonVerbosity: %s" % (verbosity, jsonVerbosity)) logger.debug("tracebacksEnabled: %s" % tracebacksEnabled) print("Hi! Welcome to PyWeather 0.6.3 beta! 
Glad that you're here.", "I'm here to help set up PyWeather, and let you configure it to your liking.", "Let's begin!", sep="\n") import shutil import time import json import codecs buildnumber = 63 buildversion = "0.6.3 beta" logger.debug("buildnumber: %s ; buildversion: %s" % (buildnumber, buildversion)) print("","Before we get started, I want to confirm some permissions from you.", "Is it okay if I use 1-5 MB of data (downloading libraries), save a small", "text file called apikey.txt (under 2 KB), and automatically install Python", "libraries?", "Please input yes or no below:", sep="\n") confirmPermissions = input("Input here: ").lower() logger.debug("confirmPermissions: %s" % confirmPermissions) if confirmPermissions == "no": logger.debug("User denied permissions. Closing...") print("Okay! Closing now.", "Press enter to exit.", sep="\n") input() sys.exit() elif confirmPermissions != "yes": logger.debug("Couldn't understand. Closing...") print("I couldn't understand what you said.", "As a precaution, I won't proceed any further.", "Press enter to exit.", sep="\n") input() sys.exit() print("","Cool! Let's start.", "I'm going to start by checking for necessary libraries (to run PyWeather).", "This can take a moment, so please hold tight while I check!", sep="\n") try: import pip except ImportError: logger.warn("pip is NOT installed! Asking user for automated install...") printException_loggerwarn() print("","Shucks! PIP couldn't be imported, and I need PIP to install", "libraries for you. Would you like me to install PIP for you?", "Yes or No.", sep="\n") pipConfirm = input("Input here: ").lower() logger.debug("pipConfirm: %s" % pipConfirm) if pipConfirm == "no": logger.info("User denied PIP install, closing...") print("","Okay! I'm closing setup, as I need PIP to continue.", "Press enter to continue.", sep="\n") input() sys.exit() elif pipConfirm == "yes": logger.info("User allowed PIP install. Starting...") print("","Okay!", "I'll download PIP's installer, and run it.", "Doing such uses about 2-4 MB of data, and will quit PW setup.", "When the setup script finishes, you'll need to run the setup script again." "I'll start in a few seconds.", sep="\n") time.sleep(3) print("Downloading the installer...") # We use the built-in urllib library, as some Python installs don't include requests. try: with urllib.request.urlopen('https://bootstrap.pypa.io/get-pip.py') as update_response, open('get-pip.py', 'wb') as update_out_file: logger.debug("update_response: %s ; update_out_file: %s" % (update_response, update_out_file)) shutil.copyfileobj(update_response, update_out_file) except: print("Couldn't download the PIP installer, either due to no internet connection, or the library that fetches", "files has failed. As an alternative, you can download the installer yourself.", "Please download this file: 'https://bootstrap.pypa.io/get-pip.py', and place it in PyWeather's base directory.", "Afterwards, press enter to execute the installer. Press Control + C to exit.", sep="\n") printException() input() print("Running the installer...") logger.debug("Executing get-pip.py. If this script exits, please restart the setup script.") exec(open("get-pip.py").read()) else: logger.warn("Couldn't understand the input. Closing...") print("","I didn't understand what you said.", "As a precaution, I'm closing setup, as I need PIP to continue.", "Press enter to exit.", sep="\n") input() sys.exit() except PermissionError: traceback.print_exc() print("PIP has incorrect permissions on your machine. 
Please attempt to fix", "permissions on the folder that is listed in the traceback.", "Linux users: Use sudo chown -R <yourusername> <folder>, this should fix the issue.", "Press enter to exit.", sep="\n") input() sys.exit() print("Deleting the PIP installer file (if it exists)") try: os.remove("get-pip.py") except: printException_loggerwarn() print("The file get-pip.py didn't exist, or we had wrong permissions.") neededLibraries = 0 try: import colorama coloramaInstalled = True logger.info("Colorama is installed.") logger.debug("coloramaInstalled: %s" % coloramaInstalled) except ImportError: coloramaInstalled = False neededLibraries = neededLibraries + 1 logger.warn("Colorama is not installed.") printException_loggerwarn() logger.debug("coloramaInstalled: %s ; neededLibraries: %s" % (coloramaInstalled, neededLibraries)) try: import geopy geopyInstalled = True logger.info("geopy is installed.") logger.debug("geopyInstalled: %s" % geopyInstalled) except ImportError: geopyInstalled = False neededLibraries = neededLibraries + 1 logger.info("geopy is NOT installed.") printException_loggerwarn() logger.debug("geopyInstalled: %s ; neededLibraries: %s" % (geopyInstalled, neededLibraries)) try: from appJar import gui appjarInstalled = True logger.info("appjar is installed.") logger.debug("appjarInstalled: %s" % appjarInstalled) except ImportError as e: if e == "No module named '_tkinter', please install the python3-tk package": print("appJar cannot run on this platform. Skipping installation...") appjarInstalled = True logger.debug("appjarInstalled: %s" % appjarInstalled) else: appjarInstalled = False neededLibraries = neededLibraries + 1 logger.debug("appJar is NOT installed.") printException_loggerwarn() logger.debug("appjarInstalled: %s ; neededLibraries: %s" % (appjarInstalled, neededLibraries)) try: import requests requestsInstalled = True logger.debug("requests is installed.") logger.debug("requestsInstalled: %s" % requestsInstalled) except: requestsInstalled = False neededLibraries = neededLibraries + 1 logger.debug("requests is NOT installed.") printException_loggerwarn() logger.debug("requestsInstalled: %s ; neededLibraries: %s" % (requestsInstalled, neededLibraries)) try: import halo haloInstalled = True logger.debug("halo is installed.") logger.debug("haloInstalled: %s" % haloInstalled) except: haloInstalled = False neededLibraries += 1 logger.debug("halo is NOT installed.") printException_loggerwarn() logger.debug("haloInstalled: %s ; neededLibraries: %s" % (haloInstalled, neededLibraries)) print("All done!") if neededLibraries == 0: logger.debug("All libraries are installed.") print("All necessary libraries have been installed!") else: logger.debug("Libraries need to be installed.") print("Shucks. Not all necessary libraries are installed. Here's what needs to be installed:") if coloramaInstalled is False: print("- Colorama") if geopyInstalled is False: print("- Geopy") if appjarInstalled is False: print("- appJar") if requestsInstalled is False: print("- Requests") if haloInstalled is False: print("- Halo") print("If you want me to, I can automatically install these libraries.", "Would you like me to do such? Yes or No.", sep="\n") neededLibrariesConfirm = input("Input here: ").lower() logger.debug("neededLibrariesConfirm: %s" % neededLibrariesConfirm) if neededLibrariesConfirm == "no": logger.warning("Not installing necessary libraries. Now exiting...") print("Okay. 
I needed to install necessary libraries to continue.", "Now quitting...", "Press enter to exit.", sep="\n") input() sys.exit() elif neededLibrariesConfirm == "yes": print("Now installing necessary libraries...") if coloramaInstalled is False: print("Installing Colorama...") pip.main(['install', 'colorama']) if geopyInstalled is False: print("Installing geopy...") pip.main(['install', 'geopy']) if appjarInstalled is False: print("Installing appJar...") pip.main(['install', 'appJar']) if requestsInstalled is False: print("Installing requests...") pip.main(['install', 'requests']) if haloInstalled is False: print("Installing halo...") pip.main(['install', 'halo']) logger.info("Running the double check on libraries...") print("Sweet! All libraries should be installed.", "Just to confirm, I'm double checking if needed libraries are installed.", sep="\n") try: import colorama logger.info("Colorama installed successfully.") except ImportError: logger.warn("colorama was not installed successfully.") print("Hmm...Colorama didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install colorama?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") colorama_lastresort = input("Input here: ").lower() logger.debug("colorama_lastresort: %s" % colorama_lastresort) if colorama_lastresort == "yes": try: print("Now executing `sudo -H pip3 install colorama`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install colorama"], shell=True) try: print("Attempting to reimport colorama.") import colorama print("Colorama is FINALLY installed!") except: print("Colorama still wasn't successfully installed.", "Cannot continue without Colorama.", "Try doing a manual install of Colorama with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of Colorama with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without Colorama.", "Try and do a manual install of Colorama with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif colorama_lastresort == "no": print("Not installing Colorama with a shell command.", "Cannot continue without Colorama.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without Colorama.", "Try installing Colorama with PIP.", "Press enter to exit.") input() sys.exit() try: import geopy logger.info("geopy installed successfully.") except ImportError: logger.warn("geopy was not installed successfully.") print("Hmm...geopy didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install geopy?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." 
"Yes or No.", sep="\n") geopy_lastresort = input("Input here: ").lower() logger.debug("geopy_lastresort: %s" % geopy_lastresort) if geopy_lastresort == "yes": try: print("Now executing `sudo -H pip3 install geopy`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install geopy"], shell=True) try: print("Attempting to reimport geopy.") import geopy print("Geopy is FINALLY installed!") except: print("Geopy still wasn't successfully installed.", "Cannot continue without geopy.", "Try doing a manual install of geopy with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of geopy with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without geopy.", "Try and do a manual install of geopy with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif geopy_lastresort == "no": print("Not installing geopy with a shell command.", "Cannot continue without geopy.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without geopy.", "Try installing geopy with PIP.", "Press enter to exit.") input() sys.exit() # Why is appJar not here? When appJar is straight up imported in a non-GUI environment, it'll throw an error # even when it's installed. I don't check for an install because of this reason. try: import requests logger.info("requests installed successfully.") except ImportError: logger.warning("Requests was not installed successfully.") print("Hmm...requests didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install requests?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") requests_lastresort = input("Input here: ").lower() logger.debug("requests_lastresort: %s" % requests_lastresort) if requests_lastresort == "yes": try: print("Now executing `sudo -H pip3 install requests`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install requests"], shell=True) try: # Fun fact: This is inside THREE try/except things. 
print("Attempting to reimport requests.") import requests print("requests is FINALLY installed!") except: print("requests still wasn't successfully installed.", "Cannot continue without requests.", "Try doing a manual install of requests with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of requests with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without appJar.", "Try and do a manual install of requests with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif requests_lastresort == "no": print("Not installing appJar with a shell command.", "Cannot continue without requests.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without requests.", "Try installing requests with PIP.", "Press enter to exit.") input() sys.exit() try: import halo logger.info("Halo installed successfully.") except ImportError: logger.warn("halo was not installed successfully.") print("Hmm...Halo didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install halo?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") halo_lastresort = input("Input here: ").lower() logger.debug("halo_lastresort: %s" % halo_lastresort) if halo_lastresort == "yes": try: print("Now executing `sudo -H pip3 install halo`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install halo"], shell=True) try: print("Attempting to reimport halo.") import colorama print("Halo is now installed!") except: print("Halo still wasn't successfully installed.", "Cannot continue without Halo.", "Try doing a manual install of Halo with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of Halo with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without Halo.", "Try and do a manual install of Halo with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif halo_lastresort == "no": print("Not installing Halo with a shell command.", "Cannot continue without Halo.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without Halo.", "Try installing Halo with PIP.", "Press enter to exit.") input() sys.exit() print("","All libraries are installed!", sep="\n") else: logger.warn("Input was not understood. Closing...") print("Your input wasn't understood for if you wanted to automatically import libraries.", "As a precaution PyWeather Setup needs to now close. Press enter to exit.", sep="\n") input() sys.exit() # Previously this updated all your pip packages. I then did this on my NAS (on FreeNAS 11). # It broke my NAS! Woo hoo! 
print("", "Would you like PyWeather to automatically update it's required packages?", "Doing this is generally recommended, and will have benefits down the line when", "some libraries fix known issues that occur in PyWeather. Yes or No.", sep="\n") confirm_updatepip = input("Input here: ").lower() logger.debug("confirm_updatepip: %s" % confirm_updatepip) if confirm_updatepip == "yes": print("") print("Updating PIP packages.") totalpackages = 5 updatecount = 1 pip_requiredlibraries = ['requests', 'halo', 'appjar', 'colorama', 'geopy'] for pkgname in pip_requiredlibraries: print("Now updating package: %s (Update %s/%s)" % (pkgname, updatecount, totalpackages)) pip.main(['install', '--upgrade', '%s' % pkgname]) updatecount = updatecount + 1 elif confirm_updatepip == "no": print("Not updating PIP packages. You may run into issues with non-updated", "packages in future versions of PyWeather.") else: print("Input not understood, not updating PIP packages. You may run into", "issues with non-updated packages in future versions of PyWeather.") # Verbosity is not needed here. print("I'm now going to guide you through obtaining an API key.", "Please carefully read my detailed instructions, so you don't mess anything up.", sep="\n") print("","If you know how to acquire a Wunderground API key, or are resetting PyWeather,", "hit enter 14 times to get to the API key entry.", sep="\n") print("Let's begin.", "Start by opening a web browser, and going to https://www.wunderground.com/weather/api/.", "Press any key when you are done.", sep="\n") input() print("Next, click the 'Explore my options' button.", "Press any key when you are done.", sep="\n") input() print("Next, click the small button next to 'ANVIL PLAN'.", "After that, confirm that the total underneath the 'Purchase Key' button says", "'$0 USD per month'.", "If the total underneath the 'Purchase Key' button doesn't", "say '$0 USD per month, please ensure that the small button next to 'Developer'", "on the table in the middle of the screen is selected, and the total", "says '$0 USD per month'", "Press any key when you are done.", sep="\n") input() print("Next, click the 'Purchase Key' button.", "Press any key when you are done.", sep="\n") input() print("Next, input your email, and a password to sign up for a Weather", "Underground account.", "Be sure to select the checkbox next to 'I agree to the Terms of Service'", "It's best if you leave the checkbox next to 'I would like to receive WU", "updates via email' unchecked.", "Press any key when you are done and ready.", sep="\n") input() print("Next, press the 'Sign up for free' button.", "When the welcome window pops up, be sure to click the X button at the top right of the popup.", "When clicking the X, you should be redirected to wunderground.com.", "Press any key when you are done and ready.", sep="\n") input() print("Next, click 'My Profile' at the top right corner of the homepage.", "In the dropdown, click 'My Email & Text Alerts'", "Press any key when you are done and ready.", sep="\n") input() print("Next, next to your email listed on the page, click the 'Edit / Verify' button.", "After you click the button, click the 'Verify Email' button.", "Press any key when you are done and ready.", sep="\n") input() print("Next, check your email in which you signed up with.", "If you got a letter from Weather Underground, titled 'Daily Forecast", "Email Verification', open that letter, and click the link.", "If you didn't get the letter, wait a few minutes, and be sure to check your spam folder.", 
"Hint: If you followed this guide exactly, WU will not be sending you daily forecasts to your email.", "Press any key when you are done and ready.", sep="\n") input() print("Your email should be verified.", "Next, in your web browser, head back to https://www.wunderground.com/weather/api/.", "Then, click the 'Explore my Options' button, again.", "Press any key when you are done and ready.", sep="\n") input() print("Next, at the top of the page, make sure the button next to 'ANVIL PLAN'", "is selected.", "After that, confirm that the total underneath the 'Purchase Key' button says", "'$0 USD per month'", "If the total doesn't say that, in the pricing table, make sure the button", "next to 'Developer' is selected.", "Press any key when you are done and ready.", sep="\n") input() print("Next, click the 'Purchase Key' button, on top of your total (which", "should be $0 USD per month)", "Next, fill out the form, considering these tips:", "For the contact name/email, it's recommended you use your real name", "(first name last initial is fine).", "It's also recommended that you use your real email.", "For the project name, put in something generic, like 'to use a script that", "uses WU's API', or 'WU API test'. It's up to you.", "For the project website, put in something generic, like 'google.com', or", "some other site you feel like having as the project site.", "For the question 'Where will the API be used', answer Other.", "For the question 'Will the API be used for commercial use?', answer No.", "For the question 'Will the API be used for manufacturing mobile chip", "processing?', answer No.", "Answer yes if you somehow are manufacturing mobile chip processing. I doubt", "you are, however.", "For the country that you are based in, put your location.", "Before we move on, fill out these forms, and press any key when you are done " "and ready.", sep="\n") input() print("Next, for the brief description, put something like 'using an API key", "to use a script using Wunderground'.", "After that, check both boxes at the bottom of the page. Read the ToS if you", "feel like it.", "Finally, click 'Purchase Key'.", "You should land on a page that says 'Edit API Key'.", "Press any key when you are done and ready.", sep="\n") input() print("In the table to the left of the page, copy the text that's under Key ID.", "(Ctrl+C, right click)", "I'm now going to ask you to input the API key into the text entry below.", "The API key will be saved to storage/apikey.txt, so PyWeather can easily", "pull it up.", "Press any key when you are done and ready.", sep="\n") input() print("Please input your API key below.") apikey_input = input("Input here: ") logger.debug("apikey_input: %s" % apikey_input) print("", "Just to confirm, the API key you gave me was: " + apikey_input + ".", sep="\n") print("Please double check your input, and confirm in the dialogue below.") apikey_confirm = input("Is the API key right? Yes or no: ").lower() logger.debug("apikey_confirm: %s" % apikey_confirm) if apikey_confirm == "no": while True: logger.debug("User now re-entering key...") print("","Please input your API key below.", sep="\n") apikey_input = input("Input here: ") logger.debug("apikey_input: %s" % apikey_input) print("Just to confirm, the API key you gave me was: " + apikey_input + ".") apikey_confirm = input("Is the API key right? 
Yes or no: ").lower() if apikey_confirm == "yes": break elif apikey_confirm == "no": continue else: print("Couldn't understand your input.", "I'll assume the API key is correct, moving on.", sep="\n") print("Now saving your API key...") open('storage//apikey.txt', 'w').close() with open("storage//apikey.txt", 'a') as out: logger.debug("out: %s" % out) out.write(apikey_input) out.close() logger.debug("Performed ops: overwrite apikey.txt, out.write(apikey_input), out.close()") print("", "I can also back up your API key, in case you do something wrong.", sep="\n") # A future release should bring customization as to the storage location. print("Would you like me to save a backup? Yes or no.") backup_APIkey = input("Input here: ").lower() if backup_APIkey == "yes": print("","Where would you want me to backup the key to?", "This is a directory. If I wanted my key at directory/backkey.txt,", "You would enter 'directory'. The default directory is 'backup'.", sep="\n") # Doing a .lower() here to prevent case insensitiveness. backup_APIkeydirectory = input("Input here: ").lower() folder_argument = backup_APIkeydirectory + "//backkey.txt" backup_APIkeydirectory2 = backup_APIkeydirectory + "//" logger.debug("backup_APIkeydirectory: %s ; backup_APIkeydirectory2: %s" % (backup_APIkeydirectory, backup_APIkeydirectory2)) logger.debug("folder_argument: %s" % folder_argument) # These two variables will get reset if the directory is backup, or empty. if backup_APIkeydirectory == "backup" or backup_APIkeydirectory == "": print("Using the default directory of //backup.") folder_argument = "backup//backkey.txt" backup_APIkeydirectory2 = "backup//" logger.debug("folder_argument: %s ; backup_APIkeydirectory2: %s" % (folder_argument, backup_APIkeydirectory2)) elif backup_APIkeydirectory != "backup": try: os.mkdir(backup_APIkeydirectory2) except: printException_loggerwarn() print("Couldn't make the directory, does it exist?") # Overwrite the file, if it exists. open(folder_argument, 'w').close() open(folder_argument, 'a').write(apikey_input) open(folder_argument).close() config['KEYBACKUP']['savedirectory'] = backup_APIkeydirectory2 print("The API key was backed up successfully!") logger.debug("Performed 3 ops. Overwrite "+ folder_argument + "backkey.txt, write to backkey.txt" + ", and close backkey.txt.") print("", "Before we configure PyWeather, I'll now validate your API key.", sep="\n") # Do an infinite loop of validation of the API key, so the user can reenter the API key # if it was wrong. while True: apitest_URL = 'http://api.wunderground.com/api/' + apikey_input + '/conditions/q/NY/New_York.json' testreader = codecs.getreader("utf-8") logger.debug("apitest_URL: %s ; testreader: %s" % (apitest_URL, testreader)) try: testJSON = requests.get(apitest_URL) logger.debug("testJSON: %s" % testJSON) except: logger.warn("Couldn't connect to Wunderground's API! No internet?") print("When PyWeather Setup attempted to fetch the .json to validate your API key,", "it ran into an error. If you're on a network with a filter, make sure that", "'api.wunderground.com' is unblocked. Otherwise, make sure you have an internet", "connection.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() test_json = json.loads(testJSON.text) if jsonVerbosity == True: logger.debug("test_json: %s" % test_json) try: test_conditions = str(test_json['current_observation']['temp_f']) logger.debug("test_conditions: %s" % test_conditions) print("Hurray! Your API key is valid and works.") break except: logger.warn("Error! 
Is the API key invalid?") print("When attempting to validate the API key that you entered/confirmed,", "PyWeather ran into an error. Would you like to reenter your API key to revalidate it?", "Please note, that this error might be caused by WU's API being down, or another cause.", "However, 90% of the time, this is due to a bad API key.", "Yes or No.", sep='\n') revalidateAPIkey = input("Input here: ").lower() if revalidateAPIkey == "yes": print("Enter in your API key below.") apikey_input = input("Input here: ") logger.debug("apikey_input: %s") print("Revalidating your API key...") continue elif revalidateAPIkey == "no": print("Not revalidating your API key. You'll need a valid API key to continue.", "Press enter to exit.", sep="\n") input() sys.exit() printException() print("Press enter to exit.") input() sys.exit() print("Let's configure PyWeather to your liking.") logger.debug("config: %s" % config) print("", "(1/42)","On the summary screen, would you like to show sunrise/sunset times?", "By default, this is disabled.", "Yes or No.", sep="\n") sundata_Summary = input("Input here: ").lower() logger.debug("sundata_Summary: %s" % sundata_Summary) if sundata_Summary == "yes": config['SUMMARY']['sundata_summary'] = 'True' print("Changes saved.") logger.debug("Sundata on the summary is now ENABLED.") elif sundata_Summary == "no": config['SUMMARY']['sundata_summary'] = 'False' print("Changes saved.") logger.debug("Sundata on the summary is now DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'", sep="\n") config['SUMMARY']['sundata_summary'] = 'False' print("Changes saved.") logger.debug("Could not recognize input. Defaulting to DISABLED.") print("", "(2/42)","On the summary screen, would you like to show almanac data?", "By default, this is disabled.", "Yes or no:", sep="\n") almanacdata_Summary = input("Input here: ").lower() logger.debug("almanacdata_Summary: %s" % almanacdata_Summary) if almanacdata_Summary == "yes": config['SUMMARY']['almanac_summary'] = 'True' print("Changes saved.") logger.debug("Almanac on the summary is now ENABLED.") elif almanacdata_Summary == "no": config['SUMMARY']['almanac_summary'] = 'False' print("Changes saved.") logger.debug("Almanac on the summary is now DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'", sep="\n") config['SUMMARY']['almanac_summary'] = 'False' print("Changes saved.") logger.debug("Could not recognize input. Defaulting to DISABLED.") print("", "(3/42)", "On the summary screen, would you like to show alerts data?", "By default, this is enabled. 
Please note, Wunderground", "only supports alert data in the US and EU at this time.", "Yes or No.", sep="\n") alertsdata_Summary = input("Input here: ").lower() logger.debug("alertsdata_Summary: %s" % alertsdata_Summary) if alertsdata_Summary == "yes": config['SUMMARY']['showalertsonsummary'] = 'True' print("Changes saved.") logger.debug("Alerts on the summary is now ENABLED.") elif alertsdata_Summary == "no": config['SUMMARY']['showalertsonsummary'] = 'False' print("Changes saved.") logger.debug("Alerts on the summary is now DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'True'", sep="\n") config['SUMMARY']['showAlertsOnSummary'] = 'True' print("", "(4/42)","On boot, would you like PyWeather to check for updates?", "By default, this is disabled, due to a load time increase of ~2-5 seconds.", "Yes or No.", sep="\n") checkForUpdates = input("Input here: ").lower() logger.debug("checkForUpdates: %s" % checkForUpdates) if checkForUpdates == "yes": config['UPDATER']['autoCheckForUpdates'] = 'True' print("Changes saved.") logger.debug("Checking for updates on startup is ENABLED.") elif checkForUpdates == "no": config['UPDATER']['autoCheckForUpdates'] = 'False' print("Changes saved.") logger.debug("Checking for updates on startup is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'", sep="\n") config['UPDATER']['autoCheckForUpdates'] = 'False' print("Changes saved.") logger.debug("Could not recognize input. Defaulting to DISABLED.") print("", "(5/42)","When an error occurs, would you like PyWeather to show the full error?", "When enabled, you'll have easier access to the full error for reporting", "the bug on GitHub.", "By default, this is disabled, as errors look less pretty when enabled.", "Yes or no.", sep="\n") displayTracebacks = input("Input here: ").lower() logger.debug("displayTracebacks: %s" % displayTracebacks) if displayTracebacks == "yes": config['TRACEBACK']['tracebacks'] = 'True' config['TRACEBACK']['setup_tracebacks'] = 'True' config['TRACEBACK']['updater_tracebacks'] = 'True' config['TRACEBACK']['keybackup_tracebacks'] = 'True' config['TRACEBACK']['configdefault_tracebacks'] = 'True' print("Changes saved.") logger.debug("Printing tracebacks is ENABLED.") elif displayTracebacks == "no": config['TRACEBACK']['tracebacks'] = 'False' config['TRACEBACK']['setup_tracebacks'] = 'False' config['TRACEBACK']['updater_tracebacks'] = 'False' config['TRACEBACK']['keybackup_tracebacks'] = 'False' config['TRACEBACK']['configdefault_tracebacks'] = 'False' print("Changes saved.") logger.debug("Printing tracebacks is DISABLED.") else: print("Couldn't understand what you inputted.", "Defaulting to 'False'", sep="\n") config['TRACEBACK']['tracebacks'] = 'False' config['TRACEBACK']['setup_tracebacks'] = 'False' config['TRACEBACK']['updater_tracebacks'] = 'False' config['TRACEBACK']['keybackup_tracebacks'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(6/42)", "When booting PyWeather up initially, would you like PyWeather to", "fetch the 10-day hourly forecast, instead of the 3-day forecast?", "This is disabled by default. When enabled, initial loading times are", "increased. 
However, when you view the 10-day hourly forecast, you won't", "have to wait for it to load, and use another API call.", "Yes or No.", sep="\n") tenday_onboot = input("Input here: ").lower() if tenday_onboot == "yes": config['PREFETCH']['10dayfetch_atboot'] = 'True' print("Changes saved.") logger.debug("Fetching 10 day JSON at boot is ENABLED.") elif tenday_onboot == "no": config['PREFETCH']['10dayfetch_atboot'] = 'False' print("Changes saved.") logger.debug("Fetching 10 day JSON at boot is DISABLED.") else: print("Couldn't understand what you inputted.", "Defaulting to the default value 'False'", sep="\n") config['PREFETCH']['10dayfetch_atboot'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(7/42)", "When viewing detailed hourly, 10-day hourly, and historical hourly,", "detailed information, how many iterations should PyWeather go through", "before asking you to continue?", "By default, this is 6. An input above 10", "is not recommended.", sep="\n") detailedloops = input("Input here: ") try: detailedloops = int(detailedloops) detailedloops = str(detailedloops) config['UI']['detailedinfoloops'] = detailedloops print("Changes saved.") logger.debug("Detailed info iterations now %s." % detailedloops) except: print("Couldn't convert input into a number. Defaulting to '6'.") printException_loggerwarn() config['UI']['detailedinfoloops'] = '6' print("Changes saved.") logger.debug("Detailed info loops now 6.") print("", "(8/42)", "When viewing detailed 10-day forecast information, how many", "iterations should PyWeather go through, before asking you to", "continue?", "By default, this is 5. An input above 10 will not prompt", "the enter to continue prompt", sep="\n") detailedForecastLoops = input("Input here: ") try: detailedForecastLoops = int(detailedForecastLoops) detailedForecastLoops = str(detailedForecastLoops) config['UI']['forecast_detailedinfoloops'] = detailedForecastLoops print("Changes saved.") logger.debug("Detailed forecast info iterations now %s" % detailedForecastLoops) except: print("Couldn't convert input into a number. Defaulting to '5'.") printException_loggerwarn() config['UI']['forecast_detailedinfoloops'] = '5' print("Changes saved.") logger.debug("Detailed forecast info loops now 5.") print("", "(9/42)", "PyWeather has a caching system, in which if you're gone for some time", "data will automatically refresh. Would you like to turn this on?", "This is enabled by default. Yes or No.", sep="\n") enablecache = input("Input here: ").lower() if enablecache == "no": print("Cache will be disabled.") config['CACHE']['enabled'] = 'False' print("Changes saved.") else: config['CACHE']['enabled'] = 'True' print("You entered yes, or your input wasn't understood (yes is the default.)", "In the next few inputs, enter the time in minutes that PyWeather should keep", "certain types of data, before a data refresh is automatically requested.", "If you want to leave cache values to their defaults, press enter at any prompt.", sep="\n") print("", "(10/42)", "Please enter the cache time for alerts data in minutes (default = 5)", sep="\n") alertscachetime = input("Input here: ").lower() try: alertscachetime = float(alertscachetime) alertscachetime = str(alertscachetime) config['CACHE']['alerts_cachedtime'] = alertscachetime print("Changes saved.") logger.debug("Alerts cache time now %s minutes." % alertscachetime) except: print("", "Your input couldn't be converted into a number. 
Setting alerts", "cache time to it's default value of '5'.", sep="\n") config['CACHE']['alerts_cachedtime'] = '5' logger.debug("Alerts cache time now 5 minutes.") print("", "(11/42)", "Please enter the cache time for current data in minutes (default = 10)", sep="\n") currentcachetime = input("Input here: ").lower() try: currentcachetime = float(currentcachetime) currentcachetime = str(currentcachetime) config['CACHE']['current_cachedtime'] = currentcachetime print("Changes saved.") logger.debug("Current cache time now %s minutes." % alertscachetime) except: print("", "Your input couldn't be converted into a number. Setting current", "cache time to it's default value of '10'.", sep="\n") config['CACHE']['current_cachedtime'] = '10' logger.debug("Current cache time now 10 minutes.") print("", "(12/42)", "Please enter the cache time for forecast data in minutes (default = 60)", sep="\n") forecastcachetime = input("Input here: ").lower() try: forecastcachetime = float(forecastcachetime) forecastcachetime = str(forecastcachetime) config['CACHE']['forecast_cachedtime'] = forecastcachetime print("Changes saved.") logger.debug("Forecast cache time now %s minutes." % forecastcachetime) except: print("", "Your input couldn't be converted into a number. Setting forecast", "cache time to it's default value of '60'.", sep="\n") config['CACHE']['forecast_cachedtime'] = '60' logger.debug("Forecast cache time now 60 minutes.") print("", "(13/42)", "Please enter the cache time for almanac data in minutes (default = 240)", sep="\n") almanaccachetime = input("Input here: ").lower() try: almanaccachetime = float(almanaccachetime) almanaccachetime = str(almanaccachetime) config['CACHE']['almanac_cachedtime'] = almanaccachetime print("Changes saved.") logger.debug("Almanac cache time now %s minutes." % almanaccachetime) except: print("", "Your input couldn't be converted into a number. Setting almanac", "cache time to it's default value of '240'.", sep="\n") config['CACHE']['almanac_cachedtime'] = '240' logger.debug("Almanac cache time now 240 minutes.") print("", "(14/42)", "Please enter the cache time for 1.5 day hourly data in minutes (default = 60)", sep="\n") threedayhourly_cachedtime = input("Input here: ").lower() try: threedayhourly = float(threedayhourly_cachedtime) threedayhourly = str(threedayhourly_cachedtime) config['CACHE']['threedayhourly_cachedtime'] = threedayhourly_cachedtime print("Changes saved.") logger.debug("3 day hourly cache time now %s minutes." % threedayhourly_cachedtime) except: print("", "Your input couldn't be converted into a number. Setting three day hourly", "cache time to it's default value of '60'.", sep="\n") config['CACHE']['threedayhourly_cachedtime'] = "60" logger.debug("3 day hourly cache time now 60 minutes") print("", "(15/42)", "Please enter the cache time for the ten day hourly data in minutes (default = 60)", sep="\n") tendayhourly_cachedtime = input("Input here: ").lower() try: tendayhourly = float(tendayhourly_cachedtime) tendayhourly = str(tendayhourly_cachedtime) config['CACHE']['tendayhourly_cachedtime'] = tendayhourly_cachedtime print("Changes saved.") logger.debug("10 day hourly cache time now %s minutes." % tendayhourly_cachedtime) except: print("", "Your input couldn't be converted into a number. 
Setting ten day hourly", "cache time to it's default value of '60'.", sep="\n") config['CACHE']['tendayhourly_cachedtime'] = "60" logger.debug("10 day hourly cache time now 60 minutes") print("", "(16/42)", "Please enter the cache time for sun data in minutes (default = 480)", sep="\n") sundatacachetime = input("Input here: ").lower() try: sundatacachetime = float(sundatacachetime) sundatacachetime = str(sundatacachetime) config['CACHE']['sundata_cachedtime'] = forecastcachetime print("Changes saved.") logger.debug("Sun data cache time now %s minutes." % sundatacachetime) except: print("", "Your input couldn't be converted into a number. Setting sun data", "cache time to it's default value of '480'.", sep="\n") config['CACHE']['sundata_cachedtime'] = '480' logger.debug("Sun data cache time now 480 minutes.") print("", "(17/42)", "Please enter the cache time for tide data in minutes (default = 480)", sep="\n") tidecachetime = input("Input here: ").lower() try: tidecachetime = float(tidecachetime) tidecachetime = str(tidecachetime) config['CACHE']['tide_cachedtime'] = tidecachetime print("Changes saved.") logger.debug("Tide cache time now %s minutes." % tidecachetime) except: print("", "Your input couldn't be converted into a number. Setting tide data", "cache time to it's default value of '480'.", sep="\n") config['CACHE']['tide_cachedtime'] = '480' logger.debug("Tide data cache time now 480 minutes.") print("", "(18/42)", "Please enter the cache time for hurricane data in minutes (default = 480)", sep="\n") hurricanecachetime = input("Input here: ").lower() try: hurricanecachetime = float(hurricanecachetime) hurricanecachetime = str(hurricanecachetime) config['CACHE']['hurricane_cachedtime'] = hurricanecachetime print("Changes saved.") logger.debug("Hurricane cache time now %s minutes" % hurricanecachetime) except: print("", "Your input couldn't be converted into a number. Setting hurricane data", "cache time to it's default value of '180'.", sep="\n") config['CACHE']['hurricane_cachedtime'] = '180' logger.debug("Hurricane data cache time now 180 minutes.") print("", "(19/42)", "Please enter the cache time for yesterday's weather data in minutes (default = 720)", sep="\n") yesterdaycachedtime = input("Input here: ").lower() try: yesterdaycachedtime = float(yesterdaycachedtime) yesterdaycachedtime = str(yesterdaycachedtime) config['CACHE']['yesterday_cachedtime'] = yesterdaycachedtime print("Changes saved.") logger.debug("Yesterday cache time now %s minutess" % yesterdaycachedtime) except: print("", "Your input couldn't be converted into a number. Setting yesterday's weather data", "cache time to it's default value of 720.", sep="\n") config['CACHE']['yesterday_cachedtime'] = '720' logger.debug("Yesterday data cache time now 720 minutes.") print("", "(20/42)", "When viewing detailed EU alerts information, how many", "iterations should PyWeather go through, before asking you to", "continue?", "By default, this is 2.", sep="\n") EUalertsloops = input("Input here: ") try: EUalertsloops = int(EUalertsloops) EUalertsloops = str(EUalertsloops) config['UI']['alerts_EUiterations'] = EUalertsloops print("Changes saved.") logger.debug("Detailed EU alert iterations now %s" % EUalertsloops) except: print("Couldn't convert input into a number. 
Defaulting to '2'.") printException_loggerwarn() config['UI']['alerts_EUiterations'] = '2' print("Changes saved.") logger.debug("Detailed EU alert iterations now 2.") print("", "(21/42)", "When viewing detailed US alerts information, how many", "iterations should PyWeather go through, before asking you to", "continue?", "By default, this is 1.", sep="\n") USalertsloops = input("Input here: ") try: USalertsloops = int(USalertsloops) USalertsloops = str(USalertsloops) config['UI']['alerts_USiterations'] = USalertsloops print("Changes saved.") logger.debug("Detailed US alert iterations now %s" % USalertsloops) except: print("Couldn't convert input to a number. Defaulting to '1'.") printException_loggerwarn() config['UI']['alerts_USiterations'] = '1' print("Changes saved.") logger.debug("Detailed US alert iterations now 1.") print("", "(22/42)","When PyWeather is going through detailed information, it can show", "how many iterations are completed.", "By default, this is disabled.", "Yes or No.", sep="\n") showIterations = input("Input here: ").lower() if showIterations == "yes": config['UI']['show_completediterations'] = 'True' print("Changes saved.") logger.debug("Showing completed iterations is ENABLED.") elif showIterations == "no": config['UI']['show_completediterations'] = 'False' print("Changes saved.") logger.debug("Showing completed iterations is DISABLED.") else: print("Couldn't understand what you inputted.", "Defaulting to 'FALSE'.", sep="\n") config['UI']['show_completediterations'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(23/42)", "When PyWeather is going through detailed information, would", "you like the 'Enter to Continue' prompts to pop up?", "By default, this is enabled.", "Yes or No.", sep="\n") showEnterToContinue = input("Input here: ").lower() if showEnterToContinue == "yes": config['UI']['show_entertocontinue'] = 'True' print("Changes saved.") logger.debug("Showing enter to continue prompts is ENABLED.") elif showEnterToContinue == "no": config['UI']['show_entertocontinue'] = 'False' print("Changes saved.") logger.debug("Showing enter to continue prompts is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'True'.", sep="\n") config['UI']['show_entertocontinue'] = 'True' print("Changes saved.") logger.debug("Could not understand input. Defaulting to ENABLED.") print("", "(24/42)", "In the PyWeather Updater, the updater can show the release tag", "associated with the latest release. Helpful for those using Git to", "update PyWeather. By default, this is disabled.", "Yes or No.", sep="\n") showReleaseTag = input("Input here: ").lower() if showReleaseTag == "yes": config['UPDATER']['show_updaterreleasetag'] = 'True' print("Changes saved.") logger.debug("Showing release tag in updater is ENABLED.") elif showReleaseTag == "no": config['UPDATER']['show_updaterreleasetag'] = 'False' print("Changes saved.") logger.debug("Showing release tag in updater is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'.", sep="\n") config['UPDATER']['show_updaterreleasetag'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(25/42)", "When PyWeather boots, it can validate your API key. 
If PyWeather", "finds your primary API key is invalid, it'll attempt to validate your", "backup key, and load that if it's validated successfully.", "By default, this is enabled, as it's well worth the 1 API call to make", "sure your key is valid. However, if you said 'Yes' to almanac/sun data", "on the summary screen, you might not want to enable this.", "Yes or No.", sep="\n") validateKeyOnBoot = input("Input here: ").lower() if validateKeyOnBoot == "yes": config['PYWEATHER BOOT']['validateAPIKey'] = 'True' print("Changes saved.") logger.debug("Validating API key on boot is ENABLED.") elif validateKeyOnBoot == "no": config['PYWEATHER BOOT']['validateAPIKey'] = 'False' print("Changes saved.") logger.debug("Validating API key on boot is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'True'.", sep="\n") config['PYWEATHER BOOT']['validateAPIKey'] = 'False' logger.debug("Could not understand input. Defaulting to ENABLED.") print("", "(26/42)", "PyWeather now has a radar feature, which opens up a GUI on supported", "platforms. Depending on your screen resolution, you'll have to set how large", "the radar picture is when rendered. In the prompt below, enter one of five sizes.", "extrasmall - 320x240 window", "small - 480x320 window", "normal - 640x480 window", "large - 960x720 window", "extralarge - 1280x960 window", "By default, the resolution is normal. Adapt your choice to the screen resolution", "of the machine you're using.", sep="\n") radar_resolutions = ["extrasmall", "small", "normal", "large", "extralarge"] logger.debug("radar_resolutions: %s" % radar_resolutions) radar_resolutioninput = input("Input here: ").lower() for x in range(0, 5): if radar_resolutioninput == radar_resolutions[x]: logger.debug("Resolution input matched, end result: %s" % radar_resolutions[x]) config['RADAR GUI']['radar_imagesize'] = radar_resolutions[x] print("Changes saved.") break # This works by design. If x = 4 (extralarge), the if would catch first. elif x == 5: print("Could not understand what you inputted. Defaulting to 'normal'.") config['RADAR GUI']['radar_imagesize'] = 'normal' print("Changes saved.") print("", "(27/42)", "PyWeather's radar feature is unfortunately experimental as of PyWeather 0.6.3 beta.", "By default, a confirmation message will always appear when attempting to launch the radar.", "However, this can be turned off, if you plan to use the experimental radar on a regular basis.", "By default, bypassing the confirmation message is disabled. Yes or No.", sep="\n") radar_bypassconfinput = input("Input here: ").lower() logger.debug("radar_bypassconfinput: %s" % radar_bypassconfinput) if radar_bypassconfinput == "yes": config['RADAR GUI']['bypassconfirmation'] = 'True' logger.debug("RADAR GUI/bypassconfirmation is now TRUE") print("Changes saved.") elif radar_bypassconfinput == "no": config['RADAR GUI']['bypassconfirmation'] = 'False' logger.debug("RADAR GUI/bypassconfirmation is now FALSE") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['RADAR GUI']['bypassconfirmation'] = 'False' logger.debug("RADAR GUI/bypassconfirmation is now FALSE") print("Changes saved.") print("", "(28/42)", "On the summary screen, would you like tide data to be shown?", "This uses an extra API call when enabled. 
By default, this is disabled.", "Yes or No.", sep="\n") tideonsummary = input("Input here: ").lower() logger.debug("tideonsummary: %s" % tideonsummary) if tideonsummary == "yes": config['SUMMARY']['showtideonsummary'] = "True" logger.debug("SUMMARY/showtideonsummary is now TRUE") print("Changes saved.") elif tideonsummary == "no": config['SUMMARY']['showtideonsummary'] = "False" logger.debug("SUMMARY/showtideonsummary is now FALSE") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['SUMMARY']['showtideonsummary'] = "False" logger.debug("SUMMARY/showtideonsummary is now FALSE") print("Changes saved.") print("", "(29/42)", "When PyWeather boots, would you like hurricane data to be fetched?", "Initial loading times will increase when this is on, but hurricane data will load faster.", "This can use an extra API call, especially when you fetch hurricane data but don't check it", "in PyWeather. By default, this is disabled.", "Yes or No.", sep="\n") hurricaneprefetch = input("Input here: ").lower() logger.debug("hurricaneprefetch: %s" % hurricaneprefetch) if hurricaneprefetch == "yes": config['PREFETCH']['hurricanedata_atboot'] = 'True' logger.debug("PREFETCH/hurricanedata_atbooot is now TRUE.") print("Changes saved.") elif hurricaneprefetch == "no": config['PREFETCH']['hurricanedata_atboot'] = 'False' logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['PREFETCH']['hurricanedata_atboot'] = 'False' logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.") print("Changes saved.") print("", "(30/42)", "PyWeather has a new feature where you can now easily call your current location at boot.", "The current location feature allows you to enter 'currentlocation' at boot, and view the weather for your", "approximate location. However, GeoIP lookups might be inaccurate, especially for mobile users. The GeoIP service", "uses freegeoip.net. Would you like to enable this service? By default, this is disabled. Yes or No.", sep="\n") allowgeoipservice = input("Input here: ").lower() logger.debug("allowgeoipservice: %s" % allowgeoipservice) if allowgeoipservice == "yes": config['FIRSTINPUT']['geoipservice_enabled'] = 'True' logger.debug("FIRSTINPUT/geoipservice_enabled is now TRUE.") print("Changes saved.") elif allowgeoipservice == "no": config['FIRSTINPUT']['geoipservice_enabled'] = 'False' logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['FIRSTINPUT']['geoipservice_enabled'] = 'False' logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.") print("Changes saved.") print("", "(31/42)", "PyWeather has a new feature where you can query indivdiual Wunderground PWS stations.", "You can query any PWS globally by entering pws:<pws ID> when enabled, and where <pws ID> is the ID of the", "PWS you want to query. However, this can be turned off if you don't want to have extra lines of text at boot,", "or don't want the ability to query PWSes. By default, this is enabled. 
Yes or No.", sep="\n") allowpwsqueries = input("Input here: ").lower() logger.debug("allowpwsqueries: %s" % allowpwsqueries) if allowpwsqueries == "yes": config['FIRSTINPUT']['allow_pwsqueries'] = 'True' logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.") print("Changes saved.") elif allowpwsqueries == "no": config['FIRSTINPUT']['allow_pwsqueries'] = 'False' logger.debug("FIRSTINPUT/allow_pwsqueries is now FALSE.") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'True'.") config['FIRSTINPUT']['allow_pwsqueries'] = 'True' logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.") print("Changes saved.") print("", "(32/42)", "PyWeather has a new feature where in hurricane data, you can see the nearest city that a hurricane is to.", "However, this feature uses a separate API (geonames.org), can only work when the hurricane is within 300km of a city,", "and will drastically increase loading times. You may also run into issues with the default API key hitting rate limits.", "Despite all of this, would you like to enable the nearest city features for non-forecast hurricane data?", "Yes or No. By default, this is disabled.", sep="\n") allownearestcities = input("Input here: ").lower() logger.debug("allownearestcities: %s" % allownearestcities) if allownearestcities == "yes": additional_ncoptions = True logger.debug("additional_ncoptions: %s" % additional_ncoptions) config['HURRICANE']['enablenearestcity'] = 'True' logger.debug("HURRICANE/enablenearestcity is now TRUE.") print("Changes saved.") elif allownearestcities == "no": additional_ncoptions = False logger.debug("additional_ncoptions: %s" % additional_ncoptions) config['HURRICANE']['enablenearestcity'] = 'False' logger.debug("HURRICANE/enablenearestcity is now FALSE.") print("Changes saved.") else: additional_ncoptions = False logger.debug("additional_ncoptions: %s" % additional_ncoptions) print("Could not understand what you inputted. Defaulting to 'False'.") config['HURRICANE']['enablenearestcity'] = 'False' logger.debug("HURRICANE/enablenearestcity is now FALSE.") print("Changes saved.") # <--- Additional options for nearest city feature ---> if additional_ncoptions is True: print("", "(33/42)", "By default, the nearest city feature is only enabled on the current data screen of hurricane data.", "You can enable the nearest city feature to be enabled on forecast data. However, loading hurricane data becomes much", "slower. By default, this is disabled. Yes or No.", sep="\n") enable_ncforecast = input("Input here: ").lower() if enable_ncforecast == "yes": config['HURRICANE']['enablenearestcity_forecast'] = 'True' logger.debug("HURRICANE/enablenearestcity_forecast is now TRUE.") print("Changes saved.") elif enable_ncforecast == "no": config['HURRICANE']['enablenearestcity_forecast'] = 'False' logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'False'.") config['HURRICANE']['enablenearestcity_forecast'] = 'False' logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.") print("Changes saved.") print("", "(34/42)", "By default, PyWeather uses it's own API username for the nearest city features, which should be able to", "handle PyWeather's user demands just fine. However, if you'd like to use your own account for the API, you may.", "You can sign up at geonames.org, and follow all the steps. 
The confirmation letter may take some time to hit your inbox.", "Would you like to define your own API username? Yes or No. By default, this is no.", sep="\n") definegeonamesusername = input("Input here: ").lower() logger.debug("definegeonamesusername: %s" % definegeonamesusername) if definegeonamesusername == "yes": # Enter into confirmation loop while True: print("Please enter the username that you'll use to access the geonames API.") geonamesusername = input("Input here: ").lower() logger.debug("geonamesusername: %s" % geonamesusername) print("The API username you gave me was: %s" % geonamesusername, "Is this the username that you'd like to use? Yes or No.", "Please note that your username will not be validated.", sep="\n") geonamesconfirmation = input("Input here: ").lower() confirmurl = 'http://api.geonames.org/findNearbyPlaceNameJSON?lat=19.3&lng=102.2&username= ' + geonamesusername + '&radius=300&maxRows=1&cities=cities5000' logger.debug("geonamesconfirmation: %s ; confirmurl: %s" % (geonamesconfirmation, confirmurl)) if geonamesconfirmation == "yes": config['HURRICANE']['api_username'] = geonamesusername logger.debug("HURRICANE/api_username is now %s" % geonamesusername) print("Changes saved.") elif geonamesconfirmation == "no": continue else: print("Input not understood. Will not validate username. If the username is", "invalid, please change the HURRICANE/api_username option in the config.", sep="\n") config['HURRICANE']['api_username'] = geonamesusername logger.debug("HURRICANE/api_username is now %s" % geonamesusername) print("Changes saved.") elif definegeonamesusername == "no": print("Defaulting to the default username for the geonames API.") else: print("Input not understood.", "Defaulting to the default username for the geonames API.", sep="\n") print("", "(35/42)", "For the nearest city feature, you can define how large a city has to be to show up as a nearest city.", "You have three options for this. 'small' will set the threshold to cities with a 1,000 population and greater, but this", "tends to include cities with very few or no people. 'medium' will set the threshold to cities with a 5,000 population", "and greater, and 'large' for cities that have a population of 10,000 or greater. Please enter either 'small', 'medium'", "or 'large' below. Default is 'medium'.", sep="\n") nearestcitysize = input("Input here: ").lower() logger.debug("nearestcitysize: %s" % nearestcitysize) if nearestcitysize == "small": config['HURRICANE']['nearestcitysize'] = 'small' logger.debug("HURRICANE/nearestcitysize is now 'small'.") print("Changes saved.") elif nearestcitysize == "medium": config['HURRICANE']['nearestcitysize'] = 'medium' logger.debug("HURRICANE/nearestcitysize is now 'medium'") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'medium'.") config['HURRICANE']['nearestcitysize'] = 'medium' logger.debug("HURRICANE/nearestcitysize is now 'medium'.") print("Changes saved.") print("", "(36/42)", "PyWeather will now let you enable a favorite locations feature, which allows", "you to quickly call up to 5 locations in PyWeather. You have the ability to configure your", "favorite locations in a menu option in PyWeather. 
By default, this feature is enabled.", "Yes or No.", sep="\n") enable_favoritelocations = input("Input here: ").lower() logger.debug("enable_favoritelocations: %s" % enable_favoritelocations) if enable_favoritelocations == "yes": config['FAVORITE LOCATIONS']['enabled'] = 'True' logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.") print("Changes saved!") elif enable_favoritelocations == "no": config['FAVORITE LOCATIONS']['enabled'] = 'False' logger.debug("FAVORITE LOCATIONS/enabled is now 'False'.") print("Changes saved!") else: print("Could not understand your input. Defaulting to 'True'.") config['FAVORITE LOCATIONS']['enabled'] = 'True' logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.") print("Changes saved!") print("", "(37/43)", "PyWeather can now store your previously searched locations.", "You have the ability to configure your previous locations in a menu option", "in PyWeather. By default this feature is enabled.", "Yes or No.", sep="\n") enable_previouslocations = input("Input here: ").lower() logger.debug("enable_previouslocations: %s" % enable_previouslocations) if enable_previouslocations == "yes": config['PREVIOUS LOCATIONS']['enabled'] = 'True' logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.") print("Changes saved!") elif enable_previouslocations == "no": config['PREVIOUS LOCATIONS']['enabled'] = 'False' logger.debug("PREVIOUS LOCATIONS/enabled is now 'False'.") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'True'.") config['PREVIOUS LOCATIONS']['enabled'] = 'True' logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.") print("", "(37/42)", "PyWeather by default uses Google's geocoder, which can occasionally have rate limiting issues.", "To get around this, you can manually use your own API key that you sign up for with Google. This is completely", "optional, and you can continue past this step and not impede PyWeather's functionality. However, would you like", "to enable the use of a custom API key for the geocoder? Yes or No.", sep="\n") enablecustomgeocoderkey = input("Input here: ").lower() logger.debug("enablecustomgeocoderkey: %s" % enablecustomgeocoderkey) if enablecustomgeocoderkey == "yes": print("", "(38/42)", "To sign up for a Google Maps API key, please visit this link: ", "https://developers.google.com/maps/documentation/javascript/get-api-key", "Press the button 'Get Key', and wait a minute. Copy and paste the key into the input", "below. Your API key will NOT be validated. Enter 'exit' to exit this process, and to disable", "a custom API key.", sep="\n") customgeocoderkey = input("Input here: ") logger.debug("customgeocoderkey: %s" % customgeocoderkey) while True: print("", "The API key you entered is: %s" % customgeocoderkey, "Is this the API key you want to use? Yes or No.", sep="\n") confirmcustomgeocoderkey = input("Input here: ").lower() logger.debug("confirmcustomgeocoderkey: %s" % confirmcustomgeocoderkey) if confirmcustomgeocoderkey == "yes": break else: if confirmcustomgeocoderkey != "no": print("Couldn't understand your input. 
Please input your API key again.") print("Please enter the API key you want to use below.") customgeocoderkey = input("Input here: ") logger.debug("customgeocoderkey: %s" % customgeocoderkey) if customgeocoderkey == "exit": print("Exiting the custom geocoder key process, and disabling a custom geocoder key.") config['GEOCODER API']['customkey_enabled'] = 'False' logger.debug("GEOCODER API/customkey_enabled is now FALSE.") print("Changes saved.") else: config['GEOCODER API']['customkey_enabled'] = 'True' config['GEOCODER API']['customkey'] = str(customgeocoderkey) logger.debug("GEOCODER API/customkey_enabled is now TRUE.") print("Changes saved.") elif enablecustomgeocoderkey == "no": config['GEOCODER API']['customkey_enabled'] = 'False' logger.debug("GEOCODER API/customkey_enabled is now FALSE.") print("Changes saved.") else: print("Your input could not be understood. Defaulting to 'False'.") config['GEOCODER API']['customkey_enabled'] = 'False' logger.debug("GEOCODER API/customkey_enabled is now FALSE.") print("Changes saved.") print("", "(39/42)", "On the summary screen, you can now view a summary of the weather that occurred yesterday.", "Enabling this will also enable the option to prefetch yesterday's weather at boot in the config file.", "Please note that enabling this uses 1 extra API call at boot, and will increase PyWeather's loading time.", "Would you like to turn on showing yesterday's weather on the summary screen? Yes or No. By default, this is", "disabled.", sep="\n") showyesterdayonsummary = input("Input here: ").lower() logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) if showyesterdayonsummary == "yes": config['SUMMARY']['showyesterdayonsummary'] = 'True' logger.info("SUMMARY/showyesterdayonsummary is now 'True'.") config['PREFETCH']['yesterdaydata_atboot'] = 'True' logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.") showyesterdayonsummary = True logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) print("Changes saved.") elif showyesterdayonsummary == "no": config['SUMMARY']['showyesterdayonsummary'] = 'False' logger.info("SUMMARY/showyesterdayonsummary is now 'False'.") showyesterdayonsummary = False logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) print("Changes saved.") else: print("Your input could not be understood. Defaulting to 'False'.") config['SUMMARY']['showyesterdayonsummary'] = 'False' logger.info("SUMMARY/showyesterdayonsumary is now 'False'.") showyesterdayonsummary = False logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) print("Changes saved.") if showyesterdayonsummary is False: print("", "(40/42)", "When PyWeather boots up, you can have the option to have yesterday's weather data", "prefetched during bootup. Enabling this will use 1 extra API call at boot, and will increase PyWeather's", "loading time. Would you like to enable prefetching yesterday's weather data on boot? Yes or No.", "By default, this is disabled.", sep="\n") prefetchyesterdayatboot = input("Input here: ").lower() logger.debug("prefetchyesterdayatboot: %s" % prefetchyesterdayatboot) if prefetchyesterdayatboot == "yes": config['PREFETCH']['yesterdaydata_atboot'] = 'True' logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.") print("Changes saved.") elif prefetchyesterdayatboot == "no": config['PREFETCH']['yesterdaydata_atboot'] = 'False' logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.") print("Changes saved.") else: print("Your input could not be understood. 
Defaulting to 'False'.") config['PREFETCH']['yesterdaydata_atboot'] = 'False' logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.") print("Changes saved.") print("", "(41/42)", "In 0.6.3 beta and newer, you have the option to enable extra tools for PyWeather.", "Extra tools are diagnostic tools, and so far you can see cache timings in PyWeather, and more extra tools", "will be added as time goes on. Would you like to enable the ability to use extra tools? Yes or No. By default", "this is disabled.", sep="\n") enableextratools = input("Input here: ").lower() logger.debug("enableextratools: %s" % enableextratools) if enableextratools == "yes": config['UI']['extratools_enabled'] = 'True' logger.info("UI/extratools_enabled is now 'True'.") print("Changes saved.") elif enableextratools == "no": config['UI']['extratools_enabled'] = 'False' logger.info("UI/extratools_enabled is now 'False'.") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'False'.") config['UI']['extratools_enabled'] = 'False' logger.info("UI/extratools_enabled is now 'False'.") print("Changes saved.") print("", "(42/42)", "PyWeather's geocoder usually uses https, but issues have been discovered", "on some platforms, where the geocoder cannot operate in the https mode. If you press enter", "PyWeather will automatically detect which scheme to use. If you are an advanced user, and want", "to configure the scheme yourself, enter advancedconfig at the prompt below.", sep="\n") configuregeopyscheme = input("Input here: ").lower() logger.debug("configuregeopyscheme: %s" % configuregeopyscheme) if configuregeopyscheme == "advancedconfig": print("Which geopy scheme would you like to use? 'https' works on most platforms", "but 'http' is needed on some platforms (OS X, as an example). Please input", "'https' or 'http' below.") geopyschemetype = input("Input here: ").lower() logger.debug("geopyschemetype: %s" % geopyschemetype) if geopyschemetype == "https": config['GEOCDER']['scheme'] = 'https' logger.debug("GEOCODER/scheme is now 'https'") print("Changes saved. Geocoder settings will not be validated.") elif geopyschemetype == "http": config['GEOCODER']['scheme'] = 'http' logger.debug("GEOCODER/scheme is now 'http'") print("Changes saved. Geocoder settings will not be validated.") else: print("Your input could not be understood. Defaulting to 'https'.") logger.debug("GEOCODER/scheme is now 'https'") print("Changes saved. Geocoder settings will not be validated.") else: print("Now automatically configuring your geopy scheme.") # HTTPS validation from geopy import GoogleV3 geocoder = GoogleV3(scheme='https') # I've found that one "warm up request", and then waiting ~15 seconds somehow helps determine if a platform is HTTP/HTTPS compatible. try: geocoder.geocode("123 5th Avenue, New York, NY") except: logger.debug("Warm up geocode failed.") print("I've just completed a warm-up geocode. However, sometimes a rate limit will", "occur after this geocode. I've paused the setup process for 10 seconds. This", "should help with figuring out what scheme works on your OS.", sep="\n") time.sleep(10) try: geocoder.geocode("123 5th Avenue, New York, NY") print("The geocoder can operate with HTTPS enabled on your OS. Saving these changes...") config['GEOCODER']['scheme'] = 'https' logger.debug("GEOCODER/scheme is now 'https'") print("Changes saved.") except geopy.exc.GeocoderServiceError: print("Geopy probably can't run without HTTPS (or your internet went down). 
Trying HTTP as the scheme...") geocoder = GoogleV3(scheme='http') print("Waiting 10 seconds to avoid rate limiting after the previous geocode...") time.sleep(10) try: geocoder.geocode("123 5th Avenue, New York, NY") print("The geocoder can operate, but without HTTPS enabled on your OS. Saving these changes...") config['GEOCODER']['scheme'] = 'http' logger.debug("GEOCODER/scheme is now 'http'") print("Changes saved.") except geopy.exc.GeocoderServiceError: print("You probably don't have an internet connection, as HTTPS and HTTP validation both failed.", "Defaulting to HTTP as the geopy scheme...", sep="\n") config['GEOCODER']['scheme'] = 'http' logger.debug("GEOCODER/scheme is now 'http'") print("Changes saved.") # if showing yesterday is disabled show prefetch yesterday # if show yest. on sum. is enabled enable prefetch too basically the same code print("","That's it! Now commiting config changes...", sep="\n") try: with open('storage//config.ini', 'w') as configfile: logger.debug("configfile: %s" % configfile) config.write(configfile) print("Changes committed!") logger.info("Performed operation: config.write(configfile)") except: print("The config file couldn't be written to.", "Make sure the config file can be written to.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() print("","Everything is set up and ready to rumble!", "Enjoy using PyWeather! If you have any issues, please report them on GitHub!", "Press enter to continue.", sep="\n") input() sys.exit()
gpl-3.0
5,827,768,068,210,387,000
47.070297
167
0.616019
false
4.047117
true
false
false
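For readers skimming the long setup script above: every question it asks boils down to the same prompt-validate-persist cycle around a configparser object. The sketch below isolates that cycle; the storage/config.ini path and the SUMMARY/showyesterdayonsummary key are taken from the script itself, while the surrounding project code is assumed, and the file and section are assumed to already exist.

# Minimal sketch of the prompt-and-persist pattern used throughout the setup script above.
# Assumes storage/config.ini already exists and contains a [SUMMARY] section.
import configparser

config = configparser.ConfigParser()
config.read('storage/config.ini')

answer = input("Show yesterday's weather on the summary screen? yes/no: ").strip().lower()
if answer not in ('yes', 'no'):
    print("Your input could not be understood. Defaulting to 'no'.")
    answer = 'no'
config['SUMMARY']['showyesterdayonsummary'] = 'True' if answer == 'yes' else 'False'

with open('storage/config.ini', 'w') as configfile:
    config.write(configfile)
print("Changes saved.")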
lambdaq/pytr
core.py
1
7912
#!/usr/bin/env python # coding: utf8 # from gevent import monkey # monkey.patch_all() import socket import os, sys import random, struct import logging from collections import deque, Counter, defaultdict logger = logging.getLogger(__file__) logger.addHandler(logging.StreamHandler(sys.stderr)) logger.setLevel(logging.ERROR) class UdpIpParser(object): """parse IP+UDP""" def __init__(self, data): self.data = data self.ip_hdrl = ip_hdrl = ((data[0]) & 0x0F) * 4 self.udp_payload_len = struct.unpack( '!H', data[ip_hdrl + 4:ip_hdrl + 6])[0] @property def payload(self): udp_hdrl = 8 return self.data[self.ip_hdrl + udp_hdrl:self.ip_hdrl + self.udp_payload_len] class IpPacket(object): def __init__(self, data): self.data = data self.hdrl = (0x0F & (data[0])) * 4 self.payload = self.data[self.hdrl:] self.ttl = self.data[8] @property def src_ip(self): return socket.inet_ntoa(str(self.data[12:16])) @property def dst_ip(self): return socket.inet_ntoa(str(self.data[16:20])) class IcmpParser(object): hdrl = 8 def __init__(self, data): self.data = data @property def type(self): return self.data[0] @property def payload(self): return self.data[8:14] @property def id(self): return struct.unpack('>H', self.data[4:6])[0] def checksum(msg): # simplest rfc1071. msg is bytearray s = 0 for i in range(0, len(msg), 2): w = msg[i] + (msg[i + 1] << 8) c = s + w s = (c & 0xffff) + (c >> 16) return ~s & 0xffff def create_ping(id=None): id = id or random.randint(30000, 65500) icmp_type = 8 icmp_code = 0 icmp_checksum = 0 icmp_seq = 1 icmp_timestamp = 0 data = '%06d' % id s = struct.Struct('!bbHHhQ%ss' % len(data)) msg = bytearray(s.size) s.pack_into( msg, 0, icmp_type, icmp_code, icmp_checksum, id, icmp_seq, icmp_timestamp, data) # calculate ICMP checksum, which can not be offloaded cs = checksum(msg) struct.pack_into('<H', msg, 2, cs) return msg def guess_hop(ttl): if not ttl: return if ttl >= 128: return 256 - ttl elif 64 < ttl < 128: return 128 - ttl else: return 64 - ttl MAX_RETRY = 5 class Tracer(object): MAX_TTL = 32 def __init__(self): """ packet send rate = self.batch_size/self.timeout - hosts is iterable target IPs """ self.batch_size = 100 self.max_retry = 10 self.timeout = 1 self.running = self.timeout * self.max_retry self.max_ttl = defaultdict(lambda: self.MAX_TTL) self.echo_map = {} self.in_flight = deque(maxlen=self.batch_size) # a list of ip-ttl tuples self.retries = Counter() # remaining retries self.result = defaultdict(dict) # {ip: [hop1, hop2, ...]} self.sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) self.sock.bind(('', 0)) self.sock.settimeout(self.timeout) def _iter_ip_and_ttl(self, hosts): """generate all IPs and their hops need to ping Need consider retries. 
""" for ip in hosts: for ttl in xrange(1, self.MAX_TTL + 1): if ttl >= self.max_ttl[ip]: break resp = (ip.strip(), ttl) self.in_flight.append(resp) yield resp def run(self, hosts): """would block""" self.ip_and_ttl = self._iter_ip_and_ttl(hosts) self.tick() while self.running > 0: data = bytearray(1024) try: nbytes, addr = self.sock.recvfrom_into(data) self.on_data(data, addr[0]) except socket.timeout: self.tick() return self.result def _iter_retry(self): i = 0 while self.in_flight and self.retries: if not i < len(self.in_flight): return key = self.in_flight[i] if self.retries[key] > 0: self.retries[key] -= 1 yield key i += 1 if self.retries[key] <= 0: self.on_retry_fail(*key) i -= 1 def on_retry_fail(self, ip, ttl): self.retries.pop((ip, ttl), None) self.in_flight.remove((ip, ttl)) if ttl <= self.max_ttl[ip]: self.result[ip][ttl] = '?' @property def on_tick(self): return getattr(self, '_on_tick', None) or (lambda *args: None) @on_tick.setter def on_tick(self, func): self._on_tick = func @property def on_pong(self): return getattr(self, '_on_pong', None) or (lambda *args: None) @on_pong.setter def on_pong(self, func): self._on_pong = func def tick(self): logger.debug('in_flight=%s, retries=%s', len(self.in_flight), self.retries.most_common(4)) self.on_tick(self) sent = 0 for ip, ttl in self._iter_retry(): self.ping(ip, ttl) sent += 1 if sent >= self.batch_size: break while sent < self.batch_size: try: ip, ttl = self.ip_and_ttl.next() except StopIteration: self.running -= self.timeout return self.ping(ip, ttl) self.retries[(ip, ttl)] = self.max_retry sent += 1 def ping(self, ip, ttl): logger.debug("Ping %s, ttl=%s", ip, ttl) key = (ip, ttl) sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) sock.bind(('', 0)) sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl) icmp_id = random.randint(30000, 60000) self.echo_map[icmp_id] = (ip, ttl) packet = create_ping(icmp_id) sock.sendto(packet, (ip, 0)) sock.close() return icmp_id def pong(self, ping_ip, pong_ip, ttl): # @ToDo: handle multi-path trace-route if ping_ip == pong_ip: ttl = min(ttl, self.max_ttl[ping_ip]) self.max_ttl[ping_ip] = ttl for k in xrange(1, self.MAX_TTL): ip = self.result[ping_ip].get(k) if k > ttl or ip == ping_ip: self.result[ping_ip].pop(k, None) key = ping_ip, ttl try: self.in_flight.remove(key) except ValueError: pass self.retries.pop(key, None) else: key = ping_ip, ttl try: self.in_flight.remove(key) except ValueError: pass self.retries.pop(key, None) self.result[ping_ip][ttl] = pong_ip self.on_pong(self, ping_ip, pong_ip, ttl) def on_data(self, data, addr): # get IP packet inside returned IP outer_ip = IpPacket(data) inner_ip = IpPacket(outer_ip.payload[IcmpParser.hdrl:]) # the raw structure is: IP(ICMP(IP(ICMP))) icmp = IcmpParser(inner_ip.payload) icmp_id = None if icmp.payload.isdigit(): icmp_id = int(icmp.payload) if not icmp_id: icmp_id = icmp.id if icmp_id in self.echo_map: ip, ttl = self.echo_map[icmp_id] logger.debug('Pong %s, ip=%s, hop=%s', ip, addr, ttl) # f.write('%s\t%s\t%s\n' % (ip, ttl, addr)) self.pong(ip, addr, ttl) else: logger.debug('Pong unknown %s -> %s type %s' % ( inner_ip.src_ip, inner_ip.dst_ip, icmp.type)) def get_hops(res): return [res.get(i) or '?' for i in xrange(max(res.keys()), 0, -1)]
bsd-2-clause
-1,979,069,231,076,845,000
27.056738
98
0.523129
false
3.452007
false
false
false
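A quick, hedged self-check of the checksum() routine above: RFC 1071 ones'-complement sums have the property that a buffer with its checksum filled in re-sums to zero. The 8-byte ICMP echo header below is made up for illustration, and the '<H' byte order mirrors what create_ping() does.

# Sketch: verify the RFC 1071 property that a correctly checksummed buffer re-sums to 0.
import struct

def checksum(msg):                      # same routine as in core.py above
    s = 0
    for i in range(0, len(msg), 2):
        w = msg[i] + (msg[i + 1] << 8)
        c = s + w
        s = (c & 0xffff) + (c >> 16)
    return ~s & 0xffff

msg = bytearray(b'\x08\x00\x00\x00\x30\x39\x00\x01')  # ICMP echo request, checksum field zeroed
struct.pack_into('<H', msg, 2, checksum(msg))         # fill in the checksum, low byte first
assert checksum(msg) == 0                             # the ones'-complement sum over the whole packet folds to zero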
ajstarna/RicochetRobots
Brobot/model.py
1
9336
import itertools import random # Directions NORTH = 'N' EAST = 'E' SOUTH = 'S' WEST = 'W' DIRECTIONS = [NORTH, EAST, SOUTH, WEST] REVERSE = { NORTH: SOUTH, EAST: WEST, SOUTH: NORTH, WEST: EAST, } OFFSET = { NORTH: -16, EAST: 1, SOUTH: 16, WEST: -1, } # Masks M_NORTH = 0x01 M_EAST = 0x02 M_SOUTH = 0x04 M_WEST = 0x08 M_ROBOT = 0x10 M_LOOKUP = { NORTH: M_NORTH, EAST: M_EAST, SOUTH: M_SOUTH, WEST: M_WEST, } # Colors RED = 'R' GREEN = 'G' BLUE = 'B' YELLOW = 'Y' COLORS = [RED, GREEN, BLUE, YELLOW] # Shapes CIRCLE = 'C' TRIANGLE = 'T' SQUARE = 'Q' HEXAGON = 'H' SHAPES = [CIRCLE, TRIANGLE, SQUARE, HEXAGON] # Tokens TOKENS = [''.join(token) for token in itertools.product(COLORS, SHAPES)] # Quadrants QUAD_1A = ( 'NW,N,N,N,NE,NW,N,N,' 'W,S,X,X,X,X,SEYH,W,' 'WE,NWGT,X,X,X,X,N,X,' 'W,X,X,X,X,X,X,X,' 'W,X,X,X,X,X,S,X,' 'SW,X,X,X,X,X,NEBQ,W,' 'NW,X,E,SWRC,X,X,X,S,' 'W,X,X,N,X,X,E,NW' ) QUAD_1B = ( 'NW,NE,NW,N,NS,N,N,N,' 'W,S,X,E,NWRC,X,X,X,' 'W,NEGT,W,X,X,X,X,X,' 'W,X,X,X,X,X,SEYH,W,' 'W,X,X,X,X,X,N,X,' 'SW,X,X,X,X,X,X,X,' 'NW,X,E,SWBQ,X,X,X,S,' 'W,X,X,N,X,X,E,NW' ) QUAD_2A = ( 'NW,N,N,NE,NW,N,N,N,' 'W,X,X,X,X,E,SWBC,X,' 'W,S,X,X,X,X,N,X,' 'W,NEYT,W,X,X,S,X,X,' 'W,X,X,X,E,NWGQ,X,X,' 'W,X,SERH,W,X,X,X,X,' 'SW,X,N,X,X,X,X,S,' 'NW,X,X,X,X,X,E,NW' ) QUAD_2B = ( 'NW,N,N,N,NE,NW,N,N,' 'W,X,SERH,W,X,X,X,X,' 'W,X,N,X,X,X,X,X,' 'WE,SWGQ,X,X,X,X,S,X,' 'SW,N,X,X,X,E,NWYT,X,' 'NW,X,X,X,X,S,X,X,' 'W,X,X,X,X,NEBC,W,S,' 'W,X,X,X,X,X,E,NW' ) QUAD_3A = ( 'NW,N,N,NE,NW,N,N,N,' 'W,X,X,X,X,SEGH,W,X,' 'WE,SWRQ,X,X,X,N,X,X,' 'SW,N,X,X,X,X,S,X,' 'NW,X,X,X,X,E,NWYC,X,' 'W,X,S,X,X,X,X,X,' 'W,X,NEBT,W,X,X,X,S,' 'W,X,X,X,X,X,E,NW' ) QUAD_3B = ( 'NW,N,NS,N,NE,NW,N,N,' 'W,E,NWYC,X,X,X,X,X,' 'W,X,X,X,X,X,X,X,' 'W,X,X,X,X,E,SWBT,X,' 'SW,X,X,X,S,X,N,X,' 'NW,X,X,X,NERQ,W,X,X,' 'W,SEGH,W,X,X,X,X,S,' 'W,N,X,X,X,X,E,NW' ) QUAD_4A = ( 'NW,N,N,NE,NW,N,N,N,' 'W,X,X,X,X,X,X,X,' 'W,X,X,X,X,SEBH,W,X,' 'W,X,S,X,X,N,X,X,' 'SW,X,NEGC,W,X,X,X,X,' 'NW,S,X,X,X,X,E,SWRT,' 'WE,NWYQ,X,X,X,X,X,NS,' 'W,X,X,X,X,X,E,NW' ) QUAD_4B = ( 'NW,N,N,NE,NW,N,N,N,' 'WE,SWRT,X,X,X,X,S,X,' 'W,N,X,X,X,X,NEGC,W,' 'W,X,X,X,X,X,X,X,' 'W,X,SEBH,W,X,X,X,S,' 'SW,X,N,X,X,X,E,NWYQ,' 'NW,X,X,X,X,X,X,S,' 'W,X,X,X,X,X,E,NW' ) QUADS = [ (QUAD_1A, QUAD_1B), (QUAD_2A, QUAD_2B), (QUAD_3A, QUAD_3B), (QUAD_4A, QUAD_4B), ] # Rotation ROTATE_QUAD = [ 56, 48, 40, 32, 24, 16, 8, 0, 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 28, 20, 12, 4, 61, 53, 45, 37, 29, 21, 13, 5, 62, 54, 46, 38, 30, 22, 14, 6, 63, 55, 47, 39, 31, 23, 15, 7, ] ROTATE_WALL = { NORTH: EAST, EAST: SOUTH, SOUTH: WEST, WEST: NORTH, } # Helper Functions def idx(x, y, size=16): return y * size + x def xy(index, size=16): x = index % size y = index / size return (x, y) def rotate_quad(data, times=1): for i in range(times): result = [data[index] for index in ROTATE_QUAD] result = [''.join(ROTATE_WALL.get(c, c) for c in x) for x in result] data = result return data def create_grid(quads=None): if quads is None: quads = [random.choice(pair) for pair in QUADS] random.shuffle(quads) quads = [quad.split(',') for quad in quads] quads = [rotate_quad(quads[i], i) for i in [0, 1, 3, 2]] result = [None for i in range(16 * 16)] for i, quad in enumerate(quads): dx, dy = xy(i, 2) for j, data in enumerate(quad): x, y = xy(j, 8) x += dx * 8 y += dy * 8 index = idx(x, y) result[index] = data return result def to_mask(cell): result = 0 for letter, mask in M_LOOKUP.items(): if letter in cell: result |= mask return result # Game class Game(object): @staticmethod def hardest(): quads 
= [QUAD_2B, QUAD_4B, QUAD_3B, QUAD_1B] robots = [226, 48, 43, 18] token = 'BT' return Game(quads=quads, robots=robots, token=token) def __init__(self, seed=None, quads=None, robots=None, token=None): if seed: random.seed(seed) self.grid = create_grid(quads) if robots is None: self.robots = self.place_robots() else: self.robots = dict(zip(COLORS, robots)) self.token = token or random.choice(TOKENS) self.moves = 0 self.last = None def place_robots(self): result = {} used = set() for color in COLORS: while True: index = random.randint(0, 255) if index in (119, 120, 135, 136): continue if self.grid[index][-2:] in TOKENS: continue if index in used: continue result[color] = index used.add(index) break return result def get_robot(self, index): for color, position in self.robots.iteritems(): if position == index: return color return None def can_move(self, color, direction): if self.last == (color, REVERSE[direction]): return False index = self.robots[color] if direction in self.grid[index]: return False new_index = index + OFFSET[direction] if new_index in self.robots.itervalues(): return False return True def compute_move(self, color, direction): index = self.robots[color] robots = self.robots.values() while True: if direction in self.grid[index]: break new_index = index + OFFSET[direction] if new_index in robots: break index = new_index return index def do_move(self, color, direction): start = self.robots[color] last = self.last if last == (color, REVERSE[direction]): print 'reverse' #raise Exception end = self.compute_move(color, direction) if start == end: print 'wall move' #raise Exception self.moves += 1 self.robots[color] = end self.last = (color, direction) return (color, start, last) def undo_move(self, data): color, start, last = data self.moves -= 1 self.robots[color] = start self.last = last def get_moves(self, colors=None): result = [] colors = colors or COLORS for color in colors: for direction in DIRECTIONS: if self.can_move(color, direction): result.append((color, direction)) return result def over(self): color = self.token[0] return self.token in self.grid[self.robots[color]] def key(self): return tuple(self.robots.itervalues()) def search(self): max_depth = 1 while True: #print 'Searching to depth:', max_depth result = self._search([], set(), 0, max_depth) if result is not None: return result max_depth += 1 def _search(self, path, memo, depth, max_depth): if self.over(): return list(path) if depth == max_depth: return None key = (depth, self.key()) if key in memo: return None memo.add(key) if depth == max_depth - 1: colors = [self.token[0]] else: colors = None moves = self.get_moves(colors) for move in moves: data = self.do_move(*move) path.append(move) result = self._search(path, memo, depth + 1, max_depth) path.pop(-1) self.undo_move(data) if result: return result return None def export(self): grid = [] token = None robots = [self.robots[color] for color in COLORS] for index, cell in enumerate(self.grid): mask = to_mask(cell) if index in robots: mask |= M_ROBOT grid.append(mask) if self.token in cell: token = index robot = COLORS.index(self.token[0]) return { 'grid': grid, 'robot': robot, 'token': token, 'robots': robots, } def export2(self): grid = [] token = None robots = [self.robots[color] for color in COLORS] for index, cell in enumerate(self.grid): mask = to_mask(cell) grid.append(mask) if self.token in cell: token = index robot = COLORS.index(self.token[0]) return { 'grid': grid, 'robot': robot, 'token': token, 'robots': robots, }
bsd-2-clause
-6,690,397,051,170,826,000
23.439791
76
0.487575
false
2.67354
false
false
false
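In rough terms, Game assembles a 16x16 board from four rotated quadrants and search() runs an iterative-deepening depth-first search with memoisation on robot positions. A hypothetical usage sketch (Python 2, matching the module's own print statements) follows; the model import path simply mirrors Brobot/model.py.

# Hypothetical usage sketch for the Game class above (Python 2 style, like the module itself).
from model import Game

game = Game(seed=1234)            # random board, robots and target token
print 'target token:', game.token
print 'robot squares:', game.robots

path = game.search()              # iterative-deepening DFS; returns a list of (color, direction) moves
for color, direction in path:
    game.do_move(color, direction)
print 'solved in', game.moves, 'moves'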
wolfgangmauerer/prosoda
prosoda/interactive.py
1
1232
# Commands that are useful after adist.yp has been
# run in ipython

# This file is part of prosoda.  prosoda is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2010, 2011, 2012 by Wolfgang Mauerer <[email protected]>
# All Rights Reserved.

initialiseR()
git = shelve.open("/home/wolfgang/linux-14-33")["git"]
res = createSeries(git, "__main__", ["v2.6.24", "v2.6.25"])
writeToFile(res, "/home/wolfgang/raw.dat")
runR('raw = as.xts(read.zoo(file="/home/wolfgang/raw.dat", FUN=tstamp_to_date))')
runR('reg = to.regts(raw[,1], 250)')
reg = RtoPython(runR('reg'))
raw = RtoPython(runR('raw'))

# ... and then commence with the analysis as desired
gpl-2.0
-7,668,178,328,738,901,000
41.482759
81
0.729708
false
3.242105
false
false
false
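The interactive commands above rely on project helpers (initialiseR, createSeries, writeToFile, runR, RtoPython) that only exist inside that ipython session. Purely for illustration, a rough rpy2 equivalent of the R round trip is sketched below; it is not what prosoda uses, and as.POSIXct stands in for the project's tstamp_to_date helper.

# Rough rpy2 equivalent of the runR()/RtoPython() round trip above (illustrative only).
import rpy2.robjects as ro

ro.r('library(zoo); library(xts)')
ro.r('raw <- as.xts(read.zoo(file="/home/wolfgang/raw.dat", FUN=as.POSIXct))')
ro.r('reg <- to.regts(raw[, 1], 250)')   # to.regts comes from prosoda's R sources, assumed loaded
reg = list(ro.r('coredata(reg)'))        # pull the regularised series back into Python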
kdebrab/pandas
pandas/core/indexes/category.py
1
30548
import operator import numpy as np from pandas._libs import index as libindex from pandas import compat from pandas.compat.numpy import function as nv from pandas.core.dtypes.generic import ABCCategorical, ABCSeries from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( is_categorical_dtype, ensure_platform_int, is_list_like, is_interval_dtype, is_scalar) from pandas.core.dtypes.missing import array_equivalent, isna from pandas.core.algorithms import take_1d from pandas.util._decorators import Appender, cache_readonly from pandas.core.config import get_option from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core import accessor import pandas.core.common as com import pandas.core.missing as missing import pandas.core.indexes.base as ibase from pandas.core.arrays.categorical import Categorical, contains _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update(dict(target_klass='CategoricalIndex')) class CategoricalIndex(Index, accessor.PandasDelegate): """ Immutable Index implementing an ordered, sliceable set. CategoricalIndex represents a sparsely populated Index with an underlying Categorical. Parameters ---------- data : array-like or Categorical, (1-dimensional) categories : optional, array-like categories for the CategoricalIndex ordered : boolean, designating if the categories are ordered copy : bool Make a copy of input ndarray name : object Name to be stored in the index Attributes ---------- codes categories ordered Methods ------- rename_categories reorder_categories add_categories remove_categories remove_unused_categories set_categories as_ordered as_unordered map See Also -------- Categorical, Index """ _typ = 'categoricalindex' _engine_type = libindex.Int64Engine _attributes = ['name'] def __new__(cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None, fastpath=False): if fastpath: return cls._simple_new(data, name=name, dtype=dtype) if name is None and hasattr(data, 'name'): name = data.name if isinstance(data, ABCCategorical): data = cls._create_categorical(data, categories, ordered, dtype) elif isinstance(data, CategoricalIndex): data = data._data data = cls._create_categorical(data, categories, ordered, dtype) else: # don't allow scalars # if data is None, then categories must be provided if is_scalar(data): if data is not None or categories is None: cls._scalar_data_error(data) data = [] data = cls._create_categorical(data, categories, ordered, dtype) if copy: data = data.copy() return cls._simple_new(data, name=name) def _create_from_codes(self, codes, categories=None, ordered=None, name=None): """ *this is an internal non-public method* create the correct categorical from codes Parameters ---------- codes : new codes categories : optional categories, defaults to existing ordered : optional ordered attribute, defaults to existing name : optional name attribute, defaults to existing Returns ------- CategoricalIndex """ if categories is None: categories = self.categories if ordered is None: ordered = self.ordered if name is None: name = self.name cat = Categorical.from_codes(codes, categories=categories, ordered=self.ordered) return CategoricalIndex(cat, name=name) @classmethod def _create_categorical(cls, data, categories=None, ordered=None, dtype=None): """ *this is an internal non-public method* create the correct categorical from data and the properties Parameters ---------- data : data for new Categorical categories : optional categories, defaults to existing 
ordered : optional ordered attribute, defaults to existing dtype : CategoricalDtype, defaults to existing Returns ------- Categorical """ if (isinstance(data, (cls, ABCSeries)) and is_categorical_dtype(data)): data = data.values if not isinstance(data, ABCCategorical): if ordered is None and dtype is None: ordered = False data = Categorical(data, categories=categories, ordered=ordered, dtype=dtype) else: if categories is not None: data = data.set_categories(categories, ordered=ordered) elif ordered is not None and ordered != data.ordered: data = data.set_ordered(ordered) if isinstance(dtype, CategoricalDtype) and dtype != data.dtype: # we want to silently ignore dtype='category' data = data._set_dtype(dtype) return data @classmethod def _simple_new(cls, values, name=None, categories=None, ordered=None, dtype=None, **kwargs): result = object.__new__(cls) values = cls._create_categorical(values, categories, ordered, dtype=dtype) result._data = values result.name = name for k, v in compat.iteritems(kwargs): setattr(result, k, v) result._reset_identity() return result @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, categories=None, ordered=None, dtype=None, **kwargs): # categories and ordered can't be part of attributes, # as these are properties # we want to reuse self.dtype if possible, i.e. neither are # overridden. if dtype is not None and (categories is not None or ordered is not None): raise TypeError("Cannot specify both `dtype` and `categories` " "or `ordered`") if categories is None and ordered is None: dtype = self.dtype if dtype is None else dtype return super(CategoricalIndex, self)._shallow_copy( values=values, dtype=dtype, **kwargs) if categories is None: categories = self.categories if ordered is None: ordered = self.ordered return super(CategoricalIndex, self)._shallow_copy( values=values, categories=categories, ordered=ordered, **kwargs) def _is_dtype_compat(self, other): """ *this is an internal non-public method* provide a comparison between the dtype of self and other (coercing if needed) Raises ------ TypeError if the dtypes are not compatible """ if is_categorical_dtype(other): if isinstance(other, CategoricalIndex): other = other._values if not other.is_dtype_equal(self): raise TypeError("categories must match existing categories " "when appending") else: values = other if not is_list_like(values): values = [values] other = CategoricalIndex(self._create_categorical( other, dtype=self.dtype)) if not other.isin(values).all(): raise TypeError("cannot append a non-category item to a " "CategoricalIndex") return other def equals(self, other): """ Determines if two CategorialIndex objects contain the same elements. 
""" if self.is_(other): return True if not isinstance(other, Index): return False try: other = self._is_dtype_compat(other) return array_equivalent(self._data, other) except (TypeError, ValueError): pass return False @property def _formatter_func(self): return self.categories._formatter_func def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) attrs = [ ('categories', ibase.default_pprint(self.categories, max_seq_items=max_categories)), ('ordered', self.ordered)] if self.name is not None: attrs.append(('name', ibase.default_pprint(self.name))) attrs.append(('dtype', "'%s'" % self.dtype.name)) max_seq_items = get_option('display.max_seq_items') or len(self) if len(self) > max_seq_items: attrs.append(('length', len(self))) return attrs @property def inferred_type(self): return 'categorical' @property def values(self): """ return the underlying data, which is a Categorical """ return self._data @property def itemsize(self): # Size of the items in categories, not codes. return self.values.itemsize def get_values(self): """ return the underlying data as an ndarray """ return self._data.get_values() def tolist(self): return self._data.tolist() @property def codes(self): return self._data.codes @property def categories(self): return self._data.categories @property def ordered(self): return self._data.ordered def _reverse_indexer(self): return self._data._reverse_indexer() @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): # if key is a NaN, check if any NaN is in self. if isna(key): return self.hasnans return contains(self, key, container=self._engine) @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) def contains(self, key): return key in self def __array__(self, dtype=None): """ the array interface, return my values """ return np.array(self._data, dtype=dtype) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): if is_interval_dtype(dtype): from pandas import IntervalIndex return IntervalIndex(np.array(self)) elif is_categorical_dtype(dtype): # GH 18630 dtype = self.dtype.update_dtype(dtype) if dtype == self.dtype: return self.copy() if copy else self return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy) @cache_readonly def _isnan(self): """ return if each value is nan""" return self._data.codes == -1 @Appender(ibase._index_shared_docs['fillna']) def fillna(self, value, downcast=None): self._assert_can_do_op(value) return CategoricalIndex(self._data.fillna(value), name=self.name) def argsort(self, *args, **kwargs): return self.values.argsort(*args, **kwargs) @cache_readonly def _engine(self): # we are going to look things up with the codes themselves return self._engine_type(lambda: self.codes.astype('i8'), len(self)) # introspection @cache_readonly def is_unique(self): return self._engine.is_unique @property def is_monotonic_increasing(self): return self._engine.is_monotonic_increasing @property def is_monotonic_decreasing(self): return self._engine.is_monotonic_decreasing @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs) def unique(self, level=None): if level is not None: self._validate_index_level(level) result = self.values.unique() # CategoricalIndex._shallow_copy keeps original categories # and ordered if not otherwise specified return self._shallow_copy(result, categories=result.categories, ordered=result.ordered) 
@Appender(Index.duplicated.__doc__) def duplicated(self, keep='first'): from pandas._libs.hashtable import duplicated_int64 codes = self.codes.astype('i8') return duplicated_int64(codes, keep) def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.astype('object') def get_loc(self, key, method=None): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label method : {None} * default: exact matches only. Returns ------- loc : int if unique index, slice if monotonic index, else mask Examples --------- >>> unique_index = pd.CategoricalIndex(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.CategoricalIndex(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.CategoricalIndex(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True], dtype=bool) """ codes = self.categories.get_loc(key) if (codes == -1): raise KeyError(key) return self._engine.get_loc(codes) def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ try: k = com._values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] except (KeyError, TypeError): pass # we might be a positional inexer return super(CategoricalIndex, self).get_value(series, key) def _can_reindex(self, indexer): """ always allow reindexing """ pass @Appender(_index_shared_docs['where']) def where(self, cond, other=None): if other is None: other = self._na_value values = np.where(cond, self.values, other) cat = Categorical(values, categories=self.categories, ordered=self.ordered) return self._shallow_copy(cat, **self._get_attributes_dict()) def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.Index Resulting index indexer : np.ndarray or None Indices of output values in original index """ if method is not None: raise NotImplementedError("argument method is not implemented for " "CategoricalIndex.reindex") if level is not None: raise NotImplementedError("argument level is not implemented for " "CategoricalIndex.reindex") if limit is not None: raise NotImplementedError("argument limit is not implemented for " "CategoricalIndex.reindex") target = ibase.ensure_index(target) if not is_categorical_dtype(target) and not target.is_unique: raise ValueError("cannot reindex with a non-unique indexer") indexer, missing = self.get_indexer_non_unique(np.array(target)) if len(self.codes): new_target = self.take(indexer) else: new_target = target # filling in missing if needed if len(missing): cats = self.categories.get_indexer(target) if (cats == -1).any(): # coerce to a regular index here! result = Index(np.array(self), name=self.name) new_target, indexer, _ = result._reindex_non_unique( np.array(target)) else: codes = new_target.codes.copy() codes[indexer == -1] = cats[missing] new_target = self._create_from_codes(codes) # we always want to return an Index type here # to be consistent with .reindex for other index types (e.g. 
they don't # coerce based on the actual values, only on the dtype) # unless we had an initial Categorical to begin with # in which case we are going to conform to the passed Categorical new_target = np.asarray(new_target) if is_categorical_dtype(target): new_target = target._shallow_copy(new_target, name=self.name) else: new_target = Index(new_target, name=self.name) return new_target, indexer def _reindex_non_unique(self, target): """ reindex from a non-unique; which CategoricalIndex's are almost always """ new_target, indexer = self.reindex(target) new_indexer = None check = indexer == -1 if check.any(): new_indexer = np.arange(len(self.take(indexer))) new_indexer[check] = -1 cats = self.categories.get_indexer(target) if not (cats == -1).any(): # .reindex returns normal Index. Revert to CategoricalIndex if # all targets are included in my categories new_target = self._shallow_copy(new_target) return new_target, indexer, new_indexer @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): from pandas.core.arrays.categorical import _recode_for_categories method = missing.clean_reindex_fill_method(method) target = ibase.ensure_index(target) if self.is_unique and self.equals(target): return np.arange(len(self), dtype='intp') if method == 'pad' or method == 'backfill': raise NotImplementedError("method='pad' and method='backfill' not " "implemented yet for CategoricalIndex") elif method == 'nearest': raise NotImplementedError("method='nearest' not implemented yet " 'for CategoricalIndex') if (isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target)): if self.values.equals(target.values): # we have the same codes codes = target.codes else: codes = _recode_for_categories(target.codes, target.categories, self.values.categories) else: if isinstance(target, CategoricalIndex): code_indexer = self.categories.get_indexer(target.categories) codes = take_1d(code_indexer, target.codes, fill_value=-1) else: codes = self.categories.get_indexer(target) indexer, _ = self._engine.get_indexer_non_unique(codes) return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): target = ibase.ensure_index(target) if isinstance(target, CategoricalIndex): # Indexing on codes is more efficient if categories are the same: if target.categories is self.categories: target = target.codes indexer, missing = self._engine.get_indexer_non_unique(target) return ensure_platform_int(indexer), missing target = target.values codes = self.categories.get_indexer(target) indexer, missing = self._engine.get_indexer_non_unique(codes) return ensure_platform_int(indexer), missing @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): if self.categories._defer_to_indexing: return self.categories._convert_scalar_indexer(key, kind=kind) return super(CategoricalIndex, self)._convert_scalar_indexer( key, kind=kind) @Appender(_index_shared_docs['_convert_list_indexer']) def _convert_list_indexer(self, keyarr, kind=None): # Return our indexer or raise if all of the values are not included in # the categories if self.categories._defer_to_indexing: indexer = self.categories._convert_list_indexer(keyarr, kind=kind) return Index(self.codes).get_indexer_for(indexer) indexer = self.categories.get_indexer(np.asarray(keyarr)) if (indexer == -1).any(): raise KeyError( "a list-indexer must only " "include values that 
are " "in the categories") return self.get_indexer(keyarr) @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): keyarr = com._asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr return self._shallow_copy(keyarr) @Appender(_index_shared_docs['_convert_index_indexer']) def _convert_index_indexer(self, keyarr): return self._shallow_copy(keyarr) @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) indices = ensure_platform_int(indices) taken = self._assert_take_fillable(self.codes, indices, allow_fill=allow_fill, fill_value=fill_value, na_value=-1) return self._create_from_codes(taken) def is_dtype_equal(self, other): return self._data.is_dtype_equal(other) take_nd = take def map(self, mapper): """ Map values using input correspondence (a dict, Series, or function). Maps the values (their categories, not the codes) of the index to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.CategoricalIndex` which has the same order property as the original, otherwise an :class:`~pandas.Index` is returned. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.CategoricalIndex or pandas.Index Mapped index. See Also -------- Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> idx = pd.CategoricalIndex(['a', 'b', 'c']) >>> idx CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') >>> idx.map(lambda x: x.upper()) CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'], ordered=False, dtype='category') >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'}) CategoricalIndex(['first', 'second', 'third'], categories=['first', 'second', 'third'], ordered=False, dtype='category') If the mapping is one-to-one the ordering of the categories is preserved: >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True) >>> idx CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=True, dtype='category') >>> idx.map({'a': 3, 'b': 2, 'c': 1}) CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True, dtype='category') If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> idx.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object') """ return self._shallow_copy_with_infer(self.values.map(mapper)) def delete(self, loc): """ Make new Index with passed location(-s) deleted Returns ------- new_index : Index """ return self._create_from_codes(np.delete(self.codes, loc)) def insert(self, loc, item): """ Make new Index inserting new item at location. 
Follows Python list.append semantics for negative values Parameters ---------- loc : int item : object Returns ------- new_index : Index Raises ------ ValueError if the item is not in the categories """ code = self.categories.get_indexer([item]) if (code == -1) and not (is_scalar(item) and isna(item)): raise TypeError("cannot insert an item into a CategoricalIndex " "that is not already an existing category") codes = self.codes codes = np.concatenate((codes[:loc], code, codes[loc:])) return self._create_from_codes(codes) def _concat(self, to_concat, name): # if calling index is category, don't check dtype of others return CategoricalIndex._concat_same_dtype(self, to_concat, name) def _concat_same_dtype(self, to_concat, name): """ Concatenate to_concat which has the same class ValueError if other is not in the categories """ to_concat = [self._is_dtype_compat(c) for c in to_concat] codes = np.concatenate([c.codes for c in to_concat]) result = self._create_from_codes(codes, name=name) # if name is None, _create_from_codes sets self.name result.name = name return result def _codes_for_groupby(self, sort, observed): """ Return a Categorical adjusted for groupby """ return self.values._codes_for_groupby(sort, observed) @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ def _make_compare(op): opname = '__{op}__'.format(op=op.__name__) def _evaluate_compare(self, other): # if we have a Categorical type, then must have the same # categories if isinstance(other, CategoricalIndex): other = other._values elif isinstance(other, Index): other = self._create_categorical( other._values, dtype=self.dtype) if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)): if len(self.values) != len(other): raise ValueError("Lengths must match to compare") if isinstance(other, ABCCategorical): if not self.values.is_dtype_equal(other): raise TypeError("categorical index comparisons must " "have the same categories and ordered " "attributes") result = op(self.values, other) if isinstance(result, ABCSeries): # Dispatch to pd.Categorical returned NotImplemented # and we got a Series back; down-cast to ndarray result = result.values return result return compat.set_function_name(_evaluate_compare, opname, cls) cls.__eq__ = _make_compare(operator.eq) cls.__ne__ = _make_compare(operator.ne) cls.__lt__ = _make_compare(operator.lt) cls.__gt__ = _make_compare(operator.gt) cls.__le__ = _make_compare(operator.le) cls.__ge__ = _make_compare(operator.ge) def _delegate_method(self, name, *args, **kwargs): """ method delegation to the ._values """ method = getattr(self._values, name) if 'inplace' in kwargs: raise ValueError("cannot use inplace with CategoricalIndex") res = method(*args, **kwargs) if is_scalar(res): return res return CategoricalIndex(res, name=self.name) @classmethod def _add_accessors(cls): """ add in Categorical accessor methods """ CategoricalIndex._add_delegate_accessors( delegate=Categorical, accessors=["rename_categories", "reorder_categories", "add_categories", "remove_categories", "remove_unused_categories", "set_categories", "as_ordered", "as_unordered", "min", "max"], typ='method', overwrite=True) CategoricalIndex._add_numeric_methods_add_sub_disabled() CategoricalIndex._add_numeric_methods_disabled() CategoricalIndex._add_logical_methods_disabled() CategoricalIndex._add_comparison_methods() CategoricalIndex._add_accessors()
bsd-3-clause
-6,292,839,324,184,888,000
34.52093
79
0.57097
false
4.50494
false
false
false
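A short usage sketch of the class above, sticking to behaviour its own docstrings document (get_loc on a monotonic non-unique index, dict-based map); the Series values are arbitrary.

import pandas as pd

idx = pd.CategoricalIndex(list('abbc'))
idx.get_loc('b')        # slice(1, 3, None) -- monotonic, non-unique label (see the get_loc docstring above)

s = pd.Series([10, 20, 30, 40], index=idx)
s['b']                  # both 'b' rows are returned

idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
# one-to-one category mapping, so the result stays a CategoricalIndex (see the map docstring above)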
chop-dbhi/varify-data-warehouse
vdw/genes/models.py
1
4984
from django.db import models from django.contrib.auth.models import User from objectset.models import ObjectSet, SetObject from vdw.literature.models import PubMed from vdw.genome.models import Chromosome from vdw.phenotypes.models import Phenotype, PhenotypeThrough from .managers import GeneManager class GeneFamily(models.Model): "Gene family tags and descriptions." tag = models.CharField(max_length=30, null=True) description = models.CharField(max_length=200, null=True) class Meta(object): db_table = 'gene_family' class Synonym(models.Model): """Model which contains known alternate gene names and symbols for the canonical genes. This can be used as an index for search-related queries. """ # Call it a label since this may be a symbol, a name or something else label = models.CharField(max_length=255, db_index=True) class Meta(object): db_table = 'synonym' class Gene(models.Model): """Unified gene model. This includes data from multiple sources with the appropriate `id` defined to which references the source. If multiple sources contain have overlap, the respective `id`s will be filled in. The canonical source is HGNC, which approves gene names and symbols, the `approved` flag should be set if this is the approved gene name and symbol by HGNC. """ chr = models.ForeignKey(Chromosome) symbol = models.CharField(max_length=255, db_index=True) name = models.TextField('full name', blank=True) hgnc_id = models.IntegerField('HGNC ID', null=True, blank=True) # Via the HGNC documentation: "Families/groups may be either structural or # functional, therefore a gene may belong to more than one family/group" families = models.ManyToManyField(GeneFamily, blank=True) # Literature articles = models.ManyToManyField(PubMed, db_table='gene_pubmed') # Synonyms synonyms = models.ManyToManyField(Synonym, db_table='gene_synonym') # Phenotypes phenotypes = models.ManyToManyField(Phenotype, through='GenePhenotype') objects = GeneManager() class Meta(object): db_table = 'gene' def __unicode__(self): return self.symbol def approved(self): return self.hgnc_id is not None def hgnc_url(self): if self.hgnc_id: return 'http://www.genenames.org/data/hgnc_data.php?hgnc_id=' + \ str(self.hgnc_id) class GenePhenotype(PhenotypeThrough): gene = models.ForeignKey(Gene) class Meta(object): db_table = 'gene_phenotype' class Exon(models.Model): "Gene-specific exon region" gene = models.ForeignKey(Gene) index = models.IntegerField('exon index') start = models.IntegerField('exon start position') end = models.IntegerField('exon end position') class Meta(object): db_table = 'exon' class Transcript(models.Model): "Gene transcripts" refseq_id = models.CharField(max_length=100, unique=True) strand = models.CharField(max_length=1, null=True, blank=True, help_text='+ or - for strand') start = models.IntegerField('transcript start position', null=True, blank=True) end = models.IntegerField('transcript end position', null=True, blank=True) coding_start = models.IntegerField('coding region start position', null=True, blank=True) coding_end = models.IntegerField('coding region end position', null=True, blank=True) coding_start_status = models.CharField('coding region start status', max_length=20, null=True, blank=True) coding_end_status = models.CharField('coding region end status', max_length=20, null=True, blank=True) exon_count = models.IntegerField('number of exons', null=True, blank=True) gene = models.ForeignKey(Gene, null=True, blank=True) exons = models.ManyToManyField(Exon, db_table='transcript_exon') class Meta(object): db_table = 
'transcript' def ncbi_url(self): return 'http://www.ncbi.nlm.nih.gov/nuccore/' + self.refseq_id class GeneSet(ObjectSet): user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=100, null=True, blank=True) genes = models.ManyToManyField(Gene, through='GeneSetObject') published = models.BooleanField(default=True) set_object_rel = 'genes' label_field = 'name' def __unicode__(self): return unicode(self.name) class Meta(object): db_table = 'geneset' ordering = ('user', 'name',) class GeneSetObject(SetObject): object_set = models.ForeignKey(GeneSet, db_column='set_id') set_object = models.ForeignKey(Gene, db_column='object_id') class Meta(object): db_table = 'geneset_setobject'
bsd-2-clause
2,703,393,772,772,176,400
33.372414
79
0.660112
false
3.787234
false
false
false
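A hypothetical ORM sketch showing how these models hang together; it assumes a configured Django project with this app installed, and the gene symbol is an arbitrary example.

# Hypothetical query sketch against the models above (requires a configured Django project).
from vdw.genes.models import Gene

gene = Gene.objects.filter(symbol__iexact='TP53').first()        # 'TP53' is just an illustrative symbol
if gene is not None and gene.approved():
    print(gene.hgnc_url())                                       # HGNC link built from hgnc_id
    print(list(gene.synonyms.values_list('label', flat=True)))   # alternate symbols/names
    print(gene.transcript_set.count())                           # reverse FK from Transcript.gene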
numericube/twistranet
twistranet/twistapp/forms/fields.py
1
7275
""" The twistranet Fields """ import os import urlparse from django import forms from django.core.validators import URL_VALIDATOR_USER_AGENT from django.db import models from django.core.validators import EMPTY_VALUES from django.utils.encoding import smart_unicode from django.utils.translation import ugettext as _ from twistranet.twistapp.lib.log import log import widgets from validators import URLValidator, ViewPathValidator class PermissionFormField(forms.ChoiceField): """ This overrides the regular ChoiceField to add additional rendering. """ widget = widgets.PermissionsWidget def __init__( self, choices = (), required=True, widget=None, max_length = None, label=None, initial=None, help_text=None, to_field_name=None, *args, **kwargs ): super(PermissionFormField, self).__init__(choices, required, widget, label, initial, help_text, *args, **kwargs) # We put this here to avoid import errors self.default_error_messages = { 'invalid_choice': _(u'Select a valid choice. That choice is not one of' u' the available choices.'), } class PermissionsFormField(forms.ChoiceField): """ This overrides the regular ChoiceField to add additional rendering. """ def valid_value(self, value): "Check to see if the provided value is a valid choice" for id, name, description in self.choices: if value == smart_unicode(id): return True return False class ModelInputField(forms.Field): """ This is a field used to enter a foreign key value inside a classic Input widget. This is used when there are a lot of values to check against (and ModelChoiceField is not efficient anymore), plus the value is checked against the QuerySet very late in the process. """ def __init__( self, model, filter = None, required=True, widget=None, label=None, initial=None, help_text=None, to_field_name=None, *args, **kwargs ): super(ModelInputField, self).__init__(required, widget, label, initial, help_text, *args, **kwargs) self.model = model self.filter = filter self.to_field_name = to_field_name # We put this here to avoid import errors self.default_error_messages = { 'invalid_choice': _(u'Select a valid choice. That choice is not one of' u' the available choices.'), } def to_python(self, value): """ 'Resolve' the query set at validation time. This way, we're sure to have the freshest version of the QS. """ if value in EMPTY_VALUES: return None try: key = self.to_field_name or 'pk' qs = self.model.objects.get_query_set() if self.filter: qs = qs.filter(self.filter) value = qs.get(**{key: value}) except self.queryset.model.DoesNotExist: raise ValidationError(self.error_messages['invalid_choice']) return value class ResourceFormField(forms.MultiValueField): """ The ResourceFormField is a resource browser. You can pass it a few parameters: - model which is the subclass you want to read your resources from (default: twistranet.Resource). Useful if you want to display only images for example. - filter which will be passed to model.objects.filter() call before rendering the widget. These model / filter params are the only solution to handle choices WITH the security model. 
- allow_upload (upload is ok) - allow_select (can select an existing resource from the given filter) """ widget = widgets.ResourceWidget field = ModelInputField model = None filter = None def __init__(self, *args, **kwargs): # Initial values from twistranet.twistapp.models import Resource self.model = kwargs.pop("model", Resource) self.filter = kwargs.pop("filter", None) self.allow_upload = kwargs.pop("allow_upload", True) self.allow_select = kwargs.pop("allow_select", True) self.display_renderer = kwargs.pop("display_renderer", True) self.media_type = kwargs.pop("media_type", 'file') self.widget = kwargs.pop("widget", self.widget( model = self.model, filter = self.filter, allow_upload = self.allow_upload, allow_select = self.allow_select, display_renderer = self.display_renderer, media_type = self.media_type )) self.required = kwargs.pop("required", True) # The fields we'll use: # - A ModelInputField used to handle the ForeignKey. # - A FileField used to handle data upload. fields = [] field0 = self.field(model = self.model, filter = self.filter, required = self.required) # no more used # field1 = forms.FileField(required = False) dummy = forms.CharField(required = False) if self.allow_select or self.allow_upload: fields.append(field0) else: fields.append(dummy) # # Compatibility with form_for_instance # if kwargs.get('initial'): # initial = kwargs['initial'] # else: # initial = None # self.widget = self.widget(initial=initial) super(ResourceFormField, self).__init__(fields, label = kwargs.pop('label'), required = False) #self.required) def prepare_value(self, value): """ Pass the query_set to the underlying widget, so that it's computed as late as possible. """ qs = self.model.objects.get_query_set() if self.filter: qs = qs.filter(self.filter) self.widget.query_set = qs return super(ResourceFormField, self).prepare_value(value) def compress(self, data_list): return data_list # URLField which also accept relative urls class LargeURLField(forms.CharField): """ A URL field which accepts internal link and intranet links (without a standard domain) """ def __init__(self, max_length=None, min_length=None, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT, *args, **kwargs): super(LargeURLField, self).__init__(max_length, min_length, *args, **kwargs) self.validators.append(URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent)) def to_python(self, value): if value: value = urlparse.urlunparse(urlparse.urlparse(value)) return super(LargeURLField, self).to_python(value) class ViewPathField(forms.CharField): """ View Path field (could be improved) """ def __init__(self, max_length=None, min_length=None, *args, **kwargs): super(ViewPathField, self).__init__(max_length, min_length, *args, **kwargs) self.validators.append(ViewPathValidator()) self.default_error_messages = { 'invalid': _(u'Enter a valid Path.'),}
agpl-3.0
4,674,477,367,613,389,000
37.492063
120
0.619794
false
4.244457
false
false
false
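A minimal, hypothetical form built on the two URL-ish fields above; keyword arguments fall through to forms.CharField, so required and help_text behave as usual. The example values are invented and would still have to satisfy URLValidator / ViewPathValidator.

# Hypothetical form sketch built on the custom fields above.
from django import forms
from twistranet.twistapp.forms.fields import LargeURLField, ViewPathField

class LinkForm(forms.Form):
    external = LargeURLField(required=False, help_text="Absolute, relative or intranet URL")
    internal = ViewPathField(required=False)

form = LinkForm(data={'external': '/docs/start', 'internal': 'home_view'})
form.is_valid()     # validation is delegated to the URLValidator / ViewPathValidator in validators.py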
PhyloStar/PyBayes
params_moves.py
1
1027
import numpy as np
from scipy.stats import dirichlet
import random, math

dir_alpha = 100.0      # concentration used for the Dirichlet proposal
scaler_alpha = 1.25    # tuning constant for the scaler moves
epsilon = 1e-10


def mvDirichlet(pi):
    # Propose a new simplex point from Dirichlet(dir_alpha * pi) and return a log proposal-density correction.
    pi_new = dirichlet.rvs(dir_alpha*pi)[0]
    #print(pi, pi_new)
    hastings_ratio = dirichlet.logpdf(pi, pi_new) - dirichlet.logpdf(pi_new, pi)
    return pi_new, hastings_ratio


def mvDualSlider(pi):
    # Redistribute mass between two randomly chosen components; the move is symmetric (log ratio 0).
    i, j = random.sample(range(pi.shape[0]), 2)
    sum_ij = pi[i] + pi[j]
    x = random.uniform(epsilon, sum_ij)
    y = sum_ij - x
    pi[i], pi[j] = x, y
    return pi, 0.0


def mvScaler(x):
    # Multiplicative scaling move; log_c is returned as the log Hastings correction.
    log_c = scaler_alpha*(np.random.uniform()-0.5)
    c = math.exp(log_c)
    x_new = x*c
    return x_new, log_c


def mvVecScaler(X):
    # Same scaling move applied elementwise to a vector.
    log_c = scaler_alpha*(np.random.uniform()-0.5)
    c = math.exp(log_c)
    X_new = X*c
    return X_new, log_c


def mvSlider(x, a, b):
    """ a and b are bounds """
    # Uniform window proposal, reflected back into [a, b].
    x_hat = np.random.uniform(x-0.5, x+0.5)
    if x_hat < a:
        return 2.0*a - x_hat
    elif x_hat > b:   # fixed: the original referenced an undefined name `xhat`
        return 2.0*b - x_hat
    else:
        return x_hat
gpl-2.0
-2,911,197,455,575,236,000
21.822222
80
0.589094
false
2.542079
false
false
false
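Each move above returns a proposed value plus a log Hastings correction, which is exactly what a Metropolis-Hastings acceptance step consumes. The sketch below assumes the module above is in scope and that a user-supplied log_posterior function exists; neither the starting point nor log_posterior is part of this module.

# Hypothetical Metropolis-Hastings acceptance step around mvDirichlet (log_posterior is assumed).
import math, random
import numpy as np

pi = np.array([0.2, 0.3, 0.5])                  # current point on the simplex

pi_new, log_hastings = mvDirichlet(pi)          # proposal + log proposal-ratio correction
log_alpha = log_posterior(pi_new) - log_posterior(pi) + log_hastings

if math.log(random.random()) < log_alpha:
    pi = pi_new                                 # accept; otherwise keep the current state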