repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
Samnsparky/combinations_distance_summarizer | summarizer.py | 1 | 4757 | """Utility to find the combinations of words and return the average distance.
Utility program to find the combinations of words in a given list, the distances
between those combination pairs, and the average of those distances. The final
return value is that average.
@author: Ariel Aguilar, 2013
@author: Sam Pottinger, 2013
@license: MIT
"""
import csv
import itertools
import sys
class WordDistanceFinder:
"""Wrapper around a list of dictionaries with distances between words.
Wrapper around a list of dictionaries with the distances between words
loaded CSV or other structured data.
"""
def __init__(self, distances):
"""Create a new word distance finder.
@param distances: Description of the distances between words. Should
be a list with dictionaries. The dictionaries should have a 'word'
key with a value indicating what word the dictionary is for. The
rest of the keys should be other words with distances to those
words.
@type distances: list of dict
"""
self.__distances = distances
def find_distance_list(self, words):
"""Find the distance between the two words in the given parameter.
@param words: The list of two words to find the distance between.
@type words: List of str.
@return: The distances between those words.
@rtype: list of float
"""
if len(words) != 2:
raise ValueError('Can only find distance between two words.')
return self.find_distance(words[0], words[1])
def find_distance(self, word_1, word_2):
"""Find the distance between two words.
@param word_1: The first word in the pair of words to find the distance
between.
@type word_1: str
@param word_2: The second word in the pair of words to find the distance
between.
        @type word_2: str
@return: The distance between word_1 and word_2
@rtype: float
"""
word_rows = filter(lambda x: x['word'] == word_1, self.__distances)
if len(word_rows) == 0:
raise ValueError('%s not found.' % word_1)
elif len(word_rows) > 1:
raise ValueError('Multiple entries for %s found.' % word_1)
word_row = word_rows[0]
if word_2 in word_row:
return float(word_row[word_2])
else:
raise ValueError('Distance %s to %s not found.' % (word_1, word_2))
def load_distances_csv(loc):
"""Load a CSV file containing word distances.
@param loc: The path or file name of the CSV file to load.
@type loc: str
@return: WordDistanceFinder from contents of the given CSV file.
@rtype: WordDistanceFinder
"""
with open(loc, 'rb') as f:
dialect = csv.Sniffer().sniff(f.readline())
f.seek(0)
values = list(csv.DictReader(f, dialect=dialect))
return WordDistanceFinder(values)
def load_words_to_summarize(loc):
with open(loc, 'rb') as f:
words = f.read().split('\n')
return filter(lambda x: x != '', words)
def find_combinations_and_distances(distance_finder, words):
# Find distances for all combinations
word_combinations = list(itertools.combinations(words, 2))
word_distances = map(distance_finder.find_distance_list, word_combinations)
return (word_combinations, word_distances)
def arithmetic_mean(target):
return sum(target) / float(len(target))
def run_cli():
"""Run the command line interface driver for this program.
@return: The average distance between the combination of user-provided
words or None if error.
@rtype: float
"""
# Check correct number of arguments supplied
if len(sys.argv) < 3 or len(sys.argv) > 4:
with open('usage.txt') as f:
sys.stderr.write(f.read())
return None
# Parse command line arguments and load distances
words_loc = sys.argv[1]
distances_csv_loc = sys.argv[2]
if len(sys.argv) == 4:
display_pairs = sys.argv[3].lower() == 'y'
else:
display_pairs = False
words = load_words_to_summarize(words_loc)
distance_finder = load_distances_csv(distances_csv_loc)
    word_combinations, word_distances = find_combinations_and_distances(
distance_finder, words)
# Display individual pairs
if display_pairs:
for (pair, distance) in zip(word_combinations, word_distances):
print "%s: %s" % (pair, distance)
return arithmetic_mean(word_distances)
if __name__ == '__main__':
result = run_cli()
    if result is not None:
sys.stdout.write(str(result))
sys.stdout.write('\n')
sys.exit(0)
else:
sys.exit(1)
| mit | 859,668,767,277,711,400 | 30.091503 | 80 | 0.635695 | false |
wwitzel3/awx | awx/main/management/commands/replay_job_events.py | 1 | 10556 | # Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
import sys
import time
import json
import random
from django.utils import timezone
from django.core.management.base import BaseCommand
from awx.main.models import (
UnifiedJob,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob
)
from awx.main.consumers import emit_channel_notification
from awx.api.serializers import (
JobEventWebSocketSerializer,
AdHocCommandEventWebSocketSerializer,
ProjectUpdateEventWebSocketSerializer,
InventoryUpdateEventWebSocketSerializer,
SystemJobEventWebSocketSerializer
)
class JobStatusLifeCycle():
def emit_job_status(self, job, status):
# {"status": "successful", "project_id": 13, "unified_job_id": 659, "group_name": "jobs"}
job.websocket_emit_status(status)
def determine_job_event_finish_status_index(self, job_event_count, random_seed):
if random_seed == 0:
return job_event_count - 1
random.seed(random_seed)
job_event_index = random.randint(0, job_event_count - 1)
return job_event_index
class ReplayJobEvents(JobStatusLifeCycle):
recording_start = None
replay_start = None
def now(self):
return timezone.now()
def start(self, first_event_created):
self.recording_start = first_event_created
self.replay_start = self.now()
def lateness(self, now, created):
time_passed = now - self.recording_start
job_event_time = created - self.replay_start
return (time_passed - job_event_time).total_seconds()
def get_job(self, job_id):
try:
unified_job = UnifiedJob.objects.get(id=job_id)
except UnifiedJob.DoesNotExist:
print("UnifiedJob {} not found.".format(job_id))
sys.exit(1)
return unified_job.get_real_instance()
def sleep(self, seconds):
time.sleep(seconds)
def replay_elapsed(self):
return (self.now() - self.replay_start)
def recording_elapsed(self, created):
return (created - self.recording_start)
def replay_offset(self, created, speed):
return self.replay_elapsed().total_seconds() - (self.recording_elapsed(created).total_seconds() * (1.0 / speed))
def get_job_events(self, job):
if type(job) is Job:
job_events = job.job_events.order_by('created')
elif type(job) is AdHocCommand:
job_events = job.ad_hoc_command_events.order_by('created')
elif type(job) is ProjectUpdate:
job_events = job.project_update_events.order_by('created')
elif type(job) is InventoryUpdate:
job_events = job.inventory_update_events.order_by('created')
elif type(job) is SystemJob:
job_events = job.system_job_events.order_by('created')
count = job_events.count()
if count == 0:
raise RuntimeError("No events for job id {}".format(job.id))
return job_events, count
def get_serializer(self, job):
if type(job) is Job:
return JobEventWebSocketSerializer
elif type(job) is AdHocCommand:
return AdHocCommandEventWebSocketSerializer
elif type(job) is ProjectUpdate:
return ProjectUpdateEventWebSocketSerializer
elif type(job) is InventoryUpdate:
return InventoryUpdateEventWebSocketSerializer
elif type(job) is SystemJob:
return SystemJobEventWebSocketSerializer
else:
raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
def run(self, job_id, speed=1.0, verbosity=0, skip_range=[], random_seed=0, final_status_delay=0, debug=False):
stats = {
'events_ontime': {
'total': 0,
'percentage': 0,
},
'events_late': {
'total': 0,
'percentage': 0,
'lateness_total': 0,
'lateness_average': 0,
},
'events_total': 0,
'events_distance_total': 0,
'events_distance_average': 0,
'recording_start': 0,
'recording_end': 0,
'recording_duration': 0,
'replay_start': 0,
'replay_end': 0,
'replay_duration': 0,
}
try:
job = self.get_job(job_id)
job_events, job_event_count = self.get_job_events(job)
serializer = self.get_serializer(job)
except RuntimeError as e:
print("{}".format(e.message))
sys.exit(1)
je_previous = None
self.emit_job_status(job, 'pending')
self.emit_job_status(job, 'waiting')
self.emit_job_status(job, 'running')
finish_status_index = self.determine_job_event_finish_status_index(job_event_count, random_seed)
for n, je_current in enumerate(job_events):
if je_current.counter in skip_range:
continue
if debug:
raw_input("{} of {}:".format(n, job_event_count))
if not je_previous:
stats['recording_start'] = je_current.created
self.start(je_current.created)
stats['replay_start'] = self.replay_start
je_previous = je_current
je_serialized = serializer(je_current).data
emit_channel_notification('{}-{}'.format(je_serialized['group_name'], job.id), je_serialized)
replay_offset = self.replay_offset(je_previous.created, speed)
recording_diff = (je_current.created - je_previous.created).total_seconds() * (1.0 / speed)
stats['events_distance_total'] += recording_diff
if verbosity >= 3:
print("recording: next job in {} seconds".format(recording_diff))
if replay_offset >= 0:
replay_diff = recording_diff - replay_offset
if replay_diff > 0:
stats['events_ontime']['total'] += 1
if verbosity >= 3:
print("\treplay: sleep for {} seconds".format(replay_diff))
self.sleep(replay_diff)
else:
stats['events_late']['total'] += 1
stats['events_late']['lateness_total'] += (replay_diff * -1)
if verbosity >= 3:
print("\treplay: too far behind to sleep {} seconds".format(replay_diff))
else:
replay_offset = self.replay_offset(je_current.created, speed)
stats['events_late']['lateness_total'] += (replay_offset * -1)
stats['events_late']['total'] += 1
if verbosity >= 3:
print("\treplay: behind by {} seconds".format(replay_offset))
stats['events_total'] += 1
je_previous = je_current
if n == finish_status_index:
if final_status_delay != 0:
self.sleep(final_status_delay)
self.emit_job_status(job, job.status)
if stats['events_total'] > 2:
stats['replay_end'] = self.now()
stats['replay_duration'] = (stats['replay_end'] - stats['replay_start']).total_seconds()
stats['replay_start'] = stats['replay_start'].isoformat()
stats['replay_end'] = stats['replay_end'].isoformat()
stats['recording_end'] = je_current.created
stats['recording_duration'] = (stats['recording_end'] - stats['recording_start']).total_seconds()
stats['recording_start'] = stats['recording_start'].isoformat()
stats['recording_end'] = stats['recording_end'].isoformat()
stats['events_ontime']['percentage'] = (stats['events_ontime']['total'] / float(stats['events_total'])) * 100.00
stats['events_late']['percentage'] = (stats['events_late']['total'] / float(stats['events_total'])) * 100.00
stats['events_distance_average'] = stats['events_distance_total'] / stats['events_total']
stats['events_late']['lateness_average'] = stats['events_late']['lateness_total'] / stats['events_late']['total']
else:
stats = {'events_total': stats['events_total']}
if verbosity >= 2:
print(json.dumps(stats, indent=4, sort_keys=True))
class Command(BaseCommand):
help = 'Replay job events over websockets ordered by created on date.'
def _parse_slice_range(self, slice_arg):
slice_arg = tuple([int(n) for n in slice_arg.split(':')])
slice_obj = slice(*slice_arg)
start = slice_obj.start or 0
stop = slice_obj.stop or -1
step = slice_obj.step or 1
return range(start, stop, step)
def add_arguments(self, parser):
parser.add_argument('--job_id', dest='job_id', type=int, metavar='j',
help='Id of the job to replay (job or adhoc)')
parser.add_argument('--speed', dest='speed', type=float, metavar='s',
help='Speedup factor.')
parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k',
default='0:-1:1', help='Range of events to skip')
parser.add_argument('--random-seed', dest='random_seed', type=int, metavar='r',
default=0, help='Random number generator seed to use when determining job_event index to emit final job status')
parser.add_argument('--final-status-delay', dest='final_status_delay', type=float, metavar='f',
default=0, help='Delay between event and final status emit')
parser.add_argument('--debug', dest='debug', type=bool, metavar='d',
default=False, help='Enable step mode to control emission of job events one at a time.')
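    # Editor's sketch (not part of the original command; values are
    # hypothetical), based on the arguments declared above. --verbosity is
    # Django's built-in management-command option:
    #
    #   python manage.py replay_job_events --job_id=42 --speed=2 --verbosity=3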
def handle(self, *args, **options):
job_id = options.get('job_id')
speed = options.get('speed') or 1
verbosity = options.get('verbosity') or 0
random_seed = options.get('random_seed')
final_status_delay = options.get('final_status_delay')
debug = options.get('debug')
skip = self._parse_slice_range(options.get('skip_range'))
replayer = ReplayJobEvents()
replayer.run(job_id, speed=speed, verbosity=verbosity, skip_range=skip, random_seed=random_seed,
final_status_delay=final_status_delay, debug=debug)
| apache-2.0 | 3,277,444,997,338,887,700 | 39.290076 | 140 | 0.583081 | false |
robk5uj/invenio | modules/webaccess/lib/access_control_config.py | 1 | 21700 | ## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control Config. """
__revision__ = \
"$Id$"
# pylint: disable=C0301
from invenio.config import CFG_SITE_NAME, CFG_SITE_URL, CFG_SITE_LANG, \
CFG_SITE_SECURE_URL, CFG_SITE_SUPPORT_EMAIL, CFG_CERN_SITE, \
CFG_OPENAIRE_SITE, CFG_SITE_RECORD, CFG_INSPIRE_SITE
from invenio.messages import gettext_set_language
class InvenioWebAccessFireroleError(Exception):
"""Just an Exception to discover if it's a FireRole problem"""
pass
# VALUES TO BE EXPORTED
# CURRENTLY USED BY THE FILES access_control_engine.py access_control_admin.py webaccessadmin_lib.py
# name of the role giving superadmin rights
SUPERADMINROLE = 'superadmin'
# name of the webaccess webadmin role
WEBACCESSADMINROLE = 'webaccessadmin'
# name of the action allowing roles to access the web administrator interface
WEBACCESSACTION = 'cfgwebaccess'
# name of the action allowing roles to access the web administrator interface
VIEWRESTRCOLL = 'viewrestrcoll'
# name of the action allowing roles to delegate the rights to other roles
# ex: libraryadmin to delegate libraryworker
DELEGATEADDUSERROLE = 'accdelegaterole'
# max number of users to display in the drop down selects
MAXSELECTUSERS = 25
# max number of users to display in a page (mainly for user area)
MAXPAGEUSERS = 25
# default role definition, source:
CFG_ACC_EMPTY_ROLE_DEFINITION_SRC = 'deny all'
# default role definition, compiled:
CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ = (False, ())
# default role definition, compiled and serialized:
CFG_ACC_EMPTY_ROLE_DEFINITION_SER = None
# List of tags containing (multiple) emails of users who should authorize
# to access the corresponding record regardless of collection restrictions.
if CFG_CERN_SITE:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['859__f', '270__m', '506__m']
else:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['8560_f']
# Use external source for access control?
# CFG_EXTERNAL_AUTHENTICATION -- this is a dictionary with the enabled login method.
# The key is the name of the login method and the value is an instance of
# of the login method (see /help/admin/webaccess-admin-guide#5). Set the value
# to None if you wish to use the local Invenio authentication method.
# CFG_EXTERNAL_AUTH_DEFAULT -- set this to the key in CFG_EXTERNAL_AUTHENTICATION
# that should be considered as default login method
# CFG_EXTERNAL_AUTH_USING_SSO -- set this to the login method name of an SSO
# login method, if any, otherwise set this to None.
# CFG_EXTERNAL_AUTH_LOGOUT_SSO -- if CFG_EXTERNAL_AUTH_USING_SSO was not None
# set this to the URL that should be contacted to perform an SSO logout
from invenio.external_authentication_robot import ExternalAuthRobot
if CFG_CERN_SITE:
import external_authentication_sso as ea_sso
CFG_EXTERNAL_AUTH_USING_SSO = "CERN"
CFG_EXTERNAL_AUTH_DEFAULT = CFG_EXTERNAL_AUTH_USING_SSO
CFG_EXTERNAL_AUTH_LOGOUT_SSO = 'https://login.cern.ch/adfs/ls/?wa=wsignout1.0'
CFG_EXTERNAL_AUTHENTICATION = {
CFG_EXTERNAL_AUTH_USING_SSO : ea_sso.ExternalAuthSSO(),
}
elif CFG_OPENAIRE_SITE:
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
"OpenAIRE": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False),
"ZOpenAIRE": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True)
}
elif CFG_INSPIRE_SITE:
# INSPIRE specific robot configuration
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
"Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False, check_user_ip=2),
"ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True, check_user_ip=2)
}
else:
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
"Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False),
"ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True)
}
## If using SSO, this is the number of seconds after which the keep-alive
## SSO handler is pinged again to provide fresh SSO information.
CFG_EXTERNAL_AUTH_SSO_REFRESH = 600
# default data for the add_default_settings function
# Note: by default the definition is set to deny any. This won't be a problem
# because userid directly connected with roles will still be allowed.
# roles
# name description definition
DEF_ROLES = ((SUPERADMINROLE, 'superuser with all rights', 'deny any'),
(WEBACCESSADMINROLE, 'WebAccess administrator', 'deny any'),
('anyuser', 'Any user', 'allow any'),
('basketusers', 'Users who can use baskets', 'allow any'),
('loanusers', 'Users who can use loans', 'allow any'),
('groupusers', 'Users who can use groups', 'allow any'),
('alertusers', 'Users who can use alerts', 'allow any'),
('messageusers', 'Users who can use messages', 'allow any'),
('holdingsusers', 'Users who can view holdings', 'allow any'),
('statisticsusers', 'Users who can view statistics', 'allow any'),
('claimpaperusers', 'Users who can perform changes to their own paper attributions without the need for an operator\'s approval', 'allow any'),
('claimpaperoperators', 'Users who can perform changes to _all_ paper attributions without the need for an operator\'s approval', 'deny any'),
('paperclaimviewers', 'Users who can view "claim my paper" facilities.', 'allow all'),
('paperattributionviewers', 'Users who can view "attribute this paper" facilities', 'allow all'),
('paperattributionlinkviewers', 'Users who can see attribution links in the search', 'allow all'),
)
# Demo site roles
DEF_DEMO_ROLES = (('photocurator', 'Photo collection curator', 'deny any'),
('thesesviewer', 'Theses viewer', 'allow group "Theses viewers"'),
('thesescurator', 'Theses collection curator', 'deny any'),
('swordcurator', 'BibSword client curator', 'deny any'),
('referee_DEMOBOO_*', 'Book collection curator', 'deny any'),
('restrictedpicturesviewer', 'Restricted pictures viewer', 'deny any'),
('curator', 'Curator', 'deny any'),
('basketusers', 'Users who can use baskets', 'deny email "[email protected]"\nallow any'),
('claimpaperusers', 'Users who can perform changes to their own paper attributions without the need for an operator\'s approval', 'deny email "[email protected]"\nallow any'),
('submit_DEMOJRN_*', 'Users who can submit (and modify) "Atlantis Times" articles', 'deny all'),
('atlantiseditor', 'Users who can configure "Atlantis Times" journal', 'deny all'),
('commentmoderator', 'Users who can moderate comments', 'deny all'),
('poetrycommentreader', 'Users who can view comments in Poetry collection', 'deny all'))
DEF_DEMO_USER_ROLES = (('[email protected]', 'thesesviewer'),
('[email protected]', 'swordcurator'),
('[email protected]', 'claimpaperusers'),
('[email protected]', 'referee_DEMOBOO_*'),
('[email protected]', 'curator'),
('[email protected]', 'restrictedpicturesviewer'),
('[email protected]', 'swordcurator'),
('[email protected]', 'thesescurator'),
('[email protected]', 'restrictedpicturesviewer'),
('[email protected]', 'photocurator'),
('[email protected]', 'submit_DEMOJRN_*'),
('[email protected]', 'submit_DEMOJRN_*'),
('[email protected]', 'atlantiseditor'),
('[email protected]', 'poetrycommentreader'))
# users
# list of e-mail addresses
DEF_USERS = []
# actions
# name desc allowedkeywords optional
DEF_ACTIONS = (
('cfgwebsearch', 'configure WebSearch', '', 'no'),
('cfgbibformat', 'configure BibFormat', '', 'no'),
('cfgbibknowledge', 'configure BibKnowledge', '', 'no'),
('cfgwebsubmit', 'configure WebSubmit', '', 'no'),
('cfgbibrank', 'configure BibRank', '', 'no'),
('cfgwebcomment', 'configure WebComment', '', 'no'),
('cfgoaiharvest', 'configure OAI Harvest', '', 'no'),
('cfgoairepository', 'configure OAI Repository', '', 'no'),
('cfgbibindex', 'configure BibIndex', '', 'no'),
('cfgbibexport', 'configure BibExport', '', 'no'),
('cfgrobotkeys', 'configure Robot keys', 'login_method,robot', 'yes'),
('runbibindex', 'run BibIndex', '', 'no'),
('runbibupload', 'run BibUpload', '', 'no'),
('runwebcoll', 'run webcoll', 'collection', 'yes'),
('runbibformat', 'run BibFormat', 'format', 'yes'),
('runbibclassify', 'run BibClassify', 'taxonomy', 'yes'),
('runbibtaskex', 'run BibTaskEx example', '', 'no'),
('runbibrank', 'run BibRank', '', 'no'),
('runoaiharvest', 'run oaiharvest task', '', 'no'),
('runoairepository', 'run oairepositoryupdater task', '', 'no'),
('runbibedit', 'run Record Editor', 'collection', 'yes'),
('runbibeditmulti', 'run Multi-Record Editor', '', 'no'),
('runbibdocfile', 'run Document File Manager', '', 'no'),
('runbibmerge', 'run Record Merger', '', 'no'),
('runbibswordclient', 'run BibSword client', '', 'no'),
('runwebstatadmin', 'run WebStadAdmin', '', 'no'),
('runinveniogc', 'run InvenioGC', '', 'no'),
('runbibexport', 'run BibExport', '', 'no'),
('referee', 'referee document type doctype/category categ', 'doctype,categ', 'yes'),
('submit', 'use webSubmit', 'doctype,act,categ', 'yes'),
('viewrestrdoc', 'view restricted document', 'status', 'no'),
('viewrestrcomment', 'view restricted comment', 'status', 'no'),
(WEBACCESSACTION, 'configure WebAccess', '', 'no'),
(DELEGATEADDUSERROLE, 'delegate subroles inside WebAccess', 'role', 'no'),
(VIEWRESTRCOLL, 'view restricted collection', 'collection', 'no'),
('cfgwebjournal', 'configure WebJournal', 'name,with_editor_rights', 'no'),
('viewcomment', 'view comments', 'collection', 'no'),
('sendcomment', 'send comments', 'collection', 'no'),
('attachcommentfile', 'attach files to comments', 'collection', 'no'),
('attachsubmissionfile', 'upload files to drop box during submission', '', 'no'),
('cfgbibexport', 'configure BibExport', '', 'no'),
('runbibexport', 'run BibExport', '', 'no'),
('usebaskets', 'use baskets', '', 'no'),
('useloans', 'use loans', '', 'no'),
('usegroups', 'use groups', '', 'no'),
('usealerts', 'use alerts', '', 'no'),
('usemessages', 'use messages', '', 'no'),
('viewholdings', 'view holdings', 'collection', 'yes'),
('viewstatistics', 'view statistics', 'collection', 'yes'),
('runbibcirculation', 'run BibCirculation', '', 'no'),
('moderatecomments', 'moderate comments', 'collection', 'no'),
('runbatchuploader', 'run batchuploader', 'collection', 'yes'),
('runbibtasklet', 'run BibTaskLet', '', 'no'),
('claimpaper_view_pid_universe', 'View the Claim Paper interface', '', 'no'),
('claimpaper_claim_own_papers', 'Clam papers to his own personID', '', 'no'),
('claimpaper_claim_others_papers', 'Claim papers for others', '', 'no'),
('claimpaper_change_own_data', 'Change data associated to his own person ID', '', 'no'),
('claimpaper_change_others_data', 'Change data of any person ID', '', 'no'),
('runbibtasklet', 'run BibTaskLet', '', 'no')
)
# Default authorizations
# role action arguments
DEF_AUTHS = (('basketusers', 'usebaskets', {}),
('loanusers', 'useloans', {}),
('groupusers', 'usegroups', {}),
('alertusers', 'usealerts', {}),
('messageusers', 'usemessages', {}),
('holdingsusers', 'viewholdings', {}),
('statisticsusers', 'viewstatistics', {}),
('claimpaperusers', 'claimpaper_view_pid_universe', {}),
('claimpaperoperators', 'claimpaper_view_pid_universe', {}),
('claimpaperusers', 'claimpaper_claim_own_papers', {}),
('claimpaperoperators', 'claimpaper_claim_own_papers', {}),
('claimpaperoperators', 'claimpaper_claim_others_papers', {}),
('claimpaperusers', 'claimpaper_change_own_data', {}),
('claimpaperoperators', 'claimpaper_change_own_data', {}),
('claimpaperoperators', 'claimpaper_change_others_data', {}),
)
# Demo site authorizations
# role action arguments
DEF_DEMO_AUTHS = (
('photocurator', 'runwebcoll', {'collection': 'Pictures'}),
('restrictedpicturesviewer', 'viewrestrdoc', {'status': 'restricted_picture'}),
('thesesviewer', VIEWRESTRCOLL, {'collection': 'Theses'}),
('referee_DEMOBOO_*', 'referee', {'doctype': 'DEMOBOO', 'categ': '*'}),
('curator', 'cfgbibknowledge', {}),
('curator', 'runbibedit', {}),
('curator', 'runbibeditmulti', {}),
('curator', 'runbibmerge', {}),
('swordcurator', 'runbibswordclient', {}),
('thesescurator', 'runbibedit', {'collection': 'Theses'}),
('thesescurator', VIEWRESTRCOLL, {'collection': 'Theses'}),
('photocurator', 'runbibedit', {'collection': 'Pictures'}),
('referee_DEMOBOO_*', 'runbibedit', {'collection': 'Books'}),
('submit_DEMOJRN_*', 'submit', {'doctype': 'DEMOJRN', 'act': 'SBI', 'categ': '*'}),
('submit_DEMOJRN_*', 'submit', {'doctype': 'DEMOJRN', 'act': 'MBI', 'categ': '*'}),
('submit_DEMOJRN_*', 'cfgwebjournal', {'name': 'AtlantisTimes', 'with_editor_rights': 'no'}),
('atlantiseditor', 'cfgwebjournal', {'name': 'AtlantisTimes', 'with_editor_rights': 'yes'}),
('referee_DEMOBOO_*', 'runbatchuploader', {'collection': 'Books'}),
('poetrycommentreader', 'viewcomment', {'collection': 'Poetry'}),
('atlantiseditor', VIEWRESTRCOLL, {'collection': 'Atlantis Times Drafts'})
)
_ = gettext_set_language(CFG_SITE_LANG)
# Activities (i.e. actions) for which exists an administrative web interface.
CFG_ACC_ACTIVITIES_URLS = {
'runbibedit' : (_("Run Record Editor"), "%s/%s/edit/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibeditmulti' : (_("Run Multi-Record Editor"), "%s/%s/multiedit/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibdocfile' : (_("Run Document File Manager"), "%s/submit/managedocfiles?ln=%%s" % CFG_SITE_URL),
'runbibmerge' : (_("Run Record Merger"), "%s/%s/merge/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibswordclient' : (_("Run BibSword client"), "%s/bibsword/?ln=%%s" % CFG_SITE_URL),
'cfgbibknowledge' : (_("Configure BibKnowledge"), "%s/kb?ln=%%s" % CFG_SITE_URL),
'cfgbibformat' : (_("Configure BibFormat"), "%s/admin/bibformat/bibformatadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoaiharvest' : (_("Configure OAI Harvest"), "%s/admin/bibharvest/oaiharvestadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoairepository' : (_("Configure OAI Repository"), "%s/admin/bibharvest/oairepositoryadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibindex' : (_("Configure BibIndex"), "%s/admin/bibindex/bibindexadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibrank' : (_("Configure BibRank"), "%s/admin/bibrank/bibrankadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebaccess' : (_("Configure WebAccess"), "%s/admin/webaccess/webaccessadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebjournal' : (_("Configure WebJournal"), "%s/admin/webjournal/webjournaladmin.py?ln=%%s" % CFG_SITE_URL),
'runbibcirculation' : (_("Run BibCirculation"), "%s/admin/bibcirculation/bibcirculationadmin.py?ln=%%s" % CFG_SITE_URL),
'runbatchuploader' : (_("Run Batch Uploader"), "%s/batchuploader/metadata?ln=%%s" % CFG_SITE_URL),
'claimpaper_claim_others_papers' : (_("Run Person/Author Manager"), "%s/person/search?ln=%%s" % CFG_SITE_URL)
}
CFG_WEBACCESS_MSGS = {
0: 'Try to <a href="%s/youraccount/login?referer=%%s">login</a> with another account.' % (CFG_SITE_SECURE_URL),
1: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
2: '<br />If you have any questions, please write to <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
3: 'Guest users are not allowed, please <a href="%s/youraccount/login">login</a>.' % CFG_SITE_SECURE_URL,
4: 'The site is temporarily closed for maintenance. Please come back soon.',
5: 'Authorization failure',
6: '%s temporarily closed' % CFG_SITE_NAME,
7: 'This functionality is temporarily closed due to server maintenance. Please use only the search engine in the meantime.',
8: 'Functionality temporarily closed'
}
CFG_WEBACCESS_WARNING_MSGS = {
0: 'Authorization granted',
1: 'You are not authorized to perform this action.',
2: 'You are not authorized to perform any action.',
3: 'The action %s does not exist.',
4: 'Unexpected error occurred.',
5: 'Missing mandatory keyword argument(s) for this action.',
6: 'Guest accounts are not authorized to perform this action.',
7: 'Not enough arguments, user ID and action name required.',
8: 'Incorrect keyword argument(s) for this action.',
9: """Account '%s' is not yet activated.""",
10: """You were not authorized by the authentication method '%s'.""",
11: """The selected login method '%s' is not the default method for this account, please try another one.""",
12: """Selected login method '%s' does not exist.""",
13: """Could not register '%s' account.""",
14: """Could not login using '%s', because this user is unknown.""",
15: """Could not login using your '%s' account, because you have introduced a wrong password.""",
16: """External authentication troubles using '%s' (maybe temporary network problems).""",
17: """You have not yet confirmed the email address for the '%s' authentication method.""",
18: """The administrator has not yet activated your account for the '%s' authentication method.""",
19: """The site is having troubles in sending you an email for confirming your email address. The error has been logged and will be taken care of as soon as possible.""",
20: """No roles are authorized to perform action %s with the given parameters."""
}
| gpl-2.0 | 5,986,359,662,859,588,000 | 59.955056 | 201 | 0.596359 | false |
rainwoodman/fastpm-python | fastpm/force/lpt.py | 1 | 1947 | from . import kernels as FKN
import numpy
def lpt1(dlin_k, q, resampler='cic'):
""" Run first order LPT on linear density field, returns displacements of particles
reading out at q. The result has the same dtype as q.
"""
basepm = dlin_k.pm
ndim = len(basepm.Nmesh)
delta_k = basepm.create('complex')
layout = basepm.decompose(q)
local_q = layout.exchange(q)
source = numpy.zeros((len(q), ndim), dtype=q.dtype)
for d in range(len(basepm.Nmesh)):
disp = dlin_k.apply(FKN.laplace) \
.apply(FKN.gradient(d, order=1), out=Ellipsis) \
.c2r(out=Ellipsis)
local_disp = disp.readout(local_q, resampler=resampler)
source[..., d] = layout.gather(local_disp)
return source
def lpt2source(dlin_k):
""" Generate the second order LPT source term. """
source = dlin_k.pm.create('real')
source[...] = 0
if dlin_k.ndim != 3: # only for 3d
return source.r2c(out=Ellipsis)
D1 = [1, 2, 0]
D2 = [2, 0, 1]
phi_ii = []
    # diagonal terms
for d in range(dlin_k.ndim):
phi_ii_d = dlin_k.apply(FKN.laplace) \
.apply(FKN.gradient(d, order=1), out=Ellipsis) \
.apply(FKN.gradient(d, order=1), out=Ellipsis) \
.c2r(out=Ellipsis)
phi_ii.append(phi_ii_d)
for d in range(3):
source[...] += phi_ii[D1[d]].value * phi_ii[D2[d]].value
# free memory
phi_ii = []
phi_ij = []
# off-diag terms
for d in range(dlin_k.ndim):
phi_ij_d = dlin_k.apply(FKN.laplace) \
.apply(FKN.gradient(D1[d], order=1), out=Ellipsis) \
.apply(FKN.gradient(D2[d], order=1), out=Ellipsis) \
.c2r(out=Ellipsis)
source[...] -= phi_ij_d[...] ** 2
# this ensures x = x0 + dx1(t) + d2(t) for 2LPT
source[...] *= 3.0 / 7
return source.r2c(out=Ellipsis)
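# Editor's note (a sketch, not part of the original module): callers typically
# combine the two routines above to build 2LPT displacements, e.g.
#
#   dx1 = D1 * lpt1(dlin_k, q)               # Zel'dovich (1LPT) displacement
#   dx2 = D2 * lpt1(lpt2source(dlin_k), q)   # second-order correction
#
# where D1 and D2 are growth factors supplied by the caller and q are the
# unperturbed particle positions.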
| gpl-3.0 | 85,663,300,217,380,750 | 28.059701 | 87 | 0.548023 | false |
huogerac/cookiecutter-django-magic-content | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/apps/core/management/commands/backup_site.py | 1 | 1788 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from optparse import make_option
from fabric.colors import green
from django.core.management.base import BaseCommand
from magicbackup.helpers import MagicBackup
class Command(BaseCommand):
help = 'Backup a Site'
option_list = BaseCommand.option_list + (
make_option('--backup-name', action='store', dest='backup_name',
type='string', help='A name for backup folder'),
make_option('--site-id', action='store', dest='site_id',
type='int', help='The site ID'),
)
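    # Editor's sketch (not part of the original command; values are
    # hypothetical), using the two required options declared above:
    #
    #   python manage.py backup_site --backup-name=weekly --site-id=1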
def handle(self, *args, **options):
backup_name = options['backup_name']
site_id = options['site_id']
if not backup_name or not site_id:
raise Exception('backup_name or site_id is missing')
models = ["magiccontent.Widget",
"magiccontent.Area",
"magiccontent.SiteLink",
"magicgallery.Gallery",
"magicgallery.GalleryItem",
"textimagecontent.TextImageContent",
"formattedtextimagecontent.FormattedTextImageContent",
"iconcontent.IconContent",
"background.BackgroundArea",
"dividertextcontent.DividerTextContent",
"imagecontent.ImageContent",
"magiccontentnavigation.MenuItem",
"core.SitePreferences",
"magicthemes.ThemePreferences", ]
backup = MagicBackup().site(site_id).save_as(backup_name)
for model in models:
            print(green('backing up {0}...'.format(model)))
backup.model(model).backup()
print(green('new backup created at {0}'.format(backup.target_dir)))
| bsd-3-clause | -5,535,139,425,958,014,000 | 35.489796 | 75 | 0.587248 | false |
wxgeo/geophar | wxgeometrie/sympy/discrete/convolution.py | 2 | 7892 | """
Convolution (using FFT, NTT, FWHT), Subset Convolution,
Covering Product, Intersecting Product
"""
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import range, as_int
from sympy.core.function import expand_mul
from sympy.discrete.transforms import (
fft, ifft, ntt, intt, fwht, ifwht)
def convolution(a, b, **hints):
"""
Performs convolution by determining the type of desired
convolution using hints.
If no hints are given, linear convolution is performed using
FFT.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
hints : dict
Specifies the type of convolution to be performed.
The following hints can be given as keyword arguments.
dps : Integer
Specifies the number of decimal digits for precision for
performing FFT on the sequence.
prime : Integer
Prime modulus of the form (m*2**k + 1) to be used for
performing NTT on the sequence.
cycle : Integer
Specifies the length for doing cyclic convolution.
dyadic : bool
Identifies the convolution type as dyadic (XOR)
convolution, which is performed using FWHT.
Examples
========
>>> from sympy import convolution, symbols, S, I
>>> convolution([1 + 2*I, 4 + 3*I], [S(5)/4, 6], dps=3)
[1.25 + 2.5*I, 11.0 + 15.8*I, 24.0 + 18.0*I]
>>> convolution([1, 2, 3], [4, 5, 6], cycle=3)
[31, 31, 28]
>>> convolution([111, 777], [888, 444], prime=19*2**10 + 1)
[1283, 19351, 14219]
>>> convolution([111, 777], [888, 444], prime=19*2**10 + 1, cycle=2)
[15502, 19351]
>>> u, v, x, y, z = symbols('u v x y z')
>>> convolution([u, v], [x, y, z], dyadic=True)
[u*x + v*y, u*y + v*x, u*z, v*z]
"""
fft = hints.pop('fft', None)
dps = hints.pop('dps', None)
p = hints.pop('prime', None)
c = as_int(hints.pop('cycle', 0))
dyadic = hints.pop('dyadic', None)
if c < 0:
raise ValueError("The length for cyclic convolution must be non-negative")
fft = True if fft else None
dyadic = True if dyadic else None
if sum(x is not None for x in (p, dps, dyadic)) > 1 or \
sum(x is not None for x in (fft, dyadic)) > 1:
raise TypeError("Ambiguity in determining the convolution type")
if p is not None:
ls = convolution_ntt(a, b, prime=p)
return ls if not c else [sum(ls[i::c]) % p for i in range(c)]
elif hints.pop('ntt', False):
raise TypeError("Prime modulus must be specified for performing NTT")
if dyadic:
ls = convolution_fwht(a, b)
else:
ls = convolution_fft(a, b, dps=dps)
return ls if not c else [sum(ls[i::c]) for i in range(c)]
#----------------------------------------------------------------------------#
# #
# Convolution for Complex domain #
# #
#----------------------------------------------------------------------------#
def convolution_fft(a, b, dps=None):
"""
Performs linear convolution using Fast Fourier Transform.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
dps : Integer
Specifies the number of decimal digits for precision.
Examples
========
>>> from sympy import S, I
>>> from sympy.discrete.convolution import convolution_fft
>>> convolution_fft([2, 3], [4, 5])
[8, 22, 15]
>>> convolution_fft([2, 5], [6, 7, 3])
[12, 44, 41, 15]
>>> convolution_fft([1 + 2*I, 4 + 3*I], [S(5)/4, 6])
[5/4 + 5*I/2, 11 + 63*I/4, 24 + 18*I]
References
==========
.. [1] https://en.wikipedia.org/wiki/Convolution_theorem
    .. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general)
"""
a, b = a[:], b[:]
n = m = len(a) + len(b) - 1 # convolution size
if n > 0 and n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = fft(a, dps), fft(b, dps)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = ifft(a, dps)[:m]
return a
#----------------------------------------------------------------------------#
# #
# Convolution for GF(p) #
# #
#----------------------------------------------------------------------------#
def convolution_ntt(a, b, prime):
"""
Performs linear convolution using Number Theoretic Transform.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
prime : Integer
Prime modulus of the form (m*2**k + 1) to be used for performing
NTT on the sequence.
Examples
========
>>> from sympy.discrete.convolution import convolution_ntt
>>> convolution_ntt([2, 3], [4, 5], prime=19*2**10 + 1)
[8, 22, 15]
>>> convolution_ntt([2, 5], [6, 7, 3], prime=19*2**10 + 1)
[12, 44, 41, 15]
>>> convolution_ntt([333, 555], [222, 666], prime=19*2**10 + 1)
[15555, 14219, 19404]
References
==========
.. [1] https://en.wikipedia.org/wiki/Convolution_theorem
.. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general)
"""
a, b, p = a[:], b[:], as_int(prime)
n = m = len(a) + len(b) - 1 # convolution size
if n > 0 and n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [0]*(n - len(a))
b += [0]*(n - len(b))
a, b = ntt(a, p), ntt(b, p)
a = [x*y % p for x, y in zip(a, b)]
a = intt(a, p)[:m]
return a
#----------------------------------------------------------------------------#
# #
# Convolution for 2**n-group #
# #
#----------------------------------------------------------------------------#
def convolution_fwht(a, b):
"""
Performs dyadic (XOR) convolution using Fast Walsh Hadamard Transform.
The convolution is automatically padded to the right with zeros, as the
radix 2 FWHT requires the number of sample points to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
Examples
========
>>> from sympy import symbols, S, I
>>> from sympy.discrete.convolution import convolution_fwht
>>> u, v, x, y = symbols('u v x y')
>>> convolution_fwht([u, v], [x, y])
[u*x + v*y, u*y + v*x]
>>> convolution_fwht([2, 3], [4, 5])
[23, 22]
>>> convolution_fwht([2, 5 + 4*I, 7], [6*I, 7, 3 + 4*I])
[56 + 68*I, -10 + 30*I, 6 + 50*I, 48 + 32*I]
>>> convolution_fwht([S(33)/7, S(55)/6, S(7)/4], [S(2)/3, 5])
[2057/42, 1870/63, 7/6, 35/4]
References
==========
.. [1] https://researchgate.net/publication/26511536_Walsh_-_Hadamard_Transformation_of_a_Convolution
.. [2] https://en.wikipedia.org/wiki/Hadamard_transform
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = fwht(a), fwht(b)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = ifwht(a)
return a
| gpl-2.0 | -200,314,091,119,685,950 | 28.33829 | 105 | 0.484161 | false |
fernandog/Medusa | ext/sqlalchemy/dialects/sybase/pyodbc.py | 1 | 2102 | # sybase/pyodbc.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: sybase+pyodbc://<username>:<password>@<dsnname>\
[/<database>]
:url: http://pypi.python.org/pypi/pyodbc/
Unicode Support
---------------
The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
NCHAR
NVARCHAR
TEXT
VARCHAR
Currently *not* supported are::
UNICHAR
UNITEXT
UNIVARCHAR
"""
from sqlalchemy.dialects.sybase.base import SybaseDialect,\
SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, processors
import decimal
class _SybNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < -6 into floats.
It's not yet known how to get decimals with many
significant digits or very large adjusted() into Sybase
via pyodbc.
"""
def bind_processor(self, dialect):
super_process = super(_SybNumeric_pyodbc, self).\
bind_processor(dialect)
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
if value.adjusted() < -6:
return processors.to_float(value)
if super_process:
return super_process(value)
else:
return value
return process
class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
def set_ddl_autocommit(self, connection, value):
if value:
connection.autocommit = True
else:
connection.autocommit = False
class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
execution_ctx_cls = SybaseExecutionContext_pyodbc
colspecs = {
sqltypes.Numeric: _SybNumeric_pyodbc,
}
dialect = SybaseDialect_pyodbc
| gpl-3.0 | 1,979,549,040,717,915,400 | 23.44186 | 69 | 0.664129 | false |
rtorres90/learning-python-package-system | packages/stores/migrations/0001_initial.py | 1 | 1458 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-13 02:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='EmployeeTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Store',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('location', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='employeetitle',
name='store',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stores.Store'),
),
migrations.AddField(
model_name='employeetitle',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| apache-2.0 | -387,377,259,657,424,900 | 32.136364 | 114 | 0.585734 | false |
cloudaice/simple-data | misc/virtenv/share/doc/pycurl/tests/reset_test.py | 1 | 2088 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
try:
import urllib.parse as urllib_parse
except ImportError:
import urllib as urllib_parse
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class ResetTest(unittest.TestCase):
# XXX this test was broken when it was test_reset.py
def skip_reset(self):
outf = util.StringIO()
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEFUNCTION, outf.write)
eh.setopt(pycurl.URL, 'http://localhost:8380/success')
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print("Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em))
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print("Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode))
raise RuntimeError
else:
print("Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL)))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
| mit | 1,887,134,179,056,732,400 | 28.408451 | 68 | 0.501437 | false |
ac769/continuum_technologies | read_raw_sensor_data.py | 1 | 1150 | import numpy as np
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt
from clean_bad_trace import clean_bad_trace
file = open("processing/save_to_file/data.txt")
trace = file.readlines()
trace_clean = clean_bad_trace(trace)
print(trace_clean)
plt.plot(trace_clean, label='Noisy signal')
plt.show()
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
if __name__ == "__main__":
# Sample rate and desired cutoff frequencies (in Hz).
fs = 5000.0
lowcut = 500.0
highcut = 1250.0
# Filter our noisy signal.
y = butter_bandpass_filter(trace_clean, lowcut, highcut, fs, order=6)
    plt.plot(y, label='Filtered signal')
    plt.xlabel('sample index')
plt.grid(True)
plt.axis('tight')
plt.legend(loc='upper left')
plt.show() | mit | 7,555,190,031,029,722,000 | 24.577778 | 73 | 0.648696 | false |
AndersenLab/bam-toolbox | bam/coverage.py | 1 | 9134 | #! /usr/bin/env python
"""
usage:
bam coverage <bam> [options] [--mtchr=<mtchr>]
bam coverage <bam> [options] <chrom:start-end>...
bam coverage <bam> [options] --window=<size>
bam coverage <bam> [options] --regions=<gff/bed>
options:
-h --help Show this screen.
--version Show version.
--header print header
"""
from docopt import docopt
from collections import OrderedDict
from clint.textui import colored, indent, puts_err
import os
import re
from subprocess import Popen, PIPE
from datetime import datetime
from collections import OrderedDict
class output_line:
"""
Entity-Attributes-Value Model
"""
header_out = False
def __init__(self, entity, attributes, value, header=False):
self.entity = entity
if type(attributes) in [dict, OrderedDict]:
attributes = [k + "=" + str(v) for k, v in attributes.items()]
elif type(attributes) != list:
attributes = [attributes]
self.attributes = attributes
self.value = value
if not output_line.header_out and header:
print("bam\tcontig\tstart\tend\tproperty\tvalue")
output_line.header_out = True
def __setattr__(self, name, value):
# Value is attribute
if name == "add_attr" or name == "set_attr":
if type(value) in [dict, OrderedDict]:
value = [k + "=" + v for k, v in value.items()]
elif type(value) != list:
value = [value]
if name == "add_attr":
self.__dict__["attributes"].extend(value)
else:
self.__dict__["attributes"] = value
else:
self.__dict__[name] = value
def __repr__(self):
attributes = '\t'.join(map(str, [x.split("=")[1] for x in self.attributes]))
out = [self.entity, attributes, self.value]
output = map(str, out)
return '\t'.join(output)
class bam_file:
def __init__(self, fname, mtchr = None):
self.fname = fname
self.mtchr = mtchr
self.parse_header()
def parse_header(self):
header, err = Popen(["samtools", "view", "-H", self.fname], stdout=PIPE, stderr=PIPE).communicate()
if err != "":
raise Exception(err)
self.header = header
contigs = OrderedDict()
contig_regions = []
for x in re.findall("@SQ\WSN:(?P<chrom>[A-Za-z0-9_]*)\WLN:(?P<length>[0-9]+)", header):
contigs[x[0]] = int(x[1])
region = "%s:%s-%s" % (x[0], "1", x[1])
contig_regions.append(region)
self.contigs = contigs
self.contig_regions = contig_regions
mtchr = [x for x in self.contigs.keys() if x.lower().find("m") == 0]
if len(mtchr) == 1:
self.mtchr = mtchr[0]
with indent(4):
puts_err(colored.blue("\nGuessing Mitochondrial Chromosome: " + self.mtchr + "\n"))
self.genome_length = sum(contigs.values())
if mtchr:
self.nuclear_length = sum([x for x in contigs.values() if x != contigs[self.mtchr]])
    def sum_coverage(self, region=None):
        # Accumulate per-position depth reported by samtools for the region.
        comm = Popen(["samtools", "depth", "-r", region, self.fname], stdout=PIPE, stderr=PIPE)
        pos_covered = 0
        cum_depth = 0
        for row in comm.stdout:
            chrom, pos, depth = row.strip().split("\t")
            pos_covered += 1
            cum_depth += int(depth)
        return pos_covered, cum_depth
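    # Editor's note (illustrative): `samtools depth -r <region>` emits one
    # tab-separated line per covered position, e.g.
    #
    #   chrI    101    12
    #   chrI    102    13
    #
    # which sum_coverage accumulates into positions covered and total depth.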
def iterate_window(bamfile, window):
    # Yield window-sized regions ("chrom:start-end") covering every contig.
    for chrom, chrom_len in bamfile.contigs.items():
        for i in xrange(1, chrom_len, window):
            if i + window > chrom_len:
                end = chrom_len
            else:
                end = i + window - 1
            yield "{chrom}:{i}-{end}".format(**locals())
def calc_coverage(bamfile, regions=None, mtchr=None):
from pybedtools.cbedtools import Interval
depths = []
for region in regions:
output_dir = OrderedDict()
if type(region) == Interval:
# Add one to start as starts are 0 based; ends are 1 based.
rchrom = str(region.chrom)
chrom, start, end = rchrom, region.start + 1, region.stop
output_dir["name"] = region.name
else:
chrom, start, end = re.split("[:-]", region)
start, end = int(start), int(end)
output_dir["chrom"] = chrom
output_dir["start"] = start
output_dir["end"] = end
# If end extends to far, adjust for chrom
chrom_len = bamfile.contigs[chrom]
if end > chrom_len:
m = "\nSpecified chromosome end extends beyond chromosome length. Set to max of: "
with indent(4):
puts_err(colored.yellow(m + str(chrom_len) + "\n"))
end = chrom_len
region = "{c}:{s}-{e}".format(c=chrom, s=start, e=end + 1)
pos_covered, cum_depth = bamfile.sum_coverage(region)
length = end - start + 1
coverage = cum_depth / float(length)
breadth = pos_covered / float(length)
output_dir["ATTR"] = "bases_mapped"
print(output_line(bam_name, output_dir, cum_depth, args["--header"]))
output_dir["ATTR"] = "depth_of_coverage"
print(output_line(bam_name, output_dir, coverage))
output_dir["ATTR"] = "breadth_of_coverage"
print(output_line(bam_name, output_dir, breadth))
output_dir["ATTR"] = "length"
print(output_line(bam_name, output_dir, length))
output_dir["ATTR"] = "pos_mapped"
print(output_line(bam_name, output_dir, pos_covered))
depths.append({"chrom": chrom,
"bases_mapped": cum_depth,
"pos_covered": pos_covered,
"depth_of_coverage": coverage})
return depths
if __name__ == '__main__':
args = docopt(__doc__,
version='BAM-Toolbox v0.1',
options_first=False)
if args["<bam>"]:
# Add check for file here
bam_name = os.path.basename(args["<bam>"]).replace(".bam", "")
b = bam_file(args["<bam>"], args["--mtchr"])
if args["<chrom:start-end>"]:
"""
Calculate coverage in a given region or regions
"""
calc_coverage(b, args["<chrom:start-end>"])
elif args["--window"]:
"""
Calculate coverage across a window of given size.
"""
window = int(args["--window"])
regions = iterate_window(b, window)
calc_coverage(b, regions)
elif args["--regions"]:
"""
Calculate coverage in specified regions
"""
from pybedtools import BedTool
bed = BedTool(args["--regions"])
calc_coverage(b, bed[:])
elif args["<bam>"]:
"""
Calculate coverage genome wide
"""
bam = args["<bam>"]
cov = calc_coverage(b, b.contig_regions)
# Genomewide depth
output_dir = {}
output_dir["start"] = 1
output_dir["end"] = b.genome_length
output_dir["chrom"] = "genome"
bases_mapped = sum([x["bases_mapped"] for x in cov])
output_dir["ATTR"] = "bases_mapped"
print(output_line(bam_name, output_dir, bases_mapped))
output_dir["ATTR"] = "depth_of_coverage"
coverage = bases_mapped / float(b.genome_length)
print(output_line(bam_name, output_dir, coverage))
output_dir["ATTR"] = "breadth_of_coverage"
breadth = sum([x["pos_covered"] for x in cov]) / float(b.genome_length)
print(output_line(bam_name, output_dir, breadth))
output_dir["ATTR"] = "positions_mapped"
pos_mapped = sum([x["pos_covered"] for x in cov])
print(output_line(bam_name, output_dir, pos_mapped))
if b.mtchr:
# Nuclear
output_dir["end"] = b.nuclear_length
output_dir["chrom"] = "nuclear"
bases_mapped = sum([x["bases_mapped"] for x in cov if x["chrom"] != b.mtchr])
output_dir["ATTR"] = "bases_mapped"
print(output_line(bam_name, output_dir, bases_mapped))
output_dir["ATTR"] = "depth_of_coverage"
coverage = bases_mapped / float(b.nuclear_length)
print(output_line(bam_name, output_dir, coverage))
output_dir["ATTR"] = "breadth_of_coverage"
breadth = sum([x["pos_covered"] for x in cov if x["chrom"] != b.mtchr]) / float(b.nuclear_length)
print(output_line(bam_name, output_dir, breadth))
output_dir["ATTR"] = "positions_mapped"
pos_mapped = sum([x["pos_covered"] for x in cov if x["chrom"] != b.mtchr])
print(output_line(bam_name, output_dir, pos_mapped))
# mt:nuclear ratio
output_dir = {"start": 1, "end": b.nuclear_length, "chrom": "genome", "ATTR": "mt_nuclear_ratio"}
mt_nuc = [x for x in cov if x["chrom"] == b.mtchr][0]["depth_of_coverage"] / coverage
print(output_line(bam_name, output_dir, mt_nuc))
| mit | 5,665,556,332,376,103,000 | 35.830645 | 109 | 0.546748 | false |
spirit-code/spirit | core/python/test/geometry.py | 1 | 3422 | import os
import sys
# spirit_py_dir = os.path.dirname(os.path.realpath(__file__))
spirit_py_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), ".."))
sys.path.insert(0, spirit_py_dir)
from spirit import state, geometry
import unittest
##########
cfgfile = spirit_py_dir + "/../test/input/fd_neighbours.cfg" # Input File
p_state = state.setup(cfgfile) # State setup
class TestParameters(unittest.TestCase):
def setUp(self):
''' Setup a p_state and copy it to Clipboard'''
self.p_state = p_state
class Geometry(TestParameters):
def test_bounds(self):
minb, maxb = geometry.get_bounds(self.p_state)
# From the api.cfg the space is 2:2:1 particles
self.assertEqual(minb[0], 0)
self.assertEqual(minb[1], 0)
self.assertEqual(minb[2], 0)
self.assertEqual(maxb[0], 1)
self.assertEqual(maxb[1], 1)
self.assertEqual(maxb[2], 0)
def test_center(self):
center = geometry.get_center(self.p_state)
# From the api.cfg the space is 2:2:1 particles
self.assertEqual(center[0], 0.5)
self.assertEqual(center[1], 0.5)
self.assertEqual(center[2], 0)
def test_bravais_vector(self):
a, b, c = geometry.get_bravais_vectors(self.p_state)
# From the api.cfg the bravais vectors are (1,0,0), (0,1,0), (0,0,1)
self.assertEqual(a[0], b[1])
self.assertEqual(b[1], c[2])
# Check also that the bravais lattice type matches simple cubic
lattice_type = geometry.get_bravais_lattice_type(self.p_state)
self.assertEqual(lattice_type, geometry.BRAVAIS_LATTICE_SC)
def test_N_cells(self):
ncells = geometry.get_n_cells(self.p_state)
self.assertEqual(ncells[0], 2)
self.assertEqual(ncells[1], 2)
self.assertEqual(ncells[2], 1)
def test_dimensionality(self):
dim = geometry.get_dimensionality(self.p_state)
self.assertEqual(dim, 2)
def test_positions(self):
positions = geometry.get_positions(self.p_state)
# spin at (0,0,0)
self.assertAlmostEqual(positions[0][0], 0)
self.assertAlmostEqual(positions[0][1], 0)
self.assertAlmostEqual(positions[0][2], 0)
# spin at (1,0,0)
self.assertAlmostEqual(positions[1][0], 1)
self.assertAlmostEqual(positions[1][1], 0)
self.assertAlmostEqual(positions[1][2], 0)
# spin at (0,1,0)
self.assertAlmostEqual(positions[2][0], 0)
self.assertAlmostEqual(positions[2][1], 1)
self.assertAlmostEqual(positions[2][2], 0)
# spin at (1,1,0)
self.assertAlmostEqual(positions[3][0], 1)
self.assertAlmostEqual(positions[3][1], 1)
self.assertAlmostEqual(positions[3][2], 0)
def test_atom_types(self):
types = geometry.get_atom_types(self.p_state)
self.assertEqual(len(types), 4)
self.assertEqual(types[0], 0)
self.assertEqual(types[1], 0)
self.assertEqual(types[2], 0)
self.assertEqual(types[3], 0)
#########
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Geometry))
return suite
if __name__ == '__main__':
suite = suite()
runner = unittest.TextTestRunner()
success = runner.run(suite).wasSuccessful()
state.delete( p_state ) # Delete State
sys.exit(not success) | mit | 2,290,684,823,493,047,000 | 31.913462 | 80 | 0.616891 | false |
samuelgarcia/HearingLossSimulator | hearinglosssimulator/gui/wifidevice/packet_types.py | 1 | 3336 | # address by default
MAX_PACKET_LEN = 1036
BUFFER_SIZE = 1024
sample_per_packet = 256
header_size = 12
frame_header = [('type', 'uint16'),
('length', 'uint16'),
('packet_num', 'uint32'),
('option', 'uint32')
]
test_header = [('test_packet', 'uint32'),
('send_time', 'uint32'),
('recv_time', 'uint32'),
# variable BUFFER_SIZE-12
]
spatial_header = [('acc_x', '>i2'),
('acc_y', '>i2'),
('acc_z', '>i2'),
('mag_x', '>i2'),
('mag_y', '>i2'),
('mag_z', '>i2')]
file_header = [('inode', 'uint32'),
('parent', 'uint32'),
('mode', 'uint32')]
# frame type uint16
CONNECTION = 1#return ACK
ACK = 2
PING = 3#return PONG
PONG = 4
RESET = 5#return ACK
START_STREAM = 6#return ACK
STOP_STREAM = 7#return ACK
GET_PARAMS = 8
SET_PARAMS = 9#return ACK
F_OPEN = 10#return ACK in v2.0.2
F_CLOSE = 11#return ACK
F_WRITE = 12#return ACK
F_READ = 13
AUDIO_DATA = 20
TEST_DATA = 21
PARAMS_DATA = 22
FILE_DATA = 23
SPAT_DATA = 24
# option int32
#START_STREAM STOP_STREAM
AUDIO_STREAM = 0x10000000
TEST_STREAM = 0x01000000
SPAT_STREAM = 0x00100000
#option params
SYSTEM_INFO = 0xFFFF #RO
NETWORK_CONF = 0x0004 #RW
TEST_CONF = 0x0005#RW
AUDIO_CONF = 0x0006 #RW
GPS_CMD = 0x0008#RW
ACC_CONF = 0x0009 #RW
# file inodes
ROOT_DIR = 0x0001 # with file RO
SYS_DIR = 0x0002 # with file RO
HOME_DIR = 0x0003 # with file RO
NEW_FILE = 0x0000
AUDIO_REG = 0x0007# with file RO
ACC_REG = 0x000A# with file RO
# file option
WRITE = 0xFFFFFFFF
READ = 0x0000
# struct
stream_types = {'audio':AUDIO_STREAM, 'test':TEST_STREAM, 'spatialization':SPAT_STREAM}
# TIMEOUT
#~ TIMEOUT_AUDIO = 0.2 #s
TIMEOUT_AUDIO = 0.5 #s
TIMEOUT_TEST = 0.5 #s
TIMEOUT_PING_PONG = 0.3 #s
TIMEOUT_ACK_START_STREAM = 1.#s
TIMEOUT_GET_PARAMS = 8.#s
TIMEOUT_ACK_SET_PARAMS = 10.#s
PING_INTERVAL = .5 #s
RECONNECT_INTERVAL = 1.#s
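# --- Illustrative helpers (not part of the original module) -----------------
# Minimal sketch of how the codec volume register values documented in the
# SLAA408A excerpt quoted at the bottom of this file can be computed: gains are
# encoded in 0.5 dB steps, negative gains as two's complement (8 bits for the
# speaker/DAC register, 7 bits for the micro/ADC register). The function names
# are placeholders, not an official API.
def speaker_gain_to_reg(gain_db):
    assert -63.5 <= gain_db <= 24., 'speaker gain out of range'
    return int(round(gain_db * 2)) & 0xFF
def micro_gain_to_reg(gain_db):
    assert -12. <= gain_db <= 20., 'micro gain out of range'
    return int(round(gain_db * 2)) & 0x7F
# e.g. speaker_gain_to_reg(24.) == 0x30 and micro_gain_to_reg(-12.) == 0x68,
# matching the register tables quoted below.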
"""
From the SLAA408A application note,
available on ti.com
*****
SPEAKER VOLUME
*****
D7-D0
R/W
0000 0000
Left DAC Channel Digital Volume Control Setting
0111 1111-0011 0001: Reserved. Do not use
0011 0000: Digital Volume Control = +24dB
0010 1111: Digital Volume Control = +23.5dB
...
0000 0001: Digital Volume Control = +0.5dB
0000 0000: Digital Volume Control = 0.0dB
1111 1111: Digital Volume Control = -0.5dB
...
1000 0010: Digital Volume Control = -63dB
1000 0001: Digital Volume Control = -63.5dB
1000 0000: Reserved. Do not use
*****
MICRO VOLUME
*****
D6-D0
R/W
000 0000
Left ADC Channel Volume Control
100 0000-110 0111: Reserved. Do not use
110 1000: Left ADC Channel Volume = -12dB
110 1001: Left ADC Channel Volume = -11.5dB
110 1010: Left ADC Channel Volume = -11.0dB
...
111 1111: Left ADC Channel Volume = -0.5dB
000 0000: Left ADC Channel Volume = 0.0dB
000 0001: Left ADC Channel Volume = 0.5dB
...
010 0110: Left ADC Channel Volume = 19.0dB
010 0111: Left ADC Channel Volume = 19.5dB
010 1000: Left ADC Channel Volume = 20.0dB
010 1001-111 1111: Reserved. Do not use
"""
| mit | -2,829,242,479,558,415,400 | 20.384615 | 87 | 0.583933 | false |
CadeiraCuidadora/UMISS-backend | umiss_project/umiss_auth/migrations/0001_initial.py | 1 | 4571 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-21 23:16
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('password',
models.CharField(
max_length=128,
verbose_name='password')),
('last_login',
models.DateTimeField(
blank=True,
null=True,
verbose_name='last login')),
('is_superuser',
models.BooleanField(
default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('username',
models.CharField(
error_messages={
'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name='username')),
('first_name',
models.CharField(
blank=True,
max_length=30,
verbose_name='first name')),
('last_name',
models.CharField(
blank=True,
max_length=30,
verbose_name='last name')),
('email',
models.EmailField(
blank=True,
max_length=254,
verbose_name='email address')),
('is_staff',
models.BooleanField(
default=False,
help_text='Designates whether the user can log into this admin site.',
verbose_name='staff status')),
('is_active',
models.BooleanField(
default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.',
verbose_name='active')),
('date_joined',
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name='date joined')),
('user_type',
models.CharField(
choices=[
('patient',
'User Type Pacient'),
('monitor',
'User Type Monitor')],
default='monitor',
max_length=2)),
('groups',
models.ManyToManyField(
blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set',
related_query_name='user',
to='auth.Group',
verbose_name='groups')),
('user_permissions',
models.ManyToManyField(
blank=True,
help_text='Specific permissions for this user.',
related_name='user_set',
related_query_name='user',
to='auth.Permission',
verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'abstract': False,
'verbose_name': 'user',
},
managers=[
('objects',
django.contrib.auth.models.UserManager()),
],
),
]
| gpl-3.0 | -2,481,173,413,696,960,500 | 37.737288 | 135 | 0.436228 | false |
nwiizo/workspace_2017 | keras_ex/example/cifar10_cnn.py | 2 | 3817 | '''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# The CIFAR10 images are RGB.
img_channels = 3
# The data, shuffled and split between train and test sets:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
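# Illustrative addition (not part of the original example): report the final
# test loss/accuracy so the test logloss mentioned in the module docstring can
# be checked directly once either training branch has finished.
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])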
| mit | 7,820,224,148,347,043,000 | 36.058252 | 94 | 0.687713 | false |
laughingman7743/PyAthena | pyathena/pandas/util.py | 1 | 9450 | # -*- coding: utf-8 -*-
import concurrent
import logging
import uuid
from collections import OrderedDict
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
from copy import deepcopy
from multiprocessing import cpu_count
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
Union,
)
from boto3 import Session
from pyathena import OperationalError
from pyathena.model import AthenaCompression
from pyathena.util import RetryConfig, parse_output_location, retry_api_call
if TYPE_CHECKING:
from pandas import DataFrame, Series
from pyathena.connection import Connection
from pyathena.cursor import Cursor
_logger = logging.getLogger(__name__) # type: ignore
def get_chunks(df: "DataFrame", chunksize: Optional[int] = None) -> Iterator["DataFrame"]:
rows = len(df)
if rows == 0:
return
if chunksize is None:
chunksize = rows
elif chunksize <= 0:
raise ValueError("Chunk size argument must be greater than zero")
chunks = int(rows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, rows)
if start_i >= end_i:
break
yield df[start_i:end_i]
def reset_index(df: "DataFrame", index_label: Optional[str] = None) -> None:
df.index.name = index_label if index_label else "index"
try:
df.reset_index(inplace=True)
except ValueError as e:
raise ValueError("Duplicate name in index/columns: {0}".format(e))
def as_pandas(cursor: "Cursor", coerce_float: bool = False) -> "DataFrame":
from pandas import DataFrame
description = cursor.description
if not description:
return DataFrame()
names = [metadata[0] for metadata in description]
return DataFrame.from_records(
cursor.fetchall(), columns=names, coerce_float=coerce_float
)
def to_sql_type_mappings(col: "Series") -> str:
import pandas as pd
col_type = pd.api.types.infer_dtype(col, skipna=True)
if col_type == "datetime64" or col_type == "datetime":
return "TIMESTAMP"
elif col_type == "timedelta":
return "INT"
elif col_type == "timedelta64":
return "BIGINT"
elif col_type == "floating":
if col.dtype == "float32":
return "FLOAT"
else:
return "DOUBLE"
elif col_type == "integer":
if col.dtype == "int32":
return "INT"
else:
return "BIGINT"
elif col_type == "boolean":
return "BOOLEAN"
elif col_type == "date":
return "DATE"
elif col_type == "bytes":
return "BINARY"
elif col_type in ["complex", "time"]:
raise ValueError("Data type `{0}` is not supported".format(col_type))
return "STRING"
def to_parquet(
df: "DataFrame",
bucket_name: str,
prefix: str,
retry_config: RetryConfig,
session_kwargs: Dict[str, Any],
client_kwargs: Dict[str, Any],
    compression: Optional[str] = None,
flavor: str = "spark",
) -> str:
import pyarrow as pa
from pyarrow import parquet as pq
session = Session(**session_kwargs)
client = session.resource("s3", **client_kwargs)
bucket = client.Bucket(bucket_name)
table = pa.Table.from_pandas(df)
buf = pa.BufferOutputStream()
pq.write_table(table, buf, compression=compression, flavor=flavor)
response = retry_api_call(
bucket.put_object,
config=retry_config,
Body=buf.getvalue().to_pybytes(),
Key=prefix + str(uuid.uuid4()),
)
return "s3://{0}/{1}".format(response.bucket_name, response.key)
def to_sql(
df: "DataFrame",
name: str,
conn: "Connection",
location: str,
schema: str = "default",
index: bool = False,
index_label: Optional[str] = None,
    partitions: Optional[List[str]] = None,
chunksize: Optional[int] = None,
if_exists: str = "fail",
    compression: Optional[str] = None,
flavor: str = "spark",
type_mappings: Callable[["Series"], str] = to_sql_type_mappings,
executor_class: Type[
Union[ThreadPoolExecutor, ProcessPoolExecutor]
] = ThreadPoolExecutor,
max_workers: int = (cpu_count() or 1) * 5,
) -> None:
# TODO Supports orc, avro, json, csv or tsv format
if if_exists not in ("fail", "replace", "append"):
raise ValueError("`{0}` is not valid for if_exists".format(if_exists))
if compression is not None and not AthenaCompression.is_valid(compression):
raise ValueError("`{0}` is not valid for compression".format(compression))
if partitions is None:
partitions = []
bucket_name, key_prefix = parse_output_location(location)
bucket = conn.session.resource(
"s3", region_name=conn.region_name, **conn._client_kwargs
).Bucket(bucket_name)
cursor = conn.cursor()
table = cursor.execute(
"""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '{schema}'
AND table_name = '{table}'
""".format(
schema=schema, table=name
)
).fetchall()
if if_exists == "fail":
if table:
raise OperationalError(
"Table `{0}.{1}` already exists.".format(schema, name)
)
elif if_exists == "replace":
if table:
cursor.execute(
"""
DROP TABLE {schema}.{table}
""".format(
schema=schema, table=name
)
)
objects = bucket.objects.filter(Prefix=key_prefix)
if list(objects.limit(1)):
objects.delete()
if index:
reset_index(df, index_label)
with executor_class(max_workers=max_workers) as e:
futures = []
session_kwargs = deepcopy(conn._session_kwargs)
session_kwargs.update({"profile_name": conn.profile_name})
client_kwargs = deepcopy(conn._client_kwargs)
client_kwargs.update({"region_name": conn.region_name})
if partitions:
for keys, group in df.groupby(by=partitions, observed=True):
keys = keys if isinstance(keys, tuple) else (keys,)
group = group.drop(partitions, axis=1)
partition_prefix = "/".join(
["{0}={1}".format(key, val) for key, val in zip(partitions, keys)]
)
for chunk in get_chunks(group, chunksize):
futures.append(
e.submit(
to_parquet,
chunk,
bucket_name,
"{0}{1}/".format(key_prefix, partition_prefix),
conn._retry_config,
session_kwargs,
client_kwargs,
compression,
flavor,
)
)
else:
for chunk in get_chunks(df, chunksize):
futures.append(
e.submit(
to_parquet,
chunk,
bucket_name,
key_prefix,
conn._retry_config,
session_kwargs,
client_kwargs,
compression,
flavor,
)
)
for future in concurrent.futures.as_completed(futures):
result = future.result()
_logger.info("to_parquet: {0}".format(result))
ddl = generate_ddl(
df=df,
name=name,
location=location,
schema=schema,
partitions=partitions,
compression=compression,
type_mappings=type_mappings,
)
_logger.info(ddl)
cursor.execute(ddl)
if partitions:
repair = "MSCK REPAIR TABLE {0}.{1}".format(schema, name)
_logger.info(repair)
cursor.execute(repair)
def get_column_names_and_types(
df: "DataFrame", type_mappings
) -> "OrderedDict[str, str]":
return OrderedDict(
(
(str(df.columns[i]), type_mappings(df.iloc[:, i]))
for i in range(len(df.columns))
)
)
def generate_ddl(
df: "DataFrame",
name: str,
location: str,
schema: str = "default",
partitions: Optional[List[str]] = None,
compression: Optional[str] = None,
type_mappings: Callable[["Series"], str] = to_sql_type_mappings,
) -> str:
if partitions is None:
partitions = []
column_names_and_types = get_column_names_and_types(df, type_mappings)
ddl = "CREATE EXTERNAL TABLE IF NOT EXISTS `{0}`.`{1}` (\n".format(schema, name)
ddl += ",\n".join(
[
"`{0}` {1}".format(col, type_)
for col, type_ in column_names_and_types.items()
if col not in partitions
]
)
ddl += "\n)\n"
if partitions:
ddl += "PARTITIONED BY (\n"
ddl += ",\n".join(
["`{0}` {1}".format(p, column_names_and_types[p]) for p in partitions]
)
ddl += "\n)\n"
ddl += "STORED AS PARQUET\n"
ddl += "LOCATION '{0}'\n".format(location)
if compression:
ddl += "TBLPROPERTIES ('parquet.compress'='{0}')\n".format(compression.upper())
return ddl
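# Illustrative sketch (not part of the original module): print the DDL that
# generate_ddl() builds for a small DataFrame. The bucket, table and column
# names below are made-up placeholders, not real resources.
if __name__ == "__main__":
    import pandas as pd
    example_df = pd.DataFrame(
        {"id": [1, 2], "name": ["a", "b"], "dt": ["2020-01-01", "2020-01-02"]}
    )
    print(
        generate_ddl(
            df=example_df,
            name="example_table",
            location="s3://example-bucket/example_table/",
            schema="default",
            partitions=["dt"],
            compression="snappy",
        )
    )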
| mit | -3,390,893,893,548,295,000 | 30.291391 | 87 | 0.56 | false |
bireme/api-nlm | src/ProcessLog.py | 1 | 4880 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# =========================================================================
#
# Copyright © 2016 BIREME/PAHO/WHO
#
# This file is part of API-NLM.
#
# API-NLM is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# API-NLM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with API-NLM. If not, see <http://www.gnu.org/licenses/>.
#
# =========================================================================
import traceback
from datetime import datetime
from MongoDb import MyMongo
from NLM_AOPFactory import NLM_AOPFactory
from NLM_AOPHarvesting import NLM_AOPHarvesting
# from ProcessLog import ProcessLog
__date__ = 20160509
class ProcessLog:
def __init__(self,
harvesting_,
mongodbDoc_,
mongodbLog_,
owner_,
process_):
"""
Constructor.
harvesting_ - Harvesting object that makes the document harvesting
mongodbDoc_ - MongoDb object that contains the 'doc' harvesting
collection
mongodbLog_ - MongoDb object that contains the 'log' log collection
owner_ - Owner of the process
process_ - Process name
"""
self.harvesting = harvesting_
self.mongodbDoc = mongodbDoc_
self.mongodbLog = mongodbLog_
self.owner = owner_
self.process = process_
def harvest(self):
"""
Execute the harvesting and add a document with start time, end time,
process status and number of collected documents.
Returns a dictionary with harvesting statistics.
"""
now = datetime.now()
dateBegin = datetime.strftime(now, "%Y%m%d")
hourBegin = datetime.strftime(now, "%H:%M:%S")
id_ = dateBegin + "-" + hourBegin
        doc = {"_id": id_, "process": self.process + "_harvesting",
               "owner": self.owner, "status": "in process",
               "dateBegin": dateBegin, "hourBegin": hourBegin}
self.mongodbLog.saveDoc(doc)
try:
self.harvesting.harvest(dateBegin, hourBegin)
status = "finished"
except (Exception, RuntimeError) as ex:
traceback.print_stack()
print("Exception/error generated: " + str(ex))
status = "broken"
now2 = datetime.now()
dateEnd = datetime.strftime(now2, "%Y%m%d")
hourEnd = datetime.strftime(now2, "%H:%M:%S")
doc = self.harvesting.getHarvStatDoc(id_,
self.process + "_harvesting",
self.owner, status,
dateBegin, hourBegin,
dateEnd, hourEnd)
self.mongodbLog.replaceDoc(doc)
return doc
if __name__ == "__main__":
# Execute only if run as a script.
verbose_ = True
# mongoHost = "ts01vm.bireme.br"
mongoHost = "mongodb.bireme.br"
dbName = "db_AheadOfPrint"
mid = MyMongo(dbName, "col_Id", mongoHost)
mdoc = MyMongo(dbName, "col_Doc", mongoHost)
mlog = MyMongo(dbName, "col_Log", mongoHost)
process = "aheadofprint"
owner = "serverofi5"
factory_ = NLM_AOPFactory()
factory_.setMyMongoId(mid)
factory_.setMyMongoDoc(mdoc)
factory_.setXmlOutDir("/bases/mdlG4/fasea/aheadofprint")
# factory_.setXmlOutDir("../xml")
factory_.setProcess(process)
factory_.setOwner(owner)
harvesting = NLM_AOPHarvesting(factory_, verbose_)
log = ProcessLog(harvesting, mdoc, mlog, owner, process)
result = log.harvest()
if verbose_:
print("Process=" + process)
print("Owner=" + result["owner"])
print("Status=" + result["status"])
print("DateBegin=" + result["dateBegin"])
print("HourBegin=" + result["hourBegin"])
print("DateEnd=" + result["dateEnd"])
print("HourEnd=" + result["hourEnd"])
print("TotAheadDocs=" + str(result["totAheadDocs"]))
print("TotNoAheadDocs=" + str(result["totNoAheadDocs"]))
print("TotInProcessDocs=" + str(result["totInProcessDocs"]))
print("NewAheadDocs=" + str(result["newAheadDocs"]))
print("NewInProcessDocs=" + str(result["newInProcessDocs"]))
print("NewNoAheadDocs=" + str(result["newNoAheadDocs"]))
print("")
| lgpl-2.1 | -8,249,847,676,567,609,000 | 34.875 | 76 | 0.580242 | false |
ycflame/google-python-exercises | basic/string1.py | 1 | 3600 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
# +++your code here+++
result = count if count < 10 else 'many'
return 'Number of donuts: %s' % result
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
# +++your code here+++
  return s[:2] + s[-2:] if len(s) >= 2 else ''
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
# +++your code here+++
return s[:1] + s[1:].replace(s[0], '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
# +++your code here+++
return "%s %s" % (b[:2] + a[2:], a[:2] + b[2:])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
| apache-2.0 | 8,111,817,648,614,050,000 | 30.858407 | 78 | 0.659444 | false |
DylanLukes/django-authority | authority/admin.py | 1 | 7317 | import django
from django import forms, template
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext, ungettext, ugettext_lazy as _
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
from django.forms.formsets import all_valid
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
from django.contrib.admin import actions
except ImportError:
actions = False
# From 1.7 forward, Django consistently uses the name "utils",
# not "util". We alias for backwards compatibility.
if django.VERSION[:2] < (1, 7):
forms.utils = forms.util
from authority.models import Permission
from authority.widgets import GenericForeignKeyRawIdWidget
from authority import get_choices_for
class PermissionInline(generic.GenericTabularInline):
model = Permission
raw_id_fields = ('user', 'group', 'creator')
extra = 1
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'codename':
perm_choices = get_choices_for(self.parent_model)
kwargs['label'] = _('permission')
kwargs['widget'] = forms.Select(choices=perm_choices)
return super(PermissionInline, self).formfield_for_dbfield(db_field, **kwargs)
class ActionPermissionInline(PermissionInline):
raw_id_fields = ()
template = 'admin/edit_inline/action_tabular.html'
class ActionErrorList(forms.utils.ErrorList):
def __init__(self, inline_formsets):
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
def edit_permissions(modeladmin, request, queryset):
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has the permission to edit permissions
if not (request.user.is_superuser or
request.user.has_perm('authority.change_permission') or
request.user.has_perm('authority.change_foreign_permissions')):
raise PermissionDenied
inline = ActionPermissionInline(queryset.model, modeladmin.admin_site)
formsets = []
for obj in queryset:
prefixes = {}
FormSet = inline.get_formset(request, obj)
prefix = "%s-%s" % (FormSet.get_default_prefix(), obj.pk)
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
if request.POST.get('post'):
formset = FormSet(data=request.POST, files=request.FILES,
instance=obj, prefix=prefix)
else:
formset = FormSet(instance=obj, prefix=prefix)
formsets.append(formset)
media = modeladmin.media
inline_admin_formsets = []
for formset in formsets:
fieldsets = list(inline.get_fieldsets(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
ordered_objects = opts.get_ordered_objects()
if request.POST.get('post'):
if all_valid(formsets):
for formset in formsets:
formset.save()
else:
modeladmin.message_user(request, '; '.join(
err.as_text() for formset in formsets for err in formset.errors
))
# redirect to full request path to make sure we keep filter
return HttpResponseRedirect(request.get_full_path())
context = {
'errors': ActionErrorList(formsets),
'title': ugettext('Permissions for %s') % force_text(opts.verbose_name_plural),
'inline_admin_formsets': inline_admin_formsets,
'app_label': app_label,
'change': True,
'ordered_objects': ordered_objects,
'form_url': mark_safe(''),
'opts': opts,
'target_opts': queryset.model._meta,
'content_type_id': ContentType.objects.get_for_model(queryset.model).id,
'save_as': False,
'save_on_top': False,
'is_popup': False,
'media': mark_safe(media),
'show_delete': False,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'queryset': queryset,
"object_name": force_text(opts.verbose_name),
}
template_name = getattr(modeladmin, 'permission_change_form_template', [
"admin/%s/%s/permission_change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/permission_change_form.html" % app_label,
"admin/permission_change_form.html"
])
return render_to_response(template_name, context,
context_instance=template.RequestContext(request))
edit_permissions.short_description = _("Edit permissions for selected %(verbose_name_plural)s")
class PermissionAdmin(admin.ModelAdmin):
list_display = ('codename', 'content_type', 'user', 'group', 'approved')
list_filter = ('approved', 'content_type')
search_fields = ('user__username', 'group__name', 'codename')
raw_id_fields = ('user', 'group', 'creator')
generic_fields = ('content_object',)
actions = ['approve_permissions']
fieldsets = (
(None, {'fields': ('codename', ('content_type', 'object_id'))}),
(_('Permitted'), {'fields': ('approved', 'user', 'group')}),
(_('Creation'), {'fields': ('creator', 'date_requested', 'date_approved')}),
)
def formfield_for_dbfield(self, db_field, **kwargs):
# For generic foreign keys marked as generic_fields we use a special widget
if db_field.name in [f.fk_field for f in self.model._meta.virtual_fields if f.name in self.generic_fields]:
for gfk in self.model._meta.virtual_fields:
if gfk.fk_field == db_field.name:
kwargs['widget'] = GenericForeignKeyRawIdWidget(
gfk.ct_field, self.admin_site._registry.keys())
break
return super(PermissionAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def queryset(self, request):
user = request.user
if (user.is_superuser or
user.has_perm('permissions.change_foreign_permissions')):
return super(PermissionAdmin, self).queryset(request)
return super(PermissionAdmin, self).queryset(request).filter(creator=user)
def approve_permissions(self, request, queryset):
for permission in queryset:
permission.approve(request.user)
message = ungettext("%(count)d permission successfully approved.",
"%(count)d permissions successfully approved.", len(queryset))
self.message_user(request, message % {'count': len(queryset)})
approve_permissions.short_description = _("Approve selected permissions")
admin.site.register(Permission, PermissionAdmin)
if actions:
admin.site.add_action(edit_permissions)
| bsd-3-clause | 261,633,873,510,626,200 | 41.540698 | 115 | 0.65833 | false |
grammarware/slps | shared/python/CBGF3.py | 1 | 3247 | #!/Library/Frameworks/Python.framework/Versions/3.1/bin/python3
# -*- coding: utf-8 -*-
# wiki: ΞBGF
import os
import sys
sys.path.append(os.getcwd().split('slps')[0]+'slps/shared/python')
import slpsns
import BGF3
import xml.etree.ElementTree as ET
class Sequence:
def __init__(self):
self.steps = []
def parse(self,fname):
self.steps = []
self.xml = ET.parse(fname)
for e in self.xml.findall(slpsns.cbgf_('*')):
			s = Step(e.tag)
s.parse(e)
self.steps.append(s)
def addStep(self,s):
self.steps.append(s)
def addFirstStep(self,s):
ns = [s]
ns.extend(self.steps)
self.steps = ns
def getXml(self):
self.ex = ET.Element(slpsns.cbgf_('relationship'))
for e in self.steps:
self.ex.append(e.getXml())
return self.ex
class Step:
def __init__(self,op):
self.name = op
self.params = []
def parse(self,ee):
self.name = ee.tag
		for e in ee.findall(slpsns.bgf_('*')):
if e.tag == 'expression':
ne = BGF3.Expression()
elif e.tag == 'production':
ne = BGF3.Production()
else:
print('Unknown parameter of type',e.tag)
ne = None
ne.parse(e)
self.params.append(ne)
def setName(self,n):
self.name = n
def addParam(self,p):
self.params.append(p)
def getXml(self):
#print 'Getting the XML of production...'
self.ex = ET.Element(slpsns.cbgf_(self.name))
for p in self.params:
self.ex.append(p.getXml())
return self.ex
class Label:
def __init__(self,n):
self.name = n
def getXml(self):
e = ET.Element('label')
e.text = self.name
return e
def __str__(self):
return self.name
class Root:
def __init__(self,n):
self.name = n
def getXml(self):
e = ET.Element('root')
e.text = self.name
return e
def __str__(self):
return self.name
# the main difference from BGF3.Nonterminal is the absence of wrapping expression
class Nonterminal:
def __init__(self,name):
self.data = name
def parse(self,nontermelem):
self.data = nontermelem.text
def setName(self,name):
self.data = name
def getXml(self):
#print 'Getting the XML of nonterminal',self.data,'...'
self.ex = ET.Element('nonterminal')
self.ex.text = self.data
return self.ex
def __str__(self):
return self.data
# the nonterminal for rename-renameN
class NonterminalFT:
def __init__(self,n1,n2):
self.ntfr = n1
self.ntto = n2
def parse(self,nontermelem):
self.ntfr = nontermelem.findtext('from')
self.ntto = nontermelem.findtext('to')
def setFrom(self,name):
self.ntfr = name
def setTo(self,name):
self.ntto = name
def getXml(self):
#print 'Getting the XML of nonterminal',self.data,'...'
self.ex = ET.Element('nonterminal')
ET.SubElement(self.ex,'from').text = self.ntfr
ET.SubElement(self.ex,'to' ).text = self.ntto
return self.ex
def __str__(self):
return self.ntfr+'→'+self.ntto
# the roots for reroot-reroot
class Roots:
def __init__(self,name,ns):
self.name = name
self.ns = ns[:]
def parse(self,el):
self.name = el.tag
self.ns = []
for nt in el.findall('root'):
self.ns.append(nt.text)
def getXml(self):
#print 'Getting the XML of nonterminal',self.data,'...'
self.ex = ET.Element(self.name)
for nt in self.ns:
ET.SubElement(self.ex,'root').text = nt
return self.ex
def __str__(self):
return ', '.join(self.ns)
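# Usage sketch (illustrative only, not part of the original module): build a
# minimal ΞBGF relationship holding a single step and dump its XML. The
# operator name 'rename-rename' and the nonterminal names are placeholders.
if __name__ == '__main__':
	seq = Sequence()
	step = Step('rename-rename')
	step.addParam(NonterminalFT('expr', 'expression'))
	seq.addStep(step)
	ET.dump(seq.getXml())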
| bsd-3-clause | 2,703,336,276,386,303,000 | 23.208955 | 81 | 0.65783 | false |
AutorestCI/azure-sdk-for-python | azure-batch/azure/batch/operations/task_operations.py | 1 | 49623 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class TaskOperations(object):
"""TaskOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2017-09-01.6.0".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01.6.0"
self.config = config
def add(
self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config):
"""Adds a task to the specified job.
The maximum lifetime of a task from addition to completion is 7 days.
If a task has not completed within 7 days of being added it will be
terminated by the Batch service and left in whatever state it was in at
that time.
:param job_id: The ID of the job to which the task is to be added.
:type job_id: str
:param task: The task to be added.
:type task: ~azure.batch.models.TaskAddParameter
:param task_add_options: Additional parameters for the operation
:type task_add_options: ~azure.batch.models.TaskAddOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if task_add_options is not None:
timeout = task_add_options.timeout
client_request_id = None
if task_add_options is not None:
client_request_id = task_add_options.client_request_id
return_client_request_id = None
if task_add_options is not None:
return_client_request_id = task_add_options.return_client_request_id
ocp_date = None
if task_add_options is not None:
ocp_date = task_add_options.ocp_date
# Construct URL
url = '/jobs/{jobId}/tasks'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct body
body_content = self._serialize.body(task, 'TaskAddParameter')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
def list(
self, job_id, task_list_options=None, custom_headers=None, raw=False, **operation_config):
"""Lists all of the tasks that are associated with the specified job.
For multi-instance tasks, information such as affinityId, executionInfo
and nodeInfo refer to the primary task. Use the list subtasks API to
retrieve information about subtasks.
:param job_id: The ID of the job.
:type job_id: str
:param task_list_options: Additional parameters for the operation
:type task_list_options: ~azure.batch.models.TaskListOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of CloudTask
:rtype:
~azure.batch.models.CloudTaskPaged[~azure.batch.models.CloudTask]
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
filter = None
if task_list_options is not None:
filter = task_list_options.filter
select = None
if task_list_options is not None:
select = task_list_options.select
expand = None
if task_list_options is not None:
expand = task_list_options.expand
max_results = None
if task_list_options is not None:
max_results = task_list_options.max_results
timeout = None
if task_list_options is not None:
timeout = task_list_options.timeout
client_request_id = None
if task_list_options is not None:
client_request_id = task_list_options.client_request_id
return_client_request_id = None
if task_list_options is not None:
return_client_request_id = task_list_options.return_client_request_id
ocp_date = None
if task_list_options is not None:
ocp_date = task_list_options.ocp_date
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/jobs/{jobId}/tasks'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def add_collection(
self, job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config):
"""Adds a collection of tasks to the specified job.
Note that each task must have a unique ID. The Batch service may not
return the results for each task in the same order the tasks were
submitted in this request. If the server times out or the connection is
closed during the request, the request may have been partially or fully
processed, or not at all. In such cases, the user should re-issue the
request. Note that it is up to the user to correctly handle failures
when re-issuing a request. For example, you should use the same task
IDs during a retry so that if the prior operation succeeded, the retry
will not create extra tasks unexpectedly. If the response contains any
tasks which failed to add, a client can retry the request. In a retry,
it is most efficient to resubmit only tasks that failed to add, and to
omit tasks that were successfully added on the first attempt. The
maximum lifetime of a task from addition to completion is 7 days. If a
task has not completed within 7 days of being added it will be
terminated by the Batch service and left in whatever state it was in at
that time.
:param job_id: The ID of the job to which the task collection is to be
added.
:type job_id: str
:param value: The collection of tasks to add. The total serialized
size of this collection must be less than 4MB. If it is greater than
4MB (for example if each task has 100's of resource files or
environment variables), the request will fail with code
'RequestBodyTooLarge' and should be retried again with fewer tasks.
:type value: list[~azure.batch.models.TaskAddParameter]
:param task_add_collection_options: Additional parameters for the
operation
:type task_add_collection_options:
~azure.batch.models.TaskAddCollectionOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TaskAddCollectionResult or ClientRawResponse if raw=true
:rtype: ~azure.batch.models.TaskAddCollectionResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if task_add_collection_options is not None:
timeout = task_add_collection_options.timeout
client_request_id = None
if task_add_collection_options is not None:
client_request_id = task_add_collection_options.client_request_id
return_client_request_id = None
if task_add_collection_options is not None:
return_client_request_id = task_add_collection_options.return_client_request_id
ocp_date = None
if task_add_collection_options is not None:
ocp_date = task_add_collection_options.ocp_date
task_collection = models.TaskAddCollectionParameter(value=value)
# Construct URL
url = '/jobs/{jobId}/addtaskcollection'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct body
body_content = self._serialize.body(task_collection, 'TaskAddCollectionParameter')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('TaskAddCollectionResult', response)
header_dict = {
'client-request-id': 'str',
'request-id': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
def delete(
self, job_id, task_id, task_delete_options=None, custom_headers=None, raw=False, **operation_config):
"""Deletes a task from the specified job.
When a task is deleted, all of the files in its directory on the
compute node where it ran are also deleted (regardless of the retention
time). For multi-instance tasks, the delete task operation applies
synchronously to the primary task; subtasks and their files are then
deleted asynchronously in the background.
:param job_id: The ID of the job from which to delete the task.
:type job_id: str
:param task_id: The ID of the task to delete.
:type task_id: str
:param task_delete_options: Additional parameters for the operation
:type task_delete_options: ~azure.batch.models.TaskDeleteOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if task_delete_options is not None:
timeout = task_delete_options.timeout
client_request_id = None
if task_delete_options is not None:
client_request_id = task_delete_options.client_request_id
return_client_request_id = None
if task_delete_options is not None:
return_client_request_id = task_delete_options.return_client_request_id
ocp_date = None
if task_delete_options is not None:
ocp_date = task_delete_options.ocp_date
if_match = None
if task_delete_options is not None:
if_match = task_delete_options.if_match
if_none_match = None
if task_delete_options is not None:
if_none_match = task_delete_options.if_none_match
if_modified_since = None
if task_delete_options is not None:
if_modified_since = task_delete_options.if_modified_since
if_unmodified_since = None
if task_delete_options is not None:
if_unmodified_since = task_delete_options.if_unmodified_since
# Construct URL
url = '/jobs/{jobId}/tasks/{taskId}'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str'),
'taskId': self._serialize.url("task_id", task_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
})
return client_raw_response
def get(
self, job_id, task_id, task_get_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified task.
For multi-instance tasks, information such as affinityId, executionInfo
and nodeInfo refer to the primary task. Use the list subtasks API to
retrieve information about subtasks.
:param job_id: The ID of the job that contains the task.
:type job_id: str
:param task_id: The ID of the task to get information about.
:type task_id: str
:param task_get_options: Additional parameters for the operation
:type task_get_options: ~azure.batch.models.TaskGetOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CloudTask or ClientRawResponse if raw=true
:rtype: ~azure.batch.models.CloudTask or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
select = None
if task_get_options is not None:
select = task_get_options.select
expand = None
if task_get_options is not None:
expand = task_get_options.expand
timeout = None
if task_get_options is not None:
timeout = task_get_options.timeout
client_request_id = None
if task_get_options is not None:
client_request_id = task_get_options.client_request_id
return_client_request_id = None
if task_get_options is not None:
return_client_request_id = task_get_options.return_client_request_id
ocp_date = None
if task_get_options is not None:
ocp_date = task_get_options.ocp_date
if_match = None
if task_get_options is not None:
if_match = task_get_options.if_match
if_none_match = None
if task_get_options is not None:
if_none_match = task_get_options.if_none_match
if_modified_since = None
if task_get_options is not None:
if_modified_since = task_get_options.if_modified_since
if_unmodified_since = None
if task_get_options is not None:
if_unmodified_since = task_get_options.if_unmodified_since
# Construct URL
url = '/jobs/{jobId}/tasks/{taskId}'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str'),
'taskId': self._serialize.url("task_id", task_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('CloudTask', response)
header_dict = {
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
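    # --- Illustrative usage sketch (not part of the generated client) ---
    # The operation above is normally reached through the ``task`` attribute of a
    # BatchServiceClient; ``credentials``, "my_job" and "my_task" below are
    # placeholder assumptions, not values defined in this module.
    #
    #   import azure.batch
    #   client = azure.batch.BatchServiceClient(credentials, "https://<account>.<region>.batch.azure.com")
    #   cloud_task = client.task.get("my_job", "my_task")
    #   print(cloud_task.id, cloud_task.state)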
def update(
self, job_id, task_id, task_update_options=None, constraints=None, custom_headers=None, raw=False, **operation_config):
"""Updates the properties of the specified task.
:param job_id: The ID of the job containing the task.
:type job_id: str
:param task_id: The ID of the task to update.
:type task_id: str
:param task_update_options: Additional parameters for the operation
:type task_update_options: ~azure.batch.models.TaskUpdateOptions
:param constraints: Constraints that apply to this task. If omitted,
the task is given the default constraints. For multi-instance tasks,
updating the retention time applies only to the primary task and not
subtasks.
:type constraints: ~azure.batch.models.TaskConstraints
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if task_update_options is not None:
timeout = task_update_options.timeout
client_request_id = None
if task_update_options is not None:
client_request_id = task_update_options.client_request_id
return_client_request_id = None
if task_update_options is not None:
return_client_request_id = task_update_options.return_client_request_id
ocp_date = None
if task_update_options is not None:
ocp_date = task_update_options.ocp_date
if_match = None
if task_update_options is not None:
if_match = task_update_options.if_match
if_none_match = None
if task_update_options is not None:
if_none_match = task_update_options.if_none_match
if_modified_since = None
if task_update_options is not None:
if_modified_since = task_update_options.if_modified_since
if_unmodified_since = None
if task_update_options is not None:
if_unmodified_since = task_update_options.if_unmodified_since
task_update_parameter = models.TaskUpdateParameter(constraints=constraints)
# Construct URL
url = '/jobs/{jobId}/tasks/{taskId}'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str'),
'taskId': self._serialize.url("task_id", task_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct body
body_content = self._serialize.body(task_update_parameter, 'TaskUpdateParameter')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
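    # --- Illustrative usage sketch (assumed names) ---
    # ``update`` replaces the task's constraints; omitting ``constraints`` resets
    # them to the service defaults, as described in the docstring above.
    #
    #   from azure.batch import models
    #   new_constraints = models.TaskConstraints(max_task_retry_count=3)
    #   client.task.update("my_job", "my_task", constraints=new_constraints)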
def list_subtasks(
self, job_id, task_id, task_list_subtasks_options=None, custom_headers=None, raw=False, **operation_config):
"""Lists all of the subtasks that are associated with the specified
multi-instance task.
If the task is not a multi-instance task then this returns an empty
collection.
:param job_id: The ID of the job.
:type job_id: str
:param task_id: The ID of the task.
:type task_id: str
:param task_list_subtasks_options: Additional parameters for the
operation
:type task_list_subtasks_options:
~azure.batch.models.TaskListSubtasksOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CloudTaskListSubtasksResult or ClientRawResponse if raw=true
:rtype: ~azure.batch.models.CloudTaskListSubtasksResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
select = None
if task_list_subtasks_options is not None:
select = task_list_subtasks_options.select
timeout = None
if task_list_subtasks_options is not None:
timeout = task_list_subtasks_options.timeout
client_request_id = None
if task_list_subtasks_options is not None:
client_request_id = task_list_subtasks_options.client_request_id
return_client_request_id = None
if task_list_subtasks_options is not None:
return_client_request_id = task_list_subtasks_options.return_client_request_id
ocp_date = None
if task_list_subtasks_options is not None:
ocp_date = task_list_subtasks_options.ocp_date
# Construct URL
url = '/jobs/{jobId}/tasks/{taskId}/subtasksinfo'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str'),
'taskId': self._serialize.url("task_id", task_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('CloudTaskListSubtasksResult', response)
header_dict = {
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
def terminate(
self, job_id, task_id, task_terminate_options=None, custom_headers=None, raw=False, **operation_config):
"""Terminates the specified task.
When the task has been terminated, it moves to the completed state. For
multi-instance tasks, the terminate task operation applies
synchronously to the primary task; subtasks are then terminated
asynchronously in the background.
:param job_id: The ID of the job containing the task.
:type job_id: str
:param task_id: The ID of the task to terminate.
:type task_id: str
:param task_terminate_options: Additional parameters for the operation
:type task_terminate_options: ~azure.batch.models.TaskTerminateOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if task_terminate_options is not None:
timeout = task_terminate_options.timeout
client_request_id = None
if task_terminate_options is not None:
client_request_id = task_terminate_options.client_request_id
return_client_request_id = None
if task_terminate_options is not None:
return_client_request_id = task_terminate_options.return_client_request_id
ocp_date = None
if task_terminate_options is not None:
ocp_date = task_terminate_options.ocp_date
if_match = None
if task_terminate_options is not None:
if_match = task_terminate_options.if_match
if_none_match = None
if task_terminate_options is not None:
if_none_match = task_terminate_options.if_none_match
if_modified_since = None
if task_terminate_options is not None:
if_modified_since = task_terminate_options.if_modified_since
if_unmodified_since = None
if task_terminate_options is not None:
if_unmodified_since = task_terminate_options.if_unmodified_since
# Construct URL
url = '/jobs/{jobId}/tasks/{taskId}/terminate'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str'),
'taskId': self._serialize.url("task_id", task_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
def reactivate(
self, job_id, task_id, task_reactivate_options=None, custom_headers=None, raw=False, **operation_config):
"""Reactivates a task, allowing it to run again even if its retry count
has been exhausted.
Reactivation makes a task eligible to be retried again up to its
maximum retry count. The task's state is changed to active. As the task
is no longer in the completed state, any previous exit code or failure
information is no longer available after reactivation. Each time a task
is reactivated, its retry count is reset to 0. Reactivation will fail
for tasks that are not completed or that previously completed
successfully (with an exit code of 0). Additionally, it will fail if
the job has completed (or is terminating or deleting).
:param job_id: The ID of the job containing the task.
:type job_id: str
:param task_id: The ID of the task to reactivate.
:type task_id: str
:param task_reactivate_options: Additional parameters for the
operation
:type task_reactivate_options:
~azure.batch.models.TaskReactivateOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if task_reactivate_options is not None:
timeout = task_reactivate_options.timeout
client_request_id = None
if task_reactivate_options is not None:
client_request_id = task_reactivate_options.client_request_id
return_client_request_id = None
if task_reactivate_options is not None:
return_client_request_id = task_reactivate_options.return_client_request_id
ocp_date = None
if task_reactivate_options is not None:
ocp_date = task_reactivate_options.ocp_date
if_match = None
if task_reactivate_options is not None:
if_match = task_reactivate_options.if_match
if_none_match = None
if task_reactivate_options is not None:
if_none_match = task_reactivate_options.if_none_match
if_modified_since = None
if task_reactivate_options is not None:
if_modified_since = task_reactivate_options.if_modified_since
if_unmodified_since = None
if task_reactivate_options is not None:
if_unmodified_since = task_reactivate_options.if_unmodified_since
# Construct URL
url = '/jobs/{jobId}/tasks/{taskId}/reactivate'
path_format_arguments = {
'jobId': self._serialize.url("job_id", job_id, 'str'),
'taskId': self._serialize.url("task_id", task_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
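    # --- Illustrative usage sketch (assumed names) ---
    # ``terminate`` moves a task to the completed state; ``reactivate`` resets its
    # retry count so a failed task may run again, subject to the rules above.
    #
    #   client.task.terminate("my_job", "my_task")
    #   client.task.reactivate("my_job", "my_task")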
| mit | 8,413,724,884,044,743,000 | 48.034585 | 148 | 0.632771 | false |
smart-facility/TransMob | scenario_builder/generate_dwellings.py | 1 | 2754 | #!/usr/bin/env python
import csv, sys
# First, some error handling on command line arguments:
def print_usage():
print("Usage: python generate_dwellings.py scenario_id")
print("where scenario_id = 1, 2, or 3")
sys.exit(1)
if (len(sys.argv)!=2):
print_usage();
scenario = 1
try:
scenario = int(sys.argv[1])
except:
print_usage()
else:
if ((scenario < 1) or (scenario > 3)):
print_usage()
# Define light rail TZs, for scenario 3:
light_rail_TZs = {199,201,207,210,211,237,509,514,515,519,520,521,525,526,527}
header = ''
tables = dict()
for year in range(2006,2037):
tables[year] = []
def scenario1(row):
result = list(range(5))
result[0] = row[0]
for numbedrooms in range(1,5):
result[numbedrooms] = row[numbedrooms]*1.005
return result
def scenario2_initial_year(row):
result=list(range(5))
result[0] = row[0]
result[1] = 1.1*row[1]
result[2] = 1.1*row[2]
result[3] = row[3]
result[4] = row[4]
return result
def scenario2_other_years(row):
return scenario1(row)
def scenario3_initial_year(row):
result=list(range(5))
result[0] = row[0]
if (result[0] in light_rail_TZs):
result[1]=1.1*row[1]
result[2]=1.1*row[2]
else:
result[1]=row[1]
result[2]=row[2]
result[3]=row[3]
result[4]=row[4]
return result
def scenario3_other_years(row):
return scenario1(row)
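# Worked example of the growth arithmetic above (illustrative numbers only):
# scenario 1 multiplies every bedroom-count column by 1.005 per year (0.5% growth),
# so a travel zone with 100 one-bedroom dwellings in 2006 ends up with roughly
# 100 * 1.005 ** 30 ~= 116 dwellings by 2036. Scenarios 2 and 3 additionally apply a
# one-off 10% uplift to the 1- and 2-bedroom counts in 2006 (scenario 2 for every
# zone, scenario 3 only for the light rail zones) and then grow like scenario 1.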
with open('2006.csv') as csvfile:
reader = csv.reader(csvfile)
header = next(reader)
for year in range(2006,2037):
tables[year].append(header)
for row in reader:
tables[2006].append([int(x) for x in row])
csvfile.close()
print(tables[2006])
if (scenario == 1):
for year in range(2007,2037):
for row in tables[year-1][1:]:
tables[year].append(scenario1(row))
if (scenario == 2):
for rowidx in range(1,len(tables[2006])):
tables[2006][rowidx] = scenario2_initial_year(tables[2006][rowidx])
for year in range(2007,2037):
for row in tables[year-1][1:]:
tables[year].append(scenario2_other_years(row))
if (scenario == 3):
for rowidx in range(1,len(tables[2006])):
tables[2006][rowidx] = scenario3_initial_year(tables[2006][rowidx])
for year in range(2007,2037):
for row in tables[year-1][1:]:
tables[year].append(scenario3_other_years(row))
for year in range(2006,2037):
for rowidx in range(1,len(tables[year])):
tables[year][rowidx] = [str(round(x)) for x in tables[year][rowidx]]
print(tables[2008])
for year in range(2006,2037):
with open(str(year)+'.csv','w') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(tables[year])
csvfile.close()
| lgpl-3.0 | -1,133,053,417,790,501,000 | 23.589286 | 78 | 0.61801 | false |
kolanos/iputil | iputil/filter.py | 1 | 2243 | import inspect
import itertools
import json
import operator
import os
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'in': operator.contains,
'nin': lambda x, y: not operator.contains(x, y),
}
def can_take_n_args(func, n=2):
"""Returns true if the provided function can accept two arguments."""
(pos, args, kwargs, defaults) = inspect.getargspec(func)
if args is not None or len(pos) >= n:
return True
return False
def query(d, key, val, operator='==', keynotfound=None):
"""Performs a query on a list of dicts useing the provided operator."""
d = itertools.tee(d, 2)[1]
if callable(operator):
if not can_take_n_args(operator, 2):
raise ValueError('operator must take at least 2 arguments')
op = operator
else:
op = OPERATORS.get(operator, None)
if not op:
raise ValueError('operator must be one of %r' % OPERATORS)
def try_op(func, x, y):
try:
result = func(x, y)
return result
except Exception:
return False
return (x for x in d if try_op(op, x.get(key, keynotfound), val))
class Query(object):
"""
Helper class to make queries chainable. Inspired by SQLAlchemy's
generative selects.
"""
def __init__(self, d):
self.d = itertools.tee(d, 2)[1]
def to_list(self):
return list(itertools.tee(self.d, 2)[1])
def query(self, *args, **kwargs):
return Query(query(self.d, *args, **kwargs))
def filter_ips(cache_path, query):
"""Filter IPs using the provided query parameters"""
if not os.path.exists(cache_path) or not query:
return []
with open(cache_path, 'rb') as f:
cache = json.loads(f.read())
results = []
or_clauses = query.split('or')
for or_clause in or_clauses:
q = Query(cache)
and_clauses = or_clause.split('and')
for and_clause in and_clauses:
parts = and_clause.split(None, 2)
if len(parts) == 3:
q = q.query(parts[0].lower(), parts[2], parts[1])
results = results + q.to_list()
return results
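# Illustrative usage sketch (the cache path and field names are assumptions):
# each clause in the query string must look like "<field> <operator> <value>",
# joined with "and"/"or", e.g.
#
#   matches = filter_ips('/tmp/ip_cache.json', 'country == US and city != Denver')
#   narrowed = Query(matches).query('isp', 'Comcast', '==').to_list()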
| mit | 4,969,658,122,550,222,000 | 26.691358 | 75 | 0.585823 | false |
gunny26/datalogger | bin/generate_tsas.py | 1 | 5729 | #!/usr/bin/python
from __future__ import print_function
import cProfile
import pstats
import sys
import gc
import datetime
import logging
logging.basicConfig(level=logging.INFO)
import argparse
# own modules
from datalogger import DataLogger as DataLogger
from datalogger import TimeseriesArrayStats as TimeseriesArrayStats
from datalogger import DataLoggerRawFileMissing as DataLoggerRawFileMissing
def gen_caches(project, tablename, datestring):
datalogger = DataLogger(basedir, project, tablename)
caches = datalogger.get_caches(datestring)
suffix = "%s/%s/%s\t" % (datestring, project, tablename)
data = None
if caches["tsa"]["raw"] is None:
if len(caches["tsa"]["keys"]) == 0:
logging.info("%s RAW Data is missing", suffix)
else:
logging.debug("%s RAW Data is archived, tsa archive exists already", suffix)
else:
if len(caches["tsa"]["keys"]) == 0:
logging.info("%s TSA Archive missing, calling get_tsa and load_tsastats", suffix)
data = datalogger.load_tsa(datestring)
else:
if len(caches["tsastat"]["keys"]) == 0:
logging.info("%s TSASTAT Archive missing, calling load_tsastats", suffix)
data = datalogger.load_tsastats(datestring)
else:
if len(caches["ts"]["keys"]) == 0:
logging.info("%s there are no ts archives, something went wrong, or tsa is completely empty, calling load_tsastats", suffix)
data = datalogger.load_tsastats(datestring, cleancache=True)
else:
logging.debug("%s All fine", suffix)
if caches["quantile"]["exists"] is not True:
logging.info("%s Quantile archive is missing, calling load_quantile", suffix)
#data = datalogger.load_quantile(datestring)
del data
del caches
del datalogger
#print(gc.get_count())
def main():
for datestring in tuple(DataLogger.datewalker(startdate, args.enddate)):
start_ts, stop_ts = DataLogger.get_ts_for_datestring(datestring)
logging.debug("working on datestring %s (from %s to %s)", datestring, start_ts, stop_ts)
for project in DataLogger.get_projects(args.basedir):
if args.project is not None:
if project != args.project:
logging.debug("skipping project %s", project)
continue
logging.debug("working on project %s", project)
for tablename in DataLogger.get_tablenames(args.basedir, project):
if args.tablename is not None:
if tablename != args.tablename:
logging.debug("skipping tablename %s", tablename)
continue
logging.debug("working on tablename %s", tablename)
if args.object is not None:
if args.object == "tsastats":
datalogger = DataLogger(basedir, project, tablename)
try:
tsa = datalogger.load_tsastats(datestring, cleancache=True)
except DataLoggerRawFileMissing:
pass
else:
# normal cache check, not trying to recreate
gen_caches(project, tablename, datestring)
if __name__ == "__main__":
basedir = "/var/rrd"
yesterday_datestring = (datetime.date.today() - datetime.timedelta(1)).isoformat()
parser = argparse.ArgumentParser(description='generate TimeseriesArrays on local backend')
parser.add_argument('--basedir', default="/var/rrd", help="basedirectory of datalogger data on local machine, default : %(default)s")
parser.add_argument("-b", '--back', help="how many days back from now")
parser.add_argument("-s", '--startdate', help="start date in isoformat YYYY-MM-DD")
parser.add_argument("-e", '--enddate', default=yesterday_datestring, help="stop date in isoformat YYYY-MM-DD, default : %(default)s")
parser.add_argument("-q", '--quiet', action='store_true', help="set to loglevel ERROR")
parser.add_argument("-v", '--verbose', action='store_true', help="set to loglevel DEBUG")
parser.add_argument("-p", '--project', help="process only this project name")
parser.add_argument("-t", '--tablename', help="process only this tablename")
parser.add_argument("-o", '--object', help="object to recreate")
parser.add_argument("--profile", action="store_true", help="use cProfile to start main")
args = parser.parse_args()
if args.quiet is True:
logging.getLogger("").setLevel(logging.ERROR)
if args.verbose is True:
logging.getLogger("").setLevel(logging.DEBUG)
logging.debug(args)
if (args.back is not None) == (args.startdate is not None):
logging.error("option -b and -e are mutual exclusive, use only one")
sys.exit(1)
startdate = None
if args.back is not None:
startdate = (datetime.date.today() - datetime.timedelta(int(args.back))).isoformat()
elif args.startdate is not None:
startdate = args.startdate
else:
logging.error("you have to provide either -b or -s")
sys.exit(1)
if args.profile is True:
logging.info("profiling enabled")
pstatfilename = "profile.stat"
cProfile.run('main()', pstatfilename)
stats = pstats.Stats(pstatfilename)
stats.strip_dirs()
stats.sort_stats("cumulative")
stats.print_stats()
logging.info("INCOMING CALLERS")
stats.print_callers()
logging.info("OUTGOING CALLEES")
stats.print_callees()
else:
main()
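# Illustrative invocations (project/table names are assumptions):
#
#   python generate_tsas.py -b 3                          # last three days, all projects
#   python generate_tsas.py -s 2016-01-01 -p vicenter -t hostSystemCpuStats
#
# Exactly one of -b/--back or -s/--startdate must be given; --profile wraps main()
# in cProfile and prints caller/callee statistics afterwards.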
| apache-2.0 | -2,336,117,100,546,261,500 | 47.142857 | 144 | 0.618782 | false |
googleapis/python-service-control | google/cloud/servicecontrol_v1/__init__.py | 1 | 2195 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.quota_controller import QuotaControllerClient
from .services.quota_controller import QuotaControllerAsyncClient
from .services.service_controller import ServiceControllerClient
from .services.service_controller import ServiceControllerAsyncClient
from .types.check_error import CheckError
from .types.distribution import Distribution
from .types.http_request import HttpRequest
from .types.log_entry import LogEntry
from .types.log_entry import LogEntryOperation
from .types.log_entry import LogEntrySourceLocation
from .types.metric_value import MetricValue
from .types.metric_value import MetricValueSet
from .types.operation import Operation
from .types.quota_controller import AllocateQuotaRequest
from .types.quota_controller import AllocateQuotaResponse
from .types.quota_controller import QuotaError
from .types.quota_controller import QuotaOperation
from .types.service_controller import CheckRequest
from .types.service_controller import CheckResponse
from .types.service_controller import ReportRequest
from .types.service_controller import ReportResponse
__all__ = (
"QuotaControllerAsyncClient",
"ServiceControllerAsyncClient",
"AllocateQuotaRequest",
"AllocateQuotaResponse",
"CheckError",
"CheckRequest",
"CheckResponse",
"Distribution",
"HttpRequest",
"LogEntry",
"LogEntryOperation",
"LogEntrySourceLocation",
"MetricValue",
"MetricValueSet",
"Operation",
"QuotaControllerClient",
"QuotaError",
"QuotaOperation",
"ReportRequest",
"ReportResponse",
"ServiceControllerClient",
)
| apache-2.0 | 2,142,090,524,948,124,200 | 34.403226 | 74 | 0.781777 | false |
xianjunzhengbackup/code | data science/machine_learning_for_the_web/chapter_8/movie_reviews_analizer_app/scrapy_spider/spiders/recursive_link_results.py | 1 | 2856 | '''
usage: scrapy runspider recursive_link_results.py (or from root folder: scrapy crawl scrapy_spider_recursive)
'''
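# The spider receives its constructor arguments through scrapy's -a switches;
# the URL and id below are illustrative values only:
#
#   scrapy crawl scrapy_spider_recursive -a url_list=http://example.com -a search_id=1
#
# url_list may contain several comma-separated start URLs and search_id must be the
# primary key of an existing SearchItem record.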
#from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
from scrapy_spider.items import PageItem,LinkItem,SearchItem
class Search(CrawlSpider):
# Parameters set used for spider crawling
name = 'scrapy_spider_recursive'
def __init__(self,url_list,search_id):#specified by -a
        # REMARK: if allowed_domains is not set then ALL domains are allowed
self.start_urls = url_list.split(',')
self.search_id = int(search_id)
#allow any link but the ones with different font size(repetitions)
self.rules = (
Rule(LinkExtractor(allow=(),deny=('fontSize=*','infoid=*','SortBy=*', ),unique=True), callback='parse_item', follow=True),
)
super(Search, self).__init__(url_list)
def parse_item(self, response):
sel = Selector(response)
## Get meta info from website
title = sel.xpath('//title/text()').extract()
if len(title)>0:
title = title[0].encode('utf-8')
contents = sel.xpath('/html/head/meta[@name="description"]/@content').extract()
content = ' '.join([c.encode('utf-8') for c in contents]).strip()
fromurl = response.request.headers['Referer']
tourl = response.url
depth = response.request.meta['depth']
#get search item
search_item = SearchItem.django_model.objects.get(id=self.search_id)
#newpage
if not PageItem.django_model.objects.filter(url=tourl).exists():
newpage = PageItem()
newpage['searchterm'] = search_item
newpage['title'] = title
newpage['content'] = content
newpage['url'] = tourl
newpage['depth'] = depth
newpage.save()#cant use pipeline cause the execution can finish here
print fromurl,'--title:',title,'-',response.url,' depth:',depth
#print contents
#if( int(depth)> 1):
# print fromurl,'--title:',title,'-',response.url,' depth:',depth
#get from_id,to_id
from_page = PageItem.django_model.objects.get(url=fromurl)
from_id = from_page.id
to_page = PageItem.django_model.objects.get(url=tourl)
to_id = to_page.id
#newlink
if not LinkItem.django_model.objects.filter(from_id=from_id).filter(to_id=to_id).exists():
newlink = LinkItem()
newlink['searchterm'] = search_item
newlink['from_id'] = from_id
newlink['to_id'] = to_id
newlink.save()
| mit | -7,166,454,287,097,851,000 | 34.259259 | 135 | 0.597689 | false |
heckj/redisco | redisco/tests/__init__.py | 1 | 1709 | import os
import unittest
from redisco.containerstests import (SetTestCase, ListTestCase, TypedListTestCase, SortedSetTestCase, HashTestCase)
from redisco.models.basetests import (ModelTestCase, DateFieldTestCase, FloatFieldTestCase,
BooleanFieldTestCase, ListFieldTestCase, ReferenceFieldTestCase,
DateTimeFieldTestCase, CounterFieldTestCase, CharFieldTestCase,
MutexTestCase)
import redisco
REDIS_DB = int(os.environ.get('REDIS_DB', 15)) # WARNING TESTS FLUSHDB!!!
REDIS_PORT = int(os.environ.get('REDIS_PORT', 6379))
redisco.connection_setup(host="localhost", port=REDIS_PORT, db=REDIS_DB)
typed_list_suite = unittest.TestLoader().loadTestsFromTestCase(TypedListTestCase)
def all_tests():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SetTestCase))
suite.addTest(unittest.makeSuite(ListTestCase))
suite.addTest(unittest.makeSuite(TypedListTestCase))
suite.addTest(unittest.makeSuite(SortedSetTestCase))
suite.addTest(unittest.makeSuite(ModelTestCase))
suite.addTest(unittest.makeSuite(DateFieldTestCase))
suite.addTest(unittest.makeSuite(FloatFieldTestCase))
suite.addTest(unittest.makeSuite(BooleanFieldTestCase))
suite.addTest(unittest.makeSuite(ListFieldTestCase))
suite.addTest(unittest.makeSuite(ReferenceFieldTestCase))
suite.addTest(unittest.makeSuite(DateTimeFieldTestCase))
suite.addTest(unittest.makeSuite(CounterFieldTestCase))
#suite.addTest(unittest.makeSuite(MutexTestCase))
suite.addTest(unittest.makeSuite(HashTestCase))
suite.addTest(unittest.makeSuite(CharFieldTestCase))
return suite
| mit | -2,773,284,535,846,562,000 | 49.264706 | 115 | 0.752487 | false |
jamessanford/assetto-corsa-hot-plugin | hot_app/Pyro4/util.py | 1 | 30379 | """
Miscellaneous utilities.
Pyro - Python Remote Objects. Copyright by Irmen de Jong ([email protected]).
"""
import sys
import zlib
import logging
import linecache
import traceback
import inspect
import Pyro4.errors
import Pyro4.message
try:
import copyreg
except ImportError:
import copy_reg as copyreg
log = logging.getLogger("Pyro4.util")
def getPyroTraceback(ex_type=None, ex_value=None, ex_tb=None):
"""Returns a list of strings that form the traceback information of a
Pyro exception. Any remote Pyro exception information is included.
Traceback information is automatically obtained via ``sys.exc_info()`` if
you do not supply the objects yourself."""
def formatRemoteTraceback(remote_tb_lines):
result = [" +--- This exception occured remotely (Pyro) - Remote traceback:"]
for line in remote_tb_lines:
if line.endswith("\n"):
line = line[:-1]
lines = line.split("\n")
for line2 in lines:
result.append("\n | ")
result.append(line2)
result.append("\n +--- End of remote traceback\n")
return result
try:
if ex_type is not None and ex_value is None and ex_tb is None:
# possible old (3.x) call syntax where caller is only providing exception object
if type(ex_type) is not type:
raise TypeError("invalid argument: ex_type should be an exception type, or just supply no arguments at all")
if ex_type is None and ex_tb is None:
ex_type, ex_value, ex_tb = sys.exc_info()
remote_tb = getattr(ex_value, "_pyroTraceback", None)
local_tb = formatTraceback(ex_type, ex_value, ex_tb, Pyro4.config.DETAILED_TRACEBACK)
if remote_tb:
remote_tb = formatRemoteTraceback(remote_tb)
return local_tb + remote_tb
else:
# hmm. no remote tb info, return just the local tb.
return local_tb
finally:
# clean up cycle to traceback, to allow proper GC
del ex_type, ex_value, ex_tb
def formatTraceback(ex_type=None, ex_value=None, ex_tb=None, detailed=False):
"""Formats an exception traceback. If you ask for detailed formatting,
the result will contain info on the variables in each stack frame.
    You don't have to provide the exception info objects; if you omit them,
this function will obtain them itself using ``sys.exc_info()``."""
if ex_type is not None and ex_value is None and ex_tb is None:
# possible old (3.x) call syntax where caller is only providing exception object
if type(ex_type) is not type:
raise TypeError("invalid argument: ex_type should be an exception type, or just supply no arguments at all")
if ex_type is None and ex_tb is None:
ex_type, ex_value, ex_tb = sys.exc_info()
if detailed and sys.platform != "cli": # detailed tracebacks don't work in ironpython (most of the local vars are omitted)
def makeStrValue(value):
try:
return repr(value)
except:
try:
return str(value)
except:
return "<ERROR>"
try:
result = ["-" * 52 + "\n"]
result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value))
result.append(" Extended stacktrace follows (most recent call last)\n")
skipLocals = True # don't print the locals of the very first stack frame
while ex_tb:
frame = ex_tb.tb_frame
sourceFileName = frame.f_code.co_filename
if "self" in frame.f_locals:
location = "%s.%s" % (frame.f_locals["self"].__class__.__name__, frame.f_code.co_name)
else:
location = frame.f_code.co_name
result.append("-" * 52 + "\n")
result.append("File \"%s\", line %d, in %s\n" % (sourceFileName, ex_tb.tb_lineno, location))
result.append("Source code:\n")
result.append(" " + linecache.getline(sourceFileName, ex_tb.tb_lineno).strip() + "\n")
if not skipLocals:
names = set()
names.update(getattr(frame.f_code, "co_varnames", ()))
names.update(getattr(frame.f_code, "co_names", ()))
names.update(getattr(frame.f_code, "co_cellvars", ()))
names.update(getattr(frame.f_code, "co_freevars", ()))
result.append("Local values:\n")
for name2 in sorted(names):
if name2 in frame.f_locals:
value = frame.f_locals[name2]
result.append(" %s = %s\n" % (name2, makeStrValue(value)))
if name2 == "self":
# print the local variables of the class instance
for name3, value in vars(value).items():
result.append(" self.%s = %s\n" % (name3, makeStrValue(value)))
skipLocals = False
ex_tb = ex_tb.tb_next
result.append("-" * 52 + "\n")
result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value))
result.append("-" * 52 + "\n")
return result
except Exception:
return ["-" * 52 + "\nError building extended traceback!!! :\n",
"".join(traceback.format_exception(*sys.exc_info())) + '-' * 52 + '\n',
"Original Exception follows:\n",
"".join(traceback.format_exception(ex_type, ex_value, ex_tb))]
else:
# default traceback format.
return traceback.format_exception(ex_type, ex_value, ex_tb)
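# Illustrative usage sketch: installing the Pyro-aware hook prints remote tracebacks
# automatically (this mirrors the excepthook() helper defined further below).
#
#   import sys
#   import Pyro4.util
#   sys.excepthook = Pyro4.util.excepthook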
all_exceptions = {}
if sys.version_info < (3, 0):
import exceptions
for name, t in vars(exceptions).items():
if type(t) is type and issubclass(t, BaseException):
all_exceptions[name] = t
else:
import builtins
for name, t in vars(builtins).items():
if type(t) is type and issubclass(t, BaseException):
all_exceptions[name] = t
for name, t in vars(Pyro4.errors).items():
if type(t) is type and issubclass(t, Pyro4.errors.PyroError):
all_exceptions[name] = t
class SerializerBase(object):
"""Base class for (de)serializer implementations (which must be thread safe)"""
__custom_class_to_dict_registry = {}
__custom_dict_to_class_registry = {}
def serializeData(self, data, compress=False):
"""Serialize the given data object, try to compress if told so.
Returns a tuple of the serialized data (bytes) and a bool indicating if it is compressed or not."""
data = self.dumps(data)
return self.__compressdata(data, compress)
def deserializeData(self, data, compressed=False):
"""Deserializes the given data (bytes). Set compressed to True to decompress the data first."""
if compressed:
data = zlib.decompress(data)
return self.loads(data)
def serializeCall(self, obj, method, vargs, kwargs, compress=False):
"""Serialize the given method call parameters, try to compress if told so.
Returns a tuple of the serialized data and a bool indicating if it is compressed or not."""
data = self.dumpsCall(obj, method, vargs, kwargs)
return self.__compressdata(data, compress)
def deserializeCall(self, data, compressed=False):
"""Deserializes the given call data back to (object, method, vargs, kwargs) tuple.
Set compressed to True to decompress the data first."""
if compressed:
data = zlib.decompress(data)
return self.loadsCall(data)
def loads(self, data):
raise NotImplementedError("implement in subclass")
def loadsCall(self, data):
raise NotImplementedError("implement in subclass")
def dumps(self, data):
raise NotImplementedError("implement in subclass")
def dumpsCall(self, obj, method, vargs, kwargs):
raise NotImplementedError("implement in subclass")
def __compressdata(self, data, compress):
if not compress or len(data) < 200:
return data, False # don't waste time compressing small messages
compressed = zlib.compress(data)
if len(compressed) < len(data):
return compressed, True
return data, False
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
raise NotImplementedError("implement in subclass")
@classmethod
def register_class_to_dict(cls, clazz, converter, serpent_too=True):
"""Registers a custom function that returns a dict representation of objects of the given class.
The function is called with a single parameter; the object to be converted to a dict."""
cls.__custom_class_to_dict_registry[clazz] = converter
if serpent_too:
try:
get_serializer_by_id(SerpentSerializer.serializer_id)
import serpent
def serpent_converter(obj, serializer, stream, level):
d = converter(obj)
serializer.ser_builtins_dict(d, stream, level)
serpent.register_class(clazz, serpent_converter)
except Pyro4.errors.ProtocolError:
pass
@classmethod
def unregister_class_to_dict(cls, clazz):
"""Removes the to-dict conversion function registered for the given class. Objects of the class
will be serialized by the default mechanism again."""
if clazz in cls.__custom_class_to_dict_registry:
del cls.__custom_class_to_dict_registry[clazz]
try:
get_serializer_by_id(SerpentSerializer.serializer_id)
import serpent
serpent.unregister_class(clazz)
except Pyro4.errors.ProtocolError:
pass
@classmethod
def register_dict_to_class(cls, classname, converter):
"""Registers a custom converter function that creates objects from a dict with the given classname tag in it.
The function is called with two parameters: the classname and the dictionary to convert to an instance of the class."""
cls.__custom_dict_to_class_registry[classname] = converter
@classmethod
def unregister_dict_to_class(cls, classname):
"""Removes the converter registered for the given classname. Dicts with that classname tag
will be deserialized by the default mechanism again."""
if classname in cls.__custom_dict_to_class_registry:
del cls.__custom_dict_to_class_registry[classname]
@classmethod
def class_to_dict(cls, obj):
"""Convert a non-serializable object to a dict. Mostly borrowed from serpent."""
for clazz in cls.__custom_class_to_dict_registry:
if isinstance(obj, clazz):
return cls.__custom_class_to_dict_registry[clazz](obj)
if type(obj) in (set, dict, tuple, list):
raise ValueError("couldn't serialize sequence " + str(obj.__class__) + ", one of its elements is unserializable")
if hasattr(obj, "_pyroDaemon"):
obj._pyroDaemon = None
if isinstance(obj, BaseException):
# special case for exceptions
return {
"__class__": obj.__class__.__module__ + "." + obj.__class__.__name__,
"__exception__": True,
"args": obj.args,
"attributes": vars(obj) # add custom exception attributes
}
try:
value = obj.__getstate__()
except AttributeError:
pass
else:
if isinstance(value, dict):
return value
try:
value = dict(vars(obj)) # make sure we can serialize anything that resembles a dict
value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__
return value
except TypeError:
if hasattr(obj, "__slots__"):
# use the __slots__ instead of the vars dict
value = {}
for slot in obj.__slots__:
value[slot] = getattr(obj, slot)
value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__
return value
else:
raise Pyro4.errors.ProtocolError("don't know how to serialize class " + str(obj.__class__) + ". Give it vars() or an appropriate __getstate__")
@classmethod
def dict_to_class(cls, data):
"""
Recreate an object out of a dict containing the class name and the attributes.
Only a fixed set of classes are recognized.
"""
classname = data.get("__class__", "<unknown>")
if isinstance(classname, bytes):
classname = classname.decode("utf-8")
if classname in cls.__custom_dict_to_class_registry:
converter = cls.__custom_dict_to_class_registry[classname]
return converter(classname, data)
if "__" in classname:
raise Pyro4.errors.SecurityError("refused to deserialize types with double underscores in their name: " + classname)
# because of efficiency reasons the constructors below are hardcoded here instead of added on a per-class basis to the dict-to-class registry
if classname.startswith("Pyro4.core."):
if classname == "Pyro4.core.URI":
uri = Pyro4.core.URI.__new__(Pyro4.core.URI)
uri.__setstate_from_dict__(data["state"])
return uri
elif classname == "Pyro4.core.Proxy":
proxy = Pyro4.core.Proxy.__new__(Pyro4.core.Proxy)
proxy.__setstate_from_dict__(data["state"])
return proxy
elif classname == "Pyro4.core.Daemon":
daemon = Pyro4.core.Daemon.__new__(Pyro4.core.Daemon)
daemon.__setstate_from_dict__(data["state"])
return daemon
elif classname.startswith("Pyro4.util."):
if classname == "Pyro4.util.PickleSerializer":
return PickleSerializer()
elif classname == "Pyro4.util.MarshalSerializer":
return MarshalSerializer()
elif classname == "Pyro4.util.JsonSerializer":
return JsonSerializer()
elif classname == "Pyro4.util.SerpentSerializer":
return SerpentSerializer()
elif classname.startswith("Pyro4.errors."):
errortype = getattr(Pyro4.errors, classname.split('.', 2)[2])
if issubclass(errortype, Pyro4.errors.PyroError):
return SerializerBase.make_exception(errortype, data)
elif classname == "Pyro4.futures._ExceptionWrapper":
ex = SerializerBase.dict_to_class(data["exception"])
return Pyro4.futures._ExceptionWrapper(ex)
elif data.get("__exception__", False):
if classname in all_exceptions:
return SerializerBase.make_exception(all_exceptions[classname], data)
# python 2.x: exceptions.ValueError
# python 3.x: builtins.ValueError
# translate to the appropriate namespace...
namespace, short_classname = classname.split('.', 1)
if namespace in ("builtins", "exceptions"):
if sys.version_info < (3, 0):
exceptiontype = getattr(exceptions, short_classname)
if issubclass(exceptiontype, BaseException):
return SerializerBase.make_exception(exceptiontype, data)
else:
exceptiontype = getattr(builtins, short_classname)
if issubclass(exceptiontype, BaseException):
return SerializerBase.make_exception(exceptiontype, data)
# try one of the serializer classes
for serializer in _serializers.values():
if classname == serializer.__class__.__name__:
return serializer
raise Pyro4.errors.ProtocolError("unsupported serialized class: " + classname)
@staticmethod
def make_exception(exceptiontype, data):
ex = exceptiontype(*data["args"])
if "attributes" in data:
# restore custom attributes on the exception object
for attr, value in data["attributes"].items():
setattr(ex, attr, value)
return ex
def recreate_classes(self, literal):
t = type(literal)
if t is set:
return set([self.recreate_classes(x) for x in literal])
if t is list:
return [self.recreate_classes(x) for x in literal]
if t is tuple:
return tuple(self.recreate_classes(x) for x in literal)
if t is dict:
if "__class__" in literal:
return self.dict_to_class(literal)
result = {}
for key, value in literal.items():
result[key] = self.recreate_classes(value)
return result
return literal
def __eq__(self, other):
"""this equality method is only to support the unit tests of this class"""
return isinstance(other, SerializerBase) and vars(self) == vars(other)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = object.__hash__
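# Illustrative usage sketch ("Point" and "mymodule" are assumed names): the registries
# above let applications teach the serializers how to round-trip a custom class.
#
#   class Point(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#
#   SerializerBase.register_class_to_dict(
#       Point, lambda p: {"__class__": "mymodule.Point", "x": p.x, "y": p.y})
#   SerializerBase.register_dict_to_class(
#       "mymodule.Point", lambda classname, d: Point(d["x"], d["y"]))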
class PickleSerializer(SerializerBase):
"""
A (de)serializer that wraps the Pickle serialization protocol.
It can optionally compress the serialized data, and is thread safe.
"""
serializer_id = Pyro4.message.SERIALIZER_PICKLE
def dumpsCall(self, obj, method, vargs, kwargs):
return pickle.dumps((obj, method, vargs, kwargs), Pyro4.config.PICKLE_PROTOCOL_VERSION)
def dumps(self, data):
return pickle.dumps(data, Pyro4.config.PICKLE_PROTOCOL_VERSION)
def loadsCall(self, data):
return pickle.loads(data)
def loads(self, data):
return pickle.loads(data)
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
def copyreg_function(obj):
return replacement_function(obj).__reduce__()
try:
copyreg.pickle(object_type, copyreg_function)
except TypeError:
pass
class MarshalSerializer(SerializerBase):
"""(de)serializer that wraps the marshal serialization protocol."""
serializer_id = Pyro4.message.SERIALIZER_MARSHAL
def dumpsCall(self, obj, method, vargs, kwargs):
return marshal.dumps((obj, method, vargs, kwargs))
def dumps(self, data):
try:
return marshal.dumps(data)
except (ValueError, TypeError):
return marshal.dumps(self.class_to_dict(data))
def loadsCall(self, data):
obj, method, vargs, kwargs = marshal.loads(data)
vargs = self.recreate_classes(vargs)
kwargs = self.recreate_classes(kwargs)
return obj, method, vargs, kwargs
def loads(self, data):
return self.recreate_classes(marshal.loads(data))
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
pass # marshal serializer doesn't support per-type hooks
class SerpentSerializer(SerializerBase):
"""(de)serializer that wraps the serpent serialization protocol."""
serializer_id = Pyro4.message.SERIALIZER_SERPENT
def dumpsCall(self, obj, method, vargs, kwargs):
return serpent.dumps((obj, method, vargs, kwargs), module_in_classname=True)
def dumps(self, data):
return serpent.dumps(data, module_in_classname=True)
def loadsCall(self, data):
obj, method, vargs, kwargs = serpent.loads(data)
vargs = self.recreate_classes(vargs)
kwargs = self.recreate_classes(kwargs)
return obj, method, vargs, kwargs
def loads(self, data):
return self.recreate_classes(serpent.loads(data))
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
def custom_serializer(object, serpent_serializer, outputstream, indentlevel):
replaced = replacement_function(object)
if replaced is object:
serpent_serializer.ser_default_class(replaced, outputstream, indentlevel)
else:
serpent_serializer._serialize(replaced, outputstream, indentlevel)
serpent.register_class(object_type, custom_serializer)
class JsonSerializer(SerializerBase):
"""(de)serializer that wraps the json serialization protocol."""
serializer_id = Pyro4.message.SERIALIZER_JSON
__type_replacements = {}
def dumpsCall(self, obj, method, vargs, kwargs):
data = {"object": obj, "method": method, "params": vargs, "kwargs": kwargs}
data = json.dumps(data, ensure_ascii=False, default=self.default)
return data.encode("utf-8")
def dumps(self, data):
data = json.dumps(data, ensure_ascii=False, default=self.default)
return data.encode("utf-8")
def loadsCall(self, data):
data = data.decode("utf-8")
data = json.loads(data)
vargs = self.recreate_classes(data["params"])
kwargs = self.recreate_classes(data["kwargs"])
return data["object"], data["method"], vargs, kwargs
def loads(self, data):
data = data.decode("utf-8")
return self.recreate_classes(json.loads(data))
def default(self, obj):
replacer = self.__type_replacements.get(type(obj), None)
if replacer:
obj = replacer(obj)
return self.class_to_dict(obj)
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
cls.__type_replacements[object_type] = replacement_function
"""The various serializers that are supported"""
_serializers = {}
_serializers_by_id = {}
def get_serializer(name):
try:
return _serializers[name]
except KeyError:
raise Pyro4.errors.ProtocolError("serializer '%s' is unknown or not available" % name)
def get_serializer_by_id(sid):
try:
return _serializers_by_id[sid]
except KeyError:
raise Pyro4.errors.ProtocolError("no serializer available for id %d" % sid)
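# Illustrative usage sketch: applications normally pick the wire format through
# Pyro4.config.SERIALIZER ("serpent" by default); the helpers above let code fetch
# the matching serializer instance directly, e.g.
#
#   ser = get_serializer("json")
#   blob, compressed = ser.serializeData({"answer": 42})
#   assert ser.deserializeData(blob, compressed) == {"answer": 42}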
# determine the serializers that are supported
try:
import cPickle as pickle
except ImportError:
import pickle
assert Pyro4.config.PICKLE_PROTOCOL_VERSION >= 2, "pickle protocol needs to be 2 or higher"
_ser = PickleSerializer()
_serializers["pickle"] = _ser
_serializers_by_id[_ser.serializer_id] = _ser
import marshal
_ser = MarshalSerializer()
_serializers["marshal"] = _ser
_serializers_by_id[_ser.serializer_id] = _ser
try:
import json
_ser = JsonSerializer()
_serializers["json"] = _ser
_serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
pass
try:
import serpent
if '-' in serpent.__version__:
ver = serpent.__version__.split('-', 1)[0]
else:
ver = serpent.__version__
ver = tuple(map(int, ver.split(".")))
if ver < (1, 7):
raise RuntimeError("requires serpent 1.7 or better")
_ser = SerpentSerializer()
_serializers["serpent"] = _ser
_serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
log.warning("serpent serializer is not available")
pass
del _ser
def getAttribute(obj, attr):
"""
Resolves an attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '``_``'.
Doesn't resolve a dotted name, because that is a security vulnerability.
It treats it as a single attribute name (and the lookup will likely fail).
"""
if is_private_attribute(attr):
raise AttributeError("attempt to access private attribute '%s'" % attr)
else:
obj = getattr(obj, attr)
if not Pyro4.config.REQUIRE_EXPOSE or getattr(obj, "_pyroExposed", False):
return obj
raise AttributeError("attempt to access unexposed attribute '%s'" % attr)
def excepthook(ex_type, ex_value, ex_tb):
"""An exception hook you can use for ``sys.excepthook``, to automatically print remote Pyro tracebacks"""
traceback = "".join(getPyroTraceback(ex_type, ex_value, ex_tb))
sys.stderr.write(traceback)
def fixIronPythonExceptionForPickle(exceptionObject, addAttributes):
"""
Function to hack around a bug in IronPython where it doesn't pickle
exception attributes. We piggyback them into the exception's args.
Bug report is at http://ironpython.codeplex.com/workitem/30805
"""
if hasattr(exceptionObject, "args"):
if addAttributes:
# piggyback the attributes on the exception args instead.
ironpythonArgs = vars(exceptionObject)
ironpythonArgs["__ironpythonargs__"] = True
exceptionObject.args += (ironpythonArgs,)
else:
# check if there is a piggybacked object in the args
# if there is, extract the exception attributes from it.
if len(exceptionObject.args) > 0:
piggyback = exceptionObject.args[-1]
if type(piggyback) is dict and piggyback.get("__ironpythonargs__"):
del piggyback["__ironpythonargs__"]
exceptionObject.args = exceptionObject.args[:-1]
exceptionObject.__dict__.update(piggyback)
def get_exposed_members(obj, only_exposed=True):
"""
Return public and exposed members of the given object's class.
You can also provide a class directly.
Private members are ignored no matter what (names starting with underscore).
If only_exposed is True, only members tagged with the @expose decorator are
returned. If it is False, all public members are returned.
The return value consists of the exposed methods, exposed attributes, and methods
tagged as @oneway.
(All this is used as meta data that Pyro sends to the proxy if it asks for it)
"""
if not inspect.isclass(obj):
obj = obj.__class__
methods = set() # all methods
oneway = set() # oneway methods
attrs = set() # attributes
for m in dir(obj): # also lists names inherited from super classes
if is_private_attribute(m):
continue
v = getattr(obj, m)
if inspect.ismethod(v) or inspect.isfunction(v):
if getattr(v, "_pyroExposed", not only_exposed):
methods.add(m)
# check if the method is marked with the @Pyro4.oneway decorator:
if getattr(v, "_pyroOneway", False):
oneway.add(m)
elif inspect.isdatadescriptor(v):
func = getattr(v, "fget", None) or getattr(v, "fset", None) or getattr(v, "fdel", None)
if func is not None and getattr(func, "_pyroExposed", not only_exposed):
attrs.add(m)
# Note that we don't expose plain class attributes no matter what.
# it is a syntax error to add a decorator on them, and it is not possible
# to give them a _pyroExposed tag either.
# The way to expose attributes is by using properties for them.
# This automatically solves the protection/security issue: you have to
# explicitly decide to make an attribute into a @property (and to @expose it
    # if REQUIRE_EXPOSE=True) before it is remotely accessible.
return {
"methods": methods,
"oneway": oneway,
"attrs": attrs
}
def get_exposed_property_value(obj, propname, only_exposed=True):
"""
Return the value of an @exposed @property.
If the requested property is not a @property or not exposed,
an AttributeError is raised instead.
"""
v = getattr(obj.__class__, propname)
if inspect.isdatadescriptor(v):
if v.fget and getattr(v.fget, "_pyroExposed", not only_exposed):
return v.fget(obj)
raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname)
def set_exposed_property_value(obj, propname, value, only_exposed=True):
"""
Sets the value of an @exposed @property.
If the requested property is not a @property or not exposed,
an AttributeError is raised instead.
"""
v = getattr(obj.__class__, propname)
if inspect.isdatadescriptor(v):
pfunc = v.fget or v.fset or v.fdel
if v.fset and getattr(pfunc, "_pyroExposed", not only_exposed):
return v.fset(obj, value)
raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname)
_private_dunder_methods = frozenset([
"__init__", "__call__", "__new__", "__del__", "__repr__", "__unicode__",
"__str__", "__format__", "__nonzero__", "__bool__", "__coerce__",
"__cmp__", "__eq__", "__ne__", "__hash__",
"__dir__", "__enter__", "__exit__", "__copy__", "__deepcopy__", "__sizeof__",
"__getattr__", "__setattr__", "__hasattr__", "__getattribute__", "__delattr__",
"__instancecheck__", "__subclasscheck__", "__getinitargs__", "__getnewargs__",
"__getstate__", "__setstate__", "__reduce__", "__reduce_ex__",
"__getstate_for_dict__", "__setstate_from_dict__", "__subclasshook__"
])
def is_private_attribute(attr_name):
"""returns if the attribute name is to be considered private or not."""
if attr_name in _private_dunder_methods:
return True
if not attr_name.startswith('_'):
return False
if len(attr_name) > 4 and attr_name.startswith("__") and attr_name.endswith("__"):
return False
return True
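# Editor's note (illustrative sketch, not part of the original module): the
# rules above boil down to, for example,
#   is_private_attribute("_hidden")     -> True   (leading underscore)
#   is_private_attribute("__init__")    -> True   (listed dunder method)
#   is_private_attribute("__custom__")  -> False  (other dunders are allowed)
#   is_private_attribute("name")        -> False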
| mit | -6,785,843,132,841,132,000 | 41.151989 | 159 | 0.598703 | false |
KlubJagiellonski/pola-ai | data/blur_detection.py | 1 | 1735 | import argparse
import os
import shutil
import cv2
def blur_ratio(filename):
# Positive blur ratio - the lower the more blurred the photo is
image = cv2.imread(filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return cv2.Laplacian(gray, cv2.CV_64F).var()
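# Editor's note (illustrative sketch; "example.jpg" is a hypothetical path):
#   score = blur_ratio("example.jpg")
#   print("likely blurred" if score < 25 else "acceptably sharp")
# The threshold of 25 mirrors the default used by remove_blured() below.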
def blur_distribution(photo_dir):
# Returns blurs of all photos in a directory
blur_ratios = []
for subdir in os.listdir(photo_dir):
if subdir.startswith('.'):
continue
print(subdir)
for photo in os.listdir(os.path.join(photo_dir, subdir)):
if photo.startswith('.'):
continue
photo_path = os.path.join(photo_dir, subdir, photo)
blur_ratios.append(blur_ratio(photo_path))
return sorted(blur_ratios)
def remove_blured(src, dest, threshold=25, ratio=None):
# Copies src into dest and removes blurred photos from dest based on threshold or ratio
if ratio:
blurs = blur_distribution(src)
threshold = blurs[int(len(blurs) * ratio)]
print('Blur threshold: {}'.format(threshold))
shutil.copytree(src, dest)
for subdir in os.listdir(dest):
for photo in os.listdir(os.path.join(dest, subdir)):
photo_path = os.path.join(dest, subdir, photo)
blur = blur_ratio(photo_path)
if blur < threshold:
print('Remove photo {} with a blur score {}'.format(photo_path, blur))
os.remove(photo_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("source_dir", type=str)
parser.add_argument("dest_dir", type=str)
args = parser.parse_args()
remove_blured(args.source_dir, args.dest_dir, ratio=0.05)
| bsd-3-clause | -1,430,805,252,922,194,200 | 33.019608 | 91 | 0.631124 | false |
modoboa/modoboa-installer | modoboa_installer/system.py | 1 | 1659 | """System related functions."""
import grp
import pwd
import sys
from . import utils
def create_user(name, home=None):
"""Create a new system user."""
try:
pwd.getpwnam(name)
except KeyError:
pass
else:
extra_message = "."
if home:
extra_message = (
" but please make sure the {} directory exists.".format(
home))
utils.printcolor(
"User {} already exists, skipping creation{}".format(
name, extra_message), utils.YELLOW)
return
cmd = "useradd -m "
if home:
cmd += "-d {} ".format(home)
utils.exec_cmd("{} {}".format(cmd, name))
if home:
utils.exec_cmd("chmod 755 {}".format(home))
def add_user_to_group(user, group):
"""Add system user to group."""
try:
pwd.getpwnam(user)
except KeyError:
print("User {} does not exist".format(user))
sys.exit(1)
try:
grp.getgrnam(group)
except KeyError:
print("Group {} does not exist".format(group))
sys.exit(1)
utils.exec_cmd("usermod -a -G {} {}".format(group, user))
def enable_service(name):
"""Enable a service at startup."""
utils.exec_cmd("systemctl enable {}".format(name))
def enable_and_start_service(name):
"""Enable a start a service."""
enable_service(name)
code, output = utils.exec_cmd("service {} status".format(name))
action = "start" if code else "restart"
utils.exec_cmd("service {} {}".format(name, action))
def restart_service(name):
"""Restart a service."""
utils.exec_cmd("service {} restart".format(name))
| mit | 8,390,421,269,169,445,000 | 24.921875 | 72 | 0.575648 | false |
grollins/foldkin | foldkin/zam_protein.py | 1 | 1049 | import os.path
from zam.protein import Protein
from zam.sequence import SeqToAA1
DEFAULT_PDB_DIR = os.path.expanduser("~/Dropbox/11_28_2011/pdb")
def create_zam_protein_from_path(file_path):
"""docstring for create_zam_protein"""
p = Protein(file_path)
new_zam_protein = ZamProtein(p)
return new_zam_protein
def create_zam_protein_from_pdb_id(pdb_id):
"""docstring for create_zam_protein"""
file_path = os.path.join(DEFAULT_PDB_DIR, pdb_id + ".pdb")
p = Protein(file_path)
new_zam_protein = ZamProtein(p)
return new_zam_protein
class ZamProtein(object):
"""docstring for ZamProtein"""
def __init__(self, protein):
super(ZamProtein, self).__init__()
self.protein = protein
def __len__(self):
return len(self.protein)
def get_contact_list(self, residue_inds=None):
return self.protein.ResContactList(residue_inds)
def get_sequence(self):
return SeqToAA1(self.protein.Seq)
def compute_aco(self):
return self.protein.MeanContactOrder()
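# Illustrative usage (editor's sketch; "1abc" is a hypothetical PDB id whose
# file must exist under DEFAULT_PDB_DIR):
#   protein = create_zam_protein_from_pdb_id("1abc")
#   print(len(protein), protein.get_sequence(), protein.compute_aco())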
| bsd-2-clause | -148,482,932,432,346,660 | 27.351351 | 64 | 0.669209 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QSemaphore.py | 1 | 1205 | # encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QSemaphore(): # skipped bases: <class 'sip.simplewrapper'>
""" QSemaphore(int n=0) """
def acquire(self, int_n=1): # real signature unknown; restored from __doc__
""" QSemaphore.acquire(int n=1) """
pass
def available(self): # real signature unknown; restored from __doc__
""" QSemaphore.available() -> int """
return 0
def release(self, int_n=1): # real signature unknown; restored from __doc__
""" QSemaphore.release(int n=1) """
pass
def tryAcquire(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QSemaphore.tryAcquire(int n=1) -> bool
QSemaphore.tryAcquire(int, int) -> bool
"""
return False
def __init__(self, int_n=0): # real signature unknown; restored from __doc__
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| gpl-2.0 | -6,895,287,090,918,313,000 | 29.897436 | 106 | 0.619917 | false |
patrick-winter-knime/mol-struct-nets | molstructnets/steps/training/tensor2d/tensor_2d.py | 1 | 5425 | from keras import models
import numpy
from steps.training.tensor2d import training_array
from keras.callbacks import Callback
from util import data_validation, file_structure, logger, callbacks, file_util, misc, progressbar, constants,\
process_pool
from steps.evaluation.shared import enrichment, roc_curve
from steps.prediction.shared.tensor2d import prediction_array
class Tensor2D:
@staticmethod
def get_id():
return 'tensor_2d'
@staticmethod
def get_name():
return 'Network (Grid)'
@staticmethod
def get_parameters():
parameters = list()
parameters.append({'id': 'epochs', 'name': 'Epochs', 'type': int, 'min': 1,
'description': 'The number of times the model will be trained on the whole data set.'})
parameters.append({'id': 'batch_size', 'name': 'Batch Size', 'type': int, 'default': 100, 'min': 1,
'description': 'Number of data points that will be processed together. A higher number leads'
' to faster processing but needs more memory. Default: 100'})
parameters.append({'id': 'evaluate', 'name': 'Evaluate', 'type': bool, 'default': False,
'description': 'Evaluate on the test data after each epoch. Default: False'})
parameters.append({'id': 'eval_partition_size', 'name': 'Evaluation partition size', 'type': int,
'default': 100, 'min': 1, 'max': 100, 'description':
'The size in percent of the test partition used for evaluation. Default: 100'})
return parameters
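    # Editor's note (sketch): a typical local parameter set for this step could
    # look like {'epochs': 20, 'batch_size': 100, 'evaluate': True,
    # 'eval_partition_size': 50}; the keys mirror the ids declared above.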
@staticmethod
def check_prerequisites(global_parameters, local_parameters):
data_validation.validate_target(global_parameters)
data_validation.validate_partition(global_parameters)
data_validation.validate_preprocessed_specs(global_parameters)
data_validation.validate_network(global_parameters)
@staticmethod
def execute(global_parameters, local_parameters):
model_path = file_structure.get_network_file(global_parameters, True)
epoch_path = model_path[:-3] + '-epochs.txt'
if file_util.file_exists(epoch_path):
with open(epoch_path, 'r') as file:
epoch = int(file.readline())
else:
epoch = 0
if epoch >= local_parameters['epochs']:
logger.log('Skipping step: ' + model_path + ' has already been trained for ' + str(epoch) + ' epochs')
else:
epochs = local_parameters['epochs']
batch_size = local_parameters['batch_size']
model = models.load_model(model_path)
process_pool_ = process_pool.ProcessPool()
arrays = training_array.TrainingArrays(global_parameters, epochs - epoch, epoch, batch_size, multi_process=process_pool_)
callbacks_ = [callbacks.CustomCheckpoint(model_path)]
test_data = None
if local_parameters['evaluate']:
test_data = prediction_array.PredictionArrays(global_parameters, local_parameters['batch_size'],
test=True, runs=epochs - epoch, multi_process=process_pool_,
percent=local_parameters['eval_partition_size'] * 0.01)
chunks = misc.chunk_by_size(len(test_data.input), local_parameters['batch_size'])
callbacks_ = [EvaluationCallback(test_data, chunks, global_parameters[constants.GlobalParameters.seed],
model_path[:-3] + '-eval.csv')] + callbacks_
model.fit(arrays.input, arrays.output, epochs=epochs, shuffle=False, batch_size=batch_size,
callbacks=callbacks_, initial_epoch=epoch)
if test_data is not None:
test_data.close()
arrays.close()
process_pool_.close()
class EvaluationCallback(Callback):
def __init__(self, test_data, chunks, seed, file_path):
self.test_data = test_data
self.seed = seed
self.file_path = file_path
self.chunks = chunks
def on_epoch_end(self, epoch, logs=None):
predictions = numpy.zeros((len(self.test_data.input), 2))
with progressbar.ProgressBar(len(self.test_data.input)) as progress:
for chunk in self.chunks:
predictions_chunk = self.model.predict(self.test_data.input.next())
predictions[chunk['start']:chunk['end']] = predictions_chunk[:]
progress.increment(chunk['size'])
actives, e_auc, efs = enrichment.stats(predictions, self.test_data.output, [5, 10], seed=self.seed)
roc_auc = roc_curve.stats(predictions, self.test_data.output, seed=self.seed)[2]
ef5 = efs[5]
ef10 = efs[10]
write_headline = not file_util.file_exists(self.file_path)
with open(self.file_path, 'a') as file:
if write_headline:
file.write('epoch,e_auc,ef_5,ef_10,roc_auc\n')
file.write(str(epoch + 1) + ',' + str(e_auc) + ',' + str(ef5) + ',' + str(ef10) + ',' + str(roc_auc) + '\n')
logger.log('E AUC: ' + str(round(e_auc, 2)) + ' EF5: ' + str(round(ef5, 2)) + ' EF10: '
+ str(round(ef10, 2)) + ' ROC AUC: ' + str(round(roc_auc, 2)))
| gpl-3.0 | 1,652,882,947,627,846,100 | 51.669903 | 133 | 0.591336 | false |
google-research/google-research | depth_and_motion_learning/depth_motion_field_model.py | 1 | 18887 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model for training depth egomotion prediction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from depth_and_motion_learning import depth_prediction_nets
from depth_and_motion_learning import intrinsics_utils
from depth_and_motion_learning import maybe_summary
from depth_and_motion_learning import object_motion_nets
from depth_and_motion_learning import parameter_container
from depth_and_motion_learning import transform_utils
from depth_and_motion_learning.dataset import data_processing
from depth_and_motion_learning.dataset import reader_cityscapes
from depth_and_motion_learning.losses import loss_aggregator
DEFAULT_PARAMS = {
'batch_size': None,
'input': {
'data_path': '',
# If the average L1 distance between two image is less than this
# threshold, they will be assumed to be near duplicates - a situation
# that happens often in robot footage, when the camera and the scene is
# static.
'duplicates_filter_threshold': 0.01,
# Size of shuffling queue. Larger - better shuffling. Smaller - faster
# and less host memory usage.
'shuffle_queue_size': 1024,
# Used in tf.data.Dataset.prefetch.
'prefetch_size': 32,
# Allows arbitrary parameters to be passed to the reader.
'reader': {},
},
'image_preprocessing': {
'data_augmentation': True,
# Size into which images will be resized, after random cropping.
'image_height': 128,
'image_width': 416,
},
'loss_weights': {
'rgb_consistency': 1.0,
'ssim': 3.0,
'depth_consistency': 0.0,
'depth_smoothing': 0.001,
'depth_supervision': 0.0,
'rotation_cycle_consistency': 1e-3,
'translation_cycle_consistency': 5e-2,
'depth_variance': 0.0,
'motion_smoothing': 1.0,
'motion_drift': 0.2,
},
'loss_params': {
# Stops gradient on the target depth when computing the depth
# consistency loss.
'target_depth_stop_gradient': True,
# Normalize the scale by the mean depth.
'scale_normalization': False,
},
'depth_predictor_params': {
'layer_norm_noise_rampup_steps': 10000,
'weight_decay': 0.0,
'learn_scale': False,
'reflect_padding': False,
},
'motion_prediction_params': {
'weight_reg': 0.0,
'align_corners': True,
'auto_mask': True,
},
'learn_intrinsics': {
'enabled': False,
# If True, learn the same set of intrinsic params will be assigned to a
# given video_id (works with the YouTube format in /dataset).
'per_video': False,
# If per_video is true, this is the maximal number of video ids for
# which the hash table that keeps track of the intrsinsics.
'max_number_of_videos': 1000,
},
# True to feed depth predictions into the motion field network.
'cascade': True,
# True to use a pretrained mask network to confine moving objects.
'use_mask': False,
'learn_egomotion': True,
# Number of pixels ro dilate the foreground mask by (0 to not dilate).
'foreground_dilation': 8,
# If nonzero, motion fields will be unfrozen after motion_field_burnin_steps
# steps. Over the first half of the motion_field_burnin_steps steps, the
# motion fields will be zero. Then the ramp up is linear.
'motion_field_burnin_steps': 20000,
# TPUEstimator keys, to allow strict ParameterContainer usage.
'context': None,
'use_tpu': None,
}
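# Editor's note (sketch): the `params` arguments accepted below are overrides
# merged into DEFAULT_PARAMS (strictly, two levels deep), e.g.
#   overrides = {'batch_size': 16,
#                'loss_weights': {'ssim': 1.5},
#                'input': {'data_path': '/path/to/train'}}  # hypothetical path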
def loss_fn(features, mode, params):
"""Computes the training loss for depth and egomotion training.
This function is written with TPU-friendlines in mind.
Args:
features: A dictionary mapping strings to tuples of (tf.Tensor, tf.Tensor),
representing pairs of frames. The loss will be calculated from these
tensors. The expected endpoints are 'rgb', 'depth', 'intrinsics_mat'
and 'intrinsics_mat_inv'.
mode: One of tf.estimator.ModeKeys: TRAIN, PREDICT or EVAL.
params: A dictionary with hyperparameters that optionally override
DEFAULT_PARAMS above.
Returns:
A dictionary mapping each loss name (see DEFAULT_PARAMS['loss_weights']'s
keys) to a scalar tf.Tensor representing the respective loss. The total
training loss.
Raises:
ValueError: `features` endpoints that don't conform with their expected
structure.
"""
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
if len(features['rgb']) != 2 or 'depth' in features and len(
features['depth']) != 2:
raise ValueError('RGB and depth endpoints are expected to be a tuple of two'
' tensors. Rather, they are %s.' % str(features))
# On tpu we strive to stack tensors together and perform ops once on the
# entire stack, to save time HBM memory. We thus stack the batch-of-first-
# frames and the batch-of-second frames, for both depth and RGB. The batch
# dimension of rgb_stack and gt_depth_stack are thus twice the original batch
# size.
rgb_stack = tf.concat(features['rgb'], axis=0)
depth_predictor = depth_prediction_nets.ResNet18DepthPredictor(
mode, params.depth_predictor_params.as_dict())
predicted_depth = depth_predictor.predict_depth(rgb_stack)
maybe_summary.histogram('PredictedDepth', predicted_depth)
endpoints = {}
endpoints['predicted_depth'] = tf.split(predicted_depth, 2, axis=0)
endpoints['rgb'] = features['rgb']
  # We use the heuristic that depths of less than 0.2 meters are not
  # accurate. This is a rough placeholder for a confidence map that we're going
  # to have in the future.
if 'depth' in features:
endpoints['groundtruth_depth'] = features['depth']
if params.cascade:
motion_features = [
tf.concat([features['rgb'][0], endpoints['predicted_depth'][0]],
axis=-1),
tf.concat([features['rgb'][1], endpoints['predicted_depth'][1]],
axis=-1)
]
else:
motion_features = features['rgb']
motion_features_stack = tf.concat(motion_features, axis=0)
flipped_motion_features_stack = tf.concat(motion_features[::-1], axis=0)
# Unlike `rgb_stack`, here we stacked the frames in reverse order along the
# Batch dimension. By concatenating the two stacks below along the channel
# axis, we create the following tensor:
#
# Channel dimension (3)
# _ _
# | Frame1-s batch | Frame2-s batch |____Batch
# |_ Frame2-s batch | Frame1-s batch _| dimension (0)
#
# When we send this tensor to the motion prediction network, the first and
# second halves of the result represent the camera motion from Frame1 to
# Frame2 and from Frame2 to Frame1 respectively. Further below we impose a
# loss that drives these two to be the inverses of one another
# (cycle-consistency).
pairs = tf.concat([motion_features_stack, flipped_motion_features_stack],
axis=-1)
rot, trans, residual_translation, intrinsics_mat = (
object_motion_nets.motion_field_net(
images=pairs,
weight_reg=params.motion_prediction_params.weight_reg,
align_corners=params.motion_prediction_params.align_corners,
auto_mask=params.motion_prediction_params.auto_mask))
if params.motion_field_burnin_steps > 0.0:
step = tf.to_float(tf.train.get_or_create_global_step())
burnin_steps = tf.to_float(params.motion_field_burnin_steps)
residual_translation *= tf.clip_by_value(2 * step / burnin_steps - 1, 0.0,
1.0)
# If using grouth truth egomotion
if not params.learn_egomotion:
egomotion_mat = tf.concat(features['egomotion_mat'], axis=0)
rot = transform_utils.angles_from_matrix(egomotion_mat[:, :3, :3])
trans = egomotion_mat[:, :3, 3]
trans = tf.expand_dims(trans, 1)
trans = tf.expand_dims(trans, 1)
if params.use_mask:
mask = tf.to_float(tf.concat(features['mask'], axis=0) > 0)
if params.foreground_dilation > 0:
pool_size = params.foreground_dilation * 2 + 1
mask = tf.nn.max_pool(mask, [1, pool_size, pool_size, 1], [1] * 4, 'SAME')
residual_translation *= mask
maybe_summary.histogram('ResidualTranslation', residual_translation)
maybe_summary.histogram('BackgroundTranslation', trans)
maybe_summary.histogram('Rotation', rot)
endpoints['residual_translation'] = tf.split(residual_translation, 2, axis=0)
endpoints['background_translation'] = tf.split(trans, 2, axis=0)
endpoints['rotation'] = tf.split(rot, 2, axis=0)
if not params.learn_intrinsics.enabled:
endpoints['intrinsics_mat'] = features['intrinsics_mat']
endpoints['intrinsics_mat_inv'] = features['intrinsics_mat_inv']
elif params.learn_intrinsics.per_video:
int_mat = intrinsics_utils.create_and_fetch_intrinsics_per_video_index(
features['video_index'][0],
params.image_preprocessing.image_height,
params.image_preprocessing.image_width,
max_video_index=params.learn_intrinsics.max_number_of_videos)
endpoints['intrinsics_mat'] = tf.concat([int_mat] * 2, axis=0)
endpoints['intrinsics_mat_inv'] = intrinsics_utils.invert_intrinsics_matrix(
int_mat)
else:
# The intrinsic matrix should be the same, no matter the order of
# images (mat = inv_mat). It's probably a good idea to enforce this
# by a loss, but for now we just take their average as a prediction for the
# intrinsic matrix.
intrinsics_mat = 0.5 * sum(tf.split(intrinsics_mat, 2, axis=0))
endpoints['intrinsics_mat'] = [intrinsics_mat] * 2
endpoints['intrinsics_mat_inv'] = [
intrinsics_utils.invert_intrinsics_matrix(intrinsics_mat)] * 2
aggregator = loss_aggregator.DepthMotionFieldLossAggregator(
endpoints, params.loss_weights.as_dict(), params.loss_params.as_dict())
# Add some more summaries.
maybe_summary.image('rgb0', features['rgb'][0])
maybe_summary.image('rgb1', features['rgb'][1])
disp0, disp1 = tf.split(aggregator.output_endpoints['disparity'], 2, axis=0)
maybe_summary.image('disparity0/grayscale', disp0)
maybe_summary.image_with_colormap('disparity0/plasma',
tf.squeeze(disp0, axis=3), 'plasma', 0.0)
maybe_summary.image('disparity1/grayscale', disp1)
maybe_summary.image_with_colormap('disparity1/plasma',
tf.squeeze(disp1, axis=3), 'plasma', 0.0)
if maybe_summary.summaries_enabled():
if 'depth' in features:
gt_disp0 = 1.0 / tf.maximum(features['depth'][0], 0.5)
gt_disp1 = 1.0 / tf.maximum(features['depth'][1], 0.5)
maybe_summary.image('disparity_gt0', gt_disp0)
maybe_summary.image('disparity_gt1', gt_disp1)
depth_proximity_weight0, depth_proximity_weight1 = tf.split(
aggregator.output_endpoints['depth_proximity_weight'], 2, axis=0)
maybe_summary.image('consistency_weight0',
tf.expand_dims(depth_proximity_weight0, -1))
maybe_summary.image('consistency_weight1',
tf.expand_dims(depth_proximity_weight1, -1))
maybe_summary.image('trans', aggregator.output_endpoints['trans'])
maybe_summary.image('trans_inv', aggregator.output_endpoints['inv_trans'])
maybe_summary.image('trans_res', endpoints['residual_translation'][0])
maybe_summary.image('trans_res_inv', endpoints['residual_translation'][1])
return aggregator.losses
def input_fn(params):
"""An Estimator's input_fn for reading and preprocessing training data.
Reads pairs of RGBD frames from sstables, filters out near duplicates and
performs data augmentation.
Args:
params: A dictionary with hyperparameters.
Returns:
A tf.data.Dataset object.
"""
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
dataset = reader_cityscapes.read_frame_pairs_from_data_path(
params.input.data_path, params.input.reader)
if params.learn_intrinsics.enabled and params.learn_intrinsics.per_video:
intrinsics_ht = intrinsics_utils.HashTableIndexer(
params.learn_intrinsics.max_number_of_videos)
def key_to_index(input_endpoints):
video_id = input_endpoints.pop('video_id', None)
if (video_id is not None and params.learn_intrinsics.enabled and
params.learn_intrinsics.per_video):
index = intrinsics_ht.get_or_create_index(video_id[0])
input_endpoints['video_index'] = index
input_endpoints['video_index'] = tf.stack([index] * 2)
return input_endpoints
dataset = dataset.map(key_to_index)
def is_duplicate(endpoints):
"""Implements a simple duplicate filter, based on L1 difference in RGB."""
return tf.greater(
tf.reduce_mean(tf.abs(endpoints['rgb'][1] - endpoints['rgb'][0])),
params.input.duplicates_filter_threshold)
if params.input.duplicates_filter_threshold > 0.0:
dataset = dataset.filter(is_duplicate)
# Add data augmentation
if params.image_preprocessing.data_augmentation:
if params.learn_intrinsics.per_video:
      raise ValueError(
          'Data augmentation together with learn_intrinsics.per_video is not '
          'yet supported.')
def random_crop_and_resize_fn(endpoints):
return data_processing.random_crop_and_resize_pipeline(
endpoints, params.image_preprocessing.image_height,
params.image_preprocessing.image_width)
augmentation_fn = random_crop_and_resize_fn
else:
def resize_fn(endpoints):
return data_processing.resize_pipeline(
endpoints, params.image_preprocessing.image_height,
params.image_preprocessing.image_width)
augmentation_fn = resize_fn
dataset = dataset.map(augmentation_fn)
dataset = dataset.shuffle(params.input.shuffle_queue_size)
dataset = dataset.batch(params.batch_size, drop_remainder=True)
return dataset.prefetch(params.input.prefetch_size)
def get_vars_to_restore_fn(initialization):
"""Returns a vars_to_restore_fn for various types of `initialization`.
Args:
initialization: A string, the type of the initialization. Currently only
'imagenet' is supported.
Raises:
ValueError: `initialization` is not supported
"""
if initialization == 'imagenet':
def is_blacklisted(name):
for key in ['Adam', 'iconv', 'depth_scale', 'upconv', 'disp']:
if key in name:
return True
return False
def vars_to_restore_fn():
"""Returns a dictionary mapping checkpoint variable names to variables."""
vars_to_restore = {}
for v in tf.global_variables():
if is_blacklisted(v.op.name):
print(v.op.name, 'is blacklisted')
continue
if v.op.name.startswith('depth_prediction'):
name = v.op.name.replace('moving_mean', 'mu')
name = name.replace('moving_variance', 'sigma')
vars_to_restore[name[len('depth_prediction') + 1:]] = v
return vars_to_restore
return vars_to_restore_fn
else:
raise ValueError('Unknown initialization %s' % initialization)
def preprocess_masks(endpoints):
def create_mobile_mask(input_mask):
return tf.reduce_all(tf.not_equal(0, input_mask), axis=2, keepdims=True)
output = dict(endpoints)
output['mask'] = tuple([create_mobile_mask(m) for m in endpoints['mask']])
return output
def infer_depth(rgb_image, params):
"""Runs depth inference given an RGB frame.
Args:
rgb_image: A tf.Tensor or shape [B, H, W, 3] containing RGB images.
    params: A dictionary of parameters containing overrides for
DEFAULT_PARAMS.
Returns:
A tf.Tensor of shape [B, H, W, 1] containing the inferred depths.
"""
if rgb_image.shape.rank != 4:
raise ValueError('rgb_image should have rank 4, not %d.' %
rgb_image.shape.rank)
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
depth_predictor = depth_prediction_nets.ResNet18DepthPredictor(
tf.estimator.ModeKeys.PREDICT, params.depth_predictor_params.as_dict())
return depth_predictor.predict_depth(rgb_image)
def infer_egomotion(rgb_image1, rgb_image2, params):
"""Runs egomotion inference given two RGB frames.
Args:
rgb_image1: A tf.Tensor or shape [B, H, W, 3] containing RGB images, the
first frame.
rgb_image2: A tf.Tensor or shape [B, H, W, 3] containing RGB images, the
second frame.
    params: A dictionary of parameters containing overrides for DEFAULT_PARAMS.
Returns:
    A tuple of two tf.Tensors: rotation matrices of shape [B, 3, 3] and
    translation vectors of shape [B, 3], averaged over the two frame orderings.
"""
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
if rgb_image1.shape.rank != 4 or rgb_image2.shape.rank != 4:
raise ValueError('rgb_image1 and rgb_image1 should have rank 4, not '
'%d and %d.' %
(rgb_image1.shape.rank, rgb_image2.shape.rank))
rgb_stack = tf.concat([rgb_image1, rgb_image2], axis=0)
flipped_rgb_stack = tf.concat([rgb_image2, rgb_image1], axis=0)
rot, trans, _ = object_motion_nets.motion_vector_net(tf.concat(
[rgb_stack, flipped_rgb_stack], axis=3), 0.0, False)
rot12, rot21 = tf.split(rot, 2, axis=0)
trans12, trans21 = tf.split(trans, 2, axis=0)
# rot12 and rot21 should be the inverses on of the other, but in reality they
# not exactly are. Averaging rot12 and inv(rot21) gives a better estimator for
# the rotation. Similarly, trans12 and rot12*trans21 should be the negatives
# one of the other, so we average rot12*trans21 and trans12
# to get a better estimator. TODO(gariel): Check if there's an estimator
# with less variance.
avg_rot = 0.5 * (tf.linalg.inv(rot21) + rot12)
avg_trans = 0.5 * (-tf.squeeze(
tf.matmul(rot12, tf.expand_dims(trans21, -1)), axis=-1) + trans12)
return avg_rot, avg_trans
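# Illustrative usage (editor's sketch; the zero images are placeholders and the
# shapes follow the docstrings above):
#   rgb0 = tf.zeros([1, 128, 416, 3])
#   rgb1 = tf.zeros([1, 128, 416, 3])
#   depth = infer_depth(rgb0, params={})                 # [1, 128, 416, 1]
#   rot, trans = infer_egomotion(rgb0, rgb1, params={})  # [1, 3, 3], [1, 3]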
| apache-2.0 | 2,114,861,337,026,813,700 | 38.762105 | 80 | 0.678721 | false |
mullikine/ranger | ranger/container/settings.py | 1 | 8531 | # Copyright (C) 2009-2013 Roman Zimbelmann <[email protected]>
# This software is distributed under the terms of the GNU GPL version 3.
from inspect import isfunction
from ranger.ext.signals import SignalDispatcher, Signal
from ranger.core.shared import FileManagerAware
from ranger.gui.colorscheme import _colorscheme_name_to_class
import re
import os.path
ALLOWED_SETTINGS = {
'automatically_count_files': bool,
'autosave_bookmarks': bool,
'autoupdate_cumulative_size': bool,
'cd_bookmarks': bool,
'collapse_preview': bool,
'colorscheme': str,
'column_ratios': (tuple, list),
'confirm_on_delete': str,
'dirname_in_tabs': bool,
'display_size_in_main_column': bool,
'display_size_in_status_bar': bool,
'display_tags_in_all_columns': bool,
'draw_borders': bool,
'draw_progress_bar_in_status_bar': bool,
'flushinput': bool,
'hidden_filter': str,
'idle_delay': int,
'max_console_history_size': (int, type(None)),
'max_history_size': (int, type(None)),
'mouse_enabled': bool,
'open_all_images': bool,
'padding_right': bool,
'preview_directories': bool,
'preview_files': bool,
'preview_images': bool,
'preview_max_size': int,
'preview_script': (str, type(None)),
'save_console_history': bool,
'scroll_offset': int,
'shorten_title': int,
'show_cursor': bool, # TODO: not working?
'show_selection_in_titlebar': bool,
'show_hidden_bookmarks': bool,
'show_hidden': bool,
'sort_case_insensitive': bool,
'sort_directories_first': bool,
'sort_reverse': bool,
'sort': str,
'status_bar_on_top': bool,
'tilde_in_titlebar': bool,
'unicode_ellipsis': bool,
'update_title': bool,
'update_tmux_title': bool,
'use_preview_script': bool,
'vcs_aware': bool,
'vcs_backend_bzr': str,
'vcs_backend_git': str,
'vcs_backend_hg': str,
'xterm_alt_key': bool,
}
DEFAULT_VALUES = {
bool: False,
type(None): None,
str: "",
int: 0,
list: [],
tuple: tuple([]),
}
class Settings(SignalDispatcher, FileManagerAware):
def __init__(self):
SignalDispatcher.__init__(self)
self.__dict__['_localsettings'] = dict()
self.__dict__['_localregexes'] = dict()
self.__dict__['_tagsettings'] = dict()
self.__dict__['_settings'] = dict()
for name in ALLOWED_SETTINGS:
self.signal_bind('setopt.'+name,
self._sanitize, priority=1.0)
self.signal_bind('setopt.'+name,
self._raw_set_with_signal, priority=0.2)
def _sanitize(self, signal):
name, value = signal.setting, signal.value
if name == 'column_ratios':
# TODO: cover more cases here
if isinstance(value, tuple):
signal.value = list(value)
if not isinstance(value, list) or len(value) < 2:
signal.value = [1, 1]
else:
signal.value = [int(i) if str(i).isdigit() else 1 \
for i in value]
elif name == 'colorscheme':
_colorscheme_name_to_class(signal)
elif name == 'preview_script':
if isinstance(value, str):
result = os.path.expanduser(value)
if os.path.exists(result):
signal.value = result
else:
signal.value = None
elif name == 'use_preview_script':
if self._settings['preview_script'] is None and value \
and self.fm.ui.is_on:
self.fm.notify("Preview script undefined or not found!",
bad=True)
def set(self, name, value, path=None, tags=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if name not in self._settings:
previous = None
else:
previous=self._settings[name]
assert self._check_type(name, value)
assert not (tags and path), "Can't set a setting for path and tag " \
"at the same time!"
kws = dict(setting=name, value=value, previous=previous,
path=path, tags=tags, fm=self.fm)
self.signal_emit('setopt', **kws)
self.signal_emit('setopt.'+name, **kws)
def get(self, name, path=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if path:
localpath = path
else:
try:
localpath = self.fm.thisdir.path
except:
localpath = path
if localpath:
for pattern, regex in self._localregexes.items():
if name in self._localsettings[pattern] and\
regex.search(localpath):
return self._localsettings[pattern][name]
if self._tagsettings and path:
realpath = os.path.realpath(path)
if realpath in self.fm.tags:
tag = self.fm.tags.marker(realpath)
if tag in self._tagsettings and name in self._tagsettings[tag]:
return self._tagsettings[tag][name]
if name in self._settings:
return self._settings[name]
else:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._raw_set(name, value)
self.__setattr__(name, value)
return self._settings[name]
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self.set(name, value, None)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self.get(name, None)
def __iter__(self):
for x in self._settings:
yield x
def types_of(self, name):
try:
typ = ALLOWED_SETTINGS[name]
except KeyError:
return tuple()
else:
if isinstance(typ, tuple):
return typ
else:
return (typ, )
def _check_type(self, name, value):
typ = ALLOWED_SETTINGS[name]
if isfunction(typ):
assert typ(value), \
"Warning: The option `" + name + "' has an incorrect type!"
else:
assert isinstance(value, typ), \
"Warning: The option `" + name + "' has an incorrect type!"\
" Got " + str(type(value)) + ", expected " + str(typ) + "!" +\
" Please check if your commands.py is up to date." if not \
self.fm.ui.is_set_up else ""
return True
__getitem__ = __getattr__
__setitem__ = __setattr__
def _raw_set(self, name, value, path=None, tags=None):
if path:
if not path in self._localsettings:
try:
regex = re.compile(path)
except:
# Bad regular expression
return
self._localregexes[path] = regex
self._localsettings[path] = dict()
self._localsettings[path][name] = value
# make sure name is in _settings, so __iter__ runs through
# local settings too.
if not name in self._settings:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._settings[name] = value
elif tags:
for tag in tags:
if tag not in self._tagsettings:
self._tagsettings[tag] = dict()
self._tagsettings[tag][name] = value
else:
self._settings[name] = value
def _raw_set_with_signal(self, signal):
self._raw_set(signal.setting, signal.value, signal.path, signal.tags)
class LocalSettings():
def __init__(self, path, parent):
self.__dict__['_parent'] = parent
self.__dict__['_path'] = path
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self._parent.set(name, value, self._path)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self._parent.get(name, self._path)
def __iter__(self):
for x in self._parent._settings:
yield x
__getitem__ = __getattr__
__setitem__ = __setattr__
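# Editor's note (illustrative sketch, mirroring ranger's `setlocal` command;
# `fm` stands for the running FileManager instance):
#   fm.settings.set('sort', 'mtime', path='^/tmp/downloads$')
#   fm.settings.get('sort', '/tmp/downloads')  # -> 'mtime' for that path only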
| gpl-3.0 | -4,823,316,337,248,086,000 | 32.586614 | 79 | 0.538155 | false |
smendez-hi/SUMO-hib | tools/visualization/mpl_dump_onNet.py | 1 | 17971 | #!/usr/bin/env python
"""
@file mpl_dump_onNet.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_dump_onNet.py 11671 2012-01-07 20:14:30Z behrisch $
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val/16)] + hex[int(val - int(val/16)*16)]
def toFloat(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return float(hex.find(val[0])*16 + hex.find(val[1]))
def toColor(val, colormap):
"""Converts the given value (0-1) into a color definition parseable by matplotlib"""
for i in range(0, len(colormap)-1):
if colormap[i+1][0]>val:
scale = (val - colormap[i][0]) / (colormap[i+1][0] - colormap[i][0])
r = colormap[i][1][0] + (colormap[i+1][1][0] - colormap[i][1][0]) * scale
g = colormap[i][1][1] + (colormap[i+1][1][1] - colormap[i][1][1]) * scale
b = colormap[i][1][2] + (colormap[i+1][1][2] - colormap[i][1][2]) * scale
return "#" + toHex(r) + toHex(g) + toHex(b)
return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
ret = []
defs = mapDef.split(",")
for d in defs:
(value, color) = d.split(":")
r = color[1:3]
g = color[3:5]
b = color[5:7]
ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
return ret
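# Editor's note (sketch): a map definition like "0:#ff0000,1:#00ff00" parses to
#   [(0.0, (255.0, 0.0, 0.0)), (1.0, (0.0, 255.0, 0.0))]
# and toColor() interpolates linearly between these anchor colors.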
class NetReader(handler.ContentHandler):
"""Reads a network, storing the edge geometries, lane numbers and max. speeds"""
def __init__(self):
self._id = ''
self._edge2lanes = {}
self._edge2speed = {}
self._edge2shape = {}
self._edge2from = {}
self._edge2to = {}
self._node2x = {}
self._node2y = {}
self._currentShapes = []
self._parseLane = False
def startElement(self, name, attrs):
self._parseLane = False
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] != 'internal':
self._id = attrs['id']
self._edge2from[attrs['id']] = attrs['from']
self._edge2to[attrs['id']] = attrs['to']
self._edge2lanes[attrs['id']] = 0
self._currentShapes = []
else:
self._id = ""
if name == 'lane' and self._id!="":
self._edge2speed[self._id] = float(attrs['maxspeed'])
self._edge2lanes[self._id] = self._edge2lanes[self._id] + 1
self._parseLane = True
self._currentShapes.append(attrs["shape"])
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = attrs['x']
self._node2y[attrs['id']] = attrs['y']
else:
self._id = ""
def endElement(self, name):
if self._parseLane:
self._parseLane = False
if name == 'edge' and self._id!="":
noShapes = len(self._currentShapes)
if noShapes%2 == 1 and noShapes>0:
self._edge2shape[self._id] = self._currentShapes[int(noShapes/2)]
elif noShapes%2 == 0 and len(self._currentShapes[0])!=2:
cshapes = []
minLen = -1
for i in self._currentShapes:
cshape = []
es = i.split(" ")
for e in es:
p = e.split(",")
cshape.append((float(p[0]), float(p[1])))
cshapes.append(cshape)
if minLen==-1 or minLen>len(cshape):
minLen = len(cshape)
self._edge2shape[self._id] = ""
if minLen>2:
for i in range(0, minLen):
x = 0.
y = 0.
for j in range(0, noShapes):
x = x + cshapes[j][i][0]
y = y + cshapes[j][i][1]
x = x / float(noShapes)
y = y / float(noShapes)
if self._edge2shape[self._id] != "":
self._edge2shape[self._id] = self._edge2shape[self._id] + " "
self._edge2shape[self._id] = self._edge2shape[self._id] + str(x) + "," + str(y)
def plotData(self, weights, options, values1, values2, saveName, colorMap):
edge2plotLines = {}
edge2plotColors = {}
edge2plotWidth = {}
xmin = 10000000.
xmax = -10000000.
ymin = 10000000.
ymax = -10000000.
min_width = 0
if options.min_width:
min_width = options.min_width
for edge in self._edge2from:
# compute shape
xs = []
ys = []
if edge not in self._edge2shape or self._edge2shape[edge]=="":
xs.append(float(self._node2x[self._edge2from[edge]]))
xs.append(float(self._node2x[self._edge2to[edge]]))
ys.append(float(self._node2y[self._edge2from[edge]]))
ys.append(float(self._node2y[self._edge2to[edge]]))
else:
shape = self._edge2shape[edge].split(" ")
l = []
for s in shape:
p = s.split(",")
xs.append(float(p[0]))
ys.append(float(p[1]))
for x in xs:
if x<xmin:
xmin = x
if x>xmax:
xmax = x
for y in ys:
if y<ymin:
ymin = y
if y>ymax:
ymax = y
# save shape
edge2plotLines[edge] = (xs, ys)
# compute color
if edge in values2:
c = values2[edge]
else:
c = 0
edge2plotColors[edge] = toColor(c, colorMap)
# compute width
if edge in values1:
w = values1[edge]
if w>0:
w = 10. * math.log(1 + values1[edge]) + min_width
else:
w = min_width
if options.max_width and w>options.max_width:
w = options.max_width
if w<min_width:
w = min_width
edge2plotWidth[edge] = w
else:
edge2plotWidth[edge] = min_width
if options.verbose:
print "x-limits: " + str(xmin) + " - " + str(xmax)
print "y-limits: " + str(ymin) + " - " + str(ymax)
if not options.show:
rcParams['backend'] = 'Agg'
# set figure size
if options.size and not options.show:
            f = figure(figsize=[float(x) for x in options.size.split(",")])
else:
f = figure()
for edge in edge2plotLines:
plot(edge2plotLines[edge][0], edge2plotLines[edge][1], color=edge2plotColors[edge], linewidth=edge2plotWidth[edge])
# set axes
if options.xticks!="":
(xb, xe, xd, xs) = options.xticks.split(",")
            xticks(arange(float(xb), float(xe), float(xd)), size = xs)
if options.yticks!="":
(yb, ye, yd, ys) = options.yticks.split(",")
            yticks(arange(float(yb), float(ye), float(yd)), size = ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(xmin, xmax)
if options.ylim!="":
(yb, ye) = options.ylim.split(",")
ylim(int(yb), int(ye))
else:
ylim(ymin, ymax)
if saveName:
savefig(saveName);
if options.show:
show()
def plot(self, weights, options, colorMap):
self._minValue1 = weights._minValue1
self._minValue2 = weights._minValue2
self._maxValue1 = weights._maxValue1
self._maxValue2 = weights._maxValue2
if options.join:
self.plotData(weights, options, weights._edge2value1, weights._edge2value2, options.output, colorMap)
else:
for i in weights._intervalBegins:
if options.verbose:
print " Processing step %d..." % i
output = options.output
if output:
output = output.replace("HERE", "%")
output = output % i
self.plotData(weights, options, weights._unaggEdge2value1[i], weights._unaggEdge2value2[i], output, colorMap )
def knowsEdge(self, id):
return id in self._edge2from
class WeightsReader(handler.ContentHandler):
"""Reads the dump file"""
def __init__(self, net, value1, value2):
self._id = ''
self._edge2value2 = {}
self._edge2value1 = {}
self._edge2no1 = {}
self._edge2no2 = {}
self._net = net
self._intervalBegins = []
self._unaggEdge2value2 = {}
self._unaggEdge2value1 = {}
self._beginTime = -1
self._value1 = value1
self._value2 = value2
def startElement(self, name, attrs):
if name == 'interval':
self._beginTime = int(attrs['begin'])
self._intervalBegins.append(self._beginTime)
self._unaggEdge2value2[self._beginTime] = {}
self._unaggEdge2value1[self._beginTime] = {}
if name == 'edge':
if self._net.knowsEdge(attrs['id']):
self._id = attrs['id']
if self._id not in self._edge2value2:
self._edge2value2[self._id] = 0
self._edge2value1[self._id] = 0
self._edge2no1[self._id] = 0
self._edge2no2[self._id] = 0
value1 = self._value1
if attrs.has_key(value1):
value1 = float(attrs[value1])
self._edge2no1[self._id] = self._edge2no1[self._id] + 1
else:
value1 = float(value1)
self._edge2value1[self._id] = self._edge2value1[self._id] + value1
self._unaggEdge2value1[self._beginTime][self._id] = value1
value2 = self._value2
if attrs.has_key(value2):
value2 = float(attrs[value2])
self._edge2no2[self._id] = self._edge2no2[self._id] + 1
else:
value2 = float(value2)
self._edge2value2[self._id] = self._edge2value2[self._id] + value2
self._unaggEdge2value2[self._beginTime][self._id] = value2
def updateExtrema(self, values1ByEdge, values2ByEdge):
for edge in values1ByEdge:
if self._minValue1==-1 or self._minValue1>values1ByEdge[edge]:
self._minValue1 = values1ByEdge[edge]
if self._maxValue1==-1 or self._maxValue1<values1ByEdge[edge]:
self._maxValue1 = values1ByEdge[edge]
if self._minValue2==-1 or self._minValue2>values2ByEdge[edge]:
self._minValue2 = values2ByEdge[edge]
if self._maxValue2==-1 or self._maxValue2<values2ByEdge[edge]:
self._maxValue2 = values2ByEdge[edge]
def valueDependantNorm(self, values, minV, maxV, tendency, percSpeed):
if tendency:
for edge in self._edge2value2:
if values[edge]<0:
values[edge] = 0
else:
values[edge] = 1
elif percSpeed:
for edge in self._edge2value2:
values[edge] = (values[edge] / self._net._edge2speed[edge])
elif minV!=maxV:
for edge in self._edge2value2:
values[edge] = (values[edge] - minV) / (maxV - minV)
def norm(self, tendency, percSpeed):
self._minValue1 = -1
self._maxValue1 = -1
self._minValue2 = -1
self._maxValue2 = -1
# compute mean value if join is set
if options.join:
for edge in self._edge2value2:
if float(self._edge2no1[edge])!=0:
self._edge2value1[edge] = float(self._edge2value1[edge]) / float(self._edge2no1[edge])
else:
self._edge2value1[edge] = float(self._edge2value1[edge])
if float(self._edge2no2[edge])!=0:
self._edge2value2[edge] = float(self._edge2value2[edge]) / float(self._edge2no2[edge])
else:
self._edge2value2[edge] = float(self._edge2value2[edge])
# compute min/max
if options.join:
self.updateExtrema(self._edge2value1, self._edge2value2)
else:
for i in weights._intervalBegins:
self.updateExtrema(self._unaggEdge2value1[i], self._unaggEdge2value2[i])
# norm
if options.verbose:
print "w range: " + str(self._minValue1) + " - " + str(self._maxValue1)
print "c range: " + str(self._minValue2) + " - " + str(self._maxValue2)
if options.join:
self.valueDependantNorm(self._edge2value1, self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._edge2value2, self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
else:
for i in weights._intervalBegins:
self.valueDependantNorm(self._unaggEdge2value1[i], self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._unaggEdge2value2[i], self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-d", "--dump", dest="dump",
help="dump file to use", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="(base) name for the output", metavar="FILE")
# data handling
optParser.add_option("-j", "--join", action="store_true", dest="join",
default=False, help="sums up values from all read intervals")
optParser.add_option("-w", "--min-width", dest="min_width",
type="float", help="sets minimum line width")
optParser.add_option("-W", "--max-width", dest="max_width",
type="float", help="sets maximum line width")
optParser.add_option("-c", "--min-color", dest="min_color",
type="float", help="sets minimum color (between 0 and 1)")
optParser.add_option("-C", "--max-color", dest="max_color",
type="float", help="sets maximum color (between 0 and 1)")
optParser.add_option("--tendency-coloring", action="store_true", dest="tendency_coloring",
default=False, help="show only 0/1 color for egative/positive values")
optParser.add_option("--percentage-speed", action="store_true", dest="percentage_speed",
default=False, help="speed is normed to maximum allowed speed on an edge")
optParser.add_option("--values", dest="values",
type="string", default="entered,speed", help="which values shall be parsed")
optParser.add_option("--color-map", dest="colormap",
type="string", default="0:#ff0000,.5:#ffff00,1:#00ff00", help="Defines the color map")
# axes/legend
optParser.add_option("--xticks", dest="xticks",type="string", default="",
help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks",type="string", default="",
help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim",type="string", default="",
help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim",type="string", default="",
help="defines y-axis range")
# output
optParser.add_option("--size", dest="size",type="string", default="",
help="defines the output size")
# processing
optParser.add_option("-s", "--show", action="store_true", dest="show",
default=False, help="shows each plot after generating it")
# parse options
(options, args) = optParser.parse_args()
# check set options
if not options.show and not options.output:
print "Neither show (--show) not write (--output <FILE>)? Exiting..."
exit()
# init color map
colorMap = parseColorMap(options.colormap)
# read network
if options.verbose:
print "Reading net..."
parser = make_parser()
net = NetReader()
parser.setContentHandler(net)
parser.parse(options.net)
# read weights
if options.verbose:
print "Reading weights..."
mValues = options.values.split(",")
weights = WeightsReader(net, mValues[0], mValues[1])
parser.setContentHandler(weights)
parser.parse(options.dump)
# process
if options.verbose:
print "Norming weights..."
weights.norm(options.tendency_coloring, options.percentage_speed)
if options.verbose:
print "Plotting..."
net.plot(weights, options, colorMap)
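# Example invocation (editor's sketch; the file names are hypothetical):
#   python mpl_dump_onNet.py -n net.net.xml -d dump.xml \
#       --values entered,speed --join -o overview.png
# Without --join one image per interval is written; the output name can embed
# the interval begin time through a "HERE" placeholder, e.g. -o step_HEREd.png.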
| gpl-3.0 | -7,227,673,384,518,771,000 | 38.935556 | 147 | 0.537032 | false |
pyro-ppl/numpyro | test/test_pickle.py | 1 | 1681 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pickle
import pytest
from jax import random, test_util
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro.infer import (
HMC,
HMCECS,
MCMC,
NUTS,
SA,
BarkerMH,
DiscreteHMCGibbs,
MixedHMC,
)
def normal_model():
numpyro.sample("x", dist.Normal(0, 1))
def bernoulli_model():
numpyro.sample("x", dist.Bernoulli(0.5))
def logistic_regression():
data = jnp.arange(10)
x = numpyro.sample("x", dist.Normal(0, 1))
with numpyro.plate("N", 10, subsample_size=2):
batch = numpyro.subsample(data, 0)
numpyro.sample("obs", dist.Bernoulli(logits=x), obs=batch)
@pytest.mark.parametrize("kernel", [BarkerMH, HMC, NUTS, SA])
def test_pickle_hmc(kernel):
mcmc = MCMC(kernel(normal_model), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
@pytest.mark.parametrize("kernel", [DiscreteHMCGibbs, MixedHMC])
def test_pickle_discrete_hmc(kernel):
mcmc = MCMC(kernel(HMC(bernoulli_model)), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
def test_pickle_hmcecs():
mcmc = MCMC(HMCECS(NUTS(logistic_regression)), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
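# Editor's note (sketch): these tests are normally collected by pytest, e.g.
#   pytest -q test/test_pickle.py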
| apache-2.0 | 3,439,322,928,740,963,300 | 26.557377 | 81 | 0.693635 | false |
fabianp/pytron | setup.py | 1 | 1329 | from Cython.Distutils import build_ext
import numpy as np
from glob import glob
from setuptools import setup, Extension
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
sources =['pytron/tron.pyx', 'pytron/src/tron.cpp', 'pytron/src/tron_helper.cpp'] + \
glob('pytron/src/blas/*.c')
setup(
name='pytron',
description='Python bindings for TRON optimizer',
long_description=open('README.rst').read(),
version='0.3',
author='Fabian Pedregosa',
author_email='[email protected]',
url='http://pypi.python.org/pypi/pytron',
packages=['pytron'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Simplified BSD',
requires=['numpy', 'scipy'],
cmdclass={'build_ext': build_ext},
ext_modules=[Extension('pytron.tron',
sources=sources,
language='c++', include_dirs=[np.get_include(), 'pytron/src/'])],
)
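# Editor's note (sketch): building the extension in place for development
# typically uses
#   python setup.py build_ext --inplace
# which needs Cython, the NumPy headers and a C/C++ toolchain to be available.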
| bsd-3-clause | 1,267,563,133,996,209,700 | 28.533333 | 85 | 0.684725 | false |
meskio/bitmask_client | src/leap/bitmask/platform_init/locks.py | 1 | 12103 | # -*- coding: utf-8 -*-
# locks.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities for handling multi-platform file locking mechanisms
"""
import logging
import errno
import os
import platform
from leap.bitmask import platform_init
from leap.common.events import signal as signal_event
from leap.common.events import events_pb2 as proto
if platform_init.IS_UNIX:
from fcntl import flock, LOCK_EX, LOCK_NB
else: # WINDOWS
import datetime
import glob
import shutil
import time
from tempfile import gettempdir
from leap.bitmask.util import get_modification_ts, update_modification_ts
logger = logging.getLogger(__name__)
if platform_init.IS_UNIX:
class UnixLock(object):
"""
Uses flock to get an exclusive lock over a file.
See man 2 flock
"""
def __init__(self, path):
"""
            Initializes the UnixLock with the path of the
desired lockfile
"""
self._fd = None
self.path = path
def get_lock(self):
"""
Tries to get a lock, and writes the running pid there if successful
"""
gotit, pid = self._get_lock_and_pid()
return gotit
def get_pid(self):
"""
Returns the pid of the locking process
"""
gotit, pid = self._get_lock_and_pid()
return pid
def _get_lock(self):
"""
Tries to get a lock, returning True if successful
:rtype: bool
"""
self._fd = os.open(self.path, os.O_CREAT | os.O_RDWR)
try:
flock(self._fd, LOCK_EX | LOCK_NB)
except IOError as exc:
# could not get the lock
if exc.args[0] in (errno.EDEADLK, errno.EAGAIN):
# errno 11 or 35
# Resource temporarily unavailable
return False
else:
raise
return True
@property
def locked_by_us(self):
"""
Returns True if the pid in the pidfile
is ours.
:rtype: bool
"""
gotit, pid = self._get_lock_and_pid()
return pid == os.getpid()
def _get_lock_and_pid(self):
"""
Tries to get a lock over the file.
Returns (locked, pid) tuple.
:rtype: tuple
"""
if self._get_lock():
self._write_to_pidfile()
return True, None
return False, self._read_from_pidfile()
def _read_from_pidfile(self):
"""
Tries to read pid from the pidfile,
returns False if no content found.
"""
pidfile = os.read(
self._fd, 16)
if not pidfile:
return False
try:
return int(pidfile.strip())
except Exception as exc:
                exc.args += (pidfile, self.path)
raise
def _write_to_pidfile(self):
"""
Writes the pid of the running process
to the pidfile
"""
fd = self._fd
os.ftruncate(fd, 0)
os.write(fd, '%d\n' % os.getpid())
os.fsync(fd)
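    # Illustrative usage (editor's sketch; the lock path is hypothetical):
    #   lock = UnixLock('/tmp/bitmask.lock')
    #   if not lock.get_lock():
    #       print("another instance holds the lock, pid %s" % lock.get_pid())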
if platform_init.IS_WIN:
# Time to wait (in secs) before assuming a raise window signal has not been
# ack-ed.
RAISE_WINDOW_TIMEOUT = 2
# How many steps to do while checking lockfile ts update.
RAISE_WINDOW_WAIT_STEPS = 10
def _release_lock(name):
"""
Tries to remove a folder path.
:param name: folder lock to remove
:type name: str
"""
try:
shutil.rmtree(name)
return True
except WindowsError as exc:
if exc.errno in (errno.EPIPE, errno.ENOENT,
errno.ESRCH, errno.EACCES):
logger.warning(
'exception while trying to remove the lockfile dir')
logger.warning('errno %s: %s' % (exc.errno, exc.args[1]))
# path does not exist
return False
else:
logger.debug('errno = %s' % (exc.errno,))
                # we did not foresee this error, better add it explicitly
raise
class WindowsLock(object):
"""
Creates a lock based on the atomic nature of mkdir on Windows
system calls.
"""
LOCKBASE = os.path.join(gettempdir(), "bitmask-lock")
def __init__(self):
"""
Initializes the lock.
Sets the lock name to basename plus the process pid.
"""
self._fd = None
pid = os.getpid()
self.name = "%s-%s" % (self.LOCKBASE, pid)
self.pid = pid
def get_lock(self):
"""
Tries to get a lock, and writes the running pid there if successful
"""
gotit = self._get_lock()
return gotit
def _get_lock(self):
"""
Tries to write to a file with the current pid as part of the name
"""
try:
self._fd = os.makedirs(self.name)
except OSError as exc:
# could not create the dir
if exc.args[0] == 183:
logger.debug('cannot create dir')
# cannot create dir with existing name
return False
else:
raise
return self._is_one_pidfile()[0]
def _is_one_pidfile(self):
"""
Returns True, pid if there is only one pidfile with the expected
base path
:rtype: tuple
"""
pidfiles = glob.glob(self.LOCKBASE + '-*')
if len(pidfiles) == 1:
pid = pidfiles[0].split('-')[-1]
return True, int(pid)
else:
return False, None
def get_pid(self):
"""
Returns the pid of the locking process.
:rtype: int
"""
# XXX assert there is only one?
_, pid = self._is_one_pidfile()
return pid
def get_locking_path(self):
"""
Returns the pid path of the locking process.
:rtype: str
"""
pid = self.get_pid()
if pid:
return "%s-%s" % (self.LOCKBASE, pid)
def release_lock(self, name=None):
"""
Releases the pidfile dir for this process, by removing it.
"""
if not name:
name = self.name
_release_lock(name)
@classmethod
def release_all_locks(self):
"""
Releases all locks. Used for clean shutdown.
"""
for lockdir in glob.glob("%s-%s" % (self.LOCKBASE, '*')):
_release_lock(lockdir)
@property
def locked_by_us(self):
"""
Returns True if the pid in the pidfile
is ours.
:rtype: bool
"""
_, pid = self._is_one_pidfile()
return pid == self.pid
def update_ts(self):
"""
Updates the timestamp of the lock.
"""
if self.locked_by_us:
update_modification_ts(self.name)
def write_port(self, port):
"""
Writes the port for windows control to the pidfile folder
Returns True if successful.
:rtype: bool
"""
if not self.locked_by_us:
logger.warning("Tried to write control port to a "
"non-unique pidfile folder")
return False
port_file = os.path.join(self.name, "port")
with open(port_file, 'w') as f:
f.write("%s" % port)
return True
def get_control_port(self):
"""
Reads control port of the main instance from the port file
in the pidfile dir
:rtype: int
"""
pid = self.get_pid()
port_file = os.path.join(self.LOCKBASE + "-%s" % pid, "port")
port = None
try:
with open(port_file) as f:
port_str = f.read()
port = int(port_str.strip())
except IOError as exc:
if exc.errno == errno.ENOENT:
logger.error("Tried to read port from non-existent file")
else:
                    # we did not know explicitly about this error
raise
return port
def raise_window_ack():
"""
This function is called from the windows callback that is registered
with the raise_window event. It just updates the modification time
of the lock file so we can signal an ack to the instance that tried
to raise the window.
"""
lock = WindowsLock()
lock.update_ts()
def we_are_the_one_and_only():
"""
Returns True if we are the only instance running, False otherwise.
If we came later, send a raise signal to the main instance of the
application.
Under windows we are not using flock magic, so we wait during
    RAISE_WINDOW_TIMEOUT time; if no ack is
received, we assume it was a stalled lock, so we remove it and continue
with initialization.
:rtype: bool
"""
_sys = platform.system()
if _sys in ("Linux", "Darwin"):
locker = UnixLock('/tmp/bitmask.lock')
locker.get_lock()
we_are_the_one = locker.locked_by_us
if not we_are_the_one:
signal_event(proto.RAISE_WINDOW)
return we_are_the_one
elif _sys == "Windows":
locker = WindowsLock()
locker.get_lock()
we_are_the_one = locker.locked_by_us
if not we_are_the_one:
locker.release_lock()
lock_path = locker.get_locking_path()
ts = get_modification_ts(lock_path)
nowfun = datetime.datetime.now
t0 = nowfun()
pause = RAISE_WINDOW_TIMEOUT / float(RAISE_WINDOW_WAIT_STEPS)
timeout_delta = datetime.timedelta(0, RAISE_WINDOW_TIMEOUT)
check_interval = lambda: nowfun() - t0 < timeout_delta
# let's assume it's a stalled lock
we_are_the_one = True
signal_event(proto.RAISE_WINDOW)
while check_interval():
if get_modification_ts(lock_path) > ts:
# yay! someone claimed their control over the lock.
# so the lock is alive
logger.debug('Raise window ACK-ed')
we_are_the_one = False
break
else:
time.sleep(pause)
if we_are_the_one:
# ok, it really was a stalled lock. let's remove all
# that is left, and put only ours there.
WindowsLock.release_all_locks()
WindowsLock().get_lock()
return we_are_the_one
else:
logger.warning("Multi-instance checker "
"not implemented for %s" % (_sys))
# lies, lies, lies...
return True
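# Illustrative sketch (not part of the original module): a typical startup guard.
# The application bootstrap could bail out early when another instance already
# owns the lock, e.g.:
#
#     if not we_are_the_one_and_only():
#         sys.exit(0)
#
# (sys would need to be imported by the caller; shown here only as an assumption.)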
| gpl-3.0 | -3,435,159,310,197,118,000 | 29.032258 | 79 | 0.512683 | false |
RPGOne/Skynet | pytorch-master/torch/nn/parallel/data_parallel.py | 1 | 4062 | import torch
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
class DataParallel(Module):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards
pass, gradients from each replica are summed into the original module.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is the
same size (so that each GPU processes the same number of samples).
See also: :ref:`cuda-nn-dataparallel-instead`
Arbitrary positional and keyword inputs are allowed to be passed into
DataParallel EXCEPT Tensors. All variables will be scattered on dim
specified (default 0). Primitive types will be broadcasted, but all
other types will be a shallow copy and can be corrupted if written to in
the model's forward pass.
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
output_device: device location of output (default: device_ids[0])
Example::
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var)
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(DataParallel, self).__init__()
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = device_ids
self.output_device = output_device
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def forward(self, *inputs, **kwargs):
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def replicate(self, module, device_ids):
return replicate(module, device_ids)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs)
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids, output_device=None, dim=0, module_kwargs=None):
"""Evaluates module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module: the module to evaluate in parallel
inputs: inputs to the module
device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
(default: device_ids[0])
Returns:
a Variable containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,)
if output_device is None:
output_device = device_ids[0]
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
replicas = replicate(module, device_ids[:len(inputs)])
outputs = parallel_apply(replicas, inputs, module_kwargs)
return gather(outputs, output_device, dim)
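# Illustrative sketch (not part of the original file): the functional form can be
# called directly instead of wrapping the model in DataParallel. The names below
# (MyModel, batch) are hypothetical placeholders.
#
#     model = MyModel().cuda(0)
#     output = data_parallel(model, batch, device_ids=[0, 1], output_device=0)
#
# This mirrors what the DataParallel module does in its forward pass, without
# keeping a wrapper object around.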
| bsd-3-clause | -7,975,203,935,464,098,000 | 38.436893 | 93 | 0.679468 | false |
dkgroot/remotecontrol_skinny_phone | tests/conftest.py | 1 | 1313 | import pytest
import re
from SccpLogger.SccpLogger import SccpLogger, EOF, TIMEOUT
def handle_tone(ssh, index, child_result_list):
tone = int(ssh.match.group(2))
dir = int(ssh.match.group(3))
return 'Tone', {'State':ssh.match.group(1).decode('utf-8'),'Type':sccp.lookup_tone(tone),'Direction':sccp.lookup_tonedirection(dir)}
@pytest.yield_fixture(scope="session", autouse=True, params=["10.15.15.205"])
def sccplogger(request):
options={'username':'cisco','password':'cisco','shelluser':'default','shellpasswd':'user','logfilename':'output.log'}
#hostname = '10.15.15.205'
hostname = request.param
_logger = SccpLogger(hostname, options)
try:
print("\nconnecting to %s..." %hostname)
_logger.connect()
print("connected")
_logger.login()
print('logged in to shell. setting up debug environment...')
_logger.setup_debug()
print('starting strace...')
_logger.start_strace()
print('ready to process events...\n')
yield _logger
#_logger.disconnect()
except TIMEOUT:
print("Connection timed out")
except EOF:
print("Disconnect from phone")
except KeyboardInterrupt:
print("Interrupted by User")
except Exception as e:
print("Exception occured: %s" %e)
| gpl-3.0 | -8,339,181,251,816,196,000 | 36.514286 | 135 | 0.642803 | false |
alt-core/sheetbot | models.py | 1 | 5113 | # coding: utf-8
import json
import random
import string
from google.appengine.ext import ndb
class GlobalBotVariables(ndb.Model):
scenario_uri = ndb.StringProperty()
class GroupMembers(ndb.Model):
members = ndb.StringProperty(repeated=True)
class PlayerStatus(ndb.Model):
scene = ndb.StringProperty()
scene_history = ndb.StringProperty(repeated=True)
action_token = ndb.StringProperty()
value = ndb.TextProperty()
class PlayerStatusDB(object):
    MAX_HISTORY = 5 # keep at most 5 history entries
def __init__(self, user_id):
self.id = user_id
self.entry = PlayerStatus.get_by_id(user_id)
if self.entry:
self.db = json.loads(self.entry.value) or {}
else:
self.entry = PlayerStatus(id=user_id, scene="*start", value="{}")
self.db = {}
self.is_dirty = False
self.is_values_dirty = False
if self.action_token is None:
self.renew_action_token()
def __getitem__(self, item):
value = self.db[item]
return value
def __setitem__(self, item, value):
if isinstance(value, list) or isinstance(value, dict):
is_ref = True
else:
is_ref = False
if item not in self.db or (self.db[item] != value or is_ref):
            # Mutable (reference) types can be modified in place, which would defeat the dirty check, so be conservative and always mark as dirty
self.db[item] = value
self.is_dirty = True
self.is_values_dirty = True
def __delitem__(self, item):
del self.db[item]
self.is_dirty = True
self.is_values_dirty = True
def __contains__(self, item):
return item in self.db
def keys(self):
return self.db.keys()
def get(self, item, default=None):
if item in self:
return self[item]
else:
return default
def reset(self):
self.db = {}
self.entry.scene = None
self.entry.scene_history = []
self.is_dirty = True
self.is_values_dirty = True
self.renew_action_token()
@property
def scene(self):
return self.entry.scene
@scene.setter
def scene(self, value):
self.entry.scene = value
self.is_dirty = True
@property
def scene_history(self):
return self.entry.scene_history
@scene_history.setter
def scene_history(self, value):
self.entry.scene_history = value
self.is_dirty = True
def push_scene_history(self, scene_title):
if scene_title is not None:
scene_history = self.scene_history
scene_history.append(scene_title)
self.scene_history = scene_history[-PlayerStatusDB.MAX_HISTORY:]
def pop_scene_history(self):
if len(self.scene_history) > 0:
return self.scene_history.pop()
return None
@property
def action_token(self):
return self.entry.action_token
@action_token.setter
def action_token(self, value):
self.entry.action_token = value
self.is_dirty = True
def renew_action_token(self):
self.action_token = \
u''.join([random.choice(string.ascii_letters) for _ in range(8)])
def __str__(self):
return str(self.db)
def save(self):
if self.is_dirty:
if self.is_values_dirty:
self.entry.value = json.dumps(self.db)
self.entry.put()
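    # Illustrative usage sketch (not part of the original module):
    #
    #     status = PlayerStatusDB('U1234567890')   # hypothetical user id
    #     status['flag'] = 1                        # dict-style access to the value store
    #     status.scene = '*start'
    #     status.save()                             # persists only when something changed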
class GroupDB(object):
def __init__(self, group_id):
self.entry = GroupMembers.get_by_id(id=group_id)
if self.entry is None:
self.entry = GroupMembers(id=group_id, members=[])
def append_member(self, member):
if member not in self.entry.members:
self.entry.members.append(member)
self.entry.put()
def remove_member(self, member):
if member in self.entry.members:
self.entry.members.remove(member)
self.entry.put()
def clear(self):
if self.entry.members:
del self.entry.members[:]
self.entry.put()
class ImageFileStatDB(ndb.Model):
file_digest = ndb.StringProperty()
width = ndb.IntegerProperty()
height = ndb.IntegerProperty()
@classmethod
def get_cached_image_file_stat(cls, kind, image_url):
key = u'{}|{}'.format(kind, image_url)
stat = cls.get_by_id(id=key)
if stat is None:
return None
size = (stat.width, stat.height)
return stat.file_digest, size
@classmethod
def put_cached_image_file_stat(cls, kind, image_url, file_digest, size):
key = u'{}|{}'.format(kind, image_url)
entry = cls.get_by_id(id=key)
if entry is None:
entry = cls(id=key, file_digest=file_digest, width=size[0], height=size[1])
else:
if entry.file_digest == file_digest:
                # unchanged, so do not update
return
entry.file_digest = file_digest
entry.width, entry.height = size
entry.put()
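    # Illustrative sketch (not part of the original module): the two classmethods
    # are meant to be used as a read-through cache pair, e.g.:
    #
    #     cached = ImageFileStatDB.get_cached_image_file_stat('thumb', image_url)
    #     if cached is None:
    #         digest, size = compute_stat(image_url)   # hypothetical helper
    #         ImageFileStatDB.put_cached_image_file_stat('thumb', image_url, digest, size)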
| mit | 7,030,397,467,821,336,000 | 26.478022 | 87 | 0.581684 | false |
cdcarter/CumulusCI | cumulusci/core/config.py | 1 | 23279 | import base64
import datetime
import logging
import os
import pickle
import re
import hiyapyco
import sarge
from simple_salesforce import Salesforce
import yaml
import requests
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
from github3 import login
from Crypto import Random
from Crypto.Cipher import AES
from cumulusci.core.exceptions import ConfigError
from cumulusci.core.exceptions import NotInProject
from cumulusci.core.exceptions import KeychainConnectedAppNotFound
from cumulusci.core.exceptions import ProjectConfigNotFound
from cumulusci.core.exceptions import ScratchOrgException
from cumulusci.core.exceptions import SOQLQueryException
from cumulusci.core.exceptions import KeychainNotFound
from cumulusci.oauth.salesforce import SalesforceOAuth2
__location__ = os.path.dirname(os.path.realpath(__file__))
class BaseConfig(object):
""" Base class for all configuration objects """
defaults = {}
search_path = ['config']
def __init__(self, config=None):
if config is None:
self.config = {}
else:
self.config = config
self._init_logger()
self._load_config()
def _init_logger(self):
""" Initializes self.logger """
self.logger = logging.getLogger(__name__)
def _load_config(self):
""" Performs the logic to initialize self.config """
pass
def __getattr__(self, name):
tree = name.split('__')
if name.startswith('_'):
raise AttributeError('Attribute {} not found'.format(name))
value = None
value_found = False
for attr in self.search_path:
config = getattr(self, attr)
if len(tree) > 1:
# Walk through the config dictionary using __ as a delimiter
for key in tree[:-1]:
config = config.get(key)
if config is None:
break
if config is None:
continue
if tree[-1] in config:
value = config[tree[-1]]
value_found = True
break
if value_found:
return value
else:
return self.defaults.get(name)
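    # Illustrative sketch (not part of the original module): because __getattr__
    # splits attribute names on '__', nested config keys can be read as plain
    # attributes, e.g.:
    #
    #     config = BaseConfig({'project': {'git': {'prefix_beta': 'beta/'}}})
    #     config.project__git__prefix_beta   # -> 'beta/'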
class TaskConfig(BaseConfig):
""" A task with its configuration merged """
pass
class FlowConfig(BaseConfig):
""" A flow with its configuration merged """
pass
class BaseTaskFlowConfig(BaseConfig):
""" Base class for all configs that contain tasks and flows """
def list_tasks(self):
""" Returns a list of task info dictionaries with keys 'name' and 'description' """
tasks = []
for task in self.tasks.keys():
task_info = self.tasks[task]
if not task_info:
task_info = {}
tasks.append({
'name': task,
'description': task_info.get('description'),
})
return tasks
def get_task(self, name):
""" Returns a TaskConfig """
config = getattr(self, 'tasks__{}'.format(name))
return TaskConfig(config)
def list_flows(self):
""" Returns a list of flow info dictionaries with keys 'name' and 'description' """
flows = []
return flows
def get_flow(self, name):
""" Returns a FlowConfig """
config = getattr(self, 'flows__{}'.format(name))
return FlowConfig(config)
class BaseProjectConfig(BaseTaskFlowConfig):
""" Base class for a project's configuration which extends the global config """
search_path = ['config']
def __init__(self, global_config_obj):
self.global_config_obj = global_config_obj
self.keychain = None
super(BaseProjectConfig, self).__init__()
@property
def config_global_local(self):
return self.global_config_obj.config_global_local
@property
def config_global(self):
return self.global_config_obj.config_global
@property
def repo_root(self):
root = None
pwd = os.getcwd().split(os.sep)
while pwd:
if os.path.isdir(os.path.join(os.sep, os.path.join(*pwd),'.git')):
break
else:
pwd.pop()
if pwd:
return os.path.join(os.sep, os.path.join(*pwd))
@property
def repo_name(self):
if not self.repo_root:
return
in_remote_origin = False
with open(os.path.join(self.repo_root, '.git', 'config'), 'r') as f:
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and line.find('url =') != -1:
line_parts = line.split('/')
repo_name = line_parts[-1]
if repo_name.endswith('.git'):
repo_name = repo_name[:-4]
return repo_name
@property
def repo_url(self):
if not self.repo_root:
return
git_config_file = os.path.join(self.repo_root, '.git', 'config')
with open(git_config_file, 'r') as f:
in_remote_origin = False
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and 'url = ' in line:
return line[7:]
@property
def repo_owner(self):
if not self.repo_root:
return
in_remote_origin = False
with open(os.path.join(self.repo_root, '.git', 'config'), 'r') as f:
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and line.find('url =') != -1:
line_parts = line.split('/')
return line_parts[-2].split(':')[-1]
@property
def repo_branch(self):
if not self.repo_root:
return
with open(os.path.join(self.repo_root, '.git', 'HEAD'), 'r') as f:
branch_ref = f.read().strip()
if branch_ref.startswith('ref: '):
return '/'.join(branch_ref[5:].split('/')[2:])
@property
def repo_commit(self):
if not self.repo_root:
return
branch = self.repo_branch
if not branch:
return
join_args = [self.repo_root, '.git', 'refs', 'heads']
join_args.extend(branch.split('/'))
commit_file = os.path.join(*join_args)
commit_sha = None
if os.path.isfile(commit_file):
with open(commit_file, 'r') as f:
commit_sha = f.read().strip()
else:
packed_refs_path = os.path.join(
self.repo_root,
'.git',
'packed-refs'
)
with open(packed_refs_path, 'r') as f:
for line in f:
parts = line.split(' ')
if len(parts) == 1:
                        # Skip lines showing the commit sha of a tag on the preceding line
continue
if parts[1].replace('refs/remotes/origin/', '').strip() == branch:
commit_sha = parts[0]
break
return commit_sha
def get_latest_version(self, beta=None):
""" Query Github Releases to find the latest production or beta release """
github_config = self.keychain.get_service('github')
gh = login(github_config.username, github_config.password)
repo = gh.repository(self.repo_owner, self.repo_name)
latest_version = None
for release in repo.iter_releases():
if beta:
if 'Beta' not in release.tag_name:
continue
else:
if 'Beta' in release.tag_name:
continue
version = self.get_version_for_tag(release.tag_name)
if version is None:
continue
version = LooseVersion(version)
if not latest_version or version > latest_version:
latest_version = version
return latest_version
@property
def config_project_path(self):
if not self.repo_root:
return
path = os.path.join(self.repo_root, self.config_filename)
if os.path.isfile(path):
return path
@property
def project_local_dir(self):
""" location of the user local directory for the project
e.g., ~/.cumulusci/NPSP-Extension-Test/ """
# depending on where we are in bootstrapping the YamlGlobalConfig
# the canonical projectname could be located in one of two places
if self.project__name:
name = self.project__name
else:
try:
name = self.config_project['project']['name']
except KeyError:
name = ''
path = os.path.join(
os.path.expanduser('~'),
self.global_config_obj.config_local_dir,
name,
)
if not os.path.isdir(path):
os.makedirs(path)
return path
def get_tag_for_version(self, version):
if '(Beta' in version:
tag_version = version.replace(' (','-').replace(')','').replace(' ','_')
tag_name = self.project__git__prefix_beta + tag_version
else:
tag_name = self.project__git__prefix_release + version
return tag_name
def get_version_for_tag(self, tag):
if not tag.startswith(self.project__git__prefix_beta) and not tag.startswith(self.project__git__prefix_release):
return None
if 'Beta' in tag:
version = tag[len(self.project__git__prefix_beta):]
version = version.replace('-',' (').replace('_',' ') + ')'
else:
version = tag[len(self.project__git__prefix_release):]
return version
def set_keychain(self, keychain):
self.keychain = keychain
def _check_keychain(self):
if not self.keychain:
raise KeychainNotFound('Could not find config.keychain. You must call config.set_keychain(keychain) before accessing orgs')
def list_orgs(self):
""" Returns a list of all org names for the project """
self._check_keychain()
return self.keychain.list_orgs()
def get_org(self, name):
""" Returns an OrgConfig for the given org_name """
self._check_keychain()
return self.keychain.get_org(name)
def set_org(self, name, org_config):
""" Creates or updates an org's oauth info """
self._check_keychain()
return self.keychain.set_org(name, org_config)
class BaseGlobalConfig(BaseTaskFlowConfig):
""" Base class for the global config which contains all configuration not specific to projects """
project_config_class = BaseProjectConfig
config_local_dir = '.cumulusci'
def list_projects(self):
""" Returns a list of project names """
raise NotImplementedError('Subclasses must provide an implementation')
def get_project_config(self):
""" Returns a ProjectConfig for the given project """
return self.project_config_class(self)
def create_project(self, project_name, config):
""" Creates a new project configuration and returns it """
raise NotImplementedError('Subclasses must provide an implementation')
class ConnectedAppOAuthConfig(BaseConfig):
""" Salesforce Connected App OAuth configuration """
pass
class OrgConfig(BaseConfig):
""" Salesforce org configuration (i.e. org credentials) """
def refresh_oauth_token(self, connected_app):
client_id = self.client_id
client_secret = self.client_secret
if not client_id:
client_id = connected_app.client_id
client_secret = connected_app.client_secret
sf_oauth = SalesforceOAuth2(
client_id,
client_secret,
connected_app.callback_url, # Callback url isn't really used for this call
auth_site=self.instance_url,
)
resp = sf_oauth.refresh_token(self.refresh_token).json()
if resp != self.config:
self.config.update(resp)
self._load_userinfo()
@property
def start_url(self):
start_url = '%s/secur/frontdoor.jsp?sid=%s' % (self.instance_url, self.access_token)
return start_url
@property
def user_id(self):
return self.id.split('/')[-1]
@property
def org_id(self):
return self.id.split('/')[-2]
@property
def username(self):
""" Username for the org connection. """
return self.userinfo__preferred_username
def load_userinfo(self):
self._load_userinfo()
def _load_userinfo(self):
headers = {"Authorization":"Bearer " + self.access_token}
response = requests.get(self.instance_url+"/services/oauth2/userinfo", headers=headers)
if response != self.config.get('userinfo', {}):
self.config.update({'userinfo':response.json()})
class ScratchOrgConfig(OrgConfig):
""" Salesforce DX Scratch org configuration """
@property
def scratch_info(self):
if hasattr(self, '_scratch_info'):
return self._scratch_info
# Create the org if it hasn't already been created
if not self.created:
self.create_org()
self.logger.info('Getting scratch org info from Salesforce DX')
# Call force:org:open and parse output to get instance_url and access_token
command = 'heroku force:org:open -d -u {}'.format(self.username)
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
p.run()
org_info = None
stdout_list = []
for line in p.stdout:
if line.startswith('Access org'):
org_info = line.strip()
stdout_list.append(line.strip())
if p.returncode:
message = 'Return code: {}\nstdout: {}\nstderr: {}'.format(
p.returncode,
'\n'.join(stdout_list),
p.stderr,
)
self.logger.error(message)
raise ScratchOrgException(message)
if not org_info:
message = 'Did not find org info in command output:\n{}'.format(p.stdout)
self.logger.error(message)
raise ScratchOrgException(message)
# OrgID is the third word of the output
org_id = org_info.split(' ')[2]
# Username is the sixth word of the output
username = org_info.split(' ')[5]
info_parts = org_info.split('following URL: ')
if len(info_parts) == 1:
message = 'Did not find org info in command output:\n{}'.format(p.stdout)
self.logger.error(message)
raise ScratchOrgException(message)
instance_url, access_token = info_parts[1].split('/secur/frontdoor.jsp?sid=')
self._scratch_info = {
'instance_url': instance_url,
'access_token': access_token,
'org_id': org_id,
'username': username,
}
self._scratch_info_date = datetime.datetime.now()
return self._scratch_info
@property
def access_token(self):
return self.scratch_info['access_token']
@property
def instance_url(self):
return self.scratch_info['instance_url']
@property
def org_id(self):
org_id = self.config.get('org_id')
if not org_id:
org_id = self.scratch_info['org_id']
return org_id
@property
def user_id(self):
if not self.config.get('user_id'):
sf = Salesforce(
instance=self.instance_url.replace('https://', ''),
session_id=self.access_token,
version='38.0',
)
result = sf.query_all(
"SELECT Id FROM User WHERE UserName='{}'".format(
self.username
)
)
self.config['user_id'] = result['records'][0]['Id']
return self.config['user_id']
@property
def username(self):
username = self.config.get('username')
if not username:
username = self.scratch_info['username']
return username
def create_org(self):
""" Uses heroku force:org:create to create the org """
if not self.config_file:
# FIXME: raise exception
return
if not self.scratch_org_type:
self.config['scratch_org_type'] = 'workspace'
command = 'heroku force:org:create -t {} -f {}'.format(self.scratch_org_type, self.config_file)
self.logger.info('Creating scratch org with command {}'.format(command))
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
p.run()
org_info = None
re_obj = re.compile('Successfully created workspace org: (.+), username: (.+)')
stdout = []
for line in p.stdout:
match = re_obj.search(line)
if match:
self.config['org_id'] = match.group(1)
self.config['username'] = match.group(2)
stdout.append(line)
self.logger.info(line)
if p.returncode:
message = 'Failed to create scratch org: \n{}'.format(''.join(stdout))
raise ScratchOrgException(message)
# Flag that this org has been created
self.config['created'] = True
def delete_org(self):
""" Uses heroku force:org:delete to create the org """
if not self.created:
self.logger.info('Skipping org deletion: the scratch org has not been created')
return
command = 'heroku force:org:delete --force -u {}'.format(self.username)
self.logger.info('Deleting scratch org with command {}'.format(command))
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
p.run()
org_info = None
stdout = []
for line in p.stdout:
stdout.append(line)
if line.startswith('An error occurred deleting this org'):
self.logger.error(line)
else:
self.logger.info(line)
if p.returncode:
message = 'Failed to delete scratch org: \n{}'.format(''.join(stdout))
raise ScratchOrgException(message)
        # Flag that this org has been deleted
self.config['created'] = False
self.config['username'] = None
def refresh_oauth_token(self, connected_app):
""" Use heroku force:org:open to refresh token instead of built in OAuth handling """
if hasattr(self, '_scratch_info'):
# Cache the scratch_info for 1 hour to avoid unnecessary calls out to heroku CLI
delta = datetime.datetime.now() - self._scratch_info_date
if delta.total_seconds() > 3600:
del self._scratch_info
# This triggers a refresh
self.scratch_info
class ServiceConfig(BaseConfig):
pass
class YamlProjectConfig(BaseProjectConfig):
config_filename = 'cumulusci.yml'
@property
def config_project_local_path(self):
path = os.path.join(
self.project_local_dir,
self.config_filename,
)
if os.path.isfile(path):
return path
def _load_config(self):
""" Loads the configuration for the project """
# Initialize the dictionaries for the individual configs
self.config_project = {}
self.config_project_local = {}
# Verify that we're in a project
repo_root = self.repo_root
if not repo_root:
raise NotInProject('No repository found in current path. You must be inside a repository to initialize the project configuration')
# Verify that the project's root has a config file
if not self.config_project_path:
raise ProjectConfigNotFound(
'The file {} was not found in the repo root: {}'.format(
self.config_filename,
repo_root
)
)
# Start the merged yaml config from the global and global local configs
merge_yaml = [self.global_config_obj.config_global_path]
if self.global_config_obj.config_global_local_path:
merge_yaml.append(self.global_config_obj.config_global_local_path)
# Load the project's yaml config file
with open(self.config_project_path, 'r') as f_config:
project_config = yaml.load(f_config)
if project_config:
self.config_project.update(project_config)
merge_yaml.append(self.config_project_path)
# Load the local project yaml config file if it exists
if self.config_project_local_path:
with open(self.config_project_local_path, 'r') as f_local_config:
local_config = yaml.load(f_local_config)
if local_config:
self.config_project_local.update(local_config)
merge_yaml.append(self.config_project_local_path)
self.config = hiyapyco.load(*merge_yaml, method=hiyapyco.METHOD_MERGE)
class YamlGlobalConfig(BaseGlobalConfig):
config_filename = 'cumulusci.yml'
config_local_dir = '.cumulusci'
project_config_class = YamlProjectConfig
def __init__(self):
self.config_global_local = {}
self.config_global = {}
super(YamlGlobalConfig, self).__init__()
@property
def config_global_local_path(self):
directory = os.path.join(
os.path.expanduser('~'),
self.config_local_dir,
)
if not os.path.exists(directory):
os.makedirs(directory)
config_path = os.path.join(
directory,
self.config_filename,
)
if not os.path.isfile(config_path):
return None
return config_path
def _load_config(self):
""" Loads the local configuration """
# load the global config
self._load_global_config()
merge_yaml = [self.config_global_path]
# Load the local config
if self.config_global_local_path:
config = yaml.load(open(self.config_global_local_path, 'r'))
self.config_global_local = config
if config:
merge_yaml.append(self.config_global_local_path)
self.config = hiyapyco.load(*merge_yaml, method=hiyapyco.METHOD_MERGE)
@property
def config_global_path(self):
return os.path.join( __location__, '..', self.config_filename)
def _load_global_config(self):
""" Loads the configuration for the project """
# Load the global cumulusci.yml file
with open(self.config_global_path, 'r') as f_config:
config = yaml.load(f_config)
self.config_global = config
| bsd-3-clause | 5,008,407,632,024,164,000 | 32.494964 | 143 | 0.57133 | false |
lijiabogithub/QUANTAXIS | test/test_strategy.py | 1 | 10524 | #coding:utf-8
#First, import QUANTAXIS
import QUANTAXIS as QA
#The random module is imported for initialization in case multiple accounts are used
import random
#import (your strategy here)
#Inherit QUANTAXIS's backtest class; backtest bundles an account class, a market class (quotes and order matching) and a setting class (global settings, LAN database, backtest account, etc.)
class backtest(QA.QA_Backtest):
    #Initialize the backtest process
def init(self):
        #Initialize the account
self.account=QA.QA_Account()
        #Set the initial account assets
self.account.assets=100000
        #Set the backtest start and end dates
self.strategy_start_date='2015-01-01'
self.strategy_end_date='2017-04-01'
        #Set the instruments to backtest; this is a list, but using only one instrument is recommended
self.strategy_stock_list=['600592.SZ']
        #gap is the look-back window, in trading days, for the data fetched on each backtest day
self.strategy_gap=90
        #Initialize a cookie
self.account.account_cookie=str(random.random())
        #Set the global database address and the backtest username and password
self.setting.QA_util_sql_mongo_ip='127.0.0.1'
self.setting.QA_setting_user_name='yutiansut'
self.setting.QA_setting_user_password='yutiansut'
        #Name of the backtest
self.strategy_name='CLBS-01-90days-ANN'
        #Run the global initialization and the account initialization
self.setting.QA_setting_init()
self.account.init()
#print(self.account.history_trade)
#input()
        #Record the database info in the log
QA.QA_util_log_info(self.setting.client)
        #Compute the real trading start and end times from the backtest settings
self.start_mes=QA.QA_util_realtime(self.strategy_start_date,self.setting.client)
self.end_mes=QA.QA_util_realtime(self.strategy_end_date,self.setting.client)
    #Fetch data from the market (based on gap); you can also ignore gap and write your own data-fetching code
    #The data API being called is:
    #data=QA.QA_fetch_data(instrument code, start date, end date, database client)
def BT_get_data_from_market(self,id):
self.coll=self.setting.client.quantaxis.trade_date
start=self.coll.find_one({'num':int(id)-int(self.strategy_gap)})
end=self.coll.find_one({'num':int(id)})
start_date=str(start['date'])[0:10]
end_date=str(end['date'])[0:10]
self.coll2=self.setting.client.quantaxis.stock_day
data=QA.QA_fetch_data(self.strategy_stock_list[0],start_date,end_date,self.coll2)
return data
    #Refresh the data from the account
def BT_get_data_from_ARP(self):
return self.account.QA_Account_get_message()
def BT_data_handle(self,id):
market_data=self.BT_get_data_from_market(id)
message=self.BT_get_data_from_ARP()
#print(message['body']['account']['cur_profit_present'])
return {'market':market_data,'account':message}
        #Combine the data from the account and the market; you can also add your own indicators or data sources here as a dict
    #The strategy starts here
def handle_data(self):
QA.QA_util_log_info(self.account.message['body'])
        #Loop over the trading days of the backtest
for i in range(int(self.start_mes['id']),int(self.end_mes['id']),1):
QA.QA_util_log_info('===day start===')
running_date=QA.QA_util_id2date(i,self.setting.client)
QA.QA_util_log_info(running_date)
is_trade=QA.QA_util_is_trade(running_date,self.strategy_stock_list[0],self.setting.client)
if is_trade==False:
QA.QA_util_log_info('停牌中')
else:
data=self.BT_data_handle(i)
result=predict(data['market'],data['account']['body']['account']['profit']*100,data['account']['body']['account']['hold'],data['account']['body']['account']['cur_profit_present']*100)
# print(result)
print(data['account']['body']['account']['hold'])
if result==1 and int(data['account']['body']['account']['hold'])==0:
#print(data['account']['body']['account']['assest_free'])
#print(data['market'][-1][4])
#self.bid.bid['amount']=int(float(data['account']['body']['account']['assest_free'])/float(data['market'][-1][4]))
self.bid.bid['amount']=float(data['account']['body']['account']['assest_free'])/float(data['market'][-1][4])
#self.bid.bid['amount']=1000
#print(self.bid.bid['amount'])
self.bid.bid['price']=float(data['market'][-1][4])
self.bid.bid['code']=str(self.strategy_stock_list[0])[0:6]
self.bid.bid['time']=data['market'][-1][6]
self.bid.bid['towards']=1
self.bid.bid['user']=self.setting.QA_setting_user_name
self.bid.bid['strategy']=self.strategy_name
message=self.market.market_make_deal(self.bid.bid,self.setting.client)
QA.QA_util_log_info(message)
if str(message['header']['status'])[0]=='2':
message=self.account.QA_account_receive_deal(message,self.setting.client)
self.backtest_message=message
QA.QA_SU_save_account_message(message,self.setting.client)
#print('buy----------------------------------------------')
#QA.QA_util_log_info(message)
#input()
elif result==1 and int(data['account']['body']['account']['hold'])==1:
QA.QA_util_log_info('Hold and Watch!!!!!!!!!!!!')
##
self.bid.bid['amount']=int(data['account']['body']['account']['portfolio']['amount'])
self.bid.bid['price']=0
self.bid.bid['code']=str(self.strategy_stock_list[0])[0:6]
self.bid.bid['time']=data['market'][-1][6]
self.bid.bid['towards']=1
self.bid.bid['user']=self.setting.QA_setting_user_name
self.bid.bid['strategy']=self.strategy_name
message=self.market.market_make_deal(self.bid.bid,self.setting.client)
message=self.account.QA_account_receive_deal(message,self.setting.client)
# todo hold profit change
elif result==0 and int(data['account']['body']['account']['hold'])==0:
QA.QA_util_log_info('ZERO and Watch!!!!!!!!!!!!')
elif result==0 and int(data['account']['body']['account']['hold'])==1:
self.bid.bid['amount']=int(data['account']['body']['account']['portfolio']['amount'])
self.bid.bid['price']=float(data['market'][-1][4])
self.bid.bid['code']=str(self.strategy_stock_list[0])[0:6]
self.bid.bid['time']=data['market'][-1][6]
self.bid.bid['towards']=-1
self.bid.bid['user']=self.setting.QA_setting_user_name
self.bid.bid['strategy']=self.strategy_name
message=self.market.market_make_deal(self.bid.bid,self.setting.client)
QA.QA_util_log_info(message)
if str(message['header']['status'])[0]=='2':
print('=================sell start')
print(message)
print('sell end==============')
message=self.account.QA_account_receive_deal(message,self.setting.client)
self.backtest_message=message
QA.QA_SU_save_account_message(message,self.setting.client)
#print('sell----------------------------------------------')
#QA.QA_util_log_info(message)
#input()
#print(message)
#QA.QA_SU_save_account_message(message,self.setting.client)
#self.backtest_message=message
# input()
else:print('not enough data')
        # Performance analysis
try:
exist_time=int(self.end_mes['id'])-int(self.start_mes['id'])+1
#print(self.backtest_message)
            #Send this message to the analysis engine for analysis
performace=QA.QABacktest.QAAnalysis.QA_backtest_analysis_start(self.backtest_message,exist_time)
backtest_mes={
'user':self.setting.QA_setting_user_name,
'strategy':self.strategy_name,
'stock_list': self.strategy_stock_list,
'start_time':self.strategy_start_date,
'end_time':self.strategy_end_date,
'account_cookie':self.account.account_cookie,
'total_returns':self.backtest_message['body']['account']['profit'],
'annualized_returns':performace['annualized_returns'],
'benchmark_annualized_returns':performace['benchmark_annualized_returns'],
'benchmark_assest':performace['benchmark_assest'],
'trade_date':performace['trade_date'],
'win_rate':performace['win_rate'],
'alpha':performace['alpha'],
'beta':performace['beta'],
'sharpe':performace['sharpe'],
'vol':performace['vol'],
'benchmark_vol':performace['benchmark_vol'],
'max_drop':performace['max_drop'],
'exist':exist_time
}
            # Aggregated storage of the strategy results (saved under backtest_info)
QA.QA_SU_save_backtest_message(backtest_mes,self.setting.client)
except:
QA.QA_util_log_expection('wrong with performance analysis')
#stock_list=['600592','600538','603588','000001','000002','601801','600613','002138','600010']
#This is our dummy strategy; you can also import one from an external module
def predict(market,account_profit,if_hold,profit_per_trade):
    #Simple example strategy: buy when there is no position, sell when holding one
if if_hold==0:
return 1
else:
return 0
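# Illustrative alternative (an assumption, not from the original file): a predict()
# that looks at the market data instead of the position state. Column 4 of each
# market row is the price column used by handle_data above.
#
#     def predict_ma(market, account_profit, if_hold, profit_per_trade):
#         closes = [float(row[4]) for row in market]
#         short_ma = sum(closes[-5:]) / 5.0
#         long_ma = sum(closes[-20:]) / 20.0
#         return 1 if short_ma > long_ma else 0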
#Externally defined list of instruments to backtest
stock_list=['600592']
# It can contain multiple stocks, or be fetched from the database
for item in stock_list:
    #Call the backtest framework written above
BT=backtest()
    #Initialize it
BT.init()
    #Set the backtest instrument
BT.strategy_stock_list=[item]
    #Run the strategy
BT.handle_data()
| mit | -3,007,287,637,675,071,000 | 45.97561 | 199 | 0.546729 | false |
gem/oq-svir-qgis | svir/dialogs/load_hcurves_as_layer_dialog.py | 1 | 6001 | # -*- coding: utf-8 -*-
# /***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2014 by GEM Foundation
# email : [email protected]
# ***************************************************************************/
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from qgis.core import (
QgsFeature, QgsGeometry, QgsPointXY, edit, QgsTask, QgsApplication)
from svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog
from svir.utilities.utils import log_msg, WaitCursorManager
from svir.tasks.extract_npz_task import ExtractNpzTask
class LoadHazardCurvesAsLayerDialog(LoadOutputAsLayerDialog):
"""
Dialog to load hazard curves from an oq-engine output, as layer
"""
def __init__(self, drive_engine_dlg, iface, viewer_dock, session, hostname,
calc_id, output_type='hcurves', path=None, mode=None,
engine_version=None, calculation_mode=None):
assert output_type == 'hcurves'
LoadOutputAsLayerDialog.__init__(
self, drive_engine_dlg, iface, viewer_dock, session, hostname,
calc_id, output_type=output_type, path=path, mode=mode,
engine_version=engine_version, calculation_mode=calculation_mode)
self.setWindowTitle(
'Load hazard curves as layer')
self.create_num_sites_indicator()
self.create_rlz_or_stat_selector(all_ckb=True)
self.create_imt_selector(all_ckb=True)
self.load_all_rlzs_or_stats_chk.setChecked(True)
self.load_all_imts_chk.setChecked(True)
log_msg('Extracting hazard curves.'
' Watch progress in QGIS task bar',
level='I', message_bar=self.iface.messageBar())
self.extract_npz_task = ExtractNpzTask(
'Extract hazard curves', QgsTask.CanCancel, self.session,
self.hostname, self.calc_id, self.output_type, self.finalize_init,
self.on_extract_error)
QgsApplication.taskManager().addTask(self.extract_npz_task)
def set_ok_button(self):
self.ok_button.setEnabled(True)
def populate_dataset(self):
self.dataset = self.npz_file['all']
def populate_rlz_or_stat_cbx(self):
self.rlzs_or_stats = self.npz_file['all'].dtype.names[2:]
for rlz_or_stat in self.rlzs_or_stats:
self.rlz_or_stat_cbx.addItem(rlz_or_stat)
def on_rlz_or_stat_changed(self):
rlz_or_stat = self.rlz_or_stat_cbx.currentText()
dataset = self.npz_file['all'][rlz_or_stat]
self.imts = [imt for imt in dataset.dtype.names]
self.imt_cbx.clear()
for imt in self.imts:
self.imt_cbx.addItem(imt)
def on_imt_changed(self):
self.set_ok_button()
def show_num_sites(self):
self.num_sites_lbl.setText(
self.num_sites_msg % self.dataset.shape)
def populate_out_dep_widgets(self):
self.populate_rlz_or_stat_cbx()
self.populate_dataset()
self.show_num_sites()
def build_layer_name(self, **kwargs):
investigation_time = self.get_investigation_time()
layer_name = "hcurves_%sy" % investigation_time
return layer_name
def get_field_types(self, **kwargs):
field_types = {}
for rlz_or_stat in self.rlzs_or_stats:
if (not self.load_all_rlzs_or_stats_chk.isChecked()
and rlz_or_stat != self.rlz_or_stat_cbx.currentText()):
continue
for imt in self.dataset[rlz_or_stat].dtype.names:
if (not self.load_all_imts_chk.isChecked()
and imt != self.imt_cbx.currentText()):
continue
for iml in self.dataset[rlz_or_stat][imt].dtype.names:
field_name = "%s_%s_%s" % (rlz_or_stat, imt, iml)
# NOTE: assuming that all fields are numeric
field_types[field_name] = 'F'
return field_types
def on_iml_changed(self):
self.set_ok_button()
def read_npz_into_layer(self, field_names, **kwargs):
with edit(self.layer):
lons = self.npz_file['all']['lon']
lats = self.npz_file['all']['lat']
feats = []
for row_idx, row in enumerate(self.dataset):
feat = QgsFeature(self.layer.fields())
for field_name_idx, field_name in enumerate(field_names):
rlz_or_stat, imt, iml = field_name.split('_')
poe = row[rlz_or_stat][imt][iml].item()
feat.setAttribute(field_name, poe)
feat.setGeometry(QgsGeometry.fromPointXY(
QgsPointXY(lons[row_idx], lats[row_idx])))
feats.append(feat)
added_ok = self.layer.addFeatures(feats)
if not added_ok:
msg = 'There was a problem adding features to the layer.'
log_msg(msg, level='C', message_bar=self.iface.messageBar())
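    # Layout note (a sketch inferred from the code above, not from oq-engine docs):
    # self.npz_file['all'] is a structured array whose first two fields are 'lon'
    # and 'lat', followed by one field per realization/statistic; each of those
    # holds nested fields per IMT and per IML, so a single probability of
    # exceedance is addressed as
    #
    #     poe = npz_file['all'][rlz_or_stat][imt][iml][site_idx]
    #
    # and is flattened here into a layer attribute named "%s_%s_%s".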
def load_from_npz(self):
with WaitCursorManager('Creating layer...', self.iface.messageBar()):
self.build_layer()
self.style_curves()
| agpl-3.0 | 54,562,013,012,367,380 | 41.864286 | 79 | 0.588402 | false |
cliali/py2 | test.py | 1 | 11812 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import telebot
from telebot import types
from telebot import util
import re
import time
from time import sleep
import sys
import json
import os
import logging
import subprocess
import requests
import requests as req
import random
from random import randint
import base64
import urllib
from urllib import urlretrieve as dw
import urllib2
import redis
reload(sys)
sys.setdefaultencoding("utf-8")
#########################################################################################################################################################################
TOKEN = '277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE'
bot = telebot.TeleBot(TOKEN)
is_sudo = '242361127'
redis = redis.StrictRedis(host='localhost', port=6379, db=0)
f = "\n \033[01;30m Bot Firstname: {} \033[0m".format(bot.get_me().first_name)
u = "\n \033[01;34m Bot Username: {} \033[0m".format(bot.get_me().username)
i = "\n \033[01;32m Bot ID: {} \033[0m".format(bot.get_me().id)
c = "\n \033[01;31m Bot Is Online Now! \033[0m"
print(f + u + i + c)
bn = "\n Bot Firstname: {} ".format(bot.get_me().first_name)
bu = "\n Bot Username: {} ".format(bot.get_me().username)
bi = "\n Bot ID: {} ".format(bot.get_me().id)
bc = "\n Bot Is Online Now!"
bot.send_message(is_sudo, '👋\n{} {} {} {}'.format(bn,bu,bi,bc))
#########################################################################################################################################################################
markupstart = types.InlineKeyboardMarkup()
markupstart.add(types.InlineKeyboardButton('🇮🇷فارسی🇮🇷', callback_data='farsi'))
markupstart.add(types.InlineKeyboardButton('🇺🇸English🇺🇸', callback_data='english'))
markupstartfa = types.InlineKeyboardMarkup()
#markupstartfa.add(types.InlineKeyboardButton()
#markupstartfa.add(types.InlineKeyboardButton()
#markupstartfa.add(types.InlineKeyboardButton()
markupstartfa.add(types.InlineKeyboardButton('زمان', callback_data='timefa'))
markupstartfa.add(types.InlineKeyboardButton('رفتن به حالت اینلاین', switch_inline_query=''))
markupstarten = types.InlineKeyboardMarkup()
#markupstarten.add(types.InlineKeyboardButton()
#markupstarten.add(types.InlineKeyboardButton()
#markupstarten.add(types.InlineKeyboardButton()
markupstarten.add(types.InlineKeyboardButton('date', callback_data='timeen'))
markupstarten.add(types.InlineKeyboardButton('Inline mode', switch_inline_query=''))
markupback = types.InlineKeyboardMarkup()
markupback.add(types.InlineKeyboardButton('🔙برگشت', callback_data='backfa'))
markupbacken = types.InlineKeyboardMarkup()
markupbacken.add(types.InlineKeyboardButton('🔙Back', callback_data='backen'))
markupreload = types.InlineKeyboardMarkup()
markupreload.add(types.InlineKeyboardButton('🔃reload', callback_data='reload'))
markupredatefa = types.InlineKeyboardMarkup()
markupredatefa.add(types.InlineKeyboardButton('بروز کردن', callback_data='refa'))
markupredateen = types.InlineKeyboardMarkup()
markupredateen.add(types.InlineKeyboardButton('refresh', callback_data='reen'))
@bot.message_handler(commands=['start'])
def start(message):
id = message.chat.id
redis.sadd('startmebot',id)
if redis.hget("lang:{}".format(message.chat.id),"farsi"):
        bot.send_message(message.chat.id, '*Hi*_\nWelcome To TestBot_*\nPlease Select Your Language*\n`\nسلام\nبه روبات تست خوش آمدید\nلطفا زبان خود را انتخاب کنید`', parse_mode='markdown', reply_markup=markupstart)
    elif redis.hget("lang:{}".format(message.chat.id),"english"):
        bot.send_message(message.chat.id, '*Hi*_\nWelcome To TestBot_*\nPlease Select Your Language*\n`\nسلام\nبه روبات تست خوش آمدید\nلطفا زبان خود را انتخاب کنید`', parse_mode='markdown', reply_markup=markupstart)
    else:
        bot.send_message(message.chat.id, '*Hi*_\nWelcome To TestBot_*\nPlease Select Your Language*\n`\nسلام\nبه روبات تست خوش آمدید\nلطفا زبان خود را انتخاب کنید`', parse_mode='markdown', reply_markup=markupstart)
@bot.message_handler(commands=['reload'])
def reload(m):
cid = m.chat.id
bot.send_message(cid, 'reload command:', reply_markup=markupreload)
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
if call.message:
if call.data == "farsi":
redis.hset("lang:{}".format(call.message.chat.id),"farsi",True)
redis.hdel("lang:{}".format(call.message.chat.id),"english")
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="زبان شما با موفقیت به فارسی انتخاب شد\n\nلطفا یکدام از دکمه های زیر را انتخاب کنید👇", reply_markup=markupstartfa)
            bot.answer_callback_query(callback_query_id=call.id,text="خوش آمدید😊")
if call.message:
if call.data == "english":
redis.hset("lang:{}".format(call.message.chat.id),"english",True)
redis.hdel("lang:{}".format(call.message.chat.id),"farsi")
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Your language is set to English🇺🇸\nPlease select one of the buttons👇", reply_markup=markupstarten)
            bot.answer_callback_query(callback_query_id=call.id,text="Welcome😊")
if call.message:
if call.data == "backfa":
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="به عقب برگشتید🔙\n\nلطفا یکدام از دکمه های زیر را انتخاب کنید👇", reply_markup=markupstartfa)
            bot.answer_callback_query(callback_query_id=call.id, text="به عقب برگشتید🔙")
if call.message:
if call.data == "backen":
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="You went back🔙\nPlease select one of the buttons👇", reply_markup=markupstarten)
            bot.answer_callback_query(callback_query_id=call.id, text="You went back🔙")
if call.message:
if call.data == "reload":
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆____________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆___________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆__________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆_________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆_______]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆______]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆_____]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆____]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆___]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆▆__]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆▆▆_]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆▆▆▆]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reloaded!", reply_markup=markupreload)
if call.message:
if call.data == "timefa":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
FAtime = parsed_jsona['FAtime']
FAdate = parsed_jsona['FAdate']
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="تاریخ: {} \nساعت: {}".format(FAdate,FAtime), reply_markup=markupredatefa)
if call.message:
if call.data == "timeen":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
ENtime = parsed_jsona['ENtime']
ENdate = parsed_jsona['ENdate']
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="date: {} \ntime: {}".format(ENdate,ENtime), reply_markup=markupredateen)
if call.message:
if call.data == "refa":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
FAtime = parsed_jsona['FAtime']
FAdate = parsed_jsona['FAdate']
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="تاریخ: {} \nساعت: {}".format(FAdate,FAtime), reply_markup=markupredatefa)
if call.message:
if call.data == "reen":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
ENtime = parsed_jsona['ENtime']
ENdate = parsed_jsona['ENdate']
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="date: {} \ntime: {}".format(ENdate,ENtime), reply_markup=markupredateen)
bot.polling(none_stop=True, timeout=20)
| apache-2.0 | 4,858,250,203,884,125,000 | 62.588957 | 283 | 0.629084 | false |
th0ma5w/pyPartOfSpeech | POSTagger.py | 1 | 2826 | """
* pyPOS
*
* Python Version Copyright 2011 Thomas Winningham
* Javascript Version and Comments Copyright 2010, Percy Wegmann
* Licensed under the LGPLv3 license
* http://www.opensource.org/licenses/lgpl-3.0.html
Parts of Speech Tagger
"""
from Lexicon import POSTAGGER_LEXICON
def is_number(s):
'''Simple test of string for number'''
try:
float(s)
return True
except ValueError:
return False
class POSTagger:
def __init__(self):
global POSTAGGER_LEXICON
self.lexicon = POSTAGGER_LEXICON
def wordInLexicon(self,word):
'''Test if the word exists in the lexicon'''
if self.lexicon.has_key(word):
return True
# 1/22/2002 mod (from Lisp code): if not in hash, try lower case:
else:
if self.lexicon.has_key(word.lower()):
return True
return False
def tag(self,words):
'''Tag a list of words'''
ret=[None for x in range(len(words))]
for x in range(len(words)):
ss = False
word = words[x]
if self.lexicon.has_key(word):
ss = self.lexicon[word]
# 1/22/2002 mod (from Lisp code): if not in hash, try lower case:
if not ss:
word = word.lower()
if self.lexicon.has_key(word):
ss = self.lexicon[word]
if (not ss and len(word) == 1):
ret[x] = words[x] + "^"
if not ss:
ret[x] = "NN"
else:
ret[x] = ss[0]
#Apply transformational rules
for x in range(len(words)):
word=ret[x]
# rule 1: DT, {VBD | VBP} --> DT, NN
if x > 0 and ret[x-1] == "DT":
if word == "VBD" or word == "VBP" or word == "VB":
ret[x] = "NN"
# rule 2: convert a noun to a number (CD) if "." appears in the word
if word[0] == "N":
if words[x].__contains__('.'):
ret[x] = "CD"
if is_number(words[x]):
ret[x] = "CD"
# rule 3: convert a noun to a past participle if words[i] ends with "ed"
if ret[x][0] == "N" and words[x][-2:] == "ed":
ret[x] = "VBN"
# rule 4: convert any type to adverb if it ends in "ly";
if ret[x][-2:] == 'ly':
ret[x] = "RB"
			# rule 5: convert a common noun (NN or NNS) to an adjective if it ends with "al"
			if ret[x][:2] == "NN" and ret[x][-2:] == "al":
				ret[x] = "JJ"
# rule 6: convert a noun to a verb if the preceding work is "would"
if x > 0 and ret[x][:2] == "NN" and words[x-1].lower() == "would":
ret[x] = "VB"
# rule 7: if a word has been categorized as a common noun and it ends with "s",
# then set its type to plural common noun (NNS)
if ret[x] == "NN" and words[x][-1:] == "s":
ret[x] = "NNS"
# rule 8: convert a common noun to a present participle verb (i.e., a gerund)
if ret[x] == "NN" and words[x][-3:] == "ing":
ret[x] = "VBG"
result = zip(words,ret)
return result
if __name__ == "__main__":
print POSTagger().tag(["i", "went", "to", "the", "store", "to", "buy", "5.2", "gallons", "of", "milk"])
| lgpl-3.0 | 7,371,887,207,909,501,000 | 29.387097 | 104 | 0.591295 | false |
facebookexperimental/eden | eden/hg-server/edenscm/hgext/infinitepush/__init__.py | 1 | 13116 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Infinite push
""" store draft commits in the cloud
Configs::
[infinitepush]
# Server-side and client-side option. Pattern of the infinitepush bookmark
branchpattern = PATTERN
# Server or client
server = False
# Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
indextype = disk
# Server-side option. Used only if indextype=sql.
# Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
# Server-side option. Used only if indextype=disk.
# Filesystem path to the index store
indexpath = PATH
# Server-side option. Possible values: 'disk' or 'external'
# Fails if not set
storetype = disk
# Server-side option.
# Path to the binary that will save bundle to the bundlestore
# Formatted cmd line will be passed to it (see `put_args`)
put_binary = put
# Server-side option. Used only if storetype=external.
# Format cmd-line string for put binary. Placeholder: {filename}
put_args = {filename}
# Server-side option.
# Path to the binary that get bundle from the bundlestore.
# Formatted cmd line will be passed to it (see `get_args`)
get_binary = get
# Server-side option. Used only if storetype=external.
# Format cmd-line string for get binary. Placeholders: {filename} {handle}
get_args = {filename} {handle}
# Server-side option
    logfile = FILE
# Server-side option
loglevel = DEBUG
# Server-side option. Used only if indextype=sql.
# Sets mysql wait_timeout option.
waittimeout = 300
# Server-side option. Used only if indextype=sql.
# Sets mysql innodb_lock_wait_timeout option.
locktimeout = 120
# Server-side option. Used only if indextype=sql.
# limit number of days to generate warning on trying to
# fetch too old commit for hg up / hg pull with short hash rev
shorthasholdrevthreshold = 31
# Server-side option. Used only if indextype=sql.
# Name of the repository
reponame = ''
# Server-side option. Used only if storetype=sql
# Whether or not to record new bundles into the forwardfillqueue table.
forwardfill = False
# Server-side option. Used only if storetype=sql
# Whether or not to record new scratch bookmarks into the
# replaybookmarksqueue table.
replaybookmarks = False
# Client-side option. Used by --list-remote option. List of remote scratch
# patterns to list if no patterns are specified.
defaultremotepatterns = ['*']
# Server-side option. If bookmark that was pushed matches
# `fillmetadatabranchpattern` then background
# `hg debugfillinfinitepushmetadata` process will save metadata
# in infinitepush index for nodes that are ancestor of the bookmark.
fillmetadatabranchpattern = ''
# Instructs infinitepush to forward all received bundle2 parts to the
# bundle for storage. Defaults to False.
storeallparts = True
# Server-side option. Maximum acceptable bundle size in megabytes.
maxbundlesize = 500
# Which compression algorithm to use for infinitepush bundles.
bundlecompression = ZS
[remotenames]
# Client-side option
# This option should be set only if remotenames extension is enabled.
# Whether remote bookmarks are tracked by remotenames extension.
bookmarks = True
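Example of a minimal client-side setup (illustrative only; the bookmark
pattern shown is an assumption for a typical deployment, not a shipped
default)::
    [extensions]
    infinitepush =
    [infinitepush]
    server = False
    branchpattern = re:scratch/.+
    [remotenames]
    bookmarks = True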
"""
from __future__ import absolute_import
from edenscm.mercurial import (
bundle2,
changegroup,
discovery,
error,
extensions,
node as nodemod,
pycompat,
util,
)
from edenscm.mercurial.i18n import _
from . import bundleparts, bundlestore, client, common, infinitepushcommands, server
cmdtable = infinitepushcommands.cmdtable
colortable = {
"commitcloud.changeset": "green",
"commitcloud.meta": "bold",
"commitcloud.commitcloud": "yellow",
}
def reposetup(ui, repo):
common.reposetup(ui, repo)
if common.isserver(ui) and repo.local():
repo.bundlestore = bundlestore.bundlestore(repo)
def uisetup(ui):
# remotenames circumvents the default push implementation entirely, so make
# sure we load after it so that we wrap it.
order = extensions._order
order.remove("infinitepush")
order.append("infinitepush")
extensions._order = order
# Register bundleparts capabilities and handlers.
bundleparts.uisetup(ui)
def extsetup(ui):
common.extsetup(ui)
if common.isserver(ui):
server.extsetup(ui)
else:
client.extsetup(ui)
def _deltaparent(orig, self, revlog, rev, p1, p2, prev):
# This version of deltaparent prefers p1 over prev to use less space
dp = revlog.deltaparent(rev)
if dp == nodemod.nullrev and not revlog.storedeltachains:
# send full snapshot only if revlog configured to do so
return nodemod.nullrev
return p1
def _createbundler(ui, repo, other):
bundler = bundle2.bundle20(ui, bundle2.bundle2caps(other))
compress = ui.config("infinitepush", "bundlecompression", "UN")
bundler.setcompression(compress)
# Disallow pushback because we want to avoid taking repo locks.
# And we don't need pushback anyway
capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, allowpushback=False))
bundler.newpart("replycaps", data=pycompat.encodeutf8(capsblob))
return bundler
def _sendbundle(bundler, other):
stream = util.chunkbuffer(bundler.getchunks())
try:
reply = other.unbundle(stream, [b"force"], other.url())
# Look for an error part in the response. Note that we don't apply
# the reply bundle, as we're not expecting any response, except maybe
# an error. If we receive any extra parts, that is an error.
for part in reply.iterparts():
if part.type == "error:abort":
raise bundle2.AbortFromPart(
part.params["message"], hint=part.params.get("hint")
)
elif part.type == "reply:changegroup":
pass
else:
raise error.Abort(_("unexpected part in reply: %s") % part.type)
except error.BundleValueError as exc:
raise error.Abort(_("missing support for %s") % exc)
def pushbackupbundle(ui, repo, other, outgoing, bookmarks):
"""
push a backup bundle to the server
Pushes an infinitepush bundle containing the commits described in `outgoing`
and the bookmarks described in `bookmarks` to the `other` server.
"""
# Wrap deltaparent function to make sure that bundle takes less space
# See _deltaparent comments for details
extensions.wrapfunction(changegroup.cg2packer, "deltaparent", _deltaparent)
try:
bundler = _createbundler(ui, repo, other)
bundler.addparam("infinitepush", "True")
pushvarspart = bundler.newpart("pushvars")
pushvarspart.addparam("BYPASS_READONLY", "True", mandatory=False)
backup = False
if outgoing and not outgoing.missing and not bookmarks:
ui.status(_("nothing to back up\n"))
return True
if outgoing and outgoing.missing:
backup = True
parts = bundleparts.getscratchbranchparts(
repo,
other,
outgoing,
confignonforwardmove=False,
ui=ui,
bookmark=None,
create=False,
bookmarknode=None,
)
for part in parts:
bundler.addpart(part)
if bookmarks:
backup = True
bundler.addpart(bundleparts.getscratchbookmarkspart(other, bookmarks))
if backup:
_sendbundle(bundler, other)
return backup
finally:
extensions.unwrapfunction(changegroup.cg2packer, "deltaparent", _deltaparent)
def pushbackupbundlewithdiscovery(ui, repo, other, heads, bookmarks):
if heads:
with ui.configoverride({("remotenames", "fastheaddiscovery"): False}):
outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=heads)
else:
outgoing = None
return pushbackupbundle(ui, repo, other, outgoing, bookmarks)
def isbackedupnodes(getconnection, nodes):
"""
check on the server side if the nodes are backed up using 'known' or 'knownnodes' commands
"""
with getconnection() as conn:
if "knownnodes" in conn.peer.capabilities():
return conn.peer.knownnodes([nodemod.bin(n) for n in nodes])
else:
return conn.peer.known([nodemod.bin(n) for n in nodes])
def pushbackupbundledraftheads(ui, repo, getconnection, heads):
"""
push a backup bundle containing non-public heads to the server
Pushes an infinitepush bundle containing the commits that are non-public
ancestors of `heads`, to the `other` server.
"""
if heads:
# Calculate the commits to back-up. The bundle needs to cleanly
# apply to the server, so we need to include the whole draft stack.
commitstobackup = [
ctx.node() for ctx in repo.set("not public() & ::%ln", heads)
]
# Calculate the parent commits of the commits we are backing up.
# These are the public commits that should be on the server.
parentcommits = [
ctx.node() for ctx in repo.set("parents(roots(%ln))", commitstobackup)
]
# Build a discovery object encapsulating the commits to backup.
# Skip the actual discovery process, as we know exactly which
# commits are missing. For the common commits, include all the
# parents of the commits we are sending. In the unlikely event that
# the server is missing public commits, we will try again with
# discovery enabled.
og = discovery.outgoing(repo, commonheads=parentcommits, missingheads=heads)
og._missing = commitstobackup
og._common = parentcommits
else:
og = None
try:
with getconnection() as conn:
return pushbackupbundle(ui, repo, conn.peer, og, None)
except Exception as e:
ui.warn(_("push failed: %s\n") % e)
ui.warn(_("retrying push with discovery\n"))
with getconnection() as conn:
return pushbackupbundlewithdiscovery(ui, repo, conn.peer, heads, None)
def pushbackupbundlestacks(ui, repo, getconnection, heads):
# Push bundles containing the commits. Initially attempt to push one
# bundle for each stack (commits that share a single root). If a stack is
# too large, or if the push fails, and the stack has multiple heads, push
# head-by-head.
roots = repo.set("roots(not public() & ::%ls)", heads)
newheads = set()
failedheads = set()
for root in roots:
ui.status(_("backing up stack rooted at %s\n") % root)
stack = [ctx.hex() for ctx in repo.set("(%n::%ls)", root.node(), heads)]
if len(stack) == 0:
continue
stackheads = [ctx.hex() for ctx in repo.set("heads(%ls)", stack)]
if len(stack) > 1000:
# This stack is too large, something must have gone wrong
ui.warn(
_("not backing up excessively large stack rooted at %s (%d commits)")
% (root, len(stack))
)
failedheads |= set(stackheads)
continue
if len(stack) < 20 and len(stackheads) > 1:
# Attempt to push the whole stack. This makes it easier on the
# server when accessing one of the head commits, as the ancestors
# will always be in the same bundle.
try:
if pushbackupbundledraftheads(
ui, repo, getconnection, [nodemod.bin(h) for h in stackheads]
):
newheads |= set(stackheads)
continue
else:
ui.warn(_("failed to push stack bundle rooted at %s\n") % root)
except Exception as e:
ui.warn(_("push of stack %s failed: %s\n") % (root, e))
ui.warn(_("retrying each head individually\n"))
# The stack only has one head, is large, or pushing the whole stack
# failed, push each head in turn.
for head in stackheads:
try:
if pushbackupbundledraftheads(
ui, repo, getconnection, [nodemod.bin(head)]
):
newheads.add(head)
continue
else:
ui.warn(
_("failed to push stack bundle with head %s\n")
% nodemod.short(nodemod.bin(head))
)
except Exception as e:
ui.warn(
_("push of head %s failed: %s\n")
% (nodemod.short(nodemod.bin(head)), e)
)
failedheads.add(head)
return newheads, failedheads
| gpl-2.0 | 4,742,549,794,455,033,000 | 34.069519 | 94 | 0.634568 | false |
jasondunsmore/heat | heat/tests/openstack/sahara/test_data_source.py | 1 | 4000 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.sahara import data_source
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
data_source_template = """
heat_template_version: 2015-10-15
resources:
data-source:
type: OS::Sahara::DataSource
properties:
name: my-ds
type: swift
url: swift://container.sahara/text
credentials:
user: admin
password: swordfish
"""
class SaharaDataSourceTest(common.HeatTestCase):
def setUp(self):
super(SaharaDataSourceTest, self).setUp()
t = template_format.parse(data_source_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['data-source']
self.client = mock.Mock()
self.patchobject(data_source.DataSource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ds = data_source.DataSource(name, snippet, stack)
value = mock.MagicMock(id='12345')
self.client.data_sources.create.return_value = value
scheduler.TaskRunner(ds.create)()
return ds
def test_create(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
args = self.client.data_sources.create.call_args[1]
expected_args = {
'name': 'my-ds',
'description': '',
'data_source_type': 'swift',
'url': 'swift://container.sahara/text',
'credential_user': 'admin',
'credential_pass': 'swordfish'
}
self.assertEqual(expected_args, args)
self.assertEqual('12345', ds.resource_id)
expected_state = (ds.CREATE, ds.COMPLETE)
self.assertEqual(expected_state, ds.state)
def test_update(self):
ds = self._create_resource('data-source', self.rsrc_defn,
self.stack)
self.rsrc_defn['Properties']['type'] = 'hdfs'
self.rsrc_defn['Properties']['url'] = 'my/path'
scheduler.TaskRunner(ds.update, self.rsrc_defn)()
data = {
'name': 'my-ds',
'description': '',
'type': 'hdfs',
'url': 'my/path',
'credentials': {
'user': 'admin',
'password': 'swordfish'
}
}
self.client.data_sources.update.assert_called_once_with(
'12345', data)
self.assertEqual((ds.UPDATE, ds.COMPLETE), ds.state)
def test_show_attribute(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'ds': 'info'}
self.client.data_sources.get.return_value = value
self.assertEqual({'ds': 'info'}, ds.FnGetAtt('show'))
def test_validate_password_without_user(self):
self.rsrc_defn['Properties']['credentials'].pop('user')
ds = data_source.DataSource('data-source', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
error_msg = ('Property error: resources.data-source.properties.'
'credentials: Property user not assigned')
self.assertEqual(error_msg, six.text_type(ex))
| apache-2.0 | 1,594,706,282,652,365,600 | 36.037037 | 78 | 0.62625 | false |
chemlab/chemlab | chemlab/core/serialization.py | 1 | 2379 | from __future__ import division, print_function
import json
from collections import Iterable, OrderedDict, namedtuple
import numpy as np
from six import string_types
def isnamedtuple(obj):
"""Heuristic check if an object is a namedtuple."""
return isinstance(obj, tuple) \
and hasattr(obj, "_fields") \
and hasattr(obj, "_asdict") \
and callable(obj._asdict)
def serialize(data):
if data is None or isinstance(data, (bool, int, float, str, string_types)):
return data
if isinstance(data, list):
return [serialize(val) for val in data]
if isinstance(data, OrderedDict):
return {"py/collections.OrderedDict":
[[serialize(k), serialize(v)] for k, v in data.items()]}
if isnamedtuple(data):
return {"py/collections.namedtuple": {
"type": type(data).__name__,
"fields": list(data._fields),
"values": [serialize(getattr(data, f)) for f in data._fields]}}
if isinstance(data, dict):
if all(isinstance(k, str) for k in data):
return {k: serialize(v) for k, v in data.items()}
return {"py/dict": [[serialize(k), serialize(v)] for k, v in data.items()]}
if isinstance(data, tuple):
return {"py/tuple": [serialize(val) for val in data]}
if isinstance(data, set):
return {"py/set": [serialize(val) for val in data]}
if isinstance(data, np.ndarray):
return {"py/numpy.ndarray": {
"values": data.tolist(),
"dtype": str(data.dtype)}}
raise TypeError("Type %s not data-serializable" % type(data))
def restore(dct):
if "py/dict" in dct:
return dict(dct["py/dict"])
if "py/tuple" in dct:
return tuple(dct["py/tuple"])
if "py/set" in dct:
return set(dct["py/set"])
if "py/collections.namedtuple" in dct:
data = dct["py/collections.namedtuple"]
return namedtuple(data["type"], data["fields"])(*data["values"])
if "py/numpy.ndarray" in dct:
data = dct["py/numpy.ndarray"]
return np.array(data["values"], dtype=data["dtype"])
if "py/collections.OrderedDict" in dct:
return OrderedDict(dct["py/collections.OrderedDict"])
return dct
def data_to_json(data):
return json.dumps(serialize(data))
def json_to_data(s):
return json.loads(s, object_hook=restore)
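if __name__ == '__main__':
    # Minimal round-trip sketch; the payload below is illustrative, not part of
    # the module's API. Tuples and numpy arrays survive the JSON round trip.
    payload = {'box': (1.0, 2.0, 3.0), 'coords': np.zeros(3)}
    restored = json_to_data(data_to_json(payload))
    assert restored['box'] == (1.0, 2.0, 3.0)
    assert isinstance(restored['coords'], np.ndarray)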
| gpl-3.0 | 295,518,846,294,320,900 | 35.6 | 83 | 0.612022 | false |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/volumes/paths/ceph_san_path8.py | 1 | 5935 | import zstackwoodpecker.test_state as ts_header
import zstackwoodpecker.operations.resource_operations as res_ops
import random
import os
TestAction = ts_header.TestAction
class VM(object):
def __init__(self, name=None):
self.name = name
self.cloned_name_list = [self.name + '_cloned_vm%s' % i for i in range(5)]
@property
def start(self):
return [TestAction.start_vm, self.name]
@property
def stop(self):
return [TestAction.stop_vm, self.name]
@property
def migrate(self):
return [TestAction.migrate_vm, self.name]
@property
def reinit(self):
return [TestAction.reinit_vm, self.name]
@property
def change_image(self):
return [TestAction.change_vm_image, self.name, os.getenv('imageName_s')]
def clone(self, clone_num=1, full=False):
if full:
return [TestAction.clone_vm, self.name, ','.join(self.cloned_name_list[:clone_num]), "=full"]
else:
return [TestAction.clone_vm, self.name, ','.join(self.cloned_name_list[:clone_num])]
def path():
cond = res_ops.gen_query_conditions('state', '=', "Enabled")
cond = res_ops.gen_query_conditions('status', '=', "Connected", cond)
ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
cond_imagestore = res_ops.gen_query_conditions('type', '=', "ImageStoreBackupStorage", cond)
cond_ceph = res_ops.gen_query_conditions('type', '=', "Ceph", cond)
imagestore = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_imagestore)
ceph_bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_ceph)
san_ps = [ps.uuid for ps in ps_inv if ps.type == 'SharedBlock']
ceph_ps = [ps.uuid for ps in ps_inv if ps.type == 'Ceph']
san_vms = [VM('utility_vm_for_robot_test' + '-' + ps.name) for ps in ps_inv if ps.type == 'SharedBlock']
ceph_vms = [VM('utility_vm_for_robot_test' + '-' + ps.name) for ps in ps_inv if ps.type == 'Ceph']
vm2 = VM('vm2')
if san_ps and ceph_ps:
return dict(initial_formation="template3",
path_list=[[TestAction.create_volume, "ceph_volume1", "=ps_uuid::%s" % ceph_ps[0]],
[TestAction.attach_volume, san_vms[-1].name, "ceph_volume1"],
[TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot1"],
[TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp1'],
[TestAction.resize_volume, san_vms[-1].name, 5*1024*1024],
[TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp2'],
san_vms[-1].stop,
[TestAction.reinit_vm, san_vms[-1].name],
san_vms[-1].start,
san_vms[-1].migrate,
[TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot2"],
san_vms[-1].clone(4),
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[0], "ceph_volume1"],
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[1], "ceph_volume1"],
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[2], "ceph_volume1"],
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[3], "ceph_volume1"],
[TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp3'],
[TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot1"],
[TestAction.create_volume, "san_shared_volume1", "=ps_uuid::%s,scsi,shareable" % random.choice(san_ps)],
ceph_vms[0].migrate,
ceph_vms[0].clone(4),
[TestAction.attach_volume, san_vms[-1].cloned_name_list[0], "san_shared_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[1], "san_shared_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[2], "san_shared_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[3], "san_shared_volume1"],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[0] + '-root', san_vms[-1].cloned_name_list[0] + '-sp1'],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[1] + '-root', san_vms[-1].cloned_name_list[1] + '-sp1'],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[2] + '-root', san_vms[-1].cloned_name_list[2] + '-sp1'],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[3] + '-root', san_vms[-1].cloned_name_list[3] + '-sp1'],
[TestAction.delete_volume, "ceph_volume1"],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[0]],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[1]],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[2]],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[3]],
])
else:
return dict(initial_formation="template3", path_list=[])
| apache-2.0 | -4,641,030,024,721,269,000 | 60.185567 | 152 | 0.533446 | false |
vivainio/androguard | mercury/client/merc/lib/debuggable.py | 1 | 1148 | #!/usr/bin/python
#
# License: Refer to the README in the root directory
#
import argparse, shlex
from basecmd import BaseCmd
class Debuggable(BaseCmd):
def __init__(self, session):
BaseCmd.__init__(self, session)
self.prompt = "*mercury#debuggable> "
def do_back(self, _args):
"""
Return to menu
"""
return -1
def do_info(self, args):
"""
List debuggable apps on the device with optional filter
usage: info [--filter <filter>]
"""
# Define command-line arguments using argparse
parser = argparse.ArgumentParser(prog = 'info', add_help = False)
parser.add_argument('--filter', '-f', metavar = '<filter>')
try:
# Split arguments using shlex - this means that parameters with spaces can be used - escape " characters inside with \
splitargs = parser.parse_args(shlex.split(args))
print self.session.executeCommand("debuggable", "info", {'filter':splitargs.filter} if splitargs.filter else None).getPaddedErrorOrData()
# FIXME: Choose specific exceptions to catch
except:
pass
| gpl-3.0 | -5,308,201,073,243,280,000 | 27.7 | 149 | 0.626307 | false |
smclt30p/PCS | core/plugins/PluginMenu.py | 1 | 1947 | from PyQt5.QtCore import QObject, pyqtSlot
from PyQt5.QtCore import QSignalMapper
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QAction
from core.plugins.PluginLoader import PluginLoader
class PluginMenu(QObject):
currRoot = None
def constructMenu(self, root):
self.currRoot = root
root.clear()
plugins = PluginLoader.getLoadedPlugins()
if len(plugins) == 0:
item = root.addAction("No plugins found")
item.setEnabled(False)
return
for plugin in plugins:
item = root.addMenu(plugin.getPluginName())
actionToggle = item.addAction("UNDEFINED")
if plugin.isActive():
actionToggle.setText("Disable")
else:
actionToggle.setText("Enable")
actionSettings = item.addAction("Settings")
item.addSeparator()
actionAbout = item.addAction("About")
if not plugin.hasAbout or not plugin.isActive():
actionAbout.setEnabled(False)
if not plugin.hasSettings or not plugin.isActive():
actionSettings.setEnabled(False)
actionAbout.triggered.connect(self.handleAbout)
#actionSettings.triggered.connect(self.handleToggle)
actionToggle.triggered.connect(self.handleToggle)
actionAbout.plugin = plugin
actionSettings.plugin = plugin
actionToggle.plugin = plugin
return root
def handleToggle(self):
action = self.sender()
if action.plugin.isActive():
action.plugin.setActive(False)
else:
action.plugin.setActive(True)
PluginLoader.reloadPlugins()
if self.currRoot != None:
self.constructMenu(self.currRoot)
def handleAbout(self):
action = self.sender()
action.plugin.getAboutInterface().show() | bsd-2-clause | 1,721,882,309,426,148,900 | 25.324324 | 64 | 0.616846 | false |
lym/allura-git | ForgeSVN/forgesvn/svn_main.py | 1 | 8582 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#-*- python -*-
import logging
from pylons import tmpl_context as c
# Non-stdlib imports
from ming.utils import LazyProperty
from ming.orm.ormsession import ThreadLocalORMSession
from tg import expose, redirect, validate, flash
from tg.decorators import with_trailing_slash, without_trailing_slash
from timermiddleware import Timer
from paste.deploy.converters import asint
# Pyforge-specific imports
import allura.tasks.repo_tasks
from allura.controllers import BaseController
from allura.controllers.repository import RepoRootController
from allura.lib.decorators import require_post
from allura.lib.repository import RepositoryApp, RepoAdminController
from allura.app import SitemapEntry, ConfigOption, AdminControllerMixin
from allura.lib import helpers as h
from allura.lib import validators as v
from allura import model as M
# Local imports
from . import model as SM
from . import version
from . import widgets
from .controllers import BranchBrowser
from .model.svn import svn_path_exists
log = logging.getLogger(__name__)
class ForgeSVNApp(RepositoryApp):
'''This is the SVN app for PyForge'''
__version__ = version.__version__
config_options = RepositoryApp.config_options + [
ConfigOption('checkout_url', str, '')
]
permissions_desc = dict(RepositoryApp.permissions_desc, **{
'write': 'Repo commit access.',
'admin': 'Set permissions, checkout url, and viewable files. Import a remote repo.',
})
tool_label = 'SVN'
tool_description = """
Enterprise-class centralized version control for the masses.
"""
ordinal = 4
forkable = False
default_branch_name = 'HEAD'
def __init__(self, project, config):
super(ForgeSVNApp, self).__init__(project, config)
self.root = BranchBrowser()
default_root = RepoRootController()
self.root.refresh = default_root.refresh
self.root.commit_browser = default_root.commit_browser
self.root.commit_browser_data = SVNCommitBrowserController().commit_browser_data
self.root.status = default_root.status
self.admin = SVNRepoAdminController(self)
@LazyProperty
def repo(self):
return SM.Repository.query.get(app_config_id=self.config._id)
def install(self, project):
'''Create repo object for this tool'''
super(ForgeSVNApp, self).install(project)
SM.Repository(
name=self.config.options.mount_point,
tool='svn',
status='initializing',
fs_path=self.config.options.get('fs_path'))
ThreadLocalORMSession.flush_all()
init_from_url = self.config.options.get('init_from_url')
init_from_path = self.config.options.get('init_from_path')
if init_from_url or init_from_path:
allura.tasks.repo_tasks.clone.post(
cloned_from_path=init_from_path,
cloned_from_name=None,
cloned_from_url=init_from_url)
else:
allura.tasks.repo_tasks.init.post()
def admin_menu(self):
links = super(ForgeSVNApp, self).admin_menu()
links.insert(1, SitemapEntry(
'Import Repo',
c.project.url() + 'admin/' + self.config.options.mount_point + '/' + 'importer/'))
return links
class SVNRepoAdminController(RepoAdminController):
def __init__(self, app):
super(SVNRepoAdminController, self).__init__(app)
self.importer = SVNImportController(self.app)
@without_trailing_slash
@expose('jinja:forgesvn:templates/svn/checkout_url.html')
def checkout_url(self, **kw):
return dict(app=self.app, allow_config=True)
@without_trailing_slash
@expose()
@require_post()
@validate({'external_checkout_url': v.NonHttpUrl})
def set_checkout_url(self, **post_data):
checkout_url = (post_data.get('checkout_url') or '').strip()
external_checkout_url = (post_data.get('external_checkout_url') or '').strip()
if not checkout_url or svn_path_exists("file://%s%s/%s" %
(self.app.repo.fs_path,
self.app.repo.name,
checkout_url)):
if (self.app.config.options.get('checkout_url') or '') != checkout_url:
self.app.config.options.checkout_url = checkout_url
flash("Checkout URL successfully changed")
else:
flash("%s is not a valid path for this repository" %
checkout_url, "error")
if 'external_checkout_url' not in c.form_errors:
if (self.app.config.options.get('external_checkout_url') or '') != external_checkout_url:
self.app.config.options.external_checkout_url = external_checkout_url
flash("External checkout URL successfully changed")
else:
flash("Invalid external checkout URL: %s" % c.form_errors['external_checkout_url'], "error")
class SVNImportController(BaseController, AdminControllerMixin):
import_form = widgets.ImportForm()
def __init__(self, app):
self.app = app
@with_trailing_slash
@expose('jinja:forgesvn:templates/svn/import.html')
def index(self, **kw):
c.is_empty = self.app.repo.is_empty()
c.form = self.import_form
return dict()
@without_trailing_slash
@expose()
@require_post()
@validate(import_form, error_handler=index)
def do_import(self, checkout_url=None, **kwargs):
if self.app.repo.is_empty():
with h.push_context(
self.app.config.project_id,
app_config_id=self.app.config._id):
allura.tasks.repo_tasks.reclone.post(
cloned_from_path=None,
cloned_from_name=None,
cloned_from_url=checkout_url)
M.Notification.post_user(
c.user, self.app.repo, 'importing',
text='''Repository import scheduled,
an email notification will be sent when complete.''')
else:
M.Notification.post_user(
c.user, self.app.repo, 'error',
text="Can't import into non empty repository.")
redirect(c.project.url() + 'admin/tools')
class SVNCommitBrowserController(BaseController):
@without_trailing_slash
@expose('json:')
def commit_browser_data(self, start=None, limit=None, **kw):
data = {
'commits': [],
'next_column': 1,
'max_row': 0,
'built_tree': {},
'next_commit': None,
}
limit, _ = h.paging_sanitizer(limit or 100, 0, 0)
for i, commit in enumerate(c.app.repo.log(revs=start, id_only=False, page_size=limit+1)):
if i >= limit:
data['next_commit'] = str(commit['id'])
break
data['commits'].append(str(commit['id']))
data['built_tree'][commit['id']] = {
'column': 0,
'parents': map(str, commit['parents']),
'short_id': '[r%s]' % commit['id'],
'message': commit['message'],
'oid': str(commit['id']),
'row': i,
'url': c.app.repo.url_for_commit(commit['id']),
}
data['max_row'] = len(data['commits']) - 1
return data
def svn_timers():
return Timer(
'svn_lib.{method_name}', SM.svn.SVNLibWrapper, 'checkout', 'add',
'checkin', 'info2', 'log', 'cat', 'list')
def forgesvn_timers():
return Timer('svn_tool.{method_name}', SM.svn.SVNImplementation, '*')
| apache-2.0 | 8,798,467,815,965,985,000 | 37.657658 | 104 | 0.612561 | false |
bytbox/yasmon | sysmon/tests/testrunner.py | 1 | 1610 | #########################################################################
# YASMon - Yet Another System Monitor #
# Copyright (C) 2010 Scott Lawrence #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#########################################################################
"""Master YASMon tester.
"""
#unit tests
from unittest import *
#available unit tests
import localtest,remotetest,daemontest
class MyTestRunner():
"""Custom TestRunner implemenation for YASMon.
This implementation uses curses to a limited extent for the
creation of progress bars and, where possible, colored output.
"""
pass
| gpl-3.0 | 3,108,804,698,239,626,000 | 45 | 73 | 0.496894 | false |
Jozhogg/iris | lib/iris/tests/unit/fileformats/grib/test_load_cubes.py | 1 | 3365 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.grib.load_cubes` function."""
from __future__ import (absolute_import, division, print_function)
import iris.tests as tests
import mock
import iris
import iris.fileformats.grib
import iris.fileformats.grib.load_rules
import iris.fileformats.rules
from iris.fileformats.grib import load_cubes
class TestToggle(tests.IrisTest):
def _test(self, mode, generator, converter):
# Ensure that `load_cubes` defers to
# `iris.fileformats.rules.load_cubes`, passing a correctly
# configured `Loader` instance.
with iris.FUTURE.context(strict_grib_load=mode):
with mock.patch('iris.fileformats.rules.load_cubes') as rules_load:
rules_load.return_value = mock.sentinel.RESULT
result = load_cubes(mock.sentinel.FILES,
mock.sentinel.CALLBACK,
mock.sentinel.REGULARISE)
if mode:
kw_args = {}
else:
kw_args = {'auto_regularise': mock.sentinel.REGULARISE}
loader = iris.fileformats.rules.Loader(
generator, kw_args,
converter, None)
rules_load.assert_called_once_with(mock.sentinel.FILES,
mock.sentinel.CALLBACK,
loader)
self.assertIs(result, mock.sentinel.RESULT)
def test_sloppy_mode(self):
# Ensure that `load_cubes` uses:
# iris.fileformats.grib.grib_generator
# iris.fileformats.grib.load_rules.convert
self._test(False, iris.fileformats.grib.grib_generator,
iris.fileformats.grib.load_rules.convert)
def test_strict_mode(self):
# Ensure that `load_cubes` uses:
# iris.fileformats.grib._message._GribMessage.messages_from_filename
# iris.fileformats.grib._load_convert.convert
self._test(
True,
iris.fileformats.grib._message._GribMessage.messages_from_filename,
iris.fileformats.grib._load_convert.convert)
@tests.skip_data
class Test_load_cubes(tests.IrisTest):
def test_reduced_raw(self):
# Loading a GRIB message defined on a reduced grid without
# interpolating to a regular grid.
gribfile = tests.get_data_path(
("GRIB", "reduced", "reduced_gg.grib2"))
grib_generator = load_cubes(gribfile, auto_regularise=False)
self.assertCML(next(grib_generator))
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 8,962,534,290,254,425,000 | 38.127907 | 79 | 0.629421 | false |
carolFrohlich/nipype | nipype/interfaces/fsl/tests/test_preprocess.py | 1 | 22869 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from builtins import str
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from builtins import open
import os
import tempfile
import shutil
from nipype.testing import (assert_equal, assert_not_equal, assert_raises,
skipif, assert_true)
from nipype.utils.filemanip import split_filename, filename_to_list
from .. import preprocess as fsl
from nipype.interfaces.fsl import Info
from nipype.interfaces.base import File, TraitError, Undefined, isdefined
from nipype.interfaces.fsl import no_fsl
@skipif(no_fsl)
def fsl_name(obj, fname):
"""Create valid fsl name, including file extension for output type.
"""
ext = Info.output_type_to_ext(obj.inputs.output_type)
return fname + ext
tmp_infile = None
tmp_dir = None
@skipif(no_fsl)
def setup_infile():
global tmp_infile, tmp_dir
ext = Info.output_type_to_ext(Info.output_type())
tmp_dir = tempfile.mkdtemp()
tmp_infile = os.path.join(tmp_dir, 'foo' + ext)
open(tmp_infile, 'w')
return tmp_infile, tmp_dir
def teardown_infile(tmp_dir):
shutil.rmtree(tmp_dir)
# test BET
# @with_setup(setup_infile, teardown_infile)
# broken in nose with generators
@skipif(no_fsl)
def test_bet():
tmp_infile, tp_dir = setup_infile()
better = fsl.BET()
yield assert_equal, better.cmd, 'bet'
# Test raising error with mandatory args absent
yield assert_raises, ValueError, better.run
# Test generated outfile name
better.inputs.in_file = tmp_infile
outfile = fsl_name(better, 'foo_brain')
outpath = os.path.join(os.getcwd(), outfile)
realcmd = 'bet %s %s' % (tmp_infile, outpath)
yield assert_equal, better.cmdline, realcmd
# Test specified outfile name
outfile = fsl_name(better, '/newdata/bar')
better.inputs.out_file = outfile
realcmd = 'bet %s %s' % (tmp_infile, outfile)
yield assert_equal, better.cmdline, realcmd
# infile foo.nii doesn't exist
def func():
better.run(in_file='foo2.nii', out_file='bar.nii')
yield assert_raises, TraitError, func
# Our options and some test values for them
# Should parallel the opt_map structure in the class for clarity
opt_map = {
'outline': ('-o', True),
'mask': ('-m', True),
'skull': ('-s', True),
'no_output': ('-n', True),
'frac': ('-f 0.40', 0.4),
'vertical_gradient': ('-g 0.75', 0.75),
'radius': ('-r 20', 20),
'center': ('-c 54 75 80', [54, 75, 80]),
'threshold': ('-t', True),
'mesh': ('-e', True),
'surfaces': ('-A', True)
# 'verbose': ('-v', True),
# 'flags': ('--i-made-this-up', '--i-made-this-up'),
}
# Currently we don't test -R, -S, -B, -Z, -F, -A or -A2
# test each of our arguments
better = fsl.BET()
outfile = fsl_name(better, 'foo_brain')
outpath = os.path.join(os.getcwd(), outfile)
for name, settings in list(opt_map.items()):
better = fsl.BET(**{name: settings[1]})
# Add mandatory input
better.inputs.in_file = tmp_infile
realcmd = ' '.join([better.cmd, tmp_infile, outpath, settings[0]])
yield assert_equal, better.cmdline, realcmd
teardown_infile(tmp_dir)
# test fast
@skipif(no_fsl)
def test_fast():
tmp_infile, tp_dir = setup_infile()
faster = fsl.FAST()
faster.inputs.verbose = True
fasted = fsl.FAST(in_files=tmp_infile, verbose=True)
fasted2 = fsl.FAST(in_files=[tmp_infile, tmp_infile], verbose=True)
yield assert_equal, faster.cmd, 'fast'
yield assert_equal, faster.inputs.verbose, True
yield assert_equal, faster.inputs.manual_seg, Undefined
yield assert_not_equal, faster.inputs, fasted.inputs
yield assert_equal, fasted.cmdline, 'fast -v -S 1 %s' % (tmp_infile)
yield assert_equal, fasted2.cmdline, 'fast -v -S 2 %s %s' % (tmp_infile,
tmp_infile)
faster = fsl.FAST()
faster.inputs.in_files = tmp_infile
yield assert_equal, faster.cmdline, 'fast -S 1 %s' % (tmp_infile)
faster.inputs.in_files = [tmp_infile, tmp_infile]
yield assert_equal, faster.cmdline, 'fast -S 2 %s %s' % (tmp_infile, tmp_infile)
# Our options and some test values for them
# Should parallel the opt_map structure in the class for clarity
opt_map = {'number_classes': ('-n 4', 4),
'bias_iters': ('-I 5', 5),
'bias_lowpass': ('-l 15', 15),
'img_type': ('-t 2', 2),
'init_seg_smooth': ('-f 0.035', 0.035),
'segments': ('-g', True),
'init_transform': ('-a %s' % (tmp_infile), '%s' % (tmp_infile)),
'other_priors': ('-A %s %s %s' % (tmp_infile, tmp_infile,
tmp_infile),
(['%s' % (tmp_infile),
'%s' % (tmp_infile),
'%s' % (tmp_infile)])),
'no_pve': ('--nopve', True),
'output_biasfield': ('-b', True),
'output_biascorrected': ('-B', True),
'no_bias': ('-N', True),
'out_basename': ('-o fasted', 'fasted'),
'use_priors': ('-P', True),
'segment_iters': ('-W 14', 14),
'mixel_smooth': ('-R 0.25', 0.25),
'iters_afterbias': ('-O 3', 3),
'hyper': ('-H 0.15', 0.15),
'verbose': ('-v', True),
'manual_seg': ('-s %s' % (tmp_infile), '%s' % (tmp_infile)),
'probability_maps': ('-p', True),
}
# test each of our arguments
for name, settings in list(opt_map.items()):
faster = fsl.FAST(in_files=tmp_infile, **{name: settings[1]})
yield assert_equal, faster.cmdline, ' '.join([faster.cmd,
settings[0],
"-S 1 %s" % tmp_infile])
teardown_infile(tmp_dir)
@skipif(no_fsl)
def test_fast_list_outputs():
''' By default (no -o), FSL's fast command outputs files into the same
directory as the input files. If the flag -o is set, it outputs files into
the cwd '''
def _run_and_test(opts, output_base):
outputs = fsl.FAST(**opts)._list_outputs()
for output in outputs.values():
filenames = filename_to_list(output)
if filenames is not None:
for filename in filenames:
assert_equal(filename[:len(output_base)], output_base)
# set up
infile, indir = setup_infile()
cwd = tempfile.mkdtemp()
os.chdir(cwd)
yield assert_not_equal, indir, cwd
out_basename = 'a_basename'
# run and test
opts = {'in_files': tmp_infile}
input_path, input_filename, input_ext = split_filename(tmp_infile)
_run_and_test(opts, os.path.join(input_path, input_filename))
opts['out_basename'] = out_basename
_run_and_test(opts, os.path.join(cwd, out_basename))
@skipif(no_fsl)
def setup_flirt():
ext = Info.output_type_to_ext(Info.output_type())
tmpdir = tempfile.mkdtemp()
_, infile = tempfile.mkstemp(suffix=ext, dir=tmpdir)
_, reffile = tempfile.mkstemp(suffix=ext, dir=tmpdir)
return tmpdir, infile, reffile
def teardown_flirt(tmpdir):
shutil.rmtree(tmpdir)
@skipif(no_fsl)
def test_flirt():
# setup
tmpdir, infile, reffile = setup_flirt()
flirter = fsl.FLIRT()
yield assert_equal, flirter.cmd, 'flirt'
flirter.inputs.bins = 256
flirter.inputs.cost = 'mutualinfo'
flirted = fsl.FLIRT(in_file=infile, reference=reffile,
out_file='outfile', out_matrix_file='outmat.mat',
bins=256,
cost='mutualinfo')
flirt_est = fsl.FLIRT(in_file=infile, reference=reffile,
out_matrix_file='outmat.mat',
bins=256,
cost='mutualinfo')
yield assert_not_equal, flirter.inputs, flirted.inputs
yield assert_not_equal, flirted.inputs, flirt_est.inputs
yield assert_equal, flirter.inputs.bins, flirted.inputs.bins
yield assert_equal, flirter.inputs.cost, flirt_est.inputs.cost
realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \
'-bins 256 -cost mutualinfo' % (infile, reffile)
yield assert_equal, flirted.cmdline, realcmd
flirter = fsl.FLIRT()
# infile not specified
yield assert_raises, ValueError, flirter.run
flirter.inputs.in_file = infile
# reference not specified
yield assert_raises, ValueError, flirter.run
flirter.inputs.reference = reffile
# Generate outfile and outmatrix
pth, fname, ext = split_filename(infile)
outfile = fsl_name(flirter, '%s_flirt' % fname)
outmat = '%s_flirt.mat' % fname
realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile,
outfile, outmat)
yield assert_equal, flirter.cmdline, realcmd
_, tmpfile = tempfile.mkstemp(suffix='.nii', dir=tmpdir)
# Loop over all inputs, set a reasonable value and make sure the
# cmdline is updated correctly.
for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()):
# Skip mandatory inputs and the trait methods
if key in ('trait_added', 'trait_modified', 'in_file', 'reference',
'environ', 'output_type', 'out_file', 'out_matrix_file',
'in_matrix_file', 'apply_xfm', 'ignore_exception',
'terminal_output', 'out_log', 'save_log'):
continue
param = None
value = None
if key == 'args':
param = '-v'
value = '-v'
elif isinstance(trait_spec.trait_type, File):
value = tmpfile
param = trait_spec.argstr % value
elif trait_spec.default is False:
param = trait_spec.argstr
value = True
elif key in ('searchr_x', 'searchr_y', 'searchr_z'):
value = [-45, 45]
param = trait_spec.argstr % ' '.join(str(elt) for elt in value)
else:
value = trait_spec.default
param = trait_spec.argstr % value
cmdline = 'flirt -in %s -ref %s' % (infile, reffile)
# Handle autogeneration of outfile
pth, fname, ext = split_filename(infile)
outfile = fsl_name(fsl.FLIRT(), '%s_flirt' % fname)
outfile = ' '.join(['-out', outfile])
# Handle autogeneration of outmatrix
outmatrix = '%s_flirt.mat' % fname
outmatrix = ' '.join(['-omat', outmatrix])
# Build command line
cmdline = ' '.join([cmdline, outfile, outmatrix, param])
flirter = fsl.FLIRT(in_file=infile, reference=reffile)
setattr(flirter.inputs, key, value)
yield assert_equal, flirter.cmdline, cmdline
# Test OutputSpec
flirter = fsl.FLIRT(in_file=infile, reference=reffile)
pth, fname, ext = split_filename(infile)
flirter.inputs.out_file = ''.join(['foo', ext])
flirter.inputs.out_matrix_file = ''.join(['bar', ext])
outs = flirter._list_outputs()
yield assert_equal, outs['out_file'], \
os.path.join(os.getcwd(), flirter.inputs.out_file)
yield assert_equal, outs['out_matrix_file'], \
os.path.join(os.getcwd(), flirter.inputs.out_matrix_file)
teardown_flirt(tmpdir)
# Mcflirt
@skipif(no_fsl)
def test_mcflirt():
tmpdir, infile, reffile = setup_flirt()
frt = fsl.MCFLIRT()
yield assert_equal, frt.cmd, 'mcflirt'
# Test generated outfile name
frt.inputs.in_file = infile
_, nme = os.path.split(infile)
outfile = os.path.join(os.getcwd(), nme)
outfile = frt._gen_fname(outfile, suffix='_mcf')
realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile
yield assert_equal, frt.cmdline, realcmd
# Test specified outfile name
outfile2 = '/newdata/bar.nii'
frt.inputs.out_file = outfile2
realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile2
yield assert_equal, frt.cmdline, realcmd
opt_map = {
'cost': ('-cost mutualinfo', 'mutualinfo'),
'bins': ('-bins 256', 256),
'dof': ('-dof 6', 6),
'ref_vol': ('-refvol 2', 2),
'scaling': ('-scaling 6.00', 6.00),
'smooth': ('-smooth 1.00', 1.00),
'rotation': ('-rotation 2', 2),
'stages': ('-stages 3', 3),
'init': ('-init %s' % (infile), infile),
'use_gradient': ('-gdt', True),
'use_contour': ('-edge', True),
'mean_vol': ('-meanvol', True),
'stats_imgs': ('-stats', True),
'save_mats': ('-mats', True),
'save_plots': ('-plots', True),
}
for name, settings in list(opt_map.items()):
fnt = fsl.MCFLIRT(in_file=infile, **{name: settings[1]})
instr = '-in %s' % (infile)
outstr = '-out %s' % (outfile)
if name in ('init', 'cost', 'dof', 'mean_vol', 'bins'):
yield assert_equal, fnt.cmdline, ' '.join([fnt.cmd,
instr,
settings[0],
outstr])
else:
yield assert_equal, fnt.cmdline, ' '.join([fnt.cmd,
instr,
outstr,
settings[0]])
# Test error is raised when missing required args
fnt = fsl.MCFLIRT()
yield assert_raises, ValueError, fnt.run
teardown_flirt(tmpdir)
# test fnirt
@skipif(no_fsl)
def test_fnirt():
tmpdir, infile, reffile = setup_flirt()
fnirt = fsl.FNIRT()
yield assert_equal, fnirt.cmd, 'fnirt'
# Test list parameters
params = [('subsampling_scheme', '--subsamp', [4, 2, 2, 1], '4,2,2,1'),
('max_nonlin_iter', '--miter', [4, 4, 4, 2], '4,4,4,2'),
('ref_fwhm', '--reffwhm', [4, 2, 2, 0], '4,2,2,0'),
('in_fwhm', '--infwhm', [4, 2, 2, 0], '4,2,2,0'),
('apply_refmask', '--applyrefmask', [0, 0, 1, 1], '0,0,1,1'),
('apply_inmask', '--applyinmask', [0, 0, 0, 1], '0,0,0,1'),
('regularization_lambda', '--lambda', [0.5, 0.75], '0.5,0.75')]
for item, flag, val, strval in params:
fnirt = fsl.FNIRT(in_file=infile,
ref_file=reffile,
**{item: val})
log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False)
iout = fnirt._gen_fname(infile, suffix='_warped')
if item in ('max_nonlin_iter'):
cmd = 'fnirt --in=%s '\
'--logout=%s'\
' %s=%s --ref=%s'\
' --iout=%s' % (infile, log,
flag, strval, reffile, iout)
elif item in ('in_fwhm'):
cmd = 'fnirt --in=%s %s=%s --logout=%s '\
'--ref=%s --iout=%s' % (infile, flag,
strval, log, reffile, iout)
elif item.startswith('apply'):
cmd = 'fnirt %s=%s '\
'--in=%s '\
'--logout=%s '\
'--ref=%s --iout=%s' % (flag, strval,
infile, log,
reffile,
iout)
else:
cmd = 'fnirt '\
'--in=%s --logout=%s '\
'--ref=%s %s=%s --iout=%s' % (infile, log,
reffile,
flag, strval,
iout)
yield assert_equal, fnirt.cmdline, cmd
# Test ValueError is raised when missing mandatory args
fnirt = fsl.FNIRT()
yield assert_raises, ValueError, fnirt.run
fnirt.inputs.in_file = infile
fnirt.inputs.ref_file = reffile
# test files
opt_map = {
'affine_file': ('--aff='),
'inwarp_file': ('--inwarp='),
'in_intensitymap_file': ('--intin='),
'config_file': ('--config='),
'refmask_file': ('--refmask='),
'inmask_file': ('--inmask='),
'field_file': ('--fout='),
'jacobian_file': ('--jout='),
'modulatedref_file': ('--refout='),
'out_intensitymap_file': ('--intout='),
'log_file': ('--logout=')}
for name, settings in list(opt_map.items()):
fnirt = fsl.FNIRT(in_file=infile,
ref_file=reffile,
**{name: infile})
if name in ('config_file', 'affine_file', 'field_file'):
cmd = 'fnirt %s%s --in=%s '\
'--logout=%s '\
'--ref=%s --iout=%s' % (settings, infile, infile, log,
reffile, iout)
elif name in ('refmask_file'):
cmd = 'fnirt --in=%s '\
'--logout=%s --ref=%s '\
'%s%s '\
'--iout=%s' % (infile, log,
reffile,
settings, infile,
iout)
elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file', 'jacobian_file'):
cmd = 'fnirt --in=%s '\
'%s%s '\
'--logout=%s --ref=%s '\
'--iout=%s' % (infile,
settings, infile,
log,
reffile,
iout)
elif name in ('log_file'):
cmd = 'fnirt --in=%s '\
'%s%s --ref=%s '\
'--iout=%s' % (infile,
settings, infile,
reffile,
iout)
else:
cmd = 'fnirt --in=%s '\
'--logout=%s %s%s '\
'--ref=%s --iout=%s' % (infile, log,
settings, infile,
reffile, iout)
yield assert_equal, fnirt.cmdline, cmd
teardown_flirt(tmpdir)
@skipif(no_fsl)
def test_applywarp():
tmpdir, infile, reffile = setup_flirt()
opt_map = {
'out_file': ('--out=bar.nii', 'bar.nii'),
'premat': ('--premat=%s' % (reffile), reffile),
'postmat': ('--postmat=%s' % (reffile), reffile),
}
# in_file, ref_file, field_file mandatory
for name, settings in list(opt_map.items()):
awarp = fsl.ApplyWarp(in_file=infile,
ref_file=reffile,
field_file=reffile,
**{name: settings[1]})
if name == 'out_file':
realcmd = 'applywarp --in=%s '\
'--ref=%s --out=%s '\
'--warp=%s' % (infile, reffile,
settings[1], reffile)
else:
outfile = awarp._gen_fname(infile, suffix='_warp')
realcmd = 'applywarp --in=%s '\
'--ref=%s --out=%s '\
'--warp=%s %s' % (infile, reffile,
outfile, reffile,
settings[0])
yield assert_equal, awarp.cmdline, realcmd
awarp = fsl.ApplyWarp(in_file=infile,
ref_file=reffile,
field_file=reffile)
teardown_flirt(tmpdir)
@skipif(no_fsl)
def setup_fugue():
import nibabel as nb
import numpy as np
import os.path as op
d = np.ones((80, 80, 80))
tmpdir = tempfile.mkdtemp()
infile = op.join(tmpdir, 'dumbfile.nii.gz')
nb.Nifti1Image(d, None, None).to_filename(infile)
return tmpdir, infile
def teardown_fugue(tmpdir):
shutil.rmtree(tmpdir)
@skipif(no_fsl)
def test_fugue():
import os.path as op
tmpdir, infile = setup_fugue()
fugue = fsl.FUGUE()
fugue.inputs.save_unmasked_fmap = True
fugue.inputs.fmap_in_file = infile
fugue.inputs.mask_file = infile
fugue.inputs.output_type = "NIFTI_GZ"
res = fugue.run()
if not isdefined(res.outputs.fmap_out_file):
yield False
else:
trait_spec = fugue.inputs.trait('fmap_out_file')
out_name = trait_spec.name_template % 'dumbfile'
out_name += '.nii.gz'
yield assert_equal, op.basename(res.outputs.fmap_out_file), out_name
fugue = fsl.FUGUE()
fugue.inputs.save_unmasked_shift = True
fugue.inputs.fmap_in_file = infile
fugue.inputs.dwell_time = 1.0e-3
fugue.inputs.mask_file = infile
fugue.inputs.output_type = "NIFTI_GZ"
res = fugue.run()
if not isdefined(res.outputs.shift_out_file):
yield False
else:
trait_spec = fugue.inputs.trait('shift_out_file')
out_name = trait_spec.name_template % 'dumbfile'
out_name += '.nii.gz'
yield assert_equal, op.basename(res.outputs.shift_out_file), \
out_name
fugue = fsl.FUGUE()
fugue.inputs.in_file = infile
fugue.inputs.mask_file = infile
# Previously computed with fugue as well
fugue.inputs.shift_in_file = infile
fugue.inputs.output_type = "NIFTI_GZ"
res = fugue.run()
if not isdefined(res.outputs.unwarped_file):
yield False
else:
trait_spec = fugue.inputs.trait('unwarped_file')
out_name = trait_spec.name_template % 'dumbfile'
out_name += '.nii.gz'
yield assert_equal, op.basename(res.outputs.unwarped_file), out_name
teardown_fugue(tmpdir)
@skipif(no_fsl)
def test_first_genfname():
first = fsl.FIRST()
first.inputs.out_file = 'segment.nii'
first.inputs.output_type = "NIFTI_GZ"
value = first._gen_fname(name='original_segmentations')
expected_value = os.path.abspath('segment_all_fast_origsegs.nii.gz')
yield assert_equal, value, expected_value
first.inputs.method = 'none'
value = first._gen_fname(name='original_segmentations')
expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz')
yield assert_equal, value, expected_value
first.inputs.method = 'auto'
first.inputs.list_of_specific_structures = ['L_Hipp', 'R_Hipp']
value = first._gen_fname(name='original_segmentations')
expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz')
yield assert_equal, value, expected_value
@skipif(no_fsl)
def test_deprecation():
interface = fsl.ApplyXfm()
yield assert_true, isinstance(interface, fsl.ApplyXFM)
| bsd-3-clause | -4,196,893,640,865,034,000 | 35.826087 | 93 | 0.526389 | false |
jim-easterbrook/pyctools | src/pyctools/components/io/imagesequencereader.py | 1 | 5381 | # Pyctools - a picture processing algorithm development kit.
# http://github.com/jim-easterbrook/pyctools
# Copyright (C) 2019-20 Pyctools contributors
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import print_function
__all__ = ['ImageSequenceReader']
__docformat__ = 'restructuredtext en'
import os
try:
import cv2
except ImportError:
cv2 = None
import numpy
try:
import PIL.Image as PIL
except ImportError:
PIL = None
from pyctools.core.config import ConfigEnum, ConfigPath
from pyctools.core.base import Component
from pyctools.core.frame import Metadata
from pyctools.core.types import pt_float
class ImageSequenceReader(Component):
"""Read a set of image files (JPEG, PNG, TIFF, etc).
The ``firstfile`` and ``lastfile`` strings must be identical apart
from a decimal number that signifies the position in the sequence.
This number can be anywhere in the filename and need not have
leading zeros.
============= ==== ====
Config
============= ==== ====
``firstfile`` str Path name of first file in the sequence.
``lastfile`` str Path name of last file in the sequence.
``looping`` str Whether to play continuously. Can be ``'off'`` or ``'repeat'``.
============= ==== ====
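    For example (illustrative paths), setting ``firstfile`` to
    ``/data/frame_0001.png`` and ``lastfile`` to ``/data/frame_0250.png``
    selects ``frame_0001.png`` through ``frame_0250.png`` in numerical order.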
"""
inputs = []
def initialise(self):
self.metadata = None
self.config['firstfile'] = ConfigPath()
self.config['lastfile'] = ConfigPath()
self.config['looping'] = ConfigEnum(choices=('off', 'repeat'))
def process_frame(self):
self.update_config()
if self.metadata is None:
first_name = self.config['firstfile']
last_name = self.config['lastfile']
# read metadata
self.metadata = Metadata().from_file(first_name)
self.metadata.set_audit(
self, 'data = {}..{}\n'.format(
os.path.basename(first_name), os.path.basename(last_name)),
with_config=self.config)
# compare file names
prefix = ''
for a, b in zip(first_name, last_name):
if a != b:
break
prefix += a
suffix = ''
for a, b in zip(first_name[::-1], last_name[::-1]):
if a != b:
break
suffix = a + suffix
first_frame = first_name[len(prefix):-len(suffix)]
last_frame = last_name[len(prefix):-len(suffix)]
self.format_ = prefix + '{:0' + str(len(first_frame)) + 'd}' + suffix
self.first_frame = int(first_frame)
self.last_frame = int(last_frame)
# initialise looping parameters
self.frame_no = 0
self.frame_idx = self.first_frame
# get path of this frame
if self.frame_idx > self.last_frame:
if self.config['looping'] == 'off':
raise StopIteration()
self.frame_idx = self.first_frame
path = self.format_.format(self.frame_idx)
self.frame_idx += 1
# read data
image = None
if cv2:
# try OpenCV first, as it can do 16 bit
image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
# scale data
if image.dtype == numpy.uint8:
pass
elif image.dtype == numpy.uint16:
image = image.astype(numpy.float32) / numpy.float32(2 ** 8)
else:
self.logger.error('Cannot handle %s data type', str(image.dtype))
raise StopIteration()
# rearrange components
if image.shape[2] == 4:
# RGBA image
B, G, R, A = numpy.dsplit(image, 4)
image = numpy.dstack((R, G, B, A))
frame_type = 'RGBA'
elif image.shape[2] == 3:
# RGB image
B, G, R = numpy.dsplit(image, 3)
image = numpy.dstack((R, G, B))
frame_type = 'RGB'
elif image.shape[2] == 1:
frame_type = 'Y'
else:
frame_type = '???'
if PIL and image is None:
# try PIL as it handles more image types
image = PIL.open(path)
image.load()
frame_type = image.mode
if image is None:
self.logger.error('Cannot read file %s', path)
raise StopIteration()
# send frame
out_frame = self.outframe_pool['output'].get()
out_frame.data = image
out_frame.type = frame_type
out_frame.frame_no = self.frame_no
self.frame_no += 1
out_frame.metadata.copy(self.metadata)
self.send('output', out_frame)
| gpl-3.0 | 8,025,705,260,541,741,000 | 35.358108 | 88 | 0.560676 | false |
XueqingLin/tensorflow | tensorflow/tensorboard/backend/server_test.py | 1 | 18758 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import threading
import zlib
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.platform import resource_loader
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
from tensorflow.tensorboard.plugins import REGISTERED_PLUGINS
class TensorboardServerTest(tf.test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
self._GenerateTestData()
self._multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
server.ReloadMultiplexer(self._multiplexer, {self.get_temp_dir(): None})
# 0 to pick an unused port.
self._server = server.BuildServer(
self._multiplexer, 'localhost', 0, '/foo/logdir/argument')
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers={}):
"""Perform a GET request for the given path."""
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': '/foo/logdir/argument'})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(isinstance(run_json['run1']['firstEventTimestamp'],
numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(run_json, {'run1': {
'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
# if only_use_meta_graph, the graph is extracted from the metagraph
'graph': True,
'meta_graph': self._only_use_meta_graph,
'run_metadata': ['test run']}})
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Cache-Control'),
'private, max-age=3600', msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs',
'/data/logdir',
'/data/scalars?run=run1&tag=simple_values',
'/data/scalars?run=run1&tag=simple_values&format=csv',
'/data/images?run=run1&tag=image',
'/data/individualImage?run=run1&tag=image&index=0',
'/data/audio?run=run1&tag=audio',
'/data/run_metadata?run=run1&tag=test%20run'):
connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
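    # The nested list is [wall_time, step, histogram]; the histogram part
    # mirrors the HistogramProto written in _GenerateTestData:
    # [min, max, num, sum, sum_squares, bucket_limit, bucket].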
def testSampleScalars(self):
"""Test the sample_count parameter of /data/scalars."""
for i in xrange(10, self._SCALAR_COUNT, 10):
samples = self._getJson('/data/scalars?sample_count=%d' % i)
values = samples['run1']['simple_values']
# Verify that we got the right amount of values and that we got the
# endpoints.
self.assertEqual(len(values), i)
self.assertEqual(values[0], [100, 10, 1])
self.assertEqual(values[-1], [9900, 990, 99])
def testSampleScalarsWithLargeSampleCount(self):
"""Test using a large sample_count."""
samples = self._getJson('/data/scalars?sample_count=999999')
values = samples['run1']['simple_values']
self.assertEqual(len(values), self._SCALAR_COUNT)
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = tf.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testProjectorRunsWithEmbeddings(self):
"""Test the format of /runs endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
run_json = self._getJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['run1'])
def testProjectorInfo(self):
"""Test the format of /info endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
info_json = self._getJson('/data/plugin/projector/info?run=run1')
self.assertEqual(info_json['tensors'], {
'var1': {
'shape': [1, 2],
'name': 'var1',
'metadataFile': None,
'bookmarksFile': None,
},
'var2': {
'shape': [10, 10],
'name': 'var2',
'metadataFile': None,
'bookmarksFile': None,
},
'var3': {
'shape': [100, 100],
'name': 'var3',
'metadataFile': None,
'bookmarksFile': None,
}
})
def testProjectorTensor(self):
"""Test the format of /tensor endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
tensor_tsv = (self._get('/data/plugin/projector/tensor?run=run1&name=var1')
.read())
self.assertEqual(tensor_tsv, b'6.0\t6.0')
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = tf.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
    The test data has a single run named run1 which contains:
    - a histogram
    - an image and an audio clip at timestamp and step 0
    - scalar events containing the value i at step 10 * i and wall time
      100 * i, for i in [1, _SCALAR_COUNT]
    - a graph definition and a run metadata entry named 'test run'
    - projector data (config and checkpoint) when the projector plugin is
      available
"""
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = tf.train.SummaryWriter(run1_path)
histogram_value = tf.HistogramProto(min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = tf.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = tf.Summary.Image(height=1,
width=1,
colorspace=1,
encoded_image_string=encoded_image)
audio_value = tf.Summary.Audio(sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(tf.Event(wall_time=0,
step=0,
summary=tf.Summary(value=[
tf.Summary.Value(tag='histogram',
histo=histogram_value),
tf.Summary.Value(tag='image',
image=image_value),
tf.Summary.Value(tag='audio',
audio=audio_value)
])))
    # Write _SCALAR_COUNT simple scalar values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(tf.Event(
# We use different values for wall time, step, and the value so we can
# tell them apart.
wall_time=100 * i,
step=10 * i,
summary=tf.Summary(value=[tf.Summary.Value(tag='simple_values',
simple_value=i)])))
writer.flush()
writer.close()
if 'projector' in REGISTERED_PLUGINS:
self._GenerateProjectorTestData(run1_path)
def _GenerateProjectorTestData(self, run_path):
# Write a projector config file in run1.
config_path = os.path.join(run_path, 'projector_config.pbtxt')
config = ProjectorConfig()
config_pbtxt = text_format.MessageToString(config)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with tf.Graph().as_default():
sess = tf.Session()
checkpoint_path = os.path.join(run_path, 'model')
tf.get_variable(
'var1', [1, 2], initializer=tf.constant_initializer(6.0))
tf.get_variable('var2', [10, 10])
tf.get_variable('var3', [100, 100])
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
saver.save(sess, checkpoint_path)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(tf.test.TestCase):
def testRunName(self):
logdir_string = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir_string = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testMultipleDirectories(self):
logdir_string = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testNormalizesPaths(self):
logdir_string = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testAbsolutifies(self):
logdir_string = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRespectsGCSPath(self):
logdir_string = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir_string = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotNormalizeGCSPath(self):
logdir_string = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
class TensorBoardAssetsTest(tf.test.TestCase):
def testTagFound(self):
tag = resource_loader.load_resource('tensorboard/TAG')
self.assertTrue(tag)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -5,388,877,923,216,814,000 | 37.917012 | 97 | 0.625973 | false |
radinformatics/whatisit | whatisit/apps/api/serializers.py | 1 | 1125 | from django.contrib.auth.models import User
from whatisit.apps.wordfish.models import (
Annotation,
AllowedAnnotation,
Report,
ReportCollection,
ReportSet
)
from rest_framework import serializers
class ReportSerializer(serializers.ModelSerializer):
class Meta:
model = Report
fields = ('report_id', 'report_text')
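# Illustrative output (hypothetical values): ReportSerializer(report).data
# would yield {"report_id": "rpt-1", "report_text": "..."} given the fields
# declared above.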
#class SingleReportSerializer(serializers.ModelSerializer):
# class Meta:
# model = Report
# fields = ('id','report_text')
class ReportCollectionSerializer(serializers.ModelSerializer):
class Meta:
model = ReportCollection
fields = ('name',)
class ReportSetSerializer(serializers.ModelSerializer):
reports = serializers.PrimaryKeyRelatedField(many=True, queryset=Report.objects.all())
class Meta:
model = ReportSet
fields = ('name','reports')
#class UserSerializer(serializers.ModelSerializer):
# collections = serializers.PrimaryKeyRelatedField(many=True, queryset=ReportCollection.objects.all())#
# class Meta:
# model = User
# fields = ('id', 'username', 'collections')
| mit | 860,719,285,074,145,900 | 25.162791 | 106 | 0.695111 | false |
akvo/akvo-rsr | akvo/rsr/management/commands/a4a_optimy_import.py | 1 | 11690 | #!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Import projects from Optimy for Aqua for All
Usage:
python manage.py a4a_optimy_import [--project-id <optimy_project_id>]
"""
from itertools import groupby
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
import requests
import textwrap
from akvo.rsr.iso3166 import ISO_3166_COUNTRIES
from akvo.rsr.models import (
BudgetItem,
BudgetItemLabel,
Organisation,
Partnership,
Project,
ProjectCustomField,
ProjectLocation,
)
from akvo.utils import custom_get_or_create_country
BASE_URL = "https://api.optimytool.com/v1.3"
USERNAME = settings.OPTIMY_USERNAME
PASSWORD = settings.OPTIMY_PASSWORD
COUNTRY_NAME_TO_ISO_MAP = {name: code for code, name in ISO_3166_COUNTRIES}
MASTER_PROGRAM_ID = 9062
PROGRAM_IDS = {"VIA Water": 9222, "SCALE": 9224, "Response Facility": 9469}
OPTIMY_FORM_IDS = {
"making-water-count": "68d4a00a-416d-5ce1-9c12-2d6d1dc1a047",
"response-facility": "6e962295-06c9-5de1-a39e-9cd2272b1837",
}
FORM_QUESTION_MAPPING = {
# Making Water Count
"68d4a00a-416d-5ce1-9c12-2d6d1dc1a047": {
"title": "9900586f-3c4b-5e3e-a9e6-a209eb8cb8e3",
# FIXME: subtitle?
"cofinancing-budget": "6c05de7b-4031-5809-a692-a45beadf7cec",
"a4a-budget": "b0268b0c-d7e9-513a-bb27-1de7c0ec593a",
"total-budget": "322932f0-e294-5621-a37b-fd57fec9937a",
"start-date": "b785b97e-64f7-5149-a07b-7216497aa39f",
"end-date": "d3c4132c-1e55-5177-943e-3afa25b092ab",
"project-number": "683c31bc-d1d3-57f2-bf57-2e4c54894181",
"country": "913bec17-7f11-540a-8cb5-c5803e32a98b",
"summary": "02f1316c-4d5c-5989-8183-e392a634d23e",
"program": "09c477bb-d887-5862-9b12-ea5ab566b363",
"grantee": "51550c5f-a019-561d-80ca-50ed38a2bfce"
},
# Response Facility
"6e962295-06c9-5de1-a39e-9cd2272b1837": {
"title": "ed814396-7e42-5a72-a1fb-c478947c499b",
# FIXME: subtitle?
"cofinancing-budget": "ad2b9e11-6ac7-57b2-a20d-d13259f72484",
"a4a-budget": "fac61f74-8d27-5128-9afb-a34283c39e75",
"total-budget": "0b99fc04-bf13-55c2-805a-fec273774a26",
"start-date": "e13cf4d6-d4be-56a3-9228-9c12263ead07",
"end-date": "d6b82834-24e7-5a1b-ab7e-369c745c302a",
"project-number": "fa543aa4-6cf7-53f8-a071-f775d8f89711",
"country": "cdc40519-f33c-5b29-b668-84ff60823ad7",
"summary": "4cff3960-6f4c-5a7f-a681-1dd8382d15e3",
"grantee": "60dfcace-9344-5ddf-89ef-2076f96ec07f"
},
}
CONTRACT_STATUSES = {
"68d4a00a-416d-5ce1-9c12-2d6d1dc1a047": "d30a945f-e524-53fe-8b2f-0c65b27be1ea",
"6e962295-06c9-5de1-a39e-9cd2272b1837": "2df6666f-d73b-5b57-9f66-51150dc9d6c9",
}
A4A = Organisation.objects.get(name="Aqua for All")
DEFAULT_PROJECT_INFO = {
"default_aid_type": "B01",
"default_flow_type": "10",
"default_tied_status": "3",
"default_finance_type": "110",
}
def programs_exist():
program = Project.objects.filter(id=MASTER_PROGRAM_ID).first()
if program is not None:
sub_programs = set(program.descendants(depth=1).values_list("pk", flat=True))
program_ids = set(PROGRAM_IDS.values())
return (sub_programs & program_ids) == program_ids
return False
def get_projects(contracts_only=True):
response = requests.get(f"{BASE_URL}/projects", auth=(USERNAME, PASSWORD))
content = response.json()
projects = content["data"]
if contracts_only:
projects = [
project
for project in projects
if project["status_id"] == CONTRACT_STATUSES[project["form_id"]]
]
return projects
def get_submission_versions(project_id):
response = requests.get(
f"{BASE_URL}/projects/{project_id}/versions", auth=(USERNAME, PASSWORD)
)
data = response.json()["data"]
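    # itertools.groupby only groups consecutive items, so this keeps the last
    # (assumed most recent) version_id per form_part_id and relies on the API
    # returning versions already ordered by form_part_id.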
versions = [
list(versions)[-1]["version_id"]
for form_part_id, versions in groupby(data, key=lambda x: x["form_part_id"])
]
return versions
def get_project_answers(project_id):
version_ids = get_submission_versions(project_id)
answers = []
for version_id in version_ids:
print(f"Fetching answers for {project_id} - {version_id}...")
response = requests.get(
f"{BASE_URL}/projects/{project_id}/versions/{version_id}/answers",
auth=(USERNAME, PASSWORD),
)
data = response.json()["data"]
answers.extend(data)
return {ans["question_id"]: ans for ans in answers}
def get_answer(form_id, answers, key, ans_key="value"):
answer = answers.get(FORM_QUESTION_MAPPING[form_id][key], {}).get(ans_key)
if not answer:
print(f"Could not find answer for {key}")
return answer
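# Example (hypothetical data): get_answer(OPTIMY_FORM_IDS['making-water-count'],
# answers, 'title') resolves the question id through FORM_QUESTION_MAPPING and
# returns answers[<question id>]['value'], or None (after printing a message)
# when the answer is missing.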
def create_project(project, answers):
project_id = project["id"]
form_id = project["form_id"]
if form_id == OPTIMY_FORM_IDS["response-facility"]:
lead_project_id = PROGRAM_IDS["Response Facility"]
else:
program_name = get_answer(form_id, answers, "program", ans_key="answer_name")
lead_project_id = PROGRAM_IDS.get(program_name)
if lead_project_id is None:
print(f"Skipping {project_id} since it has no associated program")
return None
optimy_project_id_field = "Optimy Project ID"
custom_field = ProjectCustomField.objects.filter(
name=optimy_project_id_field, value=project_id
).first()
title = get_answer(form_id, answers, "title")[:200]
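    # Note: this assumes a title answer is present; if get_answer returned None
    # the slice above would raise a TypeError.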
project_created = False
if custom_field is not None:
project = custom_field.project
else:
project = Project.objects.create(title=title)
project_created = True
ProjectCustomField.objects.get_or_create(
project=project,
name="Optimy Project ID",
defaults=dict(value=project_id, section="1", order="1"),
)
program = Project.objects.get(pk=lead_project_id)
project.add_to_program(program)
# Add Aqua for All as financing partner
Partnership.objects.get_or_create(
project=project,
organisation=A4A,
iati_organisation_role=Partnership.IATI_FUNDING_PARTNER,
)
# Add implementing partner
grantee = get_answer(form_id, answers, "grantee")
if grantee and project_created:
grantee_org = Organisation.objects.filter(Q(name=grantee) | Q(long_name=grantee)).first()
if not grantee_org:
grantee_org = Organisation.objects.create(
name=textwrap.wrap(grantee, 40)[0],
long_name=grantee
)
Partnership.objects.get_or_create(
project=project,
organisation=grantee_org,
iati_organisation_role=Partnership.IATI_IMPLEMENTING_PARTNER,
)
# Add Aqua for All project Number
project_number_question = get_answer(
form_id, answers, "project-number", "question_name"
)
project_number_value = get_answer(form_id, answers, "project-number")
if project_number_value:
ProjectCustomField.objects.get_or_create(
project=project,
name=project_number_question,
defaults=dict(value=project_number_value, section="1", order="1"),
)
start_date = get_answer(form_id, answers, "start-date")
end_date = get_answer(form_id, answers, "end-date")
iati_id = f"{A4A.iati_org_id}-{project.pk}"
# Update project attributes
data = dict(
title=title,
date_start_planned=start_date,
date_end_planned=end_date,
is_public=False,
project_plan_summary=get_answer(form_id, answers, "summary"),
iati_status="2", # Implementation status
iati_activity_id=iati_id,
)
# NOTE: Don't update Title, description and is_public for existing projects
if not project_created:
data.pop('title')
data.pop('project_plan_summary')
data.pop('is_public')
data.update(DEFAULT_PROJECT_INFO)
for key, value in data.items():
if value is not None:
setattr(project, key, value)
project.save(update_fields=data.keys())
# Create budget objects
BudgetItem.objects.filter(project=project).delete()
# Co-financing budget
other = BudgetItemLabel.objects.get(label="Other")
budget = get_answer(form_id, answers, "cofinancing-budget")
extra = get_answer(form_id, answers, "cofinancing-budget", "answer_name")
if budget:
if extra:
extra = " ".join(extra.split()[1:-1]).title()
BudgetItem.objects.create(
project=project,
label=other,
amount=budget,
other_extra=extra,
value_date=start_date,
period_start=start_date,
period_end=end_date,
)
# A4A budget
budget = get_answer(form_id, answers, "a4a-budget")
extra = get_answer(form_id, answers, "a4a-budget", "answer_name")
if budget:
if extra:
extra = " ".join(extra.split()[1:-1]).title()
BudgetItem.objects.create(
project=project,
label=other,
amount=budget,
other_extra=extra,
value_date=start_date,
period_start=start_date,
period_end=end_date,
)
# Create location objects
if project_created:
project.primary_location = None
if form_id == OPTIMY_FORM_IDS["response-facility"]:
iso_code = get_answer(form_id, answers, "country").lower()
else:
name = get_answer(form_id, answers, "country", ans_key="answer_name")
iso_code = COUNTRY_NAME_TO_ISO_MAP.get(name)
if iso_code:
country = custom_get_or_create_country(iso_code)
ProjectLocation.objects.create(location_target=project, country=country)
else:
print(f"Could not find iso code for {name}")
# Publish the project
project.publish()
return project
def set_program_iati_ids():
for program_id in (MASTER_PROGRAM_ID,) + tuple(PROGRAM_IDS.values()):
program = Project.objects.get(id=program_id)
data = dict(iati_activity_id=f"{A4A.iati_org_id}-{program_id}")
data.update(DEFAULT_PROJECT_INFO)
for key, value in data.items():
setattr(program, key, value)
program.save(update_fields=data.keys())
class Command(BaseCommand):
help = "Import projects from Optimy for Aqua for All"
def add_arguments(self, parser):
parser.add_argument(
"--project-id", type=str, help="ID of the project to import"
)
def handle(self, *args, **options):
if not programs_exist():
raise CommandError("Not all programs are present in the DB")
project_id = options["project_id"]
if not project_id:
print("Fetching projects from Optimy")
projects = get_projects()
else:
projects = [dict(id=project_id)]
# Set program IDs
set_program_iati_ids()
print(f"Importing {len(projects)} Projects ...")
for project in projects:
project_id = project["id"]
answers = get_project_answers(project_id)
project = create_project(project, answers)
if project is not None:
print(f"Imported {project_id} as {project.id} - {project.title}")
| agpl-3.0 | 3,057,250,417,693,562,000 | 34.317221 | 97 | 0.635073 | false |
vsemionov/boomerang | api/token.py | 1 | 1630 | from collections import OrderedDict
from rest_framework import viewsets, mixins, permissions, response
from rest_framework.authtoken.models import Token
from rest_framework.settings import api_settings as rest_settings
from rest_framework_jwt.settings import api_settings as jwt_settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
class AuthViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
view_name = None
permission_classes = (permissions.IsAuthenticated,)
def get_view_name(self):
return self.view_name
def create_token(self, user):
raise NotImplementedError()
def list(self, request, *args, **kwargs):
user = request.user
token = self.create_token(user)
token = OrderedDict((('username', user.username),
('token', token)))
return response.Response(token)
class TokenViewSet(AuthViewSet):
view_name = 'Token'
def create_token(self, user):
token, created = Token.objects.get_or_create(user=user)
return token.key
class JWTViewSet(AuthViewSet):
view_name = 'JWT'
authentication_classes = tuple(cls for cls in rest_settings.DEFAULT_AUTHENTICATION_CLASSES
if cls is not JSONWebTokenAuthentication)
jwt_payload_handler = staticmethod(jwt_settings.JWT_PAYLOAD_HANDLER)
jwt_encode_handler = staticmethod(jwt_settings.JWT_ENCODE_HANDLER)
def create_token(self, user):
payload = self.jwt_payload_handler(user)
token = self.jwt_encode_handler(payload)
return token
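# Minimal wiring sketch (an assumption, not part of this module): these
# viewsets are typically exposed through a DRF router, e.g.
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'token', TokenViewSet, base_name='token')
#     router.register(r'jwt', JWTViewSet, base_name='jwt')
#
# (newer DRF releases spell the keyword `basename`). A GET on either endpoint
# then returns the {"username": ..., "token": ...} payload built by
# AuthViewSet.list().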
| mit | -8,011,112,225,322,380,000 | 30.960784 | 94 | 0.690184 | false |
shootstar/novatest | nova/cells/state.py | 1 | 14713 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CellState Manager
"""
import copy
import datetime
import functools
from oslo.config import cfg
from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
cell_state_manager_opts = [
cfg.IntOpt('db_check_interval',
default=60,
help='Seconds between getting fresh cell info from db.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')
class CellState(object):
"""Holds information for a particular cell."""
def __init__(self, cell_name, is_me=False):
self.name = cell_name
self.is_me = is_me
self.last_seen = datetime.datetime.min
self.capabilities = {}
self.capacities = {}
self.db_info = {}
# TODO(comstud): The DB will specify the driver to use to talk
# to this cell, but there's no column for this yet. The only
# available driver is the rpc driver.
self.driver = rpc_driver.CellsRPCDriver()
def update_db_info(self, cell_db_info):
"""Update cell credentials from db."""
self.db_info = dict(
[(k, v) for k, v in cell_db_info.iteritems()
if k != 'name'])
def update_capabilities(self, cell_metadata):
"""Update cell capabilities for a cell."""
self.last_seen = timeutils.utcnow()
self.capabilities = cell_metadata
def update_capacities(self, capacities):
"""Update capacity information for a cell."""
self.last_seen = timeutils.utcnow()
self.capacities = capacities
def get_cell_info(self):
"""Return subset of cell information for OS API use."""
db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset']
url_fields_to_return = {
'username': 'username',
'hostname': 'rpc_host',
'port': 'rpc_port',
}
cell_info = dict(name=self.name, capabilities=self.capabilities)
if self.db_info:
for field in db_fields_to_return:
cell_info[field] = self.db_info[field]
url_info = rpc_driver.parse_transport_url(
self.db_info['transport_url'])
for field, canonical in url_fields_to_return.items():
cell_info[canonical] = url_info[field]
return cell_info
def send_message(self, message):
"""Send a message to a cell. Just forward this to the driver,
passing ourselves and the message as arguments.
"""
self.driver.send_message_to_cell(self, message)
def __repr__(self):
me = "me" if self.is_me else "not_me"
return "Cell '%s' (%s)" % (self.name, me)
def sync_from_db(f):
"""Use as a decorator to wrap methods that use cell information to
make sure they sync the latest information from the DB periodically.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if self._time_to_sync():
self._cell_db_sync()
return f(self, *args, **kwargs)
return wrapper
class CellStateManager(base.Base):
def __init__(self, cell_state_cls=None):
super(CellStateManager, self).__init__()
if not cell_state_cls:
cell_state_cls = CellState
self.cell_state_cls = cell_state_cls
self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
self.parent_cells = {}
self.child_cells = {}
self.last_cell_db_check = datetime.datetime.min
self._cell_db_sync()
my_cell_capabs = {}
for cap in CONF.cells.capabilities:
name, value = cap.split('=', 1)
if ';' in value:
values = set(value.split(';'))
else:
values = set([value])
my_cell_capabs[name] = values
self.my_cell_state.update_capabilities(my_cell_capabs)
def _refresh_cells_from_db(self, ctxt):
"""Make our cell info map match the db."""
# Add/update existing cells ...
db_cells = self.db.cell_get_all(ctxt)
db_cells_dict = dict([(cell['name'], cell) for cell in db_cells])
# Update current cells. Delete ones that disappeared
for cells_dict in (self.parent_cells, self.child_cells):
for cell_name, cell_info in cells_dict.items():
is_parent = cell_info.db_info['is_parent']
db_dict = db_cells_dict.get(cell_name)
if db_dict and is_parent == db_dict['is_parent']:
cell_info.update_db_info(db_dict)
else:
del cells_dict[cell_name]
# Add new cells
for cell_name, db_info in db_cells_dict.items():
if db_info['is_parent']:
cells_dict = self.parent_cells
else:
cells_dict = self.child_cells
if cell_name not in cells_dict:
cells_dict[cell_name] = self.cell_state_cls(cell_name)
cells_dict[cell_name].update_db_info(db_info)
def _time_to_sync(self):
"""Is it time to sync the DB against our memory cache?"""
diff = timeutils.utcnow() - self.last_cell_db_check
return diff.seconds >= CONF.cells.db_check_interval
def _update_our_capacity(self, context):
"""Update our capacity in the self.my_cell_state CellState.
This will add/update 2 entries in our CellState.capacities,
'ram_free' and 'disk_free'.
The values of these are both dictionaries with the following
format:
{'total_mb': <total_memory_free_in_the_cell>,
         'units_by_mb': <units_dictionary>}
<units_dictionary> contains the number of units that we can
build for every instance_type that we have. This number is
computed by looking at room available on every compute_node.
Take the following instance_types as an example:
[{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
{'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]
capacities['ram_free']['units_by_mb'] would contain the following:
{'1024': <number_of_instances_that_will_fit>,
'2048': <number_of_instances_that_will_fit>}
capacities['disk_free']['units_by_mb'] would contain the following:
{'122880': <number_of_instances_that_will_fit>,
'225280': <number_of_instances_that_will_fit>}
Units are in MB, so 122880 = (10 + 100) * 1024.
NOTE(comstud): Perhaps we should only report a single number
available per instance_type.
"""
reserve_level = CONF.cells.reserve_percent / 100.0
compute_hosts = {}
def _get_compute_hosts():
compute_nodes = self.db.compute_node_get_all(context)
for compute in compute_nodes:
service = compute['service']
if not service or service['disabled']:
continue
host = service['host']
compute_hosts[host] = {
'free_ram_mb': compute['free_ram_mb'],
'free_disk_mb': compute['free_disk_gb'] * 1024,
'total_ram_mb': compute['memory_mb'],
'total_disk_mb': compute['local_gb'] * 1024}
_get_compute_hosts()
if not compute_hosts:
self.my_cell_state.update_capacities({})
return
ram_mb_free_units = {}
disk_mb_free_units = {}
total_ram_mb_free = 0
total_disk_mb_free = 0
def _free_units(total, free, per_inst):
if per_inst:
min_free = total * reserve_level
free = max(0, free - min_free)
return int(free / per_inst)
else:
return 0
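        # Worked example (illustrative numbers): with reserve_percent=10,
        # total=16384 MB, free=8192 MB and per_inst=2048 MB, the reserved floor
        # is 1638.4 MB, leaving 6553.6 MB, so 3 units fit.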
def _update_from_values(values, instance_type):
memory_mb = instance_type['memory_mb']
disk_mb = (instance_type['root_gb'] +
instance_type['ephemeral_gb']) * 1024
ram_mb_free_units.setdefault(str(memory_mb), 0)
disk_mb_free_units.setdefault(str(disk_mb), 0)
ram_free_units = _free_units(compute_values['total_ram_mb'],
compute_values['free_ram_mb'], memory_mb)
disk_free_units = _free_units(compute_values['total_disk_mb'],
compute_values['free_disk_mb'], disk_mb)
ram_mb_free_units[str(memory_mb)] += ram_free_units
disk_mb_free_units[str(disk_mb)] += disk_free_units
instance_types = self.db.instance_type_get_all(context)
for compute_values in compute_hosts.values():
total_ram_mb_free += compute_values['free_ram_mb']
total_disk_mb_free += compute_values['free_disk_mb']
for instance_type in instance_types:
_update_from_values(compute_values, instance_type)
capacities = {'ram_free': {'total_mb': total_ram_mb_free,
'units_by_mb': ram_mb_free_units},
'disk_free': {'total_mb': total_disk_mb_free,
'units_by_mb': disk_mb_free_units}}
self.my_cell_state.update_capacities(capacities)
@utils.synchronized('cell-db-sync')
def _cell_db_sync(self):
"""Update status for all cells if it's time. Most calls to
        this are from the sync_from_db() decorator that checks
the time, but it checks outside of a lock. The duplicate
check here is to prevent multiple threads from pulling the
information simultaneously.
"""
if self._time_to_sync():
LOG.debug(_("Updating cell cache from db."))
self.last_cell_db_check = timeutils.utcnow()
ctxt = context.get_admin_context()
self._refresh_cells_from_db(ctxt)
self._update_our_capacity(ctxt)
@sync_from_db
def get_cell_info_for_neighbors(self):
"""Return cell information for all neighbor cells."""
cell_list = [cell.get_cell_info()
for cell in self.child_cells.itervalues()]
cell_list.extend([cell.get_cell_info()
for cell in self.parent_cells.itervalues()])
return cell_list
@sync_from_db
def get_my_state(self):
"""Return information for my (this) cell."""
return self.my_cell_state
@sync_from_db
def get_child_cells(self):
"""Return list of child cell_infos."""
return self.child_cells.values()
@sync_from_db
def get_parent_cells(self):
"""Return list of parent cell_infos."""
return self.parent_cells.values()
@sync_from_db
def get_parent_cell(self, cell_name):
return self.parent_cells.get(cell_name)
@sync_from_db
def get_child_cell(self, cell_name):
return self.child_cells.get(cell_name)
@sync_from_db
def update_cell_capabilities(self, cell_name, capabilities):
"""Update capabilities for a cell."""
cell = self.child_cells.get(cell_name)
if not cell:
cell = self.parent_cells.get(cell_name)
if not cell:
LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
"update capabilities"),
{'cell_name': cell_name})
return
# Make sure capabilities are sets.
for capab_name, values in capabilities.items():
capabilities[capab_name] = set(values)
cell.update_capabilities(capabilities)
@sync_from_db
def update_cell_capacities(self, cell_name, capacities):
"""Update capacities for a cell."""
cell = self.child_cells.get(cell_name)
if not cell:
cell = self.parent_cells.get(cell_name)
if not cell:
LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
"update capacities"),
{'cell_name': cell_name})
return
cell.update_capacities(capacities)
@sync_from_db
def get_our_capabilities(self, include_children=True):
capabs = copy.deepcopy(self.my_cell_state.capabilities)
if include_children:
for cell in self.child_cells.values():
if timeutils.is_older_than(cell.last_seen,
CONF.cells.mute_child_interval):
continue
for capab_name, values in cell.capabilities.items():
if capab_name not in capabs:
capabs[capab_name] = set([])
capabs[capab_name] |= values
return capabs
def _add_to_dict(self, target, src):
for key, value in src.items():
if isinstance(value, dict):
target.setdefault(key, {})
self._add_to_dict(target[key], value)
continue
target.setdefault(key, 0)
target[key] += value
@sync_from_db
def get_our_capacities(self, include_children=True):
capacities = copy.deepcopy(self.my_cell_state.capacities)
if include_children:
for cell in self.child_cells.values():
self._add_to_dict(capacities, cell.capacities)
return capacities
@sync_from_db
def get_capacities(self, cell_name=None):
if not cell_name or cell_name == self.my_cell_state.name:
return self.get_our_capacities()
if cell_name in self.child_cells:
return self.child_cells[cell_name].capacities
raise exception.CellNotFound(cell_name=cell_name)
| apache-2.0 | -7,506,351,170,441,942,000 | 37.215584 | 78 | 0.583498 | false |
bonyuta0204/NetDissec | src/netprobe_pytorch.py | 1 | 10545 | #!/usr/bin/env python
# Bolei added
import pdb
import torch
import torchvision
from torch.autograd import Variable as V
from torchvision import transforms as trn
import os
import numpy
import glob
import shutil
import codecs
import time
import sys
os.environ['GLOG_minloglevel'] = '2'
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
from scipy.misc import imresize, imread
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import zoom
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from collections import namedtuple
import upsample
import rotate
import expdir
caffe.set_mode_gpu()
caffe.set_device(0)
def create_probe(
directory, dataset, definition, weights, mean, blobs,
colordepth=3,
rotation_seed=None, rotation_power=1,
limit=None, split=None,
batch_size=16, ahead=4,
cl_args=None, verbose=True):
# If we're already done, skip it!
ed = expdir.ExperimentDirectory(directory)
if all(ed.has_mmap(blob=b) for b in blobs):
return
'''
directory: where to place the probe_conv5.mmap files.
data: the AbstractSegmentation data source to draw upon
definition: the filename for the caffe prototxt
weights: the filename for the caffe model weights
mean: to use to normalize rgb values for the network
blobs: ['conv3', 'conv4', 'conv5'] to probe
'''
if verbose:
print 'Opening dataset', dataset
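    # Note: the lookups below rely on the module-level `args` parsed in
    # __main__ (and on `loadseg` imported there), not only on this function's
    # parameters.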
data = loadseg.SegmentationData(args.dataset)
# the network to dissect
if args.weights == None:
# load the imagenet pretrained model
net = torchvision.models.__dict__[args.definition](pretrained=True)
else:
# load your own model
net = torchvision.models.__dict__[
args.definition](num_classes=args.num_classes)
checkpoint = torch.load(args.weights)
# the data parallel layer will add 'module' before each layer name
state_dict = {str.replace(k, 'module.', ''): v for k,
v in checkpoint['state_dict'].iteritems()}
net.load_state_dict(state_dict)
net.eval()
# hook up to get the information for each selected layer
layers = net._modules.keys()
size_blobs_output = []
def hook_size(module, input, output):
size_blobs_output.append(output.data.size())
input_sample = V(torch.randn(1, 3, args.input_size, args.input_size))
for blob in blobs:
net._modules.get(blob).register_forward_hook(hook_size)
output_sample = net(input_sample)
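    # The size hooks fire in module execution order during this dummy forward
    # pass, so size_blobs_output holds one (N, C, H, W) size per probed blob;
    # the size_blobs_output[idx] lookups below assume `blobs` is listed in that
    # same order. The hooks also stay registered for the later passes.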
input_dim = [args.input_size, args.input_size]
data_size = data.size(split) # the image size
if limit is not None:
data_size = min(data_size, limit)
# Make sure we have a directory to work in
ed.ensure_dir()
# Step 0: write a README file with generated information.
ed.save_info(dict(
dataset=dataset,
split=split,
definition=definition,
weights=weights,
mean=mean,
blobs=blobs,
input_dim=input_dim,
rotation_seed=rotation_seed,
rotation_power=rotation_power))
# Clear old probe data
ed.remove_all('*.mmap*')
# Create new (empty) mmaps
if verbose:
print 'Creating new mmaps.'
out = {}
rot = None
if rotation_seed is not None:
rot = {}
for idx, blob in enumerate(blobs):
#shape = (data_size, ) + net.blobs[blob].data.shape[1:]
shape = (data_size, int(size_blobs_output[idx][1]), int(
size_blobs_output[idx][2]), int(size_blobs_output[idx][3]))
out[blob] = ed.open_mmap(blob=blob, mode='w+', shape=shape)
# Rather than use the exact RF, here we use some heuristics to compute the approximate RF
size_RF = (args.input_size /
size_blobs_output[idx][2], args.input_size / size_blobs_output[idx][3])
fieldmap = ((0, 0), size_RF, size_RF)
ed.save_info(blob=blob, data=dict(
name=blob, shape=shape, fieldmap=fieldmap))
# The main loop
if verbose:
print 'Beginning work.'
pf = loadseg.SegmentationPrefetcher(data, categories=['image'],
split=split, once=True, batch_size=batch_size, ahead=ahead)
index = 0
start_time = time.time()
last_batch_time = start_time
batch_size = 0
net.cuda()
# hook the feature extractor
features_blobs = []
def hook_feature(module, input, output):
features_blobs.append(output.data.cpu().numpy())
for blob in blobs:
net._modules.get(blob).register_forward_hook(hook_feature)
for batch in pf.tensor_batches(bgr_mean=mean):
del features_blobs[:] # clear up the feature basket
batch_time = time.time()
rate = index / (batch_time - start_time + 1e-15)
batch_rate = batch_size / (batch_time - last_batch_time + 1e-15)
last_batch_time = batch_time
if verbose:
print 'netprobe index', index, 'items per sec', batch_rate, rate
sys.stdout.flush()
inp = batch[0]
batch_size = len(inp)
if limit is not None and index + batch_size > limit:
# Truncate last if limited
batch_size = limit - index
inp = inp[:batch_size]
if colordepth == 1:
inp = numpy.mean(inp, axis=1, keepdims=True)
# previous feedforward case
inp = inp[:, ::-1, :, :]
inp_tensor = V(torch.from_numpy(inp.copy()))
# approximately normalize the input to make the images scaled at around 1.
inp_tensor.div_(255.0 * 0.224)
inp_tensor = inp_tensor.cuda()
result = net.forward(inp_tensor)
# output the hooked feature
for i, key in enumerate(blobs):
out[key][index:index +
batch_size] = numpy.copy(features_blobs[i][:batch_size])
# print 'Recording data in mmap done'
index += batch_size
if index >= data_size:
break
assert index == data_size, (
"Data source should return evey item once %d %d." %
(index, data_size))
if verbose:
print 'Renaming mmaps.'
for blob in blobs:
ed.finish_mmap(out[blob])
# Final step: write the README file
write_readme_file([
('cl_args', cl_args),
('data', data),
('definition', definition),
('weight', weights),
('mean', mean),
('blobs', blobs)], ed, verbose=verbose)
def ensure_dir(targetdir):
if not os.path.isdir(targetdir):
try:
os.makedirs(targetdir)
except:
print 'Could not create', targetdir
pass
def write_readme_file(args, ed, verbose):
'''
    Writes a README.txt that describes the settings used to generate the dataset.
'''
with codecs.open(ed.filename('README.txt'), 'w', 'utf-8') as f:
def report(txt):
f.write('%s\n' % txt)
if verbose:
print txt
title = '%s network probe' % ed.basename()
report('%s\n%s' % (title, '=' * len(title)))
for key, val in args:
if key == 'cl_args':
if val is not None:
report('Command-line args:')
for ck, cv in vars(val).items():
report(' %s: %r' % (ck, cv))
report('%s: %r' % (key, val))
report('\ngenerated at: %s' % time.strftime("%Y-%m-%d %H:%M"))
try:
label = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
report('git label: %s' % label)
except:
pass
if __name__ == '__main__':
import sys
import traceback
import argparse
try:
import loadseg
parser = argparse.ArgumentParser(
description='Probe a caffe network and save results in a directory.')
parser.add_argument(
'--directory',
default='.',
help='output directory for the net probe')
parser.add_argument(
'--blobs',
nargs='*',
help='network blob names to collect')
parser.add_argument(
'--definition',
help='the deploy prototext defining the net')
parser.add_argument(
'--weights',
default=None,
help='the pretrained weight')
parser.add_argument(
'--mean',
nargs='*', type=float,
help='mean values to subtract from input')
parser.add_argument(
'--dataset',
help='the directory containing the dataset to use')
parser.add_argument(
'--split',
help='the split of the dataset to use')
parser.add_argument(
'--limit',
type=int, default=None,
help='limit dataset to this size')
parser.add_argument(
'--batch_size',
type=int, default=64,
help='the batch size to use')
parser.add_argument(
'--input_size',
type=int, default=224,
            help='the image size input to the network (usually 224x224, but AlexNet uses 227x227)')
parser.add_argument(
'--ahead',
type=int, default=4,
help='number of batches to prefetch')
parser.add_argument(
'--rotation_seed',
type=int, default=None,
help='the seed for the random rotation to apply')
parser.add_argument(
'--rotation_power',
type=float, default=1.0,
            help='the power of the random rotation')
parser.add_argument(
'--colordepth',
type=int, default=3,
help='set to 1 for grayscale')
parser.add_argument(
'--num_classes',
type=int, default=365,
            help='the number of classes for the network output (default is 365)')
args = parser.parse_args()
create_probe(
args.directory, args.dataset, args.definition, args.weights,
numpy.array(args.mean, dtype=numpy.float32), args.blobs,
batch_size=args.batch_size, ahead=args.ahead, limit=args.limit,
colordepth=args.colordepth,
rotation_seed=args.rotation_seed, rotation_power=args.rotation_power,
split=args.split, cl_args=args, verbose=True)
except:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
| mit | -4,024,479,165,878,248,000 | 32.906752 | 104 | 0.583215 | false |
walter-weinmann/wwe-euler | problems/069_Solution.py | 1 | 1675 | '''
Created on 19.08.2013
@author: Walter
Euler's Totient function, φ(n) [sometimes called the phi function], is used to
determine the number of numbers less than n which are relatively prime to n.
For example, as 1, 2, 4, 5, 7, and 8, are all less than nine and relatively
prime to nine, φ(9)=6.
n Relatively Prime φ(n) n/φ(n)
2 1 1 2
3 1,2 2 1.5
4 1,3 2 2
5 1,2,3,4 4 1.25
6 1,5 2 3
7 1,2,3,4,5,6 6 1.1666...
8 1,3,5,7 4 2
9 1,2,4,5,7,8 6 1.5
10 1,3,7,9 4 2.5
It can be seen that n=6 produces a maximum n/φ(n) for n ≤ 10.
Find the value of n ≤ 1,000,000 for which n/φ(n) is a maximum.
'''
import unittest
def totient_maximum(pInt):
    # n/phi(n) equals the product of p/(p-1) over the distinct prime factors of
    # n, so it is maximised by the largest primorial (2*3*5*7*...) that does
    # not exceed pInt.
    lResult = 1
    lCandidate = 2
    while True:
        lIsPrime = all(lCandidate % lDiv != 0
                       for lDiv in range(2, int(lCandidate ** 0.5) + 1))
        if lIsPrime:
            if lResult * lCandidate > pInt:
                break
            lResult *= lCandidate
        lCandidate += 1
    return lResult
# =============================================================================
# Unit tests
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
def test_totient_maximum_10(self):
        self.assertEqual(totient_maximum(10), 6)
def test_totient_maximum_1000(self):
        self.assertEqual(totient_maximum(1000), 210)
# =============================================================================
# Solution of the Euler task
# -----------------------------------------------------------------------------
print(totient_maximum(1000000))
| apache-2.0 | -6,612,717,353,856,141,000 | 26.295082 | 79 | 0.43003 | false |
jonathanslenders/pyvim | pyvim/completion.py | 1 | 4439 | from __future__ import unicode_literals
from prompt_toolkit.completion import Completer, Completion
import re
import weakref
__all__ = (
'DocumentCompleter',
)
class DocumentWordsCompleter(Completer):
"""
Completer that completes on words that appear already in the open document.
"""
def get_completions(self, document, complete_event):
word_before_cursor = document.get_word_before_cursor()
# Create a set of words that could be a possible completion.
words = set()
for w in re.split(r'\W', document.text):
if len(w) > 1:
if w.startswith(word_before_cursor) and w != word_before_cursor:
words.add(w)
# Yield Completion instances.
for w in sorted(words):
yield Completion(w, start_position=-len(word_before_cursor))
class DocumentCompleter(Completer):
"""
This is the general completer for EditorBuffer completions.
Depending on the file type and settings, it selects another completer to
call.
"""
def __init__(self, editor, editor_buffer):
# (Weakrefs, they are already pointing to us.)
self._editor_ref = weakref.ref(editor)
self._editor_buffer_ref = weakref.ref(editor_buffer)
def get_completions(self, document, complete_event):
editor = self._editor_ref()
location = self._editor_buffer_ref().location or '.txt'
# Select completer.
if location.endswith('.py') and editor.enable_jedi:
completer = _PythonCompleter(location)
else:
completer = DocumentWordsCompleter()
# Call completer.
return completer.get_completions(document, complete_event)
class _PythonCompleter(Completer):
"""
Wrapper around the Jedi completion engine.
"""
def __init__(self, location):
self.location = location
def get_completions(self, document, complete_event):
script = self._get_jedi_script_from_document(document)
if script:
try:
completions = script.completions()
except TypeError:
# Issue #9: bad syntax causes completions() to fail in jedi.
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/9
pass
except UnicodeDecodeError:
# Issue #43: UnicodeDecodeError on OpenBSD
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/43
pass
except AttributeError:
# Jedi issue #513: https://github.com/davidhalter/jedi/issues/513
pass
except ValueError:
# Jedi issue: "ValueError: invalid \x escape"
pass
except KeyError:
# Jedi issue: "KeyError: u'a_lambda'."
# https://github.com/jonathanslenders/ptpython/issues/89
pass
except IOError:
# Jedi issue: "IOError: No such file or directory."
# https://github.com/jonathanslenders/ptpython/issues/71
pass
else:
for c in completions:
yield Completion(c.name_with_symbols, len(c.complete) - len(c.name_with_symbols),
display=c.name_with_symbols)
def _get_jedi_script_from_document(self, document):
import jedi # We keep this import in-line, to improve start-up time.
# Importing Jedi is 'slow'.
try:
return jedi.Script(
document.text,
column=document.cursor_position_col,
line=document.cursor_position_row + 1,
path=self.location)
except ValueError:
# Invalid cursor position.
# ValueError('`column` parameter is not in a valid range.')
return None
except AttributeError:
# Workaround for #65: https://github.com/jonathanslenders/python-prompt-toolkit/issues/65
# See also: https://github.com/davidhalter/jedi/issues/508
return None
except IndexError:
# Workaround Jedi issue #514: for https://github.com/davidhalter/jedi/issues/514
return None
except KeyError:
# Workaroud for a crash when the input is "u'", the start of a unicode string.
return None
| bsd-3-clause | -7,874,857,262,923,508,000 | 35.991667 | 101 | 0.590674 | false |
anderfosca/contextbroker | code/virtualenv/broker/modules/update.py | 1 | 6420 | __author__ = 'anderson'
import xml.etree.ElementTree as ET
import sys
import re
import generic_response
import pymongo
from pymongo import MongoClient
from dateutil.parser import parse
import logging
def send_to_consumer(url, xml_string):
print "sending to: " + url + '\n' + xml_string
#r = requests.post(url, xml_string)
#print r.json(), r.status_code
# context_update
# expected data: XML with the Provider's update information
# description: registers the data supplied by the Provider in the registry table (registryTable)
# returns: a success or error message
# TODO check data consistency
# TODO handle possible errors
def context_update(xml_string_original):
"""
:rtype : str
"""
logger = logging.getLogger('broker')
logger.info('update - Initiate')
xml_string = re.sub(' xmlns="[^"]+"', '', xml_string_original, count=1)
xml_string = re.sub(' xmlns:xsi="[^"]+"', '', xml_string, count=1)
xml_string = re.sub(' xsi:schemaLocation="[^"]+"', '', xml_string, count=1)
root = ET.fromstring(xml_string)
for ctxEl in root.find('ctxEls').findall('ctxEl'):
nameProv = ctxEl.find('contextProvider').get('id')
version = ctxEl.find('contextProvider').get('v')
entityType = ctxEl.find('entity').get('type')
entityId = ctxEl.find('entity').get('id')
scope = ctxEl.find('scope').text
timestamp = ctxEl.find('timestamp').text
expires = ctxEl.find('expires').text
if parse(timestamp) > parse(expires):
logger.warn('update - Timestamp after Expires')
return generic_response.generate_response('ERROR','400','Bad Parameters: Timestamp after Expires',
'update',nameProv,version,entityId,entityType,scope)
parList=[]
for par in list(ctxEl.find('dataPart')):
parList.append(ET.tostring(par))
dataPart = "".join(parList)
try:
###################################MONGODB
client = MongoClient()
db = client.broker
provider_el = db.providers.find_one({'name': nameProv})
scope_el = db.scopes.find_one({'name': scope, 'provider_id': provider_el['_id']})
            if provider_el is None or scope_el is None:  # if the provider or scope does not exist, discard right away
return generic_response.generate_response('ERROR','400','Bad Paramenters',
'update',nameProv,version,entityId,entityType,scope)
##################################MONGODB
#########################MONGODB
logger.info('update - Inserting entity: %s', entityType+'|'+entityId)
entity_element = {'name': entityId, 'type': entityType}
db.entities.update_one(entity_element, {'$setOnInsert': entity_element}, upsert=True)
entity_el = db.entities.find_one(entity_element)
#########################MONGODB
#################################MONGODB
logger.info('update - Inserting Registry for Provider: %s; Scope: %s; Entity: %s',
provider_el['name'], scope_el['name'], entity_el['type']+'|'+entity_el['name'])
on_insert = {'provider': provider_el, 'scope': scope_el, 'entity': entity_el}
on_update = {'timestamp': timestamp, 'expires': expires, 'data_part': dataPart}
if db.registries.update_one(on_insert, {'$setOnInsert': on_insert, '$set': on_update}, upsert=True).upserted_id:
logger.info('update - Inserted Registry for Provider: %s; Scope: %s; Entity: %s',
provider_el['name'], scope_el['name'], entity_el['type']+'|'+entity_el['name'])
else:
logger.info('update - Updated Registry for Provider: %s; Scope: %s; Entity: %s',
provider_el['name'], scope_el['name'], entity_el['type']+'|'+entity_el['name'])
################################MONGODB
            # time to check the subscriptions
logger.info('update - Checking Subscriptions for Scope: %s; Entity: %s', scope, entityType+'|'+entityId)
results = check_subscriptions(entityId, entityType, scope)
if results.count() > 0:
logger.info('update - Found Subscriptions for Scope: %s; Entity: %s', scope, entityType+'|'+entityId)
for result in results:
logger.info('update - Sending to Subscripted: %s', result['callback_url'])
send_to_consumer(result['callback_url'], xml_string_original)
return generic_response.generate_response('OK','200','Update and Subscription Success','update',
nameProv,version,entityId,entityType,scope)
else:
logger.info('update - No Subscriptions found for Scope: %s; Entity: %s', scope, entityType+'|'+entityId)
return generic_response.generate_response('OK','200','Update Success','update',
nameProv,version,entityId,entityType,scope)
except Exception as e:
logger.error('update - Internal Error: %s', sys.exc_info()[0])
error_message = "Internal Error"
return generic_response.generate_response('ERROR','500',error_message,'update',
nameProv,version,entityId,entityType,scope)
# check_subscriptions
# expected data: entity, scope
# description: the Consumer sends the entity and the scopes it wants to receive updates about, its URL, and a
# time to live for the subscription
# returns: success or error message
def check_subscriptions(entity_name, entity_type, scope):
"""
    :returns: pymongo cursor over the subscriptions matching the given entity
        and scope (each result contains the subscriber's callback_url)
"""
#################################MONGODB
client = MongoClient()
db = client.broker
entity_el_id = db.entities.find_one({'name': entity_name, 'type': entity_type}, {'_id': 1})["_id"]
scope_el_id = db.scopes.find_one({'name': scope}, {'_id': 1})["_id"]
results = db.subscriptions.find({'entity_id': entity_el_id,
'scopes': {'$in': [scope_el_id]}}, {'callback_url': 1})
################################MONGODB
for r in results:
print r['callback_url']
return results
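# Added illustrative usage sketch (not part of the original module). The payload below
# is an assumed minimal XML matching the elements context_update() reads (ctxEls/ctxEl,
# contextProvider, entity, scope, timestamp, expires, dataPart); the provider, scope and
# entity names are hypothetical, and actually running it requires a local MongoDB
# instance where that provider and scope are already registered.
EXAMPLE_UPDATE_XML = """<ctxML>
  <ctxEls>
    <ctxEl>
      <contextProvider id="weatherProvider" v="1.0"/>
      <entity type="username" id="alice"/>
      <scope>weather</scope>
      <timestamp>2015-01-01T12:00:00+01:00</timestamp>
      <expires>2015-01-01T13:00:00+01:00</expires>
      <dataPart><par n="temperature">21</par></dataPart>
    </ctxEl>
  </ctxEls>
</ctxML>"""
if __name__ == '__main__':
    # prints the generic success/error response XML returned by context_update()
    print context_update(EXAMPLE_UPDATE_XML)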
| gpl-2.0 | 1,476,318,394,329,219,600 | 50.36 | 124 | 0.568536 | false |
zenoss/pywbem | tests/unittest/functiontest/test_conftest.py | 1 | 2251 | """
This test module contains test cases for testing the conftest.py module,
at least those functions that can reasonably be tested in a standalone
manner.
"""
from __future__ import print_function, absolute_import
from lxml import etree
from ...functiontest.conftest import xml_embed, xml_unembed
class Test_EmbedUnembed(object):
"""Test case for xml_embed() / xml_unembed() functions."""
@staticmethod
def test_unembed_simple():
"""Unembed a simple embedded instance string."""
emb_str = b'<INSTANCE NAME="C1">' \
b'<PROPERTY NAME="P1">' \
b'<VALUE>V1</VALUE>' \
b'</PROPERTY>' \
b'</INSTANCE>'
instance_elem = xml_unembed(emb_str)
assert instance_elem.tag == 'INSTANCE'
assert len(instance_elem.attrib) == 1
assert 'NAME' in instance_elem.attrib
assert instance_elem.attrib['NAME'] == 'C1'
assert len(instance_elem) == 1
property_elem = instance_elem[0]
assert property_elem.tag == 'PROPERTY'
assert len(property_elem.attrib) == 1
assert 'NAME' in property_elem.attrib
assert property_elem.attrib['NAME'] == 'P1'
assert len(property_elem) == 1
value_elem = property_elem[0]
assert value_elem.tag == 'VALUE'
assert len(value_elem.attrib) == 0 # pylint: disable=len-as-condition
assert value_elem.text == 'V1'
@staticmethod
def test_embed_simple():
"""Embed a simple instance."""
instance_elem = etree.Element('INSTANCE')
instance_elem.attrib['NAME'] = 'C1'
property_elem = etree.SubElement(instance_elem, 'PROPERTY')
property_elem.attrib['NAME'] = 'P1'
value_elem = etree.SubElement(property_elem, 'VALUE')
value_elem.text = 'V1'
emb_str = xml_embed(instance_elem)
exp_emb_str = b'<INSTANCE NAME="C1">' \
b'<PROPERTY NAME="P1">' \
b'<VALUE>V1</VALUE>' \
b'</PROPERTY>' \
b'</INSTANCE>'
assert emb_str == exp_emb_str
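    @staticmethod
    def test_roundtrip_simple():
        """Added illustrative check (not in the original suite): unembedding an
        embedded string and embedding it again should preserve the structure."""
        emb_str = b'<INSTANCE NAME="C1">' \
                  b'<PROPERTY NAME="P1">' \
                  b'<VALUE>V1</VALUE>' \
                  b'</PROPERTY>' \
                  b'</INSTANCE>'
        roundtrip_elem = xml_unembed(xml_embed(xml_unembed(emb_str)))
        assert roundtrip_elem.tag == 'INSTANCE'
        assert roundtrip_elem.attrib['NAME'] == 'C1'
        assert roundtrip_elem[0].attrib['NAME'] == 'P1'
        assert roundtrip_elem[0][0].text == 'V1'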
| lgpl-2.1 | 1,940,382,869,996,159,500 | 33.106061 | 78 | 0.583741 | false |
arne-cl/discoursegraphs | src/discoursegraphs/readwrite/exmaralda.py | 1 | 19867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann
"""
The ``exmaralda`` module converts a ``DiscourseDocumentGraph`` (possibly
containing multiple annotation layers) into an Exmaralda ``*.exb`` file
and vice versa.
"""
import os
import sys
from collections import defaultdict
from lxml import etree
from lxml.builder import ElementMaker
from discoursegraphs import (DiscourseDocumentGraph, EdgeTypes,
get_annotation_layers,
get_pointing_chains, get_span,
select_nodes_by_layer)
from discoursegraphs.util import create_dir
class ExmaraldaFile(object):
"""
This class converts a DiscourseDocumentGraph into an Exmaralda file.
Attributes
----------
toknode2id : dict
maps from a token node ID to its Exmaralda ID (ID in the common
timeline)
"""
def __init__(self, docgraph, remove_redundant_layers=True):
"""
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph to be converted
"""
self.toknode2id = {node_id: i
for i, node_id in enumerate(docgraph.tokens)}
self.E = ElementMaker()
self.tier_count = 0
self.tree = self.__add_document_structure(docgraph,
remove_redundant_layers)
def __str__(self):
"""
returns the generated Exmaralda ``*.exb`` file as a string.
"""
return etree.tostring(self.tree, pretty_print=True,
xml_declaration=True, encoding='UTF-8')
def write(self, output_filepath):
"""
serialize the ExmaraldaFile instance and write it to a file.
Parameters
----------
output_filepath : str
relative or absolute path to the Exmaralda file to be created
"""
with open(output_filepath, 'w') as out_file:
out_file.write(self.__str__())
def __create_document_header(self):
"""
Look, mum! XML generation without string concatenation!1!!
This creates an empty, but functional header for an Exmaralda *.exb
file.
"""
E = self.E
root = E('basic-transcription')
head = E('head')
meta = E('meta-information')
project = E('project-name')
tname = E('transcription-name')
ref_file = E('referenced-file', url="")
ud = E('ud-meta-information')
comment = E('comment')
tconvention = E('transcription-convention')
meta.append(project)
meta.append(tname)
meta.append(ref_file)
meta.append(ud)
meta.append(comment)
meta.append(tconvention)
speakers = E('speakertable')
head.append(meta)
head.append(speakers)
root.append(head)
return root
def __add_document_structure(self, docgraph,
remove_redundant_layers=True):
"""return an Exmaralda XML etree representation a docgraph"""
E = self.E
root = self.__create_document_header()
body = E('basic-body')
timeline = E('common-timeline')
# for n tokens we need to create n+1 timeline indices
for i in xrange(len(docgraph.tokens)+1):
idx = str(i)
# example: <tli id="T0" time="0"/>
timeline.append(E('tli', {'id': 'T'+idx, 'time': idx}))
body.append(timeline)
body = self.__add_token_tiers(docgraph, body)
annotation_layers = get_annotation_layers(docgraph)
for layer in annotation_layers:
if not remove_redundant_layers: # add all layers
self.__add_annotation_tier(docgraph, body, layer)
elif is_informative(layer): # only add informative layers
self.__add_annotation_tier(docgraph, body, layer)
self.__add_coreference_chain_tiers(docgraph, body)
root.append(body)
return root
def __add_annotation_tier(self, docgraph, body, annotation_layer):
"""
adds a span-based annotation layer as a <tier> to the Exmaralda <body>.
Parameter
---------
docgraph : DiscourseDocumentGraph
the document graph from which the chains will be extracted
body : etree._Element
an etree representation of the <basic_body> element (and all its
descendants) of the Exmaralda file
annotation_layer : str
the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
"""
layer_cat = annotation_layer.split(':')[-1]
temp_tier = self.E('tier',
{'id': "TIE{}".format(self.tier_count),
'category': layer_cat, 'type': "t",
'display-name': "[{}]".format(annotation_layer)})
self.tier_count += 1
for node_id in select_nodes_by_layer(docgraph, annotation_layer):
span_node_ids = get_span(docgraph, node_id)
if span_node_ids:
start_id, end_id = self.__span2event(span_node_ids)
event_label = docgraph.node[node_id].get('label', '')
event = self.E('event',
{'start': "T{}".format(start_id),
'end': "T{}".format(end_id)},
event_label)
temp_tier.append(event)
body.append(temp_tier)
def __add_coreference_chain_tiers(self, docgraph, body,
min_chain_length=3):
"""
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph from which the chains will be extracted
body : etree._Element
an etree representation of the <basic_body> element (and all its
descendants) of the Exmaralda file
min_chain_length : int
don't add tiers for chains with less than N elements (default: 3)
        TODO: this method assumes that each pointing relation chain signifies
a coreference chain.
"""
E = self.E
for i, chain in enumerate(get_pointing_chains(docgraph)):
chain_tier = E('tier',
{'id': "TIE{}".format(self.tier_count),
'category': "chain", 'type': "t",
'display-name': "[coref-chain-{}]".format(i)})
self.tier_count += 1
chain_length = len(chain)
if chain_length < min_chain_length:
continue # ignore short chains
for j, node_id in enumerate(chain):
span_node_ids = get_span(docgraph, node_id)
if span_node_ids:
start_id, end_id = self.__span2event(span_node_ids)
element_str = "chain_{0}: {1}/{2}".format(
i, chain_length-j, chain_length)
chain_tier.append(
E('event', {'start': "T{}".format(start_id),
'end': "T{}".format(end_id)}, element_str))
body.append(chain_tier)
def __add_token_tiers(self, docgraph, body):
"""
adds all tiers that annotate single tokens (e.g. token string, lemma,
POS tag) to the etree representation of the Exmaralda XML file.
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph to be converted
body : etree._Element
an etree representation of the <basic_body> element (and all its
descendants) of the Exmaralda file
"""
E = self.E
token_tier = E('tier',
{'id': "TIE{}".format(self.tier_count),
'category': "tok", 'type': "t",
'display-name': "[tok]"})
self.tier_count += 1
token_attribs = defaultdict(lambda: defaultdict(str))
for token_node_id in docgraph.tokens:
for attrib in docgraph.node[token_node_id]:
is_boring_attrib = attrib in ('layers', 'label')
is_boring_cat = attrib.split(':')[-1] in ('token',
'id', 'word',
'morph', 'lemma')
if not is_boring_attrib and not is_boring_cat:
token_attribs[attrib][token_node_id] = \
docgraph.node[token_node_id][attrib]
for i, (_tok_id, token_str) in enumerate(docgraph.get_tokens()):
# example: <event start="T0" end="T1">Zum</event>
token_tier.append(
E('event', {'start': "T{}".format(i),
'end': "T{}".format(i+1)}, token_str))
body.append(token_tier)
for anno_tier in token_attribs:
category = anno_tier.split(':')[-1]
temp_tier = E(
'tier', {'id': "TIE{}".format(self.tier_count),
'category': category, 'type': "t",
'display-name': "[{}]".format(anno_tier)})
self.tier_count += 1
for token_node_id in token_attribs[anno_tier]:
token_tier_id = self.toknode2id[token_node_id]
token_attrib = token_attribs[anno_tier][token_node_id]
temp_tier.append(
E('event', {'start': "T{}".format(token_tier_id),
'end': "T{}".format(token_tier_id+1)},
token_attrib))
body.append(temp_tier)
return body
def __span2event(self, span_node_ids):
"""
converts a span of tokens (list of token node IDs) into an Exmaralda
event (start and end ID).
Parameters
----------
span_node_ids : list of str
sorted list of node IDs representing a span of tokens
Returns
-------
event : tuple of (str, str)
event start ID and event end ID
"""
return (self.toknode2id[span_node_ids[0]],
self.toknode2id[span_node_ids[-1]]+1)
class ExmaraldaDocumentGraph(DiscourseDocumentGraph):
"""graph representation of an Exmaralda-annotated document"""
def __init__(self, exmaralda_file, name=None, namespace='exmaralda',
token_tier='tok', ignored_tier_categories=None):
"""
generates a document graph from an Exmaralda *.exb file
Parameters
----------
exmaralda_file : str
path to an *.exb file
name : str or None
name of the document graph. If None, will be set to the input
file's basename
namespace : str
namespace of the graph, default: exmaralda
token_tier: str
the category attribute of the <tier> that contains the tokens.
default: tok
ignored_tier_categories : None or list of str
a list of tier categories which will not be added to the document
graph
"""
# super calls __init__() of base class DiscourseDocumentGraph
super(ExmaraldaDocumentGraph, self).__init__()
self.name = name if name else os.path.basename(exmaralda_file)
self.ns = namespace
self.root = self.ns+':root_node'
tree = etree.parse(exmaralda_file)
self.tokens = []
self.__add_tokenization(tree)
if ignored_tier_categories:
for tier in tree.iter('tier'):
if tier.attrib['category'] not in ignored_tier_categories:
self.__add_tier(tier, token_tier_name=token_tier)
else:
for tier in tree.iter('tier'):
self.__add_tier(tier, token_tier_name=token_tier)
def __add_tokenization(self, tree):
"""adds a node for each token ID in the document"""
for token_id in self.get_token_ids(tree):
self.add_node(token_id, layers={self.ns})
self.tokens.append(token_id)
def __add_tier(self, tier, token_tier_name):
"""
adds a tier to the document graph (either as additional attributes
to the token nodes or as a span node with outgoing edges to the token
nodes it represents)
"""
if tier.attrib['category'] == token_tier_name:
self.__add_tokens(tier)
else:
if self.is_token_annotation_tier(tier):
self.__add_token_annotation_tier(tier)
else:
self.__add_span_tier(tier)
def __add_tokens(self, token_tier):
"""
adds all tokens to the document graph. Exmaralda considers them to
be annotations as well, that's why we could only extract the token
node IDs from the timeline (using ``__add_tokenization()``), but not
the tokens themselves.
Parameters
----------
token_tier : etree._Element
an etree element representing the <tier> which contains the tokens
"""
for event in token_tier.iter('event'):
assert len(self.gen_token_range(event.attrib['start'],
event.attrib['end'])) == 1, \
"Events in the token tier must not span more than one token."
token_id = event.attrib['start']
self.node[token_id][self.ns+':token'] = event.text
def is_token_annotation_tier(self, tier):
"""
returns True, iff all events in the given tier annotate exactly one
token.
"""
for i, event in enumerate(tier.iter('event')):
if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
return False
return True
def __add_token_annotation_tier(self, tier):
"""
adds a tier to the document graph, in which each event annotates
exactly one token.
"""
for i, event in enumerate(tier.iter('event')):
anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
anno_val = event.text if event.text else ''
self.node[event.attrib['start']][anno_key] = anno_val
def __add_span_tier(self, tier):
"""
adds a tier to the document graph in which each event annotates a span
of one or more tokens.
"""
tier_id = tier.attrib['id']
# add the tier's root node with an inbound edge from the document root
self.add_node(
tier_id, layers={self.ns, self.ns+':tier'},
attr_dict={self.ns+':category': tier.attrib['category'],
self.ns+':type': tier.attrib['type'],
self.ns+':display-name': tier.attrib['display-name']})
self.add_edge(self.root, tier_id, edge_type=EdgeTypes.dominance_relation)
# add a node for each span, containing an annotation.
# add an edge from the tier root to each span and an edge from each
# span to the tokens it represents
for i, event in enumerate(tier.iter('event')):
span_id = '{}_{}'.format(tier_id, i)
span_tokens = self.gen_token_range(event.attrib['start'], event.attrib['end'])
annotation = event.text if event.text else ''
self.add_node(
span_id, layers={self.ns, self.ns+':span'},
attr_dict={self.ns+':annotation': annotation,
'label': annotation})
self.add_edge(tier_id, span_id, edge_type=EdgeTypes.dominance_relation)
for token_id in span_tokens:
self.add_edge(span_id, token_id,
edge_type=EdgeTypes.spanning_relation)
@staticmethod
def get_token_ids(tree):
"""
        returns a list of all token IDs occurring in the given Exmaralda file,
sorted by their time stamp in ascending order.
"""
def tok2time(token_element):
'''
extracts the time (float) of a <tli> element
(i.e. the absolute position of a token in the document)
'''
return float(token_element.attrib['time'])
timeline = tree.find('//common-timeline')
return (tok.attrib['id']
for tok in sorted((tli for tli in timeline.iterchildren()),
key=tok2time))
@staticmethod
def tokenid2index(token_id):
"""converts a token ID (e.g. 'T0') to its index (i.e. 0)"""
return int(token_id[1:])
def indexdelta(self, stop_id, start_id):
"""returns the distance (int) between to idices.
Two consecutive tokens must have a delta of 1.
"""
return self.tokenid2index(stop_id) - self.tokenid2index(start_id)
def gen_token_range(self, start_id, stop_id):
"""
returns a list of all token IDs in the given, left-closed,
right-open interval (i.e. includes start_id, but excludes stop_id)
>>> gen_token_range('T0', 'T1')
['T0']
>>> gen_token_range('T1', 'T5')
['T1', 'T2', 'T3', 'T4']
"""
index_range = range(self.tokenid2index(start_id), self.tokenid2index(stop_id))
return ["T{}".format(index) for index in index_range]
def is_informative(layer):
"""
returns true, iff the annotation layer contains information that 'makes
sense' in Exmaralda (i.e. there are annotations we don't need and which
would clutter the Exmaralda Partitur editor).
Parameters
----------
layer : str
the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
Returns
-------
is_informative : bool
Returns True, iff the layer is likely to contain information that
should be exported to Exmaralda. Usually, we don't want to include
information about sentence or token boundaries, since they are already
obvious from the token layer.
"""
# very dirty hack
# TODO: fix Issue #36 (efficient self.layers / get_hierarchical_layers()
return layer not in ('tiger', 'tiger:token', 'tiger:sentence:root',
'tiger:sentence:vroot', 'tiger:edge', 'tiger:secedge',
'exmaralda', 'exmaralda:tier',
'discoursegraph')
# pseudo-function to create a document graph from an Exmaralda file
read_exb = read_exmaralda = ExmaraldaDocumentGraph
def write_exb(docgraph, output_file):
"""
converts a DiscourseDocumentGraph into an Exmaralda ``*.exb`` file and
writes it to the given file (or file path).
"""
exmaralda_file = ExmaraldaFile(docgraph)
assert isinstance(output_file, (str, file))
if isinstance(output_file, str):
path_to_file = os.path.dirname(output_file)
if not os.path.isdir(path_to_file):
create_dir(path_to_file)
exmaralda_file.write(output_file)
else: # output_file is a file object
output_file.write(exmaralda_file.__str__())
# alias for write_exb(): convert docgraph into Exmaralda file
write_exmaralda = write_exb
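# Added illustrative helper (not part of the original API): a minimal read/write round
# trip using the functions defined above. The file paths are hypothetical placeholders.
def example_roundtrip(input_path, output_path):
    """Read an Exmaralda *.exb file into a document graph and write it back out."""
    docgraph = read_exb(input_path)
    write_exb(docgraph, output_path)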
if __name__ == "__main__":
import argparse
import cPickle as pickle
parser = argparse.ArgumentParser()
parser.add_argument('input_file',
help='pickle file of a document graph to be converted')
parser.add_argument('output_file', nargs='?', default=sys.stdout)
args = parser.parse_args(sys.argv[1:])
assert os.path.isfile(args.input_file), \
"'{}' isn't a file".format(args.input_file)
with open(args.input_file, 'rb') as docgraph_file:
docgraph = pickle.load(docgraph_file)
write_exb(docgraph, args.output_file)
| bsd-3-clause | 1,024,340,223,439,078,100 | 37.353282 | 90 | 0.554236 | false |
novapost/workalendar | workalendar/europe/russia.py | 1 | 3547 | from datetime import date
from ..core import OrthodoxCalendar, MON, daterange, cleaned_date
from ..registry_tools import iso_register
@iso_register('RU')
class Russia(OrthodoxCalendar):
'Russia'
# Civil holidays
include_labour_day = True
FIXED_HOLIDAYS = OrthodoxCalendar.FIXED_HOLIDAYS + (
(1, 2, "Day After New Year"),
(2, 23, "Defendence of the Fatherland"),
(3, 8, "International Women's Day"),
(5, 9, "Victory Day"),
(6, 12, "National Day"),
(11, 4, "Day of Unity"),
)
# Christian holidays
include_christmas = False
covid19_2020_start = date(2020, 3, 28)
covid19_2020_end = date(2020, 4, 30)
def get_fixed_holidays(self, year):
if year >= 1992:
self.labour_day_label = "The Day of Spring and Labour"
else:
self.labour_day_label = "International Workers' Day"
days = super().get_fixed_holidays(year)
if year >= 2005:
days.extend([
(date(year, 1, 3), "Third Day after New Year"),
(date(year, 1, 4), "Fourth Day after New Year"),
(date(year, 1, 5), "Fifth Day after New Year"),
(date(year, 1, 6), "Sixth Day after New Year"),
(date(year, 1, 8), "Eighth Day after New Year"),
])
if year == 2020:
index = 1
for day in daterange(self.covid19_2020_start,
self.covid19_2020_end):
days.append(
(day, f"Non-Working Day (COVID-19) #{index}")
)
index += 1
# Adding May extra days
days.extend([
(date(year, 5, 4), "May 4th, 2020 holiday"),
(date(year, 5, 5), "Day of Spring and Labor"),
])
# Extra COVID-19 in May
days.extend([
(date(year, 5, 6), "Covid-19 May #1"),
(date(year, 5, 7), "Covid-19 May #2"),
(date(year, 5, 8), "Covid-19 May #3"),
])
# Constitution Vote Public Holiday
days.append((date(year, 7, 1), "Constitution Vote Public Holiday"))
elif year == 2021:
days.extend([
(date(year, 2, 22), "Day Before Defendence of the Fatherland"),
(date(year, 11, 5), 'Day After Day of Unity'),
(date(year, 12, 31), "New Year's Eve"),
])
return days
def get_calendar_holidays(self, year):
holidays = super().get_calendar_holidays(year)
shifts = []
for day, label in holidays:
if day.month == 1 and day.day in range(1, 9):
continue
# Add an exception for 2020 non-working days due to COVID-19
if self.covid19_2020_start <= day <= self.covid19_2020_end:
continue # pragma: no cover
if day.weekday() in self.get_weekend_days():
shifts.append((
self.get_first_weekday_after(day, MON),
label + " shift"
))
holidays.extend(shifts)
return holidays
def is_working_day(self, day,
extra_working_days=None, extra_holidays=None):
day = cleaned_date(day)
if day == date(2021, 2, 20):
return True
return super().is_working_day(
day,
extra_working_days=extra_working_days,
extra_holidays=extra_holidays
)
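if __name__ == "__main__":
    # Added illustrative check; both results follow from the holiday definitions and
    # the is_working_day() override above. The module uses package-relative imports,
    # so this block is meant as a readable sketch rather than a standalone script.
    cal = Russia()
    print(cal.is_working_day(date(2021, 2, 22)))  # False: extra 2021 holiday
    print(cal.is_working_day(date(2021, 2, 20)))  # True: declared a working Saturday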
| mit | 5,417,020,881,726,888,000 | 33.105769 | 79 | 0.50578 | false |
dnguyen0304/clare | clare/clare/application/room_list_watcher/tests/test_scrapers.py | 1 | 1168 | # -*- coding: utf-8 -*-
import mock
from nose.tools import assert_equal
from .. import scrapers
class TestBufferingSourceAdapter(object):
def __init__(self):
self.elements = None
self.scraper = None
self.source = None
self.n = None
def setup(self):
self.elements = xrange(2)
self.scraper = scrapers.Nop()
self.scraper.scrape = mock.Mock(return_value=self.elements)
self.source = scrapers.BufferingSourceAdapter(scraper=self.scraper,
url=None)
self.n = len(self.elements)
def test_scrape_is_not_called_while_buffer_has_elements(self):
for i in xrange(self.n):
self.source.emit()
assert_equal(self.scraper.scrape.call_count, 1)
def test_scrape_is_called_when_buffer_is_empty(self):
for i in xrange(self.n + 1):
self.source.emit()
assert_equal(self.scraper.scrape.call_count, 2)
def test_records_are_ordered_and_reversed(self):
records = [self.source.emit() for i in xrange(self.n)]
assert_equal(*map(list, (reversed(records), self.elements)))
| mit | -6,742,735,069,976,354,000 | 30.567568 | 75 | 0.607021 | false |
adammck/rapidsms-community-apps | tags/app.py | 1 | 1323 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import rapidsms
from models import *
class App(rapidsms.App):
def parse(self, msg):
text = msg.text
msg.tags = []
# check the contents of this message for EVERY SINGLE
# TAG that we know of. TODO: cache this for a little
# while to avoid kicking the crap out of the database
for tag in Tag.objects.all():
if tag.match(text):
# log and add this tag object to the message
self.info("Tagged message with: %r" % (tag))
msg.tags.append(tag)
# remove this tag from the message string,
# so other apps don't have to deal with it.
# this allows the tag syntax to play nice
# with other prefix-based apps
text = tag.crop(text)
# if we found and stripped tags out of the
# message, update the object and log it
if text != msg.text:
self.info("Message is now: %s" % (text))
msg.text = text
# not so important, but make a note if
# the message didn't contain tags. just
# in case it _should_ have, we can at
# least see that the app is working
else:
self.debug("No tags were found")
| bsd-3-clause | -7,288,373,953,676,032,000 | 31.268293 | 61 | 0.555556 | false |
mistercrunch/panoramix | superset/dashboards/commands/importers/v0.py | 1 | 12454 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import time
from copy import copy
from datetime import datetime
from typing import Any, Dict, Optional
from flask_babel import lazy_gettext as _
from sqlalchemy.orm import make_transient, Session
from superset import ConnectorRegistry, db
from superset.commands.base import BaseCommand
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.datasets.commands.importers.v0 import import_dataset
from superset.exceptions import DashboardImportException
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.dashboard_filter_scopes_converter import (
convert_filter_scopes,
copy_filter_scopes,
)
logger = logging.getLogger(__name__)
def import_chart(
slc_to_import: Slice,
slc_to_override: Optional[Slice],
import_time: Optional[int] = None,
) -> int:
"""Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
"""
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)
slc_to_import = slc_to_import.copy()
slc_to_import.reset_ownership()
params = slc_to_import.params_dict
datasource = ConnectorRegistry.get_datasource_by_name(
session,
slc_to_import.datasource_type,
params["datasource_name"],
params["schema"],
params["database_name"],
)
slc_to_import.datasource_id = datasource.id # type: ignore
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
session.add(slc_to_import)
logger.info("Final slice: %s", str(slc_to_import.to_json()))
session.flush()
return slc_to_import.id
def import_dashboard(
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
dashboard_to_import: Dashboard,
import_time: Optional[int] = None,
) -> int:
"""Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
    be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
"""
def alter_positions(
dashboard: Dashboard, old_to_new_slc_id_dict: Dict[int, int]
) -> None:
"""Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
"""
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (
isinstance(value, dict)
and value.get("meta")
and value.get("meta", {}).get("chartId")
):
old_slice_id = value["meta"]["chartId"]
if old_slice_id in old_to_new_slc_id_dict:
value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
logger.info("Started import of the dashboard: %s", dashboard_to_import.to_json())
session = db.session
logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
# copy slices object as Slice.import_slice will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
# Clearing the slug to avoid conflicts
dashboard_to_import.slug = None
old_json_metadata = json.loads(dashboard_to_import.json_metadata or "{}")
old_to_new_slc_id_dict: Dict[int, int] = {}
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
new_filter_scopes = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {
slc.params_dict["remote_id"]: slc
for slc in session.query(Slice).all()
if "remote_id" in slc.params_dict
}
for slc in slices:
logger.info(
"Importing slice %s from the dashboard: %s",
slc.to_json(),
dashboard_to_import.dashboard_title,
)
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = str(new_slc_id)
old_slc_id_str = str(slc.id)
if (
"timed_refresh_immune_slices" in i_params_dict
and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if (
"expanded_slices" in i_params_dict
and old_slc_id_str in i_params_dict["expanded_slices"]
):
new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
old_slc_id_str
]
# since PR #9109, filter_immune_slices and filter_immune_slice_fields
# are converted to filter_scopes
    # but dashboards created from import may still have old dashboard filter metadata
# here we convert them to new filter_scopes metadata first
filter_scopes = {}
if (
"filter_immune_slices" in i_params_dict
or "filter_immune_slice_fields" in i_params_dict
):
filter_scopes = convert_filter_scopes(old_json_metadata, slices)
if "filter_scopes" in i_params_dict:
filter_scopes = old_json_metadata.get("filter_scopes")
# then replace old slice id to new slice id:
if filter_scopes:
new_filter_scopes = copy_filter_scopes(
old_to_new_slc_id_dict=old_to_new_slc_id_dict,
old_filter_scopes=filter_scopes,
)
# override the dashboard
existing_dashboard = None
for dash in session.query(Dashboard).all():
if (
"remote_id" in dash.params_dict
and dash.params_dict["remote_id"] == dashboard_to_import.id
):
existing_dashboard = dash
dashboard_to_import = dashboard_to_import.copy()
dashboard_to_import.id = None
dashboard_to_import.reset_ownership()
# position_json can be empty for dashboards
# with charts added from chart-edit page and without re-arranging
if dashboard_to_import.position_json:
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
dashboard_to_import.remove_params(param_to_remove="filter_immune_slices")
dashboard_to_import.remove_params(param_to_remove="filter_immune_slice_fields")
if new_filter_scopes:
dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
if new_expanded_slices:
dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(
timed_refresh_immune_slices=new_timed_refresh_immune_slices
)
new_slices = (
session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
)
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
dashboard_to_import.slices = new_slices
session.add(dashboard_to_import)
session.flush()
return dashboard_to_import.id # type: ignore
def decode_dashboards( # pylint: disable=too-many-return-statements
o: Dict[str, Any]
) -> Any:
"""
    Function to be passed as the object_hook parameter of json.loads.
    Recreates the dashboard object from a JSON representation.
"""
from superset.connectors.druid.models import (
DruidCluster,
DruidColumn,
DruidDatasource,
DruidMetric,
)
if "__Dashboard__" in o:
return Dashboard(**o["__Dashboard__"])
if "__Slice__" in o:
return Slice(**o["__Slice__"])
if "__TableColumn__" in o:
return TableColumn(**o["__TableColumn__"])
if "__SqlaTable__" in o:
return SqlaTable(**o["__SqlaTable__"])
if "__SqlMetric__" in o:
return SqlMetric(**o["__SqlMetric__"])
if "__DruidCluster__" in o:
return DruidCluster(**o["__DruidCluster__"])
if "__DruidColumn__" in o:
return DruidColumn(**o["__DruidColumn__"])
if "__DruidDatasource__" in o:
return DruidDatasource(**o["__DruidDatasource__"])
if "__DruidMetric__" in o:
return DruidMetric(**o["__DruidMetric__"])
if "__datetime__" in o:
return datetime.strptime(o["__datetime__"], "%Y-%m-%dT%H:%M:%S")
return o
def import_dashboards(
session: Session,
content: str,
database_id: Optional[int] = None,
import_time: Optional[int] = None,
) -> None:
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(content, object_hook=decode_dashboards)
if not data:
raise DashboardImportException(_("No data in file"))
for table in data["datasources"]:
import_dataset(table, database_id, import_time=import_time)
session.commit()
for dashboard in data["dashboards"]:
import_dashboard(dashboard, import_time=import_time)
session.commit()
class ImportDashboardsCommand(BaseCommand):
"""
Import dashboard in JSON format.
This is the original unversioned format used to export and import dashboards
in Superset.
"""
# pylint: disable=unused-argument
def __init__(
self, contents: Dict[str, str], database_id: Optional[int] = None, **kwargs: Any
):
self.contents = contents
self.database_id = database_id
def run(self) -> None:
self.validate()
for file_name, content in self.contents.items():
logger.info("Importing dashboard from file %s", file_name)
import_dashboards(db.session, content, self.database_id)
def validate(self) -> None:
# ensure all files are JSON
for content in self.contents.values():
try:
json.loads(content)
except ValueError:
logger.exception("Invalid JSON file")
raise
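if __name__ == "__main__":
    # Added illustrative sketch: run the legacy importer on a JSON export file given on
    # the command line. A configured Superset application context (Flask app, metadata
    # database) is assumed to be available; the file name is whatever the caller passes.
    import sys
    with open(sys.argv[1]) as fp:
        ImportDashboardsCommand({sys.argv[1]: fp.read()}).run()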
| apache-2.0 | 3,782,625,302,160,400,400 | 35.521994 | 88 | 0.638028 | false |
lino-framework/lino | lino/modlib/checkdata/choicelists.py | 1 | 5000 | # Copyright 2015-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.utils import translation
from lino.core.gfks import gfk2lookup
from lino.core.roles import SiteStaff
from django.utils.text import format_lazy
from lino.api import dd, rt, _
if False:
class Feedbacks(dd.ChoiceList):
verbose_name = _("Checkdata feedback")
verbose_name_plural = _("Checkdata feedback")
add = Feedbacks.add_item()
add("10", _("Ignorable"), 'ignorable')
add("20", _("Serious"), 'serious')
class Severities(dd.ChoiceList):
verbose_name = _("Severity")
verbose_name_plural = _("Data problem severities")
add = Severities.add_item()
add("10", _("Note"), 'note')
add("20", _("Warning"), 'warning')
add("30", _("Error"), 'error')
class Checker(dd.Choice):
verbose_name = None
severity = None
self = None
model = None
help_text = None
def __init__(self):
# value = self.__module__ + '.' + self.__class__.__name__
value = self.__module__.split('.')[-2] + '.' + self.__class__.__name__
# if isinstance(self.model, six.string_types):
# value = self.model + '.' + self.__class__.__name__
# else:
# value = self.model.__name__ + '.' + self.__class__.__name__
if self.verbose_name is None:
text = value
else:
text = self.verbose_name
super(Checker, self).__init__(value, text, None)
@classmethod
def activate(cls):
if cls.self is not None:
raise Exception("Duplicate call to {0}.activate()".format(cls))
cls.self = cls()
Checkers.add_item_instance(cls.self)
@classmethod
def update_unbound_problems(cls, **kwargs):
assert cls.self.model is None
cls.self.update_problems(**kwargs)
todo, done = cls.self.update_problems(**kwargs)
msg = "Found {0} and fixed {1} data problems for {2}."
dd.logger.info(msg.format(len(todo), len(done), cls.self))
@classmethod
def check_instance(cls, *args, **kwargs):
return cls.self.get_checkdata_problems(*args, **kwargs)
def get_checkable_models(self):
if self.model is None:
return [None]
return rt.models_by_base(self.model, toplevel_only=True)
def resolve_model(self, site):
if isinstance(self.model, str):
self.model = dd.resolve_model(self.model, strict=True)
def update_problems(self, obj=None, delete=True, fix=False):
Problem = rt.models.checkdata.Problem
if delete:
# if obj is None:
# flt = {
# Problem.owner.ct_field.name + "__isnull": True,
# Problem.owner.fk_field.name + "__isnull": True
# }
# else:
# flt = gfk2lookup(Problem.owner, obj, checker=self)
flt = gfk2lookup(Problem.owner, obj, checker=self)
qs = Problem.objects.filter(**flt)
qs.delete()
done = []
todo = []
for fixable, msg in self.get_checkdata_problems(obj, fix):
if fixable:
# attn: do not yet translate
# msg = string_concat(u"(\u2605) ", msg)
msg = format_lazy("(\u2605) {}", msg)
if fixable and fix:
done.append(msg)
else:
todo.append(msg)
if len(todo):
# dd.logger.info("%s : %s", obj, todo)
user = self.get_responsible_user(obj)
if user is None:
lang = dd.get_default_language()
else:
lang = user.language
with translation.override(lang):
if obj is None:
for msg in todo:
prb = Problem(message=str(msg), checker=self, user=user)
prb.full_clean()
prb.save()
else:
msg = '\n'.join([str(s) for s in todo])
prb = Problem(owner=obj, message=msg, checker=self, user=user)
prb.full_clean()
prb.save()
return (todo, done)
def get_checkdata_problems(self, obj, fix=False):
return []
def get_responsible_user(self, obj):
return dd.plugins.checkdata.get_responsible_user(self, obj)
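# Added illustrative sketch (not part of this module): a concrete checker subclasses
# Checker, optionally binds to a model and yields (fixable, message) pairs from
# get_checkdata_problems(). The model name and the checked condition are assumptions
# for demonstration only; a real checker would also call ExampleNameChecker.activate()
# at startup to register itself with the Checkers choicelist defined below.
class ExampleNameChecker(Checker):
    verbose_name = _("Objects without a name")
    model = 'contacts.Person'
    def get_checkdata_problems(self, obj, fix=False):
        if not obj.name:
            yield (False, _("This person has no name."))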
class Checkers(dd.ChoiceList):
required_roles = dd.login_required(SiteStaff)
verbose_name = _("Data checker")
verbose_name_plural = _("Data checkers")
item_class = Checker
max_length = 250
# e.g. "lino_welfare.modlib.pcsw.models.ClientCoachingsChecker"
column_names = "value text"
show_values = False
detail_layout = """
value text
checkdata.ProblemsByChecker
"""
@dd.receiver(dd.pre_analyze)
def resolve_checkers(sender, **kw):
for chk in Checkers.get_list_items():
chk.resolve_model(sender)
| bsd-2-clause | -260,389,720,889,201,000 | 32.112583 | 82 | 0.5528 | false |
CHBMB/LazyLibrarian | lib/fuzzywuzzy/fuzz.py | 1 | 8419 | #!/usr/bin/env python
# encoding: utf-8
"""
fuzz.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import platform
import warnings
try:
from .StringMatcher import StringMatcher as SequenceMatcher
except ImportError:
#if platform.python_implementation() != "PyPy":
# warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
from difflib import SequenceMatcher
from . import utils
###########################
# Basic Scoring Functions #
###########################
@utils.check_for_none
@utils.check_empty_string
def ratio(s1, s2):
s1, s2 = utils.make_type_consistent(s1, s2)
m = SequenceMatcher(None, s1, s2)
return utils.intr(100 * m.ratio())
@utils.check_for_none
@utils.check_empty_string
def partial_ratio(s1, s2):
""""Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
else:
shorter = s2
longer = s1
m = SequenceMatcher(None, shorter, longer)
blocks = m.get_matching_blocks()
# each block represents a sequence of matching characters in a string
# of the form (idx_1, idx_2, len)
# the best partial match will block align with at least one of those blocks
# e.g. shorter = "abcd", longer = XXXbcdeEEE
# block = (1,3,3)
# best score === ratio("abcd", "Xbcd")
scores = []
for block in blocks:
long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
long_end = long_start + len(shorter)
long_substr = longer[long_start:long_end]
m2 = SequenceMatcher(None, shorter, long_substr)
r = m2.ratio()
if r > .995:
return 100
else:
scores.append(r)
return utils.intr(100 * max(scores))
##############################
# Advanced Scoring Functions #
##############################
def _process_and_sort(s, force_ascii, full_process=True):
"""Return a cleaned string with token sorted."""
# pull tokens
ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
tokens = ts.split()
# sort tokens and join
sorted_string = u" ".join(sorted(tokens))
return sorted_string.strip()
# Sorted Token
# find all alphanumeric tokens in the string
# sort those tokens and take ratio of resulting joined strings
# controls for unordered string elements
@utils.check_for_none
def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True):
sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process)
sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process)
if partial:
return partial_ratio(sorted1, sorted2)
else:
return ratio(sorted1, sorted2)
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return a measure of the sequences' similarity between 0 and 100
but sorting the token before comparing.
"""
return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return the ratio of the most similar substring as a number between
0 and 100 but sorting the token before comparing.
"""
return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
@utils.check_for_none
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
"""Find all alphanumeric tokens in each string...
- treat them as a set
- construct two strings of the form:
<sorted_intersection><sorted_remainder>
- take ratios of those two strings
- controls for unordered partial matches"""
p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# pull tokens
tokens1 = set(p1.split())
tokens2 = set(p2.split())
intersection = tokens1.intersection(tokens2)
diff1to2 = tokens1.difference(tokens2)
diff2to1 = tokens2.difference(tokens1)
sorted_sect = " ".join(sorted(intersection))
sorted_1to2 = " ".join(sorted(diff1to2))
sorted_2to1 = " ".join(sorted(diff2to1))
combined_1to2 = sorted_sect + " " + sorted_1to2
combined_2to1 = sorted_sect + " " + sorted_2to1
# strip
sorted_sect = sorted_sect.strip()
combined_1to2 = combined_1to2.strip()
combined_2to1 = combined_2to1.strip()
if partial:
ratio_func = partial_ratio
else:
ratio_func = ratio
pairwise = [
ratio_func(sorted_sect, combined_1to2),
ratio_func(sorted_sect, combined_2to1),
ratio_func(combined_1to2, combined_2to1)
]
return max(pairwise)
def token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
###################
# Combination API #
###################
# q is for quick
def QRatio(s1, s2, force_ascii=True):
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
return ratio(p1, p2)
def UQRatio(s1, s2):
return QRatio(s1, s2, force_ascii=False)
# w is for weighted
def WRatio(s1, s2, force_ascii=True):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms.
"""
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# should we look at partials?
try_partial = True
unbase_scale = .95
partial_scale = .90
base = ratio(p1, p2)
len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))
# if strings are similar length, don't use partials
if len_ratio < 1.5:
try_partial = False
# if one string is much much shorter than the other
if len_ratio > 8:
partial_scale = .6
if try_partial:
partial = partial_ratio(p1, p2) * partial_scale
ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
ptser = partial_token_set_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
return utils.intr(max(base, partial, ptsor, ptser))
else:
tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale
return utils.intr(max(base, tsor, tser))
def UWRatio(s1, s2):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms. Same as WRatio but preserving unicode.
"""
return WRatio(s1, s2, force_ascii=False)
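if __name__ == '__main__':
    # Added illustrative comparison of the scorers defined above. The token-based
    # scores follow from the identical token sets; the plain ratio is only claimed to
    # be well below 100. The package-relative imports above mean this block is meant
    # as a readable sketch rather than a directly runnable script.
    s1 = "new york mets vs atlanta braves"
    s2 = "atlanta braves vs new york mets"
    print(ratio(s1, s2))             # order-sensitive, well below 100
    print(token_sort_ratio(s1, s2))  # 100: same tokens, different order
    print(token_set_ratio(s1, s2))   # 100: identical token sets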
| gpl-3.0 | -7,268,800,526,245,139,000 | 30.650376 | 115 | 0.663618 | false |
tuhuayuan/chatmind | main.py | 1 | 1130 | # -*- coding: UTF-8 -*-
import tornado.web
import tornado.httpserver
import tornado.ioloop
import wechat
from tornado.options import define, options
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/wechat_index", wechat.IndexHandler)
]
settings = {
'debug': options.debug
}
tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
@property
def dbsession(self):
return None
def main():
define("port", default=2080, help="run on the givent port", type=int)
define("debug", default=False, help="run in debug mode", type=bool)
define("config", default="", help="load the givent config file")
tornado.options.parse_command_line()
try:
tornado.options.parse_config_file(options.config)
except IOError:
options.print_help()
return
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| mit | -8,163,191,410,808,373,000 | 26.560976 | 73 | 0.646018 | false |
lxml/lxml | src/lxml/tests/test_isoschematron.py | 1 | 35134 | # -*- coding: utf-8 -*-
"""
Test cases related to ISO-Schematron parsing and validation
"""
from __future__ import absolute_import
import unittest
from lxml import isoschematron
from .common_imports import etree, HelperTestCase, fileInTestDir, doctest, make_doctest
class ETreeISOSchematronTestCase(HelperTestCase):
def test_schematron(self):
tree_valid = self.parse('<AAA><BBB/><CCC/></AAA>')
tree_invalid = self.parse('<AAA><BBB/><CCC/><DDD/></AAA>')
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Open Model</title>
<rule context="AAA">
<assert test="BBB"> BBB element is not present</assert>
<assert test="CCC"> CCC element is not present</assert>
</rule>
</pattern>
<pattern id="ClosedModel">
<title>Closed model"</title>
<rule context="AAA">
<assert test="BBB"> BBB element is not present</assert>
<assert test="CCC"> CCC element is not present</assert>
<assert test="count(BBB|CCC) = count (*)">There is an extra element</assert>
</rule>
</pattern>
</schema>
''')
schema = isoschematron.Schematron(schema)
self.assertTrue(schema.validate(tree_valid))
self.assertTrue(not schema.validate(tree_invalid))
def test_schematron_elementtree_error(self):
self.assertRaises(ValueError, isoschematron.Schematron, etree.ElementTree())
# an empty pattern is valid in iso schematron
def test_schematron_empty_pattern(self):
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Open model</title>
</pattern>
</schema>
''')
schema = isoschematron.Schematron(schema)
self.assertTrue(schema)
def test_schematron_invalid_schema_empty(self):
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" />
''')
self.assertRaises(etree.SchematronParseError,
isoschematron.Schematron, schema)
def test_schematron_invalid_schema_namespace(self):
schema = self.parse('''\
<schema xmlns="mynamespace" />
''')
self.assertRaises(etree.SchematronParseError,
isoschematron.Schematron, schema)
def test_schematron_from_tree(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(isinstance(schematron, isoschematron.Schematron))
def test_schematron_from_element(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema.getroot())
self.assertTrue(isinstance(schematron, isoschematron.Schematron))
def test_schematron_from_file(self):
schematron = isoschematron.Schematron(file=fileInTestDir('test.sch'))
self.assertTrue(isinstance(schematron, isoschematron.Schematron))
def test_schematron_call(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
def test_schematron_validate(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron.validate(tree_valid), schematron.error_log)
valid = schematron.validate(tree_invalid)
self.assertTrue(not valid)
def test_schematron_assertValid(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
self.assertRaises(etree.DocumentInvalid, schematron.assertValid,
tree_invalid)
def test_schematron_error_log(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(len(schematron.error_log), 1,
'expected single error: %s (%s errors)' %
(schematron.error_log, len(schematron.error_log)))
def test_schematron_result_report(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema, store_report=True)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertTrue(
isinstance(schematron.validation_report, etree._ElementTree),
'expected a validation report result tree, got: %s' % schematron.validation_report)
schematron = isoschematron.Schematron(schema, store_report=False)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertTrue(schematron.validation_report is None,
'validation reporting switched off, still: %s' % schematron.validation_report)
def test_schematron_store_schematron(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema)
        self.assertTrue(schematron.schematron is None)
schematron = isoschematron.Schematron(schema, store_schematron=True)
self.assertTrue(isinstance(schematron.schematron, etree._ElementTree),
'expected schematron schema to be stored')
def test_schematron_store_xslt(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron.validator_xslt is None)
schematron = isoschematron.Schematron(schema, store_xslt=True)
self.assertTrue(isinstance(schematron.validator_xslt, etree._ElementTree),
'expected validator xslt to be stored')
def test_schematron_abstract(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:title>iso schematron validation</sch:title>
<sch:ns uri="http://www.w3.org/2001/XMLSchema-instance" prefix="xsi"/>
<sch:ns uri="http://codespeak.net/lxml/objectify/pytype" prefix="py"/>
<!-- of course, these only really make sense when combined with a schema that
ensures datatype xs:dateTime -->
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="$lastchar='Z' or $tz='00:00'">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc_nillable">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="@xsi:nil='true' or ($lastchar='Z' or $tz='00:00')">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern is-a="abstract.dateTime.tz_utc" id="datetime" >
<sch:param name="datetime" value="datetime"/>
</sch:pattern>
<sch:pattern is-a="abstract.dateTime.tz_utc_nillable" id="nillableDatetime">
<sch:param name="datetime" value="nillableDatetime"/>
</sch:pattern>
</sch:schema>
''')
valid_trees = [
self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime xsi:nil="true"/>
</root>
'''),
self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime>2009-12-10T15:21:00Z</nillableDatetime>
</root>
'''),
self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00+00:00</datetime>
<nillableDatetime>2009-12-10T15:21:00-00:00</nillableDatetime>
</root>
'''),
]
schematron = isoschematron.Schematron(schema)
for tree_valid in valid_trees:
self.assertTrue(schematron(tree_valid), schematron.error_log)
tree_invalid = self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T16:21:00+01:00</datetime>
<nillableDatetime>2009-12-10T16:21:00+01:00</nillableDatetime>
</root>
''')
expected = 2
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
tree_invalid = self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime xsi:nil="true"/>
<nillableDatetime>2009-12-10T16:21:00Z</nillableDatetime>
</root>
''')
expected = 1
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
def test_schematron_phases(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:title>iso schematron validation</sch:title>
<sch:ns uri="http://www.w3.org/2001/XMLSchema-instance" prefix="xsi"/>
<sch:ns uri="http://codespeak.net/lxml/objectify/pytype" prefix="py"/>
<sch:phase id="mandatory">
<sch:active pattern="number_of_entries"/>
</sch:phase>
<sch:phase id="datetime_checks">
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<sch:phase id="full">
<sch:active pattern="number_of_entries"/>
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<!-- of course, these only really make sense when combined with a schema that
ensures datatype xs:dateTime -->
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="$lastchar='Z' or $tz='00:00'">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc_nillable">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="@xsi:nil='true' or ($lastchar='Z' or $tz='00:00')">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries test</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="datetime" is-a="abstract.dateTime.tz_utc">
<sch:param name="datetime" value="datetime"/>
</sch:pattern>
<sch:pattern id="nillableDatetime" is-a="abstract.dateTime.tz_utc_nillable">
<sch:param name="datetime" value="nillableDatetime"/>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime xsi:nil="true"/>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<datetime>2009-12-10T16:21:00+01:00</datetime>
<nillableDatetime>2009-12-10T16:21:00+01:00</nillableDatetime>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
# check everything (default phase #ALL)
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase mandatory
schematron = isoschematron.Schematron(
schema, compile_params={'phase': 'mandatory'})
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 1
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase datetime_checks
schematron = isoschematron.Schematron(
schema, compile_params={'phase': 'datetime_checks'})
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 2
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase full
schematron = isoschematron.Schematron(
schema, compile_params={'phase': 'full'})
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
def test_schematron_phases_kwarg(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:title>iso schematron validation</sch:title>
<sch:ns uri="http://www.w3.org/2001/XMLSchema-instance" prefix="xsi"/>
<sch:ns uri="http://codespeak.net/lxml/objectify/pytype" prefix="py"/>
<sch:phase id="mandatory">
<sch:active pattern="number_of_entries"/>
</sch:phase>
<sch:phase id="datetime_checks">
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<sch:phase id="full">
<sch:active pattern="number_of_entries"/>
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<!-- of course, these only really make sense when combined with a schema that
ensures datatype xs:dateTime -->
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="$lastchar='Z' or $tz='00:00'">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc_nillable">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="@xsi:nil='true' or ($lastchar='Z' or $tz='00:00')">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries test</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="datetime" is-a="abstract.dateTime.tz_utc">
<sch:param name="datetime" value="datetime"/>
</sch:pattern>
<sch:pattern id="nillableDatetime" is-a="abstract.dateTime.tz_utc_nillable">
<sch:param name="datetime" value="nillableDatetime"/>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime xsi:nil="true"/>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<datetime>2009-12-10T16:21:00+01:00</datetime>
<nillableDatetime>2009-12-10T16:21:00+01:00</nillableDatetime>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
# check everything (default phase #ALL)
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase mandatory
schematron = isoschematron.Schematron(schema, phase='mandatory')
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 1
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase datetime_checks
schematron = isoschematron.Schematron(schema, phase='datetime_checks')
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 2
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase full
schematron = isoschematron.Schematron(schema, phase='full')
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected, 'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
def test_schematron_xmlschema_embedded(self):
schema = self.parse('''\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<xs:element name="message">
<xs:complexType>
<xs:sequence>
<xs:element name="number_of_entries" type="xs:positiveInteger">
<xs:annotation>
<xs:appinfo>
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</xs:appinfo>
</xs:annotation>
</xs:element>
<xs:element name="entries">
<xs:complexType>
<xs:sequence>
<xs:element name="entry" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>2</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>1</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
xmlschema = etree.XMLSchema(schema)
schematron = isoschematron.Schematron(schema)
# fwiw, this must also be XMLSchema-valid
self.assertTrue(xmlschema(tree_valid), xmlschema.error_log)
self.assertTrue(schematron(tree_valid))
# still schema-valid
self.assertTrue(xmlschema(tree_invalid), xmlschema.error_log)
self.assertTrue(not schematron(tree_invalid))
def test_schematron_relaxng_embedded(self):
schema = self.parse('''\
<grammar xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:sch="http://purl.oclc.org/dsdl/schematron"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<start>
<ref name="message"/>
</start>
<define name="message">
<element name="message">
<element name="number_of_entries">
<!-- RelaxNG can be mixed freely with stuff from other namespaces -->
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<data type="positiveInteger"/>
</element>
<element name="entries">
<zeroOrMore>
<element name="entry"><data type="string"/></element>
</zeroOrMore>
</element>
</element>
</define>
</grammar>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>2</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>1</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
relaxng = etree.RelaxNG(schema)
schematron = isoschematron.Schematron(schema)
# fwiw, this must also be RelaxNG-valid
self.assertTrue(relaxng(tree_valid), relaxng.error_log)
self.assertTrue(schematron(tree_valid))
# still schema-valid
self.assertTrue(relaxng(tree_invalid), relaxng.error_log)
self.assertTrue(not schematron(tree_invalid))
def test_schematron_invalid_args(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
# handing phase as keyword arg will *not* raise the type error
self.assertRaises(TypeError, isoschematron.Schematron, schema,
compile_params={'phase': None})
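
        # For contrast (an illustrative note, not in the original test): the same
        # phase handed directly as a keyword argument is accepted, e.g.
        #   isoschematron.Schematron(schema, phase=None)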
def test_schematron_customization(self):
class MySchematron(isoschematron.Schematron):
def _extract(self, root):
schematron = (root.xpath(
'//sch:schema',
namespaces={'sch': "http://purl.oclc.org/dsdl/schematron"})
or [None])[0]
return schematron
def _include(self, schematron, **kwargs):
raise RuntimeError('inclusion unsupported')
def _expand(self, schematron, **kwargs):
raise RuntimeError('expansion unsupported')
def _validation_errors(self, validationReport):
valid = etree.XPath(
'count(//svrl:successful-report[@flag="critical"])=1',
namespaces={'svrl': isoschematron.SVRL_NS})(
validationReport)
if valid:
return []
error = etree.Element('Error')
error.text = 'missing critical condition report'
return [error]
tree_valid = self.parse('<AAA><BBB/><CCC/></AAA>')
tree_invalid = self.parse('<AAA><BBB/><CCC/><DDD/></AAA>')
schema = self.parse('''\
<schema xmlns="http://www.example.org/yet/another/schema/dialect">
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Open Model</title>
<rule context="AAA">
<report test="BBB" flag="info">BBB element must be present</report>
<report test="CCC" flag="info">CCC element must be present</report>
</rule>
</pattern>
<pattern id="ClosedModel">
      <title>Closed model</title>
<rule context="AAA">
<report test="BBB" flag="info">BBB element must be present</report>
<report test="CCC" flag="info">CCC element must be present</report>
<report test="count(BBB|CCC) = count(*)" flag="critical">Only BBB and CCC children must be present</report>
</rule>
</pattern>
</schema>
</schema>
''')
# check if overridden _include is run
self.assertRaises(RuntimeError, MySchematron, schema, store_report=True)
# check if overridden _expand is run
self.assertRaises(RuntimeError, MySchematron, schema, store_report=True,
include=False)
schema = MySchematron(schema, store_report=True, include=False,
expand=False)
self.assertTrue(schema.validate(tree_valid))
self.assertTrue(not schema.validate(tree_invalid))
#TODO: test xslt parameters for inclusion, expand & compile steps (?)
def test_schematron_fail_on_report(self):
tree_valid = self.parse('<AAA><BBB/><CCC/></AAA>')
tree_invalid = self.parse('<AAA><BBB/><CCC/><DDD/></AAA>')
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Simple Report</title>
<rule context="AAA">
<report test="DDD"> DDD element must not be present</report>
</rule>
</pattern>
</schema>
''')
schema_report = isoschematron.Schematron(
schema, error_finder=isoschematron.Schematron.ASSERTS_AND_REPORTS)
schema_no_report = isoschematron.Schematron(schema)
self.assertTrue(schema_report.validate(tree_valid))
self.assertTrue(not schema_report.validate(tree_invalid))
self.assertTrue(schema_no_report.validate(tree_valid))
self.assertTrue(schema_no_report.validate(tree_invalid))
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeISOSchematronTestCase)])
suite.addTests(doctest.DocTestSuite(isoschematron))
suite.addTests(
[make_doctest('../../../doc/validation.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| bsd-3-clause | 8,032,202,764,286,995,000 | 39.383908 | 252 | 0.632123 | false |
zwffff2015/stock | api/tushareApi.py | 1 | 1084 | import tushare as ts
def getOneSpecifiedPriceHistoryData(code, start, end, priceType='close'):
data = getSpecifiedPriceHistoryData(code, start, end, priceType)
    # print data
    # Return the value of the first entry in the price dict (the {'default': 0}
    # fallback yields 0 when no history data was found).
    for item in data:
        return data[item]
def getSpecifiedPriceHistoryData(code, start, end, priceType='close'):
# print code, start, end
data = ts.get_k_data(code, ktype='D', autype='qfq', start=start, end=end)
if len(data) <= 0:
return dict({'default': 0})
closeList = dict(data[priceType])
return closeList
def getRealTimeData(code, priceType='price'):
data = ts.get_realtime_quotes(code)
# print data
if data is None:
return 0
priceList = data[priceType].values[0]
return priceList
def getSimpleHistoryData(code, start, end):
data = ts.get_k_data(code, ktype='D', autype='qfq', start=start, end=end)
if len(data) <= 0:
return None
return data
def getHistoryData(code, start, end):
data = ts.get_hist_data(code, ktype='D', start=start, end=end)
if len(data) <= 0:
return None
return data
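

# Illustrative usage sketch (an addition, not part of the original module):
# the stock code and date range are made-up examples, and the calls only
# return data when tushare can reach its data source.
if __name__ == '__main__':
    # Daily close prices keyed by row index for the requested window.
    closes = getSpecifiedPriceHistoryData('600000', '2017-01-01', '2017-01-31')
    # Just the first close price from that window.
    first_close = getOneSpecifiedPriceHistoryData('600000', '2017-01-01', '2017-01-31')
    # Latest quoted price for the same code.
    latest = getRealTimeData('600000')
    print(first_close, latest)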
| mit | 638,128,801,533,022,700 | 26.1 | 77 | 0.654059 | false |
bblay/iris | lib/iris/tests/unit/fileformats/grib/load_rules/test_convert.py | 1 | 2726 | # (C) British Crown Copyright 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :func:`iris.fileformats.grib.load_rules.convert`."""
# Import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import mock
from iris.coords import DimCoord
from iris.tests.test_grib_load import TestGribSimple
class Test_GribLevels(TestGribSimple):
def test_grib2_height(self):
grib = self.mock_grib()
grib.edition = 2
grib.typeOfFirstFixedSurface = 103
grib.scaledValueOfFirstFixedSurface = 12345
grib.scaleFactorOfFirstFixedSurface = 0
grib.typeOfSecondFixedSurface = 255
cube = self.cube_from_message(grib)
self.assertEqual(
cube.coord('height'),
DimCoord(12345, standard_name="height", units="m"))
def test_grib2_bounded_height(self):
grib = self.mock_grib()
grib.edition = 2
grib.typeOfFirstFixedSurface = 103
grib.scaledValueOfFirstFixedSurface = 12345
grib.scaleFactorOfFirstFixedSurface = 0
grib.typeOfSecondFixedSurface = 103
grib.scaledValueOfSecondFixedSurface = 54321
grib.scaleFactorOfSecondFixedSurface = 0
cube = self.cube_from_message(grib)
self.assertEqual(
cube.coord('height'),
DimCoord(33333, standard_name="height", units="m",
bounds=[[12345, 54321]]))
def test_grib2_diff_bound_types(self):
grib = self.mock_grib()
grib.edition = 2
grib.typeOfFirstFixedSurface = 103
grib.scaledValueOfFirstFixedSurface = 12345
grib.scaleFactorOfFirstFixedSurface = 0
grib.typeOfSecondFixedSurface = 102
grib.scaledValueOfSecondFixedSurface = 54321
grib.scaleFactorOfSecondFixedSurface = 0
with mock.patch('warnings.warn') as warn:
cube = self.cube_from_message(grib)
warn.assert_called_with(
"Different vertical bound types not yet handled.")
if __name__ == "__main__":
tests.main()
| gpl-3.0 | -3,581,160,497,658,930,000 | 36.342466 | 74 | 0.68562 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/disk_instance_view.py | 1 | 1509 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DiskInstanceView(Model):
"""The instance view of the disk.
:param name: The disk name.
:type name: str
:param encryption_settings: Specifies the encryption settings for the OS
Disk. <br><br> Minimum api-version: 2015-06-15
:type encryption_settings:
list[~azure.mgmt.compute.v2017_03_30.models.DiskEncryptionSettings]
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2017_03_30.models.InstanceViewStatus]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'encryption_settings': {'key': 'encryptionSettings', 'type': '[DiskEncryptionSettings]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, name=None, encryption_settings=None, statuses=None):
super(DiskInstanceView, self).__init__()
self.name = name
self.encryption_settings = encryption_settings
self.statuses = statuses
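

# Illustrative construction sketch (an addition, not part of the generated
# client code); the attribute values below are made-up examples.
#
# view = DiskInstanceView(
#     name='osdisk-0',
#     encryption_settings=None,   # or a list of DiskEncryptionSettings
#     statuses=None)              # or a list of InstanceViewStatus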
| mit | -3,492,721,895,791,761,000 | 37.692308 | 97 | 0.616302 | false |
IBM-Security/ibmsecurity | ibmsecurity/isam/base/lmi.py | 1 | 2757 | import logging
import time
from ibmsecurity.appliance.ibmappliance import IBMError
logger = logging.getLogger(__name__)
def restart(isamAppliance, check_mode=False, force=False):
"""
Restart LMI
"""
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post("Restarting LMI", "/restarts/restart_server", {})
def get(isamAppliance, check_mode=False, force=False):
"""
Get LMI Status
"""
# Be sure to ignore server error
return isamAppliance.invoke_get("Get LMI Status", "/lmi", ignore_error=True)
def await_startup(isamAppliance, wait_time=300, check_freq=5, start_time=None, check_mode=False, force=False):
"""
    Wait for the appliance to boot up or the LMI to restart. Polling the LMI
    until it responds with a new start time is the most reliable check the
    REST API offers.

    check_freq: frequency (in seconds) at which this routine checks whether the
                server is up.
    start_time: LMI start time to compare against; ideally captured before the
                restart request is sent to the LMI.
    wait_time:  maximum time (in seconds) to wait for the appliance/LMI to
                respond and report a different start time.
    Note: this function will not work unless first steps are completed.
"""
# Get the current start_time if not provided
if start_time is None:
ret_obj = get(isamAppliance)
start_time = ret_obj['data'][0]['start_time']
sec = 0
warnings = []
# Now check if it is up and running
while 1:
ret_obj = get(isamAppliance)
if ret_obj['rc'] == 0 and isinstance(ret_obj['data'], list) and len(ret_obj['data']) > 0 and 'start_time' in \
ret_obj['data'][0] and ret_obj['data'][0]['start_time'] != start_time:
logger.info("Server is responding and has a different start time!")
return isamAppliance.create_return_object(warnings=warnings)
else:
time.sleep(check_freq)
sec += check_freq
logger.debug(
"Server is not responding yet. Waited for {0} secs, next check in {1} secs.".format(sec, check_freq))
if sec >= wait_time:
warnings.append("The LMI restart not detected or completed, exiting... after {0} seconds".format(sec))
break
return isamAppliance.create_return_object(warnings=warnings)
def restart_and_wait(isamAppliance, wait_time=300, check_freq=5, check_mode=False, force=False):
ret_obj = get(isamAppliance)
_start_time = ret_obj['data'][0]['start_time']
restart(isamAppliance, check_mode, force)
return await_startup(isamAppliance, wait_time=wait_time, check_freq=check_freq, start_time=_start_time,
check_mode=False, force=False)
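

# Illustrative usage sketch (an addition, not part of the original module):
# the hostname and credentials are made-up, and constructing the appliance
# object this way is an assumption based on the usual ibmsecurity pattern.
if __name__ == '__main__':
    from ibmsecurity.appliance.isamappliance import ISAMAppliance
    from ibmsecurity.user.applianceuser import ApplianceUser

    isam_server = ISAMAppliance(hostname="isam.example.com",
                                user=ApplianceUser(username="admin@local", password="secret"))
    # Restart the LMI and block until it reports a new start time (or times out).
    result = restart_and_wait(isam_server, wait_time=300, check_freq=5)
    print(result)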
| apache-2.0 | -7,524,142,561,415,226,000 | 34.346154 | 118 | 0.654697 | false |
runt18/nupic | tests/integration/nupic/opf/opf_checkpoint_test/opf_checkpoint_test.py | 1 | 17683 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import os
import shutil
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.opf.experiment_runner import runExperiment, getCheckpointParentDir
from nupic.support import initLogging
from nupic.support.unittesthelpers.testcasebase import (
unittest, TestCaseBase as HelperTestCaseBase)
try:
import capnp
except ImportError:
capnp = None
_EXPERIMENT_BASE = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "experiments")
class MyTestCaseBase(HelperTestCaseBase):
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
def compareOPFPredictionFiles(self, path1, path2, temporal,
maxMismatches=None):
""" Compare temporal or non-temporal predictions for the given experiment
that just finished executing
    path1:              path to the first OPF prediction CSV file to compare
    path2:              path to the second OPF prediction CSV file to compare
    temporal:           True if these are temporal predictions, False if
                        non-temporal
maxMismatches: Maximum number of row mismatches to report before
terminating the comparison; None means: report all
mismatches
Returns: True if equal; False if different
"""
experimentLabel = "{0!s} prediction comparison".format( \
("Temporal" if temporal else "Non-Temporal"))
print "{0!s}: Performing comparison of OPF prediction CSV files {1!r} and {2!r}".format(
experimentLabel, path1, path2)
# Open CSV readers
#
self.assertTrue(
os.path.isfile(path1),
msg="OPF prediction file path1 {0!s} doesn't exist or is not a file".format((
path1)))
(opf1CsvReader, opf1FieldNames) = self._openOpfPredictionCsvFile(path1)
self.assertTrue(
os.path.isfile(path2),
msg="OPF prediction file path2 {0!s} doesn't exist or is not a file".format((
path2)))
(opf2CsvReader, opf2FieldNames) = self._openOpfPredictionCsvFile(path2)
self.assertEqual(len(opf1FieldNames), len(opf2FieldNames),
("%s: Mismatch in number of prediction columns: "
"opf1: %s, opf2: %s") % (
experimentLabel, len(opf1FieldNames),
len(opf2FieldNames)))
self.assertEqual(opf1FieldNames, opf2FieldNames)
# Each data row is assumed to be arranged as follows:
#
# reset, actual-field1, prediction-field1, actual-field2,
# prediction-field2, etc.
#
# Presently, we only compare the predicted values that need to match.
opf1EOF = False
opf2EOF = False
opf1CurrentDataRowIndex = -1
opf2CurrentDataRowIndex = -1
if temporal:
# Skip the first data rows for temporal tests, since they don't contain
# prediction values.
_skipOpf1Row = opf1CsvReader.next()
opf1CurrentDataRowIndex += 1
_skipOpf2Row = opf2CsvReader.next()
opf2CurrentDataRowIndex += 1
fieldsIndexesToCompare = tuple(xrange(2, len(opf1FieldNames), 2))
self.assertGreater(len(fieldsIndexesToCompare), 0)
print ("%s: Comparing fields at indexes: %s; "
"opf1Labels: %s; opf2Labels: %s") % (
experimentLabel,
fieldsIndexesToCompare,
[opf1FieldNames[i] for i in fieldsIndexesToCompare],
[opf2FieldNames[i] for i in fieldsIndexesToCompare])
for i in fieldsIndexesToCompare:
self.assertTrue(opf1FieldNames[i].endswith("predicted"),
msg="{0!r} doesn't end with 'predicted'".format(opf1FieldNames[i]))
self.assertTrue(opf2FieldNames[i].endswith("predicted"),
msg="{0!r} doesn't end with 'predicted'".format(opf2FieldNames[i]))
mismatchCount = 0
while True:
try:
opf1Row = opf1CsvReader.next()
except StopIteration:
opf1EOF = True
else:
opf1CurrentDataRowIndex += 1
try:
opf2Row = opf2CsvReader.next()
except StopIteration:
opf2EOF = True
else:
opf2CurrentDataRowIndex += 1
if opf1EOF != opf2EOF:
print ("%s: ERROR: Data row counts mismatch: "
"opf1EOF: %s, opf1CurrentDataRowIndex: %s; "
"opf2EOF: %s, opf2CurrentDataRowIndex: %s") % (
experimentLabel,
opf1EOF, opf1CurrentDataRowIndex,
opf2EOF, opf2CurrentDataRowIndex)
return False
if opf1EOF and opf2EOF:
# Done with both prediction datasets
break
# Compare the rows
self.assertEqual(len(opf1Row), len(opf2Row))
for i in fieldsIndexesToCompare:
opf1FloatValue = float(opf1Row[i])
opf2FloatValue = float(opf2Row[i])
if opf1FloatValue != opf2FloatValue:
mismatchCount += 1
print ("%s: ERROR: mismatch in "
"prediction values: dataRowIndex: %s, fieldIndex: %s (%r); "
"opf1FieldValue: <%s>, opf2FieldValue: <%s>; "
"opf1FieldValueAsFloat: %s, opf2FieldValueAsFloat: %s; "
"opf1Row: %s, opf2Row: %s") % (
experimentLabel,
opf1CurrentDataRowIndex,
i,
opf1FieldNames[i],
opf1Row[i],
opf2Row[i],
opf1FloatValue,
opf2FloatValue,
opf1Row,
opf2Row)
# Stop comparison if we exceeded the allowed number of mismatches
if maxMismatches is not None and mismatchCount >= maxMismatches:
break
if mismatchCount != 0:
print "{0!s}: ERROR: there were {1!s} mismatches between {2!r} and {3!r}".format(
experimentLabel, mismatchCount, path1, path2)
return False
# A difference here would indicate a logic error in this method
self.assertEqual(opf1CurrentDataRowIndex, opf2CurrentDataRowIndex)
print ("%s: Comparison of predictions "
"completed: OK; number of prediction rows examined: %s; "
"path1: %r; path2: %r") % \
(experimentLabel,
opf1CurrentDataRowIndex + 1,
path1,
path2)
return True
def _openOpfPredictionCsvFile(self, filepath):
""" Open an OPF prediction CSV file and advance it to the first data row
Returns: the tuple (csvReader, fieldNames), where 'csvReader' is the
csv reader object, and 'fieldNames' is a sequence of field
names.
"""
# Open the OPF prediction file
csvReader = self._openCsvFile(filepath)
# Advance it past the three NUPIC header lines
names = csvReader.next()
_types = csvReader.next()
_specials = csvReader.next()
return (csvReader, names)
@staticmethod
def _openCsvFile(filepath):
# We'll be operating on csvs with arbitrarily long fields
size = 2**27
csv.field_size_limit(size)
rawFileObj = open(filepath, 'r')
csvReader = csv.reader(rawFileObj, dialect='excel')
return csvReader
def _createExperimentArgs(self, experimentDir,
newSerialization=False,
additionalArgs=()):
args = []
args.append(experimentDir)
if newSerialization:
args.append("--newSerialization")
args += additionalArgs
return args
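
  # Worked example (an illustrative addition):
  #   _createExperimentArgs('/path/to/exp', newSerialization=True,
  #                         additionalArgs=['--load=DefaultTask'])
  #   returns ['/path/to/exp', '--newSerialization', '--load=DefaultTask']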
def _testSamePredictions(self, experiment, predSteps, checkpointAt,
predictionsFilename, additionalFields=None,
newSerialization=False):
""" Test that we get the same predictions out from the following two
scenarios:
a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
a, followed by b: Run the network for 'a' iterations, save it, load it
back in, then run for 'b' iterations.
Parameters:
-----------------------------------------------------------------------
experiment: base directory of the experiment. This directory should
contain the following:
base.py
a_plus_b/description.py
a/description.py
b/description.py
The sub-directory description files should import the
base.py and only change the first and last record used
from the data file.
predSteps: Number of steps ahead predictions are for
checkpointAt: Number of iterations that 'a' runs for.
IMPORTANT: This must match the number of records that
a/description.py runs for - it is NOT dynamically stuffed into
the a/description.py.
predictionsFilename: The name of the predictions file that the OPF
generates for this experiment (for example
'DefaulTask.NontemporalMultiStep.predictionLog.csv')
newSerialization: Whether to use new capnproto serialization.
"""
# Get the 3 sub-experiment directories
aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")
# Run a+b
args = self._createExperimentArgs(aPlusBExpDir,
newSerialization=newSerialization)
_aPlusBExp = runExperiment(args)
# Run a, the copy the saved checkpoint into the b directory
args = self._createExperimentArgs(aExpDir,
newSerialization=newSerialization)
_aExp = runExperiment(args)
if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
dst=os.path.join(bExpDir, 'savedmodels'))
args = self._createExperimentArgs(bExpDir,
newSerialization=newSerialization,
additionalArgs=['--load=DefaultTask'])
_bExp = runExperiment(args)
# Now, compare the predictions at the end of a+b to those in b.
aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
predictionsFilename))
bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
predictionsFilename))
colNames = [x[0] for x in aPlusBPred.getFields()]
actValueColIdx = colNames.index('multiStepPredictions.actual')
predValueColIdx = colNames.index('multiStepPredictions.{0:d}'.format((predSteps)))
# Skip past the 'a' records in aPlusB
for i in range(checkpointAt):
aPlusBPred.next()
# Now, read through the records that don't have predictions yet
for i in range(predSteps):
aPlusBPred.next()
bPred.next()
# Now, compare predictions in the two files
rowIdx = checkpointAt + predSteps + 4 - 1
epsilon = 0.0001
while True:
rowIdx += 1
try:
rowAPB = aPlusBPred.next()
rowB = bPred.next()
# Compare actuals
self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
"Mismatch in actual values: row %d of a+b has %s and row %d of "
"b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
rowB[actValueColIdx]))
# Compare predictions, within nearest epsilon
predAPB = eval(rowAPB[predValueColIdx])
predB = eval(rowB[predValueColIdx])
# Sort with highest probabilities first
predAPB = [(a, b) for b, a in predAPB.items()]
predB = [(a, b) for b, a in predB.items()]
predAPB.sort(reverse=True)
predB.sort(reverse=True)
if additionalFields is not None:
for additionalField in additionalFields:
fieldIdx = colNames.index(additionalField)
self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
"Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
" and row %d of b has value: %s" % \
(additionalField, rowIdx, rowAPB[fieldIdx],
rowIdx-checkpointAt, rowB[fieldIdx]))
self.assertEqual(len(predAPB), len(predB),
"Mismatch in predicted values: row %d of a+b has %d predictions: "
"\n (%s) and row %d of b has %d predictions:\n (%s)" % \
(rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
predB))
for i in range(len(predAPB)):
(aProb, aValue) = predAPB[i]
(bProb, bValue) = predB[i]
self.assertLess(abs(aValue-bValue), epsilon,
"Mismatch in predicted values: row %d of a+b predicts value %s "
"and row %d of b predicts %s" % (rowIdx, aValue,
rowIdx-checkpointAt, bValue))
self.assertLess(abs(aProb-bProb), epsilon,
"Mismatch in probabilities: row %d of a+b predicts %s with "
"probability %s and row %d of b predicts %s with probability %s" \
% (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))
except StopIteration:
break
# clean up model checkpoint directories
shutil.rmtree(getCheckpointParentDir(aExpDir))
shutil.rmtree(getCheckpointParentDir(bExpDir))
shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))
print "Predictions match!"
@staticmethod
def _testBackwardsCompatibility(experiment, checkpointName):
""" Test that we can load in a checkpoint saved by an earlier version of
the OPF.
Parameters:
-----------------------------------------------------------------------
experiment: Directory of the experiment.
checkpointName: which checkpoint to verify
"""
# Get the experiment directories
expDir = os.path.join(_EXPERIMENT_BASE, experiment)
# Copy the pertinent checkpoint
if os.path.exists(os.path.join(expDir, 'savedmodels')):
shutil.rmtree(os.path.join(expDir, 'savedmodels'))
shutil.copytree(src=os.path.join(expDir, checkpointName),
dst=os.path.join(expDir, 'savedmodels'))
# Run it from the checkpoint
_aPlusBExp = runExperiment(args=[expDir, '--load=DefaultTask',
'--noCheckpoint'])
class PositiveTests(MyTestCaseBase):
def test_NonTemporalMultiStep(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(
experiment="non_temporal_multi_step", predSteps=24, checkpointAt=250,
predictionsFilename=
"DefaultTask.NontemporalMultiStep.predictionLog.csv")
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def test_NonTemporalMultiStepNew(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
Uses new capnproto serialization.
"""
self._testSamePredictions(
experiment="non_temporal_multi_step", predSteps=24, checkpointAt=250,
predictionsFilename=
"DefaultTask.NontemporalMultiStep.predictionLog.csv",
newSerialization=True)
@unittest.skip("Currently Fails: NUP-1864")
def test_TemporalMultiStep(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(experiment="temporal_multi_step", predSteps=24,
checkpointAt=250,
predictionsFilename='DefaultTask.TemporalMultiStep.predictionLog.csv')
@unittest.skip("Currently Fails: NUP-1864")
def test_TemporalAnomaly(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(experiment="temporal_anomaly", predSteps=1,
checkpointAt=250,
predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv',
additionalFields=['anomalyScore'])
def test_BackwardsCompatibility(self):
""" Test that we can load in a checkpoint saved by an earlier version of
the OPF.
"""
self._testBackwardsCompatibility(
os.path.join('backwards_compatibility', 'a'),
'savedmodels_2012-10-05')
if __name__ == "__main__":
initLogging(verbose=True)
unittest.main()
| agpl-3.0 | 1,837,218,215,731,194,600 | 34.366 | 92 | 0.622745 | false |
avelino/pycorreios | setup.py | 1 | 1456 | from setuptools import setup
import os
DESCRIPTION = "API for Brazilian Correios in Python"
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.md').read()
except:
pass
def get_version(version_tuple):
version = '%s.%s' % (version_tuple[0], version_tuple[1])
if version_tuple[2]:
version = '%s.%s' % (version, version_tuple[2])
return version
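
# Worked examples (an illustrative addition):
#   get_version((0, 1, 1)) -> '0.1.1'
#   get_version((0, 1, 0)) -> '0.1'   (a falsy third component is dropped)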
# Dirty hack to get version number from pycorreios/__init__.py - we can't
# import it directly because its dependencies may not be installed until this
# file is read
init = os.path.join(os.path.dirname(__file__), 'pycorreios', '__init__.py')
version_line = list(filter(lambda l: l.startswith('VERSION'), open(init)))[0]
VERSION = get_version(eval(version_line.split('=')[-1]))
print(VERSION)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
setup(name='pycorreios',
version=VERSION,
packages=[
'test',
'pycorreios',
],
author='Thiago Avelino',
author_email='[email protected]',
url='https://github.com/avelino/pycorreios/',
license='MIT',
include_package_data=True,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=CLASSIFIERS,
test_suite='test',
)
| mit | 2,467,762,093,427,377,000 | 27 | 77 | 0.64011 | false |
kstateome/django-cas | cas/backends.py | 1 | 7139 | import logging
from xml.dom import minidom
import time
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import get_user_model
from cas.exceptions import CasTicketException
from cas.models import Tgt, PgtIOU
from cas.utils import cas_response_callbacks
__all__ = ['CASBackend']
logger = logging.getLogger(__name__)
def _verify_cas1(ticket, service):
"""
Verifies CAS 1.0 authentication ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == 'yes':
return page.readline().strip()
else:
return None
finally:
page.close()
def _verify_cas2(ticket, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: ticket
:param: service
"""
return _internal_verify_cas(ticket, service, 'proxyValidate')
def _verify_cas3(ticket, service):
return _internal_verify_cas(ticket, service, 'p3/proxyValidate')
def _internal_verify_cas(ticket, service, suffix):
"""Verifies CAS 2.0 and 3.0 XML-based authentication ticket.
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
if settings.CAS_PROXY_CALLBACK:
params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' +
urlencode(params))
page = urlopen(url)
username = None
try:
response = page.read()
tree = ElementTree.fromstring(response)
document = minidom.parseString(response)
if tree[0].tag.endswith('authenticationSuccess'):
if settings.CAS_RESPONSE_CALLBACKS:
cas_response_callbacks(tree)
username = tree[0][0].text
# The CAS Response includes the PGT_IOU, which we use to lookup the PGT/TGT.
pgt_element = document.getElementsByTagName('cas:proxyGrantingTicket')
if pgt_element:
pgt_iou_token = pgt_element[0].firstChild.nodeValue
try:
pgt_iou_mapping = _get_pgt_iou_mapping(pgt_iou_token)
except Exception as e:
logger.warning('Failed to do proxy authentication. %s' % e)
else:
try:
tgt = Tgt.objects.get(username=username)
except Tgt.DoesNotExist:
Tgt.objects.create(username=username, tgt=pgt_iou_mapping.tgt)
logger.info('Creating TGT ticket for {user}'.format(user=username))
else:
tgt.tgt = pgt_iou_mapping.tgt
tgt.save()
pgt_iou_mapping.delete()
else:
failure = document.getElementsByTagName('cas:authenticationFailure')
if failure:
                logger.warning('Authentication failed from CAS server: %s',
                               failure[0].firstChild.nodeValue)
except Exception as e:
logger.error('Failed to verify CAS authentication: {message}'.format(
message=e
))
finally:
page.close()
return username
def verify_proxy_ticket(ticket, service):
"""
Verifies CAS 2.0+ XML-based proxy ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
username = tree[0][0].text
proxies = []
if len(tree[0]) > 1:
for element in tree[0][1]:
proxies.append(element.text)
return {"username": username, "proxies": proxies}
else:
return None
finally:
page.close()
_PROTOCOLS = {'1': _verify_cas1, '2': _verify_cas2, '3': _verify_cas3}
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
_verify = _PROTOCOLS[settings.CAS_VERSION]
def _get_pgt_iou_mapping(pgt_iou):
"""
Returns the instance of PgtIou -> Pgt mapping which is associated with the provided pgt_iou token.
Because this mapping is created in a different request which the CAS server makes to the proxy callback url, the
PGTIOU->PGT mapping might not be found yet in the database by this calling thread, hence the attempt to get
the ticket is retried for up to 5 seconds.
This should be handled some better way.
Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False
:param: pgt_iou
"""
retries_left = 5
if not settings.CAS_PGT_FETCH_WAIT:
retries_left = 1
while retries_left:
try:
return PgtIOU.objects.get(pgtIou=pgt_iou)
except PgtIOU.DoesNotExist:
if settings.CAS_PGT_FETCH_WAIT:
time.sleep(1)
retries_left -= 1
logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
tries=retries_left
))
raise CasTicketException("Could not find pgt for pgtIou %s" % pgt_iou)
class CASBackend(object):
"""
CAS authentication backend
"""
supports_object_permissions = False
supports_inactive_user = False
def authenticate(self, request, ticket, service):
"""
Verifies CAS ticket and gets or creates User object
NB: Use of PT to identify proxy
"""
User = get_user_model()
username = _verify(ticket, service)
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
# user will have an "unusable" password
if settings.CAS_AUTO_CREATE_USER:
user = User.objects.create_user(username, '')
user.save()
else:
user = None
return user
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
"""
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
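

# Illustrative settings sketch (an addition, not part of the original module):
# the URL and values below are examples only; the setting names are the ones
# this module reads via django.conf.settings.
#
# AUTHENTICATION_BACKENDS = ['cas.backends.CASBackend']
# CAS_SERVER_URL = 'https://cas.example.com/cas/'
# CAS_VERSION = '2'              # '1', '2' or '3', see _PROTOCOLS above
# CAS_PROXY_CALLBACK = None
# CAS_RESPONSE_CALLBACKS = []
# CAS_AUTO_CREATE_USER = True
# CAS_PGT_FETCH_WAIT = True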
| mit | -9,150,461,524,364,492,000 | 27.217391 | 118 | 0.600364 | false |
macosforge/ccs-calendarserver | txweb2/filter/range.py | 1 | 4686 | # -*- test-case-name: txweb2.test.test_stream -*-
import os
import time
from txweb2 import http, http_headers, responsecode, stream
# Some starts at writing a response filter to handle request ranges.
class UnsatisfiableRangeRequest(Exception):
pass
def canonicalizeRange((start, end), size):
"""Return canonicalized (start, end) or raises UnsatisfiableRangeRequest
exception.
NOTE: end is the last byte *inclusive*, which is not the usual convention
in python! Be very careful! A range of 0,1 should return 2 bytes."""
# handle "-500" ranges
if start is None:
start = max(0, size - end)
end = None
if end is None or end >= size:
end = size - 1
if start >= size:
raise UnsatisfiableRangeRequest
return start, end
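

# Worked examples (an illustrative addition) of the canonicalization rules
# described in the docstring, for a resource of size 10:
#
#   canonicalizeRange((0, 1), 10)     -> (0, 1)   # two bytes: offsets 0 and 1
#   canonicalizeRange((5, None), 10)  -> (5, 9)   # open-ended "5-" range
#   canonicalizeRange((None, 3), 10)  -> (7, 9)   # suffix "-3" range (last 3 bytes)
#   canonicalizeRange((12, 20), 10)   -> raises UnsatisfiableRangeRequest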
def makeUnsatisfiable(request, oldresponse):
if request.headers.hasHeader('if-range'):
return oldresponse # Return resource instead of error
response = http.Response(responsecode.REQUESTED_RANGE_NOT_SATISFIABLE)
response.headers.setHeader("content-range", ('bytes', None, None, oldresponse.stream.length))
return response
def makeSegment(inputStream, lastOffset, start, end):
offset = start - lastOffset
length = end + 1 - start
if offset != 0:
before, inputStream = inputStream.split(offset)
before.close()
return inputStream.split(length)
def rangefilter(request, oldresponse):
if oldresponse.stream is None:
return oldresponse
size = oldresponse.stream.length
if size is None:
# Does not deal with indeterminate length outputs
return oldresponse
oldresponse.headers.setHeader('accept-ranges', ('bytes',))
rangespec = request.headers.getHeader('range')
# If we've got a range header and the If-Range header check passes, and
# the range type is bytes, do a partial response.
if (
rangespec is not None and http.checkIfRange(request, oldresponse) and
rangespec[0] == 'bytes'
):
# If it's a single range, return a simple response
if len(rangespec[1]) == 1:
try:
start, end = canonicalizeRange(rangespec[1][0], size)
except UnsatisfiableRangeRequest:
return makeUnsatisfiable(request, oldresponse)
response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers)
response.headers.setHeader('content-range', ('bytes', start, end, size))
content, after = makeSegment(oldresponse.stream, 0, start, end)
after.close()
response.stream = content
return response
else:
# Return a multipart/byteranges response
lastOffset = -1
offsetList = []
for arange in rangespec[1]:
try:
start, end = canonicalizeRange(arange, size)
except UnsatisfiableRangeRequest:
continue
if start <= lastOffset:
# Stupid client asking for out-of-order or overlapping ranges, PUNT!
return oldresponse
offsetList.append((start, end))
lastOffset = end
if not offsetList:
return makeUnsatisfiable(request, oldresponse)
content_type = oldresponse.headers.getRawHeaders('content-type')
boundary = "%x%x" % (int(time.time() * 1000000), os.getpid())
response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers)
response.headers.setHeader(
'content-type',
http_headers.MimeType('multipart', 'byteranges',
[('boundary', boundary)])
)
response.stream = out = stream.CompoundStream()
lastOffset = 0
origStream = oldresponse.stream
headerString = "\r\n--%s" % boundary
if len(content_type) == 1:
headerString += '\r\nContent-Type: %s' % content_type[0]
headerString += "\r\nContent-Range: %s\r\n\r\n"
for start, end in offsetList:
out.addStream(
headerString %
http_headers.generateContentRange(('bytes', start, end, size))
)
content, origStream = makeSegment(origStream, lastOffset, start, end)
lastOffset = end + 1
out.addStream(content)
origStream.close()
out.addStream("\r\n--%s--\r\n" % boundary)
return response
else:
return oldresponse
__all__ = ['rangefilter']
| apache-2.0 | 1,989,880,107,851,133,000 | 32.71223 | 97 | 0.597525 | false |
Brainiq7/Ananse | ananse_dl/extractor/gdcvault.py | 1 | 6407 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
class GDCVaultIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
'md5': '7ce8388f544c88b7ac11c7ab1b593704',
'info_dict': {
'id': '1019721',
'ext': 'mp4',
'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
}
},
{
'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
'info_dict': {
'id': '1015683',
'ext': 'flv',
'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
},
'params': {
'skip_download': True, # Requires rtmpdump
}
},
# {
# 'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or',
# 'md5': 'a5eb77996ef82118afbbe8e48731b98e',
# 'info_dict': {
# 'id': '1015301',
# 'ext': 'flv',
# 'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment',
# }
# }
]
def _parse_mp4(self, xml_description):
video_formats = []
mp4_video = xml_description.find('./metadata/mp4video')
if mp4_video is None:
return None
mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video.text)
video_root = mobj.group('root')
formats = xml_description.findall('./metadata/MBRVideos/MBRVideo')
for format in formats:
mobj = re.match(r'mp4\:(?P<path>.*)', format.find('streamName').text)
url = video_root + mobj.group('path')
vbr = format.find('bitrate').text
video_formats.append({
'url': url,
'vbr': int(vbr),
})
return video_formats
def _parse_flv(self, xml_description):
video_formats = []
akami_url = xml_description.find('./metadata/akamaiHost').text
slide_video_path = xml_description.find('./metadata/slideVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + slide_video_path,
'format_note': 'slide deck video',
'quality': -2,
'preference': -2,
'format_id': 'slides',
})
speaker_video_path = xml_description.find('./metadata/speakerVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + speaker_video_path,
'format_note': 'speaker video',
'quality': -1,
'preference': -1,
'format_id': 'speaker',
})
return video_formats
def _login(self, webpage_url, video_id):
(username, password) = self._get_login_info()
if username is None or password is None:
self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
return None
mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
login_url = mobj.group('root_url') + 'api/login.php'
logout_url = mobj.group('root_url') + 'logout'
login_form = {
'email': username,
'password': password,
}
request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(request, video_id, 'Logging in')
start_page = self._download_webpage(webpage_url, video_id, 'Getting authenticated video page')
self._download_webpage(logout_url, video_id, 'Logging out')
return start_page
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage_url = 'http://www.gdcvault.com/play/' + video_id
start_page = self._download_webpage(webpage_url, video_id)
direct_url = self._search_regex(
r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
start_page, 'url', default=None)
if direct_url:
video_url = 'http://www.gdcvault.com/' + direct_url
title = self._html_search_regex(
r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
start_page, 'title')
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': title,
}
xml_root = self._html_search_regex(
r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>',
start_page, 'xml root', default=None)
if xml_root is None:
# Probably need to authenticate
login_res = self._login(webpage_url, video_id)
if login_res is None:
self.report_warning('Could not login.')
else:
start_page = login_res
# Grab the url from the authenticated page
xml_root = self._html_search_regex(
r'<iframe src="(.*?)player.html.*?".*?</iframe>',
start_page, 'xml root')
xml_name = self._html_search_regex(
r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
start_page, 'xml filename', default=None)
if xml_name is None:
# Fallback to the older format
xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
xml_decription_url = xml_root + 'xml/' + xml_name
xml_description = self._download_xml(xml_decription_url, video_id)
video_title = xml_description.find('./metadata/title').text
video_formats = self._parse_mp4(xml_description)
if video_formats is None:
video_formats = self._parse_flv(xml_description)
return {
'id': video_id,
'title': video_title,
'formats': video_formats,
}
| unlicense | 8,123,906,485,022,277,000 | 37.596386 | 146 | 0.524739 | false |
Mandrilux/GOC_2017 | api/polydevs/parking/views.py | 1 | 1100 | from django.shortcuts import get_object_or_404
from rest_framework import viewsets, filters
from parking.models import Parking
from parking.serializers import ParkingSerializer, ParkingDetailSerializer
from parking.filters import FilterParking
class MultipleFieldLookupMixin(object):
def get_object(self):
queryset = self.get_queryset()
queryset = self.filter_queryset(queryset)
filter = {}
for field in self.lookup_fields:
if self.kwargs[field]:
filter[field] = self.kwargs[field]
return get_object_or_404(queryset, **filter)
class ParkingViewSet(MultipleFieldLookupMixin,
viewsets.ModelViewSet):
queryset = Parking.objects.all()
serializer_class = ParkingSerializer
filter_backends = (filters.DjangoFilterBackend,)
lookup_fields = ('lon', 'lat',)
filter_class = FilterParking
def get_serializer_class(self):
        if self.action in ('retrieve', 'update', 'delete'):
return ParkingDetailSerializer
return ParkingSerializer
| agpl-3.0 | 6,739,548,767,564,175,000 | 35.666667 | 91 | 0.7 | false |
meltwater/proxymatic | src/proxymatic/discovery/registrator.py | 1 | 2745 | import logging
import json
import socket
import traceback
import urllib2
from urlparse import urlparse
from proxymatic.services import Server, Service
from proxymatic import util
class RegistratorEtcdDiscovery(object):
def __init__(self, backend, url):
self._backend = backend
self._url = urlparse(url)
self._healthy = False
self.priority = 5
def isHealthy(self):
return self._healthy
def start(self):
def action():
# Fetch all registered service instances
geturl = 'http://%s/v2/keys%s?recursive=true' % (self._url.netloc, self._url.path)
logging.debug("GET registrator services from %s", geturl)
response = urllib2.urlopen(geturl)
waitIndex = int(response.info().getheader('X-Etcd-Index')) + 1
services = self._parse(response.read())
self._backend.update(self, services)
logging.info("Refreshed services from registrator store %s", self._url.geturl())
# Signal that we're up and running
self._healthy = True
# Long poll for updates
pollurl = 'http://%s/v2/keys%s?wait=true&recursive=true&waitIndex=%s' % (self._url.netloc, self._url.path, waitIndex)
urllib2.urlopen(pollurl).read()
# Run action() in thread with retry on error
util.run(action, "etcd error from '" + self._url.geturl() + "': %s")
def _parse(self, content):
services = {}
state = json.loads(content)
for node in util.rget(state, 'node', 'nodes') or []:
for backend in util.rget(node, 'nodes') or []:
try:
parts = backend['key'].split(':')
port = int(parts[2])
protocol = parts[3] if len(parts) > 3 else 'tcp'
key = '%s/%s' % (port, protocol.lower())
# Resolve hostnames since HAproxy wants IP addresses
endpoint = backend['value'].split(':')
ipaddr = socket.gethostbyname(endpoint[0])
server = Server(ipaddr, endpoint[1], endpoint[0])
# Append backend to service
if key not in services:
name = node['key'].split('/')[-1]
services[key] = Service(name, 'registrator:%s' % self._url.geturl(), port, protocol)
services[key] = services[key].addServer(server)
except Exception as e:
logging.warn("Failed to parse service %s backend %s/%s: %s", node['key'], backend['key'], backend['value'], str(e))
logging.debug(traceback.format_exc())
return services
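# Illustrative sketch, not part of the original module: a minimal decoded etcd
# response in the shape _parse() expects. Every name, address and port below is
# made up; the parser only relies on the "...:<port>[:<protocol>]" suffix of a
# backend key and the "<host>:<port>" format of a backend value.
_EXAMPLE_ETCD_STATE = {
    'node': {'nodes': [
        {'key': '/registrator/web-80', 'nodes': [
            {'key': '/registrator/web-80/host1:web:80', 'value': '10.0.0.5:31001'},
        ]},
    ]},
}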
| mit | -343,665,641,712,587,600 | 39.970149 | 135 | 0.55592 | false |
CumulusNetworks/pyjeet | pyjeet/logcontainer.py | 1 | 5027 | #
# Copyright 2014 Cumulus Networks, Inc. All rights reserved.
# Author: Alexandre Renard <[email protected]>
#
# pyjeet --
# the distributed log analysis tool for networking troubleshooting.
#
from abc import ABCMeta, abstractmethod
from file import *
from network_obj import *
class LogContainer:
__metaclass__ = ABCMeta
def __init__(self):
self.files = []
self.logs = []
self.interfaces = []
self.selected_interfaces = []
self.bridges = []
self.selected_bridges = []
@abstractmethod
def get_interfaces_files(self, standalone):
'''
Return the config files needed to configure interfaces
'''
pass
def set_selected_interfaces(self, selected_interfaces, normalizer=None):
#select user interfaces with user input in the loaded interfaces
if not self.interfaces and normalizer:
self.load_interfaces(normalizer)
        # if no particular interface is chosen, select them all via the "all" keyword
if len(selected_interfaces) and "all" in selected_interfaces:
self.selected_interfaces = self.interfaces
else:
self.selected_interfaces = self.get_interfaces_from_names(selected_interfaces)
def set_selected_bridges(self, selected_bridges, normalizer=None):
#select bridges with user input in the loaded bridges
if not self.bridges and normalizer:
self.load_bridges(normalizer)
        # if no particular bridge is chosen, select them all via the "all" keyword
if len(selected_bridges) and "all" in selected_bridges:
self.selected_bridges = self.bridges
else:
self.selected_bridges = self.get_bridges_from_names(selected_bridges)
def clear_selected_interfaces(self):
self.selected_interfaces = []
def clear_selected_bridges(self):
self.selected_bridges = []
def load_interfaces(self, normalizer, standalone=False):
#loads all interfaces from interface conf files
files_info = self.get_interfaces_files(standalone)
for info in files_info:
for data in File(info['name'], info['content']).normalize(normalizer, is_log=False,debug_context=True).data:
if not self.find_interface(data):
if not data.has_key('linux_interface'):
continue
self.interfaces.append(Interface(data))
return self
def load_bridges(self, standalone=False):
#loads all bridges from brctl conf files
brctl_data = self.get_bridges_files(standalone)
for line in brctl_data:
line = line.split()
if len(line) == 1:
# if vlan interface given take the whole interface
inf = self.get_if_object_from_name(line[0].split('.')[-1])
if inf is not None:
inf.set_bridge(self.bridges[-1])
self.bridges[-1].add_if(inf)
elif len(line) == 4:
self.bridges.append(Bridge(line[0]))
inf = self.get_if_object_from_name(line[-1].split('.')[-1])
if inf is not None:
inf.set_bridge(self.bridges[-1])
self.bridges[-1].add_if(inf)
else:
logging.debug("Weird number of parameters in line from brctl show")
continue
return self
def get_if_object_from_name(self, linux_name):
for interface in self.interfaces:
if interface.linux == linux_name:
return interface
def find_interface(self, data):
for interface in self.interfaces:
linux = data.get('linux_interface')
if linux and interface.linux == linux:
interface.update(data)
return True
sdk = data.get('sdk_interface')
if sdk and interface.sdk == sdk:
interface.update(data)
return True
id = data.get('id_interface')
if id and interface.id == id:
interface.update(data)
return True
return False
def get_interfaces_from_names(self, interfaces_name):
return [interface for interface in self.interfaces if
(interface.linux and interfaces_name.count(interface.linux)) or (
interface.sdk and interfaces_name.count(interface.sdk))]
def get_bridges_from_names(self, bridges_name):
return [bridge for bridge in self.bridges if
(bridge.name and bridges_name.count(bridge.name))]
def normalize_files(self, normalizer, timestamp, interval, normalized_logs=None):
for f in self.files:
f.normalize(normalizer, timestamp, interval, True, True, normalized_logs)
return self
def sort_logs(self):
for f in self.files:
self.logs.extend(f.data)
self.logs.sort(lambda l1, l2: int(l1.date - l2.date))
return self
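# Minimal sketch, not part of the original module: what a concrete LogContainer
# subclass has to provide. The file path and the empty brctl output are
# assumptions for illustration only; real containers supply their own sources.
class _ExampleLogContainer(LogContainer):
    def get_interfaces_files(self, standalone):
        # each entry needs the 'name'/'content' keys consumed by File()
        return [{'name': '/etc/network/interfaces', 'content': None}]
    def get_bridges_files(self, standalone):
        # lines in "brctl show" output format, as consumed by load_bridges()
        return []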
| gpl-2.0 | 6,239,281,146,889,183,000 | 38.273438 | 120 | 0.600955 | false |
yarray/md2pdf | compile.py | 1 | 2492 | #! /usr/bin/env python
'''
compiler from markdown file to pdf using wkhtmltopdf
Usage:
compile.py [--script FILE] [--style FILE]
[--pdf-options STRING] [--toc] <input> [<output>]
compile.py (-h | --help)
Options:
-h --help Show help screen
--script FILE Script reference to be used in html
--style FILE The css stylesheet
--pdf-options STRING Options passed to wkhtmltopdf
--toc Generate table of content
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
from docopt import docopt
def guess_convertor():
def aux(name):
return os.path.join(os.path.dirname(__file__), name)
possible_vers = ['wkhtmltopdf-amd64', 'wkhtmltopdf-i386']
return [cand for cand in
[os.path.join(os.path.dirname(__file__), name)
for name in possible_vers]
if os.path.isfile(cand)][0]
def compile_to_html(source, toc=False):
if toc:
try:
import markdown
with open(source) as f:
return markdown.markdown(f.read(), extensions=['toc'])
except:
return os.popen('markdown {0}'.format(source)).read()
return os.popen('markdown {0}'.format(source)).read()
def build_sample(html, style):
text = ''
if style:
text += '<head><link href="{0}" rel="stylesheet"></head>\n'\
.format(style)
return text + html
def write_html(html, name, script_name):
text = '<html lang="en-us">'
text += html
if script_name:
with open(script_name) as sc:
text += sc.read()
text += '</html>'
with open(name, 'w') as f:
f.write(text)
def generate_pdf(for_print, output, options):
cmd = guess_convertor() + ' --encoding utf-8 -s A4 ' + \
(options + ' ' if options else '') + \
'./{0} --javascript-delay 1000 {1}'.format(for_print, output)
print cmd
os.system(cmd)
if __name__ == '__main__':
args = docopt(__doc__)
source = args['<input>']
name, ext = os.path.splitext(source)
out_name = args['<output>'] if args['<output>'] else (name + '.pdf')
sample = name + '.sample.html'
style = args['--style']
script = args['--script']
pdf_options = args['--pdf-options']
html = compile_to_html(source, args['--toc'])
write_html(build_sample(html, style), sample, script)
generate_pdf(sample, out_name, pdf_options)
| mit | 2,322,206,154,730,118,000 | 27 | 72 | 0.570225 | false |
scorpionis/docklet | src/slot_scheduler.py | 1 | 10313 | # coding=UTF-8
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import math
import random
import numpy as np
from mdkp import Colony
from machine import AllocationOfMachine
import heapq
from connection import *
import time
import _thread
import logging
import json
import jsonpickle
from log import slogger
#import log
machine_queue = []
# only used for test
task_requests = {}
tasks = {}
machines = {}
restricted_index = 0
node_manager = None
etcdclient = None
def generate_test_data(cpu,mem, count,type,id_base):
task_requests = {}
cpu_arr = np.random.binomial(cpu, 1/16, count)
mem_arr = np.random.binomial(mem, 1/16, count)
# cpu_arr = np.random.uniform(1,cpu,cpu*machines)
# mem_arr = np.random.uniform(1,mem,cpu*machines)
bids = np.random.uniform(1,100,count)
for i in range(0+id_base,count):
if cpu_arr[i]==0 or mem_arr[i] ==0:
continue
task = {
'id': str(i),
'cpus': str(int(math.ceil(cpu_arr[i]))),
'mems': str(int(math.ceil(mem_arr[i]))),
'bid': str(int(bids[i]))
}
key = str(i)
task_requests[key] = task
# write to a file
# with open('uniform_tasks.txt','w') as f:
# for key, task in tasks.items():
# f.write(str(task['cpus'])+' '+str(task['mems'])+' '+str(task['bid'])+'\n')
return task_requests
def parse_test_data(filename,cpus,mems, count):
global tasks
with open(filename,'r') as f:
i =0
for line in f.readlines()[0:count]:
arr = line.split()
task = {
'id': str(i),
'cpus': float(arr[0]),
'mems': float(arr[1]),
'bid': int(arr[2])
}
key = str(i)
task_requests[key] = task
i+=1
print(task)
def add_machine(id, cpus=24, mems=240000):
global machines
global machine_queue
machine = AllocationOfMachine(id, cpus, mems)
machines[id] = machine
heapq.heappush(machine_queue,machine)
    # TODO: switch to multi-threading and run each colony in its own thread
# machine.colony.run()
send_colony("create",machine.machineid, str(machine.reliable_cpus), str(machine.reliable_mems))
sync_colony()
# save_machine in etcd
# save_machine(machine)
return machine
def pre_allocate(task):
global restricted_index
if 'bid' in task and task['bid']!='0':
machine = heapq.heappop(machine_queue)
task['machineid'] = machine.machineid
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
task['allocation_mems_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
machine.total_value += int(task['bid'])
heapq.heappush(machine_queue,machine)
# save machine and task
# save_machine(machine)
# save_task(task)
else:
if(restricted_index >= len(machines)):
restricted_index = 0
        slogger.debug("restricted_index: %s", restricted_index)
values = list(machines.values())
task['machineid'] = values[restricted_index].machineid
restricted_index += 1
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
task['allocation_memsp_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
# save task
# save_task(task)
return task
def allocate(id):
task = tasks[id]
machineid = task['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
# slogger.debug("dispatch reliable")
task = machine.add_reliable_task(task)
# save task and machine
# save_task(task)
# save_machine(machine)
# slogger.debug("pop machine: id = %s", machine.machineid)
send_task(machine,task,"add")
else:
# slogger.debug("dispatch restricted")
task = machine.add_restricted_task(task)
# save task and machine
# save_task(task)
# save_machine(machine)
return task
def release(id):
task = tasks[id]
machineid = tasks[id]['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
slogger.debug("release reliable")
machine.release_reliable_task(id)
send_task(machine,task,'delete')
else:
slogger.debug("release restricted")
machine.release_restricted_task(id)
def after_release(id):
task = tasks[id]
for index,machine in enumerate(machine_queue):
if task['machineid'] == machine.machineid:
del machine_queue[index]
break
machine.total_value -= int(task['bid'])
heapq.heappush(machine_queue,machine)
del tasks[id]
def init_scheduler():
    # start the C program and run it in the background
import os
os.system("/home/augustin/docklet/src/aco-mmdkp/acommdkp >/home/augustin/docklet/src/aco-mmdkp.log 2>&1 &")
slogger.setLevel(logging.INFO)
slogger.info("init scheduler!")
init_sync_socket()
init_colony_socket()
init_task_socket()
init_result_socket()
_thread.start_new_thread(recv_result,(machines,))
def recover_scheduler():
global machines
global tasks
global machine_queue
    # start the C program and run it in the background
import os
os.system("/home/augustin/docklet/src/aco-mmdkp/acommdkp >/home/augustin/docklet/src/aco-mmdkp.log 2>&1 &")
slogger.setLevel(logging.INFO)
slogger.info("recover scheduler!")
init_sync_socket()
init_colony_socket()
init_task_socket()
init_result_socket()
    # recover all the machines
[status, runlist] = etcdclient.listdir("machines/runnodes")
for node in runlist:
nodeip = node['key'].rsplit('/',1)[1]
if node['value'] == 'ok':
slogger.info ("running node %s" % nodeip)
# inform dscheduler the recovered running nodes
import dscheduler
slogger.info("recover machine %s to scheduler",nodeip)
machine = load_machine(nodeip)
# recover machine_queue
heapq.heappush(machine_queue,machine)
# send machine to C process
send_colony("create",machine.machineid, str(machine.reliable_cpus), str(machine.reliable_mems))
sync_colony()
# recover recv_result thread
_thread.start_new_thread(recv_result,(machines,))
# recover all the tasks
load_tasks()
# send tasks to colony
for id,task in tasks.items():
machineid = task['machineid']
machine = machines[machineid]
send_task(machine,task,"add")
def save_machine(machine):
machine_str = jsonpickle.encode(machine)
etcdclient.setkey("/scheduler/machines/"+machine.machineid, machine_str)
def load_machine(ip):
global machines
[string,machine_str] = etcdclient.getkey("/scheduler/machines/"+ip)
machine = jsonpickle.decode(machine_str)
machines[machine.machineid]=machine
return machine
def load_machines():
global machines
[status,kvs] = etcdclient.listdir("/scheduler/machines/")
for kv in kvs:
machine_str = kv['value']
machine = jsonpickle.decode(machine_str)
        machines[machine.machineid]=machine
def save_task(task):
task_str = json.dumps(task)
etcdclient.setkey("/scheduler/tasks/"+task['id'], task_str)
def load_tasks():
global tasks
[status,kvs] = etcdclient.listdir("/scheduler/tasks/")
for kv in kvs:
task_str = kv['value']
task = jsonpickle.decode(task_str)
if task['machineid'] in machines.keys():
tasks[kv['key']]=task
def test_all(requests):
init_scheduler()
for i in range(0,100):
add_machine("m"+str(i),64,256)
slogger.info("add colonies done!")
if not requests:
requests = generate_test_data(64,256,16*2*100,"reliable",0)
# generate_test_data(64,256,1,"restricted",192)
for index,request in requests.items():
pre_allocate(request)
slogger.info("pre allocate tasks done")
for index,request in requests.items():
allocate(request['id'])
slogger.info("allocate tasks done")
time.sleep(10)
# for index,request in requests.items():
# release(request['id'])
# slogger.info("release tasks done")
# for index,request in requests.items():
# after_release(request['id'])
# slogger.info("after release tasks done")
social_welfare = 0
for index,machine in machines.items():
total_value = 0
for taskid in machine.reliable_allocations:
total_value += int(tasks[taskid]['bid'])
print("machine %s has total_value %d" % (machine.machineid, total_value))
social_welfare += total_value
print("social welfare:", social_welfare)
def test_slot_allocate(requests):
if not requests:
requests = generate_test_data(64,256,16*2*10,'reliable',0)
slot_cpu = 4.0
slot_mem = 16.0
for index, request in requests.items():
slots_c = math.ceil(float(request['cpus']) / slot_cpu)
slots_m = math.ceil(float(request['mems']) / slot_mem)
slots = slots_c if slots_c > slots_m else slots_m
# print("slots: ", slots)
request['slots'] = slots
request['bid_per_slot']= float(request['bid'])/slots
sorted_requests = sorted(requests.values(), key=lambda k:k['bid_per_slot'], reverse = True)
slots_total = 1600
slots_tmp = 0
bids_tmp = 0
for sr in sorted_requests:
slots_tmp += sr['slots']
if slots_tmp <= slots_total:
bids_tmp += int(sr['bid'])
else:
break
print("total social welfare: ", bids_tmp)
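def _slot_sizing_example():
    # Illustrative sketch, not part of the original module: how one hypothetical
    # request maps to slots under the slot_cpu=4 / slot_mem=16 sizing used above.
    # cpus=6 -> 2 cpu slots, mems=40 -> 3 mem slots, so it occupies max(2, 3) = 3
    # slots and a bid of 30 gives 10.0 per slot.
    slot_cpu, slot_mem = 4.0, 16.0
    request = {'cpus': '6', 'mems': '40', 'bid': '30'}
    slots = max(math.ceil(float(request['cpus']) / slot_cpu),
                math.ceil(float(request['mems']) / slot_mem))
    return slots, float(request['bid']) / slots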
def compare_with_slot():
requests = generate_test_data(64,256,16*4*100,'reliable',0)
test_all(requests)
test_slot_allocate(requests)
if __name__ == '__main__':
# test_pub_socket();
# test_colony_socket();
# test_all();
# test_slot_allocate(None)
compare_with_slot()
| bsd-3-clause | -4,433,797,019,322,345,000 | 27.312155 | 111 | 0.602205 | false |
nicostephan/pypuf | pypuf/simulation/arbiter_based/ltfarray.py | 1 | 15968 | from numpy import prod, shape, sign, dot, array, tile, transpose, concatenate, dstack, swapaxes, sqrt, amax, vectorize
from numpy.random import RandomState
from pypuf import tools
from pypuf.simulation.base import Simulation
class LTFArray(Simulation):
"""
Class that simulates k LTFs with n bits and a constant term each
and constant bias added.
"""
@staticmethod
def combiner_xor(r):
"""
combines output responses with the XOR operation
:param r: a list with a number of vectors of single LTF results
:return: a list of full results, one for each
"""
return prod(r, 1)
@staticmethod
def combiner_ip_mod2(r):
"""
combines output responses with the inner product mod 2 operation
:param r: a list with a number of vectors of single LTF results
:return: a list of full results, one for each
"""
n = len(r[0])
assert n % 2 == 0, 'IP mod 2 is only defined for even n. Sorry!'
return prod(
transpose(
[
amax((r[:,i], r[:,i+1]), 0)
for i in range(0, n, 2)
])
, 1)
@staticmethod
def transform_id(cs, k):
"""
Input transformation that does nothing.
:return:
"""
return array([
tile(c, (k, 1)) # same unmodified challenge for all k LTFs
for c in cs
])
@staticmethod
def transform_atf(cs, k):
"""
Input transformation that simulates an Arbiter PUF
:return:
"""
# Transform with ATF monomials
cs = transpose(
array([
prod(cs[:,i:], 1)
for i in range(len(cs[0]))
])
)
# Same challenge for all k Arbiters
return __class__.transform_id(cs, k)
@staticmethod
def transform_mm(cs, k):
N = len(cs)
n = len(cs[0])
assert k == 2, 'MM transform currently only implemented for k=2. Sorry!'
assert n % 2 == 0, 'MM transform only defined for even n. Sorry!'
cs_1 = cs
cs_2 = transpose(
concatenate(
(
[ cs[:,0] ],
[ cs[:,i] * cs[:,i+1] for i in range(0, n, 2) ],
[ cs[:,i] * cs[:,i+1] * cs[:,i+2] for i in range(0, n-2, 2) ]
)
)
)
result = swapaxes(dstack((cs_1, cs_2)), 1, 2)
assert result.shape == (N, 2, n)
return result
@staticmethod
def transform_lightweight_secure(cs, k):
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, 'Secure Lightweight Input Transformation only defined for even n. Sorry!'
cs = transpose(
concatenate(
(
[ cs[:,i] * cs[:,i+1] for i in range(0, n, 2) ], # ( x1x2, x3x4, ... xn-1xn )
[ cs[:,0] ], # ( x1 )
[ cs[:,i] * cs[:,i+1] for i in range(1, n-2, 2) ], # ( x2x3, x4x5, ... xn-2xn-1 )
)
)
)
assert cs.shape == (N, n)
return __class__.transform_shift(cs, k)
@staticmethod
def transform_shift_lightweight_secure(cs, k):
"""
Input transform as defined by Majzoobi et al. 2008, but with the shift
operation executed first.
"""
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, 'Secure Lightweight Input Transformation only defined for even n. Sorry!'
shifted = __class__.transform_shift(cs, k)
cs = transpose(
concatenate(
(
[ shifted[:,:,i] * shifted[:,:,i+1] for i in range(0, n, 2) ],
[ shifted[:,:,0] ],
[ shifted[:,:,i] * shifted[:,:,i+1] for i in range(1, n-2, 2) ],
)
),
(1, 2, 0)
)
assert cs.shape == (N, k, n)
return cs
@staticmethod
def transform_soelter_lightweight_secure(cs, k):
"""
        Input transformation as defined by Majzoobi et al. (cf. transform_lightweight_secure),
        but differing in one bit. Introduced by Sölter.
"""
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, 'Sölter\'s Secure Lightweight Input Transformation only defined for even n. Sorry!'
n_half = int(n/2)
cs = transpose(
concatenate(
(
[cs[:, i] * cs[:, i + 1] for i in range(0, n, 2)], # ( x1x2, x3x4, ... xn-1xn )
[cs[:, n_half]], # ( x_(n/2+1) )
[cs[:, i] * cs[:, i + 1] for i in range(1, n - 2, 2)], # ( x2x3, x4x5, ... xn-2xn-1 )
)
)
)
assert cs.shape == (N, n)
return __class__.transform_shift(cs, k)
@staticmethod
def transform_shift(cs, k):
N = len(cs)
n = len(cs[0])
result = swapaxes(array([
concatenate((cs[:,l:], cs[:,:l]), axis=1)
for l in range(k)
]), 0, 1)
assert result.shape == (N, k, n)
return result
@staticmethod
def transform_1_n_bent(cs, k):
"""
For one LTF, we compute the input as follows: the i-th input bit will be the result
of the challenge shifted by i bits to the left, then input into inner product mod 2
function.
The other LTF get the original input.
"""
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, '1-n bent transform only defined for even n. Sorry!'
shift_challenges = __class__.transform_shift(cs, n)
assert shift_challenges.shape == (N, n, n)
bent_challenges = transpose(
array(
[
__class__.combiner_ip_mod2(shift_challenges[:,i,:])
for i in range(n)
]
)
)
assert bent_challenges.shape == (N, n)
return array([
concatenate(
(
[bent_challenges[j]], # 'bent' challenge as generated above
tile(cs[j], (k - 1, 1)) # unmodified challenge for k-1 LTFs
),
axis=0
)
for j in range(N)
])
@staticmethod
def transform_1_1_bent(cs, k):
"""
For one LTF, we compute the input as follows: the first input bit will be
the result of IPmod2 of the original challenge, all other input bits will
remain the same.
The other LTF get the original input.
"""
N = len(cs)
n = len(cs[0])
assert k >= 2, '1-n bent transform currently only implemented for k>=2. Sorry!'
assert n % 2 == 0, '1-n bent transform only defined for even n. Sorry!'
bent_challenge_bits = __class__.combiner_ip_mod2(cs)
assert bent_challenge_bits.shape == (N, )
return array([
concatenate(
(
[concatenate(([[bent_challenge_bits[j]], cs[j][1:]]))], # 'bent' challenge bit plus remainder unchanged
tile(cs[j], (k - 1, 1)) # unmodified challenge for k-1 LTFs
),
axis=0
)
for j in range(N)
])
@staticmethod
def transform_polynomial(cs, k):
"""
This input transformation interprets a challenge c as a
polynomial over the finite field GF(2^n)=F2/f*F2, where f is a
irreducible polynomial of degree n.
The irreducible polynomial f is hard coded and
of degree 8, 16, 24, 32, 48, or 64.
Each Arbiter Chain i receives as input the polynomial c^i
as element of GF(2^n).
"""
N = len(cs)
n = len(cs[0])
assert n in [8, 16, 24, 32, 48, 64], 'Polynomial transformation is only implemented for challenges with n in {8, 16, 24, 32, 48, 64}. ' \
'Sorry!'
if n == 64:
f = [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1]
elif n == 48:
f = [1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 1]
elif n == 32:
f = [1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
elif n == 24:
f = [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
elif n == 16:
f = [1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1]
elif n == 8:
f = [1, 0, 1, 0, 0, 1, 1, 0, 1]
""" Transform challenge to 0,1 array to compute transformation with numpy. """
vtransform_to_01 = vectorize(tools.transform_challenge_11_to_01)
cs_01 = array([vtransform_to_01(c) for c in cs])
""" Compute c^i for each challenge for i from 1 to k. """
cs = concatenate([
[tools.poly_mult_div(c, f, k) for c in cs_01]
])
""" Transform challenges back to -1,1 notation. """
vtransform_to_11 = vectorize(tools.transform_challenge_01_to_11)
result = array([vtransform_to_11(c) for c in cs])
assert result.shape == (N, k, n), 'The resulting challenges have not the desired shape. Sorry!'
return result
@staticmethod
def transform_permutation_atf(cs, k):
"""
This transformation performs first a pseudorandom permutation of the challenge k times before applying the
ATF transformation to each challenge.
:param cs:
:param k:
:return:
"""
N = len(cs)
n = len(cs[0])
seed = 0x1234
""" Perform random permutations """
cs_permuted = array(
[
[RandomState(seed + i).permutation(c)
for i in range(k)]
for c in cs
]
)
""" Perform atf transform """
result = transpose(
array([
prod(cs_permuted[:, :, i:], 2)
for i in range(n)
]),
(1, 2, 0)
)
assert result.shape == (N, k, n), 'The resulting challenges have not the desired shape. Sorry!'
return result
@staticmethod
def transform_random(cs, k):
"""
This input transformation chooses for each Arbiter Chain an random challenge based on the initial challenge.
"""
N = len(cs)
n = len(cs[0])
vtransform_to_01 = vectorize(tools.transform_challenge_11_to_01)
cs_01 = array([vtransform_to_01(c) for c in cs])
result = array([RandomState(c).choice((-1, 1), (k, n)) for c in cs_01])
assert result.shape == (N, k, n), 'The resulting challenges have not the desired shape. Sorry!'
return result
@staticmethod
def transform_concat(transform_1, nn, transform_2):
"""
This input transformation will transform the first nn bit of each challenge using transform_1,
the remaining bits using transform_2.
:return: A function that can perform the desired transformation
"""
def transform(cs, k):
(N,n) = cs.shape
cs1 = cs[:,:nn]
cs2 = cs[:,nn:]
transformed_1 = transform_1(cs1, k)
transformed_2 = transform_2(cs2, k)
assert transformed_1.shape == (N, k, nn)
assert transformed_2.shape == (N, k, n - nn)
return concatenate(
(
transformed_1,
transformed_2
),
axis=2
)
transform.__name__ = 'transform_concat_%s_nn%i_%s' % \
(
transform_1.__name__.replace('transform_', ''),
nn,
transform_2.__name__.replace('transform_', '')
)
return transform
@staticmethod
def normal_weights(n, k, mu=0, sigma=1, random_instance=RandomState()):
"""
Returns weights for an array of k LTFs of size n each.
The weights are drawn from a normal distribution with given
mean and std. deviation, if parameters are omitted, the
standard normal distribution is used.
The `normal` method of the optionally provided PRNG instance
is used to obtain the weights. If no PRNG instance provided,
a fresh `numpy.random.RandomState` instance is used.
"""
return random_instance.normal(loc=mu, scale=sigma, size=(k, n))
def __init__(self, weight_array, transform, combiner, bias=False):
"""
Initializes an LTFArray based on given weight_array and
combiner function with appropriate transformation of challenges.
        The bias is supplied as the (n+1)th value in weight_array,
        so the parameter bias only states whether the given weight_array has
        n+1 values while the challenges still have length n.
"""
(self.k, self.n) = shape(weight_array)
self.weight_array = weight_array
self.transform = transform
self.combiner = combiner
self.bias = bias
def eval(self, inputs):
"""
evaluates a given list of challenges regarding bias
:param x: list of challenges
:return: list of responses
"""
if self.bias:
inputs = tools.iter_append_last(inputs, 1)
return sign(self.val(inputs))
def val(self, inputs):
return self.combiner(self.ltf_eval(self.transform(inputs, self.k)))
def ltf_eval(self, inputs):
"""
:return: array
"""
return transpose(
array([
dot(
inputs[:,l],
self.weight_array[l]
)
for l in range(self.k)
])
)
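# Illustrative usage sketch, not part of the original module: simulate a small
# 2-XOR Arbiter PUF and evaluate a handful of random challenges. Sizes and seeds
# are arbitrary demonstration values.
def _example_ltfarray():
    weights = LTFArray.normal_weights(n=8, k=2, random_instance=RandomState(1))
    puf = LTFArray(weights, LTFArray.transform_atf, LTFArray.combiner_xor)
    challenges = RandomState(2).choice((-1, 1), (10, 8))
    return puf.eval(challenges)  # array of 10 responses in {-1, +1}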
class NoisyLTFArray(LTFArray):
"""
Class that simulates k LTFs with n bits and a constant term each
with noise effect and constant bias added.
"""
@staticmethod
def sigma_noise_from_random_weights(n, sigma_weight, noisiness=0.1):
"""
returns sd of noise (sigma_noise) out of n stages with
sd of weight differences (sigma_weight) and noisiness factor
"""
return sqrt(n) * sigma_weight * noisiness
def __init__(self, weight_array, transform, combiner, sigma_noise,
random_instance=RandomState(), bias=False):
"""
Initializes LTF array like in LTFArray and uses the provided
PRNG instance for drawing noise values. If no PRNG provided, a
fresh `numpy.random.RandomState` instance is used.
"""
super().__init__(weight_array, transform, combiner, bias)
self.sigma_noise = sigma_noise
self.random = random_instance
def ltf_eval(self, inputs):
"""
Calculates weight_array with given set of challenges including noise.
The noise effect is a normal distributed random variable with mu=0,
sigma=sigma_noise.
Random numbers are drawn from the PRNG instance generated when
initializing the NoisyLTFArray.
"""
noise = self.random.normal(loc=0, scale=self.sigma_noise, size=(1, self.k))
return super().ltf_eval(inputs) + noise
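# Illustrative sketch, not part of the original module: wiring the
# sigma_noise_from_random_weights() helper into a NoisyLTFArray; parameter
# values are arbitrary demonstration choices.
def _example_noisy_ltfarray():
    n, k = 16, 2
    weights = LTFArray.normal_weights(n, k, random_instance=RandomState(3))
    sigma_noise = NoisyLTFArray.sigma_noise_from_random_weights(n, sigma_weight=1, noisiness=0.1)
    puf = NoisyLTFArray(weights, LTFArray.transform_id, LTFArray.combiner_xor,
                        sigma_noise, random_instance=RandomState(4))
    challenges = RandomState(5).choice((-1, 1), (5, n))
    return puf.eval(challenges)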
| gpl-3.0 | -530,663,471,708,576,300 | 33.633406 | 145 | 0.500626 | false |
arowser/wireshark-xcoin | tools/wireshark_gen.py | 1 | 95941 | # -*- python -*-
#
# wireshark_gen.py (part of idl2wrs)
#
# Author : Frank Singleton ([email protected])
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from CORBA IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at http://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# Description:
#
# Omniidl Back-end which parses an IDL list of "Operation" nodes
# passed from wireshark_be2.py and generates "C" code for compiling
# as a plugin for the Wireshark IP Protocol Analyser.
#
#
# Strategy (sneaky but ...)
#
# problem: I don't know what variables to declare until AFTER the helper functions
# have been built, so ...
#
# There are 2 passes through genHelpers, the first one is there just to
# make sure the fn_hash data struct is populated properly.
# The second pass is the real thing, generating code and declaring
# variables (from the 1st pass) properly.
#
"""Wireshark IDL compiler back-end."""
from omniidl import idlast, idltype, idlutil, output
import sys, string
import tempfile
#
# Output class, generates "C" src code for the sub-dissector
#
# in:
#
#
# self - me
# st - output stream
# node - a reference to an Operations object.
# name - scoped name (Module::Module::Interface:: .. ::Operation
#
#
# TODO -- FS
#
# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
# 2. add item instead of add_text() [done]
# 3. sequence handling [done]
# 4. User Exceptions [done]
# 5. Fix arrays, and structs containing arrays [done]
# 6. Handle pragmas.
# 7. Exception can be common to many operations, so handle them outside the
# operation helper functions [done]
# 8. Automatic variable declaration [done, improve, still get some collisions. Add variable delegator function]
#    For example, multidimensional arrays.
# 9. wchar and wstring handling [giop API needs improving]
# 10. Support Fixed [done]
# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
# eg: attribute string apple => _get_apple and _set_apple
#
# 12. Implement IDL "union" code [done]
# 13. Implement support for plugins [done]
# 14. Don't generate code for empty operations (cf: exceptions without members)
# 15. Generate code to display Enums numerically and symbolically [done]
# 16. Place structs/unions in subtrees
# 17. Recursive struct and union handling [done]
# 18. Improve variable naming for display (eg: structs, unions etc) [done]
#
# Also test, Test, TEST
#
#
# Strategy:
# For every operation and attribute do
# For return val and all parameters do
# find basic IDL type for each parameter
# output get_CDR_xxx
# output exception handling code
# output attribute handling code
#
#
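#
# Illustrative driver sketch (an assumption, not part of this backend): the
# omniidl front end (wireshark_be2.py) is expected to instantiate this class
# and hand over the visited IDL lists roughly like
#
#   eth = wireshark_gen_C(output.Stream(open("packet-echo.c", "w"), 4),
#                         "ECHO", "echo", "Echo IDL Example")
#   eth.genCode(oplist, atlist, enlist, stlist, unlist)
#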
class wireshark_gen_C:
#
# Turn DEBUG stuff on/off
#
DEBUG = 0
#
# Some string constants for our templates
#
c_u_octet8 = "guint64 u_octet8;"
c_s_octet8 = "gint64 s_octet8;"
c_u_octet4 = "guint32 u_octet4;"
c_s_octet4 = "gint32 s_octet4;"
c_u_octet2 = "guint16 u_octet2;"
c_s_octet2 = "gint16 s_octet2;"
c_u_octet1 = "guint8 u_octet1;"
c_s_octet1 = "gint8 s_octet1;"
c_float = "gfloat my_float;"
c_double = "gdouble my_double;"
c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
c_i = "guint32 i_"; # loop index
c_i_lim = "guint32 u_octet4_loop_"; # loop limit
c_u_disc = "guint32 disc_u_"; # unsigned int union discriminant variable name (enum)
c_s_disc = "gint32 disc_s_"; # signed int union discriminant variable name (other cases, except Enum)
#
# Constructor
#
def __init__(self, st, protocol_name, dissector_name ,description):
self.st = output.Stream(tempfile.TemporaryFile(),4) # for first pass only
self.st_save = st # where 2nd pass should go
self.protoname = protocol_name # Protocol Name (eg: ECHO)
self.dissname = dissector_name # Dissector name (eg: echo)
self.description = description # Detailed Protocol description (eg: Echo IDL Example)
self.exlist = [] # list of exceptions used in operations.
#self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
# ie a hash of lists
self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
# populated with operations/vars and exceptions/vars
#
# genCode()
#
# Main entry point, controls sequence of
# generated code.
#
#
def genCode(self,oplist, atlist, enlist, stlist, unlist): # operation,attribute,enums,struct and union lists
self.genHelpers(oplist,stlist,unlist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that operation later, I have the variables to
# declare already.
self.genExceptionHelpers(oplist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.genAttributeHelpers(atlist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
self.st = self.st_save
self.genHeader() # initial dissector comments
self.genEthCopyright() # Wireshark Copyright comments.
self.genGPL() # GPL license
self.genIncludes()
self.genPrototype()
self.genProtocol()
self.genDeclares(oplist,atlist,enlist,stlist,unlist)
if (len(atlist) > 0):
self.genAtList(atlist) # string constant declares for Attributes
if (len(enlist) > 0):
self.genEnList(enlist) # string constant declares for Enums
self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
if (len(atlist) > 0):
self.genAttributeHelpers(atlist) # helper function to decode "attributes"
self.genHelpers(oplist,stlist,unlist) # operation, struct and union decode helper functions
self.genMainEntryStart(oplist)
self.genOpDelegator(oplist)
self.genAtDelegator(atlist)
self.genMainEntryEnd()
self.gen_proto_register(oplist, atlist, stlist, unlist)
self.gen_proto_reg_handoff(oplist)
# All the dissectors are now built-in
#self.gen_plugin_register()
#self.dumpvars() # debug
self.genModelines();
#
# genHeader
#
# Generate Standard Wireshark Header Comments
#
#
def genHeader(self):
self.st.out(self.template_Header,dissector_name=self.dissname)
if self.DEBUG:
print "XXX genHeader"
#
# genEthCopyright
#
# Wireshark Copyright Info
#
#
def genEthCopyright(self):
if self.DEBUG:
print "XXX genEthCopyright"
self.st.out(self.template_wireshark_copyright)
#
# genModelines
#
# Modelines info
#
#
def genModelines(self):
if self.DEBUG:
print "XXX genModelines"
self.st.out(self.template_Modelines)
#
# genGPL
#
# GPL license
#
#
def genGPL(self):
if self.DEBUG:
print "XXX genGPL"
self.st.out(self.template_GPL)
#
# genIncludes
#
# GPL license
#
#
def genIncludes(self):
if self.DEBUG:
print "XXX genIncludes"
self.st.out(self.template_Includes)
#
# genOpDeclares()
#
# Generate hf variables for operation filters
#
# in: opnode ( an operation node)
#
def genOpDeclares(self, op):
if self.DEBUG:
print "XXX genOpDeclares"
print "XXX return type = " , op.returnType().kind()
sname = self.namespace(op, "_")
rt = op.returnType()
if (rt.kind() != idltype.tk_void):
if (rt.kind() == idltype.tk_alias): # a typdef return val possibly ?
#self.get_CDR_alias(rt, rt.name() )
self.st.out(self.template_hf, name=sname + "_return")
else:
self.st.out(self.template_hf, name=sname + "_return")
for p in op.parameters():
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
#
# genAtDeclares()
#
# Generate hf variables for attributes
#
# in: at ( an attribute)
#
def genAtDeclares(self, at):
if self.DEBUG:
print "XXX genAtDeclares"
for decl in at.declarators():
sname = self.namespace(decl, "_")
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier())
#
# genStDeclares()
#
# Generate hf variables for structs
#
# in: st ( a struct)
#
def genStDeclares(self, st):
if self.DEBUG:
print "XXX genStDeclares"
sname = self.namespace(st, "_")
for m in st.members():
for decl in m.declarators():
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
#
# genExDeclares()
#
# Generate hf variables for user exception filters
#
# in: exnode ( an exception node)
#
def genExDeclares(self,ex):
if self.DEBUG:
print "XXX genExDeclares"
sname = self.namespace(ex, "_")
for m in ex.members():
for decl in m.declarators():
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
#
# genUnionDeclares()
#
# Generate hf variables for union filters
#
# in: un ( an union)
#
def genUnionDeclares(self,un):
if self.DEBUG:
print "XXX genUnionDeclares"
sname = self.namespace(un, "_")
self.st.out(self.template_hf, name=sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
#
# genExpertInfoDeclares()
#
# Generate ei variables for expert info filters
#
def genExpertInfoDeclares(self):
if self.DEBUG:
print "XXX genExpertInfoDeclares"
self.st.out(self.template_proto_register_ei_filters, dissector_name=self.dissname)
#
# genDeclares
#
# generate function prototypes if required
#
# Currently this is used for struct and union helper function declarations.
#
def genDeclares(self,oplist,atlist,enlist,stlist,unlist):
if self.DEBUG:
print "XXX genDeclares"
# prototype for operation filters
self.st.out(self.template_hf_operations)
#operation specific filters
if (len(oplist) > 0):
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOpDeclares(op)
#attribute filters
if (len(atlist) > 0):
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAtDeclares(at)
#struct filters
if (len(stlist) > 0):
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
self.genStDeclares(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if (len(exlist) > 0):
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if (ex.members()): # only if has members
self.genExDeclares(ex)
#union filters
if (len(unlist) > 0):
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnionDeclares(un)
#expert info filters
self.genExpertInfoDeclares()
# prototype for start_dissecting()
self.st.out(self.template_prototype_start_dissecting)
# struct prototypes
if len(stlist):
self.st.out(self.template_prototype_struct_start)
for st in stlist:
#print st.repoId()
sname = self.namespace(st, "_")
self.st.out(self.template_prototype_struct_body, stname=st.repoId(),name=sname)
self.st.out(self.template_prototype_struct_end)
# union prototypes
if len(unlist):
self.st.out(self.template_prototype_union_start)
for un in unlist:
sname = self.namespace(un, "_")
self.st.out(self.template_prototype_union_body, unname=un.repoId(),name=sname)
self.st.out(self.template_prototype_union_end)
#
# genPrototype
#
#
def genPrototype(self):
self.st.out(self.template_prototype, dissector_name=self.dissname)
#
# genProtocol
#
#
def genProtocol(self):
self.st.out(self.template_protocol, dissector_name=self.dissname)
self.st.out(self.template_init_boundary)
#
# genMainEntryStart
#
def genMainEntryStart(self,oplist):
self.st.out(self.template_main_dissector_start, dissname=self.dissname, disprot=self.protoname)
self.st.inc_indent()
self.st.out(self.template_main_dissector_switch_msgtype_start)
self.st.out(self.template_main_dissector_switch_msgtype_start_request_reply)
self.st.inc_indent()
#
# genMainEntryEnd
#
def genMainEntryEnd(self):
self.st.out(self.template_main_dissector_switch_msgtype_end_request_reply)
self.st.dec_indent()
self.st.out(self.template_main_dissector_switch_msgtype_all_other_msgtype)
self.st.dec_indent()
self.st.out(self.template_main_dissector_end)
#
# genAtList
#
# in: atlist
#
    # out: C code for IDL attribute declarations.
#
# NOTE: Mapping of attributes to operation(function) names is tricky.
#
# The actual accessor function names are language-mapping specific. The attribute name
# is subject to OMG IDL's name scoping rules; the accessor function names are
# guaranteed not to collide with any legal operation names specifiable in OMG IDL.
#
# eg:
#
# static const char get_Penguin_Echo_get_width_at[] = "get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "set_width" ;
#
# or:
#
# static const char get_Penguin_Echo_get_width_at[] = "_get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "_set_width" ;
#
    # TODO: Implement some language-dependent templates to handle naming conventions
    #       language <=> attribute, for C, C++, Java etc
#
# OR, just add a runtime GUI option to select language binding for attributes -- FS
#
#
#
# ie: def genAtlist(self,atlist,language)
#
def genAtList(self,atlist):
self.st.out(self.template_comment_attributes_start)
for n in atlist:
for i in n.declarators(): #
sname = self.namespace(i, "_")
atname = i.identifier()
self.st.out(self.template_attributes_declare_Java_get, sname=sname, atname=atname)
if not n.readonly():
self.st.out(self.template_attributes_declare_Java_set, sname=sname, atname=atname)
self.st.out(self.template_comment_attributes_end)
#
# genEnList
#
# in: enlist
#
    # out: C code for IDL Enum declarations using "static const value_string" template
#
def genEnList(self,enlist):
self.st.out(self.template_comment_enums_start)
for enum in enlist:
sname = self.namespace(enum, "_")
self.st.out(self.template_comment_enum_comment, ename=enum.repoId())
self.st.out(self.template_value_string_start, valstringname=sname)
for enumerator in enum.enumerators():
self.st.out(self.template_value_string_entry, intval=str(self.valFromEnum(enum,enumerator)), description=enumerator.identifier())
#atname = n.identifier()
self.st.out(self.template_value_string_end, valstringname=sname)
self.st.out(self.template_comment_enums_end)
#
# genExceptionDelegator
#
# in: oplist
#
# out: C code for User exception delegator
#
# eg:
#
#
def genExceptionDelegator(self,oplist):
self.st.out(self.template_main_exception_delegator_start)
self.st.inc_indent()
exlist = self.get_exceptionList(oplist) # grab list of ALL UNIQUE exception nodes
for ex in exlist:
if self.DEBUG:
print "XXX Exception " , ex.repoId()
print "XXX Exception Identifier" , ex.identifier()
print "XXX Exception Scoped Name" , ex.scopedName()
if (ex.members()): # only if has members
sname = self.namespace(ex, "_")
exname = ex.repoId()
self.st.out(self.template_ex_delegate_code, sname=sname, exname=ex.repoId())
self.st.dec_indent()
self.st.out(self.template_main_exception_delegator_end)
#
    # genAttributeHelpers()
#
# Generate private helper functions to decode Attributes.
#
# in: atlist
#
# For readonly attribute - generate get_xxx()
# If NOT readonly attribute - also generate set_xxx()
#
def genAttributeHelpers(self,atlist):
if self.DEBUG:
print "XXX genAttributeHelpers: atlist = ", atlist
self.st.out(self.template_attribute_helpers_start)
for attrib in atlist:
for decl in attrib.declarators():
self.genAtHelper(attrib,decl,"get") # get accessor
if not attrib.readonly():
self.genAtHelper(attrib,decl,"set") # set accessor
self.st.out(self.template_attribute_helpers_end)
#
# genAtHelper()
#
# Generate private helper functions to decode an attribute
#
# in: at - attribute node
# in: decl - declarator belonging to this attribute
# in: order - to generate a "get" or "set" helper
def genAtHelper(self,attrib,decl,order):
if self.DEBUG:
print "XXX genAtHelper"
sname = order + "_" + self.namespace(decl, "_") # must use set or get prefix to avoid collision
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_attribute_helper_function_start, sname=sname, atname=decl.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
self.getCDR(attrib.attrType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_attribute_helper_function_end)
#
# genExceptionHelpers()
#
# Generate private helper functions to decode Exceptions used
# within operations
#
# in: oplist
#
def genExceptionHelpers(self,oplist):
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if self.DEBUG:
print "XXX genExceptionHelpers: exlist = ", exlist
self.st.out(self.template_exception_helpers_start)
for ex in exlist:
if (ex.members()): # only if has members
#print "XXX Exception = " + ex.identifier()
self.genExHelper(ex)
self.st.out(self.template_exception_helpers_end)
#
    # genExHelper()
#
# Generate private helper functions to decode User Exceptions
#
# in: exnode ( an exception node)
#
def genExHelper(self,ex):
if self.DEBUG:
print "XXX genExHelper"
sname = self.namespace(ex, "_")
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_exception_helper_function_start, sname=sname, exname=ex.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
for m in ex.members():
if self.DEBUG:
print "XXX genExhelper, member = ", m, "member type = ", m.memberType()
for decl in m.declarators():
if self.DEBUG:
print "XXX genExhelper, d = ", decl
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_exception_helper_function_end)
#
# genHelpers()
#
# Generate private helper functions for each IDL operation.
# Generate private helper functions for each IDL struct.
# Generate private helper functions for each IDL union.
#
#
# in: oplist, stlist, unlist
#
def genHelpers(self,oplist,stlist,unlist):
for op in oplist:
self.genOperation(op)
for st in stlist:
self.genStructHelper(st)
for un in unlist:
self.genUnionHelper(un)
#
# genOperation()
#
    # Generate private helper functions for a specific IDL operation.
#
# in: opnode
#
def genOperation(self,opnode):
if self.DEBUG:
print "XXX genOperation called"
sname = self.namespace(opnode, "_")
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.curr_sname = sname # update current opnode's scoped name
opname = opnode.identifier()
self.st.out(self.template_helper_function_comment, repoid=opnode.repoId() )
self.st.out(self.template_helper_function_start, sname=sname)
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
self.st.out(self.template_helper_switch_msgtype_start)
self.st.out(self.template_helper_switch_msgtype_request_start)
self.st.inc_indent()
self.genOperationRequest(opnode)
self.st.out(self.template_helper_switch_msgtype_request_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_start)
self.st.inc_indent()
self.st.out(self.template_helper_switch_rep_status_start)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_start)
self.st.inc_indent()
self.genOperationReply(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_start)
self.st.inc_indent()
self.genOpExceptions(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_reply_default_end)
self.st.out(self.template_helper_switch_rep_status_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_default_end)
self.st.out(self.template_helper_switch_msgtype_end)
self.st.dec_indent()
self.st.out(self.template_helper_function_end, sname=sname)
#
# Decode function parameters for a GIOP request message
#
#
def genOperationRequest(self,opnode):
for p in opnode.parameters():
if p.is_in():
if self.DEBUG:
print "XXX parameter = " ,p
print "XXX parameter type = " ,p.paramType()
print "XXX parameter type kind = " ,p.paramType().kind()
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#
# Decode function parameters for a GIOP reply message
#
def genOperationReply(self,opnode):
rt = opnode.returnType() # get return type
if self.DEBUG:
print "XXX genOperationReply"
print "XXX opnode = " , opnode
print "XXX return type = " , rt
print "XXX return type.unalias = " , rt.unalias()
print "XXX return type.kind() = " , rt.kind();
sname = self.namespace(opnode, "_")
if (rt.kind() == idltype.tk_alias): # a typdef return val possibly ?
#self.getCDR(rt.decl().alias().aliasType(),"dummy") # return value maybe a typedef
self.get_CDR_alias(rt, sname + "_return" )
#self.get_CDR_alias(rt, rt.name() )
else:
self.getCDR(rt, sname + "_return") # return value is NOT an alias
for p in opnode.parameters():
if p.is_out(): # out or inout
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#self.st.dec_indent()
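#
# Iterate over the user exceptions an operation can raise.
# Currently this emits no decode code of its own - the member decoding
# is generated separately by the exception helper functions - so it is
# effectively a placeholder that only walks the raises() list.
#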
def genOpExceptions(self,opnode):
for ex in opnode.raises():
if ex.members():
#print ex.members()
for m in ex.members():
t=0
#print m.memberType(), m.memberType().kind()
#
# Delegator for Operations
#
def genOpDelegator(self,oplist):
for op in oplist:
iname = "/".join(op.scopedName()[:-1])
opname = op.identifier()
sname = self.namespace(op, "_")
self.st.out(self.template_op_delegate_code, interface=iname, sname=sname, opname=opname)
#
# Delegator for Attributes
#
def genAtDelegator(self,atlist):
for a in atlist:
for i in a.declarators():
atname = i.identifier()
sname = self.namespace(i, "_")
self.st.out(self.template_at_delegate_code_get, sname=sname)
if not a.readonly():
self.st.out(self.template_at_delegate_code_set, sname=sname)
#
# Add a variable declaration to the hash of list
#
def addvar(self, var):
if not ( var in self.fn_hash[self.curr_sname] ):
self.fn_hash[self.curr_sname].append(var)
#
# Print the variable declaration from the hash of list
#
def dumpvars(self):
for fn in self.fn_hash.keys():
print "FN = " + fn
for v in self.fn_hash[fn]:
print "-> " + v
#
# Print the "C" variable declaration from the hash of list
# for a given scoped operation name (eg: tux_penguin_eat)
#
def dumpCvars(self, sname):
for v in self.fn_hash[sname]:
self.st.out(v)
#
# Given an enum node, and an enumerator node, return
# the enumerator's numerical value.
#
# eg: enum Color {red,green,blue} should return
# val = 1 for green
#
def valFromEnum(self,enumNode, enumeratorNode):
if self.DEBUG:
print "XXX valFromEnum, enumNode = ", enumNode, " from ", enumNode.repoId()
print "XXX valFromEnum, enumeratorNode = ", enumeratorNode, " from ", enumeratorNode.repoId()
if isinstance(enumeratorNode,idlast.Enumerator):
value = enumNode.enumerators().index(enumeratorNode)
return value
## tk_null = 0
## tk_void = 1
## tk_short = 2
## tk_long = 3
## tk_ushort = 4
## tk_ulong = 5
## tk_float = 6
## tk_double = 7
## tk_boolean = 8
## tk_char = 9
## tk_octet = 10
## tk_any = 11
## tk_TypeCode = 12
## tk_Principal = 13
## tk_objref = 14
## tk_struct = 15
## tk_union = 16
## tk_enum = 17
## tk_string = 18
## tk_sequence = 19
## tk_array = 20
## tk_alias = 21
## tk_except = 22
## tk_longlong = 23
## tk_ulonglong = 24
## tk_longdouble = 25
## tk_wchar = 26
## tk_wstring = 27
## tk_fixed = 28
## tk_value = 29
## tk_value_box = 30
## tk_native = 31
## tk_abstract_interface = 32
#
# getCDR()
#
# This is the main "iterator" function. It takes a node, and tries to output
# a get_CDR_XXX accessor method(s). It can call itself multiple times
# if I find nested structures etc.
#
def getCDR(self,type,name="fred"):
pt = type.unalias().kind() # param CDR type
pn = name # param name
if self.DEBUG:
print "XXX getCDR: kind = " , pt
print "XXX getCDR: name = " , pn
if pt == idltype.tk_ulong:
self.get_CDR_ulong(pn)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong(pn)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong(pn)
elif pt == idltype.tk_void:
self.get_CDR_void(pn)
elif pt == idltype.tk_short:
self.get_CDR_short(pn)
elif pt == idltype.tk_long:
self.get_CDR_long(pn)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort(pn)
elif pt == idltype.tk_float:
self.get_CDR_float(pn)
elif pt == idltype.tk_double:
self.get_CDR_double(pn)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed(type.unalias(),pn)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean(pn)
elif pt == idltype.tk_char:
self.get_CDR_char(pn)
elif pt == idltype.tk_octet:
self.get_CDR_octet(pn)
elif pt == idltype.tk_any:
self.get_CDR_any(pn)
elif pt == idltype.tk_string:
self.get_CDR_string(pn)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring(pn)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar(pn)
elif pt == idltype.tk_enum:
#print type.decl()
self.get_CDR_enum(pn,type)
#self.get_CDR_enum(pn)
elif pt == idltype.tk_struct:
self.get_CDR_struct(type,pn)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode(pn)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet(type,pn)
else:
self.get_CDR_sequence(type,pn)
elif pt == idltype.tk_objref:
self.get_CDR_objref(type,pn)
elif pt == idltype.tk_array:
pn = pn # Supported elsewhere
elif pt == idltype.tk_union:
self.get_CDR_union(type,pn)
elif pt == idltype.tk_alias:
if self.DEBUG:
print "XXXXX Alias type XXXXX " , type
self.get_CDR_alias(type,pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
#
# get_CDR_XXX methods are here ..
#
#
def get_CDR_ulong(self,pn):
self.st.out(self.template_get_CDR_ulong, hfname=pn)
def get_CDR_short(self,pn):
self.st.out(self.template_get_CDR_short, hfname=pn)
def get_CDR_void(self,pn):
self.st.out(self.template_get_CDR_void, hfname=pn)
def get_CDR_long(self,pn):
self.st.out(self.template_get_CDR_long, hfname=pn)
def get_CDR_ushort(self,pn):
self.st.out(self.template_get_CDR_ushort, hfname=pn)
def get_CDR_float(self,pn):
self.st.out(self.template_get_CDR_float, hfname=pn)
def get_CDR_double(self,pn):
self.st.out(self.template_get_CDR_double, hfname=pn)
def get_CDR_longlong(self,pn):
self.st.out(self.template_get_CDR_longlong, hfname=pn)
def get_CDR_ulonglong(self,pn):
self.st.out(self.template_get_CDR_ulonglong, hfname=pn)
def get_CDR_boolean(self,pn):
self.st.out(self.template_get_CDR_boolean, hfname=pn)
def get_CDR_fixed(self,type,pn):
if self.DEBUG:
print "XXXX calling get_CDR_fixed, type = ", type
print "XXXX calling get_CDR_fixed, type.digits() = ", type.digits()
print "XXXX calling get_CDR_fixed, type.scale() = ", type.scale()
string_digits = '%i ' % type.digits() # convert int to string
string_scale = '%i ' % type.scale() # convert int to string
string_length = '%i ' % self.dig_to_len(type.digits()) # how many octets to hilight for a number of digits
self.st.out(self.template_get_CDR_fixed, varname=pn, digits=string_digits, scale=string_scale, length=string_length )
self.addvar(self.c_seq)
def get_CDR_char(self,pn):
self.st.out(self.template_get_CDR_char, hfname=pn)
def get_CDR_octet(self,pn):
self.st.out(self.template_get_CDR_octet, hfname=pn)
def get_CDR_any(self,pn):
self.st.out(self.template_get_CDR_any, varname=pn)
def get_CDR_enum(self,pn,type):
#self.st.out(self.template_get_CDR_enum, hfname=pn)
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic, valstringarray=sname,hfname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_string(self,pn):
self.st.out(self.template_get_CDR_string, hfname=pn)
def get_CDR_wstring(self,pn):
self.st.out(self.template_get_CDR_wstring, varname=pn)
self.addvar(self.c_u_octet4)
self.addvar(self.c_seq)
def get_CDR_wchar(self,pn):
self.st.out(self.template_get_CDR_wchar, varname=pn)
self.addvar(self.c_s_octet1)
self.addvar(self.c_seq)
def get_CDR_TypeCode(self,pn):
self.st.out(self.template_get_CDR_TypeCode, varname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_objref(self,type,pn):
self.st.out(self.template_get_CDR_object)
def get_CDR_sequence_len(self,pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
def get_CDR_union(self,type,pn):
if self.DEBUG:
print "XXX Union type =" , type, " pn = ",pn
print "XXX Union type.decl()" , type.decl()
print "XXX Union Scoped Name" , type.scopedName()
# If I am a typedef union {..}; node then find the union node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a union node
if self.DEBUG:
print "XXX Union ntype =" , ntype
sname = self.namespace(ntype, "_")
self.st.out(self.template_union_start, name=sname )
# Output a call to the union helper function so I can handle recursive unions also.
self.st.out(self.template_decode_union,name=sname)
self.st.out(self.template_union_end, name=sname )
#
# getCDR_hf()
#
# This takes a node, and tries to output the appropriate item for the
# hf array.
#
def getCDR_hf(self,type,desc,filter,hf_name="fred"):
pt = type.unalias().kind() # param CDR type
pn = hf_name # param name
if self.DEBUG:
print "XXX getCDR_hf: kind = " , pt
print "XXX getCDR_hf: name = " , pn
if pt == idltype.tk_ulong:
self.get_CDR_ulong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_void:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_short:
self.get_CDR_short_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_long:
self.get_CDR_long_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_float:
self.get_CDR_float_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_double:
self.get_CDR_double_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_fixed:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_boolean:
self.get_CDR_boolean_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_char:
self.get_CDR_char_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_octet:
self.get_CDR_octet_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_any:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_string:
self.get_CDR_string_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_enum:
self.get_CDR_enum_hf(pn, type, desc, filter, self.dissname)
elif pt == idltype.tk_struct:
pt = pt # no hf_ variables needed (should be already contained in struct members)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet_hf(type, pn, desc, filter, self.dissname)
else:
self.get_CDR_sequence_hf(type, pn, desc, filter, self.dissname)
elif pt == idltype.tk_objref:
pt = pt # no object specific hf_ variables used, use generic ones from giop dissector
elif pt == idltype.tk_array:
pt = pt # Supported elsewhere
elif pt == idltype.tk_union:
pt = pt # no hf_ variables needed (should be already contained in union members)
elif pt == idltype.tk_alias:
if self.DEBUG:
print "XXXXX Alias type hf XXXXX " , type
self.get_CDR_alias_hf(type,pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
#
# get_CDR_XXX_hf methods are here ..
#
#
def get_CDR_ulong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ulong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_short_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_short_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_long_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_long_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ushort_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ushort_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_float_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_float_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_double_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_double_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_longlong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_longlong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ulonglong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ulonglong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_boolean_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_boolean_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_char_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_char_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_octet_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_enum_hf(self,pn,type,desc,filter,diss):
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic_hf, valstringarray=sname,hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_string_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_string_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_wstring_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_wstring_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_u_octet4)
# self.addvar(self.c_seq)
def get_CDR_wchar_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_wchar_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_s_octet1)
# self.addvar(self.c_seq)
def get_CDR_TypeCode_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_TypeCode_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_octet_hf(self,type,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_sequence_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_hf(self,type,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_sequence_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_alias_hf(self,type,pn):
if self.DEBUG:
print "XXX get_CDR_alias_hf, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias_hf, type.decl() = " ,type.decl()
print "XXX get_CDR_alias_hf, type.decl().alias() = " ,type.decl().alias()
decl = type.decl() # get declarator object
if (decl.sizes()): # a typedef array
#indices = self.get_indices_from_sizes(decl.sizes())
#string_indices = '%i ' % indices # convert int to string
#self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
#self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
#self.addvar(self.c_i + pn + ";")
#self.st.inc_indent()
self.getCDR_hf(type.decl().alias().aliasType(), pn )
#self.st.dec_indent()
#self.st.out(self.template_get_CDR_array_end)
else: # a simple typdef
if self.DEBUG:
print "XXX get_CDR_alias_hf, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias_hf, type.decl() = " ,type.decl()
self.getCDR_hf(type, decl.identifier() )
#
# Code to generate Union Helper functions
#
# in: un - a union node
#
#
def genUnionHelper(self,un):
if self.DEBUG:
print "XXX genUnionHelper called"
print "XXX Union type =" , un
print "XXX Union type.switchType()" , un.switchType()
print "XXX Union Scoped Name" , un.scopedName()
sname = self.namespace(un, "_")
self.curr_sname = sname # update current opnode/exnode/stnode/unnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_union_helper_function_start, sname=sname, unname=un.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
st = un.switchType().unalias() # may be typedef switch type, so find real type
self.st.out(self.template_comment_union_code_start, uname=un.repoId() )
self.getCDR(st, sname + "_" + un.identifier());
# Depending on what kind of discriminant I come across (enum, integer, char,
# short, boolean), make sure I cast the return value of the get_XXX accessor
# to an appropriate value. Omniidl idlast.CaseLabel.value() accessor will
# return an integer, or an Enumerator object that is then converted to its
# integer equivalent.
#
#
# NOTE - May be able to skip some of this stuff, but leave it in for now -- FS
#
if (st.kind() == idltype.tk_enum):
std = st.decl()
self.st.out(self.template_comment_union_code_discriminant, uname=std.repoId() )
self.st.out(self.template_union_code_save_discriminant_enum, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_long):
self.st.out(self.template_union_code_save_discriminant_long, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_ulong):
self.st.out(self.template_union_code_save_discriminant_ulong, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_short):
self.st.out(self.template_union_code_save_discriminant_short, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_ushort):
self.st.out(self.template_union_code_save_discriminant_ushort, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_boolean):
self.st.out(self.template_union_code_save_discriminant_boolean, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_char):
self.st.out(self.template_union_code_save_discriminant_char, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
else:
print "XXX Unknown st.kind() = ", st.kind()
#
# Loop over all cases in this union
#
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
# get integer value, even if discriminant is
# an Enumerator node
if isinstance(cl.value(),idlast.Enumerator):
if self.DEBUG:
print "XXX clv.identifier()", cl.value().identifier()
print "XXX clv.repoId()", cl.value().repoId()
print "XXX clv.scopedName()", cl.value().scopedName()
# find index of enumerator in enum declaration
# eg: RED is index 0 in enum Colors { RED, BLUE, GREEN }
clv = self.valFromEnum(std,cl.value())
else:
clv = cl.value()
#print "XXX clv = ",clv
#
# if char, don't convert to int, but put inside single quotes so that it is understood by C.
# eg: if (disc == 'b')..
#
# TODO : handle \xxx chars generically from a function or table lookup rather than
# a whole bunch of "if" statements. -- FS
if (st.kind() == idltype.tk_char):
if (clv == '\n'): # newline
string_clv = "'\\n'"
elif (clv == '\t'): # tab
string_clv = "'\\t'"
else:
string_clv = "'" + clv + "'"
else:
string_clv = '%i ' % clv
#
# If default case, then skip comparison with discriminant
#
if not cl.default():
self.st.out(self.template_comment_union_code_label_compare_start, discname=un.identifier(),labelval=string_clv )
self.st.inc_indent()
else:
self.st.out(self.template_comment_union_code_label_default_start )
self.getCDR(uc.caseType(),sname + "_" + uc.declarator().identifier())
if not cl.default():
self.st.dec_indent()
self.st.out(self.template_comment_union_code_label_compare_end )
else:
self.st.out(self.template_comment_union_code_label_default_end )
self.st.dec_indent()
self.st.out(self.template_union_helper_function_end)
#
# Currently, get_CDR_alias is geared to finding typedefs
#
def get_CDR_alias(self,type,pn):
if self.DEBUG:
print "XXX get_CDR_alias, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias, type.decl() = " ,type.decl()
print "XXX get_CDR_alias, type.decl().alias() = " ,type.decl().alias()
decl = type.decl() # get declarator object
if (decl.sizes()): # a typedef array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.decl().alias().aliasType(), pn )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else: # a simple typdef
if self.DEBUG:
print "XXX get_CDR_alias, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias, type.decl() = " ,type.decl()
self.getCDR(type, pn )
#
# Handle structs, including recursive
#
def get_CDR_struct(self,type,pn):
# If I am a typedef struct {..}; node then find the struct node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a struct node
sname = self.namespace(ntype, "_")
self.st.out(self.template_structure_start, name=sname )
# Output a call to the struct helper function so I can handle recursive structs also.
self.st.out(self.template_decode_struct,name=sname)
self.st.out(self.template_structure_end, name=sname )
#
# genStructHelper()
#
# Generate private helper functions to decode a struct
#
# in: stnode ( a struct node)
#
def genStructHelper(self,st):
if self.DEBUG:
print "XXX genStructHelper"
sname = self.namespace(st, "_")
self.curr_sname = sname # update current opnode/exnode/stnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_struct_helper_function_start, sname=sname, stname=st.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
for m in st.members():
for decl in m.declarators():
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_struct_helper_function_end)
#
# Generate code to access a sequence of a type
#
def get_CDR_sequence(self,type, pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn )
self.st.out(self.template_get_CDR_sequence_loop_start, seqname=pn )
self.addvar(self.c_i_lim + pn + ";" )
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.unalias().seqType(), pn ) # and start all over with the type
self.st.dec_indent()
self.st.out(self.template_get_CDR_sequence_loop_end)
#
# Generate code to access a sequence of octet
#
def get_CDR_sequence_octet(self,type, pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
self.st.out(self.template_get_CDR_sequence_octet, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar("gchar * binary_seq_" + pn + ";")
self.addvar("gchar * text_seq_" + pn + ";")
#
# namespace()
#
# in - op node
#
# out - scoped operation name, using sep character instead of "::"
#
# eg: Penguin::Echo::echoWString => Penguin_Echo_echoWString if sep = "_"
#
#
def namespace(self,node,sep):
sname = string.replace(idlutil.ccolonName(node.scopedName()), '::', sep)
#print "XXX namespace: sname = " + sname
return sname
#
# generate code for plugin initialisation
#
def gen_plugin_register(self):
self.st.out(self.template_plugin_register, description=self.description, protocol_name=self.protoname, dissector_name=self.dissname)
#
# generate register_giop_user_module code, and register only
# unique interfaces that contain operations. Also output
# a heuristic register in case we want to use that.
#
# TODO - make this a command line option
#
# -e explicit
# -h heuristic
#
def gen_proto_reg_handoff(self, oplist):
self.st.out(self.template_proto_reg_handoff_start, dissector_name=self.dissname)
self.st.inc_indent()
for iname in self.get_intlist(oplist):
self.st.out(self.template_proto_reg_handoff_body, dissector_name=self.dissname, protocol_name=self.protoname, interface=iname )
self.st.out(self.template_proto_reg_handoff_heuristic, dissector_name=self.dissname, protocol_name=self.protoname)
self.st.dec_indent()
self.st.out(self.template_proto_reg_handoff_end)
#
# generate hf_ array elements for operation, attribute, enum, struct and union lists
#
def genOp_hf(self,op):
sname = self.namespace(op, "_")
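# reduce the scoped name to its interface component for use in the filter
# string, eg: Penguin_Echo_echoWString -> Echo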
opname = sname[string.find(sname, "_")+1:]
opname = opname[:string.find(opname, "_")]
rt = op.returnType()
if (rt.kind() != idltype.tk_void):
if (rt.kind() == idltype.tk_alias): # a typdef return val possibly ?
self.getCDR_hf(rt, rt.name(),\
opname + "." + op.identifier() + ".return", sname + "_return")
else:
self.getCDR_hf(rt, "Return value",\
opname + "." + op.identifier() + ".return", sname + "_return")
for p in op.parameters():
self.getCDR_hf(p.paramType(), p.identifier(),\
opname + "." + op.identifier() + "." + p.identifier(), sname + "_" + p.identifier())
def genAt_hf(self,at):
for decl in at.declarators():
sname = self.namespace(decl, "_")
atname = sname[string.find(sname, "_")+1:]
atname = atname[:string.find(atname, "_")]
self.getCDR_hf(at.attrType(), decl.identifier(),\
atname + "." + decl.identifier() + ".get", "get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.getCDR_hf(at.attrType(), decl.identifier(),\
atname + "." + decl.identifier() + ".set", "set" + "_" + sname + "_" + decl.identifier())
def genSt_hf(self,st):
sname = self.namespace(st, "_")
stname = sname[string.find(sname, "_")+1:]
stname = stname[:string.find(stname, "_")]
for m in st.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), st.identifier() + "_" + decl.identifier(),\
st.identifier() + "." + decl.identifier(), sname + "_" + decl.identifier())
def genEx_hf(self,ex):
sname = self.namespace(ex, "_")
exname = sname[string.find(sname, "_")+1:]
exname = exname[:string.find(exname, "_")]
for m in ex.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), ex.identifier() + "_" + decl.identifier(),\
exname + "." + ex.identifier() + "_" + decl.identifier(), sname + "_" + decl.identifier())
def genUnion_hf(self,un):
sname = self.namespace(un, "_")
unname = sname[:string.rfind(sname, "_")]
unname = string.replace(unname, "_", ".")
self.getCDR_hf(un.switchType().unalias(), un.identifier(),\
unname + "." + un.identifier(), sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.getCDR_hf(uc.caseType(), un.identifier() + "_" + uc.declarator().identifier(),\
unname + "." + un.identifier() + "." + uc.declarator().identifier(),\
sname + "_" + uc.declarator().identifier())
#
# generate proto_register_<protoname> code,
#
# in - oplist[], atlist[], stlist[], unlist[]
#
def gen_proto_register(self, oplist, atlist, stlist, unlist):
self.st.out(self.template_proto_register_start, dissector_name=self.dissname)
#operation specific filters
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOp_hf(op)
#attribute filters
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAt_hf(at)
#struct filters
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
if (st.members()): # only if has members
self.genSt_hf(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if (ex.members()): # only if has members
self.genEx_hf(ex)
# Union filters
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnion_hf(un)
self.st.out(self.template_proto_register_end, description=self.description, protocol_name=self.protoname, dissector_name=self.dissname)
#
# in - oplist[]
#
# out - a list of unique interface names. This will be used in
# register_giop_user_module(dissect_giop_auto, "TEST IDL", "Penguin/Echo" ); so the operation
# name must be removed from the scope. And we also only want unique interfaces.
#
def get_intlist(self,oplist):
int_hash = {} # holds a hash of unique interfaces
for op in oplist:
sc = op.scopedName() # eg: penguin,tux,bite
sc1 = sc[:-1] # drop last entry
sn = idlutil.slashName(sc1) # penguin/tux
if not int_hash.has_key(sn):
int_hash[sn] = 0; # dummy val, but at least key is unique
ret = int_hash.keys()
ret.sort()
return ret
#
# in - oplist[]
#
# out - a list of unique exception nodes. This will be used
# to generate dissect_exception_XXX functions.
#
def get_exceptionList(self,oplist):
ex_hash = {} # holds a hash of unique exceptions.
for op in oplist:
for ex in op.raises():
if not ex_hash.has_key(ex):
ex_hash[ex] = 0; # dummy val, but at least key is unique
if self.DEBUG:
print "XXX Exception = " + ex.identifier()
ret = ex_hash.keys()
ret.sort()
return ret
#
# Simple function to take a list of array sizes and find the
# total number of elements
#
#
# eg: temp[4][3] = 12 elements
#
def get_indices_from_sizes(self,sizelist):
val = 1;
for i in sizelist:
val = val * i
return val
#
# Determine how many octets contain the requested number
# of digits for a "fixed" IDL type "on the wire"
#
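# eg: dig_to_len(5) => 3 and dig_to_len(6) => 4, since digits are packed
# two per octet with the sign taking the final half-octet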
def dig_to_len(self,dignum):
return (dignum/2) + 1
#
# Output some TODO comment
#
def genTODO(self,message):
self.st.out(self.template_debug_TODO, message=message)
#
# Output some WARNING comment
#
def genWARNING(self,message):
self.st.out(self.template_debug_WARNING, message=message)
#
# Templates for C code
#
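# Note: the @xxx@ tokens in these templates are substituted with the
# matching keyword arguments passed to self.st.out() when a template is emitted.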
template_helper_function_comment = """\
/*
* @repoid@
*/"""
template_helper_function_vars_start = """\
/* Operation specific Variable declarations Begin */"""
template_helper_function_vars_end = """\
/* Operation specific Variable declarations End */
(void)item; /* Avoid coverity param_set_but_unused parse warning */
"""
template_helper_function_start = """\
static void
decode_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{"""
template_helper_function_end = """\
}
"""
#
# proto_reg_handoff() templates
#
template_proto_reg_handoff_start = """\
/* register me as handler for these interfaces */
void proto_reg_handoff_giop_@dissector_name@(void)
{"""
template_proto_reg_handoff_body = """\
/* Register for Explicit Dissection */
register_giop_user_module(dissect_@dissector_name@, \"@protocol_name@\", \"@interface@\", proto_@dissector_name@ ); /* explicit dissector */
"""
template_proto_reg_handoff_heuristic = """\
/* Register for Heuristic Dissection */
register_giop_user(dissect_@dissector_name@, \"@protocol_name@\" ,proto_@dissector_name@); /* heuristic dissector */
"""
template_proto_reg_handoff_end = """\
}
"""
#
# Prototype
#
template_prototype = """
void proto_register_giop_@dissector_name@(void);
void proto_reg_handoff_giop_@dissector_name@(void);"""
#
# Initialize the protocol
#
template_protocol = """
/* Initialise the protocol and subtree pointers */
static int proto_@dissector_name@ = -1;
static gint ett_@dissector_name@ = -1;
"""
#
# Initialize the boundary Alignment
#
template_init_boundary = """
/* Initialise the initial Alignment */
static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */"""
#
# plugin_register and plugin_reg_handoff templates
#
template_plugin_register = """
#if 0
WS_DLL_PUBLIC_DEF void
plugin_register(void)
{
if (proto_@dissector_name@ == -1) {
proto_register_giop_@dissector_name@();
}
}
WS_DLL_PUBLIC_DEF void
plugin_reg_handoff(void){
proto_register_handoff_giop_@dissector_name@();
}
#endif
"""
#
# proto_register_<dissector name>(void) templates
#
template_proto_register_start = """
/* Register the protocol with Wireshark */
void proto_register_giop_@dissector_name@(void)
{
/* setup list of header fields */
static hf_register_info hf[] = {
/* field that indicates the currently ongoing request/reply exchange */
{&hf_operationrequest, {"Request_Operation","giop-@[email protected]_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_proto_register_end = """
};
static ei_register_info ei[] = {
{ &ei_@dissector_name@_unknown_giop_msg, { "giop-@[email protected]_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
{ &ei_@dissector_name@_unknown_exception, { "giop-@[email protected]_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
{ &ei_@dissector_name@_unknown_reply_status, { "giop-@[email protected]_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
};
/* setup protocol subtree array */
static gint *ett[] = {
&ett_@dissector_name@,
};
expert_module_t* expert_@dissector_name@;
/* Register the protocol name and description */
proto_@dissector_name@ = proto_register_protocol(\"@description@\" , \"@protocol_name@\", \"giop-@dissector_name@\" );
proto_register_field_array(proto_@dissector_name@, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_@dissector_name@ = expert_register_protocol(proto_@dissector_name@);
expert_register_field_array(expert_@dissector_name@, ei, array_length(ei));
}
"""
template_proto_register_op_filter_comment = """\
/* Operation filters */"""
template_proto_register_at_filter_comment = """\
/* Attribute filters */"""
template_proto_register_st_filter_comment = """\
/* Struct filters */"""
template_proto_register_ex_filter_comment = """\
/* User exception filters */"""
template_proto_register_un_filter_comment = """\
/* Union filters */"""
template_proto_register_ei_filters = """\
/* Expert info filters */
static expert_field ei_@dissector_name@_unknown_giop_msg = EI_INIT;
static expert_field ei_@dissector_name@_unknown_exception = EI_INIT;
static expert_field ei_@dissector_name@_unknown_reply_status = EI_INIT;
"""
#
# template for delegation code
#
template_op_delegate_code = """\
if (strcmp(operation, "@opname@") == 0
&& (!idlname || strcmp(idlname, \"@interface@\") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_@sname@(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
#
# Templates for the helper functions
#
#
#
template_helper_switch_msgtype_start = """\
switch(header->message_type) {"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_msgtype_end = """\
} /* switch(header->message_type) */"""
template_helper_switch_msgtype_request_start = """\
case Request:"""
template_helper_switch_msgtype_request_end = """\
break;"""
template_helper_switch_msgtype_reply_start = """\
case Reply:"""
template_helper_switch_msgtype_reply_no_exception_start = """\
case NO_EXCEPTION:"""
template_helper_switch_msgtype_reply_no_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_user_exception_start = """\
case USER_EXCEPTION:"""
template_helper_switch_msgtype_reply_user_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_default_start = """\
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_exception, "Unknown exception %d", header->rep_status);"""
template_helper_switch_msgtype_reply_default_end = """\
break;"""
template_helper_switch_msgtype_reply_end = """\
break;"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_rep_status_start = """\
switch(header->rep_status) {"""
template_helper_switch_rep_status_default_start = """\
default:
/* Unknown Reply Status */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_reply_status, "Unknown reply status %d", header->rep_status);"""
template_helper_switch_rep_status_default_end = """\
break;"""
template_helper_switch_rep_status_end = """\
} /* switch(header->rep_status) */
break;"""
#
# Templates for get_CDR_xxx accessors
#
template_get_CDR_ulong = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_short = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_short(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_void = """\
/* Function returns void */
"""
template_get_CDR_long = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ushort = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_float = """\
proto_tree_add_float(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_float(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_double = """\
proto_tree_add_double(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_double(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_longlong = """\
proto_tree_add_int64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_long_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ulonglong = """\
proto_tree_add_uint64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_ulong_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_boolean = """\
proto_tree_add_boolean(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
"""
template_get_CDR_char = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_char(tvb,offset));
"""
template_get_CDR_octet = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_octet(tvb,offset));
"""
template_get_CDR_any = """\
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_fixed = """\
get_CDR_fixed(tvb, pinfo, item, &seq, offset, @digits@, @scale@);
proto_tree_add_text(tree,tvb,*offset-@length@, @length@, "@varname@ < @digits@, @scale@> = %s",seq);
"""
template_get_CDR_enum_symbolic = """\
u_octet4 = get_CDR_enum(tvb,offset,stream_is_big_endian, boundary);
/* coverity[returned_pointer] */
item = proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, u_octet4);
"""
template_get_CDR_string = """\
giop_add_CDR_string(tree, tvb, offset, stream_is_big_endian, boundary, hf_@hfname@);
"""
template_get_CDR_wstring = """\
u_octet4 = get_CDR_wstring(tvb, &seq, offset, stream_is_big_endian, boundary, header);
proto_tree_add_text(tree,tvb,*offset-u_octet4,u_octet4,"@varname@ (%u) = %s",
u_octet4, (u_octet4 > 0) ? seq : \"\");
"""
template_get_CDR_wchar = """\
s_octet1 = get_CDR_wchar(tvb, &seq, offset, header);
if (tree) {
if (s_octet1 > 0)
proto_tree_add_text(tree,tvb,*offset-1-s_octet1,1,"length = %u",s_octet1);
if (s_octet1 < 0)
s_octet1 = -s_octet1;
if (s_octet1 > 0)
proto_tree_add_text(tree,tvb,*offset-s_octet1,s_octet1,"@varname@ = %s",seq);
}
"""
template_get_CDR_TypeCode = """\
u_octet4 = get_CDR_typeCode(tvb, pinfo, tree, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_object = """\
get_CDR_object(tvb, pinfo, tree, offset, stream_is_big_endian, boundary);
"""
template_get_CDR_sequence_length = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
/* coverity[returned_pointer] */
item = proto_tree_add_uint(tree, hf_@seqname@, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_loop_start = """\
for (i_@seqname@=0; i_@seqname@ < u_octet4_loop_@seqname@; i_@seqname@++) {
"""
template_get_CDR_sequence_loop_end = """\
}
"""
template_get_CDR_sequence_octet = """\
if (u_octet4_loop_@seqname@ > 0 && tree) {
get_CDR_octet_seq(tvb, &binary_seq_@seqname@, offset,
u_octet4_loop_@seqname@);
text_seq_@seqname@ = make_printable_string(binary_seq_@seqname@,
u_octet4_loop_@seqname@);
proto_tree_add_text(tree, tvb, *offset - u_octet4_loop_@seqname@,
u_octet4_loop_@seqname@, \"@seqname@: %s\", text_seq_@seqname@);
}
"""
template_get_CDR_array_start = """\
for (i_@aname@=0; i_@aname@ < @aval@; i_@aname@++) {
"""
template_get_CDR_array_end = """\
}
"""
template_get_CDR_array_comment = """\
/* Array: @aname@[ @asize@] */
"""
template_structure_start = """\
/* Begin struct \"@name@\" */"""
template_structure_end = """\
/* End struct \"@name@\" */"""
template_union_start = """\
/* Begin union \"@name@\" */"""
template_union_end = """\
/* End union \"@name@\" */"""
#
# Templates for get_CDR_xxx_hf accessors
#
template_get_CDR_ulong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_short_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_long_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ushort_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_float_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_FLOAT,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_double_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_DOUBLE,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_longlong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ulonglong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_boolean_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},"""
template_get_CDR_char_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_enum_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_string_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wstring_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wchar_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_TypeCode_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_hf = """\
{&hf_@hfname@, {"Seq length of @descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
#
# Program Header Template
#
template_Header = """\
/* packet-@[email protected]
*
* Routines for IDL dissection
*
* Autogenerated from idl2wrs
* Copyright 2001 Frank Singleton <frank.singleton@@ericsson.com>
*/
"""
template_wireshark_copyright = """\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs
* Copyright 1999 - 2012 Gerald Combs
*/
"""
#
# GPL Template
#
template_GPL = """\
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
"""
#
# Modelines Template
#
template_Modelines = """\
/*
* Editor modelines
*
* Local Variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* ex: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/"""
#
# Includes template
#
template_Includes = """\
#include "config.h"
#include <gmodule.h>
#include <string.h>
#include <glib.h>
#include <epan/packet.h>
#include <epan/proto.h>
#include <epan/dissectors/packet-giop.h>
#include <epan/expert.h>
#ifdef _MSC_VER
/* disable warning: "unreference local variable" */
#pragma warning(disable:4101)
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif"""
#
# Main dissector entry templates
#
template_main_dissector_start = """\
/*
* Called once we accept the packet as being for us; it sets the
* Protocol and Info columns and creates the top-level protocol
* tree item.
*/
static proto_tree *
start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
{
proto_item *ti = NULL;
proto_tree *tree = NULL; /* init later, inside if(tree) */
col_set_str(pinfo->cinfo, COL_PROTOCOL, \"@disprot@\");
/*
* Do not clear COL_INFO, as nothing is being written there by
* this dissector yet. So leave it as is from the GIOP dissector.
* TODO: add something useful to COL_INFO
* col_clear(pinfo->cinfo, COL_INFO);
*/
if (ptree) {
ti = proto_tree_add_item(ptree, proto_@dissname@, tvb, *offset, -1, ENC_NA);
tree = proto_item_add_subtree(ti, ett_@dissname@);
}
return tree;
}
static proto_item*
process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
{
proto_item *pi;
if(header->message_type == Reply) {
/* fill-up info column */
col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
}
/* fill-up the field */
pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
PROTO_ITEM_SET_GENERATED(pi);
return pi;
}
static gboolean
dissect_@dissname@(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
{
proto_item *item _U_;
proto_tree *tree _U_;
gboolean stream_is_big_endian = is_big_endian(header); /* get endianess */
/* If we have a USER Exception, then decode it and return */
if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
}
"""
template_main_dissector_switch_msgtype_start = """\
switch(header->message_type) {
"""
template_main_dissector_switch_msgtype_start_request_reply = """\
case Request:
case Reply:
"""
template_main_dissector_switch_msgtype_end_request_reply = """\
break;
"""
template_main_dissector_switch_msgtype_all_other_msgtype = """\
case CancelRequest:
case LocateRequest:
case LocateReply:
case CloseConnection:
case MessageError:
case Fragment:
return FALSE; /* not handled yet */
default:
return FALSE; /* not handled yet */
} /* switch */
"""
template_main_dissector_end = """\
return FALSE;
} /* End of main dissector */
"""
#-------------------------------------------------------------#
# Exception handling templates #
#-------------------------------------------------------------#
template_exception_helpers_start = """\
/* Begin Exception Helper Functions */
"""
template_exception_helpers_end = """\
/* End Exception Helper Functions */
"""
#
# template for Main delegator for exception handling
#
template_main_exception_delegator_start = """\
/*
* Main delegator for exception handling
*
*/
static gboolean
decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_tree *tree _U_;
if (!header->exception_id)
return FALSE;
"""
#
# template for exception delegation code body
#
template_ex_delegate_code = """\
if (strcmp(header->exception_id, "@exname@") == 0) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_ex_@sname@(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian); /* @exname@ */
return TRUE;
}
"""
#
# End of Main delegator for exception handling
#
template_main_exception_delegator_end = """
return FALSE; /* user exception not found */
}
"""
#
# template for exception helper code
#
template_exception_helper_function_start = """\
/* Exception = @exname@ */
static void
decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item *item _U_;
"""
template_exception_helper_function_end = """\
}
"""
#
# template for struct helper code
#
template_struct_helper_function_start = """\
/* Struct = @stname@ */
static void
decode_@sname@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_struct_helper_function_end = """\
}
"""
#
# template for union helper code
#
template_union_helper_function_start = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_union_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Value string templates #
#-------------------------------------------------------------#
template_value_string_start = """\
static const value_string @valstringname@[] = {
"""
template_value_string_entry = """\
{ @intval@, \"@description@\" },"""
template_value_string_end = """\
{ 0, NULL },
};
"""
#-------------------------------------------------------------#
# Enum handling templates #
#-------------------------------------------------------------#
template_comment_enums_start = """\
/*
* IDL Enums Start
*/
"""
template_comment_enums_end = """\
/*
* IDL Enums End
*/
"""
template_comment_enum_comment = """\
/*
* Enum = @ename@
*/"""
#-------------------------------------------------------------#
# Attribute handling templates #
#-------------------------------------------------------------#
template_comment_attributes_start = """\
/*
* IDL Attributes Start
*/
"""
#
# get/set accessor method names are language mapping dependent.
#
template_attributes_declare_Java_get = """static const char get_@sname@_at[] = \"_get_@atname@\" ;"""
template_attributes_declare_Java_set = """static const char set_@sname@_at[] = \"_set_@atname@\" ;"""
template_comment_attributes_end = """
/*
* IDL Attributes End
*/
"""
#
# template for Attribute delegation code
#
# Note: _get_xxx() should only be called for Reply with NO_EXCEPTION
# Note: _set_xxx() should only be called for Request
#
#
template_at_delegate_code_get = """\
if (strcmp(operation, get_@sname@_at) == 0 && (header->message_type == Reply) && (header->rep_status == NO_EXCEPTION) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_get_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_at_delegate_code_set = """\
if (strcmp(operation, set_@sname@_at) == 0 && (header->message_type == Request) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_set_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_attribute_helpers_start = """\
/* Begin Attribute Helper Functions */
"""
template_attribute_helpers_end = """\
/* End Attribute Helper Functions */
"""
#
# template for attribute helper code
#
template_attribute_helper_function_start = """\
/* Attribute = @atname@ */
static void
decode_@sname@_at(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_attribute_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Debugging templates #
#-------------------------------------------------------------#
#
# Template for outputting TODO "C" comments
# so the user knows I need to improve something.
#
template_debug_TODO = """\
/* TODO - @message@ */
"""
#
# Template for outputting WARNING "C" comments
# so the user knows if I have found a problem.
#
template_debug_WARNING = """\
/* WARNING - @message@ */
"""
#-------------------------------------------------------------#
# IDL Union templates #
#-------------------------------------------------------------#
template_comment_union_code_start = """\
/*
* IDL Union Start - @uname@
*/
"""
template_comment_union_code_end = """
/*
* IDL union End - @uname@
*/
"""
template_comment_union_code_discriminant = """\
/*
* IDL Union - Discriminant - @uname@
*/
"""
#
# Cast union types to something appropriate
# Enum value cast to guint32, all others cast to gint32
# as omniidl accessor returns integer or Enum.
#
template_union_code_save_discriminant_enum = """\
disc_s_@discname@ = (gint32) u_octet4; /* save Enum Value discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_long = """\
disc_s_@discname@ = (gint32) s_octet4; /* save gint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ulong = """\
disc_s_@discname@ = (gint32) u_octet4; /* save guint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_short = """\
disc_s_@discname@ = (gint32) s_octet2; /* save gint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ushort = """\
disc_s_@discname@ = (gint32) u_octet2; /* save guint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_char = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint1 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_boolean = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint1 discriminant and cast to gint32 */
"""
template_comment_union_code_label_compare_start = """\
if (disc_s_@discname@ == @labelval@) {
"""
template_comment_union_code_label_compare_end = """\
return; /* End Compare for this discriminant type */
}
"""
template_comment_union_code_label_default_start = """
/* Default Union Case Start */
"""
template_comment_union_code_label_default_end = """\
/* Default Union Case End */
"""
#
# Templates for function prototypes.
# This is used in genDeclares() for declaring function prototypes
# for structs and union helper functions.
#
template_hf_operations = """
static int hf_operationrequest = -1;/* Request_Operation field */
"""
template_hf = """\
static int hf_@name@ = -1;"""
template_prototype_start_dissecting = """
static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
"""
template_prototype_struct_start = """\
/* Struct prototype declaration Start */
"""
template_prototype_struct_end = """\
/* Struct prototype declaration End */
"""
template_prototype_struct_body = """\
/* Struct = @stname@ */
static void decode_@name@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_struct = """\
decode_@name@_st(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);"""
template_prototype_union_start = """\
/* Union prototype declaration Start */"""
template_prototype_union_end = """\
/* Union prototype declaration End */"""
template_prototype_union_body = """
/* Union = @unname@ */
static void decode_@name@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_union = """
decode_@name@_un(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
"""
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
| gpl-2.0 | 8,077,415,331,128,886,000 | 31.867763 | 223 | 0.592677 | false |
ngageoint/scale | scale/storage/migrations/0008_auto_20170609_1443.py | 1 | 1859 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 14:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('batch', '0002_auto_20170412_1225'),
('recipe', '0018_recipefile_recipe_input'),
('storage', '0007_auto_20170412_1225'),
]
operations = [
migrations.AddField(
model_name='scalefile',
name='batch',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='batch.Batch'),
),
migrations.AddField(
model_name='scalefile',
name='job_output',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='scalefile',
name='recipe',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='recipe.Recipe'),
),
migrations.AddField(
model_name='scalefile',
name='recipe_job',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='scalefile',
name='recipe_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='recipe.RecipeType'),
),
migrations.AddField(
model_name='scalefile',
name='source_ended',
field=models.DateTimeField(blank=True, db_index=True, null=True),
),
migrations.AddField(
model_name='scalefile',
name='source_started',
field=models.DateTimeField(blank=True, db_index=True, null=True),
),
]
| apache-2.0 | -3,557,771,446,890,161,700 | 34.075472 | 128 | 0.586337 | false |
dganbold/reinforcement_learning | NeuralQLearning/cartpole_learn.py | 1 | 3105 | #
#
import gym
from NeuralQLearner import *
#
#
if __name__ == "__main__":
# ----------------------------------------
# Define parameters for greedy policy
epsilon = 0.5 # exploration
epsilon_floor = 0.1
exploration_decay = 0.998
# Define parameters for Q-learning
alpha = 0.2
gamma = 0.98
epoch = 1000
max_steps = 200
max_memory = max_steps*10
batch_size = int(32)
# ----------------------------------------
# Actions
# Type: Discrete(2)
# Num | Observation
# 0 | Push cart to the left
# 1 | Push cart to the right
n_action = 2
actions = np.array([0, 1])
# ----------------------------------------
# Observation
# Type: Box(4)
# Num | Observation | Min | Max
# 0 | Cart Position | -2.4 | 2.4
# 1 | Cart Velocity | -Inf | Inf
# 2 | Pole Angle | -41.8 | 41.8
# 3 | Pole Velocity | -Inf | Inf
n_input = 4
observation = []
# ----------------------------------------
# Define environment/game
env_name = 'CartPole-v0'
env = gym.make(env_name)
# ----------------------------------------
# Initialize Neural Q-Learn object
AI = NeuralQLearner(n_input, actions, batch_size, epsilon, alpha, gamma)
#AI.plotQ()
# Initialize experience replay object
exp = Experience(max_memory)
# ----------------------------------------
# Train
for e in range(epoch):
# Get initial input
observation = env.reset()
observation_init = observation
# Training for single episode
step = 0
total_reward = 0
game_over = False
while (not game_over):
observation_capture = observation
#env.render()
# Epsilon-Greedy policy
action = AI.eGreedy(observation)
# Apply action, get rewards and new state
observation, reward, game_over, info = env.step(action)
# Store experience
# input[i] = [[state_t, action_t, reward_t, state_t+1], game_over?]
exp.memorize([observation_capture, action, reward, observation], game_over)
# Recall and replay experience
miniBatch = exp.recall(batch_size)
# Refinement of model
if len(miniBatch) == batch_size:
AI.train_Q_network(miniBatch)
#
step += 1
total_reward += reward
# End of the single episode training
print('#TRAIN Episode:%3i, Reward:%7.3f, Steps:%3i, Exploration:%1.4f'%(e, total_reward, step, AI.epsilon))
# Update exploration
AI.epsilon *= exploration_decay
AI.epsilon = max(epsilon_floor, AI.epsilon)
# Plot
#AI.plotQupdate()
#
# ----------------------------------------
# Export trained Neural-Net
AI.exportNetwork('models/%s_Q_network_epoch_%d' % (env_name, epoch))
# ----------------------------------------
print("Done!.")
# Some delay
raw_input('Press enter to terminate:')
# Close environment
env.close()
# EOF
| mit | -7,300,266,675,279,254,000 | 30.683673 | 115 | 0.498229 | false |
osantana/pactum | tests/test_route.py | 1 | 1759 | import pytest
from pactum.route import Route
def test_basic_route():
route = Route('/test/', actions=[])
assert route.path == '/test/'
assert len(route.actions) == 0
assert route.parameters == []
def test_route_class_definition():
class TestRoute(Route):
path = '/test/'
actions = []
route = TestRoute()
assert route.path == '/test/'
assert len(route.actions) == 0
assert route.parameters == []
def test_prefer_parameter_to_class_definition(action):
class TestRoute(Route):
path = '/test/'
actions = []
route = TestRoute(
path="/test_by_param/",
actions=[action]
)
assert len(route.actions) == 1
assert route.path == "/test_by_param/"
assert route.actions[0].parent == route
def test_fail_route_with_no_path(resource):
with pytest.raises(TypeError):
Route(actions=[])
def test_route_with_parameters():
route = Route('/test/{test-id}', actions=[])
assert route.path == '/test/{test-id}'
assert route.parameters == ['test-id']
def test_route_with_multiple_parameters():
route = Route('/test/{test_id}/{test.slug}/{test-uuid}', actions=[])
assert route.path == '/test/{test_id}/{test.slug}/{test-uuid}'
assert route.parameters == ['test_id', 'test.slug', 'test-uuid']
def test_route_with_querystrings(querystring):
route = Route('/test/', querystrings=[querystring])
assert route.path == '/test/'
assert len(route.querystrings) == 1
def test_class_defined_route_with_querystrings(querystring):
class TestRoute(Route):
path = '/test/'
querystrings = [querystring]
route = TestRoute()
assert route.path == '/test/'
assert len(route.querystrings) == 1
| gpl-3.0 | 8,277,605,372,824,144,000 | 23.09589 | 72 | 0.621944 | false |
elifesciences/builder | src/tests/base.py | 1 | 5983 | from datetime import datetime
import json
import logging
import os
from os.path import join
from random import randint
from subprocess import check_output
# pylint: disable-msg=import-error
from unittest2 import TestCase
from buildercore.command import settings
from buildercore import config, project
from buildercore import bootstrap, cfngen, lifecycle, core
import cfn
import imp
# import pytest # see ../conftest.py
LOG = logging.getLogger(__name__)
def generate_environment_name():
"""to avoid multiple people clashing while running their builds
and new builds clashing with older ones"""
who = check_output('whoami').rstrip().decode()
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
return "-".join([who, now, str(randint(1, 1000000))]) # ll: luke-20180420022437-51631
this_dir = os.path.realpath(os.path.dirname(__file__))
fixtures_dir = join(this_dir, 'fixtures')
def switch_in_test_settings(projects_files=None):
if not projects_files:
projects_files = ['src/tests/fixtures/projects/']
config.PROJECTS_FILES = projects_files
# lsh@2021-06-22: may not be necessary any more.
# project_map now returns a deepcopy of cached results.
project._project_map.cache_clear()
config.app.cache_clear()
def switch_out_test_settings():
# clear any caches and reload the config module
project._project_map.cache_clear()
imp.reload(config)
def test_project_list():
switch_in_test_settings()
return project.aws_projects().keys()
def elife_project_list():
switch_out_test_settings()
return project.aws_projects().keys()
class BaseCase(TestCase):
maxDiff = None
def __init__(self, *args, **kwargs):
super(BaseCase, self).__init__(*args, **kwargs)
switch_in_test_settings()
self.fixtures_dir = fixtures_dir
# TODO: python2 warning
# pylint: disable=E1101
def assertCountEqual(self, *args):
parent = super(BaseCase, self)
if not hasattr(parent, 'assertCountEqual'):
self.assertItemsEqual(*args)
else:
parent.assertCountEqual(*args)
    # pylint: disable=invalid-name
def assertAllPairsEqual(self, fn, pair_lst):
"given a function and a list of (given, expected) asserts all fn(given) == expected"
for given, expected in pair_lst:
with self.subTest(given=given):
actual = fn(given)
self.assertEqual(expected, actual, "failed, %r != %r" % (expected, actual))
    # pylint: disable=invalid-name
    def assertAllTrue(self, fn, lst):
        "given a function and a list of values, asserts all fn(value) are true"
for x in lst:
with self.subTest(given=x):
self.assertTrue(fn(x), "failed, fn(%s) != True" % x)
    # pylint: disable=invalid-name
    def assertAllNotTrue(self, fn, lst):
        "given a function and a list of values, asserts all fn(value) are NOT true"
        for x in lst:
            with self.subTest(given=x):
                self.assertFalse(fn(x), "failed, fn(%s) != False" % x)
class BaseIntegrationCase(BaseCase):
@classmethod
def set_up_stack(cls, project, explicitly_start=False):
switch_in_test_settings()
# to re-use an existing stack, ensure cls.reuse_existing_stack is True
# this will read the instance name from a temporary file (if it exists) and
# look for that, creating it if doesn't exist yet
# also ensure cls.cleanup is False so the instance isn't destroyed after tests complete
cls.reuse_existing_stack = config.TWI_REUSE_STACK
cls.cleanup = config.TWI_CLEANUP
cls.stacknames = []
cls.environment = generate_environment_name()
# cls.temp_dir, cls.rm_temp_dir = utils.tempdir()
# debugging only, where we keep an instance up between processes
cls.state, cls.statefile = {}, '/tmp/.open-test-instances.txt'
if cls.reuse_existing_stack and os.path.exists(cls.statefile):
# evidence of a previous instance and we've been told to re-use old instances
old_state = json.load(open(cls.statefile, 'r'))
old_env = old_state.get('environment')
# test if the old stack still exists ...
if old_env and core.describe_stack(project + "--" + old_env, allow_missing=True):
cls.state = old_state
cls.environment = old_env
else:
# nope. old statefile is bogus, delete it
os.unlink(cls.statefile)
cls.state['environment'] = cls.environment # will be saved later
with settings(abort_on_prompts=True):
cls.stackname = '%s--%s' % (project, cls.environment)
cls.stacknames.append(cls.stackname)
if cls.cleanup:
LOG.info("ensure_destroyed %s", cls.stackname)
cfn.ensure_destroyed(cls.stackname)
cls.context, cls.cfn_template, _ = cfngen.generate_stack(project, stackname=cls.stackname)
cls.region = cls.context['aws']['region']
LOG.info("create_stack %s", cls.stackname)
bootstrap.create_stack(cls.stackname)
if explicitly_start:
LOG.info("start %s", cls.stackname)
lifecycle.start(cls.stackname)
@classmethod
def tear_down_stack(cls):
try:
if cls.reuse_existing_stack:
json.dump(cls.state, open(cls.statefile, 'w'))
if cls.cleanup:
for stackname in cls.stacknames:
LOG.info("ensure_destroyed %s", stackname)
cfn.ensure_destroyed(stackname)
# cls.rm_temp_dir()
# cls.assertFalse(os.path.exists(cls.temp_dir), "failed to delete path %r in tearDown" % cls.temp_dir)
except BaseException:
# important, as anything in body will silently fail
LOG.exception('uncaught error tearing down test class')
| mit | -4,768,471,650,384,098,000 | 37.6 | 114 | 0.632459 | false |
ondrejch/FSM | scripts/mk1/writecore.py | 1 | 1343 | #!/usr/bin/env python3
#
# Write the FastDrum Serpent deck
# Ondrej Chvala, [email protected]
# 2016-08-02
import drumdeck
import os
import argparse
# Serpent deck file name
filename = "ffrr.inp"
dirname = "./"
# Command line argument
parser = argparse.ArgumentParser(description='Writes Serpent2 input deck of the Fast Cube Reactor.')
parser.add_argument('--latsize', metavar='N', type=int, nargs='?', default=27,
help='lattice size, default = 27') #, required=False)
parser.add_argument('--fuelradius', metavar='r', type=float, nargs='?', default=1.17,
help='fuel rod radius [cm], default = 1.17 cm')
parser.add_argument('--reflector', metavar='refl', type=float, nargs='?', default=50,
                    help='reflector thickness [cm], default = 50 cm')
# Parse command line arguments
args = vars(parser.parse_args())
N = args['latsize']
r_fuel = args['fuelradius']
d_refl = args['reflector']
# Make the deck
s2_deck = drumdeck.write_deck(N, r_fuel, d_refl)
fname = dirname + filename
print("Writing deck for lattice size ",N,", fuel radius ",r_fuel," cm, reflector thickness ",d_refl, " cm.")
# Write the deck
try:
f = open(fname, 'w')
f.write(s2_deck)
f.close()
print("Deck written,")
except IOError as e:
print("Unable to write to file", fname)
print(e)
| gpl-3.0 | -5,168,715,324,540,304,000 | 27.574468 | 108 | 0.647803 | false |
pitunti/alfaPitunti | mediaserver/platformcode/controllers/html.py | 1 | 33340 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# HTML controller
# ------------------------------------------------------------
import json
import os
import re
import threading
import time
import channelselector
from controller import Controller
from controller import Platformtools
from platformcode import config
from core.item import Item
from core.tmdb import Tmdb
from platformcode import launcher, logger
from core import filetools
# <addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
data = filetools.read(filetools.join(config.get_runtime_path(), "addon.xml"))
aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"', data, re.MULTILINE | re.DOTALL)
version = "???"
if len(aux) > 0:
version = aux[0]
class html(Controller):
pattern = re.compile("##")
name = "HTML"
def __init__(self, handler=None, ID=None):
super(html, self).__init__(handler, ID)
self.platformtools = platform(self)
self.data = {}
if self.handler:
self.client_ip = handler.client.getpeername()[0]
self.send_message({"action": "connect",
"data": {"version": "Alfa %s" % version,
"date": "--/--/----"}})
t = threading.Thread(target=launcher.start, name=ID)
t.setDaemon(True)
t.start()
def run(self, path):
if path:
item = Item().fromurl(path)
else:
item = Item(channel="channelselector", action="mainlist", viewmode="banner")
launcher.run(item)
def get_data(self, id):
while not "id" in self.data or not self.data["id"] == id:
time.sleep(0.1)
data = self.data["result"]
self.data = {}
return data
def send_message(self, data):
import random
ID = "%032x" % (random.getrandbits(128))
data["id"] = ID
self.handler.sendMessage(json.dumps(data))
return ID
class platform(Platformtools):
def __init__(self, controller):
self.controller = controller
self.handler = controller.handler
self.get_data = controller.get_data
self.send_message = controller.send_message
def render_items(self, itemlist, parent_item):
"""
        Renders the itemlist; the itemlist and the item it comes from are passed as parameters
        @type itemlist: list
        @param itemlist: list of elements to display
        @type parent_item: item
        @param parent_item: parent element
"""
        # If the itemlist is not a list, bail out
if not type(itemlist) == list:
JsonData = {}
JsonData["action"] = "HideLoading"
JsonData["data"] = {}
self.send_message(JsonData)
return
        # If there are no items, show a notice
if not len(itemlist):
itemlist.append(Item(title="No hay elementos que mostrar"))
if parent_item.channel == "channelselector" and not parent_item.action == "filterchannels":
parent_item.viewmode = "banner"
elif parent_item.channel == "channelselector" and parent_item.action == "filterchannels":
parent_item.viewmode = "channel"
if not parent_item.viewmode:
parent_item.viewmode = "list"
        # "Back" (Atrás) item
if not (parent_item.channel == "channelselector" and parent_item.action == "mainlist") and not \
itemlist[0].action == "go_back":
if parent_item.viewmode in ["banner", "channel"]:
itemlist.insert(0, Item(title="Atrás", action="go_back",
thumbnail=channelselector.get_thumb("back.png", "banner_")))
else:
itemlist.insert(0, Item(title="Atrás", action="go_back",
thumbnail=channelselector.get_thumb("back.png")))
JsonData = {}
JsonData["action"] = "EndItems"
JsonData["data"] = {}
JsonData["data"]["itemlist"] = []
JsonData["data"]["viewmode"] = parent_item.viewmode
JsonData["data"]["category"] = parent_item.category.capitalize()
JsonData["data"]["host"] = self.controller.host
if parent_item.url: JsonData["data"]["url"] = parent_item.url
        # Iterate over the itemlist
for item in itemlist:
if not item.thumbnail and item.action == "search": item.thumbnail = channelselector.get_thumb("search.png")
if not item.thumbnail and item.folder == True: item.thumbnail = channelselector.get_thumb("folder.png", "banner")
if not item.thumbnail and item.folder == False: item.thumbnail = channelselector.get_thumb("nofolder.png")
if "http://media.xxxxx/" in item.thumbnail and not item.thumbnail.startswith(
"http://media.xxxxxxxx/thumb_"):
if parent_item.viewmode in ["banner", "channel"]:
item.thumbnail = channelselector.get_thumbnail_path("banner") + os.path.basename(item.thumbnail)
else:
item.thumbnail = channelselector.get_thumbnail_path() + os.path.basename(item.thumbnail)
            # These images are not available as banners, so if banner mode is requested we drop them so they don't look bad
elif parent_item.viewmode in ["banner", "channel"] and item.thumbnail.startswith(
"http://media.xxxxx/thumb_"):
item.thumbnail = ""
            # If the item has no category, use the parent item's
if item.category == "":
item.category = parent_item.category
            # If the item has no fanart, use the parent item's
if item.fanart == "":
item.fanart = parent_item.fanart
title = item.title.replace(item.title.lstrip(), "").replace(" ", " ") + item.title.lstrip()
            # Format the title
if item.text_color:
title = '[COLOR %s]%s[/COLOR]' % (item.text_color, title)
if item.text_bold:
title = '[B]%s[/B]' % title
if item.text_italic:
title = '[I]%s[/I]' % title
title = self.kodi_labels_to_html(title)
            # Add headers to the images if they are hosted behind Cloudflare
from core import httptools
item.thumbnail = httptools.get_url_headers(item.thumbnail)
item.fanart = httptools.get_url_headers(item.fanart)
JsonItem = {}
JsonItem["title"] = title
JsonItem["thumbnail"] = item.thumbnail
JsonItem["fanart"] = item.fanart
JsonItem["plot"] = item.plot
JsonItem["action"] = item.action
JsonItem["url"] = item.tourl()
JsonItem["context"] = []
if not item.action == "go_back":
for Comando in self.set_context_commands(item, parent_item):
JsonItem["context"].append({"title": Comando[0], "url": Comando[1]})
JsonData["data"]["itemlist"].append(JsonItem)
ID = self.send_message(JsonData)
self.get_data(ID)
def set_context_commands(self, item, parent_item):
"""
        Generates the contextual menus.
         1. Based on the data in item.context
             a. Legacy method: item.context is a str with the options separated by "|" (example: item.context = "1|2|3")
                (predefined options only)
             b. List method: item.context is a list with the different menu options:
                 - Predefined: a predefined option with the given name is loaded.
                     item.context = ["1","2","3"]
                 - dict(): the current item is loaded, overriding the fields included in the dict(); if the
                     channel and action fields are overridden, the originals are saved in from_channel and from_action.
                     item.context = [{"title":"Menu name", "action": "menu action",
                             "channel": "menu channel"}, {...}]
         2. Adding options based on criteria
            Options can be added to the contextual menu for items that meet certain conditions
         3. Adding options to all items
            Options can be added to the contextual menu for all items
        @param item: element that contains the contextual menus
        @type item: item
        @param parent_item:
        @type parent_item: item
"""
context_commands = []
        # Build a list with the different options included in item.context
if type(item.context) == str:
context = item.context.split("|")
elif type(item.context) == list:
context = item.context
else:
context = []
        # Options based on item.context
        for command in context:
            # Predefined options
if type(command) == str:
if command == "buscar_trailer":
context_commands.append(("Buscar Trailer",
item.clone(channel="trailertools", action="buscartrailer",
contextual=True).tourl()))
            # dict format
            if type(command) == dict:
                # The dict parameters override the new context_item; if the "action" and "channel" fields are
                # overridden, the original values are stored in "from_action" and "from_channel"
if "action" in command:
command["from_action"] = item.action
if "channel" in command:
command["from_channel"] = item.channel
context_commands.append(
(command["title"], item.clone(**command).tourl()))
        # Options based on criteria
        # Go to the main menu (channel.mainlist)
if parent_item.channel not in ["news",
"channelselector"] and item.action != "mainlist" and parent_item.action != "mainlist":
context_commands.append(("Ir al Menu Principal", Item(channel=item.channel, action="mainlist").tourl()))
        # Add to Favorites
if item.channel not in ["favorites", "videolibrary", "help", "setting",
""] and not parent_item.channel == "favorites":
context_commands.append((config.get_localized_string(30155),
item.clone(channel="favorites", action="addFavourite", from_channel=item.channel,
from_action=item.action).tourl()))
        # Add a contextual option to add the complete series to the video library
if item.channel != "videolibrary" and item.action in ["episodios", "get_episodios"] \
and (item.contentSerieName or item.show):
context_commands.append(("Añadir Serie a Videoteca",
item.clone(action="add_serie_to_library", from_action=item.action).tourl()))
        # Add movie to the video library
if item.channel != "videolibrary" and item.action in ["detail", "findvideos"] \
and item.contentType == 'movie':
context_commands.append(("Añadir Pelicula a Videoteca",
item.clone(action="add_pelicula_to_library", from_action=item.action).tourl()))
        # Download movie
if item.contentType == "movie" and not item.channel == "downloads":
context_commands.append(("Descargar Pelicula",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Download series
if item.contentType == "tvshow" and not item.channel == "downloads":
context_commands.append(("Descargar Serie",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Download episode
if item.contentType == "episode" and not item.channel == "downloads":
context_commands.append(("Descargar Episodio",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Download season
if item.contentType == "season" and not item.channel == "downloads":
context_commands.append(("Descargar Temporada",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Open settings
if parent_item.channel not in ["setting", "news", "search"]:
context_commands.append(("Abrir Configuración", Item(channel="setting", action="mainlist").tourl()))
return sorted(context_commands, key=lambda comand: comand[0])
def dialog_ok(self, heading, line1, line2="", line3=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.kodi_labels_to_html(text)
JsonData = {}
JsonData["action"] = "Alert"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = unicode(text, "utf8", "ignore").encode("utf8")
ID = self.send_message(JsonData)
self.get_data(ID)
def dialog_notification(self, heading, message, icon=0, time=5000, sound=True):
JsonData = {}
JsonData["action"] = "notification"
JsonData["data"] = {}
JsonData["data"]["title"] = self.kodi_labels_to_html(heading)
JsonData["data"]["text"] = self.kodi_labels_to_html(message)
JsonData["data"]["icon"] = icon
JsonData["data"]["sound"] = sound
JsonData["data"]["time"] = time
self.send_message(JsonData)
return
def dialog_yesno(self, heading, line1, line2="", line3="", nolabel="No", yeslabel="Si", autoclose=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.kodi_labels_to_html(text)
heading = self.kodi_labels_to_html(heading)
JsonData = {}
JsonData["action"] = "AlertYesNo"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = text
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def dialog_select(self, heading, list):
JsonData = {}
heading = self.kodi_labels_to_html(heading)
JsonData["action"] = "List"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["list"] = []
for Elemento in list:
JsonData["data"]["list"].append(self.kodi_labels_to_html(Elemento))
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def dialog_progress(self, heading, line1, line2="", line3=""):
class Dialog(object):
def __init__(self, heading, line1, line2, line3, platformtools):
self.platformtools = platformtools
self.closed = False
self.heading = self.platformtools.kodi_labels_to_html(heading)
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.platformtools.kodi_labels_to_html(text)
JsonData = {}
JsonData["action"] = "Progress"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = text
JsonData["data"]["percent"] = 0
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
def iscanceled(self):
JsonData = {}
JsonData["action"] = "ProgressIsCanceled"
JsonData["data"] = {}
ID = self.platformtools.send_message(JsonData)
response = self.platformtools.get_data(ID)
return response
def update(self, percent, line1, line2="", line3=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.platformtools.kodi_labels_to_html(text)
JsonData = {}
JsonData["action"] = "ProgressUpdate"
JsonData["data"] = {}
JsonData["data"]["title"] = self.heading
JsonData["data"]["text"] = text
JsonData["data"]["percent"] = percent
self.platformtools.send_message(JsonData)
def close(self):
JsonData = {}
JsonData["action"] = "ProgressClose"
JsonData["data"] = {}
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
self.closed = True
return Dialog(heading, line1, line2, line3, self)
def dialog_progress_bg(self, heading, message=""):
class Dialog(object):
def __init__(self, heading, message, platformtools):
self.platformtools = platformtools
self.closed = False
self.heading = self.platformtools.kodi_labels_to_html(heading)
message = self.platformtools.kodi_labels_to_html(message)
JsonData = {}
JsonData["action"] = "ProgressBG"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = message
JsonData["data"]["percent"] = 0
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
def isFinished(self):
return not self.closed
def update(self, percent=0, heading="", message=""):
JsonData = {}
JsonData["action"] = "ProgressBGUpdate"
JsonData["data"] = {}
JsonData["data"]["title"] = self.platformtools.kodi_labels_to_html(heading)
JsonData["data"]["text"] = self.platformtools.kodi_labels_to_html(message)
JsonData["data"]["percent"] = percent
self.platformtools.send_message(JsonData)
def close(self):
JsonData = {}
JsonData["action"] = "ProgressBGClose"
JsonData["data"] = {}
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
self.closed = True
return Dialog(heading, message, self)
def dialog_input(self, default="", heading="", hidden=False):
JsonData = {}
JsonData["action"] = "Keyboard"
JsonData["data"] = {}
JsonData["data"]["title"] = self.kodi_labels_to_html(heading)
JsonData["data"]["text"] = default
JsonData["data"]["password"] = hidden
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def dialog_numeric(self, type, heading, default=""):
return self.dialog_input("", heading, False)
def itemlist_refresh(self):
JsonData = {}
JsonData["action"] = "Refresh"
JsonData["data"] = {}
ID = self.send_message(JsonData)
self.get_data(ID)
def itemlist_update(self, item):
JsonData = {}
JsonData["action"] = "Update"
JsonData["data"] = {}
JsonData["data"]["url"] = item.tourl()
ID = self.send_message(JsonData)
self.get_data(ID)
def is_playing(self):
JsonData = {}
JsonData["action"] = "isPlaying"
JsonData["data"] = {}
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def play_video(self, item):
if item.contentTitle:
title = item.contentTitle
elif item.fulltitle:
title = item.fulltitle
else:
title = item.title
if item.contentPlot:
plot = item.contentPlot
else:
plot = item.plot
if item.server == "torrent":
self.play_torrent(item)
else:
JsonData = {}
JsonData["action"] = "Play"
JsonData["data"] = {}
JsonData["data"]["title"] = title
JsonData["data"]["plot"] = plot
JsonData["data"]["video_url"] = item.video_url
JsonData["data"]["url"] = item.url
JsonData["data"]["host"] = self.controller.host
ID = self.send_message(JsonData)
self.get_data(ID)
def play_torrent(self, item):
import time
import os
played = False
        # Import the client
        from btserver import Client
        # Start the client
        c = Client(url=item.url, is_playing_fnc=self.is_playing, wait_time=None, timeout=5,
                   temp_path=os.path.join(config.get_data_path(), "torrent"))
        # Show the progress dialog
        progreso = self.dialog_progress("Alfa - Torrent", "Iniciando...")
        # While the progress dialog has not been cancelled and the client is not closed
        while not progreso.iscanceled() and not c.closed:
            try:
                # Get the torrent status
                s = c.status
                # Build the three lines with the torrent info
txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \
(s.progress_file, s.file_size, s.str_state, s._download_rate)
txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d' % \
(
s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes,
s.trackers)
txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \
(s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers)
progreso.update(s.buffer, txt, txt2, txt3)
time.sleep(1)
                # If the buffer is full and playback has not started yet, start it
                if s.buffer == 100 and not played:
                    # Close the progress dialog
                    progreso.close()
                    # Get the torrent playlist
                    item.video_url = c.get_play_list()
                    item.server = "directo"
                    self.play_video(item)
                    # Mark it as played so that it is not started again
                    played = True
                    # And wait for the player to close
                    while self.is_playing():
                        time.sleep(1)
                    # Once it is closed, show the dialog again
                    progreso = self.dialog_progress("Alfa - Torrent", "Iniciando...")
except:
import traceback
logger.info(traceback.format_exc())
break
progreso.update(100, "Terminando y eliminando datos", " ", " ")
        # Stop the client
        if not c.closed:
            c.stop()
        # And close the progress dialog
progreso.close()
return
def open_settings(self, items):
from platformcode import config
JsonData = {}
JsonData["action"] = "OpenConfig"
JsonData["data"] = {}
JsonData["data"]["title"] = "Opciones"
JsonData["data"]["items"] = []
for item in items:
if item.get('option') == 'hidden':
item['hidden'] = True
for key in item:
if key in ["lvalues", "label", "category"]:
try:
ops = item[key].split("|")
for x, op in enumerate(ops):
ops[x] = config.get_localized_string(int(ops[x]))
item[key] = "|".join(ops)
except:
pass
JsonData["data"]["items"].append(item)
ID = self.send_message(JsonData)
response = self.get_data(ID)
if response:
from platformcode import config
config.set_settings(response)
JsonData = {}
JsonData["action"] = "HideLoading"
JsonData["data"] = {}
self.send_message(JsonData)
def show_channel_settings(self, list_controls=None, dict_values=None, caption="", callback=None, item=None,
custom_button=None, channelpath=None):
from platformcode import config
from core import channeltools
from core import servertools
import inspect
if not os.path.isdir(os.path.join(config.get_data_path(), "settings_channels")):
os.mkdir(os.path.join(config.get_data_path(), "settings_channels"))
title = caption
if type(custom_button) == dict:
custom_button = {"label": custom_button.get("label", ""),
"function": custom_button.get("function", ""),
"visible": bool(custom_button.get("visible", True)),
"close": bool(custom_button.get("close", False))}
else:
custom_button = None
        # Get the channel from which the call was made and load the settings available for that channel
if not channelpath:
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
channelname = os.path.basename(channelpath).replace(".py", "")
ch_type = os.path.basename(os.path.dirname(channelpath))
        # If we do not have list_controls, they must be read from the channel's json
        if not list_controls:
            # If the channel path is in the "channels" folder, get the controls and values via channeltools
            if os.path.join(config.get_runtime_path(), "channels") in channelpath:
                # The call is made from a channel
                list_controls, default_values = channeltools.get_channel_controls_settings(channelname)
                kwargs = {"channel": channelname}
            # If the channel path is in the "servers" folder, get the controls and values via servertools
            elif os.path.join(config.get_runtime_path(), "servers") in channelpath:
                # The call is made from a server
                list_controls, default_values = servertools.get_server_controls_settings(channelname)
                kwargs = {"server": channelname}
            # Otherwise, exit
            else:
                return None
        # If dict_values is not passed, create an empty dict
        if dict_values == None:
            dict_values = {}
        # Set the title
if caption == "":
caption = str(config.get_localized_string(30100)) + " -- " + channelname.capitalize()
elif caption.startswith('@') and unicode(caption[1:]).isnumeric():
caption = config.get_localized_string(int(caption[1:]))
JsonData = {}
JsonData["action"] = "OpenConfig"
JsonData["data"] = {}
JsonData["data"]["title"] = self.kodi_labels_to_html(caption)
JsonData["data"]["custom_button"] = custom_button
JsonData["data"]["items"] = []
        # Add controls
for c in list_controls:
if not "default" in c: c["default"] = ""
if not "color" in c: c["color"] = "auto"
if not "label" in c: continue
            # Get the value
if "id" in c:
if not c["id"] in dict_values:
if not callback:
c["value"] = config.get_setting(c["id"], **kwargs)
else:
c["value"] = c["default"]
dict_values[c["id"]] = c["value"]
else:
c["value"] = dict_values[c["id"]]
# Translation
if c['label'].startswith('@') and unicode(c['label'][1:]).isnumeric():
c['label'] = str(config.get_localized_string(c['label'][1:]))
if c["label"].endswith(":"): c["label"] = c["label"][:-1]
if c['type'] == 'list':
lvalues = []
for li in c['lvalues']:
if li.startswith('@') and unicode(li[1:]).isnumeric():
lvalues.append(str(config.get_localized_string(li[1:])))
else:
lvalues.append(li)
c['lvalues'] = lvalues
c["label"] = self.kodi_labels_to_html(c["label"])
JsonData["data"]["items"].append(c)
ID = self.send_message(JsonData)
close = False
while True:
data = self.get_data(ID)
if type(data) == dict:
JsonData["action"] = "HideLoading"
JsonData["data"] = {}
self.send_message(JsonData)
for v in data:
if data[v] == "true": data[v] = True
if data[v] == "false": data[v] = False
if unicode(data[v]).isnumeric(): data[v] = int(data[v])
if callback and '.' in callback:
package, callback = callback.rsplit('.', 1)
else:
package = '%s.%s' % (ch_type, channelname)
cb_channel = None
try:
cb_channel = __import__(package, None, None, [package])
except ImportError:
logger.error('Imposible importar %s' % package)
if callback:
                    # If a callback function exists, invoke it ...
                    return getattr(cb_channel, callback)(item, data)
                else:
                    # otherwise, check whether the channel defines a 'cb_validate_config' function ...
                    try:
                        return getattr(cb_channel, 'cb_validate_config')(item, data)
                    except AttributeError:
                        # ... and if 'cb_validate_config' does not exist either ...
for v in data:
config.set_setting(v, data[v], **kwargs)
elif data == "custom_button":
if '.' in callback:
package, callback = callback.rsplit('.', 1)
else:
package = '%s.%s' % (ch_type, channelname)
try:
cb_channel = __import__(package, None, None, [package])
except ImportError:
logger.error('Imposible importar %s' % package)
else:
return_value = getattr(cb_channel, custom_button['function'])(item, dict_values)
if custom_button["close"] == True:
return return_value
else:
JsonData["action"] = "custom_button"
JsonData["data"] = {}
JsonData["data"]["values"] = dict_values
JsonData["data"]["return_value"] = return_value
ID = self.send_message(JsonData)
elif data == False:
return None
def show_video_info(self, data, caption="", item=None, scraper=Tmdb):
from platformcode import html_info_window
return html_info_window.InfoWindow().start(self, data, caption, item, scraper)
def show_recaptcha(self, key, url):
from platformcode import html_recaptcha
return html_recaptcha.recaptcha().start(self, key, url)
def kodi_labels_to_html(self, text):
text = re.sub(r"(?:\[I\])(.*?)(?:\[/I\])", r"<i>\1</i>", text)
text = re.sub(r"(?:\[B\])(.*?)(?:\[/B\])", r"<b>\1</b>", text)
text = re.sub(r"(?:\[COLOR (?:0x)?([0-f]{2})([0-f]{2})([0-f]{2})([0-f]{2})\])(.*?)(?:\[/COLOR\])",
lambda m: "<span style='color: rgba(%s,%s,%s,%s)'>%s</span>" % (
int(m.group(2), 16), int(m.group(3), 16), int(m.group(4), 16), int(m.group(1), 16) / 255.0,
m.group(5)), text)
text = re.sub(r"(?:\[COLOR (?:0x)?([0-f]{2})([0-f]{2})([0-f]{2})\])(.*?)(?:\[/COLOR\])",
r"<span style='color: #\1\2\3'>\4</span>", text)
text = re.sub(r"(?:\[COLOR (?:0x)?([a-z|A-Z]+)\])(.*?)(?:\[/COLOR\])", r"<span style='color: \1'>\2</span>",
text)
return text
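    # Usage sketch (added for illustration; not part of the original addon): given the
    # regexes above, kodi_labels_to_html("[B][COLOR red]Hola[/COLOR][/B]") would return
    # "<b><span style='color: red'>Hola</span></b>".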
| gpl-3.0 | -5,294,933,708,679,458,000 | 40.85804 | 125 | 0.526156 | false |
canihavesomecoffee/sample-platform | tests/test_regression/TestControllers.py | 1 | 26178 | from unittest import mock
from flask import g
from sqlalchemy import and_
from werkzeug.exceptions import NotFound
from mod_auth.models import Role
from mod_customized.models import CustomizedTest
from mod_regression.models import (Category, InputType, OutputType,
RegressionTest, RegressionTestOutput,
RegressionTestOutputFiles)
from mod_sample.models import Sample
from mod_test.models import Test, TestResultFile
from tests.base import BaseTestCase
class TestControllers(BaseTestCase):
def test_root(self):
response = self.app.test_client().get('/regression/')
self.assertEqual(response.status_code, 200)
self.assert_template_used('regression/index.html')
def test_specific_regression_test_loads(self):
response = self.app.test_client().get('/regression/test/1/view')
self.assertEqual(response.status_code, 200)
self.assert_template_used('regression/test_view.html')
regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first()
self.assertIn(regression_test.command, str(response.data))
def test_regression_test_status_toggle(self):
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first()
response = c.get('/regression/test/1/toggle')
self.assertEqual(response.status_code, 200)
self.assertEqual('success', response.json['status'])
if regression_test.active == 1:
self.assertEqual('False', response.json['active'])
else:
self.assertEqual('True', response.json['active'])
@mock.patch('mod_regression.controllers.RegressionTestOutput')
def test_download_result_file_not_found(self, mock_regression_output):
"""
Test that non-existent result file gives 404.
"""
from mod_regression.controllers import test_result_file
mock_regression_output.query.filter.return_value.first.return_value = None
with self.assertRaises(NotFound):
test_result_file(1)
mock_regression_output.query.filter.assert_called_once_with(mock_regression_output.id == 1)
@mock.patch('mod_regression.controllers.RegressionTestOutputFiles')
def test_download_result_file_not_found_variant(self, mock_regression_output_file):
"""
Test that non-existent result file gives 404.
"""
from mod_regression.controllers import multiple_test_result_file
mock_regression_output_file.query.filter.return_value.first.return_value = None
with self.assertRaises(NotFound):
multiple_test_result_file(1)
mock_regression_output_file.query.filter.assert_called_once_with(mock_regression_output_file.id == 1)
@mock.patch('mod_regression.controllers.serve_file_download')
@mock.patch('mod_regression.controllers.RegressionTestOutput')
def test_download_result_file(self, mock_regression_output, mock_serve):
"""
Test that correct result file triggers serve download.
"""
from mod_regression.controllers import test_result_file
response = test_result_file(1)
mock_regression_output.query.filter.assert_called_once_with(mock_regression_output.id == 1)
mock_serve.assert_called_once()
@mock.patch('mod_regression.controllers.serve_file_download')
@mock.patch('mod_regression.controllers.RegressionTestOutputFiles')
def test_download_result_file_variant(self, mock_regression_output_file, mock_serve):
"""
Test that correct result file triggers serve download for variants.
"""
from mod_regression.controllers import multiple_test_result_file
response = multiple_test_result_file(1)
mock_regression_output_file.query.filter.assert_called_once_with(mock_regression_output_file.id == 1)
mock_serve.assert_called_once()
def test_regression_test_deletion_Without_login(self):
response = self.app.test_client().get('/regression/test/9432/delete')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.test_delete', response.data)
def test_delete_if_will_throw_404(self):
"""
Check if it will throw an error 404
:return:
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('/regression/test/9432/delete')
self.assertEqual(response.status_code, 404)
def test_delete(self):
"""
Check it will delete RegressionTest as well as the Customized test
linked with it
"""
customized_test = CustomizedTest(test_id=1, regression_id=1)
g.db.add(customized_test)
g.db.commit()
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('/regression/test/1/delete')
self.assertEqual(response.status_code, 200)
response = c.post(
'/regression/test/1/delete', data=dict(
hidden='yes',
submit=True
)
)
self.assertEqual(response.status_code, 302)
self.assertEqual(RegressionTest.query.filter(RegressionTest.id == 1).first(), None)
self.assertEqual(CustomizedTest.query.filter(CustomizedTest.regression_id == 1).first(), None)
def test_add_category(self):
"""
Check it will add a category
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/category_add',
data=dict(category_name="Lost", category_description="And found", submit=True))
self.assertNotEqual(Category.query.filter(Category.name == "Lost").first(), None)
def test_add_category_empty(self):
"""
Check it won't add a category with an empty name
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/category_add', data=dict(category_name="", category_description="And Lost", submit=True))
self.assertEqual(Category.query.filter(Category.name == "").first(), None)
self.assertEqual(Category.query.filter(Category.description == "And Lost").first(), None)
def test_edit_category(self):
"""
Check it will edit a category
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
c.post('/regression/category/1/edit',
data=dict(category_name="Sheldon", category_description="That's my spot", submit=True))
self.assertNotEqual(Category.query.filter(Category.name == "Sheldon").first(), None)
def test_edit_category_empty(self):
"""
Check it won't edit a category with an empty name
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
c.post('/regression/category/1/edit', data=dict(category_name="", category_description="GG", submit=True))
self.assertEqual(Category.query.filter(Category.name == "").first(), None)
self.assertEqual(Category.query.filter(Category.description == "GG").first(), None)
self.assertNotEqual(Category.query.filter(Category.name == "C-137").first(), None)
def test_edit_wrong_category(self):
"""
Check it will throw 404 if trying to edit a category which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
response = c.post(
'regression/category/1729/edit',
data=dict(category_name="Sheldon", category_description="That's my spot", submit=True)
)
self.assertEqual(response.status_code, 404)
def test_add_test(self):
"""
Check it will add a regression test
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/new', data=dict(
sample_id=1,
command="-autoprogram -out=ttxt -latin1 -2",
input_type="file",
output_type="file",
category_id=1,
expected_rc=25,
submit=True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.id == 3).first(), None)
def test_add_test_empty_erc(self):
"""
Check it will not add a regression test with empty Expected Runtime Code
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/new', data=dict(
sample_id=1,
command="-autoprogram -out=ttxt -latin1 -2",
input_type=InputType.file,
output_type=OutputType.file,
category_id=1,
submit=True,
))
self.assertEqual(RegressionTest.query.filter(RegressionTest.id == 3).first(), None)
def test_category_deletion_without_login(self):
response = self.app.test_client().get('/regression/category/9432/delete')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.category_delete', response.data)
def test_category_delete_if_will_throw_404(self):
"""
Check if it will throw an error 404
:return:
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.get('/regression/category/9432/delete')
self.assertEqual(response_regression.status_code, 404)
def test_category_delete(self):
"""
Check it will delete the Category
:return:
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('/regression/category/1/delete')
self.assertEqual(response.status_code, 200)
response = c.post('/regression/category/1/delete', data=dict(
hidden='yes',
submit=True
))
self.assertEqual(response.status_code, 302)
def test_edit_test(self):
"""
Check it will edit a regression test
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
category_id=2,
expected_rc=25,
submit=True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(), None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
self.assertNotEqual(i.id, 2)
category = Category.query.filter(Category.id == 2).first()
for i in category.regression_tests:
if i.id == 2:
break
else:
self.fail("No tests in category")
def test_edit_test_empty_erc(self):
"""
Check it will not edit a regression test with empty Expected Runtime Code
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/1/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
category_id=2,
submit=True,
))
self.assertEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(), None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
if i.id == 1:
break
else:
self.fail("No tests in category")
category = Category.query.filter(Category.id == 2).first()
for i in category.regression_tests:
self.assertNotEqual(i.id, 1)
def test_edit_wrong_test(self):
"""
Check it will throw 404 if trying to edit a regression test which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.post('/regression/test/42/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
expected_rc=25,
category_id=2,
submit=True,
))
self.assertEqual(response_regression.status_code, 404)
def test_edit_test_same_category(self):
"""
Check it won't create problems edit a regression test and not changing its category
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
category_id=1,
expected_rc=25,
submit=True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(), None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
if i.id == 2:
break
else:
self.fail("No tests in category")
def test_if_test_regression_view_throws_a_not_found_error(self):
"""
Check if the test doesn't exist and will throw an error 404
"""
response = self.app.test_client().get('regression/test/1337/view')
self.assertEqual(response.status_code, 404)
def test_if_test_toggle_view_throws_a_not_found_error(self):
"""
Check if the test toggle doesn't exist and will throw an error 404
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('regression/test/1337/toggle')
self.assertEqual(response.status_code, 404)
def test_sample_view(self):
"""
Test if it'll return a valid sample
"""
response = self.app.test_client().get('/regression/sample/1')
sample = Sample.query.filter(Sample.id == 1).first()
self.assertEqual(response.status_code, 200)
self.assert_context('sample', sample)
def test_sample_view_nonexistent(self):
"""
        Test it returns 404 for a sample that doesn't exist
"""
response = self.app.test_client().get('/regression/sample/13423423')
self.assertEqual(response.status_code, 404)
def test_add_output(self):
"""
        Check it will add an output
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(output_file=2, test_id="Test id 2 with output out2", submit=True))
self.assertNotEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 2,
RegressionTestOutputFiles.file_hashes == "out2"
)
).first(),
None
)
def test_add_output_wrong_regression_test(self):
"""
        Check it will throw 404 for a regression_test which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/69420/output/new',
data=dict(output_file=2, test_id="Test id 2 with output out2", submit=True)
)
self.assertEqual(response.status_code, 404)
def test_add_output_without_login(self):
response = self.app.test_client().get('/regression/test/69420/output/new')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.output_add', response.data)
def test_remove_output(self):
"""
        Check it will remove an output
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
rtof = RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 2,
RegressionTestOutputFiles.file_hashes == "bluedabadee"
)
).first()
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/2/output/remove',
data=dict(output_file=rtof.id, submit=True)
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.id == rtof.id
)
).first(),
None
)
def test_remove_output_wrong_regression_test(self):
"""
Check it will throw 404 for a regression_test which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/69420/output/remove',
data=dict(output_file=2, submit=True)
)
self.assertEqual(response.status_code, 404)
def test_remove_output_without_login(self):
"""
        Check it redirects to login when removing output without being logged in
"""
response = self.app.test_client().get('/regression/test/69420/output/remove')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.output_remove', response.data)
def test_add_output_empty_got(self):
"""
        Check it will not add an output when 'got' is empty
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(output_file=1, submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 1,
)
).count(),
0
)
def test_add_output_empty_output_file(self):
"""
        Check it will not add an output when the regression test output (rto) is empty
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(test_id="Test id 2 with output demogorgans", submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.file_hashes == "demogorgans",
)
).count(),
0
)
def test_add_output_wrong_rto_id(self):
"""
        Check it will not add an output with a wrong regression_test_output_id
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(output_file=69420, test_id="Test id 2 with output out2", submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 69420,
RegressionTestOutputFiles.file_hashes == "out2"
)
).first(),
None
)
def test_add_test_output_and_check_double_hashes(self):
"""
Check if the add output method checks for double hashes
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
add_rt_rto_trf = [
RegressionTest(1, "-autoprogram -out=ttxt -latin1 -2", InputType.file, OutputType.file, 3, 0),
RegressionTestOutput(3, "sample_out3", ".srt", ""),
RegressionTestOutput(3, "sample_out4", ".srt", ""),
TestResultFile(2, 3, 3, "sample_out3", "out3"),
TestResultFile(2, 3, 4, "sample_out4", "out3")
]
g.db.add_all(add_rt_rto_trf)
g.db.commit()
self.assertEqual(
TestResultFile.query.filter(
TestResultFile.got == "out3"
).count(),
2
)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/3/output/new',
data=dict(output_file=3, test_id="Test id 2 with output out3", submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
RegressionTestOutputFiles.file_hashes == "out3"
).count(),
1
)
| isc | 886,495,612,370,523,300 | 42.557404 | 118 | 0.596226 | false |