Dataset schema (15 columns):

- repo_name: string (length 6–61)
- path: string (length 4–230)
- copies: string (length 1–3)
- size: string (length 4–6)
- text: string (length 1.01k–850k)
- license: string (15 classes)
- hash: int64 (min -9,220,477,234,079,998,000, max 9,219,060,020B)
- line_mean: float64 (11.6–96.6)
- line_max: int64 (32–939)
- alpha_frac: float64 (0.26–0.9)
- autogenerated: bool (1 class)
- ratio: float64 (1.62–6.1)
- config_test: bool (2 classes)
- has_no_keywords: bool (2 classes)
- few_assignments: bool (1 class)

repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
thomasowenmclean/aced | aced/widgets/editorwidget.py | 1 | 3575 | from qtpy import QtWidgets, QtCore, QtGui
from ..backend import franktonjunction, themeing, themeing_constants
from ..connections import signals
import uuid
from ..globals import pointers
from ..gui import ui_editorwidget
class EditorViewWidget(ui_editorwidget.ui_EditorViewWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.textframe = TextFrame(parent)
self.text = self.textframe.text
self.previewframe = PreviewFrame(parent)
self.preview = self.previewframe.preview
self.splitter.addWidget(self.textframe)
self.splitter.addWidget(self.previewframe)
self.preview_is_shown = True
def toggle_preview(self):
if not self.preview_is_shown:
self.show_preview()
self.preview_is_shown = True
else:
self.hide_preview()
self.preview_is_shown = False
def show_preview(self):
self.splitter.setSizes([1, 1])
show_sheet = self.text.highlighter.splitter_shown_sheet
self.splitter.setStyleSheet(show_sheet)
def hide_preview(self):
self.splitter.setSizes([1, 0])
hide_sheet = self.text.highlighter.splitter_hidden_sheet
self.splitter.setStyleSheet(hide_sheet)
class TextFrame(ui_editorwidget.ui_EditorFrame):
def __init__(self, parent=None):
super().__init__(parent)
self.text = TextEditor(parent)
self.layout.addWidget(self.text)
class PreviewFrame(ui_editorwidget.ui_EditorFrame):
def __init__(self, parent=None):
super().__init__(parent)
self.preview = ui_editorwidget.ui_Preview(parent)
self.layout.addWidget(self.preview)
self.setMinimumSize(0, 0)
self.layout.setContentsMargins(30, 30, 30, 30)
class TextEditor(ui_editorwidget.ui_TextEditor):
def __init__(self, parent=None):
super().__init__(parent)
self.highlighter = themeing.MarkdownHighlighter(self)
self._id = None
self.completer = None
# For citations
self.bibpath = None
self.suppressbib = False
self.cslpath = None
self.reflist_title = None
# For filehandling
self.savepath = None
self.savedir = None
self.textChanged.connect(self.on_text_change)
self.cursorPositionChanged.connect(self.on_cursor_change)
def on_text_change(self):
franktonjunction.previewthread().start()
franktonjunction.statusthread().start()
self.maybe_new_block_state()
def on_cursor_change(self):
franktonjunction.statusthread().start()
self.maybe_new_block_state()
def get_block_state(self, cursor=None):
if not cursor:
cursor = self.textCursor()
try:
return cursor.block().userData().in_block
except AttributeError:
return
def maybe_new_block_state(self):
state = self.get_block_state()
signals.BlockFormatSignal.emit(state)
@property
def id(self):
if not self._id:
self._id = uuid.uuid1().hex
return self._id
def contextMenuEvent(self, event):
spelldict = self.highlighter.spelldict
if not spelldict:
return self.acceptcontext(event)
else:
return franktonjunction.check_this(self, event)
def acceptcontext(self, event):
menu = self.createStandardContextMenu()
menu.exec_(event.globalPos())
def keyPressEvent(self, event):
signals.EditorKeyPressSignal.emit(event)
| gpl-2.0 | -835,179,962,398,171,500 | 26.929688 | 68 | 0.637762 | false | 3.873239 | false | false | false |
xju2/xaodtools | scripts/analysis_template.py | 1 | 4725 | #!/usr/bin/evn python
head_file="""
/*
* description of the analysis
*
*/
#ifndef __MYXAODTOOLS_ANALYSISNAME_H__
#define __MYXAODTOOLS_ANALYSISNAME_H__
#include <vector>
#include <string>
#include "MyXAODTools/AnalysisBase.h"
#include "AsgTools/ToolHandle.h"
using namespace std;
class ANALYSISNAME : public AnalysisBase
{
public:
ANALYSISNAME();
virtual ~ANALYSISNAME();
int initialize();
void ClearBranch();
int process(Long64_t ientry); // main program
private:
/** private methods */
void AttachBranchToTree();
void CreateBranch();
private:
/* specific branches used in this analysis */
private:
/* specific Tools used in this analysis */
};
#endif
"""
src_file = """
#include <stdlib.h>
#include <TFile.h>
#include "MyXAODTools/ANALYSISNAME.h"
#include "MyXAODTools/Helper.h"
#include "CPAnalysisExamples/errorcheck.h"
#include "xAODBase/IParticleHelpers.h"
#include "xAODMuon/MuonContainer.h"
#include "xAODEgamma/ElectronContainer.h"
#include "xAODEgamma/PhotonContainer.h"
#include "xAODJet/JetContainer.h"
#include "xAODMissingET/MissingETContainer.h"
ANALYSISNAME::ANALYSISNAME():
AnalysisBase()
{
if(APP_NAME==NULL) APP_NAME = "ANALYSISNAME";
string maindir(getenv("ROOTCOREBIN"));
// don't forget to change your SUSY configuration!
m_susy_config = Form("%s/data/MyXAODTools/monojet.conf", maindir.c_str());
trigger_map_ = { // your triggers go here.
// {"HLT_xe70", false},
};
}
int ANALYSISNAME::initialize()
{
if( initializeBasicTools() != 0 ){
return 1;
}
CreateBranch();
AttachBranchToTree();
// initiate tools
return 0;
}
ANALYSISNAME::~ANALYSISNAME(){
}
void ANALYSISNAME::CreateBranch()
{
CreateBasicBranch();
return ;
}
void ANALYSISNAME::ClearBranch(){
ClearBasicBranch();
}
void ANALYSISNAME::AttachBranchToTree()
{
AttachBasicToTree();
// event_br->AttachBranchToTree(*physics);
// muon_br, el_br, jet_br, ph_br
// set your own branches
// physics->Branch("has_bad_muon", &m_hasBadMuon, "has_bad_muon/O");
}
int ANALYSISNAME::process(Long64_t ientry)
{
int sc = Start(ientry);
if(m_debug) {
Info(APP_NAME, " ANALYSISNAME: processing");
}
if(sc != 0) return sc;
// event_br->Fill(*ei);
// Start ANALYSISNAME
/*get physics objects*/
// Electrons
// xAOD::ElectronContainer* electrons_copy = NULL;
// xAOD::ShallowAuxContainer* electrons_copyaux = NULL;
// CHECK( m_objTool->GetElectrons(electrons_copy, electrons_copyaux, true) );
// Muons
// xAOD::MuonContainer* muons_copy = NULL;
// xAOD::ShallowAuxContainer* muons_copyaux = NULL;
// CHECK( m_objTool->GetMuons(muons_copy, muons_copyaux, true) );
// Jets
// xAOD::JetContainer* jets_copy = NULL;
// xAOD::ShallowAuxContainer* jets_copyaux = NULL;
// CHECK( m_objTool->GetJets(jets_copy,jets_copyaux, true) );
// Photons
// xAOD::PhotonContainer* ph_copy = nullptr;
// xAOD::ShallowAuxContainer* phAux_copy = nullptr;
// CHECK(m_objTool->GetPhotons(ph_copy, phAux_copy, true));
///////////////////////
// do overlap removal before object selection
// turn off the harmonization
///////////////////////
// bool doHarmonization = false;
// CHECK( m_objTool->OverlapRemoval(
// electrons_copy, muons_copy,
// jets_copy, ph_copy) );
//discard the event if any jets is labelled as 'bad'
// bool passJetCleaning = true;
// for(const auto& jet : *jets_copy){
// m_objTool->IsBJet(*jet) ;
// if ( jet->pt() > 20e3 )
// {
// if( dec_bad(*jet) && dec_passOR(*jet)){
// passJetCleaning = false;
// break;
// }
// }
// }
// if ( !passJetCleaning ) return 1;
// electron selections
// for(const auto& el : *electrons_copy){
// if( !(bool) dec_baseline(*el) || !(bool) dec_passOR(*el)){
// continue;
// }
// m_nBaseEl ++;
// }
// // muon selections
// for(const auto& mu : *muons_copy){
// if( !(bool) dec_baseline(*mu) || !(bool) dec_passOR(*mu) ){
// continue;
// }
// if( dec_bad(*mu) ) m_hasBadMuon = true;
// if( dec_cosmic(*mu) ) m_hasCosmicMuon = true;
// muon_br->Fill(*mu, ei, vertice);
// m_nBaseMu ++;
// }
// // photon selections
// for(const auto& ph : *ph_copy) {
// if( !(bool) dec_baseline(*ph) || !(bool) dec_passOR(*ph) ){
// continue;
// }
// m_nBasePhoton ++;
// }
// Fill your tree!!!!
physics->Fill();
return 0;
}
"""
| mit | 5,394,879,387,766,987,000 | 22.743719 | 81 | 0.589841 | false | 2.966102 | false | false | false |
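The script above only defines the `head_file` and `src_file` templates with an `ANALYSISNAME` placeholder. A minimal sketch of how that substitution could be driven (the `write_analysis` helper, file extensions, and example class name are assumptions added for illustration, not part of the original script):

```python
import os

def write_analysis(name, head_file, src_file, out_dir="."):
    """Instantiate the header/source templates for a new analysis class."""
    header = head_file.replace("ANALYSISNAME", name)
    source = src_file.replace("ANALYSISNAME", name)
    with open(os.path.join(out_dir, name + ".h"), "w") as f:
        f.write(header)
    with open(os.path.join(out_dir, name + ".cxx"), "w") as f:
        f.write(source)

# Example (hypothetical class name):
# write_analysis("MyMonojetAnalysis", head_file, src_file)
```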
mgedmin/mgp2pdf | samples/doctests2/source/makeslide.py | 1 | 6195 | #!/usr/bin/env python
r"""
Produce a MagicPoint file from a text file with simple markup.
Markup:
directives (lines starting with .)
MagicPoint text (everything else)
You can use MagicPoint markup (lines starting with %), but that is discouraged.
You should know that \ is MagicPoint's escape character. If you want to
include a literal \ in the text, write \\. You should also know that lines
starting with a # are comments. If you want to include a line starting with
a # in the output, write \#
Directives:
.title
.author
.email
.conference
title page elements
.titlepage
produce a title page
.page
start a new page (one--two lines of text, centered vertically)
.midpage
start a new page (three lines of text, centered vertically)
.bigpage
start a new big page (four lines of text, centered vertically)
.pageofcode
start a new page for code examples (12 lines), enables Python syntax
.pageoftext
start a new page for text examples (12 lines)
.italic
switch font to italic
.monospace
switch font to monospace
.normal
switch font to normal
.python
enable Python syntax highlight
.defaultsyntax
use default syntax highlight (doctests)
.nosyntax
disable syntax highlight
Empty lines following a directive are skipped.
makeslide.py was written by Marius Gedminas <[email protected]>
"""
import re
import sys
import string
import keyword
import fileinput
templates = dict(
titlepage = string.Template('''\
#!/usr/bin/env mgp
# Note: tabs and trailing spaces are *important* in this file
# - Preamble ----------------------------------------------------------------
%deffont "standard" xfont "verdana"
%deffont "thick" xfont "verdana-bold"
%deffont "em" xfont "verdana-medium-i"
%deffont "mono" xfont "andale mono"
%default 1 area 90 90, vgap 260, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png"
%default 2 center, size 5
%default 3 size 8, vgap 80
%default 4 font "em", size 7, vgap 10
%default 5 font "standard", size 3
# ---------------------------------------------------------------------------
%page
%pcache 1 1 0 1
%ccolor "#134d73"
%nodefault
%size 7, font "standard", vgap 20, fore "black", back "white"
%center, font "thick", size 11
$title
%center, font "standard", size 7
%size 5, font "standard", fore "#134d73"
$author
%size 4
$email
%size 2
%size 5
Programmers of Vilnius
%size 4
http://pov.lt/
%size 2
%newimage "povlogo.png"
%fore "black"
$conference
'''),
pageofcode = string.Template('''\
%page
%nodefault
%area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png"
%left, size 6, vgap 10
'''),
pageoftext = string.Template('''\
%page
%nodefault
%area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png"
%left, size 6, vgap 10
'''),
page = string.Template('''\
%page
'''),
bigpage = string.Template('''\
%page
%nodefault
%area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png"
%center, size 8, vgap 80
'''),
midpage = string.Template('''\
%page
%nodefault
%area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png"
%center, size 8, vgap 80
'''),
italic = string.Template('%font "em"\n'),
monospace = string.Template('%font "mono"\n'),
normal = string.Template('%font "standard"\n'),
)
python_syntax_patterns = {
r'\b(?P<kw>%s)\b' % '|'.join(keyword.kwlist): string.Template('''
%cont, font "thick"
$kw
%cont, font "standard"
'''),
"(?P<s>'.*?')": string.Template('''
%cont, fore "#13734d"
$s
%cont, fore "#134d73"
''')
}
class PythonSyntaxHighligh(string.Template):
def substitute(self, **kw):
kw['line'] = apply_syntax_patterns(python_syntax_patterns, kw['line'])
return super(PythonSyntaxHighligh, self).substitute(**kw)
line_patterns = {
r'^(?P<indent>\s*)(?P<prefix>\.\.\.|>>>)(?P<line>.*)$': PythonSyntaxHighligh('''\
$indent
%cont, font "mono", fore "#00aaaa"
$prefix
%cont, font "standard", fore "#134d73"
$line
%font "standard"'''),
}
syntax_modes = {
'nosyntax': {},
'defaultsyntax': line_patterns,
'python': python_syntax_patterns,
'pageofcode': python_syntax_patterns,
}
default_syntax = line_patterns
def apply_syntax_patterns(syntax_patterns, line):
idx = 0
mega_re = []
for idx, (pat, tmpl) in enumerate(syntax_patterns.items()):
mega_re.append('(?P<r%d>%s)' % (idx, pat))
mega_re = '|'.join(mega_re)
def replacement(match):
for idx, (pat, tmpl) in enumerate(syntax_patterns.items()):
if match.group('r%d' % idx):
return tmpl.substitute(**match.groupdict())
assert False, 'empty match?'
if mega_re:
line = re.sub(mega_re, replacement, line)
return line
def preprocess(inputfile, outputfile):
args = {'title': '', 'author': '', 'email': '', 'conference': ''}
syntax_patterns = default_syntax
skipping_empty_lines = True
for line in inputfile:
if not line.strip() and skipping_empty_lines:
continue
line = line.rstrip('\n')
if line.startswith('.') and not line.startswith('...'):
keyword = line.split()[0][1:]
if keyword in args:
args[keyword] = line[len(keyword)+1:].strip()
elif keyword in templates:
print >> outputfile, templates[keyword].substitute(**args),
syntax_patterns = syntax_modes.get(keyword, default_syntax)
elif keyword in syntax_modes:
syntax_patterns = syntax_modes[keyword]
else:
print >> sys.stderr, ".%s ignored" % keyword
skipping_empty_lines = True
else:
skipping_empty_lines = False
line = apply_syntax_patterns(syntax_patterns, line)
print >> outputfile, line
if __name__ == '__main__':
preprocess(fileinput.input(), sys.stdout)
| gpl-2.0 | 5,074,787,198,690,758,000 | 26.290749 | 126 | 0.619209 | false | 3.468645 | false | false | false |
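To make the directive list in the docstring concrete, here is a small sketch of feeding sample markup through `preprocess()`. The title, author, and email are invented placeholders, and the import assumes the script is importable as a module named `makeslide`; the script itself targets Python 2, so the sketch does too.

```python
# Python 2, to match the script above.
import sys
from StringIO import StringIO
from makeslide import preprocess  # assumes makeslide.py is on the path

sample = """\
.title An Example Talk
.author Jane Doe
.email jane@example.org
.conference ExampleConf
.titlepage
.page
A single centered line of text
.pageofcode
>>> print 'hello'
hello
"""

# preprocess() accepts any iterable of lines and a writable file object.
preprocess(StringIO(sample), sys.stdout)
```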
systempuntoout/buckethandle | app/controllers/ajax.py | 1 | 1741 | from google.appengine.api import memcache
import logging, web
import app.db.models as models
from google.appengine.ext import ereporter
import app.utility.utils as utils
from app.utility.utils import cachepage
ereporter.register_logger()
render = web.render
class Tags:
    """
    Return tags for auto completion
    """
    @cachepage()
    def GET(self):
        web.header('Content-type', 'text/plain')
        try:
            tag_filter = web.input()['q']
            return models.Tag.get_tags_by_filter(tag_filter)
        except Exception, exception:
            return ""


class Markdown:
    """
    Return markdown data for Markitup preview
    """
    def POST(self):
        data = web.input(data = None)['data']
        return render.admin.markdown_preview(data)


class Links:
    """
    Check if a given link is already stored
    """
    def GET(self):
        web.header('Content-type', 'application/json')
        link = web.input(check = None)['check']
        if link and utils.link_is_valid(link):
            link_is_stored = models.Post.get_post_by_link(link.strip())
            if not link_is_stored:
                if link.strip().endswith('/'):
                    link_is_stored = models.Post.get_post_by_link(link.strip()[:-1]) #check without slash
                else:
                    link_is_stored = models.Post.get_post_by_link("%s%s" % (link.strip(), '/')) #check with slash
            if link_is_stored:
                return '{"result":"[ The link is known ]","clazz":"message_KO"}'
            else:
                return '{"result":"[ This link looks new ]","clazz":"message_OK"}'
        else:
            return '{"result":"","clazz":""}'
| bsd-3-clause | 8,339,046,259,761,821,000 | 31.849057 | 113 | 0.561172 | false | 3.956818 | false | false | false |
Sprytile/Sprytile | rx/concurrency/newthreadscheduler.py | 2 | 2094 | import time
import logging
import threading
from rx.core import Scheduler, Disposable
from .schedulerbase import SchedulerBase
from .eventloopscheduler import EventLoopScheduler
log = logging.getLogger('Rx')
class NewThreadScheduler(SchedulerBase):
    """Creates an object that schedules each unit of work on a separate thread.
    """

    def __init__(self, thread_factory=None):
        super(NewThreadScheduler, self).__init__()

        def default_factory(target, args=None):
            t = threading.Thread(target=target, args=args or [])
            t.setDaemon(True)
            return t

        self.thread_factory = thread_factory or default_factory

    def schedule(self, action, state=None):
        """Schedules an action to be executed."""
        scheduler = EventLoopScheduler(thread_factory=self.thread_factory, exit_if_empty=True)
        return scheduler.schedule(action, state)

    def schedule_relative(self, duetime, action, state=None):
        """Schedules an action to be executed after duetime."""
        scheduler = EventLoopScheduler(thread_factory=self.thread_factory, exit_if_empty=True)
        return scheduler.schedule_relative(duetime, action, state)

    def schedule_absolute(self, duetime, action, state=None):
        """Schedules an action to be executed at duetime."""
        return self.schedule_relative(duetime - self.now, action, state=None)

    def schedule_periodic(self, period, action, state=None):
        """Schedule a periodic piece of work."""
        secs = self.to_relative(period) / 1000.0
        disposed = []
        s = [state]

        def run():
            while True:
                time.sleep(secs)
                if disposed:
                    return
                new_state = action(s[0])
                if new_state is not None:
                    s[0] = new_state

        thread = self.thread_factory(run)
        thread.start()

        def dispose():
            disposed.append(True)

        return Disposable.create(dispose)
Scheduler.new_thread = new_thread_scheduler = NewThreadScheduler()
| mit | 2,661,126,048,347,628,500 | 29.347826 | 94 | 0.634193 | false | 4.256098 | false | false | false |
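A short usage sketch for the periodic scheduling shown above. Only what is visible in this file is relied on: `schedule_periodic` converts an integer period to milliseconds via `to_relative()`, calls the action with the stored state, and returns a disposable that stops the loop. The import path is assumed from this repo's layout.

```python
import time
from rx.concurrency.newthreadscheduler import NewThreadScheduler  # path assumed from this repo

scheduler = NewThreadScheduler()

def tick(state):
    # Runs on the worker thread; returning a value updates the stored state.
    print("tick %d" % state)
    return state + 1

subscription = scheduler.schedule_periodic(500, tick, state=0)  # every 0.5 s
time.sleep(2)
subscription.dispose()  # sets the `disposed` flag; the loop exits after its next sleep
```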
shakedel/tensorboard | tensorboard/loader.py | 1 | 23028 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorBoard data ingestion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import locale
import logging
import re
import sys
import time
import threading
import types # pylint: disable=unused-import
import six
import tensorflow as tf
from tensorboard import util
class Record(collections.namedtuple('Record', ('record', 'offset'))):
"""Value class for a record returned by RecordReader.
Fields:
record: The byte string record that was read.
offset: The byte offset in the file *after* this record was read.
:type record: str
:type offset: int
"""
__slots__ = () # Enforces use of only tuple fields.
@util.closeable
@six.python_2_unicode_compatible
class RecordReader(object):
"""Pythonic veneer around PyRecordReader."""
def __init__(self, path, start_offset=0):
"""Creates new instance.
Args:
path: Path of file. This can be on a remote file system if the
TensorFlow build supports it.
start_offset: Byte offset to seek in file once it's opened.
:type path: str
:type start_offset: int
"""
self.path = tf.compat.as_text(path)
self._offset = start_offset
self._size = -1
self._reader = None # type: tf.pywrap_tensorflow.PyRecordReader
self._is_closed = False
self._lock = threading.Lock()
def get_size(self):
"""Returns byte length of file.
This is guaranteed to return a number greater than or equal to the
offset of the last record returned by get_next_record().
This method can be called after the instance has been closed.
Raises:
IOError: If file has shrunk from last read offset, or start
offset, or last read size.
:rtype: int
"""
size = tf.gfile.Stat(self.path).length
minimum = max(self._offset, self._size)
if size < minimum:
raise IOError('File shrunk: %d < %d: %s' % (size, minimum, self.path))
self._size = size
return size
def get_next_record(self):
"""Reads record from file.
Returns:
A Record or None if no more were available.
Raises:
IOError: On open or read error, or if close was called.
tf.errors.DataLossError: If corruption was encountered in the
records file.
:rtype: Record
"""
if self._is_closed:
raise IOError('%s is closed' % self)
if self._reader is None:
self._reader = self._open()
try:
with tf.errors.raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status)
except tf.errors.OutOfRangeError:
# We ignore partial read exceptions, because a record may be truncated.
# PyRecordReader holds the offset prior to the failed read, so retrying
# will succeed.
return None
self._offset = self._reader.offset()
return Record(self._reader.record(), self._offset)
def close(self):
"""Closes record reader if open.
Further reads are not permitted after this method is called.
"""
if self._is_closed:
return
if self._reader is not None:
self._reader.Close()
self._is_closed = True
self._reader = None
def _open(self):
with tf.errors.raise_exception_on_not_ok_status() as status:
return tf.pywrap_tensorflow.PyRecordReader_New(
tf.resource_loader.readahead_file_path(tf.compat.as_bytes(self.path)),
self._offset, tf.compat.as_bytes(''), status)
def __str__(self):
return u'RecordReader{%s}' % self.path
@util.closeable
@six.python_2_unicode_compatible
class BufferedRecordReader(object):
"""Wrapper around RecordReader that does threaded read-ahead.
This class implements the same interface as RecordReader. It prevents
remote file systems from devastating loader performance. It does not
degrade throughput on local file systems.
The thread is spawned when the first read operation happens. The
thread will diligently try to buffer records in the background. Its
goal is to sleep as much as possible without blocking read operations.
This class is thread safe. It can be used from multiple threads
without any need for external synchronization.
"""
READ_AHEAD_AGGRESSION = 2.3 # Does full replenish when ~40% full.
READ_AHEAD_BYTES = 16 * 1024 * 1024
STAT_INTERVAL_SECONDS = 4.0
def __init__(self, path,
start_offset=0,
read_ahead=READ_AHEAD_BYTES,
stat_interval=STAT_INTERVAL_SECONDS,
clock=time.time,
record_reader_factory=RecordReader):
"""Creates new instance.
The i/o thread is not started until the first read happens.
Args:
path: Path of file. This can be on a remote file system if the
TensorFlow build supports it.
start_offset: Byte offset to seek in file once it's opened.
read_ahead: The number of record bytes to buffer into memory
before the thread starts blocking. This value must be >0 and
the default is BufferedRecordReader.READ_AHEAD_BYTES.
stat_interval: A float with the minimum number of seconds between
stat calls, to determine the file size. If this is 0.0 then
the thread will stat after every re-buffer, but never be
woken up in order to stat.
clock: Function returning a float with the number of seconds
since the UNIX epoch in zulu time.
record_reader_factory: The RecordReader constructor, which can be
changed for testing.
:type path: str
:type start_offset: int
:type read_ahead: int
:type clock: () -> float
:type record_reader_factory: (str, int) -> RecordReader
"""
self.path = tf.compat.as_text(path)
self._read_ahead = read_ahead
self._stat_interval = stat_interval
self._clock = clock
self._is_closed = False
self._has_reached_end = False
self._offset = 0
self._size = -1
self._last_stat = 0.0
self._buffered = 0
self._reader = record_reader_factory(self.path, start_offset)
self._records = collections.deque() # type: collections.deque[Record]
self._read_exception = \
None # type: tuple[BaseException, BaseException, types.TracebackType]
self._close_exception = \
None # type: tuple[BaseException, BaseException, types.TracebackType]
self._lock = threading.Lock()
self._wake_up_producer = threading.Condition(self._lock)
self._wake_up_consumers = threading.Condition(self._lock)
self._thread = threading.Thread(target=self._run,
name=_shorten_event_log_path(self.path))
def get_size(self):
"""Returns byte length of file.
This is guaranteed to return a number greater than or equal to the
offset of the last record returned by get_next_record().
In the average case, this method will not block. However, if the
i/o thread has not yet computed this value, then this method will
block on a stat call.
This method can be called after the instance has been closed.
Returns:
The byte length of file, which might increase over time, but is
guaranteed to never decrease. It's also guaranteed that it will
be greater than or equal to the offset field of any Record.
:rtype: int
"""
with self._lock:
if self._should_stat():
self._stat()
return self._size
def get_next_record(self):
"""Reads one record.
When this method is first called, it will spawn the thread and
block until a record is read. Once the thread starts, it will queue
up records which can be read without blocking. The exception is
when we reach the end of the file, in which case each repeated call
will be synchronous. There is no background polling. If new data is
appended to the file, new records won't be buffered until this
method is invoked again. The caller should take care to meter calls
to this method once it reaches the end of file, lest they impact
performance.
Returns:
A Record object, or None if there are no more records available
at the moment.
Raises:
IOError: If this instance has been closed.
tf.errors.DataLossError: If corruption was encountered in the
records file.
Exception: To propagate any exceptions that may have been thrown
by the read operation in the other thread. If an exception is
thrown, then all subsequent calls to this method will rethrow
that same exception.
:rtype: Record
"""
with self._lock:
if self._is_closed:
raise IOError('%s is closed' % self)
if not self._thread.is_alive():
self._thread.start()
else:
record = self._get_record()
if record is not None:
if self._should_wakeup():
self._wake_up_producer.notify()
return record
self._has_reached_end = False
self._wake_up_producer.notify()
while not (self._read_exception or
self._has_reached_end or
self._records):
self._wake_up_consumers.wait()
return self._get_record()
def close(self):
"""Closes event log reader if open.
If the i/o thread is running, this method blocks until it has been
shut down.
Further reads are not permitted after this method is called.
Raises:
Exception: To propagate any exceptions that may have been thrown
by the close operation in the other thread. If an exception
is thrown, then all subsequent calls to this method will
rethrow that same exception.
"""
with self._lock:
if not self._is_closed:
self._is_closed = True
if not self._thread.is_alive():
self._reader = None
return
self._wake_up_producer.notify()
while self._reader is not None:
self._wake_up_consumers.wait()
if self._close_exception is not None:
six.reraise(*self._close_exception)
def _get_record(self):
if self._read_exception is not None:
six.reraise(*self._read_exception)
if not self._records:
return None
record = self._records.popleft()
self._buffered -= len(record.record)
return record
@util.guarded_by('_lock')
def _should_wakeup(self):
return (self._is_closed or
self._read_exception is None and
(self._should_rebuffer() or
(self._stat_interval and self._should_stat())))
@util.guarded_by('_lock')
def _should_rebuffer(self):
return (not self._has_reached_end and
(float(self._buffered) <
self._read_ahead / BufferedRecordReader.READ_AHEAD_AGGRESSION))
@util.guarded_by('_lock')
def _should_stat(self):
return (self._read_exception is None and
(self._offset > self._size or
self._last_stat <= self._clock() - self._stat_interval))
@util.guarded_by('_lock')
def _stat(self):
try:
now = self._clock()
self._size = self._reader.get_size()
self._last_stat = now
except Exception as e: # pylint: disable=broad-except
tf.logging.debug('Stat failed: %s', e)
self._read_exception = sys.exc_info()
def _run(self):
while True:
with self._lock:
while not self._should_wakeup():
self._wake_up_producer.wait()
if self._is_closed:
try:
self._reader.close()
tf.logging.debug('Closed')
except Exception as e: # pylint: disable=broad-except
self._close_exception = sys.exc_info()
tf.logging.debug('Close failed: %s', e)
self._reader = None
self._wake_up_consumers.notify_all()
return
if self._buffered >= self._read_ahead:
tf.logging.debug('Waking up to stat')
self._stat()
continue
# Calculate a good amount of data to read outside the lock.
# The less we have buffered, the less re-buffering we'll do.
# We want to minimize wait time in the other thread. See the
# following contour plot: https://goo.gl/HTBcCU
x = float(self._buffered)
y = BufferedRecordReader.READ_AHEAD_AGGRESSION
c = float(self._read_ahead)
want = int(min(c - x, y/c * x**y + 1))
# Perform re-buffering outside lock.
self._rebuffer(want)
def _rebuffer(self, want):
tf.logging.debug('Waking up to read %s bytes', _localize_int(want))
records = []
read_exception = self._read_exception
if read_exception is None:
try:
while want > 0:
record = self._reader.get_next_record()
if record is None:
break
self._offset = record.offset
records.append(record)
want -= len(record.record)
except Exception as e: # pylint: disable=broad-except
tf.logging.debug('Read failed: %s', e)
read_exception = sys.exc_info()
with self._lock:
self._read_exception = read_exception
if self._should_stat():
self._stat()
if not self._read_exception:
if not records:
self._has_reached_end = True
else:
for record in records:
self._records.append(record)
self._buffered += len(record.record)
self._wake_up_consumers.notify_all()
def __str__(self):
return u'BufferedRecordReader{%s}' % self.path
class RateCounter(object):
"""Utility class for tracking how much a number increases each second.
The rate is calculated by averaging of samples within a time window,
which weights recent samples more strongly.
"""
def __init__(self, window, clock=time.time):
"""Creates new instance.
Args:
window: The maximum number of seconds across which rate is
averaged. In practice, the rate might be averaged over a time
period greater than window if set_value is being called less
frequently than window.
clock: Function returning a float with the number of seconds
since the UNIX epoch in zulu time.
:type window: float
:type clock: () -> float
"""
self._window = window
self._clock = clock
self._points = collections.deque()
self._last_value = None # type: float
self._last_time = None # type: float
def get_rate(self):
"""Determines rate of increase in value per second averaged over window.
Returns:
An integer representing the rate or None if not enough
information has been collected yet.
:rtype: int
"""
points = []
total_elapsed = 0.0
total_weight = 0.0
for rate, elapsed, _ in self._points:
weight = 1.0 / (total_elapsed + 1) * elapsed
total_elapsed += elapsed
total_weight += weight
points.append((rate, weight))
if not total_weight:
return 0
return int(sum(w / total_weight * r for r, w in points))
def set_value(self, value):
"""Sets number state.
This method adds a delta between value and the value of the last
time this method was called. Therefore the first invocation does
not add a delta.
Raises:
ValueError: If value is less than the last value.
:type value: float
"""
value = float(value)
now = self._clock()
if self._last_value is None:
self._last_value = value
self._last_time = now
return
if value < self._last_value:
raise ValueError('%f < %f' % (value, self._last_value))
delta = value - self._last_value
elapsed = now - self._last_time
if not elapsed:
return
self._points.appendleft((delta / elapsed, elapsed, now))
self._last_time = now
self._last_value = value
self._remove_old_points()
def bump(self):
"""Makes time since last set_value count for nothing."""
self._last_time = self._clock()
def _remove_old_points(self):
threshold = self._clock() - self._window
while self._points:
r, e, t = self._points.pop()
if t > threshold:
self._points.append((r, e, t))
break
@util.closeable
class Progress(object):
"""Terminal UI for displaying job progress in terms of bytes.
On teletypes, this class will display a nice ephemeral unicode
progress bar. Otherwise it just emits periodic log messages.
This class keeps track of the rate at which input is processed, as
well as the rate it grows. These values are represented to the user
using the DELTA and NABLA symbols.
An alarm is displayed if the consumption rate falls behind the
production rate. In order for this to be calculated properly, the
sleep method of this class should be used rather than time.sleep.
"""
BAR_INTERVAL_SECONDS = 0.25
BAR_LOGGER = logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL)
BAR_WIDTH = 45
BLOCK_DARK = u'\u2593'
BLOCK_LIGHT = u'\u2591'
DELTA = u'\u2206'
LOG_INTERVAL_SECONDS = 5.0
NABLA = u'\u2207'
RATE_WINDOW = 20.0
def __init__(self, clock=time.time,
sleep=time.sleep,
log_callback=tf.logging.info,
bar_callback=BAR_LOGGER.info,
rate_counter_factory=RateCounter):
"""Creates new instance.
Args:
clock: Function returning a float with the number of seconds
since the UNIX epoch in zulu time.
sleep: Injected time.sleep function.
log_callback: Callback for emitting normal log records.
bar_callback: Callback for emitting ephemeral bar records.
rate_counter_factory: Constructor to RateCounter, which can be
swapped out for testing.
:type clock: () -> float
:type sleep: (float) -> None
:type rate_counter_factory: (float) -> RateCounter
"""
self._clock = clock
self._sleep = sleep
self._log_callback = log_callback
self._bar_callback = bar_callback
self._initialized = False
self._offset = 0
self._size = 0
self._last_log_time = 0.0
self._last_bar_time = 0.0
self._last_log_offset = -1
self._last_bar_offset = -1
self._rate_offset = rate_counter_factory(Progress.RATE_WINDOW)
self._rate_size = rate_counter_factory(Progress.RATE_WINDOW)
def set_progress(self, offset, size):
"""Updates the progress bar state.
This method will cause progress information to be occasionally
written out.
Args:
offset: The number of bytes processed so far.
size: The total number of bytes. This is allowed to increase or
decrease, but it must remain at least offset.
Raises:
ValueError: If offset is greater than size, or offset or size
decreased from the last invocation.
:type offset: int
:type size: int
"""
if offset > size:
raise ValueError('offset (%d) can not exceed size (%d)' % (offset, size))
self._rate_offset.set_value(offset)
self._rate_size.set_value(size)
self._offset = offset
self._size = size
now = self._clock()
if not self._initialized:
self._last_log_time = now
self._last_bar_time = now
self._initialized = True
return
elapsed = now - self._last_log_time
if elapsed >= Progress.LOG_INTERVAL_SECONDS:
self._last_log_time = now
self._show_log()
elapsed = now - self._last_bar_time
if elapsed >= Progress.BAR_INTERVAL_SECONDS:
self._last_bar_time = now
self._show_bar()
def close(self):
"""Forces progress to be written to log.
This method exists because we don't want the progress bar to say
something like 98% once the file is done loading.
"""
self._show_log(can_stall=False)
self._show_bar(can_stall=False)
# Instructs util.LogHandler to clear the ephemeral logging state.
self._bar_callback('')
def sleep(self, seconds):
"""Sleeps for a given number of seconds.
Time spent sleeping in this method does not have a detrimental
impact on the consumption rate.
:type seconds: float
"""
self._sleep(seconds)
self._rate_offset.bump()
def _show_log(self, can_stall=True):
is_stalled = can_stall and self._offset == self._last_log_offset
self._last_log_offset = self._offset
self._log_callback('Loaded %s', self._get_message(is_stalled))
def _show_bar(self, can_stall=True):
is_stalled = can_stall and self._offset == self._last_bar_offset
self._last_bar_offset = self._offset
sofar = int(self._get_fraction() * Progress.BAR_WIDTH)
bar = (Progress.BLOCK_DARK * sofar +
Progress.BLOCK_LIGHT * (Progress.BAR_WIDTH - sofar))
self._bar_callback(u'%s %s ', bar, self._get_message(is_stalled))
def _get_message(self, is_stalled):
rate_offset = self._rate_offset.get_rate() # summary processing speed
rate_size = self._rate_size.get_rate() # summary production speed
message = u'%d%% of %s%s%s' % (
int(self._get_fraction() * 100.0),
_localize_int(self._size),
self._get_rate_suffix(Progress.DELTA, rate_offset),
self._get_rate_suffix(Progress.NABLA, rate_size))
if rate_offset and rate_size and rate_offset < rate_size:
# If TensorFlow is writing summaries to disk faster than we can
# insert them into the database, that's kind of problematic.
message += u' ' + self._make_red(u'[meltdown]')
elif is_stalled:
message += u' %s[stalled]%s' % (util.Ansi.BOLD, util.Ansi.RESET)
return message
def _get_fraction(self):
if not self._size:
return 0.0
else:
return float(self._offset) / self._size
def _get_rate_suffix(self, symbol, rate):
if not rate:
return u''
return u' %s %sB/s' % (symbol, _localize_int(rate))
def _make_red(self, text):
return (util.Ansi.BOLD +
util.Ansi.RED +
(util.Ansi.FLIP if self._offset % 2 == 0 else u'') +
text +
util.Ansi.RESET)
_SHORTEN_EVENT_LOG_PATH_PATTERN = re.compile(r'(?:[^/\\]+[/\\])?(?:[^/\\]+)$')
def _shorten_event_log_path(path):
"""Makes an event log path more human readable.
Returns:
Path containing only basename and the first parent directory name,
if there is one.
:type path: str
:rtype: str
"""
m = _SHORTEN_EVENT_LOG_PATH_PATTERN.search(path)
return m.group(0) if m else None
def _localize_int(n):
"""Adds locale specific thousands group separators.
:type n: int
:rtype: str
"""
return locale.format('%d', n, grouping=True)
| apache-2.0 | -8,526,100,744,995,340,000 | 31.991404 | 80 | 0.64617 | false | 3.905699 | false | false | false |
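A small, deterministic sketch of how the `RateCounter` defined above behaves, using an injected fake clock as the constructor allows. The import path is an assumption based on the file's location.

```python
from tensorboard.loader import RateCounter  # import path assumed from this file's location

fake_time = [0.0]
clock = lambda: fake_time[0]

counter = RateCounter(window=20.0, clock=clock)
counter.set_value(0)        # first sample only records the starting point
fake_time[0] += 1.0
counter.set_value(1000)     # +1000 bytes over 1 second
fake_time[0] += 1.0
counter.set_value(3000)     # +2000 bytes over 1 second
print(counter.get_rate())   # -> 1666, weighted toward the more recent 2000 B/s sample
```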
raspibo/ibt2 | tests/ibt2_tests.py | 1 | 11802 | #!/usr/bin/env python3
"""I'll Be There, 2 (ibt2) - tests
Copyright 2016-2017 Davide Alberani <[email protected]>
RaspiBO <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import requests
import monco
BASE_URL = 'http://localhost:3000/v1.1/'
DB_NAME = 'ibt2_test'
def dictInDict(d, dContainer):
for k, v in d.items():
if k not in dContainer:
return False
if v != dContainer[k]:
return False
return True
class Ibt2Tests(unittest.TestCase):
def setUp(self):
self.monco_conn = monco.Monco(dbName=DB_NAME)
self.connection = self.monco_conn.connection
self.db = self.monco_conn.db
self.db['attendees'].drop()
self.db['days'].drop()
self.db['groups'].drop()
self.db['settings'].drop()
self.db['users'].delete_one({'username': 'newuser'})
self.db['users'].delete_one({'username': 'newuser2'})
def tearDown(self):
self.db['attendees'].drop()
self.db['days'].drop()
self.db['groups'].drop()
self.db['settings'].drop()
self.db['users'].delete_one({'username': 'newuser'})
self.db['users'].delete_one({'username': 'newuser2'})
def add_attendee(self, attendee):
r = requests.post('%sattendees' % BASE_URL, json=attendee)
r.raise_for_status()
return r
def test_add_attendee(self):
attendee = {'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'}
r = self.add_attendee(attendee)
rj = r.json()
id_ = rj.get('_id')
self.assertTrue(dictInDict(attendee, rj))
r = requests.get(BASE_URL + 'attendees/' + id_)
r.raise_for_status()
rj = r.json()
self.assertTrue(dictInDict(attendee, rj))
def test_put_attendee(self):
attendee = {'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'}
r = self.add_attendee(attendee)
update = {'notes': 'A note'}
r = requests.post(BASE_URL + 'attendees', json=attendee)
r.raise_for_status()
id_ = r.json().get('_id')
r = requests.put(BASE_URL + 'attendees/' + id_, json=update)
r.raise_for_status()
r = requests.get('%s%s/%s' % (BASE_URL, 'attendees', id_))
r.raise_for_status()
rj = r.json()
final = attendee.copy()
final.update(update)
self.assertTrue(dictInDict(final, rj))
def test_delete_attendee(self):
attendee = {'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'}
r = self.add_attendee(attendee)
id_ = r.json().get('_id')
r.connection.close()
r = requests.delete(BASE_URL + 'attendees/' + id_)
r.raise_for_status()
self.assertTrue(r.json().get('success'))
r.connection.close()
def test_get_days(self):
self.add_attendee({'day': '2017-01-15', 'name': 'A name', 'group': 'group A'})
self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'group C'})
self.add_attendee({'day': '2017-01-15', 'name': 'Another name', 'group': 'group A'})
self.add_attendee({'day': '2017-01-15', 'name': 'Yet another name', 'group': 'group B'})
r = requests.get(BASE_URL + 'days')
r.raise_for_status()
rj = r.json()
self.assertEqual([x.get('day') for x in rj['days']], ['2017-01-15', '2017-01-16'])
self.assertEqual([x.get('group') for x in rj['days'][0]['groups']], ['group A', 'group B'])
self.assertTrue(len(rj['days'][0]['groups'][0]['attendees']) == 2)
self.assertTrue(len(rj['days'][0]['groups'][1]['attendees']) == 1)
self.assertEqual([x.get('group') for x in rj['days'][1]['groups']], ['group C'])
self.assertTrue(len(rj['days'][1]['groups'][0]['attendees']) == 1)
def test_get_days_summary(self):
self.add_attendee({'day': '2017-01-15', 'name': 'A name', 'group': 'group A'})
self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'group C'})
self.add_attendee({'day': '2017-01-15', 'name': 'Another name', 'group': 'group A'})
self.add_attendee({'day': '2017-01-15', 'name': 'Yet another name', 'group': 'group B'})
r = requests.get(BASE_URL + 'days?summary=1')
r.raise_for_status()
rj = r.json()
self.assertEqual(rj,
{"days": [{"groups_count": 2, "day": "2017-01-15"}, {"groups_count": 1, "day": "2017-01-16"}]})
def test_create_user(self):
r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'})
r.raise_for_status()
r.connection.close()
s = self.login('newuser', 'ibt2')
r = s.get(BASE_URL + 'users/current')
r.raise_for_status()
r.connection.close()
def test_update_user(self):
r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'})
r.raise_for_status()
id_ = r.json()['_id']
r = requests.post(BASE_URL + 'users', json={'username': 'newuser2', 'password': 'ibt2'})
r.raise_for_status()
id2_ = r.json()['_id']
r = requests.put(BASE_URL + 'users/' + id_, json={'email': '[email protected]'})
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
s = self.login('newuser', 'ibt2')
r = s.put(BASE_URL + 'users/' + id_, json={'email': '[email protected]'})
r.raise_for_status()
self.assertEqual(r.json().get('email'), '[email protected]')
r.connection.close()
r = s.put(BASE_URL + 'users/' + id2_, json={'email': '[email protected]'})
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
r.connection.close()
s = self.login('admin', 'ibt2')
r = s.put(BASE_URL + 'users/' + id_, json={'email': '[email protected]'})
r.raise_for_status()
self.assertEqual(r.json().get('email'), '[email protected]')
r.connection.close()
def test_delete_user(self):
r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'})
r.raise_for_status()
id_ = r.json()['_id']
r = requests.post(BASE_URL + 'users', json={'username': 'newuser2', 'password': 'ibt2'})
r.raise_for_status()
id2_ = r.json()['_id']
r = requests.delete(BASE_URL + 'users/' + id_)
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
r.connection.close()
s = self.login('newuser', 'ibt2')
r = s.delete(BASE_URL + 'users/' + id_)
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
r.connection.close()
r = s.delete(BASE_URL + 'users/' + id2_)
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
r.connection.close()
s = self.login('admin', 'ibt2')
r = s.delete(BASE_URL + 'users/' + id2_)
r.raise_for_status()
r.connection.close()
def test_duplicate_user(self):
r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'})
r.raise_for_status()
r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt3'})
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
def login(self, username, password):
s = requests.Session()
r = s.post(BASE_URL + 'login', json={'username': username, 'password': password})
r.raise_for_status()
r.connection.close()
return s
def test_created_by(self):
s = self.login('admin', 'ibt2')
r = s.get(BASE_URL + 'users/current')
r.raise_for_status()
user_id = r.json()['_id']
r.connection.close()
attendee = {'day': '2017-01-15', 'name': 'A name', 'group': 'group A'}
r = s.post('%sattendees' % BASE_URL, json=attendee)
r.raise_for_status()
rj = r.json()
self.assertEqual(user_id, rj['created_by'])
self.assertEqual(user_id, rj['updated_by'])
r.connection.close()
def test_put_day(self):
day = {'day': '2017-01-16', 'notes': 'A day note'}
self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'group C'})
r = requests.put(BASE_URL + 'days/2017-01-16/info', json=day)
r.raise_for_status()
rj = r.json()
self.assertTrue(dictInDict(day, rj))
r = requests.get(BASE_URL + 'days/2017-01-16')
r.raise_for_status()
rj = r.json()
self.assertTrue(dictInDict(day, rj))
def test_put_group(self):
self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'A group'})
group = {'group': 'A group', 'day': '2017-01-16', 'notes': 'A group note'}
r = requests.put(BASE_URL + 'days/2017-01-16/groups/A group/info', json=group)
r.raise_for_status()
rj = r.json()
self.assertTrue(dictInDict(group, rj))
r = requests.get(BASE_URL + 'days/2017-01-16')
r.raise_for_status()
rj = r.json()
self.assertTrue(dictInDict(group, rj['groups'][0]))
def test_delete_group(self):
self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'A group'})
s = self.login('admin', 'ibt2')
r = s.delete(BASE_URL + 'days/2017-01-16/groups/A group', params={'day': '2017-01-16', 'group': 'A group'})
r.raise_for_status()
rj = r.json()
r.connection.close()
r = requests.get(BASE_URL + 'days/2017-01-16')
r.raise_for_status()
rj = r.json()
self.assertTrue(rj == {})
r.connection.close()
def test_settings(self):
r = requests.get(BASE_URL + 'settings/non-existant')
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertEqual({'non-existant': None}, rj)
settings = {'key1': 'value1', 'key2': 'value2'}
r = requests.post(BASE_URL + 'settings', json=settings)
self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status)
s = self.login('admin', 'ibt2')
r = s.post(BASE_URL + 'settings', json=settings)
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertTrue('error' not in rj)
r = requests.get(BASE_URL + 'settings')
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertEqual(rj, settings)
r = requests.get(BASE_URL + 'settings/key1')
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertEqual(rj, {'key1': 'value1'})
r = requests.get(BASE_URL + 'settings/key2')
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertEqual(rj, {'key2': 'value2'})
r = s.put(BASE_URL + 'settings/key2', json={'key2': 'value3'})
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertTrue('error' not in rj)
r = requests.get(BASE_URL + 'settings/key2')
r.raise_for_status()
rj = r.json()
r.connection.close()
self.assertEqual(rj, {'key2': 'value3'})
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | -564,341,801,183,677,900 | 40.556338 | 120 | 0.565836 | false | 3.229885 | true | false | false |
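The tests above exercise a small REST API over HTTP; a minimal sketch of talking to it directly with `requests`, using the same endpoints, payload shapes, and credentials the tests use (the attendee values are just examples):

```python
import requests

BASE_URL = 'http://localhost:3000/v1.1/'

# Add an attendee.
r = requests.post(BASE_URL + 'attendees',
                  json={'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'})
r.raise_for_status()
attendee_id = r.json()['_id']

# List attendees grouped per day.
print(requests.get(BASE_URL + 'days').json()['days'])

# Admin-only calls go through a session logged in via /login.
s = requests.Session()
s.post(BASE_URL + 'login', json={'username': 'admin', 'password': 'ibt2'}).raise_for_status()
s.post(BASE_URL + 'settings', json={'key1': 'value1'}).raise_for_status()
```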
Gagaro/PimpMyBot | pimpmybot/core_modules/shop/shop.py | 1 | 1181 | """ Import the Base Module, A module is part of the bot who manage some stuff """
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
import schedule
from utils import db
from utils.modules import BaseModule
from utils.translations import _
from .api import buy_item, ShopItemParameter
from .models import ShopItem, BoughtItem
class ShopModule(BaseModule):
    """ Module for the ping handler """

    identifier = 'shop'
    title = "Shop"
    description = _("Allow users to buy things with money.")
    dependencies = ['users', 'money']

    menus = [{
        "title": _("Shop"),
        "icon": "shopping-cart",
        "view": "shop:items",
    }]

    api = {
        'buy_item': {
            'title': _('Buy an item'),
            'method': buy_item,
            'parameters': [
                ShopItemParameter('item', title="Shop item"),
            ]
        }
    }

    upgrades = []

    def install(self):
        super(ShopModule, self).install()
        db.create_tables([ShopItem, BoughtItem])

    def uninstall(self):
        super(ShopModule, self).uninstall()
        db.drop_tables([ShopItem, BoughtItem])
| mit | -6,916,275,225,974,315,000 | 24.673913 | 81 | 0.602032 | false | 3.923588 | false | false | false |
eandersson/amqpstorm | amqpstorm/tests/functional/legacy_tests.py | 1 | 4436 | import time
from amqpstorm import Channel
from amqpstorm.tests.functional.utility import TestFunctionalFramework
from amqpstorm.tests.functional.utility import setup
class LegacyFunctionalTests(TestFunctionalFramework):
def configure(self):
self.disable_logging_validation()
@setup(queue=True)
def test_functional_start_stop_consumer_tuple(self):
self.channel.queue.declare(self.queue_name)
self.channel.confirm_deliveries()
for _ in range(5):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
# Store and inbound messages.
inbound_messages = []
def on_message(body, channel, method, properties):
self.assertIsInstance(body, (bytes, str))
self.assertIsInstance(channel, Channel)
self.assertIsInstance(properties, dict)
self.assertIsInstance(method, dict)
inbound_messages.append(body)
if len(inbound_messages) >= 5:
channel.stop_consuming()
self.channel.basic.consume(callback=on_message,
queue=self.queue_name,
no_ack=True)
# Sleep for 0.01s to make sure RabbitMQ has time to catch up.
time.sleep(0.01)
self.channel.start_consuming(to_tuple=True)
# Make sure all five messages were downloaded.
self.assertEqual(len(inbound_messages), 5)
@setup(queue=True)
def test_functional_publish_and_consume_five_messages_tuple(self):
self.channel.queue.declare(self.queue_name)
self.channel.confirm_deliveries()
for _ in range(5):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
# Store and inbound messages.
inbound_messages = []
def on_message(body, channel, method, properties):
self.assertEqual(body, self.message.encode('utf-8'))
self.assertIsInstance(body, (bytes, str))
self.assertIsInstance(channel, Channel)
self.assertIsInstance(properties, dict)
self.assertIsInstance(method, dict)
inbound_messages.append(body)
self.channel.basic.consume(callback=on_message,
queue=self.queue_name,
no_ack=True)
# Sleep for 0.01s to make sure RabbitMQ has time to catch up.
time.sleep(0.01)
self.channel.process_data_events(to_tuple=True)
# Make sure all five messages were downloaded.
self.assertEqual(len(inbound_messages), 5)
@setup(queue=True)
def test_functional_generator_consume(self):
self.channel.queue.declare(self.queue_name)
self.channel.confirm_deliveries()
for _ in range(5):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
self.channel.basic.consume(queue=self.queue_name,
no_ack=True)
# Sleep for 0.01s to make sure RabbitMQ has time to catch up.
time.sleep(0.01)
# Store and inbound messages.
inbound_messages = []
for message in self.channel.build_inbound_messages(
break_on_empty=True,
to_tuple=True):
self.assertIsInstance(message, tuple)
self.assertIsInstance(message[0], bytes)
self.assertIsInstance(message[1], Channel)
self.assertIsInstance(message[2], dict)
self.assertIsInstance(message[3], dict)
inbound_messages.append(message)
# Make sure all five messages were downloaded.
self.assertEqual(len(inbound_messages), 5)
@setup(queue=True)
def test_functional_publish_and_get_five_messages(self):
self.channel.queue.declare(self.queue_name)
# Publish 5 Messages.
for _ in range(5):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
# Sleep for 0.01s to make sure RabbitMQ has time to catch up.
time.sleep(0.01)
# Get 5 messages.
for _ in range(5):
payload = self.channel.basic.get(self.queue_name, to_dict=True)
self.assertIsInstance(payload, dict)
| mit | 1,982,864,760,062,838,500 | 36.277311 | 75 | 0.599639 | false | 4.236867 | true | false | false |
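The test cases above get `self.channel` from the test framework; a rough standalone sketch of the same publish/consume pattern, assuming a RabbitMQ broker at the placeholder address and credentials below:

```python
import amqpstorm

connection = amqpstorm.Connection('localhost', 'guest', 'guest')  # placeholders
channel = connection.channel()
channel.queue.declare('test.queue')
channel.confirm_deliveries()
channel.basic.publish(body='hello', routing_key='test.queue')

def on_message(body, channel, method, properties):
    print(body)
    channel.stop_consuming()

channel.basic.consume(callback=on_message, queue='test.queue', no_ack=True)
channel.start_consuming(to_tuple=True)  # same callback shape as in the tests above
```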
greencoder/hopefullysunny-django | vendor/noaa/stations.py | 1 | 2310 | import os
import shutil
import noaa.models
import noaa.utils
def nearest_stations_with_distance(lat, lon, stations, radius=10.0,
                                   units="miles"):
    """Find all stations within radius of target.

    :param lat:
    :param lon:
    :param stations: list of stations objects to scan
    :param radius:
    :param units:
    :returns: [(dist, station)]
    """
    matches = []
    for station in stations:
        s_lat = station.location.lat
        s_lon = station.location.lon
        dist = noaa.utils.earth_distance(
            s_lat, s_lon, lat, lon, dist_units=units)
        if dist <= radius:
            matches.append((dist, station))
    matches.sort()
    return matches


def nearest_station(lat, lon, stations):
    """Find single nearest station.

    :param lat:
    :param lon:
    :param stations: list of stations objects to scan
    """
    matches = nearest_stations_with_distance(lat, lon, stations)
    if matches:
        dist, station = matches[0]
    else:
        station = None
    return station


def get_stations_from_cache(filename):
    if not os.path.exists(filename):
        resp = noaa.stations.fetch_station_data()
        with open(filename, "w") as f:
            shutil.copyfileobj(resp, f)
    stations = noaa.stations.get_stations_from_file(filename)
    return stations


def get_stations_from_web():
    resp = fetch_station_data()
    stations = _parse_stations(resp)
    return stations


def get_stations_from_file(filename):
    with open(filename) as f:
        stations = _parse_stations(f)
    return stations


def fetch_station_data():
    STATIONS_URL = "http://www.weather.gov/xml/current_obs/index.xml"
    resp = noaa.utils.open_url(STATIONS_URL)
    return resp


def _parse_stations(fileobj):
    stations = []
    tree = noaa.utils.parse_xml(fileobj)
    for station_e in tree.getroot().findall('station'):
        lat = float(station_e.find('latitude').text)
        lon = float(station_e.find('longitude').text)
        description = station_e.find('state').text
        location = noaa.models.Location(lat, lon, description)
        station_id = station_e.find('station_id').text
        station = noaa.models.Station(station_id, location)
        stations.append(station)
    return stations
| mit | 4,585,618,660,175,534,000 | 24.666667 | 69 | 0.630303 | false | 3.598131 | false | false | false |
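A short sketch of the intended call pattern for the helpers above, assuming the `noaa` package from this repo is importable; the cache path and coordinates are example values.

```python
import noaa.stations

# Download and cache the full station list, then find the station closest to a point.
stations = noaa.stations.get_stations_from_cache('/tmp/noaa_stations.xml')
station = noaa.stations.nearest_station(40.78, -73.97, stations)
if station is not None:
    print(station, station.location.lat, station.location.lon)
```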
fferri/pygolog | golog_program.py | 1 | 7227 | #!/usr/bin/env python3
from strips import *
#def astar(p, s, a):
# start = (p, s, a)
# closed = []
# open = []
# gh = []
#
# def heuristic_cost_estimate(x): return 0
#
# def add_to_open(x, g, h):
# if x not in open:
# open.append(x)
# gh = (g, h)
#
# def find_next_best():
# current = None
# g, h = 0, 0
# for i in range(len(open)):
# if current is None or gh[i][0] + gh[i][1] < g + h:
# current = open[i]
# g, h = gh[i]
# return (current, g, h)
#
# def move_to_closed(x):
# if x in open:
# i = open.index(x)
# del open[i]
# del gh[i]
# if current not in closed:
# closed.append(x)
#
# def update_gh(x, g, h)
#
# add_to_open(start, 0, heuristic_cost_estimate(start))
#
# while open:
# current, g, h = find_next_best()
#
# p, s, a = current
# if p.final(s):
# yield current
#
# move_to_closed(current)
#
# for next1 in p.trans(s):
# if next1 in closed:
# continue
# p1, s1, a1 = next1
# g1 = g + 1 # 1 == dist_between(current, next1)
#
# if next1 not in open or g1 < gh[open.index(next1)][0]:
# i = open.index(next1)
# gh[next1][0] = g1
# if next1 not in open:
# open.add(next1)
def trans_star(p, s, a):
if p.final(s):
yield (p, s, a)
for p1, s1, a1 in p.trans(s):
yield from trans_star(p1, s1, a + a1)
def indigolog(p, s, a, exec_cb=lambda a: None, exog_cb=lambda s: s):
# at each step apply exogenous events if any:
s = exog_cb(s)
for p1, s1, a1 in p.trans(s):
# commit to the first step, since we are executing in an online fashion:
exec_cb(a1)
return indigolog(p1, s1, a + a1, exec_cb, exog_cb)
else: return p.final(s)
class Program:
pass
class Choose(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = Choose(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
yield from self.p1.trans(s)
yield from self.p2.trans(s)
def final(self, s):
return self.p1.final(s) or self.p2.final(s)
def __repr__(self): return 'Choose(%s, %s)' % (self.p1, self.p2)
class Empty(Program):
def trans(self, s):
yield from () # yield nothing
def final(self, s):
return True
def __repr__(self): return 'Empty()'
class Exec(Program):
def __init__(self, ground_action):
self.ground_action = ground_action
def trans(self, s):
try: yield (Empty(), self.ground_action.apply(s), [self.ground_action])
except UnsatisfiedPreconditions: pass
def final(self, s):
return False
def __repr__(self): return 'Exec(%s)' % (self.ground_action)
class If(Program):
def __init__(self, condition, p1, p2):
self.condition = condition
self.p1 = p1
self.p2 = p2
def trans(self, s):
if self.condition(s): yield from self.p1.trans(s)
else: yield from self.p2.trans(s)
def final(self, s):
if self.condition(s): return self.p1.final(s)
else: return self.p2.final(s)
def __repr__(self): return 'If(%s, %s, %s)' % (self.condition, self.p1, self.p2)
class Pick(Program):
def __init__(self, domain, p1):
self.domain = domain
self.p1 = p1
def trans(self, s):
for obj in Object.get_objects_of_type(self.domain):
yield from self.p1(obj).trans(s)
def final(self, s):
for obj in Object.get_objects_of_type(self.domain):
if self.p1(obj).final(s): return True
return False
def __repr__(self): return 'Pick(%s, %s)' % (self.domain.__name__, self.p1)
class Search(Program):
def __init__(self, p1):
self.p1 = p1
def trans(self, s):
yield from trans_star(self.p1, s, [])
def final(self, s):
return any(trans_star(self.p1, s, []))
def __repr__(self): return 'Search(%s)' % self.p1
class Sequence(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = Sequence(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
if self.p1.final(s):
yield from self.p2.trans(s)
for pn, sn, an in self.p1.trans(s):
yield (Sequence(pn, self.p2), sn, an)
def final(self, s):
return self.p1.final(s) and self.p2.final(s)
def __repr__(self): return 'Sequence(%s, %s)' % (self.p1, self.p2)
class Star(Program):
def __init__(self, p1):
self.p1 = p1
def trans(self, s):
for pn, sn, an in self.p1.trans(s):
yield (Sequence(pn, self), sn, an)
def final(self, s):
return True
def __repr__(self): return 'Star(%s)' % (self.p1)
class Test(Program):
def __init__(self, condition):
self.condition = condition
def trans(self, s):
if self.condition(s):
yield (Empty(), s, [])
def final(self, s):
return False
def __repr__(self): return 'Test(%s)' % self.condition
class While(Program):
def __init__(self, condition, p1):
self.condition = condition
self.p1 = p1
def trans(self, s):
if self.condition(s):
for pn, sn, an in self.p1.trans(s):
yield (Sequence(pn, self), sn, an)
def final(self, s):
return not self.condition(s) or self.p1.final(s)
def __repr__(self): return 'While(%s, %s)' % (self.condition, self.p1)
# ConGolog constructs:
class Conc(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = Conc(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
p1_trans = False
for pn, sn, an in self.p1.trans(s):
p1_trans = True
yield (Conc(pn, self.p2), sn, an)
if p1_trans: return
for pn, sn, an in self.p2.trans(s):
yield (Conc(self.p1, pn), sn, an)
def final(self, s):
return self.p1.final(s) and self.p2.final(s)
def __repr__(self): return 'Conc(%s, %s)' % (self.p1, self.p2)
class PConc(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = PConc(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
p1_trans = False
for pn, sn, an in self.p1.trans(s):
p1_trans = True
yield (PConc(pn, self.p2), sn, an)
if p1_trans: return
for pn, sn, an in self.p2.trans(s):
yield (PConc(self.p1, pn), sn, an)
def final(self, s):
return self.p1.final(s) and self.p2.final(s)
def __repr__(self): return 'PConc(%s, %s)' % (self.p1, self.p2)
class IConc(Program):
def __init__(self, p1):
self.p1 = p1
def trans(self, s):
for pn, sn, an in self.p1.trans(s):
yield (Conc(pn, IConc(self.p1)), sn, an)
def final(self, s):
return True
def __repr__(self): return 'IConc(%s)' % (self.p1)
def interrupt(trigger, body):
return While(lambda s: True, If(trigger, body, Test(lambda s: False)))
def prioritized_interrupts(*args):
return PConc(*args)
| bsd-3-clause | 6,223,094,759,193,637,000 | 25.569853 | 84 | 0.52968 | false | 2.904743 | false | false | false |
openbaton/openbaton-cli | org/openbaton/v2/services.py | 1 | 1738 | import logging
from org.openbaton.v2.cmd import BaseObCmd
from org.openbaton.v2.utils import get_result_to_list, get_result_to_show, parse_path_or_json, result_to_str
class Services(BaseObCmd):
"""Command to manage Services. It allows to:
* show details of a specific Service passing an id
* list all saved Services
* delete a specific Service passing an id
* create a specific Service passing a path to a file or directly the json content
"""
log = logging.getLogger(__name__)
keys_to_list = ["id", "name"]
keys_to_exclude = []
def find(self, params):
if not params:
return "ERROR: missing <service-id>"
_id = params[0]
return result_to_str(get_result_to_show(self.app.ob_client.get_service(_id),
excluded_keys=self.keys_to_exclude,
_format=self.app.format))
def create(self, params):
if not params:
return "ERROR: missing <service> or <path-to-json>"
service_str = "".join(params)
self.log.debug("String service is %s" % service_str)
service = parse_path_or_json(service_str)
return self.app.ob_client.create_service(service)
def delete(self, params):
if not params:
return "ERROR: missing <servicename>"
_id = params[0]
self.app.ob_client.delete_service(_id)
return "INFO: Deleted service with id %s" % _id
def list(self, params=None):
return result_to_str(
get_result_to_list(self.app.ob_client.list_services(), keys=self.keys_to_list, _format=self.app.format),
_format=self.app.format)
| apache-2.0 | -8,828,528,111,368,179,000 | 36.782609 | 116 | 0.595512 | false | 3.761905 | false | false | false |
bhavanaananda/DataStage | src/AdminUIHandler/MiscLib/tests/TestCombinators.py | 8 | 1261 | # $Id: TestCombinators.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library combinators
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys
import unittest
sys.path.append("../..")
from MiscLib.Combinators import *
class TestCombinators(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testApply(self):
# Is function application like BCPL? (fn can be a variable)
def ap(f,v): return f(v)
def inc(n): return n+1
assert ap(inc,2)==3
def testCurry(self):
def f(a,b,c): return a+b+c
g = curry(f,1,2)
assert g(3) == 6
def testCompose(self):
def f(a,b,c): return a+b+c
def g(a,b): return a*b
h = compose(f,g,1000,200)
assert h(3,4) == 1212, "h(3,4) is "+str(h(3,4))
# Code to run unit tests directly from command line.
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCombinators("testApply"))
suite.addTest(TestCombinators("testCurry"))
suite.addTest(TestCombinators("testCompose"))
return suite
if __name__ == "__main__":
# unittest.main()
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
| mit | 4,510,261,431,196,569,600 | 23.72549 | 68 | 0.620143 | false | 3.208651 | true | false | false |
MattDevo/coreboot | util/riscv/sifive-gpt.py | 2 | 6519 | #!/usr/bin/env python3
# This file is part of the coreboot project.
#
# Copyright (C) 2018 Jonathan Neuschäfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys, os, struct, uuid, zlib, io
# This script wraps the bootblock in a GPT partition, because that's what
# SiFive's bootrom will load.
# Size of a GPT disk block, in bytes
BLOCK_SIZE = 512
BLOCK_MASK = BLOCK_SIZE - 1
# Size of the bootcode part of the MBR
MBR_BOOTCODE_SIZE = 0x1be
# MBR trampoline to bootblock
MBR_BOOTCODE = bytes([
# j pc + 0x0800
0x6f, 0x00, 0x10, 0x00,
])
# A protecive MBR, without the bootcode part
PROTECTIVE_MBR_FOOTER = bytes([
0x00, 0x00, 0x02, 0x00, 0xee, 0xff, 0xff, 0xff,
0x01, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x55, 0xaa
])
# A "protective MBR"[1], which may also contain some boot code.
# [1]: https://en.wikipedia.org/wiki/GUID_Partition_Table#PROTECTIVE-MBR
class ProtectiveMBR:
def __init__(self):
self.bootcode = MBR_BOOTCODE + bytes(MBR_BOOTCODE_SIZE - len(MBR_BOOTCODE))
def generate(self, stream):
assert len(self.bootcode) == MBR_BOOTCODE_SIZE
mbr = self.bootcode + PROTECTIVE_MBR_FOOTER
assert len(mbr) == BLOCK_SIZE
stream.write(mbr)
# Generate a GUID from a string
class GUID(uuid.UUID):
def __init__(self, string):
super().__init__(string)
def get_bytes(self):
return self.bytes_le
DUMMY_GUID_DISK_UNIQUE = GUID('17145242-abaa-441d-916a-3f26c970aba2')
DUMMY_GUID_PART_UNIQUE = GUID('7552133d-c8de-4a20-924c-0e85f5ea81f2')
GUID_TYPE_FSBL = GUID('5B193300-FC78-40CD-8002-E86C45580B47')
# A GPT disk header
# https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_table_header_(LBA_1)
class GPTHeader:
def __init__(self):
self.current_lba = 1
self.backup_lba = 1
self.first_usable_lba = 2
self.last_usable_lba = 0xff # dummy value
self.uniq = DUMMY_GUID_DISK_UNIQUE
self.part_entries_lba = 2
self.part_entries_number = 0
self.part_entries_crc32 = 0
self.part_entry_size = 128
def pack_with_crc(self, crc):
header_size = 92
header = struct.pack('<8sIIIIQQQQ16sQIII',
b'EFI PART', 0x10000, header_size, crc, 0,
self.current_lba, self.backup_lba, self.first_usable_lba,
self.last_usable_lba, self.uniq.get_bytes(),
self.part_entries_lba, self.part_entries_number,
self.part_entry_size, self.part_entries_crc32)
assert len(header) == header_size
return header
def generate(self, stream):
crc = zlib.crc32(self.pack_with_crc(0))
header = self.pack_with_crc(crc)
stream.write(header.ljust(BLOCK_SIZE, b'\0'))
# A GPT partition entry.
# https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries_(LBA_2-33)
class GPTPartition:
def __init__(self):
self.type = GUID('00000000-0000-0000-0000-000000000000')
self.uniq = GUID('00000000-0000-0000-0000-000000000000')
self.first_lba = 0
self.last_lba = 0
self.attr = 0
self.name = ''
def generate(self, stream):
name_utf16 = self.name.encode('UTF-16LE')
part = struct.pack('<16s16sQQQ72s',
self.type.get_bytes(), self.uniq.get_bytes(),
self.first_lba, self.last_lba, self.attr,
name_utf16.ljust(72, b'\0'))
assert len(part) == 128
stream.write(part)
class GPTImage:
# The final image consists of:
# - A protective MBR
# - A GPT header
# - A few GPT partition entries
# - The content of the bootblock
def __init__(self):
self.mbr = ProtectiveMBR()
self.header = GPTHeader()
self.partitions = [ GPTPartition() for i in range(8) ]
self.bootblock = b''
# Fix up a few numbers to ensure consistency between the different
# components.
def fixup(self):
# Align the bootblock to a whole number to LBA blocks
bootblock_size = (len(self.bootblock) + BLOCK_SIZE - 1) & ~BLOCK_MASK
self.bootblock = self.bootblock.ljust(bootblock_size)
# Propagate the number of partition entries
self.header.part_entries_number = len(self.partitions)
self.header.first_usable_lba = 2 + self.header.part_entries_number // 4
# Create a partition entry for the bootblock
self.partitions[0].type = GUID_TYPE_FSBL
self.partitions[0].uniq = DUMMY_GUID_PART_UNIQUE
self.partitions[0].first_lba = self.header.first_usable_lba
self.partitions[0].last_lba = \
self.header.first_usable_lba + bootblock_size // BLOCK_SIZE
# Calculate the CRC32 checksum of the partitions array
partition_array = io.BytesIO()
for part in self.partitions:
part.generate(partition_array)
self.header.part_entries_crc32 = zlib.crc32(partition_array.getvalue())
def generate(self, stream):
self.mbr.generate(stream)
self.header.generate(stream)
for part in self.partitions:
part.generate(stream)
stream.write(self.bootblock)
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage:', file=sys.stderr)
print(' %s bootblock.raw.bin bootblock.bin' % sys.argv[0],
file=sys.stderr)
sys.exit(1)
image = GPTImage()
with open(sys.argv[1], 'rb') as f:
image.bootblock = f.read()
image.fixup()
# Verify if first partition is at expected lba, otherwise trampoline will
# fail
if image.partitions[0].first_lba != 4:
print('Warning: First partition not at expected location (LBA 4)')
sys.exit(1)
with open(sys.argv[2], 'wb') as f:
image.generate(f)
| gpl-2.0 | -5,781,629,859,728,216,000 | 32.772021 | 83 | 0.637005 | false | 3.112703 | false | false | false |
xen0n/gingerprawn | gingerprawn/shrimp/librarian/librarian_icon.py | 1 | 3467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# JNMaster / librarian / icon for the shrimp
#
# Copyright (C) 2011 Wang Xuerui <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from wx.lib.embeddedimage import PyEmbeddedImage
# generated from file 'librarian_icon2.png'
SHRIMP_ICON = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ"
"bWFnZVJlYWR5ccllPAAAA2RpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdp"
"bj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6"
"eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYwIDYxLjEz"
"NDc3NywgMjAxMC8wMi8xMi0xNzozMjowMCAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJo"
"dHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlw"
"dGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEu"
"MC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVz"
"b3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1N"
"Ok9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo3ODMzNkUwMTNENjNFMDExQkUxM0E4NkVCMjc4"
"M0JGMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxOEM3Qjc0MTYzM0QxMUUwQkQzOUY1QzYy"
"REI3OThDQyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxOEM3Qjc0MDYzM0QxMUUwQkQzOUY1"
"QzYyREI3OThDQyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M1IFdpbmRvd3Mi"
"PiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3ODMzNkUwMTNE"
"NjNFMDExQkUxM0E4NkVCMjc4M0JGMCIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3ODMzNkUw"
"MTNENjNFMDExQkUxM0E4NkVCMjc4M0JGMCIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRG"
"PiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pq0H8mEAAANESURBVHja7FvrbdswEKaM"
"/C6YBQJNEGiCoFogqRdooQWaaoLCExiaQOgEjruAgiwQIRMIWSCCF3BN4BQwKsWXyBMD+gACAaIH"
"77u77x6Uk+PxSGKWFYlczgCcATgDELmwLDAsBClPax2Cru86IwKQn9YB1guAQWMFgAciXxKApTkg"
"Pa09JgihkuCv2AFgHpDFngbz2AFwIcyLnky8KSQAOgfPuORAqKDmyD4LAL3j5xWn9YcDI3gAXJBg"
"IwEjWxqABuk99UR4tbE0Q79HYDPFv09dnPAlcJIkvjd3UHjIncN3raHX2A38Iir3L5Ct01rEOoUa"
"4Sso1QFYWwVx7nQejg3AmwEhDkrngv9lQGw15/ZWgh0CssZnsGwOTZKJfNHtBpcmwUdFZ1gIlG/B"
"3b1kl9CzAHPtG0WV+DDnBZgckEFM65LlTy53f5NUj7tZu0KYCBVQjh40V0U+jspyybWlra4YIzGm"
"+IuB4q9wj4g4p+5J5wLgKwSqCWVk8iwoY3NJ1qhddJCuSTAFixUW94qUkY3KtqENRNYQ67aTnQ7b"
"+q6yAOWGDy7Fu/VdeEAJRKerfK9Ifbw3ebf+HAByUHxD9E53Ouj0dC23wbC+aQhQsExh0NH1sOEa"
"/s40vSqVVIYdJgAUrLEm5ud4NWxYd9bXgOKlBMzadb6+kCheELsDzA4mMK1l/UAl1u99A8Bc9C+x"
"P7VlSt9abrSUEF/jw/oiEpwTX7WG8q0CgCnXv/fVqKwELzO1ILv2B2xSdd+bgm9QiE+VBluoy3WE"
"Mfy1QUvaWnhVTTzKFAleamxsa2EZCl5CNcG6J55lJVGwFxARc8cr2JiNW/YGnnKLMqZRzANsBpQq"
"2WvMBrx8PySaB2BPhQcAck1v2VmG2iQAvucBLoVyU6WKePqibAkAbIqkYa7o/DOaJUJgXG22XBpt"
"BXMGOirUrl2GAPaXojbl8ZggszkAhPadoA4PmNYowXOA6eBlLM+xAJAK3L1x3RKHDEAx0RihVoJL"
"xv4rmXEMpkuCoQIwZv+9r1I4RABS8v+ZIY0JgKeR8pmPcA8VgMqX8p8BgIJ8PCZHaYdDAWBQfkM8"
"/o4oVABKX52e8UAkRjn/cjR2AP4JMAAjpl+Wmz6N/QAAAABJRU5ErkJggg==")
# vi:ai:et:ts=2 sw=2 sts=2 fenc=utf-8
| gpl-3.0 | -3,908,397,957,452,425,700 | 58.775862 | 78 | 0.892126 | false | 1.619337 | false | false | false |
ccstr/sorting-analysis | sorting_analysis.py | 1 | 5463 | # Analyzes the performance of insertion sort and merge sort.
# Copyright (C) 2015
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
import matplotlib.pyplot as plt
import random
import time
start_time = time.time()
def mergesort(A):
"""Sorts array A by recursive mergesort. Returns the sorted array A and the
number of comparisons nc.
"""
n = len(A)
nc = 0
# if n > 1
if n > 1:
# copy A[0..floor(n/2) - 1] to B[0..floor(n/2) - 1]
B = A[:int(n/2)]
# copy A[floor(n/2)..n-1] to C[0..ceiling(n/2) - 1]
C = A[int(n/2):]
# Mergesort(B[0..floor(n/2) - 1])
B, first_comparisons = mergesort(B)
nc += first_comparisons
# Mergesort(C[0..ceiling(n/2) - 1])
C, second_comparisons = mergesort(C)
nc += second_comparisons
# Merge(B, C, A)
A, merge_comparisons = merge(B, C, A)
nc += merge_comparisons
return A, nc
def merge(B, C, A):
"""Merges two sorted arrays into one sorted array. Returns the sorted array
A and the number of comparisons nc.
"""
# Initialize the number of inputs and the number of comparisons.
nc = 0
p = len(B)
q = len(C)
# i <- 0; j <- 0; k <- 0
i = 0
j = 0
k = 0
# while i < p and j < q do
while i < p and j < q:
# if B[i] <= C[j]
if B[i] <= C[j]:
# A[k] <- B[i]; i <- i + 1
A[k] = B[i]
i += 1
nc += 1
# else A[k] <- C[j]; j <- j + 1
else:
A[k] = C[j]
j += 1
nc += 1
# k <- k + 1
k += 1
# if i = p
if i is p:
# copy C[j..q - 1] to A[k..p + q - 1]
A[k:] = C[j:]
# else copy B[i..p - 1] to A[k..p + q -1]
else:
A[k:] = B[i:]
return A, nc
def insertionsort(A):
"""Sorts array A by insertionsort. Returns the sorted array A and the number
of comparisons nc.
"""
# Initialize the number of inputs and the number of comparisons.
n = len(A)
nc = 0
# for i <- 1 to n - 1 do
for i in range(len(A)):
# v <- A[i]
v = A[i]
# j <- i - 1
j = i - 1
# while j >= 0 and A[j] > v do
while j >= 0 and A[j] > v:
nc += 1
# A[j + 1] <- A[j]
A[j + 1] = A[j]
# j <- j - 1
j -= 1
# A[j + 1] <- v
A[j + 1] = v
return A, nc
def random_array(n):
"""Randomly creates an array of integers of size n."""
# Initialize the array.
array = []
# For every number in the range of n:
for number in range(n):
# Randomly create an integer.
integer = random.randint(0, n)
# Append this integer to the array.
array.append(integer)
# Return the array.
return array
def sorted_array(n):
"""Randomly creates a sorted array of size n."""
# Create a random array.
array = random_array(n)
# Sort the random array.
array.sort()
# Return the array.
return array
def reversed_array(n):
"""Randomly creates a sorted array in reverse order of size n."""
# Create a random array.
array = random_array(n)
# Sort the random array.
array.sort()
# Reverse the sorted array.
array.reverse()
# Return the array.
return array
def FirstPlot(x,y,c):
logx = []
logy = []
for i in x:
logx.append(math.log10(i))
for i in y:
logy.append(math.log10(i))
print('Plotting now!')
plt.plot(logx,logy,label=c)
plt.legend()
print('Done plotting!')
input_sizes = [10, 30, 100, 300, 1000, 3000, 10000]
mergesort_random_nc = []
mergesort_sorted_nc = []
mergesort_reversed_nc = []
for n in input_sizes:
mergesort_random_nc.append(mergesort(random_array(n))[1])
mergesort_sorted_nc.append(mergesort(sorted_array(n))[1])
mergesort_reversed_nc.append(mergesort(reversed_array(n))[1])
insertionsort_random_nc = []
insertionsort_sorted_nc = []
insertionsort_reversed_nc = []
for n in input_sizes:
insertionsort_random_nc.append(insertionsort(random_array(n))[1])
insertionsort_sorted_nc.append(insertionsort(sorted_array(n))[1])
insertionsort_reversed_nc.append(insertionsort(reversed_array(n))[1])
print("Input sizes:", input_sizes, "\n")
print("Number of comparisons using merge sort for corresponding input sizes:")
print("Random:", mergesort_random_nc)
print("Sorted:", mergesort_sorted_nc)
print("Reversed:", mergesort_reversed_nc, "\n")
print("Number of comparisons using merge sort for corresponding input sizes:")
print("Random:", insertionsort_random_nc)
print("Sorted:", insertionsort_sorted_nc, "ERROR")
print("Reversed:", insertionsort_reversed_nc, "\n")
end_time = time.time()
runtime_minutes = (end_time - start_time) / 60
print("Runtime in minutes:", runtime_minutes)
| gpl-3.0 | -5,408,541,337,670,909,000 | 28.52973 | 80 | 0.585576 | false | 3.277145 | false | false | false |
geertj/ravstack | lib/ravstack/main.py | 1 | 4819 | #
# This file is part of ravstack. Ravstack is free software available under
# the terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2015 the ravstack authors. See the file "AUTHORS" for a
# complete list.
"""Ravello Ironic command-line utility.
Usage:
ravstack [options] setup
ravstack [options] proxy-create
ravstack [options] node-create [-c <cpus>] [-m <memory>]
[-D <disk>] [-n <count>]
ravstack [options] node-dump
ravstack [options] node-list [--all [--cached]]
ravstack [options] node-start <node>
ravstack [options] node-stop <node>
ravstack [options] node-reboot <node>
ravstack [options] node-get-boot-device <node>
ravstack [options] node-set-boot-device <node> <bootdev>
ravstack [options] node-get-macs <node> [--cached]
ravstack [options] fixup
ravstack [options] endpoint-resolve <port> [-t <timeout>]
[--start-port <base>] [--num-ports <count>]
ravstack --help
Command help:
setup Create ravstack directories and config file.
proxy-create Create SSH -> Ravello API proxy.
node-create Create a new node.
node-dump Dump node definitions to specified file.
node-list List powered on nodes. (--all lists all nodes)
node-start Start a node.
node-stop Stop a node.
node-reboot Reboot a node.
node-get-boot-device Return boot device for <node>.
node-set-boot-device Set boot device for <node> to <bootdev>.
The boot device may be "hd" or "network".
node-get-macs Return MAC addresses for <node>.
fixup Fix Ravello and OS config after one or
more nodes were deployed.
endpoint-resolve Resolve an endpoint for a local service using
a public IP address or under portmapping.
Options:
-d, --debug Enable debugging.
-v, --verbose Be verbose.
--log-stderr Show logs on standard error.
-u <username>, --username=<username>
Ravello API username.
-p <password>, --password=<password>
Ravello API password.
-a <application>, --application=<application>
The Ravello application name.
--all List all nodes.
--cached Allow use of cached information.
Options for `node-create`:
-c <cpus>, --cpus=<cpus>
The number of CPUs. [default: 2]
-m <memory>, --memory=<memory>
The amount of memory in MB. [default: 8192]
-D <disk>, --disk=<disk>
The size of the disk in GB. [default: 60]
-n <count>, --count=<count>
The number of nodes to create. [default: 1]
Options for `endpoint-resolve`:
-t <timeout>, --timeout <timeout>
Timeout. [default: 2]
--start-port <port> Starting port for endpoint resolution with
portmapping. [default: 10000]
--num-ports <count> Number of ports to scan for endpoint resulution
with portmapping. [default: 50]
"""
from __future__ import absolute_import, print_function
import docopt
from . import factory, setup, node, proxy, fixup, endpoint, runtime
from .runtime import CONF
def main():
"""Ravstack main entry point."""
args = docopt.docopt(__doc__)
CONF.update_from_args(args)
CONF.update_to_env()
runtime.setup_logging() # logging configuration might have changed
env = factory.get_environ(args)
if args['setup']:
setup.do_setup(env)
elif args['proxy-create']:
proxy.do_create(env)
elif args['node-create']:
node.do_create(env)
elif args['node-dump']:
node.do_dump(env)
elif args['node-list'] and not args.get('--all'):
node.do_list_running(env, False)
elif args['node-list']:
node.do_list_all(env)
elif args['node-start']:
node.do_start(env, args['<node>'])
elif args['node-stop']:
node.do_stop(env, args['<node>'])
elif args['node-reboot']:
node.do_reboot(env, args['<node>'])
elif args['node-get-boot-device']:
node.do_get_boot_device(env, args['<node>'])
elif args['node-set-boot-device']:
node.do_set_boot_device(env, args['<node>'], args['<bootdev>'])
elif args['node-get-macs']:
node.do_get_macs(env, args['<node>'], False)
elif args['fixup']:
fixup.do_fixup(env)
elif args['endpoint-resolve']:
endpoint.do_resolve(env, args['<port>'])
def run_main():
"""Setuptools entry point."""
runtime.run_main(main)
if __name__ == '__main__':
run_main()
| mit | 1,762,956,767,902,170,600 | 35.233083 | 74 | 0.595767 | false | 3.827641 | false | false | false |
henkhaus/wow | oldcode/adg_bulk.py | 1 | 1591 | import pymongo
from pymongo import MongoClient, UpdateOne
from wowlib import wowapi, binary_search
import time
client = MongoClient()
print ('Initiating db connection and getting wow data')
db = client.wowdoc
data = wowapi.auctionurl('Shadow-Council')
posts = db.auctiondata
#create counters
count = 0
time = time.time()
#create list for operations
operations = {}
#create bulk upsert
bulk = posts.initialize_ordered_bulk_op()
print("Connected")
#create list of disctinct auctions in memory
print("Creating Auction List")
auction_list = []
auctions = posts.find().distinct('auc')
for auction in auctions:
auction_list.append(auction)
print("Auction List Created")
#Iterate through data returned from wowapi
for auction in data:
row = data[count]
#create new json, this allows you to add data not returned from wowapi
newrow = {'buyout': row['buyout'],
'timeLeft': row['timeLeft'],
'quantity': row['quantity'],
'seed': row['seed'],
'username': {'name':row['owner'], 'server':row['ownerRealm']},
'owner': row['owner'],
'item': row['item'],
'rand': row['rand'],
'bid': row['bid'],
'context': row['context'],
'auc': row['auc'],
'ownerRealm': row['ownerRealm'],
'viewtime': time,
'timeupdated': time,
'itemname': "-----<None Defined>-----",
'status':"Active",
'bidincrease': 'N',
}
count += 1
operations[str(newrow['auc'])]=newrow
print("all auctions created in operations")
posts.insert_one(operations)
print ('new doc added') | apache-2.0 | 1,605,230,015,981,572,400 | 22.890625 | 74 | 0.637964 | false | 3.385106 | false | false | false |
edx/course-discovery | course_discovery/apps/course_metadata/migrations/0074_auto_20171212_2016.py | 1 | 1216 | # Generated by Django 1.11.3 on 2017-12-12 20:16
import django.db.models.deletion
import stdimage.models
from django.db import migrations, models
import course_discovery.apps.course_metadata.utils
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0073_program_instructors'),
]
operations = [
migrations.AlterField(
model_name='course',
name='image',
field=stdimage.models.StdImageField(blank=True, help_text='Please provide a course preview image', null=True, upload_to=course_discovery.apps.course_metadata.utils.UploadToFieldNamePath('uuid', path='media/course/image')),
),
migrations.AlterField(
model_name='courseentitlement',
name='partner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Partner'),
),
migrations.AlterField(
model_name='person',
name='profile_image',
field=stdimage.models.StdImageField(blank=True, null=True, upload_to=course_discovery.apps.course_metadata.utils.UploadToFieldNamePath('uuid', path='media/people/profile_images')),
),
]
| agpl-3.0 | -9,120,434,024,624,371,000 | 35.848485 | 234 | 0.666941 | false | 4.039867 | false | false | false |
culturagovbr/sistema-nacional-cultura | planotrabalho/views.py | 1 | 13891 | import json
from django.shortcuts import redirect
from django.http import Http404, HttpResponseRedirect, request
from django.http import HttpResponse
from django.views.generic.edit import CreateView
from django.views.generic.edit import UpdateView
from django.views.generic import ListView
from django.views.generic import DetailView
from django.urls import reverse_lazy
from .models import PlanoTrabalho
from .models import CriacaoSistema
from .models import OrgaoGestor2
from .models import Conselheiro
from .models import ConselhoCultural
from .models import FundoCultura
from .models import FundoDeCultura
from .models import PlanoCultura
from .models import PlanoDeCultura
from .models import Componente
from .models import ConselhoDeCultura
from adesao.models import SistemaCultura
from .forms import CriarComponenteForm
from .forms import CriarFundoForm
from .forms import CriarPlanoForm
from .forms import CriarConselhoForm
from .forms import DesabilitarConselheiroForm
from .forms import CriarConselheiroForm
from .forms import AlterarConselheiroForm
from .forms import CriarOrgaoGestorForm
from adesao.utils import atualiza_session
class PlanoTrabalho(DetailView):
model = SistemaCultura
template_name = 'planotrabalho/plano_trabalho.html'
def get_context_data(self, **kwargs):
try:
context = super(PlanoTrabalho, self).get_context_data(**kwargs)
sistema_id = self.request.session['sistema_cultura_selecionado']['id']
context['sistema'] = SistemaCultura.objects.get(id=sistema_id)
except:
return context
return context
class CadastrarComponente(CreateView):
model = Componente
form_class = CriarComponenteForm
def get_form_kwargs(self):
kwargs = super(CadastrarComponente, self).get_form_kwargs()
sistema_id = self.request.session['sistema_cultura_selecionado']['id']
self.sistema = SistemaCultura.objects.get(id=sistema_id)
kwargs['sistema'] = self.sistema
kwargs['logged_user'] = self.request.user
return kwargs
def get_success_url(self):
return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})
def form_valid(self, form):
super(CadastrarComponente, self).form_valid(form)
sistema_atualizado = SistemaCultura.sistema.get(ente_federado__id=self.sistema.ente_federado.id)
atualiza_session(sistema_atualizado, self.request)
return redirect(reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}))
class CadastrarLegislacao(CadastrarComponente):
template_name = 'planotrabalho/cadastrar_legislacao.html'
def get_form_kwargs(self):
kwargs = super(CadastrarLegislacao, self).get_form_kwargs()
kwargs['tipo'] = 'legislacao'
return kwargs
class CadastrarPlanoDeCultura(CadastrarComponente):
model = PlanoDeCultura
form_class = CriarPlanoForm
template_name = 'planotrabalho/cadastrar_plano.html'
class CadastrarOrgaoGestor(CadastrarComponente):
model = OrgaoGestor2
form_class = CriarOrgaoGestorForm
template_name = 'planotrabalho/cadastrar_orgao.html'
def get_form_kwargs(self):
kwargs = super(CadastrarOrgaoGestor, self).get_form_kwargs()
kwargs['tipo'] = 'orgao_gestor'
return kwargs
def form_valid(self, form):
obj=super().form_valid(form)
return HttpResponseRedirect('/adesao/home/')
class CadastrarFundoDeCultura(CadastrarComponente):
model = FundoDeCultura
form_class = CriarFundoForm
template_name = 'planotrabalho/cadastrar_fundo.html'
def get_context_data(self, **kwargs):
context = super(CadastrarFundoDeCultura, self).get_context_data(**kwargs)
context['is_edit'] = False
return context
class CadastrarConselhoDeCultura(CadastrarComponente):
model = ConselhoDeCultura
form_class = CriarConselhoForm
template_name = 'planotrabalho/cadastrar_conselho.html'
class AlterarLegislacao(UpdateView):
model = Componente
form_class = CriarComponenteForm
template_name = 'planotrabalho/cadastrar_legislacao.html'
def get_form_kwargs(self):
kwargs = super(AlterarLegislacao, self).get_form_kwargs()
sistema_id = self.request.session['sistema_cultura_selecionado']['id']
self.sistema = SistemaCultura.objects.get(id=sistema_id)
kwargs['tipo'] = 'legislacao'
kwargs['sistema'] = self.sistema
kwargs['logged_user'] = self.request.user
return kwargs
def get_context_data(self, **kwargs):
context = super(AlterarLegislacao, self).get_context_data(**kwargs)
context['is_edit'] = True
return context
def get_success_url(self):
return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})
class AlterarPlanoCultura(UpdateView):
model = PlanoDeCultura
form_class = CriarPlanoForm
template_name = 'planotrabalho/cadastrar_plano.html'
def get_form_kwargs(self):
kwargs = super(AlterarPlanoCultura, self).get_form_kwargs()
sistema_id = self.object.plano.last().id
self.sistema = SistemaCultura.objects.get(id=sistema_id)
kwargs['sistema'] = self.sistema
kwargs['logged_user'] = self.request.user
kwargs['initial']['local_monitoramento'] = self.object.local_monitoramento
kwargs['initial']['ano_inicio_curso'] = self.object.ano_inicio_curso
kwargs['initial']['ano_termino_curso'] = self.object.ano_termino_curso
kwargs['initial']['esfera_federacao_curso'] = self.object.esfera_federacao_curso
kwargs['initial']['tipo_oficina'] = self.object.tipo_oficina
kwargs['initial']['perfil_participante'] = self.object.perfil_participante
kwargs['initial']['anexo_na_lei'] = self.object.anexo_na_lei
kwargs['initial']['metas_na_lei'] = self.object.metas_na_lei
if self.object.anexo_na_lei:
kwargs['initial']['possui_anexo'] = True
elif not self.object.anexo_na_lei and self.object.anexo and self.object.anexo.arquivo:
kwargs['initial']['possui_anexo'] = True
kwargs['initial']['anexo_lei'] = self.object.anexo.arquivo
else:
kwargs['initial']['possui_anexo'] = False
if self.object.metas_na_lei:
kwargs['initial']['possui_metas'] = True
elif not self.object.metas_na_lei and self.object.metas and self.object.metas.arquivo:
kwargs['initial']['possui_metas'] = True
kwargs['initial']['arquivo_metas'] = self.object.metas.arquivo
else:
kwargs['initial']['possui_metas'] = False
if self.object.local_monitoramento:
kwargs['initial']['monitorado'] = True
else:
kwargs['initial']['monitorado'] = False
if self.object.ano_inicio_curso:
kwargs['initial']['participou_curso'] = True
else:
kwargs['initial']['participou_curso'] = False
return kwargs
def get_context_data(self, **kwargs):
context = super(AlterarPlanoCultura, self).get_context_data(**kwargs)
context['is_edit'] = True
return context
def get_success_url(self):
return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})
class AlterarOrgaoGestor(UpdateView):
model = OrgaoGestor2
form_class = CriarOrgaoGestorForm
template_name = 'planotrabalho/cadastrar_orgao.html'
def get_form_kwargs(self):
kwargs = super(AlterarOrgaoGestor, self).get_form_kwargs()
sistema_id = self.object.orgao_gestor.last().id
self.sistema = SistemaCultura.objects.get(id=sistema_id)
kwargs['tipo'] = 'orgao_gestor'
kwargs['sistema'] = self.sistema
kwargs['logged_user'] = self.request.user
if self.sistema.orgao_gestor and self.sistema.orgao_gestor.perfil:
kwargs['initial']['perfil'] = self.sistema.orgao_gestor.perfil
if self.object.comprovante_cnpj is None:
kwargs['initial']['possui_cnpj'] = False
else:
kwargs['initial']['possui_cnpj'] = True
kwargs['initial']['comprovante_cnpj'] = self.object.comprovante_cnpj.arquivo
kwargs['initial']['cnpj'] = self.sistema.orgao_gestor.cnpj
kwargs['initial']['banco'] = self.sistema.orgao_gestor.banco
kwargs['initial']['agencia'] = self.sistema.orgao_gestor.agencia
kwargs['initial']['conta'] = self.sistema.orgao_gestor.conta
kwargs['initial']['termo_responsabilidade'] = True
return kwargs
def get_context_data(self, **kwargs):
context = super(AlterarOrgaoGestor, self).get_context_data(**kwargs)
context['is_edit'] = True
return context
def get_success_url(self):
return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})
class AlterarFundoCultura(UpdateView):
model = FundoDeCultura
form_class = CriarFundoForm
template_name = 'planotrabalho/cadastrar_fundo.html'
def get_form_kwargs(self):
kwargs = super(AlterarFundoCultura, self).get_form_kwargs()
sistema_id = self.object.fundo_cultura.last().id
self.sistema = SistemaCultura.objects.get(id=sistema_id)
kwargs['sistema'] = self.sistema
kwargs['logged_user'] = self.request.user
if self.sistema.legislacao and self.sistema.legislacao.arquivo == self.object.arquivo:
kwargs['initial']['mesma_lei'] = True
else:
kwargs['initial']['mesma_lei'] = False
if self.object.comprovante_cnpj is None:
kwargs['initial']['possui_cnpj'] = False
else:
kwargs['initial']['possui_cnpj'] = True
kwargs['initial']['comprovante'] = self.object.comprovante_cnpj.arquivo
kwargs['initial']['banco'] = self.object.banco
kwargs['initial']['agencia'] = self.object.agencia
kwargs['initial']['conta'] = self.object.conta
kwargs['initial']['termo_responsabilidade'] = True
return kwargs
def get_context_data(self, **kwargs):
context = super(AlterarFundoCultura, self).get_context_data(**kwargs)
context['is_edit'] = True
return context
def get_success_url(self):
return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})
class AlterarConselhoCultura(UpdateView):
model = ConselhoDeCultura
form_class = CriarConselhoForm
template_name = 'planotrabalho/cadastrar_conselho.html'
def get_form_kwargs(self):
kwargs = super(AlterarConselhoCultura, self).get_form_kwargs()
sistema_id = self.object.conselho.first().id
self.sistema = SistemaCultura.objects.get(id=sistema_id)
kwargs['sistema'] = self.sistema
kwargs['logged_user'] = self.request.user
if self.object.lei:
kwargs['initial']['arquivo_lei'] = self.object.lei.arquivo
kwargs['initial']['data_publicacao_lei'] = self.object.lei.data_publicacao
if self.sistema.legislacao and self.sistema.legislacao.arquivo == self.object.lei.arquivo:
kwargs['initial']['mesma_lei'] = True
else:
kwargs['initial']['mesma_lei'] = False
if self.object.arquivo:
kwargs['initial']['possui_ata'] = True
else:
kwargs['initial']['possui_ata'] = False
return kwargs
def get_context_data(self, **kwargs):
context = super(AlterarConselhoCultura, self).get_context_data(**kwargs)
context['is_edit'] = True
return context
def get_success_url(self):
return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})
class CriarConselheiro(CreateView):
form_class = CriarConselheiroForm
template_name = 'planotrabalho/cadastrar_conselheiros.html'
def get_form_kwargs(self):
kwargs = super(CriarConselheiro, self).get_form_kwargs()
kwargs['conselho'] = self.request.session['sistema_cultura_selecionado']['conselho']
return kwargs
def get_success_url(self):
return reverse_lazy('planotrabalho:listar_conselheiros')
class ListarConselheiros(ListView):
model = Conselheiro
template_name = 'planotrabalho/listar_conselheiros.html'
paginate_by = 12
def get_queryset(self):
q = self.request.session['sistema_cultura_selecionado']['conselho']
conselheiros = Conselheiro.objects.filter(conselho__id=q, situacao=1) # 1 = Habilitado
return conselheiros
class AlterarConselheiro(UpdateView):
form_class = AlterarConselheiroForm
template_name = 'planotrabalho/cadastrar_conselheiros.html'
def get_queryset(self):
pk = self.kwargs['pk']
conselheiro = Conselheiro.objects.filter(id=pk)
return conselheiro
def get_success_url(self):
return reverse_lazy('planotrabalho:listar_conselheiros')
class DesabilitarConselheiro(UpdateView):
form_class = DesabilitarConselheiroForm
template_name = 'planotrabalho/desabilitar_conselheiro.html'
def get_queryset(self):
pk = self.kwargs['pk']
conselheiro = Conselheiro.objects.filter(id=pk)
return conselheiro
def get_success_url(self):
return reverse_lazy('planotrabalho:listar_conselheiros')
def get_conselheiros(request):
if request.is_ajax() and request.GET.get('id', None):
pk = request.GET.get('id')
conselheiros = Conselheiro.objects.filter(conselho__pk=pk)
response = {}
response['conselheiros'] = list(conselheiros.values_list('nome', 'email', 'segmento'))
return HttpResponse(
json.dumps(response),
content_type="application/json")
else:
return Http404()
| agpl-3.0 | 2,528,503,939,964,666,400 | 35.459318 | 104 | 0.673458 | false | 3.168568 | false | false | false |
att-comdev/deckhand | deckhand/control/views/revision.py | 1 | 3030 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from deckhand.common import utils
from deckhand.control import common
from deckhand import types
class ViewBuilder(common.ViewBuilder):
"""Model revision API responses as a python dictionary."""
_collection_name = 'revisions'
def list(self, revisions):
resp_body = {
'count': len(revisions),
'results': []
}
for revision in revisions:
body = {'tags': set(), 'buckets': set()}
rev_documents = revision.pop('documents')
for attr in ('id', 'created_at'):
body[utils.to_camel_case(attr)] = revision[attr]
body['tags'].update([t['tag'] for t in revision['tags']])
body['buckets'].update(
[d['bucket_name'] for d in rev_documents])
body['tags'] = sorted(body['tags'])
body['buckets'] = sorted(body['buckets'])
resp_body['results'].append(body)
return resp_body
def show(self, revision):
"""Generate view for showing revision details.
Each revision's documents should only be validation policies.
"""
validation_policies = []
tags = collections.OrderedDict()
success_status = 'success'
for vp in [d for d in revision['documents']
if d['schema'].startswith(types.VALIDATION_POLICY_SCHEMA)]:
validation_policy = {}
validation_policy['name'] = vp.get('name')
validation_policy['url'] = self._gen_url(vp)
try:
validation_policy['status'] = vp['data']['validations'][0][
'status']
except KeyError:
validation_policy['status'] = 'unknown'
validation_policies.append(validation_policy)
if validation_policy['status'] != 'success':
success_status = 'failed'
for tag in revision['tags']:
tags.setdefault(tag['tag'], tag['data'])
buckets = sorted(
set([d['bucket_name'] for d in revision['documents']]))
return {
'id': revision.get('id'),
'createdAt': revision.get('created_at'),
'url': self._gen_url(revision),
'validationPolicies': validation_policies,
'status': success_status,
'tags': dict(tags),
'buckets': buckets
}
| apache-2.0 | 8,199,247,008,162,782,000 | 32.666667 | 78 | 0.588779 | false | 4.449339 | false | false | false |
siavooshpayandehazad/NoC_Router | Scripts/include/fault_injector_do.py | 3 | 51818 | import random
import sys
import numpy
from Scripts.include.package import *
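# Fault-injection helpers for the NoC: the fault class models a single injection
# point (location, width, type and timing parameters), and the list_* functions
# below collect the signal paths that can be targeted in each router.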
#----------------------------------------------------------------------------------------------
#
# Fault Class
#
#----------------------------------------------------------------------------------------------
class fault:
location = None
bitwidth = None
Type = None
mean_time = None
std_dev = None
shut_down_time = None
def __init__(self, loc, width, fault_type, mean_time, std_dev, shut_down_time):
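        """
        Creates a single fault-injection point on signal loc; for a signal wider
        than one bit a random bit index is picked and appended to the location.
        """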
if width > 1:
random_position = random.randint(0, width-1)
self.location = loc+"("+str(random_position)+")"
else:
self.location = loc
self.bitwidth = width
self.Type = fault_type
self.mean_time = mean_time
self.std_dev = std_dev
self.shut_down_time = shut_down_time
    def report(self):
        """
        Reports the fault's location, signal width, type, MTBF, standard deviation
        and shutdown time.
        """
        print "Location: ", self.location, "\twidth: ", self.bitwidth, "\tfault_type: ", '%5s' %self.Type,\
              "\tMTBF: ", self.mean_time, "\tstd deviation: ", self.std_dev, "\tshutdown time: ", \
              self.shut_down_time
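# Illustrative usage sketch (not part of the original flow; the location string,
# the "transient" type label and the timing values are assumptions for the example):
#
#   example_fault = fault("tb_network_2x2:NoC:R_0:RX_L", 32, "transient", 1000, 100, 5000)
#   example_fault.report()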
#----------------------------------------------------------------------------------------------
#
# Other functions
#
#----------------------------------------------------------------------------------------------
def report_faults(fault_list):
"""
Reports all the faults in the fault list
"""
print "---------------------------------------"
print "fault injection points:"
for fault in fault_list:
fault.report()
print "---------------------------------------"
#----------------------------------------------------------------------------------------------
# Generating signals for different modules
# for this purpose we only consider fault injection points marked with X:
#
# .-------------.
# .----> | Checkers | <---.
# | | Module | |
# | '-------------' |
# | ^ |
# | | |
# | X |
# | .-------------. |
# | | Module | |
# -----o----->| under |--X--o------->
# | check |
# '-------------'
#
#----------------------------------------------------------------------------------------------
def list_all_the_links(network_size):
"""
takes the network size and returns a list of all the RX signals in the network
"""
list_of_ports = []
list_of_widths = []
for i in range(0, network_size*network_size):
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_L")
list_of_widths.append(32)
if i/network_size != 0:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_N")
list_of_widths.append(32)
if i/network_size != network_size-1:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_S")
list_of_widths.append(32)
if i%network_size != 0:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_W")
list_of_widths.append(32)
if i%network_size != network_size-1:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_E")
list_of_widths.append(32)
return list_of_ports, list_of_widths
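# Example (illustrative, derived from the loop above): for network_size = 2 the
# corner router R_0 has no north or west neighbour, so it only contributes RX_L,
# RX_S and RX_E, each 32 bits wide; the whole 2x2 mesh yields 12 RX entries.
#
#   ports, widths = list_all_the_links(2)
#   # ports[0:3] -> ["...:R_0:RX_L", "...:R_0:RX_S", "...:R_0:RX_E"], widths -> [32]*12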
def list_all_the_lbdr_signals(network_size):
"""
takes the network size and returns a list of all the relevant LBDR signals in the network
"""
list_of_ports = []
list_of_widths = []
# Every router has the Local port
for i in range(0, network_size*network_size):
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# for i in range(0, network_size*2):
if i/network_size != 0: # has port N
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i/network_size != network_size-1: # has port S
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != 0: # has port W
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != network_size-1: # has port E
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
return list_of_ports, list_of_widths
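# Example (illustrative, derived from the loop above): every LBDR instance present
# on a router contributes 12 single-bit targets (N1/E1/W1/S1, the five Req_*_in
# lines, grants, packet_drop_in and packet_drop_order), so a corner router with
# ports L, S and E adds 36 entries to the returned lists.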
def list_all_the_arbiter_signals(network_size):
"""
takes the network size and returns a list of all the relevant arbiter signals in the network
"""
list_of_ports = []
list_of_widths = []
# Every router has the Local port
for i in range(0, network_size*network_size):
        # Output signals of Allocator related to output L
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_L")
        list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_N") # Note: should the fault target the main grant signal or its _sig copy (e.g. grant_N_N vs. grant_N_N_sig)? The checker monitors grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_L")
        # Internal signals of Allocator related to output L
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_L_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_L") # Input E requesting Output L ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# for i in range(0, network_size*2):
if i/network_size != 0: # has port N
# Output signals of Allocator related to output N
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_N")
            list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_N") # Note: should the fault target the main grant signal or its _sig copy (e.g. grant_N_N vs. grant_N_N_sig)? The checker monitors grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_L")
# Internal signals of Allocator related to output N
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_N_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_N") # Input E requesting Output N ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        if i//network_size != network_size-1: # has port S
# Output signals of Allocator related to output S
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_L")
# # Internal signals of Allocator related to output S
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_S_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_S") # Input E requesting Output S ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != 0: # has port W
# Output signals of Allocator related to output W
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_L")
# # Internal signals of Allocator related to output W
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_W_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_W") # Input E requesting Output W ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != network_size-1: # has port E
# Output signals of Allocator related to output E
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_L")
# # Internal signals of Allocator related to output E
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_E_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_E") # Input W requesting Output E ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
return list_of_ports, list_of_widths
def list_all_the_fifo_signals(network_size):
"""
takes the network size and returns a list of all the relevant FIFO signals in the network
"""
list_of_ports = []
list_of_widths = []
# Every router has the Local port
for i in range(0, network_size*network_size):
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
# for i in range(0, network_size*2):
        if i//network_size != 0: # has port N
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
        if i//network_size != network_size-1: # has port S
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
if i%network_size != 0: # has port W
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
if i%network_size != network_size-1: # has port E
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
return list_of_ports, list_of_widths
def generate_links_dictionary(network_size, sim_time):
"""
This function generates random faults on all RX signals of the network
"""
list_of_ports = []
list_of_widths = []
ports, widths = list_all_the_links(network_size)
list_of_ports += ports
list_of_widths += widths
# ports, widths = list_all_the_lbdr_signals(network_size)
# list_of_ports += ports
# list_of_widths += widths
# ports, widths = list_all_the_fifo_signals(network_size)
# list_of_ports += ports
# list_of_widths += widths
# ports, widths = list_all_the_arbiter_signals(network_size)
# list_of_ports += ports
# list_of_widths += widths
random.seed(FAULT_RANDOM_SEED)
fault_list = []
for item in list_of_ports:
item_index = list_of_ports.index(item)
width = list_of_widths[item_index]
# fault_type = random.choice(["T", "P", "I", "T->P", "T->I"])
fault_type = random.choice(["T"])
shut_down_time = None
std_dev = None
if fault_type == "T": # Transient fault
frequency = random.choice(["H", "M", "L"])
if frequency == "H":
mean_time = int((1000000000/Fault_Per_Second)/HIGH_FAULT_RATE)
elif frequency == "M":
mean_time = int((1000000000/Fault_Per_Second)/LOW_FAULT_RATE)
else:
mean_time = int((1000000000/Fault_Per_Second)/MEDIUM_FAULT_RATE)
std_dev = int(mean_time*0.1+1)
elif fault_type == "I" or fault_type == "T->I": # Intermittent fault or transient to intermittent
mean_time = int(MTB_INTERMITTENT_BURST)
std_dev = int(mean_time*0.1+1)
elif fault_type == "P": # its Permentent fault
mean_time = None
std_dev = None
shut_down_time = random.randint(int(sim_time*0.1), int(sim_time*0.9))
elif fault_type == "T->P": # Transient goes to Intermittent and then to Permanent
mean_time = int(1000000000/Fault_Per_Second)
shut_down_time = random.randint(int(sim_time*0.1), int(sim_time*0.9))
std_dev = int(mean_time*0.1+1)
new_fault = fault(item, width, fault_type, mean_time, std_dev, shut_down_time)
fault_list.append(new_fault)
report_faults(fault_list)
return fault_list
def parse_fault_info_file(file_path):
"""
If you want to feed the fault info from a file...
the file lines should be organized like this:
fault_location: signal_width fault_type MTBF std_deviation shutdown_time
fault_location: the signal bit that you want to inject the fault on.
signal_width: The width of the signal that you intend to inject the bit-flip in
    fault_type: should be chosen from the following list:
* T : Transient
* I : Intermittent
* P : Permanent
* T->P : Transient to Intermittent to permanent
* T->I : Transient to Intermittent
MTBF: Mean time between the faults
std_deviation: Standard deviation used for generating faults
    shutdown_time: time in ns at which the signal becomes permanently faulty; only used for
                   permanent faults, otherwise "None".
Example:
tb_network_2x2:NoC:R_0:RX_L(21) 32 I 1000 101 None
"""
fault_list = []
fault_info_file = open(file_path, 'r')
line = fault_info_file.readline()
while line != "":
split_line = line.split()
fault_location = split_line[0]
signal_width = int(split_line[1])
fault_type = split_line[2]
fault_MTBF = split_line[3]
fault_STD = split_line[4]
shut_down_time = split_line[5]
new_fault = fault(fault_location, signal_width, fault_type, fault_MTBF, fault_STD, shut_down_time)
fault_list.append(new_fault)
line = fault_info_file.readline()
return fault_list
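# Illustrative sketch (added note; the file name and values below are assumptions chosen only for
# demonstration). A fault info file could contain a line such as
#     tb_network_2x2:NoC:R_0:RX_L(21) 32 T 1000 101 None
# and would then be parsed with
#     fault_list = parse_fault_info_file("fault_info.txt")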
#----------------------------------------------------------------------------------------------
#
# Generating the actual do file.
#
#----------------------------------------------------------------------------------------------
def generate_fault_injection_do(file_path, sim_time, sim_end, fault_list):
"""
Generates a do file for modelsim for injecting the faults
    file_path: string : directory in which fault_inject.do will be created
    sim_time: integer : how long (in ns) faults should be injected during the simulation
sim_end: integer : end of simulation
fault_list: list : list of fault objects for injection
the generated faults would look like these:
*T: ___|____________|____________|____________|____________|____________|____________|____________|
Transient faults happen periodically with a normal distribution with mean time between faults and a
standard deviation
*I: ____________________________||||||||||______________________________________||||||||||_________
        Intermittent faults happen in periodic bursts, again following a normal distribution with a mean time
        between faults and a standard deviation. Each burst injects EVENTS_PER_BURST stuck-at faults.
*P: __________________________________________________|''''''''''''''''''''''''''''''''''''''''''''
Permanent faults happen right after the specified shutdown time.
*T->I: ___|____________|____________|____________||||||||||____________________________||||||||||_____
        First it behaves as transient, then becomes intermittent. For the transient phase it uses the MTBF and
        Std_Dev values specified in the fault object; for the intermittent bursts it uses the values specified
        in the package file.
*T->P: ___|____________|____________|____________||||||||||______________________|''''''''''''''''''''
        First it behaves as transient, then turns into intermittent and finally permanent. For the transient
        phase it uses the MTBF and Std_Dev values specified in the fault object; for the intermittent bursts
        it uses the values specified in the package file; the switch to permanent happens at the shutdown
        time specified in the fault object.
"""
list_of_links = fault_list
delay = 1000000000/Fault_Per_Second
deviation = int(delay/10)
if deviation == 0:
deviation = 1
fault_inject_file = open(file_path+'/fault_inject.do', 'w')
permanently_faulty_locations = []
temp_dict = {}
for item in list_of_links:
if item.Type == "T":
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < sim_time:
if int(fault_time) in temp_dict.keys():
temp_dict[int(fault_time)].append(item)
else:
temp_dict[int(fault_time)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
if item.Type == "I":
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < sim_time:
for event in range(0, EVENTS_PER_BURST):
if int(fault_time+event) in temp_dict.keys():
temp_dict[int(fault_time+event)].append(item)
else:
temp_dict[int(fault_time+event)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
if item.Type == "T->I":
permanently_faulty_locations.append(item)
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < int(sim_time*0.5):
if int(fault_time) in temp_dict.keys():
temp_dict[int(fault_time)].append(item)
else:
temp_dict[int(fault_time)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
while fault_time+EVENTS_PER_BURST < int(sim_time):
for event in range(0, EVENTS_PER_BURST):
if int(fault_time+event) in temp_dict.keys():
temp_dict[int(fault_time+event)].append(item)
else:
temp_dict[int(fault_time+event)] = [item]
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
if item.Type == "P":
permanently_faulty_locations.append(item)
if item.Type == "T->P":
permanently_faulty_locations.append(item)
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < int(item.shut_down_time*0.5):
if int(fault_time) in temp_dict.keys():
temp_dict[int(fault_time)].append(item)
else:
temp_dict[int(fault_time)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
while fault_time+EVENTS_PER_BURST < int(item.shut_down_time):
for event in range(0, EVENTS_PER_BURST):
if int(fault_time+event) in temp_dict.keys():
temp_dict[int(fault_time+event)].append(item)
else:
temp_dict[int(fault_time+event)] = [item]
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
fault_inject_file.write("#################################\n")
current_time = 0
for i in range(0, sim_time):
for permanent_fault_location in permanently_faulty_locations:
if i == permanent_fault_location.shut_down_time:
location = permanent_fault_location.location
fault_inject_file.write("# ###################################################\n")
fault_inject_file.write("# Shutting down signal: "+location+" for good!\n")
fault_inject_file.write("force -drive sim/:"+location+" U 1ns\n")
fault_inject_file.write("# ###################################################\n")
if i in temp_dict.keys():
last_time = current_time
current_time = i
fault_inject_file.write("run "+str(current_time-last_time)+"ns\n")
for item in temp_dict[i]:
location = item.location
if item.Type == "I" or item.Type == "T->I" or item.Type == "T->P":
string = "force -drive sim/:"+location+" " + str(random.choice(["0", "1"]))
string += " 0 ns -cancel 1ns"
else:
string = "force -drive sim/:"+location+" " + str(random.choice(["0", "1"]))
random_start = random.randint(0, deviation)
string += " "+str(random_start)+"ns -cancel "+str(random_start+1)+"ns"
fault_inject_file.write(string+"\n")
fault_inject_file.write("run "+str(sim_end-sim_time)+"ns\n")
fault_inject_file.write("stop")
fault_inject_file.close()
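# Illustrative helper (a sketch, not part of the original flow): ties the two generation steps
# together. The default values below are assumptions for demonstration only.
def generate_example_fault_setup(network_size=2, sim_time=10000, sim_end=20000, output_dir="."):
    fault_list = generate_links_dictionary(network_size, sim_time)
    generate_fault_injection_do(output_dir, sim_time, sim_end, fault_list)
    return fault_list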
| gpl-3.0 | -1,147,701,157,328,502,400 | 67.002625 | 263 | 0.636343 | false | 2.551982 | false | false | false |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/phylo/distance.py | 1 | 13243 | #!/usr/bin/env python
"""Estimating pairwise distances between sequences.
"""
from cogent.util import parallel, table, warning, progress_display as UI
from cogent.maths.stats.util import Numbers
from cogent import LoadSeqs, LoadTree
from warnings import warn
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Gavin Huttley", "Peter Maxwell", "Matthew Wakefield"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"
class EstimateDistances(object):
"""Base class used for estimating pairwise distances between sequences.
Can also estimate other parameters from pairs."""
def __init__(self, seqs, submodel, threeway=False, motif_probs = None,
do_pair_align=False, rigorous_align=False, est_params=None,
modify_lf=None):
"""Arguments:
- seqs: an Alignment or SeqCollection instance with > 1 sequence
- submodel: substitution model object Predefined models can
be imported from cogent.evolve.models
- threeway: a boolean flag for using threeway comparisons to
estimate distances. default False. Ignored if do_pair_align is
True.
- do_pair_align: if the input sequences are to be pairwise aligned
first and then the distance will be estimated. A pair HMM based
on the submodel will be used.
- rigorous_align: if True the pairwise alignments are actually
numerically optimised, otherwise the current substitution model
settings are used. This slows down estimation considerably.
- est_params: substitution model parameters to save estimates from
in addition to length (distance)
- modify_lf: a callback function for that takes a likelihood
function (with alignment set) and modifies it. Can be used to
configure local_params, set bounds, optimise using a restriction
for faster performance.
Note: Unless you know a priori your alignment will be flush ended
(meaning no sequence has terminal gaps) it is advisable to construct a
substitution model that recodes gaps. Otherwise the terminal gaps will
significantly bias the estimation of branch lengths when using
do_pair_align.
"""
if do_pair_align:
self.__threeway = False
else:
            # whether pairwise is to be estimated from 3-way
            self.__threeway = threeway
self.__seq_collection = seqs
self.__seqnames = seqs.getSeqNames()
self.__motif_probs = motif_probs
# the following may be pairs or three way combinations
self.__combination_aligns = None
self._do_pair_align = do_pair_align
self._rigorous_align = rigorous_align
# substitution model stuff
self.__sm = submodel
self._modify_lf = modify_lf
# store for the results
self.__param_ests = {}
self.__est_params = list(est_params or [])
self.__run = False # a flag indicating whether estimation completed
# whether we're on the master CPU or not
self._on_master_cpu = parallel.getCommunicator().Get_rank() == 0
def __str__(self):
return str(self.getTable())
def __make_pairwise_comparison_sets(self):
comps = []
names = self.__seq_collection.getSeqNames()
n = len(names)
for i in range(0, n - 1):
for j in range(i + 1, n):
comps.append((names[i], names[j]))
return comps
def __make_threeway_comparison_sets(self):
comps = []
names = self.__seq_collection.getSeqNames()
n = len(names)
for i in range(0, n - 2):
for j in range(i + 1, n - 1):
for k in range(j + 1, n):
comps.append((names[i], names[j], names[k]))
return comps
def __make_pair_alignment(self, seqs, opt_kwargs):
lf = self.__sm.makeLikelihoodFunction(\
LoadTree(tip_names=seqs.getSeqNames()),
aligned=False)
lf.setSequences(seqs.NamedSeqs)
# allow user to modify the lf config
if self._modify_lf:
lf = self._modify_lf(lf)
if self._rigorous_align:
lf.optimise(**opt_kwargs)
lnL = lf.getLogLikelihood()
(vtLnL, aln) = lnL.edge.getViterbiScoreAndAlignment()
return aln
@UI.display_wrap
def __doset(self, sequence_names, dist_opt_args, aln_opt_args, ui):
# slice the alignment
seqs = self.__seq_collection.takeSeqs(sequence_names)
if self._do_pair_align:
ui.display('Aligning', progress=0.0, current=.5)
align = self.__make_pair_alignment(seqs, aln_opt_args)
ui.display('', progress=.5, current=.5)
else:
align = seqs
ui.display('', progress=0.0, current=1.0)
# note that we may want to consider removing the redundant gaps
# create the tree object
tree = LoadTree(tip_names = sequence_names)
# make the parameter controller
lf = self.__sm.makeLikelihoodFunction(tree)
if not self.__threeway:
lf.setParamRule('length', is_independent = False)
if self.__motif_probs:
lf.setMotifProbs(self.__motif_probs)
lf.setAlignment(align)
# allow user modification of lf using the modify_lf
if self._modify_lf:
lf = self._modify_lf(lf)
lf.optimise(**dist_opt_args)
# get the statistics
stats_dict = lf.getParamValueDict(['edge'],
params=['length'] + self.__est_params)
# if two-way, grab first distance only
if not self.__threeway:
result = {'length': stats_dict['length'].values()[0] * 2.0}
else:
result = {'length': stats_dict['length']}
# include any other params requested
for param in self.__est_params:
result[param] = stats_dict[param].values()[0]
return result
@UI.display_wrap
def run(self, dist_opt_args=None, aln_opt_args=None, ui=None, **kwargs):
"""Start estimating the distances between sequences. Distance estimation
is done using the Powell local optimiser. This can be changed using the
dist_opt_args and aln_opt_args.
Arguments:
- show_progress: whether to display progress. More detailed progress
information from individual optimisation is controlled by the
..opt_args.
- dist_opt_args, aln_opt_args: arguments for the optimise method for
the distance estimation and alignment estimation respectively."""
if 'local' in kwargs:
warn("local argument ignored, provide it to dist_opt_args or"\
" aln_opt_args", DeprecationWarning, stacklevel=2)
ui.display("Distances")
dist_opt_args = dist_opt_args or {}
aln_opt_args = aln_opt_args or {}
# set the optimiser defaults
dist_opt_args['local'] = dist_opt_args.get('local', True)
aln_opt_args['local'] = aln_opt_args.get('local', True)
# generate the list of unique sequence sets (pairs or triples) to be
# analysed
if self.__threeway:
combination_aligns = self.__make_threeway_comparison_sets()
desc = "triplet "
else:
combination_aligns = self.__make_pairwise_comparison_sets()
desc = "pair "
labels = [desc + ','.join(names) for names in combination_aligns]
def _one_alignment(comp):
result = self.__doset(comp, dist_opt_args, aln_opt_args)
return (comp, result)
for (comp, value) in ui.imap(_one_alignment, combination_aligns,
labels=labels):
self.__param_ests[comp] = value
def getPairwiseParam(self, param, summary_function="mean"):
"""Return the pairwise statistic estimates as a dictionary keyed by
(seq1, seq2)
Arguments:
- param: name of a parameter in est_params or 'length'
- summary_function: a string naming the function used for
estimating param from threeway distances. Valid values are 'mean'
(default) and 'median'."""
summary_func = summary_function.capitalize()
pairwise_stats = {}
assert param in self.__est_params + ['length'], \
"unrecognised param %s" % param
if self.__threeway and param == 'length':
pairwise = self.__make_pairwise_comparison_sets()
# get all the distances involving this pair
for a, b in pairwise:
values = Numbers()
for comp_names, param_vals in self.__param_ests.items():
if a in comp_names and b in comp_names:
values.append(param_vals[param][a] + \
param_vals[param][b])
pairwise_stats[(a,b)] = getattr(values, summary_func)
else:
# no additional processing of the distances is required
for comp_names, param_vals in self.__param_ests.items():
pairwise_stats[comp_names] = param_vals[param]
return pairwise_stats
def getPairwiseDistances(self,summary_function="mean", **kwargs):
"""Return the pairwise distances as a dictionary keyed by (seq1, seq2).
Convenience interface to getPairwiseParam.
Arguments:
- summary_function: a string naming the function used for
estimating param from threeway distances. Valid values are 'mean'
(default) and 'median'.
"""
return self.getPairwiseParam('length',summary_function=summary_function,
**kwargs)
def getParamValues(self, param, **kwargs):
"""Returns a Numbers object with all estimated values of param.
Arguments:
- param: name of a parameter in est_params or 'length'
- **kwargs: arguments passed to getPairwiseParam"""
ests = self.getPairwiseParam(param, **kwargs)
return Numbers(ests.values())
def getTable(self,summary_function="mean", **kwargs):
"""returns a Table instance of the distance matrix.
Arguments:
- summary_function: a string naming the function used for
estimating param from threeway distances. Valid values are 'mean'
(default) and 'median'."""
d = \
self.getPairwiseDistances(summary_function=summary_function,**kwargs)
if not d:
d = {}
for s1 in self.__seqnames:
for s2 in self.__seqnames:
if s1 == s2:
continue
else:
d[(s1,s2)] = 'Not Done'
twoD = []
for s1 in self.__seqnames:
row = [s1]
for s2 in self.__seqnames:
if s1 == s2:
row.append('')
continue
try:
row.append(d[(s1,s2)])
except KeyError:
row.append(d[(s2,s1)])
twoD.append(row)
T = table.Table(['Seq1 \ Seq2'] + self.__seqnames, twoD, row_ids = True,
missing_data = "*")
return T
def getNewickTrees(self):
"""Returns a list of Newick format trees for supertree methods."""
trees = []
for comp_names, param_vals in self.__param_ests.items():
tips = []
for name in comp_names:
tips.append(repr(name)+":%s" % param_vals[name])
trees.append("("+",".join(tips)+");")
return trees
def writeToFile(self, filename, summary_function="mean", format='phylip',
**kwargs):
"""Save the pairwise distances to a file using phylip format. Other
formats can be obtained by getting to a Table. If running in parallel,
the master CPU writes out.
Arguments:
- filename: where distances will be written, required.
- summary_function: a string naming the function used for
estimating param from threeway distances. Valid values are 'mean'
(default) and 'median'.
- format: output format of distance matrix
"""
if self._on_master_cpu:
# only write output from 0th node
table = self.getTable(summary_function=summary_function, **kwargs)
table.writeToFile(filename, format=format)
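# Illustrative sketch (not part of the original module): one minimal way to drive EstimateDistances
# end to end. The alignment file name and the HKY85 model choice are assumptions for demonstration.
def _example_pairwise_distances(aln_path="primates.fasta"):
    from cogent.evolve.models import HKY85
    aln = LoadSeqs(aln_path)
    d = EstimateDistances(aln, submodel=HKY85())
    d.run()
    return d.getPairwiseDistances()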
| mit | 1,284,532,346,036,393,500 | 40.255452 | 80 | 0.56679 | false | 4.271935 | false | false | false |
bharadwajyarlagadda/korona | setup.py | 1 | 1670 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def parse_requirements(filename):
return [line.strip()
for line in read(filename).strip().split('\n')
if line.strip()]
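# Illustrative note: a requirements.txt containing, e.g., "six>=1.10" and "docutils" on separate
# lines would be returned as ['six>=1.10', 'docutils'].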
pkg = {}
exec(read('korona/__pkg__.py'), pkg)
readme = read('README.rst')
changelog = read('CHANGELOG.rst')
requirements = parse_requirements('requirements.txt')
setup(
name=pkg['__package_name__'],
version=pkg['__version__'],
url=pkg['__url__'],
license=pkg['__license__'],
author=pkg['__author__'],
author_email=pkg['__email__'],
description=pkg['__description__'],
long_description=readme + '\n\n' + changelog,
packages=find_packages(exclude=['tests', 'tasks']),
install_requires=requirements,
keywords='make html built html create html korona html maker html build',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
]
)
| mit | 1,441,907,457,221,212,400 | 30.509434 | 77 | 0.607784 | false | 4.093137 | false | false | false |
chokribr/inveniocnudst | modules/bibdocfile/lib/bibdocfile_web_tests.py | 8 | 5273 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibDocFile module web tests."""
import time
from invenio.config import CFG_SITE_SECURE_URL
from invenio.testutils import make_test_suite, \
run_test_suite, \
InvenioWebTestCase
class InvenioBibDocFileWebTest(InvenioWebTestCase):
"""BibDocFile web tests."""
def test_add_new_file(self):
"""bibdocfile - web test add a new file"""
self.browser.get(CFG_SITE_SECURE_URL + "/record/5?ln=en")
# login as admin
self.login(username="admin", password="")
self.find_element_by_link_text_with_timeout("Manage Files of This Record")
self.browser.find_element_by_link_text("Manage Files of This Record").click()
self.find_element_by_xpath_with_timeout("//div[@id='uploadFileInterface']//input[@type='button' and @value='Add new file']")
self.browser.find_element_by_xpath("//div[@id='uploadFileInterface']//input[@type='button' and @value='Add new file']").click()
self.wait_element_displayed_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput"))
filename = "Tiger_" + time.strftime("%Y%m%d%H%M%S")
self.fill_textbox(textbox_name="rename", text=filename)
self.fill_textbox(textbox_id="balloonReviseFileInput", text="/opt/invenio/lib/webtest/invenio/test.pdf")
self.find_element_by_id_with_timeout("bibdocfilemanagedocfileuploadbutton")
self.browser.find_element_by_id("bibdocfilemanagedocfileuploadbutton").click()
self.wait_element_hidden_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput"))
self.find_elements_by_class_name_with_timeout('reviseControlFileColumn')
self.page_source_test(expected_text=filename)
self.find_element_by_id_with_timeout("applyChanges")
self.browser.find_element_by_id("applyChanges").click()
self.page_source_test(expected_text='Your modifications to record #5 have been submitted')
self.logout()
def test_revise_file(self):
"""bibdocfile - web test revise a file"""
self.browser.get(CFG_SITE_SECURE_URL + "/record/6?ln=en")
# login as admin
self.login(username="admin", password="")
self.find_element_by_link_text_with_timeout("Manage Files of This Record")
self.browser.find_element_by_link_text("Manage Files of This Record").click()
self.find_element_by_link_text_with_timeout("revise")
self.browser.find_element_by_link_text("revise").click()
self.find_element_by_id_with_timeout("balloonReviseFileInput")
self.wait_element_displayed_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput"))
self.fill_textbox(textbox_id="balloonReviseFileInput", text="/opt/invenio/lib/webtest/invenio/test.pdf")
self.find_element_by_id_with_timeout("bibdocfilemanagedocfileuploadbutton")
self.browser.find_element_by_id("bibdocfilemanagedocfileuploadbutton").click()
self.wait_element_hidden_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput"))
self.find_element_by_id_with_timeout("applyChanges")
self.browser.find_element_by_id("applyChanges").click()
self.page_source_test(expected_text='Your modifications to record #6 have been submitted')
self.logout()
def test_delete_file(self):
"""bibdocfile - web test delete a file"""
self.browser.get(CFG_SITE_SECURE_URL + "/record/8?ln=en")
# login as admin
self.login(username="admin", password="")
self.find_element_by_link_text_with_timeout("Manage Files of This Record")
self.browser.find_element_by_link_text("Manage Files of This Record").click()
self.browser.find_element_by_xpath("(//div[@id='uploadFileInterface']//tr[@class='even']//a[text()='delete'])[1]").click()
self.handle_popup_dialog()
time.sleep(1)
self.page_source_test(expected_text=['9812226', 'pdf', 'ps.gz'],
unexpected_text=['9812226.fig1.ps.gz'])
self.find_element_by_name_with_timeout("cancel")
self.browser.find_element_by_name("cancel").click()
self.handle_popup_dialog()
time.sleep(1)
self.page_source_test(expected_text='Your modifications to record #8 have been cancelled')
self.logout()
TEST_SUITE = make_test_suite(InvenioBibDocFileWebTest, )
if __name__ == '__main__':
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 | 3,547,754,400,962,725,400 | 50.194175 | 135 | 0.680068 | false | 3.579769 | true | false | false |
NikolayKutsoloshchenko/Selenium-1- | test_selenium11.py | 1 | 2644 | #encoding=UTF-8
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import pytest
@pytest.fixture
def driver(request):
driver = webdriver.Chrome()
driver.get('http://localhost/litecart/en/')
request.addfinalizer(driver.quit)
return driver
def test_login(driver):
driver.find_element_by_xpath('//a[contains(text(),"New customers click here")]').click()
    City = driver.find_element_by_xpath('//input [@name="city"]')
    City.send_keys('Random City')  # for some reason the city field has no 'required' attribute, although it is mandatory
    Required_elements = driver.find_elements_by_css_selector('input[required="required"]')  # collect all the required input fields
for element in Required_elements:
name = element.get_attribute('name')
if name == 'firstname':
element.send_keys('Nikolay')
elif name == 'lastname':
element.send_keys('Kutsoloshchenko')
elif name == 'address1':
element.send_keys('Random name str.')
elif name == 'postcode':
element.send_keys('12345')
elif name =='email':
element.send_keys('[email protected]')
elif name =='phone':
text = element.get_attribute('placeholder') + '123456789'
element.send_keys(text)
elif name == 'password':
element.send_keys('Qwert12345')
elif name == 'confirmed_password':
element.send_keys('Qwert12345')
driver.find_element_by_css_selector('button[type ="submit"]').click()
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"Logout")]')))
driver.find_element_by_xpath('//a[contains(text(),"Logout")]').click()
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR,'input[name="email"]')))
email_field = driver.find_element_by_css_selector('input[name="email"]')
email_field.send_keys('[email protected]')
password_field = driver.find_element_by_css_selector('input[name="password"]')
password_field.send_keys('Qwert12345')
driver.find_element_by_css_selector('button[name="login"]').click()
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"Logout")]')))
driver.find_element_by_xpath('//a[contains(text(),"Logout")]').click() | apache-2.0 | -8,080,912,313,460,937,000 | 47.557692 | 122 | 0.659029 | false | 3.433333 | false | false | false |
wscullin/spack | var/spack/repos/builtin/packages/libfontenc/package.py | 3 | 1702 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libfontenc(AutotoolsPackage):
"""libfontenc - font encoding library."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libfontenc"
url = "https://www.x.org/archive/individual/lib/libfontenc-1.1.3.tar.gz"
version('1.1.3', '0ffa28542aa7d246299b1f7211cdb768')
depends_on('zlib')
depends_on('xproto', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
| lgpl-2.1 | 9,090,554,424,462,816,000 | 41.55 | 81 | 0.670388 | false | 3.757174 | false | false | false |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/extractors/reddit_video_extractor.py | 1 | 5936 | """
Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the
users or on the subreddits.
Copyright (C) 2017, Kyle Hickey
This file is part of the Downloader for Reddit.
Downloader for Reddit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Downloader for Reddit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import requests
from .base_extractor import BaseExtractor
from ..core.errors import Error
from ..utils import reddit_utils, video_merger
class RedditVideoExtractor(BaseExtractor):
url_key = ['v.redd.it']
def __init__(self, post, **kwargs):
super().__init__(post, **kwargs)
self.post = post
self.host_vid = self.get_host_vid()
self.url = None
self.audio_url = None
self.get_vid_url()
def get_host_vid(self):
"""
        Finds the submission that holds the video file to be extracted. If the post is a crosspost
        from another location, the parent crosspost is returned as it is the post which holds the
        full video information; otherwise the original submission itself is returned.
        :return: The submission which holds the video information to be downloaded: the crosspost
        parent for crossposts, or the supplied post's submission otherwise.
"""
if hasattr(self.submission, 'crosspost_parent'):
try:
r = reddit_utils.get_reddit_instance()
                parent_submission = r.submission(self.submission.crosspost_parent.split('_')[1])
parent_submission.title # fetch info from server to load submission
return parent_submission
except AttributeError:
pass
return self.submission
def get_vid_url(self):
"""
Extracts the video url from the reddit post and determines if the post is a video and will contain an audio
file.
"""
try:
self.url = self.host_vid.media['reddit_video']['fallback_url']
except (AttributeError, TypeError):
self.url = self.host_vid.url
if self.url is not None:
self.get_audio_url()
def is_gif(self):
return self.host_vid.media['reddit_video']['is_gif']
def extract_content(self):
if self.settings_manager.download_reddit_hosted_videos:
if self.url is not None:
video_content = self.get_video_content()
try:
if self.audio_url is not None:
audio_content = self.get_audio_content()
if audio_content is not None and video_content is not None:
merge_set = video_merger.MergeSet(
video_id=video_content.id,
audio_id=audio_content.id,
date_modified=self.post.date_posted
)
video_merger.videos_to_merge.append(merge_set)
except:
                    message = 'Failed to locate content'
self.handle_failed_extract(error=Error.FAILED_TO_LOCATE, message=message, log_exception=True,
extractor_error_message=message)
else:
message = 'Failed to find acceptable url for download'
self.handle_failed_extract(error=Error.FAILED_TO_LOCATE, message=message, log_exception=True,
extractor_error_message=message)
def get_video_content(self):
ext = 'mp4'
content = self.make_content(self.url, ext, name_modifier='(video)' if self.audio_url is not None else '')
return content
def get_audio_url(self):
"""
Iterates through what I'm sure will be an increasing list of parsers to find a valid audio url. Because not only
does reddit separate the audio files from its video files when hosting a video, but they also change the path
to get the audio file about every three months for some reason.
"""
parsers = [
lambda url: url.rsplit('/', 1)[0] + '/audio',
lambda url: re.sub('DASH_[A-z 0-9]+', 'DASH_audio', url)
]
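        # Illustration with a made-up URL: 'https://v.redd.it/abc/DASH_720.mp4'
        # becomes 'https://v.redd.it/abc/audio' with the first parser and
        # 'https://v.redd.it/abc/DASH_audio.mp4' with the second one.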
for parser in parsers:
try:
url = parser(self.url)
if self.check_audio_content(url):
self.audio_url = url
return
except AttributeError:
self.logger.error('Failed to get audio link for reddit video.', extra=self.get_log_data())
def check_audio_content(self, audio_url):
"""
Checks the extracted audio url to make sure that a valid status code is returned. Reddit videos are being
mislabeled by reddit as being videos when they are in fact gifs. This rectifies the problem by checking that
the audio link is valid before trying to make content from the audio portion of a video which does not have
audio.
:return: True if the audio link is valid, False if not.
"""
response = requests.head(audio_url)
return response.status_code == 200
def get_audio_content(self):
ext = 'mp3'
content = self.make_content(self.audio_url, ext, name_modifier='(audio)')
return content
| gpl-3.0 | 487,824,003,986,175,740 | 41.705036 | 120 | 0.611691 | false | 4.40683 | false | false | false |
tomwright01/iowa_oct_analysis | App/readIowaData.py | 1 | 6078 | import numpy as np
import re
import xml.etree.ElementTree
import logging
logger = logging.getLogger(__name__)
def readIowaSurfaces(fname):
"""Read the surfaces .xml file generated from OCTExplorer
Params:
fname - full path to the .xml file
Returns:
{'scan_size' - {'x','y','z'} - scan_size in voxels,
'voxel_size' - {'x','y','z'} - voxel_size in microns,
'scan_system' - 'cirrus'|'bioptigen' - Recording system manufacturer,
#TODO - find out what the value is for .jpg images and heidelburg
'eye': 'OD'|'OS'
'surface_names': [] - list of surfaces identified
'surface_data': an [nSurfaces,nBscans,nAscans] numpy.maskedarray of the surface data
each (Surface,x,y) value is an integer value indicating the depth of the surface
pixels are counted from the top of the scan and numbered from 1
Examples:
Description:
OCTExplorer.exe (https://www.iibi.uiowa.edu/content/iowa-reference-algorithms-human-and-murine-oct-retinal-layer-analysis-and-display)
implements the Iowa Reference Algorithm to segment Ocular Coherence Tomography files from a variety of commercial systems.
The principle output is an .xml file containing information delineating 10 retinal surfaces.
This function reads the xml file to extract metadata and returns the surface information as numpy ndarray.
"""
#define labels for the 10 retinal surfaces
#surface_labels = ['ILM','RNFL-GCL','GCL-IPL','IPL-INL','INL-OPL',
#'OPL-HFL','BMEIS','IS/OSJ','IB_OPR','IB_RPE','OB_RPE']
surface_labels = {}
logger.debug('Loading surfaces file:{}'.format(fname))
xml_root = xml.etree.ElementTree.parse(fname).getroot()
# OCTexplorer version 3 used a <z> element for suface heights, version 4 uses <y>
version = xml_root.find('version').text
if version.startswith('3'):
value_element = 'z'
else:
value_element = 'y'
# first lets extract the scan size information
scan_size = {'x': int(xml_root.find('./scan_characteristics/size/x').text),
'y': int(xml_root.find('./scan_characteristics/size/y').text),
'z': int(xml_root.find('./scan_characteristics/size/z').text)}
voxel_size = {'x': float(xml_root.find('./scan_characteristics/voxel_size/x').text),
'y': float(xml_root.find('./scan_characteristics/voxel_size/y').text),
'z': float(xml_root.find('./scan_characteristics/voxel_size/z').text)}
    # try to extract system information
system = xml_root.find('./scan_characteristics/manufacturer').text.lower()
if bool(re.search('carl zeiss',system)):
system = 'cirrus'
    elif bool(re.search('bioptigen', system)):
system = 'bioptigen'
else:
logger.warn('Unknown system type')
system = 'unknown'
# structure to hold layer measurements
# data in this structure is in pixels and can be used by the centering function
nlayers = int(xml_root.find('surface_num').text)
if version.startswith('3'):
data = np.empty((nlayers,
scan_size['y'],
scan_size['x']),
dtype=np.float)
else:
data = np.empty((nlayers,
scan_size['z'],
scan_size['x']),
dtype=np.float)
p = re.compile('.*\((.*)\)')
for surface in xml_root.findall('surface'):
# identify which surface this is and assign an index
# can't use the label in the xml file as these are not contiguous
surface_name = surface.find('name').text
logger.debug('Loading surface:{}'.format(surface_name))
surface_idx = np.NaN
# extract the surface label
match = re.match(p, surface_name)
if match:
if not match.group(1) in surface_labels.keys():
#surface not seen before add the label and description
surface_labels[match.group(1)] = (match.group(0),len(surface_labels))
surface_idx = surface_labels[match.group(1)][1]
else:
logger.warning('Failed to identify surface:{}'.format(surface_name))
break
logger.debug('Surface index:{}'.format(surface_idx))
# loop through all the bscans
surface_bscans = surface.findall('bscan')
for bscan_idx in range(data.shape[1]):
bscan = surface_bscans[bscan_idx]
data[surface_idx,bscan_idx,:] = [int(z.text) for z in bscan.findall(value_element)]
# .xml file may also contain information on where segmentation has failed
# create a structure to store this information
undef_mask = np.zeros(data.shape,dtype=np.bool)
undef_xml = xml_root.find('undefined_region')
if undef_xml is not None:
for ascan in undef_xml.findall('ascan'):
x = int(ascan.find('x').text)
y = int(ascan.find('y').text)
undef_mask[:,y,x] = True
data = np.ma.MaskedArray(data, mask = undef_mask)
laterality = xml_root.find('scan_characteristics').find('laterality').text
if laterality.upper() in ['OD','OS']:
laterality = laterality.upper()
else:
# not defined in the xml file, see if we can extract from the filename
p = re.compile('(OD|OS)')
m = re.search(p,fname)
if m:
laterality = m.group(0)
return {'scan_size':scan_size,
'voxel_size':voxel_size,
'scan_system':system,
'eye':laterality,
'surface_names':surface_labels,
'surface_data':data}
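# Minimal usage sketch (the file name below is hypothetical):
#   surfaces = readIowaSurfaces('subject01_Surfaces_Iowa.xml')
#   ilm_depths = surfaces['surface_data'][0]  # first surface, depths in pixels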
def readIowaCenter(fname):
"""Load the GridCenter.xml file
Params:
fname - full path to the _GridCenter_Iowa.xml file
Returns:
(center_x,center_y) - scan center in pixels
"""
xml_root = xml.etree.ElementTree.parse(fname)
c = xml_root.find('center')
center_x = int(c.find('x').text)
center_y = int(c.find('y').text)
return (center_x,center_y)
| mit | 3,205,874,098,201,724,000 | 39.52 | 138 | 0.609905 | false | 3.659241 | false | false | false |
jonfoster/pyxb1 | tests/drivers/test-wildcard.py | 1 | 9405 | import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../schemas/test-wildcard.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
def nc_not (ns_or_absent):
return ( pyxb.xmlschema.structures.Wildcard.NC_not, ns_or_absent )
class TestIntensionalSet (unittest.TestCase):
def testTest (self):
ns = 'URN:namespace'
not_nc = nc_not(ns)
self.assert_(isinstance(not_nc, tuple))
self.assertEqual(2, len(not_nc))
self.assertEqual(pyxb.xmlschema.structures.Wildcard.NC_not, not_nc[0])
self.assertEqual(ns, not_nc[1])
def testUnion_1 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([ nc_any, nc_any ]))
self.assertEqual(nc_not(ns1), UNION([ nc_not(ns1), nc_not(ns1) ]))
self.assertEqual(set([ns1]), UNION([ set([ns1]), set([ns1]) ]))
def testUnion_2 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([ nc_any, set([ns1]) ]))
self.assertEqual(nc_any, UNION([ nc_any, nc_not(ns1) ]))
self.assertEqual(nc_any, UNION([ nc_any, nc_not(None) ]))
def testUnion_3 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns1, ns2]), UNION([set([ns1]), set([ns2])]))
self.assertEqual(set([None, ns1]), UNION([set([None]), set([ns1])]))
self.assertEqual(set([None]), UNION([set([None]), set([None])]))
def testUnion_4 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(ns2)]))
self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(None)]))
def testUnion_5 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([nc_not(ns1), set([ns1, None])])) # 5.1
self.assertEqual(nc_not(None), UNION([nc_not(ns1), set([ns1, ns2])])) # 5.2
self.assertRaises(SchemaValidationError, UNION, [nc_not(ns1), set([None, ns2])]) # 5.3
self.assertEqual(nc_not(ns1), UNION([nc_not(ns1), set([ns2])])) # 5.4
def testUnion_6 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([nc_not(None), set([ns1, ns2, None])])) # 6.1
self.assertEqual(nc_not(None), UNION([nc_not(None), set([ns1, ns2])])) # 6.2
def testIntersection_1 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, ISECT([ nc_any, nc_any ]))
self.assertEqual(nc_not(ns1), ISECT([ nc_not(ns1), nc_not(ns1) ]))
self.assertEqual(set([ns1]), ISECT([ set([ns1]), set([ns1]) ]))
def testIntersection_2 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns1]), ISECT([ nc_any, set([ns1]) ]))
self.assertEqual(nc_not(ns1), ISECT([ nc_any, nc_not(ns1) ]))
self.assertEqual(nc_not(None), ISECT([ nc_any, nc_not(None) ]))
def testIntersection_3 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2, None])]))
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2])]))
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns2])]))
def testIntersection_4 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns2]), ISECT([set([ns1, ns2]), set([ns2, None])]))
self.assertEqual(set([ns2, None]), ISECT([set([None, ns1, ns2]), set([ns2, None])]))
self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, None])]))
self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, ns1]), set([ns2, None])]))
self.assertEqual(set([ns1]), ISECT([set([ns1, None]), set([None, ns2, ns1]), set([ns1, ns2])]))
def testIntersection_5 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertRaises(SchemaValidationError, ISECT, [nc_not(ns1), nc_not(ns2)])
def testIntersection_6 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_not(ns1), ISECT([nc_not(ns1), nc_not(None)]))
class TestWildcard (unittest.TestCase):
def testElement (self):
# NB: Test on CTD, not element
self.assert_(wrapper_._HasWildcardElement)
xmls = '<wrapper><first/><second/><third/></wrapper>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(1, len(instance.wildcardElements()))
# Alternative parser path
instance = CreateFromDocument(xmls)
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(1, len(instance.wildcardElements()))
def _validateWildcardWrappingRecognized (self, instance):
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(1, len(instance.wildcardElements()))
dom = instance.wildcardElements()[0]
self.assertTrue(isinstance(dom, Node))
self.assertEquals(Node.ELEMENT_NODE, dom.nodeType)
self.assertEquals('third', dom.nodeName)
self.assertEquals(1, len(dom.childNodes))
cdom = dom.firstChild
self.assertTrue(isinstance(cdom, Node))
self.assertEquals(Node.ELEMENT_NODE, cdom.nodeType)
self.assertEquals('selt', cdom.nodeName)
ccdom = cdom.firstChild
self.assertTrue(isinstance(ccdom, Node))
self.assertEquals(Node.TEXT_NODE, ccdom.nodeType)
self.assertEquals('text', ccdom.data)
def testWildcardWrappingRecognized (self):
# NB: Test on CTD, not element
self.assert_(wrapper_._HasWildcardElement)
xmls = '<wrapper><first/><second/><third><selt>text</selt></third></wrapper>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self._validateWildcardWrappingRecognized(instance)
# Alternative parser path
instance = CreateFromDocument(xmls)
self._validateWildcardWrappingRecognized(instance)
def testMultiElement (self):
tested_overmax = False
for rep in range(0, 6):
xmls = '<wrapper><first/><second/>%s</wrapper>' % (''.join(rep * ['<third/>']),)
doc = pyxb.utils.domutils.StringToDOM(xmls)
if 3 >= rep:
instance = wrapper.createFromDOM(doc.documentElement)
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(rep, len(instance.wildcardElements()))
for i in range(0, rep):
self.assertEquals('third', instance.wildcardElements()[i].nodeName)
else:
tested_overmax = True
self.assertRaises(ExtraContentError, wrapper.createFromDOM, doc.documentElement)
self.assert_(tested_overmax)
def testAttribute (self):
# NB: Test on CTD, not element
self.assert_(isinstance(wrapper_._AttributeWildcard, pyxb.binding.content.Wildcard))
xmls = '<wrapper myattr="true" auxattr="somevalue"/>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self.assert_(isinstance(instance.wildcardAttributeMap(), dict))
self.assertEquals(1, len(instance.wildcardAttributeMap()))
self.assertEquals('somevalue', instance.wildcardAttributeMap()['auxattr'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,425,045,343,297,419,000 | 44.216346 | 103 | 0.632749 | false | 3.373386 | true | false | false |
qtile/qtile | libqtile/scripts/run_cmd.py | 2 | 3183 | # Copyright (c) 2014, Roger Duran
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Command-line wrapper to run commands and add rules to new windows
"""
import argparse
import atexit
import subprocess
from libqtile import ipc
from libqtile.command import graph
def run_cmd(opts) -> None:
if opts.socket is None:
socket = ipc.find_sockfile()
else:
socket = opts.socket
client = ipc.Client(socket)
root = graph.CommandGraphRoot()
cmd = [opts.cmd]
if opts.args:
cmd.extend(opts.args)
proc = subprocess.Popen(cmd)
match_args = {"net_wm_pid": proc.pid}
rule_args = {"float": opts.float, "intrusive": opts.intrusive,
"group": opts.group, "break_on_match": not opts.dont_break}
cmd = root.call("add_rule")
_, rule_id = client.send((root.selectors, cmd.name, (match_args, rule_args), {}))
def remove_rule() -> None:
cmd = root.call("remove_rule")
client.send((root.selectors, cmd.name, (rule_id,), {}))
atexit.register(remove_rule)
proc.wait()
def add_subcommand(subparsers, parents):
parser = subparsers.add_parser(
"run-cmd",
parents=parents,
help="A wrapper around the command graph"
)
parser.add_argument(
'-s',
'--socket',
help='Use specified communication socket.')
parser.add_argument(
'-i',
'--intrusive',
action='store_true',
help='If the new window should be intrusive.')
parser.add_argument(
'-f',
'--float',
action='store_true',
help='If the new window should be float.')
parser.add_argument(
'-b',
'--dont-break',
action='store_true',
help='Do not break on match (keep applying rules).')
parser.add_argument(
'-g',
'--group',
help='Set the window group.')
parser.add_argument(
'cmd',
help='Command to execute.'),
parser.add_argument(
'args',
nargs=argparse.REMAINDER,
metavar='[args ...]',
help='Optional arguments to pass to command.'
)
parser.set_defaults(func=run_cmd)
| mit | 170,839,424,257,160,400 | 30.83 | 85 | 0.647817 | false | 3.958955 | false | false | false |
kotnik/reddrip | reddrip/util/log.py | 1 | 1054 | """ Logging utilities.
"""
import logging
import logging.handlers
from colorama import Fore, Style
COLORS = {
'DEBUG': Style.DIM,
'INFO': Style.NORMAL,
'WARNING': Style.BRIGHT,
'ERROR': Fore.RED,
'CRITICAL': Style.BRIGHT + Fore.RED,
}
class ColoredFormatter(logging.Formatter):
def format(self, record):
return COLORS[record.levelname] + logging.Formatter.format(self, record) + Style.RESET_ALL
def setup_logging(verbose=True, color=True):
""" Sets logging format. """
logging.getLogger().setLevel(logging.DEBUG)
stream = logging.StreamHandler()
stream.setLevel(logging.DEBUG if verbose else logging.INFO)
if color:
stream_format = ColoredFormatter(
"%(asctime)s %(name)s %(levelname)s %(message)s"
)
else:
stream_format = logging.Formatter(
"%(asctime)s %(name)s %(levelname)s %(message)s"
)
stream.setFormatter(stream_format)
logging.getLogger().addHandler(stream)
logging.getLogger('requests').setLevel(logging.ERROR)
| gpl-3.0 | -8,945,876,508,965,732,000 | 25.35 | 98 | 0.658444 | false | 3.875 | false | false | false |
toladata/TolaTables | tola/models.py | 1 | 1390 | from django.db import models
from django.contrib.auth.signals import user_logged_in, user_logged_out
from urllib2 import urlopen
import json
class LoggedUser(models.Model):
username = models.CharField(max_length=30, primary_key=True)
country = models.CharField(max_length=100, blank=False)
email = models.CharField(max_length=100, blank=False,
default='[email protected]')
def __unicode__(self):
return self.username
def login_user(sender, request, user, **kwargs):
country = get_user_country(request)
if user:
LoggedUser(username=user.username, country=country,
email=user.email).save()
def logout_user(sender, request, user, **kwargs):
try:
if user:
user = LoggedUser.objects.get(pk=user.username)
user.delete()
except LoggedUser.DoesNotExist:
pass
user_logged_in.connect(login_user)
user_logged_out.connect(logout_user)
def get_user_country(request):
# Automatically geolocate the connecting IP
ip = request.META.get('REMOTE_ADDR')
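    # The ipinfo.io payload is assumed to look like {"ip": "...", "country": "US"},
    # which this helper lowercases to "us".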
try:
response = urlopen('http://ipinfo.io/' + ip + '/json').read()
response = json.loads(response)
return response['country'].lower()
except Exception, e:
response = "undefined"
return response
| gpl-2.0 | 3,115,477,970,496,844,000 | 29.888889 | 72 | 0.627338 | false | 3.982808 | false | false | false |
gpospelov/BornAgain | Examples/scatter2d/CylindersInBA.py | 1 | 1340 | """
Cylinder form factor in Born approximation
"""
import bornagain as ba
from bornagain import deg, nm
def get_sample():
"""
Returns a sample with cylinders in a homogeneous environment ("Vacuum"),
implying a simulation in plain Born approximation.
"""
# Define materials
material_Particle = ba.HomogeneousMaterial("Particle", 0.0006, 2e-08)
material_Vacuum = ba.HomogeneousMaterial("Vacuum", 0.0, 0.0)
# Define form factors
ff = ba.FormFactorCylinder(5.0*nm, 5.0*nm)
# Define particles
particle = ba.Particle(material_Particle, ff)
# Define particle layouts
layout = ba.ParticleLayout()
layout.addParticle(particle, 1.0)
layout.setWeight(1)
layout.setTotalParticleSurfaceDensity(0.01)
# Define layers
layer = ba.Layer(material_Vacuum)
layer.addLayout(layout)
# Define sample
sample = ba.MultiLayer()
sample.addLayer(layer)
return sample
def get_simulation(sample):
beam = ba.Beam(1.0, 0.1*nm, ba.Direction(0.2*deg, 0*deg))
detector = ba.SphericalDetector(200, -2*deg, 2*deg, 200, 0*deg, 2*deg)
simulation = ba.GISASSimulation(beam, sample, detector)
return simulation
if __name__ == '__main__':
import ba_plot
sample = get_sample()
simulation = get_simulation(sample)
ba_plot.run_and_plot(simulation)
| gpl-3.0 | -4,805,916,511,805,866,000 | 24.769231 | 76 | 0.678358 | false | 3.418367 | false | false | false |
maferelo/saleor | saleor/graphql/core/fields.py | 1 | 6097 | from functools import partial
import graphene
from django.db.models.query import QuerySet
from django_measurement.models import MeasurementField
from django_prices.models import MoneyField, TaxedMoneyField
from graphene.relay import PageInfo
from graphene_django.converter import convert_django_field
from graphene_django.fields import DjangoConnectionField
from graphql_relay.connection.arrayconnection import connection_from_list_slice
from promise import Promise
from .types.common import Weight
from .types.money import Money, TaxedMoney
def patch_pagination_args(field: DjangoConnectionField):
"""Add descriptions to pagination arguments in a connection field.
    By default Graphene's connection fields come without descriptions for pagination
    arguments. This function patches those fields to add the descriptions.
"""
field.args["first"].description = "Return the first n elements from the list."
field.args["last"].description = "Return the last n elements from the list."
field.args[
"before"
].description = (
"Return the elements in the list that come before the specified cursor."
)
field.args[
"after"
].description = (
"Return the elements in the list that come after the specified cursor."
)
class BaseConnectionField(graphene.ConnectionField):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
patch_pagination_args(self)
class BaseDjangoConnectionField(DjangoConnectionField):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
patch_pagination_args(self)
@convert_django_field.register(TaxedMoneyField)
def convert_field_taxed_money(*_args):
return graphene.Field(TaxedMoney)
@convert_django_field.register(MoneyField)
def convert_field_money(*_args):
return graphene.Field(Money)
@convert_django_field.register(MeasurementField)
def convert_field_measurements(*_args):
return graphene.Field(Weight)
class PrefetchingConnectionField(BaseDjangoConnectionField):
@classmethod
def connection_resolver(
cls,
resolver,
connection,
default_manager,
max_limit,
enforce_first_or_last,
root,
info,
**args,
):
# Disable `enforce_first_or_last` if not querying for `edges`.
values = [
field.name.value for field in info.field_asts[0].selection_set.selections
]
if "edges" not in values:
enforce_first_or_last = False
return super().connection_resolver(
resolver,
connection,
default_manager,
max_limit,
enforce_first_or_last,
root,
info,
**args,
)
@classmethod
def resolve_connection(cls, connection, default_manager, args, iterable):
if iterable is None:
iterable = default_manager
if isinstance(iterable, QuerySet):
_len = iterable.count()
else:
_len = len(iterable)
connection = connection_from_list_slice(
iterable,
args,
slice_start=0,
list_length=_len,
list_slice_length=_len,
connection_type=connection,
edge_type=connection.Edge,
pageinfo_type=PageInfo,
)
connection.iterable = iterable
connection.length = _len
return connection
class FilterInputConnectionField(BaseDjangoConnectionField):
def __init__(self, *args, **kwargs):
self.filter_field_name = kwargs.pop("filter_field_name", "filter")
self.filter_input = kwargs.get(self.filter_field_name)
self.filterset_class = None
if self.filter_input:
self.filterset_class = self.filter_input.filterset_class
super().__init__(*args, **kwargs)
@classmethod
def connection_resolver(
cls,
resolver,
connection,
default_manager,
max_limit,
enforce_first_or_last,
filterset_class,
filters_name,
root,
info,
**args,
):
# Disable `enforce_first_or_last` if not querying for `edges`.
values = [
field.name.value for field in info.field_asts[0].selection_set.selections
]
if "edges" not in values:
enforce_first_or_last = False
first = args.get("first")
last = args.get("last")
if enforce_first_or_last:
assert first or last, (
"You must provide a `first` or `last` value to properly "
"paginate the `{}` connection."
).format(info.field_name)
if max_limit:
if first:
assert first <= max_limit, (
"Requesting {} records on the `{}` connection exceeds the "
"`first` limit of {} records."
).format(first, info.field_name, max_limit)
args["first"] = min(first, max_limit)
if last:
assert last <= max_limit, (
"Requesting {} records on the `{}` connection exceeds the "
"`last` limit of {} records."
).format(last, info.field_name, max_limit)
args["last"] = min(last, max_limit)
iterable = resolver(root, info, **args)
on_resolve = partial(cls.resolve_connection, connection, default_manager, args)
filter_input = args.get(filters_name)
if filter_input and filterset_class:
iterable = filterset_class(
data=dict(filter_input), queryset=iterable, request=info.context
).qs
if Promise.is_thenable(iterable):
return Promise.resolve(iterable).then(on_resolve)
return on_resolve(iterable)
def get_resolver(self, parent_resolver):
return partial(
super().get_resolver(parent_resolver),
self.filterset_class,
self.filter_field_name,
)
| bsd-3-clause | 4,927,105,402,108,838,000 | 30.266667 | 87 | 0.609152 | false | 4.275596 | false | false | false |
CCLab/Raw-Salad | scripts/autoupload/hierarchy_inserter.py | 1 | 24456 | # -*- coding: utf-8 -*-
'''
Created on 25-08-2011
'''
class HierarchyInserter:
"""Inserts hierarchy from the given collection. Uses hierarchy described in
object of the following form:
{
"columns": [list of columns creating hierarchy],
"field_type_label": name of column(that will be inserted), representing
type of row(position in hierarchy),
"field_name_label": name of column(that will be inserted), representing
name of row,
"name_column": number of column which value represents name of data row
and will be moved to new field(its name is field_name_label),
"lowest_type": name that will be inserted to the type column in rows
that are in the lowest position in hierarchy
"summable": [list of columns that should be summed after creating hierarchy]
}
Data passed to HierarchyInserter should be correct, otherwise created
data will contain bugs.
"""
def __init__(self, csv_data, hierarchy_def, schema_descr,
add_id=False, teryt_data=None):
"""Initiates object.
Arguments:
csv_data -- data that needs hierarchy inserting
hierarchy_def -- object representing hierarchy in data
        schema_descr -- object describing schema of data
add_id -- if new column with id should be prepended for each row
teryt_data -- data from file with TERYT codes, can be used to generate id
"""
self.csv_data = csv_data
self.hierarchy_fields = hierarchy_def['columns']
self.hierarchy_field_label = hierarchy_def['field_name_label']
self.hierarchy_columns_labels = [self.csv_data.get_header()[i] for i in self.hierarchy_fields]
self.type_label = hierarchy_def['field_type_label']
self.lowest_type = hierarchy_def['lowest_type']
self.modified_rows = []
self.name_column_nr = hierarchy_def['name_column']
self.summable = hierarchy_def['summable']
self.fields = schema_descr['fields']
self.delete_order = sorted(self.hierarchy_fields + [self.name_column_nr], reverse=True)
self.hierarchy_obj = HierarchyNode(0)
self.add_id = add_id
self.use_teryt = False
if self.add_id:
if teryt_data:
self.use_teryt = True
self.teryt_id_generator = TerytIdGenerator(teryt_data)
self.bad_hierarchy_log = []
def insert_hierarchy(self):
"""Process of inserting hierarchy is as follows:
- for header: remove hierarchy fields and type column,
prepend field_type_label, field_name_label
- for row: create rows representing hierarchy(if not created yet) and
remove hierarchy fields and type column, prepend type of row
(lowest_type) and value of name column
Additionally id of rows can be inserted(and then id field is inserted
in header).
Firstly, changes header, then for each row gets its hierarchy, if
it is new hierarchy, adds new rows representing this hierarchy,
in the end clears hierarchy from the row.
After that, if id were generated, fills summable columns
in added hierarchy rows.
"""
self.bad_hierarchy_log = []
header = self.csv_data.get_header()
for nr in self.delete_order:
del header[nr]
header.insert(0, self.type_label)
header.insert(1, self.hierarchy_field_label)
if self.add_id:
header.insert(0, 'id')
self.modified_rows = [header]
row = self.csv_data.get_next_row(row_type='list')
if row is None:
print 'Only header in csv data. No data rows were changed'
return
row_len = len(header)
old_hierarchy = []
new_hierarchy = []
i = 1
while row is not None:
i += 1
try:
new_hierarchy = self.get_hierarchy(row)
except HierarchyError as e:
log = ('row nr %d, ' % i) + e.log
self.bad_hierarchy_log.append(log)
else:
if new_hierarchy != old_hierarchy:
new_hierarchy_rows = self.create_hierarchy_rows(new_hierarchy,
row_len)
self.modified_rows.extend(new_hierarchy_rows)
self.modified_rows.append(self.clean_row(row, new_hierarchy))
old_hierarchy = new_hierarchy
row = self.csv_data.get_next_row(row_type='list')
if self.add_id:
self.fill_summable_values()
def all_rows_correct(self):
"""Returns True if no errors were found, otherwise False."""
return self.bad_hierarchy_log == []
def get_modified_rows(self):
"""Returns list of modified rows if no errors were found, otherwise
empty list.
"""
if self.all_rows_correct():
return self.modified_rows
else:
return []
def get_hierarchy_errors_log(self):
"""Returns string containing errors separated by new line."""
return '\n'.join(self.bad_hierarchy_log)
def clean_row(self, row, hierarchy):
"""Adds id of this row to hierarchy object.
Removes hierarchy fields from the row, moves its type and name(fields
described by name and type column in schema) to the beginning of it.
Adds rows's id if add_id parameter was set to True on constructing
this object.
Arguments:
row -- row to clean
hierarchy -- hierarchy in the row
"""
cleaned_row = row[:]
node = self.get_hierarchy_node(hierarchy)
next_id = node.get_new_child_id()
row_node = HierarchyNode(next_id)
node.add_child(row_node, next_id)
hierarchy_field_name = cleaned_row[self.name_column_nr]
for nr in self.delete_order:
del cleaned_row[nr]
cleaned_row.insert(0, self.lowest_type)
cleaned_row.insert(1, hierarchy_field_name)
if self.add_id:
row_hierarchy = hierarchy + [next_id]
full_id = self.get_full_id(row_hierarchy)
cleaned_row.insert(0, full_id)
return cleaned_row
def get_hierarchy(self, row):
"""Returns list representing hierarchy in the row.
Arguments:
row -- data row
"""
hierarchy = []
for nr in self.hierarchy_fields:
if row[nr] == '':
break
hierarchy.append(row[nr])
return hierarchy
def create_hierarchy_rows(self, new_hierarchy, row_len):
"""Returns rows list of hierarchy rows that should be put inside
data to show new_hierarchy. If hierarchy rows have been added for
new_hierarchy already, empty list will be returned. Hierarchy rows
will be have not empty: id(if created), type, name and summable fields.
Arguments:
new_hierarchy -- list representing hierarchy in row in data
row_len -- length of that row, needed create correct hierarchy row
"""
hierarchy_rows = []
partial_hierarchy = []
act_hierarchy_obj = self.hierarchy_obj
i = 0
for field in new_hierarchy:
partial_hierarchy.append(field)
child = act_hierarchy_obj.get_child(field)
# if this row represents new hierarchy
if child is None:
if self.use_teryt:
new_id = self.teryt_id_generator.get_teryt_id(partial_hierarchy)
if new_id is None:
self.teryt_id_generator.add_teryt_unit(partial_hierarchy)
new_id = self.teryt_id_generator.get_teryt_id(partial_hierarchy)
else:
new_id = act_hierarchy_obj.get_new_child_id()
child = HierarchyNode(new_id)
act_hierarchy_obj.add_child(child, field)
new_row = ['' for _ in range(row_len)]
if self.add_id:
new_row[0] = self.get_full_id(partial_hierarchy)
new_row[1] = self.hierarchy_columns_labels[i]
new_row[2] = field
else:
new_row[0] = self.hierarchy_columns_labels[i]
new_row[1] = field
hierarchy_rows.append(new_row)
act_hierarchy_obj = child
i += 1
return hierarchy_rows
def get_hierarchy_node(self, hierarchy):
"""Returns HierarchyNode representing hierarchy. If there was not
created node representing this hierarchy, None is returned.
Arguments:
hierarchy - hierarchy list
"""
node = self.hierarchy_obj
for field in hierarchy:
if not node.has_child(field):
return None
node = node.get_child(field)
return node
def get_full_id(self, hierarchy):
"""Returns id for row with specified hierarchy. If there is no node
representing such a hierarchy, HierarchyError is thrown.
Arguments:
hierarchy -- hierarchy list
"""
id_list = []
node = self.hierarchy_obj
for field in hierarchy:
if not node.has_child(field):
raise HierarchyError('Can not create full id for hierarchy %s' % hierarchy)
node = node.get_child(field)
id_list.append( str(node.get_id()) )
return '-'.join(id_list)
def fill_summable_values(self):
"""Fills summable columns in added hierarchy rows."""
summable_cols = self.summable[:]
for i in range(len(summable_cols)):
for col_nr in self.delete_order:
if col_nr < summable_cols[i]:
summable_cols[i] -= 1
if self.add_id:
summable_cols[i] += 3
else:
summable_cols[i] += 2
summable_cols_types = [self.fields[i]['type'] for i in self.summable]
rows_dict = {}
i = -1
for row in self.modified_rows:
i += 1
# omitting header
if i == 0:
continue
id = row[0]
rows_dict[id] = row
parent_id = self.get_parent_id(id)
while parent_id:
parent_row = rows_dict[parent_id]
j = 0
for col_nr in summable_cols:
value = row[col_nr]
type = summable_cols_types[j]
if parent_row[col_nr] == '':
parent_row[col_nr] = 0
if value == '':
continue
if type == 'int':
parent_row[col_nr] += int(value)
elif type == 'float' and value != '':
commas_in_field = value.count(',')
dots_in_field = value.count('.')
                        if commas_in_field > 0:
                            if dots_in_field > 0:
                                parent_row[col_nr] += float( value.replace(',', '', commas_in_field) )
                            else:
                                value = value.replace(',', '', commas_in_field - 1)
                                parent_row[col_nr] += float( value.replace(',', '.') )
                        else:
                            parent_row[col_nr] += float(value)
j += 1
parent_id = self.get_parent_id(parent_id)
def get_parent_id(self, id):
"""Returns id of parent of row with given id. If this row has
no parent, None is returned.
Parameters:
id -- id of child
"""
if id.count('-') == 0:
return None
return id.rsplit('-', 1)[0]
class HierarchyNode:
"""Helper class used to remember ids of hierarchy elements."""
def __init__(self, id):
"""Initiates object.
Arguments:
id -- id of this node(integer)
"""
self.id = id
self.children = {}
self.last_child_id = 0
def add_child(self, node, key):
"""Adds a child node to the list of children of this node. Inserts
it under specified key.
Arguments:
node -- child node
key -- id connected with child node
"""
self.children[key] = node
self.last_child_id += 1
def get_child(self, key):
"""Returns child node with given id. If there is no node with this
id, None is returned.
Arguments:
key -- id connected with child node
"""
if self.has_child(key):
return self.children[key]
return None
def has_child(self, key):
"""Returns True, if there is node connected with value key,
otherwise False.
Arguments:
key -- id connected with child node
"""
return key in self.children
def get_new_child_id(self):
"""Returns id of next child."""
return self.last_child_id + 1
def get_id(self):
"""Returns id of this node."""
return self.id
class HierarchyError(Exception):
"""Class representing errors which happen during processing
hierarchy in data.
"""
def __init__(self, log):
"""Initiates object.
Arguments:
log -- error log
"""
self.log = log
def __str__(self):
"""Returns string representation of error."""
return repr(self.log)
class TerytIdGenerator:
"""Class creating TERYT codes."""
def __init__(self, data):
"""Initiates this object using data from file with TERYT codes.
Arguments:
data -- csv data of file with TERYT codes
"""
self.codes = {}
self.errors = []
row = data.get_next_row()
while row:
type = row['Nazwa typu jednostki']
name = unicode(row['Nazwa']).lower()
full_code = row['TERYT']
woj_code = full_code[:2]
pow_code = full_code[2:4]
gm_code = full_code[4:6]
if self.is_type_ignored(type):
row = data.get_next_row()
continue
if self.is_wojewodztwo(type):
self.codes[name] = {'id': woj_code, 'name': name, 'powiats': {}}
last_woj = self.codes[name]
last_woj_code = woj_code
elif self.is_powiat(type):
new_pow_dict = {'id': pow_code, 'name': name, 'gminas': {}}
if woj_code == last_woj_code:
last_woj['powiats'][name] = new_pow_dict
else:
woj = self.get_teryt_object(woj_code)
if woj is None:
self.errors.append('Error: unknown województwo, code=%s' % woj_code)
print 'Error: unknown województwo, code=', woj_code
row = data.get_next_row()
continue
woj['powiats'][name] = new_pow_dict
last_pow = new_pow_dict
last_pow_code = pow_code
elif self.is_gmina(type):
new_gm_dict = {'id': gm_code, 'name': name}
if woj_code == last_woj_code and pow_code == last_pow_code:
last_pow['gminas'][name] = new_gm_dict
else:
pow = self.get_teryt_object(woj_code, pow_code)
if pow is None:
self.errors.append('Error: unknown powiat, code=%s' % pow_code)
print 'Error: unknown powiat, code=', pow_code
row = data.get_next_row()
continue
pow['gminas'][name] = new_gm_dict
else:
self.errors.append('Error: unknown unit type: %s' % type)
print 'Error: unknown unit type:', type
row = data.get_next_row()
def is_wojewodztwo(self, type):
return type == 'województwo'.decode('utf-8')
def is_powiat(self, type):
return type in ['powiat'.decode('utf-8'), 'miasto na prawach powiatu'.decode('utf-8'),
'miasto stołeczne, na prawach powiatu'.decode('utf-8')]
def is_gmina(self, type):
return type in ['gmina miejska'.decode('utf-8'), 'gmina wiejska'.decode('utf-8'),
'gmina miejsko-wiejska'.decode('utf-8'), 'dzielnica'.decode('utf-8'),
'delegatura'.decode('utf-8'), 'gmina miejska, miasto stołeczne'.decode('utf-8')]
def is_type_ignored(self, type):
return type in ['miasto', 'obszar wiejski']
def get_teryt_object(self, woj_code, pow_code=None, gm_code=None):
"""Returns dict representing teritorial unit which code is
woj_code[ + pow_code[ + gm_code]]. If such a unit cannot be found,
None is returned.
Arguments:
woj_code -- code of unit's wojewodztwo
pow_code -- code of unit's powiat
gm_code -- code of unit's gmina
"""
woj_dict = None
        for woj in self.codes.values():
if woj['id'] == woj_code:
woj_dict = woj
last_dict = woj_dict
break
if woj_dict is None:
return None
if pow_code:
pow_dict = None
            for pow in woj_dict['powiats'].values():
if pow['id'] == pow_code:
pow_dict = pow
last_dict = pow_dict
break
if pow_dict is None:
return None
            if gm_code:
                gm_dict = None
                for gm in pow_dict['gminas'].values():
if gm['id'] == gm_code:
gm_dict = gm
last_dict = gm_dict
break
if gm_dict is None:
return None
return last_dict
def get_teryt_name(self, code):
"""Returns name of teritorial unit which code is
woj_code[ + pow_code[ + gm_code]]. If such a unit cannot be found,
None is returned.
Arguments:
code -- unit's TERYT code
"""
        woj_code = code[:2]
        pow_code = None
        gm_code = None
        if len(code) > 3:
            pow_code = code[2:4]
        if len(code) > 5:
            gm_code = code[4:6]
teryt_object_dict = self.get_teryt_object(woj_code, pow_code, gm_code)
try:
return teryt_object_dict['name']
except TypeError:
return None
def get_teryt_id(self, hierarchy):
"""Returns teryt id of teritorial unit represented by hierarchy.
Letters in hierarchy strings are lowercased and changed so that
they could be in the same form as they are expected.
If such a unit can not be found, returns None.
Arguments:
hierarchy -- list containing name of unit's wojewodztwo,
powiat(optionally) and gmina(optionally)
"""
modified_hierarchy = [unicode(name).lower() for name in hierarchy]
woj_name = modified_hierarchy[0]
pow_name, gm_name = None, None
if len(modified_hierarchy) > 1:
pow_name = self.correct_powiat_name(modified_hierarchy[1])
if len(modified_hierarchy) > 2:
gm_name = self.correct_gmina_name(modified_hierarchy[2])
tmp_obj = self.codes
try:
tmp_obj = tmp_obj[woj_name]
except KeyError:
return None
if pow_name:
try:
tmp_obj = tmp_obj['powiats'][pow_name]
except KeyError:
return None
if gm_name:
try:
tmp_obj = tmp_obj['gminas'][gm_name]
except KeyError:
return None
return tmp_obj['id']
def add_teryt_unit(self, hierarchy):
""" Add new teritorial unit. If it exists in actual hierarchy,
nothing will happen. Otherwise, this unit will be placed in hierarchy.
Arguments:
hierarchy -- hierarchy of new teritorial unit
"""
modified_hierarchy = [unicode(name).lower() for name in hierarchy]
if len(modified_hierarchy) > 1:
modified_hierarchy[1] = self.correct_powiat_name(modified_hierarchy[1])
if len(modified_hierarchy) > 2:
modified_hierarchy[2] = self.correct_gmina_name(modified_hierarchy[2])
if self.get_teryt_id(modified_hierarchy):
return
        tmp_obj = self.codes
        i = 0
        for field in modified_hierarchy:
            if field not in tmp_obj:
                new_id = self.find_highest_id(tmp_obj) + 1
                if i == 0:
                    tmp_obj[field] = {'id': str(new_id), 'name': field, 'powiats': {}}
                elif i == 1:
                    tmp_obj[field] = {'id': str(new_id), 'name': field, 'gminas': {}}
                else:
                    tmp_obj[field] = {'id': str(new_id), 'name': field}
            # descend one level so that deeper units attach to the right parent
            if i == 0:
                tmp_obj = tmp_obj[field]['powiats']
            elif i == 1:
                tmp_obj = tmp_obj[field]['gminas']
            i += 1
def find_highest_id(self, objects):
"""Returns highest id of objects in list.
Argument:
objects -- list of objects that have id value
"""
highest_id = 0
for obj in objects:
id = int(objects[obj]['id'])
if id > highest_id:
highest_id = id
return highest_id
def correct_powiat_name(self, full_name):
"""Returns only powiat's name, without 'powiat' part, 'm. st.'
Arguments:
full_name -- full name of powiat
"""
short_name = full_name
if 'powiat' in short_name:
short_name = short_name.lstrip('powiat')
if short_name[0] == ' ':
short_name = short_name[1:]
if short_name.startswith('m.'):
short_name = short_name.lstrip('m.')
if short_name[0] == ' ':
short_name = short_name[1:]
if short_name.startswith('st.'):
short_name = short_name.lstrip('st.')
if short_name[0] == ' ':
short_name = short_name[1:]
return short_name
def correct_gmina_name(self, full_name):
"""Returns only gmina's name, without 'm.' part.
Arguments:
full_name -- full name of gmina
"""
short_name = full_name
if 'gmina' in short_name:
short_name = short_name.lstrip('gmina')
if short_name[0] == ' ':
short_name = short_name[1:]
if short_name.startswith('m.'):
short_name = short_name.lstrip('m.')
if short_name[0] == ' ':
short_name = short_name[1:]
if short_name.startswith('st.'):
short_name = short_name.lstrip('st.')
if short_name[0] == ' ':
short_name = short_name[1:]
if short_name.endswith(' - miasto'):
short_name = short_name.replace(' - miasto' , '')
if short_name == 'święta katarzyna'.decode('utf-8'):
print 'Zamiana'
short_name = 'siechnice'
if short_name == 'rejowec':
print 'Zamiana'
short_name = 'rejowiec'
return short_name | bsd-3-clause | -3,468,115,035,328,612,000 | 35.656672 | 104 | 0.511146 | false | 4.142494 | false | false | false |
andreagrandi/workshopvenues | workshopvenues/venues/migrations/0002_auto__del_venues__add_facility__add_venue.py | 1 | 3771 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Venues'
db.delete_table(u'venues_venues')
# Adding model 'Facility'
db.create_table(u'venues_facility', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal(u'venues', ['Facility'])
# Adding model 'Venue'
db.create_table(u'venues_venue', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('website', self.gf('django.db.models.fields.CharField')(max_length=50)),
('address', self.gf('django.db.models.fields.CharField')(max_length=200)),
('town', self.gf('django.db.models.fields.CharField')(max_length=30)),
('postcode', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal(u'venues', ['Venue'])
# Adding M2M table for field facilities on 'Venue'
m2m_table_name = db.shorten_name(u'venues_venue_facilities')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('venue', models.ForeignKey(orm[u'venues.venue'], null=False)),
('facility', models.ForeignKey(orm[u'venues.facility'], null=False))
))
db.create_unique(m2m_table_name, ['venue_id', 'facility_id'])
def backwards(self, orm):
# Adding model 'Venues'
db.create_table(u'venues_venues', (
('town', self.gf('django.db.models.fields.CharField')(max_length=30)),
('website', self.gf('django.db.models.fields.CharField')(max_length=50)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('postcode', self.gf('django.db.models.fields.CharField')(max_length=10)),
('address', self.gf('django.db.models.fields.CharField')(max_length=200)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'venues', ['Venues'])
# Deleting model 'Facility'
db.delete_table(u'venues_facility')
# Deleting model 'Venue'
db.delete_table(u'venues_venue')
# Removing M2M table for field facilities on 'Venue'
db.delete_table(db.shorten_name(u'venues_venue_facilities'))
models = {
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues'] | bsd-3-clause | -7,566,447,154,236,077,000 | 45 | 141 | 0.5834 | false | 3.440693 | false | false | false |
andreffs18/flask-template-project | project/commands/delete_user.py | 1 | 1056 | # !/usr/bin/python
# -*- coding: utf-8 -*-
from flask import current_app as app
from flask_script import Command, Option
from project.user.models.user import User
from project.user.services.delete_user_service import DeleteUserService
class DeleteUserCommand(Command):
"""Deletes user by giving username"""
def __init__(self):
super(DeleteUserCommand, self).__init__()
self.username = None
def get_options(self):
return [
Option('-u', '--username', dest='username', default=self.username),
]
def run(self, **kwargs):
app.logger.info("Running {} with arguments {}".format(self.__class__.__name__, kwargs))
self.__dict__.update(**kwargs) # update self's with kwargs
try:
user = User._objects.get(username=self.username)
DeleteUserService(user).call()
app.logger.info("User \"{}\" was successfully deleted!".format(self.username))
except Exception as e:
app.logger.error("Something went wrong :s. {}".format(e))
| mit | 4,887,916,959,062,820,000 | 34.2 | 95 | 0.626894 | false | 4.125 | false | false | false |
tyarkoni/featureX | pliers/utils/updater.py | 2 | 3221 | """ Utility to check if results have changed in foreign APIs. """
import glob
import datetime
from os.path import realpath, join, dirname, exists, expanduser
import hashlib
import pickle
import pandas as pd
import numpy as np
from pliers.stimuli import load_stims
from pliers.transformers import get_transformer
def hash_data(data, blocksize=65536):
"""" Hashes list of data, strings or data """
data = pickle.dumps(data)
hasher = hashlib.sha1()
hasher.update(data)
return hasher.hexdigest()
def check_updates(transformers, datastore=None, stimuli=None):
""" Run transformers through a battery of stimuli, and check if output has
changed. Store results in csv file for comparison.
Args:
transformers (list): A list of tuples of transformer names and
dictionary of parameters to instantiate with (or empty dict).
datastore (str): Filepath of CSV file with results. Stored in home dir
by default.
stimuli (list): List of stimuli file paths to extract from. If None,
use test data.
"""
# Find datastore file
datastore = datastore or expanduser('~/.pliers_updates')
prior_data = pd.read_csv(datastore) if exists(datastore) else None
# Load stimuli
stimuli = stimuli or glob.glob(
join(dirname(realpath(__file__)), '../tests/data/image/CC0/*'))
stimuli = load_stims(stimuli)
# Get transformers
loaded_transformers = {get_transformer(name, **params): (name, params)
for name, params in transformers}
# Transform stimuli
results = pd.DataFrame({'time_extracted': [datetime.datetime.now()]})
for trans in loaded_transformers.keys():
for stim in stimuli:
if trans._stim_matches_input_types(stim):
res = trans.transform(stim)
try: # Add iterable
                    res = [getattr(r, '_data', r.data) for r in res]
except TypeError:
res = getattr(res, '_data', res.data)
res = hash_data(res)
results["{}.{}".format(trans.__hash__(), stim.name)] = [res]
# Check for mismatches
mismatches = []
if prior_data is not None:
last = prior_data[
prior_data.time_extracted == prior_data.time_extracted.max()]. \
iloc[0].drop('time_extracted')
for label, value in results.iteritems():
old = last.get(label)
new = value.values[0]
if old is not None:
if isinstance(new, str):
if new != old:
mismatches.append(label)
elif not np.isclose(old, new):
mismatches.append(label)
results = prior_data.append(results)
results.to_csv(datastore, index=False)
# Get corresponding transformer name and parameters
def get_trans(hash_tr):
for obj, attr in loaded_transformers.items():
if str(obj.__hash__()) == hash_tr:
return attr
delta_t = {m.split('.')[0] for m in mismatches}
delta_t = [get_trans(dt) for dt in delta_t]
return {'transformers': delta_t, 'mismatches': mismatches}
| bsd-3-clause | -5,437,736,890,417,419,000 | 31.867347 | 78 | 0.605092 | false | 4.1242 | false | false | false |
ruymanengithub/vison | vison/support/files.py | 1 | 1544 | """
IO related functions.
:requires: PyFITS
:requires: NumPy
:author: Sami-Matias Niemi
"""
import datetime
import pickle
import os
import numpy as np
from pdb import set_trace as stop
def cPickleDumpDictionary(dictionary, output, protocol=2):
"""
Dumps a dictionary of data to a cPickled file.
:param dictionary: a Python data container does not have to be a dictionary
:param output: name of the output file
:return: None
"""
out = open(output, 'wb')
pickle.dump(dictionary, out, protocol=protocol)
out.close()
def cPickleRead(ffile):
"""
Loads data from a pickled file.
"""
with open(ffile, 'rb') as f:
try:
inpick = pickle.load(f)
except UnicodeDecodeError:
inpick = pickle.load(f, encoding='latin1')
f.close()
return inpick
def cPickleDump(data, output, protocol=2):
"""
Dumps data to a cPickled file.
:param data: a Python data container
:param output: name of the output file
:return: None
"""
out = open(output, 'wb')
pickle.dump(data, out, protocol=protocol)
out.close()
def convert_fig_to_eps(figname):
"""Converts a figure to .eps. Returns new file name."""
    root = os.path.splitext(figname)[0]
epsname = '%s.eps' % root
os.system('convert %s %s' % (figname, epsname))
return epsname
def test():
data = ['a']
picklef = 'test_cpickle.pick'
cPickleDump(data, picklef)
data = cPickleRead(picklef)
stop()
if __name__ == '__main__':
test()
| gpl-3.0 | 4,842,947,694,927,793,000 | 19.051948 | 79 | 0.628886 | false | 3.525114 | false | false | false |
Reddcraft/reddblockd | lib/config.py | 1 | 1210 | # -*- coding: utf-8 -*-
VERSION = "1.5.0" #should keep up with the counterwallet version it works with (for now at least)
DB_VERSION = 22 #a db version increment will cause counterblockd to rebuild its database off of counterpartyd
CAUGHT_UP = False #atomic state variable, set to True when counterpartyd AND counterblockd are caught up
UNIT = 100000000
SUBDIR_ASSET_IMAGES = "asset_img" #goes under the data dir and stores retrieved asset images
SUBDIR_FEED_IMAGES = "feed_img" #goes under the data dir and stores retrieved feed images
MARKET_PRICE_DERIVE_NUM_POINTS = 8 #number of last trades over which to derive the market price (via WVAP)
# FROM counterpartyd
# NOTE: These constants must match those in counterpartyd/lib/config.py
REGULAR_DUST_SIZE = 5430
MULTISIG_DUST_SIZE = 5430 * 2
ORDER_BTC_DUST_LIMIT_CUTOFF = MULTISIG_DUST_SIZE
mongo_db = None #will be set on server init
BTC = 'BTC'
XCP = 'XCP'
MAX_REORG_NUM_BLOCKS = 10 #max reorg we'd likely ever see
ARMORY_UTXSVR_PORT_MAINNET = 6590
ARMORY_UTXSVR_PORT_TESTNET = 6591
QUOTE_ASSETS = ['BTC', 'XBTC', 'XCP'] # define the priority for quote asset
MARKET_LIST_QUOTE_ASSETS = ['XCP', 'XBTC', 'BTC'] # define the order in the market list | mit | -7,548,761,881,956,013,000 | 36.84375 | 110 | 0.746281 | false | 2.99505 | false | false | false |
SergeySatskiy/codimension | codimension/editor/linenomargin.py | 1 | 5174 | # -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2017 Sergey Satskiy <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# The original code is taken from qutepart project and adopted for Codimension
# See https://github.com/andreikop/qutepart
#
"""Line numbers margin"""
from ui.qt import QWidget, QPainter, Qt
from qutepart.margins import MarginBase
from utils.misc import extendInstance
from utils.globals import GlobalData
from utils.colorfont import getZoomedMarginFont
class CDMLineNumberMargin(QWidget):
"""Line number area widget"""
_LEFT_MARGIN = 5
_RIGHT_MARGIN = 3
def __init__(self, parent):
QWidget.__init__(self, parent)
extendInstance(self, MarginBase)
MarginBase.__init__(self, parent, 'cdm_line_number_margin', 0)
self.__bgColor = GlobalData().skin['marginPaper']
self.__fgColor = GlobalData().skin['marginColor']
self.__width = self.__calculateWidth()
self.onTextZoomChanged()
# The width needs to be re-calculated when the margin is drawn the
# first time. The problem is that if the widget is not on the screen
# then the font metrics are not calculated properly and thus the width
# is not shown right. What I observed is an offset up to 2 pixels.
self.__firstTime = True
self._qpart.blockCountChanged.connect(self.__updateWidth)
# Arguments: newBlockCount
def __updateWidth(self, _=None):
"""Updates the margin width"""
newWidth = self.__calculateWidth()
if newWidth != self.__width:
self.__width = newWidth
self._qpart.updateViewport()
def paintEvent(self, event):
"""Paints the margin"""
if self.__firstTime:
self.__updateWidth()
self.__firstTime = False
painter = QPainter(self)
painter.fillRect(event.rect(), self.__bgColor)
painter.setPen(self.__fgColor)
block = self._qpart.firstVisibleBlock()
blockNumber = block.blockNumber()
top = int(self._qpart.blockBoundingGeometry(block).
translated(self._qpart.contentOffset()).top())
bottom = top + int(self._qpart.blockBoundingRect(block).height())
boundingRect = self._qpart.blockBoundingRect(block)
availableWidth = self.__width - self._RIGHT_MARGIN - self._LEFT_MARGIN
# The margin font could be smaller than the main area font
topShift = int((self._qpart.fontMetrics().height() -
self.fontMetrics().height()) / 2)
if topShift < 0:
topShift = 0
availableHeight = self._qpart.fontMetrics().height()
while block.isValid() and top <= event.rect().bottom():
if block.isVisible() and bottom >= event.rect().top():
number = str(blockNumber + 1)
painter.drawText(self._LEFT_MARGIN, top + topShift,
availableWidth,
availableHeight,
Qt.AlignRight, number)
block = block.next()
boundingRect = self._qpart.blockBoundingRect(block)
top = bottom
bottom = top + int(boundingRect.height())
blockNumber += 1
def __calculateWidth(self):
"""Calculates the margin width"""
digits = len(str(max(1, self._qpart.blockCount())))
digitsWidth = self.fontMetrics().width('9') * digits
return self._LEFT_MARGIN + digitsWidth + self._RIGHT_MARGIN
def width(self):
"""Desired width. Includes text and margins"""
return self.__width
def setFont(self, font):
"""Overloaded to adjust the width if needed"""
QWidget.setFont(self, font)
self.__updateWidth()
def setBackgroundColor(self, color):
"""Sets the new background color"""
if self.__bgColor != color:
self.__bgColor = color
self.update()
def setForegroundColor(self, color):
"""Sets the new foreground color"""
if self.__fgColor != color:
self.__fgColor = color
self.update()
def onTextZoomChanged(self):
"""Triggered when a zoom has been changed"""
self.setFont(getZoomedMarginFont())
def onClose(self):
"""The editor is going to be closed"""
self._qpart.blockCountChanged.disconnect(self.__updateWidth)
# Release the editor reference
self._qpart = None
| gpl-3.0 | -2,211,287,564,557,201,400 | 34.682759 | 78 | 0.626208 | false | 4.132588 | false | false | false |
ssut/py-googletrans | googletrans/utils.py | 1 | 1945 | """A conversion module for googletrans"""
import json
import re
def build_params(query, src, dest, token, override):
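    """Assemble the query parameters for the translate web endpoint.
    Entries in ``override`` (if given) replace the defaults built here.
    """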
params = {
'client': 'webapp',
'sl': src,
'tl': dest,
'hl': dest,
'dt': ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'],
'ie': 'UTF-8',
'oe': 'UTF-8',
'otf': 1,
'ssel': 0,
'tsel': 0,
'tk': token,
'q': query,
}
if override is not None:
for key, value in get_items(override):
params[key] = value
return params
def legacy_format_json(original):
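    """Turn the quasi-JSON returned by the legacy endpoint into valid JSON:
    empty array slots (',,' and '[,') become explicit nulls, while the
    original double-quoted substrings are restored unchanged afterwards.
    """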
# save state
states = []
text = original
# save position for double-quoted texts
for i, pos in enumerate(re.finditer('"', text)):
# pos.start() is a double-quote
p = pos.start() + 1
if i % 2 == 0:
nxt = text.find('"', p)
states.append((p, text[p:nxt]))
    # replace empty slots (',,' and '[,') with explicit nulls so the text parses as JSON
while text.find(',,') > -1:
text = text.replace(',,', ',null,')
while text.find('[,') > -1:
text = text.replace('[,', '[null,')
# recover state
for i, pos in enumerate(re.finditer('"', text)):
p = pos.start() + 1
if i % 2 == 0:
j = int(i / 2)
nxt = text.find('"', p)
# replacing a portion of a string
# use slicing to extract those parts of the original string to be kept
text = text[:p] + states[j][1] + text[nxt:]
converted = json.loads(text)
return converted
def get_items(dict_object):
for key in dict_object:
yield key, dict_object[key]
def format_json(original):
try:
converted = json.loads(original)
except ValueError:
converted = legacy_format_json(original)
return converted
def rshift(val, n):
"""python port for '>>>'(right shift with padding)
"""
return (val % 0x100000000) >> n
| mit | -610,993,380,705,895,800 | 23.620253 | 82 | 0.517224 | false | 3.529946 | false | false | false |
Azure/azure-storage-python | azure-storage-common/azure/storage/common/_common_conversion.py | 1 | 3229 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import base64
import hashlib
import hmac
import sys
from io import (SEEK_SET)
from dateutil.tz import tzutc
from ._error import (
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
)
from .models import (
_unicode_type,
)
if sys.version_info < (3,):
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
def _to_str(value):
return _str(value) if value is not None else None
def _int_to_str(value):
return str(int(value)) if value is not None else None
def _bool_to_str(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _to_utc_datetime(value):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _datetime_to_utc_string(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value is None:
return None
if value.tzinfo:
value = value.astimezone(tzutc())
return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _sign_string(key, string_to_sign, key_is_base64=True):
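    # HMAC-SHA256 signature of string_to_sign using key (base64-decoded first
    # unless key_is_base64 is False), returned as a base64-encoded string.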
if key_is_base64:
key = _decode_base64_to_bytes(key)
else:
if isinstance(key, _unicode_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
def _get_content_md5(data):
md5 = hashlib.md5()
if isinstance(data, bytes):
md5.update(data)
elif hasattr(data, 'read'):
pos = 0
try:
pos = data.tell()
except:
pass
for chunk in iter(lambda: data.read(4096), b""):
md5.update(chunk)
try:
data.seek(pos, SEEK_SET)
except (AttributeError, IOError):
raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
else:
raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
return base64.b64encode(md5.digest()).decode('utf-8')
def _lower(text):
return text.lower()
| mit | 2,243,570,909,125,569,300 | 24.626984 | 83 | 0.598947 | false | 3.505972 | false | false | false |
bjowac/impara-benchexec | benchexec/runexecutor.py | 2 | 39980 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import multiprocessing
import os
import resource
import signal
import subprocess
import sys
import threading
import time
from . import __version__
from . import util as util
from .cgroups import *
from . import oomhandler
from benchexec import systeminfo
read_file = util.read_file
write_file = util.write_file
_WALLTIME_LIMIT_DEFAULT_OVERHEAD = 30 # seconds more than cputime limit
_ULIMIT_DEFAULT_OVERHEAD = 30 # seconds after cgroups cputime limit
_BYTE_FACTOR = 1000 # byte in kilobyte
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'rb')
def main(argv=None):
"""
A simple command-line interface for the runexecutor module of BenchExec.
"""
if argv is None:
argv = sys.argv
# parse options
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description=
"""Execute a command with resource limits and measurements.
Command-line parameters can additionally be read from a file if file name prefixed with '@' is given as argument.
Part of BenchExec: https://github.com/dbeyer/benchexec/""")
parser.add_argument("args", nargs="+", metavar="ARG",
help='command line to run (prefix with "--" to ensure all arguments are treated correctly)')
parser.add_argument("--input", metavar="FILE",
help="name of file used as stdin for command (default: /dev/null; use - for stdin passthrough)")
parser.add_argument("--output", default="output.log", metavar="FILE",
help="name of file where command output is written")
parser.add_argument("--maxOutputSize", type=int, metavar="BYTES",
help="shrink output file to approximately this size if necessary (by removing lines from the middle of the output)")
parser.add_argument("--memlimit", type=int, metavar="BYTES",
help="memory limit in bytes")
parser.add_argument("--timelimit", type=int, metavar="SECONDS",
help="CPU time limit in seconds")
parser.add_argument("--softtimelimit", type=int, metavar="SECONDS",
help='"soft" CPU time limit in seconds (command will be send the TERM signal at this time)')
parser.add_argument("--walltimelimit", type=int, metavar="SECONDS",
help='wall time limit in seconds (default is CPU time limit plus a few seconds)')
parser.add_argument("--cores", type=util.parse_int_list, metavar="N,M-K",
help="list of CPU cores to use")
parser.add_argument("--memoryNodes", type=util.parse_int_list, metavar="N,M-K",
help="list of memory nodes to use")
parser.add_argument("--dir", metavar="DIR",
help="working directory for executing the command (default is current directory)")
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument("--debug", action="store_true",
help="show debug output")
verbosity.add_argument("--quiet", action="store_true",
help="show only warnings")
options = parser.parse_args(argv[1:])
# For integrating into some benchmarking frameworks,
# there is a DEPRECATED special mode
# where the first and only command-line argument is a serialized dict
# with additional options
env = {}
if len(options.args) == 1 and options.args[0].startswith("{"):
data = eval(options.args[0])
options.args = data["args"]
env = data.get("env", {})
options.debug = data.get("debug", options.debug)
if "maxLogfileSize" in data:
options.maxOutputSize = data["maxLogfileSize"] * _BYTE_FACTOR * _BYTE_FACTOR # MB to bytes
# setup logging
logLevel = logging.INFO
if options.debug:
logLevel = logging.DEBUG
elif options.quiet:
logLevel = logging.WARNING
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
level=logLevel)
if options.input == '-':
stdin = sys.stdin
elif options.input is not None:
if options.input == options.output:
sys.exit("Input and output files cannot be the same.")
try:
stdin = open(options.input, 'rt')
except IOError as e:
sys.exit(e)
else:
stdin = None
executor = RunExecutor()
# ensure that process gets killed on interrupt/kill signal
def signal_handler_kill(signum, frame):
executor.stop()
signal.signal(signal.SIGTERM, signal_handler_kill)
signal.signal(signal.SIGINT, signal_handler_kill)
logging.info('Starting command ' + ' '.join(options.args))
logging.info('Writing output to ' + options.output)
# actual run execution
try:
result = executor.execute_run(
args=options.args,
output_filename=options.output,
stdin=stdin,
hardtimelimit=options.timelimit,
softtimelimit=options.softtimelimit,
walltimelimit=options.walltimelimit,
cores=options.cores,
memlimit=options.memlimit,
memory_nodes=options.memoryNodes,
environments=env,
workingDir=options.dir,
maxLogfileSize=options.maxOutputSize)
finally:
if stdin:
stdin.close()
# exit_code is a special number:
# It is a 16bit int of which the lowest 7 bit are the signal number,
# and the high byte is the real exit code of the process (here 0).
exit_code = result['exitcode']
return_value = exit_code // 256
exitSignal = exit_code % 128
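    # e.g. exit_code 256 -> return value 1 and no signal; exit_code 9 -> killed by signal 9 (SIGKILL)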
def print_optional_result(key):
if key in result:
# avoid unicode literals such that the string can be parsed by Python 3.2
print(key + "=" + str(result[key]).replace("'u", ''))
# output results
print_optional_result('terminationreason')
print("exitcode=" + str(exit_code))
if (exitSignal == 0) or (return_value != 0):
print("returnvalue=" + str(return_value))
if exitSignal != 0 :
print("exitsignal=" + str(exitSignal))
print("walltime=" + str(result['walltime']) + "s")
print("cputime=" + str(result['cputime']) + "s")
print_optional_result('memory')
if 'energy' in result:
for key, value in result['energy'].items():
print("energy-{0}={1}".format(key, value))
class RunExecutor():
def __init__(self):
self.PROCESS_KILLED = False
self.SUB_PROCESSES_LOCK = threading.Lock() # needed, because we kill the process asynchronous
self.SUB_PROCESSES = set()
self._termination_reason = None
self._init_cgroups()
def _init_cgroups(self):
"""
This function initializes the cgroups for the limitations and measurements.
"""
self.cgroups = find_my_cgroups()
self.cgroups.require_subsystem(CPUACCT)
if CPUACCT not in self.cgroups:
logging.warning('Without cpuacct cgroups, cputime measurement and limit might not work correctly if subprocesses are started.')
self.cgroups.require_subsystem(FREEZER)
if FREEZER not in self.cgroups:
logging.warning('Cannot reliably kill sub-processes without freezer cgroup.')
self.cgroups.require_subsystem(MEMORY)
if MEMORY not in self.cgroups:
logging.warning('Cannot measure memory consumption without memory cgroup.')
self.cgroups.require_subsystem(CPUSET)
self.cpus = None # to indicate that we cannot limit cores
self.memory_nodes = None # to indicate that we cannot limit cores
if CPUSET in self.cgroups:
# Read available cpus/memory nodes:
try:
self.cpus = util.parse_int_list(self.cgroups.get_value(CPUSET, 'cpus'))
except ValueError as e:
logging.warning("Could not read available CPU cores from kernel: {0}".format(e.strerror))
logging.debug("List of available CPU cores is {0}.".format(self.cpus))
try:
self.memory_nodes = util.parse_int_list(self.cgroups.get_value(CPUSET, 'mems'))
except ValueError as e:
logging.warning("Could not read available memory nodes from kernel: {0}".format(e.strerror))
logging.debug("List of available memory nodes is {0}.".format(self.memory_nodes))
def _setup_cgroups(self, args, my_cpus, memlimit, memory_nodes):
"""
This method creates the CGroups for the following execution.
@param args: the command line to run, used only for logging
@param my_cpus: None or a list of the CPU cores to use
@param memlimit: None or memory limit in bytes
@param memory_nodes: None or a list of memory nodes of a NUMA system to use
@return cgroups: a map of all the necessary cgroups for the following execution.
Please add the process of the following execution to all those cgroups!
"""
# Setup cgroups, need a single call to create_cgroup() for all subsystems
subsystems = [CPUACCT, FREEZER, MEMORY]
if my_cpus is not None:
subsystems.append(CPUSET)
subsystems = [s for s in subsystems if s in self.cgroups]
cgroups = self.cgroups.create_fresh_child_cgroup(*subsystems)
logging.debug("Executing {0} in cgroups {1}.".format(args, cgroups))
# Setup cpuset cgroup if necessary to limit the CPU cores/memory nodes to be used.
if my_cpus is not None:
my_cpus_str = ','.join(map(str, my_cpus))
cgroups.set_value(CPUSET, 'cpus', my_cpus_str)
my_cpus_str = cgroups.get_value(CPUSET, 'cpus')
logging.debug('Executing {0} with cpu cores [{1}].'.format(args, my_cpus_str))
if memory_nodes is not None:
cgroups.set_value(CPUSET, 'mems', ','.join(map(str, memory_nodes)))
memory_nodesStr = cgroups.get_value(CPUSET, 'mems')
logging.debug('Executing {0} with memory nodes [{1}].'.format(args, memory_nodesStr))
# Setup memory limit
if memlimit is not None:
limit = 'limit_in_bytes'
cgroups.set_value(MEMORY, limit, memlimit)
swap_limit = 'memsw.limit_in_bytes'
# We need swap limit because otherwise the kernel just starts swapping
# out our process if the limit is reached.
# Some kernels might not have this feature,
# which is ok if there is actually no swap.
if not cgroups.has_value(MEMORY, swap_limit):
if systeminfo.has_swap():
sys.exit('Kernel misses feature for accounting swap memory, but machine has swap. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".')
else:
try:
cgroups.set_value(MEMORY, swap_limit, memlimit)
except IOError as e:
if e.errno == 95: # kernel responds with error 95 (operation unsupported) if this is disabled
sys.exit('Memory limit specified, but kernel does not allow limiting swap memory. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".')
raise e
memlimit = cgroups.get_value(MEMORY, limit)
logging.debug('Executing {0} with memory limit {1} bytes.'.format(args, memlimit))
if MEMORY in cgroups \
and not cgroups.has_value(MEMORY, 'memsw.max_usage_in_bytes') \
and systeminfo.has_swap():
logging.warning('Kernel misses feature for accounting swap memory, but machine has swap. Memory usage may be measured inaccurately. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".')
if MEMORY in cgroups:
try:
# Note that this disables swapping completely according to
# https://www.kernel.org/doc/Documentation/cgroups/memory.txt
# (unlike setting the global swappiness to 0).
# Our process might get killed because of this.
cgroups.set_value(MEMORY, 'swappiness', '0')
except IOError as e:
logging.warning('Could not disable swapping for benchmarked process: ' + str(e))
return cgroups
def _execute(self, args, output_filename, stdin, cgroups, hardtimelimit, softtimelimit, walltimelimit, myCpuCount, memlimit, environments, workingDir):
"""
This method executes the command line and waits for the termination of it.
"""
def preSubprocess():
os.setpgrp() # make subprocess to group-leader
os.nice(5) # increase niceness of subprocess
if hardtimelimit is not None:
# Also use ulimit for CPU time limit as a fallback if cgroups don't work.
if CPUACCT in cgroups:
# Use a slightly higher limit to ensure cgroups get used
# (otherwise we cannot detect the timeout properly).
ulimit = hardtimelimit + _ULIMIT_DEFAULT_OVERHEAD
else:
ulimit = hardtimelimit
resource.setrlimit(resource.RLIMIT_CPU, (ulimit, ulimit))
# put us into the cgroup(s)
pid = os.getpid()
# On some systems, cgrulesengd would move our process into other cgroups.
# We disable this behavior via libcgroup if available.
# Unfortunately, logging/printing does not seem to work here.
from ctypes import cdll
try:
libcgroup = cdll.LoadLibrary('libcgroup.so.1')
failure = libcgroup.cgroup_init()
if failure:
pass
                    #print('Could not initialize libcgroup, error {}'.format(failure))
else:
CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1
failure = libcgroup.cgroup_register_unchanged_process(pid, CGROUP_DAEMON_UNCHANGE_CHILDREN)
if failure:
pass
                        #print('Could not register process to cgrulesengd, error {}. Probably the daemon will mess up our cgroups.'.format(failure))
except OSError:
pass
#print('libcgroup is not available: {}'.format(e.strerror))
cgroups.add_task(pid)
# Setup environment:
# If keepEnv is set, start from a fresh environment, otherwise with the current one.
# keepEnv specifies variables to copy from the current environment,
# newEnv specifies variables to set to a new value,
# additionalEnv specifies variables where some value should be appended, and
# clearEnv specifies variables to delete.
runningEnv = os.environ.copy() if not environments.get("keepEnv", {}) else {}
for key, value in environments.get("keepEnv", {}).items():
if key in os.environ:
runningEnv[key] = os.environ[key]
for key, value in environments.get("newEnv", {}).items():
runningEnv[key] = value
for key, value in environments.get("additionalEnv", {}).items():
runningEnv[key] = os.environ.get(key, "") + value
for key in environments.get("clearEnv", {}).items():
runningEnv.pop(key, None)
logging.debug("Using additional environment {0}.".format(str(environments)))
# write command line into outputFile
try:
outputFile = open(output_filename, 'w') # override existing file
except IOError as e:
sys.exit(e)
outputFile.write(' '.join(args) + '\n\n\n' + '-' * 80 + '\n\n\n')
outputFile.flush()
timelimitThread = None
oomThread = None
energyBefore = util.measure_energy()
walltime_before = time.time()
p = None
try:
p = subprocess.Popen(args,
stdin=stdin,
stdout=outputFile, stderr=outputFile,
env=runningEnv, cwd=workingDir,
close_fds=True,
preexec_fn=preSubprocess)
except OSError as e:
logging.critical("OSError {0} while starting '{1}' in '{2}': {3}."
.format(e.errno, args[0], workingDir or '.', e.strerror))
return (0, 0, 0, None)
try:
with self.SUB_PROCESSES_LOCK:
self.SUB_PROCESSES.add(p)
# hard time limit with cgroups is optional (additionally enforce by ulimit)
cgroup_hardtimelimit = hardtimelimit if CPUACCT in cgroups else None
if any([cgroup_hardtimelimit, softtimelimit, walltimelimit]):
# Start a timer to periodically check timelimit
timelimitThread = _TimelimitThread(cgroups, cgroup_hardtimelimit, softtimelimit, walltimelimit, p, myCpuCount, self._set_termination_reason)
timelimitThread.start()
if memlimit is not None:
try:
oomThread = oomhandler.KillProcessOnOomThread(cgroups, p,
self._set_termination_reason)
oomThread.start()
except OSError as e:
logging.critical("OSError {0} during setup of OomEventListenerThread: {1}.".format(e.errno, e.strerror))
try:
logging.debug("waiting for: pid:{0}".format(p.pid))
pid, returnvalue, ru_child = os.wait4(p.pid, 0)
logging.debug("waiting finished: pid:{0}, retVal:{1}".format(pid, returnvalue))
except OSError as e:
returnvalue = 0
ru_child = None
if self.PROCESS_KILLED:
# OSError 4 (interrupted system call) seems always to happen if we killed the process ourselves after Ctrl+C was pressed
logging.debug("OSError {0} while waiting for termination of {1} ({2}): {3}.".format(e.errno, args[0], p.pid, e.strerror))
else:
logging.critical("OSError {0} while waiting for termination of {1} ({2}): {3}.".format(e.errno, args[0], p.pid, e.strerror))
finally:
walltime_after = time.time()
with self.SUB_PROCESSES_LOCK:
self.SUB_PROCESSES.discard(p)
if timelimitThread:
timelimitThread.cancel()
if oomThread:
oomThread.cancel()
outputFile.close() # normally subprocess closes file, we do this again
logging.debug("size of logfile '{0}': {1}".format(output_filename, str(os.path.getsize(output_filename))))
# kill all remaining processes if some managed to survive
cgroups.kill_all_tasks()
energy = util.measure_energy(energyBefore)
walltime = walltime_after - walltime_before
cputime = ru_child.ru_utime + ru_child.ru_stime if ru_child else 0
return (returnvalue, walltime, cputime, energy)
def _get_exact_measures(self, cgroups, returnvalue, walltime, cputime):
"""
This method tries to extract better measures from cgroups.
"""
cputime2 = None
if CPUACCT in cgroups:
# We want to read the value from the cgroup.
# The documentation warns about outdated values.
# So we read twice with 0.1s time difference,
# and continue reading as long as the values differ.
# This has never happened except when interrupting the script with Ctrl+C,
# but just try to be on the safe side here.
tmp = cgroups.read_cputime()
tmp2 = None
while tmp != tmp2:
time.sleep(0.1)
tmp2 = tmp
tmp = cgroups.read_cputime()
cputime2 = tmp
memUsage = None
if MEMORY in cgroups:
# This measurement reads the maximum number of bytes of RAM+Swap the process used.
# For more details, c.f. the kernel documentation:
# https://www.kernel.org/doc/Documentation/cgroups/memory.txt
memUsageFile = 'memsw.max_usage_in_bytes'
if not cgroups.has_value(MEMORY, memUsageFile):
memUsageFile = 'max_usage_in_bytes'
if not cgroups.has_value(MEMORY, memUsageFile):
logging.warning('Memory-usage is not available due to missing files.')
else:
try:
memUsage = int(cgroups.get_value(MEMORY, memUsageFile))
except IOError as e:
if e.errno == 95: # kernel responds with error 95 (operation unsupported) if this is disabled
logging.critical("Kernel does not track swap memory usage, cannot measure memory usage. "
+ "Please set swapaccount=1 on your kernel command line.")
else:
raise e
logging.debug('Run exited with code {0}, walltime={1}, cputime={2}, cgroup-cputime={3}, memory={4}'
.format(returnvalue, walltime, cputime, cputime2, memUsage))
# Usually cputime2 (measured with cgroups) seems to be 0.01s greater
# than cputime (measured with ulimit).
# Furthermore, cputime might miss some subprocesses,
# therefore we expect cputime2 to be always greater (and more correct).
# However, sometimes cputime is a little bit bigger than cputime2.
# For small values, this is probably because cputime counts since fork,
# whereas cputime2 counts only after cgroups.add_task()
# (so overhead from runexecutor is correctly excluded in cputime2).
# For large values, a difference may also indicate a problem with cgroups,
# for example another process moving our benchmarked process between cgroups,
# thus we warn if the difference is substantial and take the larger ulimit value.
if cputime2 is not None:
if cputime > 0.5 and (cputime * 0.95) > cputime2:
logging.warning('Cputime measured by wait was {0}, cputime measured by cgroup was only {1}, perhaps measurement is flawed.'.format(cputime, cputime2))
else:
cputime = cputime2
return (cputime, memUsage)
def execute_run(self, args, output_filename, stdin=None,
hardtimelimit=None, softtimelimit=None, walltimelimit=None,
cores=None, memlimit=None, memory_nodes=None,
environments={}, workingDir=None, maxLogfileSize=None):
"""
This function executes a given command with resource limits,
and writes the output to a file.
@param args: the command line to run
@param output_filename: the file where the output should be written to
@param stdin: What to uses as stdin for the process (None: /dev/null, a file descriptor, or a file object)
@param hardtimelimit: None or the CPU time in seconds after which the tool is forcefully killed.
@param softtimelimit: None or the CPU time in seconds after which the tool is sent a kill signal.
@param walltimelimit: None or the wall time in seconds after which the tool is forcefully killed (default: hardtimelimit + a few seconds)
@param cores: None or a list of the CPU cores to use
@param memlimit: None or memory limit in bytes
@param memory_nodes: None or a list of memory nodes in a NUMA system to use
@param environments: special environments for running the command
@param workingDir: None or a directory which the execution should use as working directory
@param maxLogfileSize: None or a number of bytes to which the output of the tool should be truncated approximately if there is too much output.
        @return: a dict with the measurement results: 'walltime' and 'cputime' in seconds, the 'exitcode', and, when available, 'memory' usage in bytes, 'terminationreason', and 'energy'
"""
if stdin == subprocess.PIPE:
sys.exit('Illegal value subprocess.PIPE for stdin')
elif stdin is None:
stdin = DEVNULL
if hardtimelimit is not None:
if hardtimelimit <= 0:
sys.exit("Invalid time limit {0}.".format(hardtimelimit))
if softtimelimit is not None:
if softtimelimit <= 0:
sys.exit("Invalid soft time limit {0}.".format(softtimelimit))
if hardtimelimit and (softtimelimit > hardtimelimit):
sys.exit("Soft time limit cannot be larger than the hard time limit.")
if not CPUACCT in self.cgroups:
sys.exit("Soft time limit cannot be specified without cpuacct cgroup.")
if walltimelimit is None:
if hardtimelimit is not None:
walltimelimit = hardtimelimit + _WALLTIME_LIMIT_DEFAULT_OVERHEAD
elif softtimelimit is not None:
walltimelimit = softtimelimit + _WALLTIME_LIMIT_DEFAULT_OVERHEAD
else:
if walltimelimit <= 0:
sys.exit("Invalid wall time limit {0}.".format(walltimelimit))
if cores is not None:
if self.cpus is None:
sys.exit("Cannot limit CPU cores without cpuset cgroup.")
coreCount = len(cores)
if coreCount == 0:
sys.exit("Cannot execute run without any CPU core.")
if not set(cores).issubset(self.cpus):
sys.exit("Cores {0} are not allowed to be used".format(list(set(cores).difference(self.cpus))))
else:
try:
coreCount = multiprocessing.cpu_count()
except NotImplementedError:
coreCount = 1
if memlimit is not None:
if memlimit <= 0:
sys.exit("Invalid memory limit {0}.".format(memlimit))
if not MEMORY in self.cgroups:
sys.exit("Memory limit specified, but cannot be implemented without cgroup support.")
if memory_nodes is not None:
if self.memory_nodes is None:
sys.exit("Cannot restrict memory nodes without cpuset cgroup.")
if len(memory_nodes) == 0:
sys.exit("Cannot execute run without any memory node.")
if not set(memory_nodes).issubset(self.memory_nodes):
sys.exit("Memory nodes {0} are not allowed to be used".format(list(set(memory_nodes).difference(self.memory_nodes))))
if workingDir:
if not os.path.exists(workingDir):
sys.exit("Working directory {0} does not exist.".format(workingDir))
if not os.path.isdir(workingDir):
sys.exit("Working directory {0} is not a directory.".format(workingDir))
if not os.access(workingDir, os.X_OK):
sys.exit("Permission denied for working directory {0}.".format(workingDir))
self._termination_reason = None
logging.debug("execute_run: setting up Cgroups.")
cgroups = self._setup_cgroups(args, cores, memlimit, memory_nodes)
throttle_check = _CPUThrottleCheck(cores)
swap_check = _SwapCheck()
try:
logging.debug("execute_run: executing tool.")
(exitcode, walltime, cputime, energy) = \
self._execute(args, output_filename, stdin, cgroups,
hardtimelimit, softtimelimit, walltimelimit,
coreCount, memlimit,
environments, workingDir)
logging.debug("execute_run: getting exact measures.")
(cputime, memUsage) = self._get_exact_measures(cgroups, exitcode, walltime, cputime)
finally: # always try to cleanup cgroups, even on sys.exit()
logging.debug("execute_run: cleaning up CGroups.")
cgroups.remove()
# if exception is thrown, skip the rest, otherwise perform normally
if throttle_check.has_throttled():
logging.warning('CPU throttled itself during benchmarking due to overheating. Benchmark results are unreliable!')
if swap_check.has_swapped():
logging.warning('System has swapped during benchmarking. Benchmark results are unreliable!')
_reduce_file_size_if_necessary(output_filename, maxLogfileSize)
if exitcode not in [0,1]:
logging.debug("execute_run: analysing output for crash-info.")
_get_debug_output_after_crash(output_filename)
logging.debug("execute_run: Run execution returns with code {0}, walltime={1}, cputime={2}, memory={3}, energy={4}"
.format(exitcode, walltime, cputime, memUsage, energy))
result = {'walltime': walltime,
'cputime': cputime,
'exitcode': exitcode,
}
if memUsage:
result['memory'] = memUsage
if self._termination_reason:
result['terminationreason'] = self._termination_reason
if energy:
result['energy'] = energy
return result
def _set_termination_reason(self, reason):
self._termination_reason = reason
def stop(self):
self._set_termination_reason('killed')
self.PROCESS_KILLED = True
with self.SUB_PROCESSES_LOCK:
for process in self.SUB_PROCESSES:
logging.warning('Killing process {0} forcefully.'.format(process.pid))
util.kill_process(process.pid)
def _reduce_file_size_if_necessary(fileName, maxSize):
"""
This function shrinks a file.
We remove only the middle part of a file,
the file-start and the file-end remain unchanged.
"""
if maxSize is None: return # disabled, nothing to do
fileSize = os.path.getsize(fileName)
if fileSize < (maxSize + 500): return # not necessary
logging.warning("Logfile '{0}' is too big (size {1} bytes). Removing lines.".format(fileName, fileSize))
# We partition the file into 3 parts:
# A) start: maxSize/2 bytes we want to keep
# B) middle: part we want to remove
# C) end: maxSize/2 bytes we want to keep
# Trick taken from StackOverflow:
# https://stackoverflow.com/questions/2329417/fastest-way-to-delete-a-line-from-large-file-in-python
# We open the file twice at the same time, once for reading and once for writing.
# We position the one file object at the beginning of B
# and the other at the beginning of C.
# Then we copy the content of C into B, overwriting what is there.
# Afterwards we truncate the file after A+C.
with open(fileName, 'r+b') as outputFile:
with open(fileName, 'rb') as inputFile:
# Position outputFile between A and B
outputFile.seek(maxSize // 2)
outputFile.readline() # jump to end of current line so that we truncate at line boundaries
if outputFile.tell() == fileSize:
# readline jumped to end of file because of a long line
return
outputFile.write("\n\n\nWARNING: YOUR LOGFILE WAS TOO LONG, SOME LINES IN THE MIDDLE WERE REMOVED.\n\n\n\n".encode())
# Position inputFile between B and C
inputFile.seek(-maxSize // 2, os.SEEK_END) # jump to beginning of second part we want to keep from end of file
inputFile.readline() # jump to end of current line so that we truncate at line boundaries
# Copy C over B
_copy_all_lines_from_to(inputFile, outputFile)
outputFile.truncate()
def _copy_all_lines_from_to(inputFile, outputFile):
"""
Copy all lines from an input file object to an output file object.
"""
currentLine = inputFile.readline()
while currentLine:
outputFile.write(currentLine)
currentLine = inputFile.readline()
def _get_debug_output_after_crash(output_filename):
"""
Segmentation faults and some memory failures reference a file
with more information (hs_err_pid_*). We append this file to the log.
The format that we expect is a line
"# An error report file with more information is saved as:"
and the file name of the dump file on the next line.
"""
foundDumpFile = False
with open(output_filename, 'r+') as outputFile:
for line in outputFile:
if foundDumpFile:
try:
dumpFileName = line.strip(' #\n')
outputFile.seek(0, os.SEEK_END) # jump to end of log file
with open(dumpFileName, 'r') as dumpFile:
_copy_all_lines_from_to(dumpFile, outputFile)
os.remove(dumpFileName)
except IOError as e:
logging.warning('Could not append additional segmentation fault information from {0} ({1})'.format(dumpFile, e.strerror))
break
if util.decode_to_string(line).startswith('# An error report file with more information is saved as:'):
logging.debug('Going to append error report file')
foundDumpFile = True
class _TimelimitThread(threading.Thread):
"""
Thread that periodically checks whether the given process has already
reached its timelimit. After this happens, the process is terminated.
"""
def __init__(self, cgroups, hardtimelimit, softtimelimit, walltimelimit, process, cpuCount=1,
callbackFn=lambda reason: None):
super(_TimelimitThread, self).__init__()
if hardtimelimit or softtimelimit:
assert CPUACCT in cgroups
assert walltimelimit is not None
self.daemon = True
self.cgroups = cgroups
self.timelimit = hardtimelimit or (60*60*24*365*100) # large dummy value
self.softtimelimit = softtimelimit or (60*60*24*365*100) # large dummy value
self.latestKillTime = time.time() + walltimelimit
self.cpuCount = cpuCount
self.process = process
self.callback = callbackFn
self.finished = threading.Event()
def read_cputime(self):
while True:
try:
return self.cgroups.read_cputime()
except ValueError:
# Sometimes the kernel produces strange values with linebreaks in them
time.sleep(1)
pass
def run(self):
while not self.finished.is_set():
usedCpuTime = self.read_cputime() if CPUACCT in self.cgroups else 0
remainingCpuTime = self.timelimit - usedCpuTime
remainingSoftCpuTime = self.softtimelimit - usedCpuTime
remainingWallTime = self.latestKillTime - time.time()
logging.debug("TimelimitThread for process {0}: used CPU time: {1}, remaining CPU time: {2}, remaining soft CPU time: {3}, remaining wall time: {4}."
.format(self.process.pid, usedCpuTime, remainingCpuTime, remainingSoftCpuTime, remainingWallTime))
if remainingCpuTime <= 0:
self.callback('cputime')
logging.debug('Killing process {0} due to CPU time timeout.'.format(self.process.pid))
util.kill_process(self.process.pid)
self.finished.set()
return
if remainingWallTime <= 0:
self.callback('walltime')
logging.warning('Killing process {0} due to wall time timeout.'.format(self.process.pid))
util.kill_process(self.process.pid)
self.finished.set()
return
if remainingSoftCpuTime <= 0:
self.callback('cputime-soft')
# soft time limit violated, ask process to terminate
util.kill_process(self.process.pid, signal.SIGTERM)
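                # raise the soft limit so SIGTERM is sent only once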
self.softtimelimit = self.timelimit
remainingTime = min(remainingCpuTime/self.cpuCount,
remainingSoftCpuTime/self.cpuCount,
remainingWallTime)
self.finished.wait(remainingTime + 1)
def cancel(self):
self.finished.set()
class _CPUThrottleCheck(object):
"""
Class for checking whether the CPU has throttled during some time period.
"""
def __init__(self, cores=None):
"""
Create an instance that monitors the given list of cores (or all CPUs).
"""
self.cpu_throttle_count = {}
cpu_pattern = '[{0}]'.format(','.join(map(str, cores))) if cores else '*'
for file in glob.glob('/sys/devices/system/cpu/cpu{}/thermal_throttle/*_throttle_count'.format(cpu_pattern)):
try:
self.cpu_throttle_count[file] = int(util.read_file(file))
except Exception as e:
logging.warning('Cannot read throttling count of CPU from kernel: ' + str(e))
def has_throttled(self):
"""
Check whether any of the CPU cores monitored by this instance has
throttled since this instance was created.
@return a boolean value
"""
for file, value in self.cpu_throttle_count.items():
try:
new_value = int(util.read_file(file))
if new_value > value:
return True
except Exception as e:
logging.warning('Cannot read throttling count of CPU from kernel: ' + str(e))
return False
class _SwapCheck(object):
"""
Class for checking whether the system has swapped during some period.
"""
def __init__(self):
self.swap_count = self._read_swap_count()
def _read_swap_count(self):
try:
return dict((k, int(v)) for k, v
in util.read_key_value_pairs_from_file('/proc/vmstat')
if k in ['pswpin', 'pswpout'])
except Exception as e:
logging.warning('Cannot read swap count from kernel: ' + str(e))
def has_swapped(self):
"""
        Check whether any swapping occurred on this system since this instance was created.
@return a boolean value
"""
new_values = self._read_swap_count()
for key, new_value in new_values.items():
old_value = self.swap_count.get(key, 0)
if new_value > old_value:
return True
return False
if __name__ == '__main__':
main()
| apache-2.0 | -6,594,898,723,726,744,000 | 44.073281 | 238 | 0.605178 | false | 4.320761 | false | false | false |
bjornwallner/proq2-server | apps/modeller9v8/modlib/modeller/automodel/refine.py | 1 | 2738 | """Functions to refine a selection to varying degrees. These functions are
usually used by setting the md_level member of an automodel or loop model
object."""
def very_fast(atmsel, actions):
"""Very fast MD annealing"""
# at T=1000, max_atom_shift for 4fs is cca 0.15 A.
refine(atmsel, actions, cap=0.39, timestep=4.0,
equil_its=50, equil_equil=10,
equil_temps=(150.0, 400.0, 1000.0),
sampl_its=300, sampl_equil=100,
sampl_temps=(1000.0, 800.0, 500.0, 300.0))
def fast(atmsel, actions):
"""Fast MD annealing"""
refine(atmsel, actions, cap=0.39, timestep=4.0,
equil_its=100, equil_equil=20,
equil_temps=(150.0, 250.0, 500.0, 1000.0),
sampl_its=400, sampl_equil=100,
sampl_temps=(1000.0, 800.0, 500.0, 300.0))
def slow(atmsel, actions):
"""Slow MD annealing"""
refine(atmsel, actions, cap=0.39, timestep=4.0,
equil_its=200, equil_equil=20,
equil_temps=(150.0, 250.0, 400.0, 700.0, 1000.0),
sampl_its=600, sampl_equil=200,
sampl_temps=(1000.0, 800.0, 600.0, 500.0, 400.0, 300.0))
def very_slow(atmsel, actions):
"""Very slow MD annealing"""
refine(atmsel, actions, cap=0.39, timestep=4.0,
equil_its=300, equil_equil=20,
equil_temps=(150.0, 250.0, 400.0, 700.0, 1000.0, 1300.0),
sampl_its=1000, sampl_equil=200,
sampl_temps=(1300.0, 1000.0, 800.0, 600.0, 500.0, 430.0, 370.0,
320.0, 300.0))
def slow_large(atmsel, actions):
"""Very slow/large dt MD annealing"""
refine(atmsel, actions, cap=0.39, timestep=10.0,
equil_its=200, equil_equil=20,
equil_temps=(150.0, 250.0, 400.0, 700.0, 1000.0, 1500.0),
sampl_its=2000, sampl_equil=200,
sampl_temps=(1500.0, 1000.0, 800.0, 600.0, 500.0, 400.0, 300.0))
def refine(atmsel, actions, cap, timestep, equil_its, equil_equil,
equil_temps, sampl_its, sampl_equil, sampl_temps, **args):
from modeller.optimizers import molecular_dynamics
mdl = atmsel.get_model()
md = molecular_dynamics(cap_atom_shift=cap, md_time_step=timestep,
md_return='FINAL', output=mdl.optimize_output,
actions=actions, **args)
init_vel = True
# First run for equilibration, the second for sampling:
for (its, equil, temps) in ((equil_its, equil_equil, equil_temps),
(sampl_its, sampl_equil, sampl_temps)):
for temp in temps:
md.optimize(atmsel, max_iterations=its, equilibrate=equil,
temperature=temp, init_velocities=init_vel)
init_vel=False
| gpl-3.0 | -6,443,310,088,690,744,000 | 39.264706 | 76 | 0.587655 | false | 2.771255 | false | false | false |
agamdua/github3.py | github3/gists/history.py | 10 | 1768 | # -*- coding: utf-8 -*-
"""
github3.gists.history
---------------------
Module containing the logic for the GistHistory object.
"""
from __future__ import unicode_literals
from ..models import GitHubCore
from ..users import User
class GistHistory(GitHubCore):
"""Thisobject represents one version (or revision) of a gist.
Two history instances can be checked like so::
h1 == h2
h1 != h2
And is equivalent to::
h1.version == h2.version
h1.version != h2.version
"""
def _update_attributes(self, history):
self._api = history.get('url', '')
#: SHA of the commit associated with this version
self.version = history.get('version', '')
#: user who made these changes
self.user = User(history.get('user') or {}, self)
#: dict containing the change status; see also: deletions, additions,
#: total
self.change_status = history.get('change_status', {})
#: number of additions made
self.additions = self.change_status.get('additions', 0)
#: number of deletions made
self.deletions = self.change_status.get('deletions', 0)
#: total number of changes made
self.total = self.change_status.get('total', 0)
#: datetime representation of when the commit was made
self.committed_at = self._strptime(history.get('committed_at'))
def _repr(self):
return '<Gist History [{0}]>'.format(self.version)
def get_gist(self):
"""Retrieve the gist at this version.
:returns: :class:`Gist <github3.gists.gist.Gist>`
"""
from .gist import Gist
json = self._json(self._get(self._api), 200)
return self._instance_or_null(Gist, json)
| bsd-3-clause | -3,721,514,964,649,648,000 | 25.38806 | 77 | 0.605769 | false | 3.911504 | false | false | false |
chenjiandongx/pyecharts | test/test_radar.py | 1 | 3381 | from unittest.mock import patch
from nose.tools import assert_equal, assert_in
from pyecharts import options as opts
from pyecharts.charts import Radar
v1 = [(4300, 10000, 28000, 35000, 50000, 19000)]
v2 = [(5000, 14000, 28000, 31000, 42000, 21000)]
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_radar_base(fake_writer):
c = (
Radar()
.add_schema(
schema=[
opts.RadarIndicatorItem(name="销售", max_=6500),
opts.RadarIndicatorItem(name="管理", max_=16000),
opts.RadarIndicatorItem(name="信息技术", max_=30000),
opts.RadarIndicatorItem(name="客服", max_=38000),
opts.RadarIndicatorItem(name="研发", max_=52000),
opts.RadarIndicatorItem(name="市场", max_=25000),
]
)
.add("预算分配", v1)
.add("实际开销", v2)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
)
c.render()
_, content = fake_writer.call_args[0]
assert_equal(c.theme, "white")
assert_equal(c.renderer, "canvas")
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_radar_item_base(fake_writer):
series_names = ["预算分配", "实际开销"]
series_data = [
[4300, 10000, 28000, 35000, 50000, 19000],
[5000, 14000, 28000, 31000, 42000, 21000],
]
radar_item = [
opts.RadarItem(name=d[0], value=d[1])
for d in list(zip(series_names, series_data))
]
c = (
Radar()
.add_schema(
schema=[
opts.RadarIndicatorItem(name="销售", max_=6500),
opts.RadarIndicatorItem(name="管理", max_=16000),
opts.RadarIndicatorItem(name="信息技术", max_=30000),
opts.RadarIndicatorItem(name="客服", max_=38000),
opts.RadarIndicatorItem(name="研发", max_=52000),
opts.RadarIndicatorItem(name="市场", max_=25000),
]
)
.add("", radar_item)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(title_opts=opts.TitleOpts(title="Radar-基本示例"))
)
c.render()
_, content = fake_writer.call_args[0]
assert_equal(c.theme, "white")
assert_equal(c.renderer, "canvas")
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_radar_options(fake_writer):
c = (
Radar()
.add_schema(
schema=[
opts.RadarIndicatorItem(name="销售", max_=6500),
opts.RadarIndicatorItem(name="管理", max_=16000),
opts.RadarIndicatorItem(name="信息技术", max_=30000),
opts.RadarIndicatorItem(name="客服", max_=38000),
opts.RadarIndicatorItem(name="研发", max_=52000),
opts.RadarIndicatorItem(name="市场", max_=25000),
],
radiusaxis_opts=opts.RadiusAxisOpts(),
angleaxis_opts=opts.AngleAxisOpts(),
polar_opts=opts.PolarOpts(),
)
.add("预算分配", v1)
.add("实际开销", v2)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
)
c.render()
_, content = fake_writer.call_args[0]
assert_in("radiusAxis", content)
assert_in("angleAxis", content)
assert_in("polar", content)
| mit | -5,761,579,638,173,765,000 | 33.115789 | 71 | 0.572663 | false | 3.012082 | false | false | false |
luwei0917/awsemmd_script | script/GetCACoordinatesFromPDB.py | 1 | 1517 | #!/usr/bin/env python2
import sys
def three2one(prot):
""" translate a protein sequence from 3 to 1 letter code"""
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG" : "R", "LYS" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP" : "D", "GLU" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C" }
newprot = ""
for a in prot:
newprot += code.get(a, "?")
return newprot
if len(sys.argv)!=3:
print "\nGetCACoordinatesFromPDB.py PDB_Id Output_file\n"
exit()
from Bio.PDB.PDBParser import PDBParser
p = PDBParser(PERMISSIVE=1,QUIET=True)
struct_id = sys.argv[1]
filename = struct_id + ".pdb"
output_fn = ""
output_fn = sys.argv[2]
out = open( output_fn, 'w' )
xyz_CA = []
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
for ch in chains:
    sequence = []
for res in ch:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
            sequence.append(res.get_resname())
xyz_CA.append(res['CA'].get_coord())
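# Output format: the first line holds the number of CA atoms, followed by one
# tab-separated x, y, z line per atom (coordinates rounded to 5 decimals).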
out.write( str(len(xyz_CA)) )
out.write('\n')
for ixyz in xyz_CA:
out.write( str(round(ixyz[0], 5)) )
out.write('\t')
out.write( str(round(ixyz[1], 5)) )
out.write('\t')
out.write( str(round(ixyz[2], 5)) )
out.write('\n')
| mit | -5,382,920,857,065,637,000 | 25.155172 | 101 | 0.526038 | false | 2.35559 | false | false | false |
grengojbo/satchmo | satchmo/apps/satchmo_ext/newsletter/views.py | 13 | 2078 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _, ugettext
from satchmo_ext.newsletter.forms import NewsletterForm
def add_subscription(request, template="newsletter/subscribe_form.html",
result_template="newsletter/update_results.html", form=NewsletterForm):
"""Add a subscription and return the results in the requested template."""
return _update(request, True, template, result_template, form=form)
def remove_subscription(request, template="newsletter/unsubscribe_form.html",
result_template="newsletter/update_results.html", form=NewsletterForm):
"""Remove a subscription and return the results in the requested template."""
return _update(request, False, template, result_template, form=form)
def update_subscription(request, template="newsletter/update_form.html",
result_template="newsletter/update_results.html", form=NewsletterForm):
"""Add a subscription and return the results in the requested template."""
return _update(request, 'FORM', template, result_template, form=form)
def _update(request, state, template, result_template, form=NewsletterForm):
"""Add a subscription and return the results in the requested template."""
success = False
result = ""
if request.method == "POST":
workform = form(request.POST)
if workform.is_valid():
if state == 'FORM':
# save with subcription status from form
result = workform.save()
else:
# save with subscription status explicitly set
result = workform.save(state)
success = True
else:
result = ugettext('Error, not valid.')
else:
workform = form()
ctx = RequestContext(request, {
'result' : result,
'form' : workform
})
if success:
return render_to_response(result_template, context_instance=ctx)
else:
return render_to_response(template, context_instance=ctx)
| bsd-3-clause | -5,097,847,611,739,750,000 | 38.207547 | 81 | 0.684312 | false | 4.365546 | false | false | false |
gkc1000/pyscf | examples/scf/40-customizing_hamiltonian.py | 2 | 1494 | #!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
import numpy
from pyscf import gto, scf, ao2mo
'''
Customizing Hamiltonian for SCF module.
Three steps to define Hamiltonian for SCF:
1. Specify the number of electrons. (Note mole object must be "built" before doing this step)
2. Overwrite three attributes of scf object
.get_hcore
.get_ovlp
._eri
3. Specify initial guess (to overwrite the default atomic density initial guess)
Note: you will see a warning message on the screen:
Overwritten attributes get_ovlp get_hcore of <class 'pyscf.scf.hf.RHF'>
'''
mol = gto.M()
n = 10
mol.nelectron = n
mf = scf.RHF(mol)
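# The integrals below define a 10-site Hubbard ring: nearest-neighbour
# hopping t = -1 with periodic boundary conditions and on-site repulsion U = 4.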
h1 = numpy.zeros((n,n))
for i in range(n-1):
h1[i,i+1] = h1[i+1,i] = -1.0
h1[n-1,0] = h1[0,n-1] = -1.0 # PBC
eri = numpy.zeros((n,n,n,n))
for i in range(n):
eri[i,i,i,i] = 4.0
mf.get_hcore = lambda *args: h1
mf.get_ovlp = lambda *args: numpy.eye(n)
# ao2mo.restore(8, eri, n) to get 8-fold permutation symmetry of the integrals
# ._eri only supports the two-electron integrals in 4-fold or 8-fold symmetry.
mf._eri = ao2mo.restore(8, eri, n)
mf.kernel()
# If you need to run post-HF calculations based on the customized Hamiltonian,
# setting incore_anyway=True to ensure the customized Hamiltonian (the _eri
# attribute) to be used. Without this parameter, some post-HF method
# (particularly in the MO integral transformation) may ignore the customized
# Hamiltonian if memory is not enough.
mol.incore_anyway = True
| apache-2.0 | 6,658,539,606,866,120,000 | 27.730769 | 93 | 0.706158 | false | 2.682226 | false | false | false |
t-hey/QGIS-Original | python/plugins/processing/tools/vector.py | 9 | 3478 | # -*- coding: utf-8 -*-
"""
***************************************************************************
vector.py
---------------------
Date : February 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'February 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import csv
from qgis.core import (QgsWkbTypes,
QgsFeatureRequest)
def resolveFieldIndex(source, attr):
"""This method takes an object and returns the index field it
refers to in a layer. If the passed object is an integer, it
returns the same integer value. If the passed value is not an
integer, it returns the field whose name is the string
representation of the passed object.
    It raises an exception if the int value is larger than the number
of fields, or if the passed object does not correspond to any
field.
"""
if isinstance(attr, int):
return attr
else:
index = source.fields().lookupField(attr)
if index == -1:
raise ValueError('Wrong field name')
return index
def values(source, *attributes):
"""Returns the values in the attributes table of a feature source,
for the passed fields.
    Fields can be passed as field names or as zero-based field indices.
    Returns a dict of lists, with the passed field identifiers as keys.
    It considers the existing selection.
    It assumes fields are numeric or contain values that can be parsed
    to a number.
"""
ret = {}
indices = []
attr_keys = {}
for attr in attributes:
index = resolveFieldIndex(source, attr)
indices.append(index)
attr_keys[index] = attr
# use an optimised feature request
request = QgsFeatureRequest().setSubsetOfAttributes(indices).setFlags(QgsFeatureRequest.NoGeometry)
for feature in source.getFeatures(request):
for i in indices:
# convert attribute value to number
try:
v = float(feature.attributes()[i])
except:
v = None
k = attr_keys[i]
if k in ret:
ret[k].append(v)
else:
ret[k] = [v]
return ret
def checkMinDistance(point, index, distance, points):
"""Check if distance from given point to all other points is greater
than given value.
"""
if distance == 0:
return True
neighbors = index.nearestNeighbor(point, 1)
if len(neighbors) == 0:
return True
if neighbors[0] in points:
np = points[neighbors[0]]
if np.sqrDist(point) < (distance * distance):
return False
return True
| gpl-2.0 | 6,635,530,790,690,708,000 | 30.908257 | 103 | 0.542841 | false | 4.712737 | false | false | false |
garnaat/skew | skew/resources/aws/elasticbeanstalk.py | 2 | 1293 | # Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from skew.resources.aws import AWSResource
class Application(AWSResource):
class Meta(object):
service = 'elasticbeanstalk'
type = 'application'
enum_spec = ('describe_applications', 'Applications', None)
detail_spec = None
id = 'ApplicationName'
filter_name = None
filter_type = None
name = 'ApplicationName'
date = None
dimension = None
class Environment(AWSResource):
class Meta(object):
service = 'elasticbeanstalk'
type = 'environment'
enum_spec = ('describe_environments', 'Environments', None)
detail_spec = None
id = 'EnvironmentName'
filter_name = None
filter_type = None
name = 'EnvironmentName'
date = None
dimension = None | apache-2.0 | -3,479,674,481,035,610,600 | 30.560976 | 72 | 0.658159 | false | 4.324415 | false | false | false |
llthelinkll/opencv_project_1 | VS2015/PowYingChubGame/PowYingChubGame/PowYingChubGame.py | 2 | 3398 | import sys
#from gameDemo import video
#Change the following line
sys.path.append('C:\opencv\sources\samples\python2')
import numpy as np
import cv2
#import video
import socket
import time
import math
UDP_IP = "127.0.0.1"
UDP_PORT = 5005
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
#print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
cap = cv2.VideoCapture(0)
_ , initImg = cap.read()
while(cap.isOpened()):
ret, img = cap.read()
#img -= initImg;
cv2.rectangle(img,(300,300),(100,100),(0,255,0),0)
crop_img = img[100:300, 100:300]
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
_, thresh1 = cv2.threshold(blurred, 63, 255,
cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow('Thresholded', thresh1)
_ , contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, \
cv2.CHAIN_APPROX_NONE)
max_area = -1
for i in range(len(contours)):
cnt=contours[i]
area = cv2.contourArea(cnt)
if(area>max_area):
max_area=area
ci=i
cnt=contours[ci]
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(crop_img,(x,y),(x+w,y+h),(0,0,255),0)
hull = cv2.convexHull(cnt)
drawing = np.zeros(crop_img.shape,np.uint8)
cv2.drawContours(drawing,[cnt],0,(0,255,0),0)
cv2.drawContours(drawing,[hull],0,(0,0,255),0)
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
count_defects = 0
cv2.drawContours(thresh1, contours, -1, (0,255,0), 3)
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
if angle <= 90:
count_defects += 1
cv2.circle(crop_img,far,1,[0,0,255],-1)
#dist = cv2.pointPolygonTest(cnt,far,True)
cv2.line(crop_img,start,end,[0,255,0],2)
#cv2.circle(crop_img,far,5,[0,0,255],-1)
if count_defects == 1:
#print w*h
if w * h > 14000 and w*h < 24000:
cv2.putText(img,"Rock", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
sock.sendto("Rock" , (UDP_IP, UDP_PORT))
elif count_defects == 2:
str = "Scisor"
print str
cv2.putText(img, str, (5,50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
sock.sendto("Scisor" , (UDP_IP, UDP_PORT))
elif count_defects == 3:
cv2.putText(img,"Noting3", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 4:
cv2.putText(img,"Nothing4", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
else:
cv2.putText(img,"Paper", (50,50),\
cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
sock.sendto("Paper" , (UDP_IP, UDP_PORT))
#cv2.imshow('drawing', drawing)
#cv2.imshow('end', crop_img)
cv2.imshow('Gesture', img)
all_img = np.hstack((drawing, crop_img))
cv2.imshow('Contours', all_img)
k = cv2.waitKey(10)
if k == 27:
break | mit | -5,918,979,132,122,378,000 | 33.663265 | 78 | 0.572733 | false | 2.612308 | false | false | false |
Kinnay/NintendoClients | nintendo/nex/aauser.py | 1 | 5836 |
# This file was generated automatically by generate_protocols.py
from nintendo.nex import notification, rmc, common, streams
import logging
logger = logging.getLogger(__name__)
class ApplicationInfo(common.Structure):
def __init__(self):
super().__init__()
self.title_id = None
self.title_version = None
def check_required(self, settings, version):
for field in ['title_id', 'title_version']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream, version):
self.title_id = stream.u64()
self.title_version = stream.u16()
def save(self, stream, version):
self.check_required(stream.settings, version)
stream.u64(self.title_id)
stream.u16(self.title_version)
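# Hedged illustration (not part of the generated protocol code): ApplicationInfo
# is a two-field structure; both fields must be assigned before it can be saved.
# The title id and version below are made-up placeholder values.
def _example_application_info():
	info = ApplicationInfo()
	info.title_id = 0x0005000000000000    # placeholder 64-bit title id
	info.title_version = 0                # placeholder 16-bit title version
	info.check_required(None, None)       # raises ValueError if a field is None
	return info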
class AAUserProtocol:
METHOD_REGISTER_APPLICATION = 1
METHOD_UNREGISTER_APPLICATION = 2
METHOD_SET_APPLICATION_INFO = 3
METHOD_GET_APPLICATION_INFO = 4
PROTOCOL_ID = 0x7B
class AAUserClient(AAUserProtocol):
def __init__(self, client):
self.settings = client.settings
self.client = client
async def register_application(self, title_id):
logger.info("AAUserClient.register_application()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u64(title_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_REGISTER_APPLICATION, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("AAUserClient.register_application -> done")
async def unregister_application(self, title_id):
logger.info("AAUserClient.unregister_application()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u64(title_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_UNREGISTER_APPLICATION, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("AAUserClient.unregister_application -> done")
async def set_application_info(self, application_info):
logger.info("AAUserClient.set_application_info()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.list(application_info, stream.add)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_SET_APPLICATION_INFO, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("AAUserClient.set_application_info -> done")
async def get_application_info(self):
logger.info("AAUserClient.get_application_info()")
#--- request ---
stream = streams.StreamOut(self.settings)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_APPLICATION_INFO, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
info = stream.list(ApplicationInfo)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("AAUserClient.get_application_info -> done")
return info
class AAUserServer(AAUserProtocol):
def __init__(self):
self.methods = {
self.METHOD_REGISTER_APPLICATION: self.handle_register_application,
self.METHOD_UNREGISTER_APPLICATION: self.handle_unregister_application,
self.METHOD_SET_APPLICATION_INFO: self.handle_set_application_info,
self.METHOD_GET_APPLICATION_INFO: self.handle_get_application_info,
}
async def logout(self, client):
pass
async def handle(self, client, method_id, input, output):
if method_id in self.methods:
await self.methods[method_id](client, input, output)
else:
logger.warning("Unknown method called on AAUserServer: %i", method_id)
raise common.RMCError("Core::NotImplemented")
async def handle_register_application(self, client, input, output):
logger.info("AAUserServer.register_application()")
#--- request ---
title_id = input.u64()
await self.register_application(client, title_id)
async def handle_unregister_application(self, client, input, output):
logger.info("AAUserServer.unregister_application()")
#--- request ---
title_id = input.u64()
await self.unregister_application(client, title_id)
async def handle_set_application_info(self, client, input, output):
logger.info("AAUserServer.set_application_info()")
#--- request ---
application_info = input.list(ApplicationInfo)
await self.set_application_info(client, application_info)
async def handle_get_application_info(self, client, input, output):
logger.info("AAUserServer.get_application_info()")
#--- request ---
response = await self.get_application_info(client)
#--- response ---
if not isinstance(response, list):
raise RuntimeError("Expected list, got %s" %response.__class__.__name__)
output.list(response, output.add)
async def register_application(self, *args):
logger.warning("AAUserServer.register_application not implemented")
raise common.RMCError("Core::NotImplemented")
async def unregister_application(self, *args):
logger.warning("AAUserServer.unregister_application not implemented")
raise common.RMCError("Core::NotImplemented")
async def set_application_info(self, *args):
logger.warning("AAUserServer.set_application_info not implemented")
raise common.RMCError("Core::NotImplemented")
async def get_application_info(self, *args):
logger.warning("AAUserServer.get_application_info not implemented")
raise common.RMCError("Core::NotImplemented")
| mit | 2,790,017,988,029,408,000 | 35.248447 | 125 | 0.726182 | false | 3.443068 | false | false | false |
wanji/hdidx | hdidx/util.py | 1 | 5092 | #!/usr/bin/env python
# coding: utf-8
#########################################################################
#########################################################################
"""
File Name: util.py
Author: Wan Ji
E-mail: [email protected]
Created on: Tue Nov 4 09:38:24 2014 CST
"""
DESCRIPTION = """
"""
import os
import logging
# distance
from distance import distFunc
import bottleneck
from scipy.io import loadmat
import numpy as np
# profiling
import time
DO_NORM = {
"cosine": True,
"euclidean": False,
}
class HDIdxException(Exception):
"""
HDIdx Exception
"""
"""
Math
"""
def eigs(X, npca):
l, pc = np.linalg.eig(X)
idx = l.argsort()[::-1][:npca]
return pc[:, idx], l[idx]
"""
KMeans
"""
try:
import cv2
def kmeans(vs, ks, niter):
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
niter, 0.01)
flags = cv2.KMEANS_RANDOM_CENTERS
compactness, labels, centers = cv2.kmeans(
vs, ks, criteria, 1, flags)
return centers
except ImportError:
logging.warn("Cannot find OpenCV, using `kmeans` from SciPy instead.")
from scipy.cluster import vq
def kmeans(vs, ks, niter):
centers, labels = vq.kmeans2(vs, ks, niter)
return centers
# finding nearest neighbor
def pq_kmeans_assign(centroids, query):
dist = distFunc['euclidean'](centroids, query)
return dist.argmin(1)
def pq_knn(dist, topk):
ids = bottleneck.argpartsort(dist, topk)[:topk]
ids = ids[dist[ids].argsort()]
return ids
# Profiling
START_TIME = 0
def tic():
global START_TIME
START_TIME = time.time()
def toc():
return time.time() - START_TIME
class Profiler(object):
""" Profiling the running time of code snippet.
"""
class Record(object):
__slots__ = ["name", "time", "count", "t0"]
def __init__(self, name):
self.name = name
self.reset()
def reset(self):
self.time = 0.0
self.count = 0
self.t0 = None
def start(self):
self.t0 = time.time()
def end(self):
self.time += (time.time() - self.t0)
self.count += 1
self.t0 = None
def average(self):
return self.time / self.count if self.count > 0 else 0
__slots__ = ["records",
"cur_record",
"name_stack"]
def __init__(self):
self.reset()
def start(self, name):
"""
Start the timer.
`name` is the description of the current code snippet.
"""
if name not in self.records:
self.records[name] = Profiler.Record(name)
self.cur_record = self.records[name]
self.name_stack.append(name)
self.cur_record.start()
def end(self, name=None):
"""
Calculate the time costs of the current code snippet.
"""
if name is not None and name != self.name_stack[-1]:
raise Exception("name '%s' should be '%s'" %
(name, self.name_stack[-1]))
self.cur_record.end()
self.name_stack.pop()
def sum_overall(self):
"""
Return the sum of overall time costs for each code snippet.
"""
return sum([rec.time for name, rec in self.records.iteritems()])
def sum_average(self):
"""
Return the sum of average time costs for each code snippet.
"""
return sum([rec.average() for name, rec in self.records.iteritems()])
def str_overall(self, fmt="%s: %.3fms"):
"""
Return the overall time costs for each code snippet as string.
"""
return ";\t".join([fmt % (name, rec.time * 1000)
for name, rec in self.records.iteritems()])
def str_average(self, fmt="%s: %.3fms"):
"""
Return the average time costs for each code snippet as string.
"""
return ";\t".join([fmt % (name, rec.average() * 1000)
for name, rec in self.records.iteritems()])
def reset(self):
"""
Reset the time costs and counters.
"""
self.records = {}
self.name_stack = []
self.cur_record = None
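# Hedged usage sketch (not part of the original module): timing a labelled
# code section with the Profiler above; the section name is arbitrary.
def _example_profiler():
    prof = Profiler()
    for _ in range(10):
        prof.start('encode')
        time.sleep(0.001)         # stand-in for the real work being measured
        prof.end('encode')
    return prof.str_average()     # e.g. "encode: 1.0xxms" (approximate)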
def normalize(feat, ln=2):
if ln == 1:
return feat / feat.sum(1).reshape(-1, 1)
elif ln > 0:
return feat / ((feat**ln).sum(1)**(1.0/ln)).reshape(-1, 1)
else:
raise Exception("Unsupported norm: %d" % ln)
def tokey(item):
"""
Key function for sorting filenames
"""
return int(item.split("_")[-1].split(".")[0])
class Reader(object):
def __init__(self, featdir):
self.v_fname = sorted(os.listdir(featdir), key=tokey)
self.next_id = 0
self.featdir = featdir
def get_next(self):
logging.info("Reader - load %d" % self.next_id)
feat = loadmat(
os.path.join(self.featdir, self.v_fname[self.next_id]))['feat']
self.next_id += 1
return feat
| mit | -4,566,812,271,994,191,400 | 22.357798 | 77 | 0.533386 | false | 3.673882 | false | false | false |
s390guy/SATK | tools/ipl/PyIPLELF.py | 1 | 8682 | #!/usr/bin/python3.3
# Copyright (C) 2012 Harold Grovesteen
#
# This file is part of SATK.
#
# SATK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SATK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SATK. If not, see <http://www.gnu.org/licenses/>.
# This module is a wrapper for PyELF tailored to the IPL ELF ABI Supplement.
import PyELF # access the Python ELF handler
import hexdump # access to the binary manipulation functions
class PyIPLELF(object):
def __init__(self,elf,debug=False):
if not isinstance(elf,PyELF.elf):
raise TypeError("PyIPLELF.py: error: not a PyELF.elf instance: %s" % elf)
self.filename=elf.name # IPL ELF file name
self.elfo=elf # IPL ELF as a PyELF.elf instance
self.elf=self.elfo.fil # IPL ELF as a single string
if debug:
print("PyIPLELF.py: debug: length of elf file: %s" % len(self.elf))
self.elf_entry=self.elfo.getEntry() # Entry address of IPL ELF
self.segments={} # Contains the IPL ELF recognized segments by name
self.elf_itself=None # Set by elfseg method, if called below
self.ipl_entry=None # Set by loadelf method if IPL segment is present
# The rmode is used to define the architecture in which the IPL ELF
# expects to run. This is determined from the IPL ELF Header data
self.rmode=self.arch() # Set residency mode (or arch) the same as amode
# amode may be set from the IPL PSW if the IPL ELF contains an IPL segment.
# Otherwise it will default to the highest addressing mode available to
# the architecture implied by the IPL ELF Header data.
self.amode=None # Set amode based upon IPL ELF
self.rmode=self.arch() # Set residency mode (or arch) the same as amode
# Note, if an IPL PSW is present, the IPL PSW address overrides the ELF
# entry address.
# Construct the self.segments dictionary from the IPL ELF
for seg in self.elfo.PgmTbl.entries:
try:
segname=IPLSegment.name(seg)
except KeyError:
continue
sego=IPLSegment(segname,seg)
self.segments[sego.name]=sego
try:
text=self.segments["TEXT"]
except KeyError:
pass
if self.loadelf():
# Try to create a segment wrapper for the ELF itself
self.elfseg()
# If no IPL Segment, set the addressing mode to match the residency arch.
if self.amode is None:
self.amode=self.rmode
def __str__(self):
keys=sorted(self.segments.keys())
segs=""
for x in keys:
segs="%s, %s" % (segs,self.segments[x].name)
return "Segments: %s" % segs[2:]
def arch(self):
if self.elfo.iss370():
return 24
if self.elfo.is64():
return 64
return 31
def elfseg(self):
# This method returns the ELF as a segment in its own right
# This treatment allows the ELF to be handled the same as in individual
# segment
if len(self.segments)==0:
raise ValueError("PyIPLELF.py: error: could not locate information for "\
"IPL ELF segment creation")
# A segment is needed to calculate where the ELF itself should be loaded
seg=next(iter(self.segments.values()))
self.elf_itself=ELFSegment(self.elf,address=seg.address-seg.elf_offset)
def entry(self):
# Return the entry address. It defaults to the ELF executable entry
# address. If an IPL segment is present the address provided by the PSW
# is used.
if self.ipl_entry is None:
return self.elf_entry
return self.ipl_entry
def get_elf(self):
# Returns the ELF Segment
if self.elf_itself is not None:
return self.elf_itself
raise ValueError("PyIPLELF.py: error: ELFSegment not available")
def loadelf(self):
# True is returned if entire ELF is required by the program or
# False is returned if program uses just the TEXT segment
# How to load the ELF is defined by the IPL segment flags. If there is
# no IPL segment, then it is assumed the entire ELF is required.
try:
iplseg=self.segments["IPL"]
except KeyError:
# No IPL segment, so assume the whole ELF is required
return True
self.ipl_entry=iplseg.iplentry
self.amode=iplseg.amode
return False
def sego(self,name):
# Returns by name a segment's PyELF.Program instance. A segment name that is
# not present raises a KeyError.
return self.segments[name]
class IPLSegment(object):
areas={0x01000000:"TXT0",
0x02000000:"TXT1",
0x03000000:"TXT2",
0x04000000:"ELF1",
0x05000000:"ELF2"}
names={0x00000005:".data",
0x00000006:".text",
0x00000007:"TEXT",
0x10000004:"IPL",
0x20000004:"CCW",
0x30000006:"LOWC",
0xF0000007:"LOADER"}
@staticmethod
def area(sego):
if not isinstance(sego,PyELF.Program):
raise TypeError("PyIPLELF.py: internal: not a PyELF.Program: %s" % sego)
try:
return IPLSegment.areas[sego.flags & 0x07000000]
except KeyError:
return "any"
@staticmethod
def name(sego):
if not isinstance(sego,PyELF.Program):
raise TypeError("PyIPLELF.py: internal: not a PyELF.Program: %s" % sego)
return IPLSegment.names[sego.flags & 0xF0000007]
def __init__(self,name,sego=None):
if sego is not None and not isinstance(sego,PyELF.Program):
raise TypeError("PyIPLELF.py: internal: not a PyELF.Program: %s" % sego)
self.name=name
self.sego=sego # Program segment as a PyELF.Program instance
if self.name=="IPL":
self.iplseg_decode()
self.isIPL=True
else:
self.isIPL=False
# Must use self.isIPL flag before attempting to access instance variables
# created by iplseg_decode: self.iplpsw, self.amode, self.iplentry
def iplseg_decode(self):
# Decode the information in the IPL ELF "IPL" segment
self.iplpsw=self.segment[:8]
self.loadelf=(self.sego.flags & 0x08000000) == 0x00000000
if (ord(self.iplpsw[1])&0x08)==0x08:
# If EC-mode PSW use last 4 bytes for address (valid for S/370 & ESA/390)
addr=addr31(self.iplpsw[4:8])
self.iplentry=addr&0x7FFFFFFF
if (addr&0x80000000)==0x80000000:
self.amode=31
else:
self.amode=24
else:
# If BC-mode PSW use last 3 bytes for address
self.iplentry=addr24(self.iplpsw[5:8])
self.amode=24
@property
def address(self):
# Returns the segment's load address
return self.sego.virt_addr
@property
def elf_offset(self):
return self.sego.offset
@property
def segment(self):
# Returns the segment data as a string
return self.sego.fetch()
@property
def size(self):
# Returns the segment's memory size
return self.sego.mem_size
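# Hedged illustration (not part of SATK): the amode/entry extraction performed
# by iplseg_decode() above, restated with a small stand-alone helper so the bit
# layout of an EC-mode IPL PSW is explicit. The sample PSW bytes are made up.
def _example_ec_psw_entry(psw=b'\x00\x08\x00\x00\x80\x00\x10\x00'):
    word = int.from_bytes(psw[4:8], 'big')   # last four bytes of an EC-mode PSW
    amode = 31 if word & 0x80000000 else 24  # high-order bit selects the amode
    entry = word & 0x7FFFFFFF                # remaining 31 bits form the address
    return amode, entry                      # -> (31, 0x1000) for the sample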
class ELFSegment(IPLSegment):
# Representation of the entire IPL ELF as an IPL ELF Segment
def __init__(self,elftext,address):
self.addr=address
self.elftext=elftext
super(ELFSegment,self).__init__("ELF",sego=None)
@property
def address(self):
# Returns the segment's load address
return self.addr
@property
def elf_offset(self):
return 0
@property
def segment(self):
# Returns the segment data as a string
return self.elftext
@property
def size(self):
# Returns the segment's memory size
return len(self.segment) | gpl-3.0 | -5,996,643,650,752,151,000 | 35.792373 | 85 | 0.604008 | false | 3.730984 | false | false | false |
crossgovernmentservices/csd-notes | app/assets.py | 1 | 1576 | # -*- coding: utf-8 -*-
"""
Flask assets bundles and filters
"""
import os
from flask_assets import Bundle, Environment
from lib.jinja_to_js_filter import JinjaToJs
from lib.sass_filter import LibSass
def static(*path):
return os.path.join(os.path.dirname(__file__), 'static', *path)
jinja_to_js = JinjaToJs()
libsass_output = LibSass(include_paths=[
static('sass'),
static('govuk_frontend_toolkit/stylesheets'),
static('govuk_elements/public/sass/elements')])
env = Environment()
env.register('css_govuk_elements', Bundle(
'sass/govuk_elements.scss',
filters=(libsass_output,),
output='stylesheets/govuk_elements.css',
depends=[
'/static/govuk_elements/public/sass/**/*.scss',
'/static/govuk_frontend_toolkit/stylesheets/**/*.scss']))
env.register('css_main', Bundle(
'sass/main.scss',
filters=(libsass_output,),
output='stylesheets/main.css',
depends=[
'/static/sass/main/**/*.scss',
'/static/govuk_frontend_toolkit/stylesheets/**/*.scss']))
env.register('css_notes', Bundle(
'sass/notes.scss',
filters=(libsass_output,),
output='stylesheets/notes.css',
depends=[
'/static/sass/notes/**/*.scss',
'/static/govuk_frontend_toolkit/stylesheets/**/*.scss']))
env.register('note_template', Bundle(
'notes/note.html',
filters=(jinja_to_js,),
output='javascript/templates/notes/note.js'))
env.register('js_notes', Bundle(
'js/vendor/jinja-to-js-runtime.js',
'js/notesapp.js',
filters=('rjsmin',),
output='javascript/notes.js'))
| mit | -6,841,808,532,404,907,000 | 25.266667 | 67 | 0.654188 | false | 3.190283 | false | false | false |
KE-works/pykechain | pykechain/models/user.py | 1 | 2958 | import datetime
from typing import Text
import pytz
from .base import Base
from ..enums import LanguageCodes
class User(Base):
"""A virtual object representing a KE-chain user.
:ivar username: username of the user
:type username: str
:ivar name: username of the user (compatibility)
:type name: str
:ivar id: userid of the user
:type id: int
:ivar timezone: timezone of the User (defaults to <UTC>)
:type timezone: timezone object
:ivar language: language of the User (defaults to 'en')
:type language: str
:ivar email: email of the User (defaults to '')
:type email: str
"""
def __init__(self, json, **kwargs):
"""Construct a user from provided json data."""
super(User, self).__init__(json, **kwargs)
self.username = self._json_data.get('username', '')
self.id = self._json_data.get('pk', '')
def __repr__(self): # pragma: no cover
return "<pyke {} '{}' id {}>".format(self.__class__.__name__, self.username, self.id)
@property
def default_name(self) -> Text:
"""
Get default name, prioritizing the user name over the KE-chain name.
:return: Name
:rtype str
"""
return self.username if self.username else self.name
@property
def timezone(self) -> pytz.BaseTzInfo:
"""
Timezone of the user.
Defaults to timezone UTC.
Will return a pytz timezone, e.g. 'Europe/Amsterdam'
:return: timezone object (compatible with datetime)
:rtype: TzInfo
"""
return pytz.timezone(zone=self._json_data.get('timezone', 'UTC'))
@property
def language(self) -> Text:
"""
Language code of the user.
Defaults to English ('en") when no language code is configured.
:return: language code string
:rtype: basestring
"""
return self._json_data.get('language_code', LanguageCodes.ENGLISH)
@property
def email(self) -> Text:
"""
Email of the user.
:return: email address, default is empty string.
:rtype: basestring
"""
return self._json_data.get('email', '')
def now_in_my_timezone(self) -> datetime.datetime:
"""
Get current time in the timezone of the User.
Defaults to timezone GMT+1 (Europe/Amsterdam).
:return: Current datetime
:rtype datetime.datetime
"""
timezone_definition = self._json_data['timezone']
if timezone_definition:
timezone = pytz.timezone(timezone_definition)
else:
# if there is no timezone set then the Europe/Amsterdam timezone
timezone = pytz.timezone('Europe/Amsterdam')
# Default is utc timezone
utc_time = datetime.datetime.now(tz=pytz.utc)
# Convert to local timezone
local_time = utc_time.astimezone(timezone)
return local_time
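# Hedged usage sketch (not part of pykechain): given a User instance obtained
# elsewhere from the client, e.g.
#
#     user.now_in_my_timezone().strftime('%Y-%m-%d %H:%M %Z')
#
# returns the current wall-clock time formatted in that user's own timezone.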
| apache-2.0 | 4,657,301,922,102,669,000 | 27.442308 | 93 | 0.602096 | false | 4.172073 | false | false | false |
kgadek/evogil | problems/ZDT6/problem.py | 1 | 1372 | import functools
import math
dims = [(-5, 5), (-5, 5), (-5, 5)]
pareto_set = []
p_no = 150
emoa_points = [i/(p_no-1) for i in range(p_no)]
p_no2 = 300
emoa_points2 = [i/(p_no2-1) for i in range(p_no2)]
def emoa_fitness_2(f1, g, h, x):
y = g(x[1:])
return y * h(f1(x[0]), y)
def subfit1(x, f1):
return f1(x[0])
def subfit2(x, f1, g, h):
return emoa_fitness_2(f1, g, h, x)
def emoa_fitnesses(f1, g, h, dimensions, letter, known_front):
return ([functools.partial(subfit1, f1=f1),
functools.partial(subfit2, f1=f1, g=g, h=h)],
[(0, 1) for _ in range(dimensions)],
"ZDT6",
known_front)
def emoa_fitnesses2(f1, g, h, dimensions, letter, known_front):
return ([subfit1,
functools.partial(subfit2, f1=f1, g=g, h=h)],
[(0, 1)] + [(-5, 5) for _ in range(dimensions-1)],
"ZDT6",
known_front)
spi = 6 * math.pi
def f1e(x):
return 1 - math.exp(-4 * x) * (math.sin(spi * x) ** 6)
def ge(xs):
return 1 + 5.19615 * sum(xs) ** 0.25
def he(f1, g):
return 1 - (f1 / g) ** 2
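# Note (added, hedged): f1e, ge and he above are the standard ZDT6 test
# functions, f1(x1) = 1 - exp(-4*x1)*sin(6*pi*x1)**6,
# g = 1 + 9*(sum(x2..xn)/(n-1))**0.25 and h = 1 - (f1/g)**2. With n = 10 the
# constant 5.19615 ~ 9**0.75 folds the 1/(n-1) = 1/9 normalisation into ge().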
emoa_e_analytical = [[f1e(x) for x in emoa_points], [1-f1e(x)*f1e(x) for x in emoa_points]]
fitnesses, dims, name, pareto_front = emoa_fitnesses(f1e, ge, he, 10, 'e', emoa_e_analytical)
pareto_front = [[x, y] for x, y in zip(pareto_front[0], pareto_front[1])]
| gpl-3.0 | -4,791,608,235,092,484,000 | 22.254237 | 93 | 0.551749 | false | 2.282862 | false | false | false |
shsingh/ansible | lib/ansible/module_utils/network/eos/config/lacp_interfaces/lacp_interfaces.py | 12 | 6884 | # -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos_lacp_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list, dict_diff, param_list_to_dict
from ansible.module_utils.network.eos.facts.facts import Facts
from ansible.module_utils.network.eos.utils.utils import normalize_interface
class Lacp_interfaces(ConfigBase):
"""
The eos_lacp_interfaces class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'lacp_interfaces',
]
def get_lacp_interfaces_facts(self):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
lacp_interfaces_facts = facts['ansible_network_resources'].get('lacp_interfaces')
if not lacp_interfaces_facts:
return []
return lacp_interfaces_facts
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {'changed': False}
warnings = list()
commands = list()
existing_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
commands.extend(self.set_config(existing_lacp_interfaces_facts))
if commands:
if not self._module.check_mode:
self._connection.edit_config(commands)
result['changed'] = True
result['commands'] = commands
changed_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
result['before'] = existing_lacp_interfaces_facts
if result['changed']:
result['after'] = changed_lacp_interfaces_facts
result['warnings'] = warnings
return result
def set_config(self, existing_lacp_interfaces_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
want = self._module.params['config']
have = existing_lacp_interfaces_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
state = self._module.params['state']
want = param_list_to_dict(want)
have = param_list_to_dict(have)
if state == 'overridden':
commands = self._state_overridden(want, have)
elif state == 'deleted':
commands = self._state_deleted(want, have)
elif state == 'merged':
commands = self._state_merged(want, have)
elif state == 'replaced':
commands = self._state_replaced(want, have)
return commands
@staticmethod
def _state_replaced(want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for key, desired in want.items():
interface_name = normalize_interface(key)
if interface_name in have:
extant = have[interface_name]
else:
extant = dict()
add_config = dict_diff(extant, desired)
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, add_config, del_config))
return commands
@staticmethod
def _state_overridden(want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for key, extant in have.items():
if key in want:
desired = want[key]
else:
desired = dict()
add_config = dict_diff(extant, desired)
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, add_config, del_config))
return commands
@staticmethod
def _state_merged(want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided configuration
into the current configuration
"""
commands = []
for key, desired in want.items():
interface_name = normalize_interface(key)
if interface_name in have:
extant = have[interface_name]
else:
extant = dict()
add_config = dict_diff(extant, desired)
commands.extend(generate_commands(key, add_config, {}))
return commands
@staticmethod
def _state_deleted(want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
for key in want:
desired = dict()
if key in have:
extant = have[key]
else:
continue
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, {}, del_config))
return commands
def generate_commands(interface, to_set, to_remove):
commands = []
for key in to_remove.keys():
commands.append("no lacp {0}".format(key.replace("_", "-")))
for key, value in to_set.items():
if value is None:
continue
commands.append("lacp {0} {1}".format(key.replace("_", "-"), value))
if commands:
commands.insert(0, "interface {0}".format(interface))
return commands
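# Hedged illustration (not part of the module): given one interface and simple
# add/remove dicts (the attribute names below are just examples), the helper
# above yields:
#
#     generate_commands("Ethernet1", {"port_priority": 30}, {"rate": None})
#     -> ["interface Ethernet1", "no lacp rate", "lacp port-priority 30"]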
| gpl-3.0 | -7,651,579,622,116,228,000 | 31.168224 | 107 | 0.60183 | false | 4.47013 | true | false | false |
stefanseibert/DataMining | experiment04/scripts/feedparser.py | 105 | 122933 | #!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
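# Example (hedged, not part of the original source): an embedding application
# would typically override these module-level settings before calling parse(),
# for instance:
#
#     import feedparser
#     feedparser.USER_AGENT = 'MyAggregator/1.0 +http://example.com/'
#     d = feedparser.parse('http://example.org/feed.xml')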
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
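# Hedged illustration (not part of the original source): the keymap above makes
# the older key names interchangeable with the current ones, for example:
def _example_keymap_alias():
    d = FeedParserDict()
    d['updated'] = '2006-01-01T00:00:00Z'
    return d['modified'] == d['updated']   # True: 'modified' is mapped to 'updated'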
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
if baselang:
self.feeddata['language'] = baselang
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
tag = tag.split(':')[-1]
return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def name2cp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith('&#') and k.endswith(';'):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: name2cp(ref)
except KeyError: text = '&%s;' % ref
else: text = unichr(name2cp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
# resolve relative URIs within embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
# sanitize embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding)
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
self.inimage = 1
self.push('image', 0)
context = self._getContext()
context.setdefault('image', FeedParserDict())
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['textinput']['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
elif self.inimage:
context = self._getContext()
context['image']['href'] = value
elif self.intextinput:
context = self._getContext()
context['textinput']['link'] = value
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author = context.get(key)
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
if not emailmatch: return
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
context.setdefault('%s_detail' % key, FeedParserDict())
context['%s_detail' % key]['name'] = author
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
self.push('license', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('license')
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
def _end_creativecommons_license(self):
self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
attrsD.setdefault('type', 'text/html')
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context = self._getContext()
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD['rel'] == 'enclosure':
self._start_enclosure(attrsD)
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
if self.intextinput:
context['textinput']['link'] = value
if self.inimage:
context['image']['link'] = value
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_title(self, attrsD):
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
value = self.popContent('title')
context = self._getContext()
if self.intextinput:
context['textinput']['title'] = value
elif self.inimage:
context['image']['title'] = value
_end_dc_title = _end_title
_end_media_title = _end_title
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
context = self._getContext()
if self.intextinput:
context['textinput']['description'] = value
elif self.inimage:
context['image']['description'] = value
self._summaryKey = None
_end_abstract = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href:
context = self._getContext()
if not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding):
self.encoding = encoding
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
#data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
for key, value in attrs:
if type(value) != type(u''):
value = unicode(value, self.encoding)
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
self.pieces.append('&%(ref)s;' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding):
_BaseHTMLProcessor.__init__(self, encoding)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri)
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding)
p.feed(htmlSource)
return p.output()
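# Illustrative sketch, not part of the original module: with a hypothetical
# base URI of 'http://example.org/feed', one would expect roughly
#   _resolveRelativeURIs('<a href="/about">about</a>', 'http://example.org/feed', 'utf-8')
#   # -> '<a href="http://example.org/about">about</a>'
# because every (tag, attribute) pair listed in relative_uris is passed
# through _urljoin against the base URI.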
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
p = _HTMLSanitizer(encoding)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = type(data) == type(u'')
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
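# Illustrative sketch, not part of the original module (hypothetical input,
# approximate output, ignoring any optional Tidy post-processing):
#   _sanitizeHTML('<p onclick="evil()">hi<script>evil()</script></p>', 'utf-8')
#   # -> '<p>hi</p>'
# The script element and its text are suppressed via unacceptablestack, and
# 'onclick' is dropped because it is not in acceptable_attributes.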
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version.split()[0] >= '2.3.3'
assert base64 != None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it must be a tuple of 9 integers
as returned by gmtime() in the standard Python time module. This MUST
be in GMT (Greenwich Mean Time). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
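# Illustrative use of _open_resource above (a sketch; the feed URL is
# hypothetical). The returned object only guarantees the basic stdio read
# methods, so callers typically do something like:
#   f = _open_resource('http://example.org/atom.xml', None, None, USER_AGENT, None, [])
#   data = f.read()
#   f.close()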
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
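# Illustrative sketch, not part of the original module: client code can plug
# in extra date formats through registerDateHandler. The handler below is
# hypothetical; a handler takes a string and returns a 9-tuple in GMT, or
# None on failure so the next handler gets a chance:
#   def _parse_date_epoch(dateString):
#       '''Parse a string of integer seconds since the epoch'''
#       try:
#           return time.gmtime(int(dateString))
#       except ValueError:
#           return None
#   registerDateHandler(_parse_date_epoch)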
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params.keys():
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(params.get('second', 0))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
# daylight savings is complex, but not needed for feedparser's purposes
# as time zones, if specified, include mention of whether it is active
# (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
# and most implementations have DST bugs
daylight_savings_flag = 0
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
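# Illustrative note, not in the original: the handler above accepts both the
# extended form '2003-12-31T10:14:55Z' and compact forms such as '20031231',
# returning a 9-tuple in the shape of time.gmtime() (values pass through
# mktime/localtime normalization).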
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
    u'febru\u00e1r': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
    u'm\u00e1jus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
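# Illustrative note, not in the original: _parse_date_rfc822 covers the classic
# RSS pubDate style, e.g. 'Thu, 01 Jan 2004 19:48:21 GMT' is parsed via
# rfc822.parsedate_tz into a GMT 9-tuple, and a date with no time portion is
# padded with ' 00:00:00 GMT' before parsing.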
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
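# Worked example of the precedence rules above (illustrative, hypothetical
# headers): with {'content-type': 'application/atom+xml; charset=iso-8859-1'}
# and an XML declaration of <?xml version="1.0" encoding="utf-8"?>, the HTTP
# charset wins and true_encoding is 'iso-8859-1'; with a bare 'text/xml'
# Content-Type and the same declaration, the XML prefix is ignored and
# true_encoding falls back to 'us-ascii'.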
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
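# Illustrative note, not in the original: a document whose DOCTYPE mentions
# Netscape, e.g. <!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN">,
# makes the function above return ('rss091n', stripped_data); any other or
# missing DOCTYPE yields (None, stripped_data).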
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
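    # Typical use from calling code (an illustrative sketch; the feed URL is
    # hypothetical):
    #   d = parse('http://example.org/atom.xml')
    #   d['feed'].get('title'), len(d['entries'])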
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
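    # Illustration (hypothetical feed, not part of the original module): a feed
    # served with "Content-Type: text/xml; charset=iso-8859-1" whose <?xml
    # declaration says encoding="utf-8" gives http_encoding='iso-8859-1' and
    # xml_encoding='utf-8'; result['encoding'] is whichever of these the
    # RFC 3023 logic in _getCharacterEncoding decides should win.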
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, and windows-1252 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
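# Illustrative sketch (not part of the original module): typical read access
# to the dictionary returned by parse(), assuming a well-formed remote feed:
#
#     d = parse('http://example.org/feed.xml')
#     d['bozo']                 # 0 when the feed parsed cleanly, 1 otherwise
#     d['encoding']             # character encoding actually used
#     d['feed'], d['entries']   # channel-level data and the list of items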
if __name__ == '__main__':
if not sys.argv[1:]:
print __doc__
sys.exit(0)
else:
urls = sys.argv[1:]
zopeCompatibilityHack()
from pprint import pprint
for url in urls:
print url
print
result = parse(url)
pprint(result)
print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if no xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| mit | 4,730,405,504,525,414,000 | 42.013646 | 214 | 0.587474 | false | 3.610685 | false | false | false |
tacocats/2DEngine | tools/tile_editor/tileEditor.py | 1 | 4852 | from tkinter import *
from menuOptions import *
from PIL import Image, ImageFilter, ImageTk
from tileset_module import tilesetManipulation
from scene_module import levelManipulation, tile, level
# Globals############################################
# Whether or not the user is drawing on the screen
isDrawing = False
tileset = None
scene = None
levelManip = levelManipulation()
tilesetManip = tilesetManipulation()
######################################################
def keypress(key):
print (key)
def mousePress(event):
#print("x - " + str(event.x) + " y - " + str(event.y))
#print ("fixed - " + str(levelManip.determinePos(event.x, event.y)))
#event.widget.create_rectangle(20, 20, 50, 50, fill="blue")
#levelManip.displayLevel(event.widget)
global isDrawing
isDrawing = True
canvas = event.widget
if (canvas == scene):
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
tileX, tileY, tileImage = tilesetManip.getSelectedTile()
print (tileX)
print (tileY)
levelManip.drawTile(event.widget, x, y, tileImage, tileX, tileY)
elif (canvas == tileset):
print("tileset")
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
tilesetManip.setSelectedTile(x, y)
def release(event):
global isDrawing
isDrawing = False
def motion(event):
if (event.widget == scene):
if (isDrawing == True):
canvas = event.widget
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
tileX, tileY, tileImage = tilesetManip.getSelectedTile()
levelManip.drawTile(event.widget, x, y, tileImage, tileX, tileY)
# Create the window
def createWindow():
root = Tk()
# ToolBox Container
toolbox = Frame(root)
toolbox.pack(side=BOTTOM)
greenbutton = Button(toolbox, text="Tool", fg="brown")
greenbutton.pack( side = LEFT )
# Paned windows
pScene = PanedWindow(root, orient=HORIZONTAL)
pScene.pack(fill=BOTH, expand=True)
# Scene container
sceneContainer = Frame(pScene)
pScene.add(sceneContainer)
# Tileset Container
tilesetContainer = Frame(pScene)
pScene.add(tilesetContainer)
# The menubar
menubar = Menu(root)
fileMenu = Menu(menubar, tearoff=0)
fileMenu.add_command(label="New", command=newMenu)
fileMenu.add_command(label="Open", command=openMenu)
fileMenu.add_command(label="Save", command=lambda:saveMenu("testzzz", levelManip.levelz))
fileMenu.add_command(label="Save As", command=saveAsMenu)
menubar.add_cascade(label="File", menu=fileMenu)
editMenu = Menu(menubar, tearoff=0)
editMenu.add_command(label="Clear", command=clearMenu)
menubar.add_cascade(label="Edit", menu=editMenu)
tilesetMenu = Menu(menubar, tearoff=0)
tilesetMenu.add_command(label="Select tileset", command=selectTileset)
menubar.add_cascade(label="Tileset", menu=tilesetMenu)
# Add a scrollbar to canvas and tileset
sScrollbarV = Scrollbar(sceneContainer, orient=VERTICAL)
sScrollbarV.pack(side=LEFT, fill=Y)
sScrollbarH = Scrollbar(sceneContainer, orient=HORIZONTAL)
sScrollbarH.pack(side=BOTTOM, fill=X)
tScrollbarV = Scrollbar(tilesetContainer, orient=VERTICAL)
tScrollbarV.pack(side=LEFT, fill=Y)
tScrollbarH = Scrollbar(tilesetContainer, orient=HORIZONTAL)
tScrollbarH.pack(side=BOTTOM, fill=X)
# Canvas for the scene
global scene
scene = Canvas(sceneContainer, height=500, width=700, bd=0, highlightthickness=0, relief='ridge',scrollregion=(0,0,1000,1000), yscrollcommand=sScrollbarV.set, xscrollcommand=sScrollbarH.set, bg="black")
# Key bindings for the scene
scene.bind("<Button-1>", mousePress)
scene.bind("<Key>", keypress)
scene.bind("<Motion>", motion)
scene.bind("<ButtonRelease-1>", release)
scene.pack(side=RIGHT, fill=BOTH, expand=True)
sScrollbarV.config(command = scene.yview)
sScrollbarH.config(command = scene.xview)
# Canvas for the tileset
global tileset
tileset = Canvas(tilesetContainer, height=50, width=50, bg="black", yscrollcommand=tScrollbarV.set, xscrollcommand=tScrollbarH.set)
tileset.pack(side=RIGHT, fill=BOTH, expand=True)
tScrollbarV.config(command = tileset.yview)
tScrollbarH.config(command = tileset.xview)
# Key bindings for the tileset
tileset.bind("<Button-1>", mousePress)
tileset.bind("<Key>", keypress)
tileset.bind("<Motion>", motion)
tileset.bind("<ButtonRelease-1>", release)
root.config(menu=menubar)
#Set title
root.wm_title("Tile Editor")
# Create the opening level
levelManip.createLevel(50, 50, scene)
levelManip.displayLevel()
# Set the tileset
tilesetManip.setTileset(tileset, "tileset.png")
root.mainloop()
window = createWindow()
| mit | -6,763,474,092,875,302,000 | 31.13245 | 206 | 0.674567 | false | 3.381185 | false | false | false |
ZebraHat/AgoraD-LoadingDock | AgoraD/loading_dock/JsonSerializer.py | 1 | 3401 | #
## JsonSerializer.py
## functions for serializing and deserializing schemas
#
from django.db import connections
from models import Database, Table, Column
import json
import ModelGenerator
from django.core.serializers.json import DjangoJSONEncoder
def serialize(objects):
json_objs = []
for o in objects:
obj = {}
obj['class'] = o.__class__.__name__
obj['fields'] = {}
for f in o.__fields__:
obj['fields'][f] = o.__dict__[f]
json_objs.append(obj)
return json.dumps(json_objs, cls=DjangoJSONEncoder)
def deserialize(json_str, destdb):
json_objs = json.loads(json_str)
objs = []
for obj in json_objs:
o = ModelGenerator.getModel(destdb, obj['class'])()
for f, v in obj['fields'].iteritems():
o.__dict__[f] = v
objs.append(o)
return objs
def schema2json(dbname = None, tablenames = None, destdb = None):
"""
If no parameters are passed, creates a schema representation of
all known databases and tables.
    If dbname is passed, only that database is serialized; tablenames, when
    given, restricts the output to those tables, and destdb, when given,
    replaces dbname as the database key in the resulting JSON.
"""
if dbname:
dblist = [Database.objects.get(name=dbname)]
else:
dblist = Database.objects.all()
schema = {}
for db in dblist:
if tablenames:
tablelist = Table.objects.filter(db=db, name__in=tablenames)
else:
tablelist = Table.objects.filter(db=db)
table_schema = {}
for table in tablelist:
table_schema[table.name] = {}
for column in Column.objects.filter(table=table):
table_schema[table.name][column.name] = column.type
if destdb:
schema[destdb] = table_schema
else:
schema[db.name] = table_schema
return json.dumps(schema, sort_keys=True)
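# Illustrative sketch (not part of the original app): schema2json() nests
# column types under table names under database names, e.g. (hypothetical
# names and type strings):
#
#     {"hospital": {"patient": {"name": "CharField", "age": "IntegerField"}}}
#
# json2schema() below accepts JSON of the same shape.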
def json2schema(schema_json, commit = True, destdb = None):
"""
Creates Database, Table, and Column objects as needed to satisfy the incoming schema.
If the table is already present, assume we are updating: delete all columns and recreate from the schema.
Unless commit is false, call the required sql to create the incoming tables in the destination database.
"""
schema = json.loads(schema_json)
for dbname, table_schema in schema.iteritems():
if destdb:
dbname = destdb
try:
db = Database.objects.get(name=dbname)
except Database.DoesNotExist:
db = Database(name=dbname)
db.save()
for tablename, column_schema in table_schema.iteritems():
try:
table = Table.objects.get(db=db, name=tablename)
for column in Column.objects.filter(table=table):
column.delete()
except Table.DoesNotExist:
table = Table(db=db, name=tablename)
table.save()
for columnname, columntype in column_schema.iteritems():
column = Column(table=table, name=columnname, type=columntype)
column.save()
if commit:
model = ModelGenerator.getModel(dbname, tablename)
cursor = connections[dbname].cursor()
for sql in ModelGenerator.getSQL(model):
cursor.execute(sql)
return None
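# Illustrative round trip (a sketch; assumes both aliases exist in Django's
# DATABASES setting and that ModelGenerator can build models for them):
#
#     json_schema = schema2json(dbname='source_db', destdb='dest_db')
#     json2schema(json_schema)   # creates matching tables under 'dest_db'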
| lgpl-2.1 | 7,897,342,371,836,772,000 | 29.366071 | 109 | 0.603058 | false | 4.2301 | false | false | false |
ZhuangER/robot_path_planning | scripts/coursera.py | 1 | 37429 | # Testing solutions for Coursera
# (c) PySimiam Team 2014
#
# Contact person: Tim Fuchs <[email protected]>
#
# This class was implemented for the weekly programming excercises
# of the 'Control of Mobile Robots' course by Magnus Egerstedt.
#
try:
from urllib import urlencode
from urllib2 import urlopen
except Exception:
from urllib.parse import urlencode
from urllib.request import urlopen
import hashlib
import base64
import re
import math
import helpers
from pose import Pose
import numpy as np
class CourseraException(Exception):
pass
class WeekTestCase:
# RX = re.compile(r"(?P<name>[a-zA-Z_][a-zA-Z_0-9]*)=(?P<value>-?[0-9]+(?:\.[0-9]+)?);")
RX = re.compile(r"(?P<name>[a-zA-Z_][a-zA-Z_0-9]*)=(?P<value>[^;]+);")
def __init__(self, week): # Never initialize test-run parameters in the constructor
self.testsuite = week
self.name = "Name not set"
self.test_id = "XXXYYYZZZ"
def parseChallenge(self,challenge, types = {}):
result = {}
for m in self.RX.finditer(challenge):
try:
name = m.group('name')
if name in types:
result[name] = types[name](m.group('value'))
else:
result[name] = float(m.group('value'))
except Exception:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
return result
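    # Illustration (not part of the original file): a challenge string such as
    # "v=0.15;x_g=1.0;y_g=0.75;" parsed with parseChallenge() yields
    # {'v': 0.15, 'x_g': 1.0, 'y_g': 0.75}; passing types={'dir': str} keeps
    # the 'dir' field as a string instead of converting it to float.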
class WeekTest:
coursera_challenge_url = "http://class.coursera.org/conrob-002/assignment/challenge"
coursera_submit_url = "http://class.coursera.org/conrob-002/assignment/submit"
def __init__(self, gui):
self.gui = gui
self.week = 0
# Test is a tuple - 'title','id',function
self.tests = []
self.login = None
self.password = None
self.callback = None
self.submit = True
self.testname = 'Name not set'
def setuser(self, login, password):
self.login = str(login).strip()
self.password = str(password).strip()
def run_tests(self,tests = None):
if tests is None:
tests = list(range(len(self.tests)))
for i in tests:
self.test(self.tests[i])
def test(self,testcase,callback):
if isinstance(testcase,int):
testcase = self.tests[testcase]
self.callback = callback
self.testcase = testcase
params = urlencode({
'email_address' : self.login,
'assignment_part_sid' : testcase.test_id,
'response_encoding' : 'delim'
}).encode('utf-8')
response = urlopen(url=WeekTest.coursera_challenge_url, data = params)
string = response.read().decode('utf-8').split('|')[1:]
self.c_response = {}
for i in range(len(string)//2):
try:
self.c_response[string[2*i]] = string[2*i+1]
except Exception:
pass
if 'email_address' not in self.c_response or not self.c_response['email_address']:
raise CourseraException("Communication with server failed")
elif 'challenge_key' not in self.c_response or not self.c_response['challenge_key'] \
or 'state' not in self.c_response or not self.c_response['state']:
# Error occured, error string in email_address
raise CourseraException(self.c_response['email_address'])
testcase.start_test(self.c_response['challenge_aux_data'])
def respond(self,fn_output):
if self.callback is None:
return
ch_resp = hashlib.sha1((self.c_response['challenge_key'] + self.password).encode('utf-8')).hexdigest()
params = urlencode(
{'assignment_part_sid': self.testcase.test_id,
'email_address': self.c_response['email_address'],
'submission': base64.standard_b64encode(fn_output.encode('utf-8')),
'submission_aux': b'',
'challenge_response': ch_resp,
'state': self.c_response['state']}).encode('utf-8');
self.callback(urlopen(url=self.coursera_submit_url, data = params).read().decode('utf-8'))
self.testcase = None
self.callback = None
class Week1(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 1"
self.week = 1
self.tests.append(Week1Test1(self))
class Week1Test1(WeekTestCase):
def __init__(self, week):
self.testsuite = week
self.name = "Running the simulator"
self.test_id = "k3pa0rK4"
def __call__(self,event,args):
if event == "log":
message, objclass, objcolor = args
if message == "Switched to Hold":
self.stop_test()
self.testsuite.respond("-1")
return False
def start_test(self,challenge):
self.testsuite.gui.start_testing()
self.testsuite.gui.load_world('week1.xml')
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.run_simulation()
def stop_test(self):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.stop_testing()
class Week2(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 2"
self.week = 2
self.tests.append(Week2Test1(self))
self.tests.append(Week2Test2(self))
self.tests.append(Week2Test3(self))
class Week2Test1(WeekTestCase):
"""Test 1: Test uni2diff"""
def __init__(self, week):
self.testsuite = week
self.name = "Unicycle to differential-drive\ntransformation"
self.test_id = "QihGedxL"
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'w' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
v = vals['v']
w = vals['w']
from supervisors.week2 import QuickBotSupervisor
from robots.quickbot import QuickBot
from pose import Pose
info = QuickBot(Pose()).get_info()
info.color = 0
s = QuickBotSupervisor(Pose(),info)
vl, vr = s.uni2diff((v,w))
self.testsuite.respond("{:0.3f},{:0.3f}".format(vr,vl)) # Note the inverse order
class Week2Test2(WeekTestCase):
def __init__(self, week):
self.testsuite = week
self.name = "Odometry"
self.test_id = "TQkrYtec"
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'theta' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
v = vals['v']
theta = vals['theta']
from supervisors.week2 import QuickBotSupervisor
from robots.quickbot import QuickBot
from pose import Pose
from helpers import Struct
from math import pi
bot = QuickBot(Pose())
info = bot.get_info()
info.color = 0
s = QuickBotSupervisor(Pose(),info)
params = Struct()
params.goal = theta*180/pi
params.velocity = v
params.pgain = 1
s.set_parameters(params)
tc = 0.033 # 0.033 sec' is the SimIAm time step
for step in range(25): # 25 steps
bot.move(tc)
bot.set_inputs(s.execute(bot.get_info(), tc))
xe,ye,te = s.pose_est
xr,yr,tr = bot.get_pose()
self.testsuite.respond("{:0.3f},{:0.3f},{:0.3f}".format(abs((xr-xe)/xr), abs((yr-ye)/yr), abs(abs(tr-te)%(2*pi)/tr)))
class Week2Test3(WeekTestCase):
def __init__(self, week):
self.testsuite = week
self.name = "Converting raw IR sensor values\nto distances"
self.test_id = "yptGGVPr"
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'd1' not in vals or 'd2' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
d1 = vals['d1']
d2 = vals['d2']
from supervisors.week2 import QuickBotSupervisor
from robots.quickbot import QuickBot, QuickBot_IRSensor
from pose import Pose
bot = QuickBot(Pose())
sensor = QuickBot_IRSensor(Pose(),bot)
id1 = sensor.distance_to_value(d1)
id2 = sensor.distance_to_value(d2)
info = bot.get_info()
info.color = 0
s = QuickBotSupervisor(Pose(),info)
# Just in case a student iterates in a weird way
s.robot.ir_sensors.readings = [id1,id2,id1,id2,id1]
ird = s.get_ir_distances()
self.testsuite.respond("{:0.3f},{:0.3f}".format(abs((d1-ird[0])/d1), abs((d2-ird[1])/d2)))
class Week3(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 3"
self.week = 3
self.tests.append(Week3Test1(self))
self.tests.append(Week3Test2(self))
self.tests.append(Week3Test3(self))
class Week3Test1(WeekTestCase):
"""Run the simulator until the robot reaches the goal or collides with the wall.
Stops after 30 seconds."""
def __init__(self, week):
self.testsuite = week
self.name = "Arriving at the goal location"
self.test_id = "pKyj9jyA"
self.dst2goal = 'math.sqrt((robot.get_pose().x - supervisor.parameters.goal.x)**2 + (robot.get_pose().y - supervisor.parameters.goal.y)**2)'
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 30:
self.stop_test()
self.testsuite.respond("0")
if event == "plot_update": # get distance to goal from the simulator
dst = args[0][self.dst2goal]
if dst < 0.05:
self.stop_test()
self.testsuite.respond("1")
elif event == "log": # watch for collisions
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test()
self.testsuite.respond("0")
elif event == "make_param_window": # in the beginning rewrite parameters
robot_id, name, params = args
# FIXME What follows is a hack, that will only work
# in the current GUI implementation.
# For a better solution we need to change the API again
params[0][1][0] = ('x',self.goal[0])
params[0][1][1] = ('y',self.goal[1])
params[1][1][0] = ('v',self.v)
p = helpers.Struct()
p.goal = helpers.Struct()
p.goal.x = self.goal[0]
p.goal.y = self.goal[1]
p.velocity = helpers.Struct()
p.velocity.v = self.v
p.gains = helpers.Struct()
p.gains.kp = params[2][1][0][1]
p.gains.ki = params[2][1][1][1]
p.gains.kd = params[2][1][2][1]
self.testsuite.gui.run_simulator_command('apply_parameters', robot_id, p)
#elif event == 'reset' # World constructed, safe
return False
def stop_test(self):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'x_g' not in vals or 'y_g' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.v = vals['v']
self.goal = (vals['x_g'],vals['y_g'])
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.load_world('week3.xml')
self.testsuite.gui.run_simulator_command('add_plotable',self.dst2goal)
#self.testsuite.gui.dockmanager.docks[self.dockname].widget().apply_click()
self.testsuite.gui.run_simulation()
class Week3Test2(WeekTestCase):
"""Test 2: check if the PID gains do not lead to oscillations"""
def __init__(self, week):
self.testsuite = week
self.name = "Tuning the PID gains for performance"
self.test_id = "2aZEky7h"
self.dtheta = '((math.atan2(supervisor.parameters.goal.y - robot.get_pose().y, supervisor.parameters.goal.x - robot.get_pose().x) - robot.get_pose().theta + math.pi)%(2*math.pi) -math.pi)/math.atan2(supervisor.parameters.goal.y,supervisor.parameters.goal.x)'
self.dst2goal = 'math.sqrt((robot.get_pose().x - supervisor.parameters.goal.x)**2 + (robot.get_pose().y - supervisor.parameters.goal.y)**2)'
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 15: # Not more than 15 seconds
self.stop_test()
if event == "plot_update": # get dtheta
dtheta = args[0][self.dtheta]
self.dthetas.append(abs(dtheta))
if dtheta < self.dtheta_min:
self.dtheta_min = dtheta
dst2goal = abs(args[0][self.dst2goal])
if dst2goal < 0.05:
self.stop_test()
elif event == "log": # watch for collisions
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test()
elif event == "make_param_window": # in the beginning rewrite parameters
robot_id, name, params = args
params[0][1][0] = ('x',self.p.goal.x)
params[0][1][1] = ('y',self.p.goal.y)
params[1][1][0] = ('v',self.p.velocity.v)
params[2][1][0] = (('kp','Proportional gain'), self.p.gains.kp)
params[2][1][1] = (('ki','Integral gain'), self.p.gains.ki)
params[2][1][2] = (('kd','Differential gain'), self.p.gains.kd)
self.testsuite.gui.run_simulator_command('apply_parameters', robot_id, self.p)
#elif event == 'reset' # World constructed, safe
return False
def stop_test(self):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
i_iter = -1
i_dec = 0
i_dec_max = 50 # Simiam has 0.05 tc and 20 max
settletime = -1
for i_iter, dtheta in enumerate(self.dthetas):
if dtheta < 0.1:
i_dec += 1
if i_dec > i_dec_max:
settletime = (i_iter-i_dec)*0.02
break
else:
i_dec = 0
settletime = -1*0.02
self.testsuite.respond("{:0.3f},{:0.3f}".format(settletime,abs(self.dtheta_min)))
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'x_g' not in vals or 'y_g' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.p = helpers.Struct()
self.p.goal = helpers.Struct()
self.p.goal.x = vals['x_g']
self.p.goal.y = vals['y_g']
self.p.velocity = helpers.Struct()
self.p.velocity.v = vals['v']
self.p.gains = helpers.Struct()
self.p.gains.kp = 3
self.p.gains.ki = 6
self.p.gains.kd = 0.01
# FIXME What follows is a hack, that will only work
# in the current GUI implementation.
# For a better solution we need to change the API again
docks = self.testsuite.gui.dockmanager.docks
if len(docks):
dock = docks[list(docks.keys())[0]]
self.p.gains = dock.widget().contents.get_struct().gains
self.dtheta_min = math.pi
self.dthetas = []
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.load_world('week3.xml')
self.testsuite.gui.run_simulator_command('add_plotable',self.dtheta)
self.testsuite.gui.run_simulator_command('add_plotable',self.dst2goal)
#self.testsuite.gui.dockmanager.docks[self.dockname].widget().apply_click()
self.testsuite.gui.run_simulation()
class Week3Test3(WeekTestCase):
"""Test 3: check if ensure_w works"""
def __init__(self, week):
self.testsuite = week
self.name = "Reshaping the output for the hardware"
self.test_id = "BlIrXfQO"
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'w' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
vd = vals['v']
wd = vals['w']
QuickBotSupervisor = helpers.load_by_name('week3.QBGTGSupervisor','supervisors')
QuickBot = helpers.load_by_name('QuickBot','robots')
from pose import Pose
bot = QuickBot(Pose())
info = bot.get_info()
info.color = 0
s = QuickBotSupervisor(Pose(),info)
vld, vrd = s.uni2diff((vd,wd))
vl, vr = s.ensure_w((vld,vrd))
# Clamp to robot maxima
vl = max(-info.wheels.max_velocity, min(info.wheels.max_velocity, vl))
vr = max(-info.wheels.max_velocity, min(info.wheels.max_velocity, vr))
v, w = bot.diff2uni((vl,vr))
self.testsuite.respond("{:0.3f}".format(abs(w-wd)/wd))
class Week4(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 4"
self.week = 4
self.tests.append(Week4Test1(self))
self.tests.append(Week4Test2(self))
class Week4Test1(WeekTestCase):
"""Test 1: check if sensor points are calculated correctly"""
def __init__(self, week):
self.testsuite = week
self.name = "From IR distances to points in the World"
self.test_id = "n6Td5e5B"
def start_test(self,challenge):
print(challenge)
vals = self.parseChallenge(challenge)
print(vals)
if 'dist_1' not in vals or 'x' not in vals or 'y' not in vals or 'theta' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
rpose = Pose(vals['x'],vals['y'],vals['theta'])
ir_sensor_poses = [
Pose(-0.0474, 0.0534, np.radians(90)),
Pose( 0.0613, 0.0244, np.radians(45)),
Pose( 0.0636, 0.0, np.radians(0)),
Pose( 0.0461,-0.0396, np.radians(-45)),
Pose(-0.0690,-0.0534, np.radians(-90))
]
params = helpers.Struct()
params.sensor_poses = [p >> rpose for p in ir_sensor_poses]
params.gains = helpers.Struct({'kp':0, 'ki':0, 'kd':0})
state = helpers.Struct()
state.sensor_distances = [vals['dist_1'], 0.3, 0.3, vals['dist_1'], 0.3]
AvoidObstacles = helpers.load_by_name('week4_solved.AvoidObstacles','controllers')
testAO = AvoidObstacles(params)
testAO.get_heading(state)
vs = testAO.vectors
error_1 = math.sqrt((vs[0][0] - 0.3637)**2 + (vs[0][1] + 0.0545)**2)
error_2 = math.sqrt((vs[3][0] + 0.0895)**2 + (vs[3][1] + 0.2932)**2)
self.testsuite.respond("{:0.3f},{:0.3f}".format(error_1,error_2))
class Week4Test2(WeekTestCase):
"""Test 2: check if robot can take care of itself for 60 seconds"""
def __init__(self, week):
self.testsuite = week
self.name = "Avoiding collisions for 60 seconds"
self.test_id = "sw7on7mK"
self.dr = 'abs(robot.get_pose().x) + abs(robot.get_pose().y)'
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 60: # Stop after 60 seconds
self.stop_test(self.max_dr > 0.5)
if event == "log": # watch for collisions
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test(False)
elif event == "plot_update": # get dr
dr = args[0][self.dr]
if dr > self.max_dr:
self.max_dr = dr
del args[0][self.dr]
elif event == "make_param_window": # in the beginning rewrite parameters
robot_id, name, params = args
params[0][1][0] = ('v',self.p.velocity.v)
params[1][1][0] = (('kp','Proportional gain'), self.p.gains.kp)
params[1][1][1] = (('ki','Integral gain'), self.p.gains.ki)
params[1][1][2] = (('kd','Differential gain'), self.p.gains.kd)
self.testsuite.gui.run_simulator_command('apply_parameters', robot_id, self.p)
return False
def stop_test(self, passed):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
if passed:
self.testsuite.respond("0")
else:
self.testsuite.respond("1")
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.p = helpers.Struct()
self.p.velocity = helpers.Struct()
self.p.velocity.v = vals['v']
self.p.gains = helpers.Struct()
self.p.gains.kp = 4
self.p.gains.ki = 0.1
self.p.gains.kd = 0
self.max_dr = 0.0
# FIXME What follows is a hack, that will only work
# in the current GUI implementation.
# For a better solution we need to change the API again
docks = self.testsuite.gui.dockmanager.docks
if len(docks):
dock = docks[list(docks.keys())[0]]
self.p.gains = dock.widget().contents.get_struct().gains
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.load_world('week4.xml')
# We have to check the robot actually moved
self.testsuite.gui.run_simulator_command('add_plotable',self.dr)
self.testsuite.gui.run_simulation()
class Week5(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 5"
self.week = 5
self.tests.append(Week5Test1(self))
self.tests.append(Week5Test2(self))
class Week5Test1(WeekTestCase):
"""Test 1: check if robot reaches the goal in 60 seconds"""
def __init__(self, week):
self.testsuite = week
self.name = "Collision-free navigation with blending"
self.test_id = "HChwie7B"
self.dst2goal = 'math.sqrt((robot.get_pose().x - supervisor.parameters.goal.x)**2 + (robot.get_pose().y - supervisor.parameters.goal.y)**2)'
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 60: # Stop after 60 seconds
self.stop_test(False)
if event == "log": # watch for collisions
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test(False)
elif event == "plot_update": # get dr
dst2goal = args[0][self.dst2goal]
if dst2goal < 0.05:
self.stop_test(True)
# del args[0][self.dst2goal]
elif event == "make_param_window": # in the beginning rewrite parameters
robot_id, name, params = args
params[0][1][0] = ('x', self.p.goal.x)
params[0][1][1] = ('y', self.p.goal.y)
params[1][1][0] = ('v',self.p.velocity.v)
params[2][1][0] = (('kp','Proportional gain'), self.p.gains.kp)
params[2][1][1] = (('ki','Integral gain'), self.p.gains.ki)
params[2][1][2] = (('kd','Differential gain'), self.p.gains.kd)
self.testsuite.gui.run_simulator_command('apply_parameters', robot_id, self.p)
return False
def stop_test(self, passed):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
self.testsuite.respond("{:d}".format(passed))
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'x_g' not in vals or 'y_g' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.v = vals['v']
self.goal = (vals['x_g'],vals['y_g'])
self.p = helpers.Struct()
self.p.velocity = helpers.Struct({'v':vals['v']})
self.p.goal = helpers.Struct({'x':vals['x_g'], 'y':vals['y_g']})
self.p.gains = helpers.Struct({'kp':4, 'ki':0.1, 'kd':0})
# FIXME What follows is a hack, that will only work
# in the current GUI implementation.
# For a better solution we need to change the API again
docks = self.testsuite.gui.dockmanager.docks
if len(docks):
dock = docks[list(docks.keys())[0]]
self.p.gains = dock.widget().contents.get_struct().gains
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.load_world('week5_blending.xml')
self.testsuite.gui.run_simulator_command('add_plotable',self.dst2goal)
self.testsuite.gui.run_simulation()
class Week5Test2(WeekTestCase):
"""Test 2: check if robot can take care of itself for 60 seconds"""
def __init__(self, week):
self.testsuite = week
self.name = "Collision-free navigation with switching"
self.test_id = "7TGoq1mz"
self.dst2goal = 'math.sqrt((robot.get_pose().x - supervisor.parameters.goal.x)**2 + (robot.get_pose().y - supervisor.parameters.goal.y)**2)'
self.switch_RX = re.compile(r'^Switched to (?P<CNT>.*)$')
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 60: # Stop after 60 seconds
self.stop_test(False, self.switches)
if event == "plot_update": # get dr
self.cdist = args[0][self.dst2goal]
if self.cdist < 0.05:
self.stop_test(True, self.switches)
elif event == "log":
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test(False, self.switches)
else:
m = self.switch_RX.match(message)
if m is not None:
self.switches += 1
cnt = m.group('CNT')
if cnt == "Hold":
if self.cdist > 0.2:
print("The robot stopped too far from the goal.")
self.stop_test(False, self.switches)
else:
self.stop_test(True, self.switches)
elif event == "make_param_window": # in the beginning rewrite parameters
robot_id, name, params = args
params[0][1][0] = ('x', self.p.goal.x)
params[0][1][1] = ('y', self.p.goal.y)
params[1][1][0] = ('v',self.p.velocity.v)
params[2][1][0] = (('kp','Proportional gain'), self.p.gains.kp)
params[2][1][1] = (('ki','Integral gain'), self.p.gains.ki)
params[2][1][2] = (('kd','Differential gain'), self.p.gains.kd)
self.testsuite.gui.run_simulator_command('apply_parameters', robot_id, self.p)
return False
def stop_test(self, passed, nswitches):
runtime = self.testsuite.gui.simulator_thread.get_time()
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
print('The supervisor switched {} times in {} seconds'.format(nswitches,runtime))
self.testsuite.respond("{:d},{:d}".format(passed,(nswitches/runtime <= self.max_shz)))
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'v' not in vals or 'x_g' not in vals or 'y_g' not in vals or 's_hz' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.cdist = 100
self.switches = 0
self.max_shz = vals['s_hz']
self.v = vals['v']
self.goal = (vals['x_g'],vals['y_g'])
self.p = helpers.Struct()
self.p.velocity = helpers.Struct({'v':vals['v']})
self.p.goal = helpers.Struct({'x':vals['x_g'], 'y':vals['y_g']})
self.p.gains = helpers.Struct({'kp':4, 'ki':0.1, 'kd':0})
# FIXME What follows is a hack, that will only work
# in the current GUI implementation.
# For a better solution we need to change the API again
docks = self.testsuite.gui.dockmanager.docks
if len(docks):
dock = docks[list(docks.keys())[0]]
self.p.gains = dock.widget().contents.get_struct().gains
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.load_world('week5_switching.xml')
self.testsuite.gui.run_simulator_command('add_plotable',self.dst2goal)
self.testsuite.gui.run_simulation()
class Week6(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 6"
self.week = 6
self.tests.append(Week6Test(self, "L6V17gUC", "left"))
self.tests.append(Week6Test(self, "J12UGUtV", "right"))
class Week6Test(WeekTestCase):
"""Test: check if robot can follow the wall for two laps"""
def __init__(self, week, test_id, direction):
self.testsuite = week
self.name = "Two laps around the obstacle to the {}".format(direction)
self.test_id = test_id
self.direction = direction
self.dst20 = 'math.sqrt(robot.get_pose().x**2 + robot.get_pose().y**2)'
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 90: # Stop after 60 seconds
self.stop_test(False)
if event == "log": # watch for collisions
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test(False)
elif event == "plot_update": # get dr
dst20 = args[0][self.dst20]
if self.new_lap and dst20 > 0.1:
self.new_lap = False
self.testsuite.gui.simulator_log("Starting lap {}".format(self.lap_count),"Week6 test",None)
elif not self.new_lap and dst20 < 0.1:
self.new_lap = True
self.testsuite.gui.simulator_log("Finished lap {}".format(self.lap_count),"Week6 test",None)
self.lap_count += 1
if self.lap_count > 2:
self.stop_test(True)
return False
def stop_test(self, passed):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
self.testsuite.respond("{:d}".format(passed))
def start_test(self,challenge):
vals = self.parseChallenge(challenge, {'dir':str})
if 'v' not in vals or 'dir' not in vals or 'theta' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.new_lap = True
self.lap_count = 1
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
from xmlreader import XMLReader
world = XMLReader("worlds/week6_test_{}.xml".format(vals['dir']), 'simulation').read()
i = 0
while world[i].type != 'robot':
i += 1
world[i].robot.pose.theta = vals['theta']
world[i].supervisor.options = '{{"velocity":{}, "direction":"{}"}}'.format(vals['v'],vals['dir'])
self.testsuite.gui.dockmanager.clear()
self.testsuite.gui.run_simulator_command('load_world',world)
self.testsuite.gui.run_simulator_command('add_plotable',self.dst20)
self.testsuite.gui.run_simulation()
class Week7(WeekTest):
def __init__(self, gui):
WeekTest.__init__(self, gui)
self.testname = "Programming Assignment Week 7"
self.week = 7
self.tests.append(Week7Test(self, "jbZAu4c1"))
class Week7Test(WeekTestCase):
"""Test: check if robot can reach the goal hidden behind many obstacles"""
def __init__(self, week, test_id):
self.testsuite = week
self.name = "Navigating to the goal"
self.test_id = test_id
self.dst2goal = 'math.sqrt((robot.get_pose().x - supervisor.parameters.goal.x)**2 + (robot.get_pose().y - supervisor.parameters.goal.y)**2)'
self.switch_RX = re.compile(r'^Switched to (?P<CNT>.*)$')
def __call__(self,event,args):
if self.testsuite.gui.simulator_thread.get_time() > 30: # Stop after 30 seconds
self.stop_test(False)
if event == "log": # watch for collisions
message, objclass, objcolor = args
if message.startswith("Collision with"):
self.stop_test(False)
else:
m = self.switch_RX.match(message)
if m is not None:
cnt = m.group('CNT')
if cnt == "Hold":
if self.cdist > 0.06:
print("The robot stopped too far from the goal.")
self.stop_test(False)
else:
self.stop_test(True)
elif event == "plot_update": # get dr
self.cdist = args[0][self.dst2goal]
elif event == "make_param_window": # in the beginning rewrite parameters
robot_id, name, params = args
params[0][1][0] = ('x', self.p.goal.x)
params[0][1][1] = ('y', self.p.goal.y)
params[1][1][0] = ('v',self.p.velocity.v)
params[2][1][0] = (('kp','Proportional gain'), self.p.gains.kp)
params[2][1][1] = (('ki','Integral gain'), self.p.gains.ki)
params[2][1][2] = (('kd','Differential gain'), self.p.gains.kd)
self.testsuite.gui.run_simulator_command('apply_parameters', robot_id, self.p)
return False
def stop_test(self, passed):
self.testsuite.gui.unregister_event_handler()
self.testsuite.gui.pause_simulation()
self.testsuite.gui.stop_testing()
self.testsuite.respond("{:d}".format(passed))
def start_test(self,challenge):
vals = self.parseChallenge(challenge)
if 'x_g' not in vals or 'y_g' not in vals:
raise CourseraException("Unknown challenge format. Please contact developers for assistance.")
self.p = helpers.Struct()
self.p.velocity = helpers.Struct({'v':0.2})
self.p.goal = helpers.Struct({'x':vals['x_g'], 'y':vals['y_g']})
self.p.gains = helpers.Struct({'kp':4, 'ki':0.1, 'kd':0})
# FIXME What follows is a hack, that will only work
# in the current GUI implementation.
# For a better solution we need to change the API again
docks = self.testsuite.gui.dockmanager.docks
if len(docks):
dock = docks[list(docks.keys())[0]]
struct = dock.widget().contents.get_struct()
self.p.gains = struct.gains
self.p.velocity = struct.velocity
self.testsuite.gui.start_testing()
self.testsuite.gui.register_event_handler(self)
self.testsuite.gui.load_world("worlds/week7.xml")
self.testsuite.gui.run_simulator_command('add_plotable',self.dst2goal)
self.cdist = 100
self.testsuite.gui.run_simulation()
| mit | -277,692,897,075,128,770 | 37.193878 | 266 | 0.561516 | false | 3.67167 | true | false | false |
nathangeffen/tbonline-2 | tbonlineproject/credit/utils.py | 2 | 1267 | '''Utility functions for credit app.
'''
from django.utils.translation import ugettext as _
def credit_length(obj):
return len(obj)
def credit_list(obj, number_to_print=0):
"""Returns formatted list of people for bylines.
Implements "et al." and "and". E.g. "Samuel Johnson, Ingrid Bergman and
Lucy Stevens."
Arguments:
obj -- OrderedCredit GenericRelation
number_to_print -- Number of credits to list before "et al."
If 0, all authors printed.
"""
alist = obj.order_by('position')
len_alist = len(alist)
if len_alist == 0:
authors=u''
elif len_alist == 1:
authors = unicode(alist[0])
else:
if number_to_print == 0 or number_to_print >= len(alist):
second_last_index = len(alist) - 1
joining_phrase = unicode(_(u' and '))
last_name = alist[len(alist)-1].__unicode__()
else:
second_last_index = number_to_print
joining_phrase = u' ' + _('et al.')
last_name = ''
authors = u', '.join([a.__unicode__() \
for a in alist[0:second_last_index]]) + joining_phrase + \
last_name
return authors
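# Illustrative behaviour (hypothetical credit entries, not part of this module):
# if the ordered credits render as u"A", u"B" and u"C", then
#   credit_list(obj)                    -> u'A, B and C'
#   credit_list(obj, number_to_print=2) -> u'A, B et al.'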
| mit | 5,624,823,801,594,170,000 | 29.166667 | 76 | 0.539858 | false | 3.770833 | false | false | false |
ge0rgi/cinder | cinder/volume/drivers/datera/datera_api21.py | 1 | 34806 | # Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import uuid
import eventlet
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder.i18n import _, _LI, _LW, _LE
from cinder import exception
from cinder.volume import utils as volutils
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
class DateraApi(object):
# =================
# = Create Volume =
# =================
def _create_volume_2_1(self, volume):
tenant = self._create_tenant(volume)
policies = self._get_policies_for_resource(volume)
num_replicas = int(policies['replica_count'])
storage_name = policies['default_storage_name']
volume_name = policies['default_volume_name']
template = policies['template']
if template:
app_params = (
{
'create_mode': "openstack",
# 'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'app_template': '/app_templates/{}'.format(template)
})
else:
app_params = (
{
'create_mode': "openstack",
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'access_control_mode': 'deny_all',
'storage_instances': [
{
'name': storage_name,
'volumes': [
{
'name': volume_name,
'size': volume['size'],
'replica_count': num_replicas,
'snapshot_policies': [
]
}
]
}
]
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
'post',
body=app_params,
api_version='2.1',
tenant=tenant)
self._update_qos_2_1(volume, policies, tenant)
metadata = {}
volume_type = self._get_volume_type_obj(volume)
if volume_type:
metadata.update({datc.M_TYPE: volume_type['name']})
metadata.update(self.HEADER_DATA)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
self._store_metadata(url, metadata, "create_volume_2_1", tenant)
# =================
# = Extend Volume =
# =================
def _extend_volume_2_1(self, volume, new_size):
tenant = self._create_tenant(volume)
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template:
LOG.warning(_LW("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s"),
                        {'volume': volume, 'template': template})
return
# Offline App Instance, if necessary
reonline = False
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
api_version='2.1', tenant=tenant)
if app_inst['data']['admin_state'] == 'online':
reonline = True
self._detach_volume_2_1(None, volume)
# Change Volume Size
app_inst = datc._get_name(volume['id'])
data = {
'size': new_size
}
store_name, vol_name = self._scrape_template(policies)
self._issue_api_request(
datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(app_inst),
method='put',
body=data,
api_version='2.1',
tenant=tenant)
# Online Volume, if it was online before
if reonline:
self._create_export_2_1(None, volume, None)
# =================
# = Cloned Volume =
# =================
def _create_cloned_volume_2_1(self, volume, src_vref):
policies = self._get_policies_for_resource(volume)
tenant = self._create_tenant(volume)
store_name, vol_name = self._scrape_template(policies)
src = "/" + datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(datc._get_name(src_vref['id']))
data = {
'create_mode': 'openstack',
'name': datc._get_name(volume['id']),
'uuid': str(volume['id']),
'clone_volume_src': {'path': src},
}
self._issue_api_request(
datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2.1',
tenant=tenant)
if volume['size'] > src_vref['size']:
self._extend_volume_2_1(volume, volume['size'])
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
volume_type = self._get_volume_type_obj(volume)
if volume_type:
vtype = volume_type['name']
else:
vtype = None
metadata = {datc.M_TYPE: vtype,
datc.M_CLONE: datc._get_name(src_vref['id'])}
self._store_metadata(url, metadata, "create_cloned_volume_2_1", tenant)
# =================
# = Delete Volume =
# =================
def _delete_volume_2_1(self, volume):
self.detach_volume(None, volume)
tenant = self._create_tenant(volume)
app_inst = datc._get_name(volume['id'])
try:
self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst),
method='delete',
api_version='2.1',
tenant=tenant)
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(volume['id']))
# =================
# = Ensure Export =
# =================
def _ensure_export_2_1(self, context, volume, connector):
self.create_export(context, volume, connector)
# =========================
# = Initialize Connection =
# =========================
def _initialize_connection_2_1(self, volume, connector):
# Now online the app_instance (which will online all storage_instances)
multipath = connector.get('multipath', False)
tenant = self._create_tenant(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
app_inst = self._issue_api_request(
url, method='put', body=data, api_version='2.1', tenant=tenant)[
'data']
storage_instances = app_inst["storage_instances"]
si = storage_instances[0]
portal = si['access']['ips'][0] + ':3260'
iqn = si['access']['iqn']
if multipath:
portals = [p + ':3260' for p in si['access']['ips']]
iqns = [iqn for _ in si['access']['ips']]
lunids = [self._get_lunid() for _ in si['access']['ips']]
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_iqns': iqns,
'target_portal': portal,
'target_portals': portals,
'target_lun': self._get_lunid(),
'target_luns': lunids,
'volume_id': volume['id'],
'discard': False}}
else:
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_portal': portal,
'target_lun': self._get_lunid(),
'volume_id': volume['id'],
'discard': False}}
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
self._store_metadata(url, {}, "initialize_connection_2_1", tenant)
return result
# =================
# = Create Export =
# =================
def _create_export_2_1(self, context, volume, connector):
tenant = self._create_tenant(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
self._issue_api_request(
url, method='put', body=data, api_version='2.1', tenant=tenant)
# Check if we've already setup everything for this volume
url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
storage_instances = self._issue_api_request(
url, api_version='2.1', tenant=tenant)
# Handle adding initiator to product if necessary
# Then add initiator to ACL
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
if (connector and
connector.get('initiator') and
not policies['acl_allow_all']):
initiator_name = "OpenStack_{}_{}".format(
self.driver_prefix, str(uuid.uuid4())[:4])
initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id']
found = False
initiator = connector['initiator']
current_initiators = self._issue_api_request(
'initiators', api_version='2.1', tenant=tenant)
for iqn, values in current_initiators.items():
if initiator == iqn:
found = True
break
# If we didn't find a matching initiator, create one
if not found:
data = {'id': initiator, 'name': initiator_name}
# Try and create the initiator
# If we get a conflict, ignore it because race conditions
self._issue_api_request("initiators",
method="post",
body=data,
conflict_ok=True,
api_version='2.1',
tenant=tenant)
# Create initiator group with initiator in it
initiator_path = "/initiators/{}".format(initiator)
initiator_group_path = "/initiator_groups/{}".format(
initiator_group)
ig_data = {'name': initiator_group,
'members': [{'path': initiator_path}]}
self._issue_api_request("initiator_groups",
method="post",
body=ig_data,
conflict_ok=True,
api_version='2.1',
tenant=tenant)
# Create ACL with initiator group as reference for each
# storage_instance in app_instance
# TODO(_alastor_): We need to avoid changing the ACLs if the
# template already specifies an ACL policy.
for si in storage_instances['data']:
acl_url = (datc.URL_TEMPLATES['si']() +
"/{}/acl_policy").format(
datc._get_name(volume['id']), si['name'])
existing_acl = self._issue_api_request(acl_url,
method="get",
api_version='2.1',
tenant=tenant)['data']
data = {}
data['initiators'] = existing_acl['initiators']
data['initiator_groups'] = existing_acl['initiator_groups']
data['initiator_groups'].append({"path": initiator_group_path})
self._issue_api_request(acl_url,
method="put",
body=data,
api_version='2.1',
tenant=tenant)
if connector and connector.get('ip'):
# Case where volume_type has non default IP Pool info
if policies['ip_pool'] != 'default':
initiator_ip_pool_path = self._issue_api_request(
"access_network_ip_pools/{}".format(
policies['ip_pool']),
api_version='2.1',
tenant=tenant)['path']
# Fallback to trying reasonable IP based guess
else:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
connector['ip'])
ip_pool_url = datc.URL_TEMPLATES['si_inst'](
store_name).format(datc._get_name(volume['id']))
ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
self._issue_api_request(ip_pool_url,
method="put",
body=ip_pool_data,
api_version='2.1',
tenant=tenant)
# Check to ensure we're ready for go-time
self._si_poll_2_1(volume, policies, tenant)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
metadata = {}
# TODO(_alastor_): Figure out what we want to post with a create_export
# call
self._store_metadata(url, metadata, "create_export_2_1", tenant)
# =================
# = Detach Volume =
# =================
def _detach_volume_2_1(self, context, volume, attachment=None):
tenant = self._create_tenant(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'offline',
'force': True
}
try:
self._issue_api_request(url, method='put', body=data,
api_version='2.1', tenant=tenant)
except exception.NotFound:
msg = _LI("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_): Make acl cleaning multi-attach aware
self._clean_acl_2_1(volume, tenant)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
metadata = {}
try:
self._store_metadata(url, metadata, "detach_volume_2_1", tenant)
except exception.NotFound:
# If the object isn't found, we probably are deleting/detaching
# an already deleted object
pass
def _clean_acl_2_1(self, volume, tenant):
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
acl_url = (datc.URL_TEMPLATES["si_inst"](
store_name) + "/acl_policy").format(datc._get_name(volume['id']))
try:
initiator_group = self._issue_api_request(
acl_url, api_version='2.1', tenant=tenant)['data'][
'initiator_groups'][0]['path']
initiator_iqn_path = self._issue_api_request(
initiator_group.lstrip("/"), api_version='2.1', tenant=tenant)[
"data"]["members"][0]["path"]
# Clear out ACL and delete initiator group
self._issue_api_request(acl_url,
method="put",
body={'initiator_groups': []},
api_version='2.1',
tenant=tenant)
self._issue_api_request(initiator_group.lstrip("/"),
method="delete",
api_version='2.1',
tenant=tenant)
if not self._check_for_acl_2(initiator_iqn_path):
self._issue_api_request(initiator_iqn_path.lstrip("/"),
method="delete",
api_version='2.1',
tenant=tenant)
except (IndexError, exception.NotFound):
LOG.debug("Did not find any initiator groups for volume: %s",
volume)
# ===================
# = Create Snapshot =
# ===================
def _create_snapshot_2_1(self, snapshot):
tenant = self._create_tenant(snapshot)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
url_template = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
url = url_template.format(datc._get_name(snapshot['volume_id']))
snap_params = {
'uuid': snapshot['id'],
}
snap = self._issue_api_request(url, method='post', body=snap_params,
api_version='2.1', tenant=tenant)
snapu = "/".join((url, snap['data']['timestamp']))
self._snap_poll_2_1(snapu, tenant)
# ===================
# = Delete Snapshot =
# ===================
def _delete_snapshot_2_1(self, snapshot):
tenant = self._create_tenant(snapshot)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu,
method='get',
api_version='2.1',
tenant=tenant)
try:
for snap in snapshots['data']:
if snap['uuid'] == snapshot['id']:
url_template = snapu + '/{}'
url = url_template.format(snap['timestamp'])
self._issue_api_request(
url,
method='delete',
api_version='2.1',
tenant=tenant)
break
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(snapshot['id']))
# ========================
# = Volume From Snapshot =
# ========================
def _create_volume_from_snapshot_2_1(self, volume, snapshot):
tenant = self._create_tenant(volume)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(
snapu, method='get', api_version='2.1', tenant=tenant)
for snap in snapshots['data']:
if snap['uuid'] == snapshot['id']:
found_ts = snap['utc_ts']
break
else:
raise exception.NotFound
snap_url = (snap_temp + '/{}').format(
datc._get_name(snapshot['volume_id']), found_ts)
self._snap_poll_2_1(snap_url, tenant)
src = "/" + snap_url
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'clone_snapshot_src': {'path': src},
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
method='post',
body=app_params,
api_version='2.1',
tenant=tenant)
# ==========
# = Manage =
# ==========
def _manage_existing_2_1(self, volume, existing_ref):
# Only volumes created under the requesting tenant can be managed in
# the v2.1 API. Eg. If tenant A is the tenant for the volume to be
# managed, it must also be tenant A that makes this request.
# This will be fixed in a later API update
tenant = self._create_tenant(volume)
existing_ref = existing_ref['source-name']
if existing_ref.count(":") not in (2, 3):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"tenant:app_inst_name:storage_inst_name:vol_name or "
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name = existing_ref.split(":")[0]
try:
(tenant, app_inst_name, storage_inst_name,
vol_name) = existing_ref.split(":")
except TypeError:
app_inst_name, storage_inst_name, vol_name = existing_ref.split(
":")
tenant = None
LOG.debug("Managing existing Datera volume %s "
"Changing name to %s",
datc._get_name(volume['id']), existing_ref)
data = {'name': datc._get_name(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
app_inst_name), method='put', body=data, api_version='2.1',
tenant=tenant)
# ===================
# = Manage Get Size =
# ===================
def _manage_existing_get_size_2_1(self, volume, existing_ref):
tenant = self._create_tenant(volume)
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name, si_name, vol_name = existing_ref.split(":")
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst_name),
api_version='2.1', tenant=tenant)
return self._get_size_2_1(
volume, tenant, app_inst, si_name, vol_name)
def _get_size_2_1(self, volume, tenant=None, app_inst=None, si_name=None,
vol_name=None):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
policies = self._get_policies_for_resource(volume)
si_name = si_name if si_name else policies['default_storage_name']
vol_name = vol_name if vol_name else policies['default_volume_name']
if not app_inst:
vol_url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
app_inst = self._issue_api_request(
vol_url, api_version='2.1', tenant=tenant)['data']
if 'data' in app_inst:
app_inst = app_inst['data']
sis = app_inst['storage_instances']
found_si = None
for si in sis:
if si['name'] == si_name:
found_si = si
break
found_vol = None
for vol in found_si['volumes']:
if vol['name'] == vol_name:
found_vol = vol
size = found_vol['size']
return size
# =========================
# = Get Manageable Volume =
# =========================
def _get_manageable_volumes_2_1(self, cinder_volumes, marker, limit,
offset, sort_keys, sort_dirs):
# Use the first volume to determine the tenant we're working under
if cinder_volumes:
tenant = self._create_tenant(cinder_volumes[0])
else:
tenant = None
LOG.debug("Listing manageable Datera volumes")
app_instances = self._issue_api_request(
datc.URL_TEMPLATES['ai'](), api_version='2.1',
tenant=tenant)['data']
results = []
cinder_volume_ids = [vol['id'] for vol in cinder_volumes]
for ai in app_instances:
ai_name = ai['name']
reference = None
size = None
safe_to_manage = False
reason_not_safe = ""
cinder_id = None
extra_info = None
if re.match(datc.UUID4_RE, ai_name):
cinder_id = ai_name.lstrip(datc.OS_PREFIX)
if (not cinder_id and
ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids):
safe_to_manage, reason_not_safe = self._is_manageable_2_1(ai)
if safe_to_manage:
si = list(ai['storage_instances'].values())[0]
si_name = si['name']
vol = list(si['volumes'].values())[0]
vol_name = vol['name']
size = vol['size']
reference = {"source-name": "{}:{}:{}".format(
ai_name, si_name, vol_name)}
results.append({
'reference': reference,
'size': size,
'safe_to_manage': safe_to_manage,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': extra_info})
page_results = volutils.paginate_entries_list(
results, marker, limit, offset, sort_keys, sort_dirs)
return page_results
def _is_manageable_2_1(self, app_inst):
if len(app_inst['storage_instances']) == 1:
si = list(app_inst['storage_instances'].values())[0]
if len(si['volumes']) == 1:
return (True, "")
return (False,
"App Instance has more than one storage instance or volume")
# ============
# = Unmanage =
# ============
def _unmanage_2_1(self, volume):
tenant = self._create_tenant(volume)
LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
volume['id'], datc._get_unmanaged(volume['id']))
data = {'name': datc._get_unmanaged(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
method='put',
body=data,
api_version='2.1',
tenant=tenant)
# ================
# = Volume Stats =
# ================
# =========
# = Login =
# =========
# ===========
# = Tenancy =
# ===========
def _create_tenant(self, volume=None):
# Create the Datera tenant if specified in the config
# Otherwise use the tenant provided
if self.tenant_id is None:
tenant = None
elif self.tenant_id.lower() == "map" and volume:
# Convert dashless uuid to uuid with dashes
# Eg: 0e33e95a9b154d348c675a1d8ea5b651 -->
# 0e33e95a-9b15-4d34-8c67-5a1d8ea5b651
tenant = datc._get_name(str(uuid.UUID(volume["project_id"])))
elif self.tenant_id.lower() == "map" and not volume:
tenant = None
else:
tenant = self.tenant_id
if tenant:
params = {'name': tenant}
self._issue_api_request(
'tenants', method='post', body=params, conflict_ok=True,
api_version='2.1')
return tenant
# ============
# = Metadata =
# ============
def _get_metadata(self, obj_url, tenant):
url = "/".join((obj_url.rstrip("/"), "metadata"))
mdata = self._issue_api_request(
url, api_version="2.1", tenant=tenant).get("data")
# Make sure we only grab the relevant keys
filter_mdata = {k: json.loads(mdata[k])
for k in mdata if k in datc.M_KEYS}
return filter_mdata
def _store_metadata(self, obj_url, data, calling_func_name, tenant):
mdata = self._get_metadata(obj_url, tenant)
new_call_entry = (calling_func_name, self.HEADER_DATA['Datera-Driver'])
if mdata.get(datc.M_CALL):
mdata[datc.M_CALL].append(new_call_entry)
else:
mdata[datc.M_CALL] = [new_call_entry]
mdata.update(data)
mdata.update(self.HEADER_DATA)
        data_s = {k: json.dumps(v) for k, v in mdata.items()}
url = "/".join((obj_url.rstrip("/"), "metadata"))
return self._issue_api_request(url, method="put", api_version="2.1",
body=data_s, tenant=tenant)
# =========
# = Login =
# =========
def _login_2_1(self):
"""Use the san_login and san_password to set token."""
body = {
'name': self.username,
'password': self.password
}
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
self.datera_api_token = None
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request(
'login', 'put', body=body, sensitive=True, api_version='2.1',
tenant=None)
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
# ===========
# = Polling =
# ===========
def _snap_poll_2_1(self, url, tenant):
eventlet.sleep(datc.DEFAULT_SNAP_SLEEP)
TIMEOUT = 10
retry = 0
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
snap = self._issue_api_request(url,
api_version='2.1',
tenant=tenant)['data']
if snap['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Snapshot not ready.'))
def _si_poll_2_1(self, volume, policies, tenant):
# Initial 4 second sleep required for some Datera versions
eventlet.sleep(datc.DEFAULT_SI_SLEEP)
TIMEOUT = 10
retry = 0
check_url = datc.URL_TEMPLATES['si_inst'](
policies['default_storage_name']).format(
datc._get_name(volume['id']))
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
si = self._issue_api_request(check_url,
api_version='2.1',
tenant=tenant)['data']
if si['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
# ================
# = Volume Stats =
# ================
def _get_volume_stats_2_1(self, refresh=False):
if refresh or not self.cluster_stats:
try:
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request(
'system', api_version='2.1')['data']
if 'uuid' not in results:
LOG.error(_LE(
'Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get(
'volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': (
int(results['total_capacity']) / units.Gi),
'free_capacity_gb': (
int(results['available_capacity']) / units.Gi),
'reserved_percentage': 0,
'QoS_support': True,
}
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
return self.cluster_stats
# =======
# = QoS =
# =======
def _update_qos_2_1(self, resource, policies, tenant):
url = datc.URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(datc._get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
if fpolicies:
self._issue_api_request(url, 'post', body=fpolicies,
api_version='2.1', tenant=tenant)
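        # Rough sketch of the filtering above (the policy keys are illustrative):
        # a policies dict like {'total_iops_max': '1000', 'read_bandwidth_max': '0',
        # 'replica_count': '3'} reduces to fpolicies == {'total_iops_max': 1000},
        # since only keys ending in "max" with a value greater than zero are posted.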
| apache-2.0 | -6,069,262,351,423,010,000 | 38.687571 | 79 | 0.489312 | false | 4.243599 | false | false | false |
bxlab/bx-python | scripts/interval_count_intersections.py | 1 | 1083 | #!/usr/bin/env python
"""
Read two lists of intervals (with chromosomes) and count the number of entries
in the second set that intersect any entry in the first set.
TODO: This could use bitsets rather than the intervals package, would it be
faster?
usage: %prog bed1 bed2 > out
"""
import sys
from bx import intervals
from bx import misc
def main():
intersecters = {}
# Read ranges
for chr, start, end in read_intervals(misc.open_compressed(sys.argv[1])):
if chr not in intersecters:
intersecters[chr] = intervals.Intersecter()
intersecters[chr].add_interval(intervals.Interval(start, end))
# Count intersection
total = 0
for chr, start, end in read_intervals(misc.open_compressed(sys.argv[2])):
if chr in intersecters:
intersection = intersecters[chr].find(start, end)
if intersection:
total += 1
print(total)
def read_intervals(input):
for line in input:
fields = line.split()
yield fields[0], int(fields[1]), int(fields[2])
main()
| mit | -1,869,477,446,070,604,800 | 21.102041 | 78 | 0.650046 | false | 3.760417 | false | false | false |
mdeff/ntds_2017 | projects/reports/movie_network/python/costs_function.py | 1 | 2476 | import numpy as np
import pandas as pd
from tqdm import tqdm
useful_columns = ['genres', 'keywords', 'vote_average']
Movies = pd.read_csv("../Datasets/Transformed.csv", usecols=useful_columns)
#Constant definition
#Cost added if the first genre is similar between two films
first_genre = 5
#Cost added if the secondary genre is similar between two films
second_genre = 1
#Cost added by similar keyword identical between two films
keyword_cost = 1
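# Worked example of the scoring (hypothetical films): if film A has genres
# ["Action", "Comedy"] and film B has ["Action", "Comedy", "Drama"], the shared
# first genre adds first_genre (5), the shared secondary genre "Comedy" adds
# second_genre (1), and every keyword in common adds keyword_cost (1), so two
# shared keywords would give a base cost of 5 + 1 + 2 = 8 before the vote ratio.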
def get_genres(film):
genres = str(film['genres'])
if genres == 'nan':
return[]
else:
genres = genres.split(",")
return genres
def get_keywords(film):
kw = str(film['keywords'])
if kw == 'nan':
return[]
else:
kw = kw.split(",")
return kw
"""Define the cost between the film given in index and the others one."""
costs = np.zeros([Movies.shape[0],Movies.shape[0]])
Movies = Movies.loc[Movies['vote_average'] > 0]
for i in tqdm(range(0,Movies.shape[0])):
current_film = Movies.iloc[i]
genres_current = get_genres(current_film)
kw_current = get_keywords(current_film)
vote_current = current_film['vote_average']
for j in range(i,Movies.shape[0]):
cost = 0
b_film = Movies.iloc[j]
genres_b = get_genres(b_film)
vote_b = b_film['vote_average']
        # First we only compare the first genre to determine similarity, because it is
        # more important than the other genres.
        if len(genres_current) > 0 and len(genres_b) > 0:
            if genres_current[0] == genres_b[0]:
                cost += first_genre
            # This gives us the number of similar secondary genres. We pop the first
            # genre of film B because it was already compared above.
            genres_b.pop(0)
            cost += np.sum(np.in1d(genres_current, genres_b, assume_unique=True)) * second_genre
kw_b = get_keywords(b_film)
        # This gives us the number of similar keywords.
        cost += np.sum(np.in1d(kw_current, kw_b, assume_unique=True)) * keyword_cost
        # impossible here because we ignore too much popularity
        # cost = (cost * popularity_b/100) / (popularity_current/100)
if vote_current == 0:
costs[i,j] = cost
else:
costs[i,j] = cost + vote_b / vote_current
if vote_b == 0:
costs[j,i] = cost
else:
costs[j,i] = cost + vote_current / vote_b
np.savez_compressed("../Datasets/costs_2.npz", costs, costs = costs)
| mit | 1,095,156,459,287,710,000 | 32.013333 | 123 | 0.611874 | false | 3.350474 | false | false | false |
kindly/reformed | database/util.py | 1 | 22813 | import datetime
import decimal
import os
import time
import errno
import formencode as fe
import data_loader
JOINS_DEEP = 6
# root_dir holds the root directory of the application
root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
root_dir = os.path.normpath(root_dir)
class Holder(object):
def __init__(self, **kw):
self.__dict__ = kw
def __getattr__(self, value):
return
def get_dir(file = None, extra_path = None):
"""path from parent directory of this this file
file:
file to be added to end of path
extra path:
path from this directory"""
global root_dir
if not file:
if extra_path:
return os.path.join(root_dir, extra_path)
else:
return root_dir
if extra_path:
return os.path.join(os.path.join(root_dir, extra_path), file)
else:
return os.path.join(root_dir, file)
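# Illustrative calls, assuming this module lives in <root>/database/util.py
# (the file and directory names below are made up):
#   get_dir()                      -> <root>
#   get_dir("schema.py", "custom") -> <root>/custom/schema.py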
def swap_relations(relation_type):
if relation_type == "onetomany":
return "manytoone"
if relation_type == "manytoone":
return "onetomany"
return "onetoone"
def file_length(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def check_two_entities(tables, node, rtables):
if node == "_core":
counter = 0
for table in tables[::-1]:
if rtables[table].name == "_core":
return False
if rtables[table].entity:
counter = counter + 1
if counter == 2:
return True
return False
if rtables[node].entity:
counter = 0
for table in tables[::-1]:
if rtables[table].entity:
counter = counter + 1
if rtables[table].name == "_core":
if counter > 0:
return True
else:
return False
return False
def last_entity(tables, rtables):
for table in tables[::-1]:
if rtables[table].entity:
return table
class Edge(object):
def __init__(self, node, node1 = None, node2 = None,
tables = None, path = None, path_no_rel = None,
name_changes = None, relation = None,
changed_table_names = None):
self.node = node
self.table = node
self.node1 = node1
self.node2 = node2
self.tables = tables or []
self.path = path or []
self.path_no_rel = path_no_rel or []
self.name_changes = name_changes or []
self.changed_table_names = changed_table_names or []
self.relation = relation
# make sure table path includes next table
if not relation:
return
if node == node1:
self.join = swap_relations(relation.type)
else:
self.join = relation.type
self.name = self.node
name_change = [name_change for name_change in name_changes if name_change]
self.alt_name = ".".join(name_change + [self.node])
self.changed_table_names = self.changed_table_names + [self.alt_name]
self.table_path = zip(self.changed_table_names, self.path)
def get_next_relation(gr, path_dict, edge, first = False):
node = edge.node
tables = edge.tables
current_path = edge.path
current_path_no_rel = edge.path_no_rel
last_edge = (edge.node1, edge.node2)
name_changes = edge.name_changes
changed_table_names = edge.changed_table_names
last_relation = edge.relation
if last_relation and last_relation.no_auto_path:
return
for edge in gr.out_edges(node, data = True):
node1, node2, relation = edge
relation = relation["relation"]
rtables = relation.table.database.tables
old_table = rtables[node1]
new_table = rtables[node2]
if relation == last_relation:
continue
if rtables[node1].lookup and not rtables[node2].lookup and not first:
continue
if len(tables) > 1 and rtables[node2].entity and rtables[tables[-1]].name == "_core" and rtables[tables[-2]].entity:
continue
if len(tables) > 1 and check_two_entities(tables, node2, rtables):
continue
split = None
relation_name = relation.name
all_relations = old_table.tables_with_relations[(node2, "here")]
auto_path = [rel for rel in all_relations if not rel.no_auto_path]
if len(auto_path) > 1:
split = relation_name[5:]
new_name_changes = name_changes + [split]
new_path = current_path + [relation_name]
new_path_no_rel = current_path_no_rel + [relation_name[5:]]
if len(new_path) > JOINS_DEEP:
continue
new_tables = tables + [node2]
edge = Edge(node2, node1, node2, new_tables, new_path, new_path_no_rel,
new_name_changes, relation, changed_table_names)
path_dict[tuple(new_path_no_rel)] = edge
get_next_relation(gr, path_dict, edge)
for edge in gr.in_edges(node, data = True):
node1, node2, relation = edge
relation = relation["relation"]
rtables = relation.table.database.tables
old_table = rtables[node2]
new_table = rtables[node1]
if relation == last_relation:
continue
if rtables[node2].lookup and not rtables[node1].lookup and not first:
continue
if len(tables) > 1 and rtables[node1].entity and rtables[tables[-1]].name == "_core" and rtables[tables[-2]].entity:
continue
if len(tables) > 1 and check_two_entities(tables, node1, rtables):
continue
split = None
all_relations = old_table.tables_with_relations[(node1, "other")]
auto_path = [rel for rel in all_relations if not rel.no_auto_path]
if len(auto_path) > 1:
split = relation.name[5:]
backref = relation.sa_options.get("backref", "_%s" % node1)
if not backref:
continue
new_path = current_path + [backref.encode("ascii")]
new_path_no_rel = current_path_no_rel + [backref.encode("ascii")]
new_name_changes = name_changes + [split]
if len(new_path) > JOINS_DEEP:
continue
new_tables = tables + [node1]
edge = Edge(node1, node1, node2, new_tables, new_path,
new_path_no_rel, new_name_changes, relation, changed_table_names)
path_dict[tuple(new_path_no_rel)] = edge
get_next_relation(gr, path_dict, edge)
def get_collection_of_obj(database, obj, parent_name):
table_name = obj._table.name
table = database.tables[table_name]
relation_path = table.local_tables[parent_name]
parent_obj = reduce(getattr, relation_path, obj)
parent_obj_table = parent_obj._table
relation_back_path = parent_obj_table.one_to_many_tables[table.name]
return reduce(getattr, relation_back_path, parent_obj)
def get_paths(gr, table):
    """Walk the relation graph 'gr' outward from 'table' and return a dict
    mapping each reachable join path (a tuple of relation names) to its Edge."""
    path_dict = {}
edge = Edge(table, tables = [table])
get_next_relation(gr, path_dict, edge, first = True)
return path_dict
def get_local_tables(path_dict, one_to_many_tables, local_tables, edge):
new_name = edge.alt_name
if edge.relation.no_auto_path:
return
if edge.join in ("manytoone", "onetoone"):
local_tables[new_name] = edge
else:
one_to_many_tables[new_name] = edge
return
for new_path, new_edge in path_dict.iteritems():
        if len(edge.path_no_rel) + 1 != len(new_path):
continue
if list(new_path)[:len(edge.path)] == list(edge.path_no_rel):
get_local_tables(path_dict, one_to_many_tables, local_tables, new_edge)
def make_local_tables(path_dict):
local_tables = {}
one_to_many_tables = {}
for path, edge in path_dict.iteritems():
if len(path) == 1:
get_local_tables(path_dict, one_to_many_tables, local_tables, edge)
return [local_tables, one_to_many_tables]
def create_table_path_list(path_dict):
table_paths_list = []
for k, v in path_dict.iteritems():
table_paths_list.append([k, v])
return table_paths_list
def create_table_path(table_path_list, table):
table_path = {}
table_path[table] = "root"
tables = set()
duplicate_tables = set()
for item in table_path_list:
key, edge = item
table_name = edge.node
relation = edge.relation
if relation.no_auto_path:
continue
if table_name in tables:
duplicate_tables.add(table_name)
tables.add(table_name)
for item in table_path_list:
key, edge = item
table_name = edge.node
relation = edge.relation
if relation.no_auto_path:
continue
elif edge.name in duplicate_tables:
table_path[edge.alt_name] = edge #[list(key), relation]
else:
table_path[edge.node] = edge #[list(key), relation]
return table_path
def get_fields_from_obj(obj):
return obj._table.columns.keys() + ["id"]
INTERNAL_FIELDS = ("_modified_by", "_modified_date", "id",
"_core_id", "_core_entity_id", "_version")
def convert_value(value):
if isinstance(value, datetime.datetime):
# use .isoformat not .strftime as this allows dates pre 1900
value = '%sZ' % value.isoformat()
if len(value) == 20:
value = '%s.000Z' % value[:19]
if isinstance(value, datetime.date):
value = '%sT00:00:00.000Z' % value.isoformat()
if isinstance(value, decimal.Decimal):
value = str(value)
return value
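# Sketch of the conversions above (example values only):
#   datetime.datetime(2010, 1, 2, 3, 4, 5) -> '2010-01-02T03:04:05.000Z'
#   datetime.date(2010, 1, 2)              -> '2010-01-02T00:00:00.000Z'
#   decimal.Decimal('1.50')                -> '1.50'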
def get_row_data(obj, fields = None, keep_all = False,
internal = False, basic = False,
table = None, alias_name = None):
row_data = {}
obj_table = obj._table.name
if not alias_name:
alias_name = obj_table
if not table:
table = obj_table
for field in get_fields_from_obj(obj):
if fields:
if not((field in fields) or (keep_all and field in INTERNAL_FIELDS)):
continue
else:
if not keep_all and field in INTERNAL_FIELDS:
continue
if obj_table == table:
field_name = field
else:
field_name = '%s.%s' % (alias_name, field)
value = getattr(obj, field)
if internal:
row_data[field_name] = value
else:
row_data[field_name] = convert_value(value)
if fields and obj_table == table:
row_data["id"] = obj.id
if keep_all and not fields:
if obj_table == table:
id_name = "id"
else:
id_name = "%s.id" % alias_name
row_data[id_name] = obj.id
return row_data
def create_data_dict(result, **kw):
data = {}
if hasattr(result, "_table"):
data[result.id] = get_row_data(result, **kw)
return data
for row in result:
data[row.id] = get_row_data(row, **kw)
return data
def create_data_array(result, **kw):
data = []
if hasattr(result, "_table"):
out = get_row_data(result, **kw)
out['id'] = result.id
data.append(out)
return data
for row in result:
out = get_row_data(row, **kw)
out['id'] = row.id
data.append(out)
return data
def split_table_fields(field_list, table_name):
table_field_dict = {}
for table_field in field_list:
table_field_list = table_field.split(".")
if len(table_field_list) == 2:
out_table, out_field = table_field_list
table_field_dict.setdefault(out_table,[]).append(out_field)
else:
table_field_dict.setdefault(table_name,[]).append(table_field)
return table_field_dict
def get_all_local_data(obj, **kw):
internal = kw.pop("internal", False)
fields = kw.pop("fields", False)
tables = kw.pop("tables", False)
allow_system = kw.pop("allow_system", False)
keep_all = kw.pop("keep_all", False)
extra_obj = kw.pop("extra_obj", None)
extra_fields = kw.pop("extra_fields", None)
table = obj._table
if fields:
row = get_row_with_fields(obj, fields, internal = internal, keep_all = keep_all)
elif tables:
row = get_row_with_table(obj, tables, keep_all = keep_all, internal = internal)
elif allow_system:
all_local_tables = table.local_tables.keys() + [table.name]
row = get_row_with_table(obj, all_local_tables, keep_all = keep_all, internal = internal)
else:
all_local_tables = table.local_tables.keys() + [table.name]
local_tables = [table for table in all_local_tables if not table.startswith("_")]
row = get_row_with_table(obj, local_tables, keep_all = keep_all, internal = internal)
if extra_obj:
for obj in extra_obj:
table_name = obj._table
data = get_row_data(obj, fields = fields, keep_all = keep_all, internal = internal, table = table.name)
row.update(data)
if extra_fields:
row.update(extra_fields)
return row
def get_row_with_table(obj, tables, keep_all = True, internal = False):
table = obj._table
row_data = {"__table": table.name}
database = table.database
local_tables = table.local_tables
if obj._table.name in tables:
data = get_row_data(obj, keep_all = keep_all, internal = internal, table = table.name)
row_data.update(data)
for aliased_table_name, edge in table.local_tables.iteritems():
#table_name = edge.node
if aliased_table_name not in tables:
continue
current_obj = recurse_relationships(database, obj, edge)
if not current_obj:
continue
data = get_row_data(current_obj, keep_all = keep_all, internal = internal,
table = table.name, alias_name = aliased_table_name)
row_data.update(data)
return row_data
def get_row_with_fields(obj, fields, keep_all = False, internal = False):
table = obj._table
table_field_dict = split_table_fields(fields, table.name)
row_data = {"__table": table.name}
database = table.database
local_tables = table.local_tables
if table.name in table_field_dict:
fields = table_field_dict[table.name]
if fields:
data = get_row_data(obj, fields = fields, keep_all = keep_all, internal = internal, table = table.name)
row_data.update(data)
for aliased_table_name, edge in table.local_tables.iteritems():
table_name = edge.node
if table_name not in table_field_dict or table_name == table.name:
continue
if table_name in table_field_dict:
fields = table_field_dict[table_name]
if fields:
current_obj = recurse_relationships(database, obj, edge)
if not current_obj:
continue
data = get_row_data(current_obj, fields = fields, keep_all = keep_all, internal = internal, table = table.name)
row_data.update(data)
return row_data
def recurse_relationships(database, obj, edge):
current_obj = obj
for relation in edge.path:
current_obj = getattr(current_obj, relation)
if not current_obj:
#current_obj = database.get_instance(table_name)
break
return current_obj
def load_local_data(database, data):
session = database.Session()
table = data["__table"]
rtable = database.tables[table]
record = {}
for key, value in data.iteritems():
if key.startswith("__"):
continue
fields = key.split(".")
if len(fields) == 1:
alias, field = table, fields[0]
else:
alias, field = fields
if alias == table:
path_key = "root"
else:
path_key = []
for item in rtable.local_tables[alias].path_no_rel:
path_key.extend([item,0])
path_key = tuple(path_key)
if isinstance(value, basestring):
value = value.decode("utf8")
record.setdefault(path_key, {})[field] = value
try:
data_loader.SingleRecord(database, table, all_rows = record).load()
session.close()
except fe.Invalid, e:
error_dict = {}
invalid_msg = ""
for key, invalid in e.error_dict.iteritems():
new_key = key[:-1:2]
field_name = key[-1]
if key[0] == table:
new_table = table
else:
edge = rtable.paths[new_key]
new_table = edge.name
error_dict["%s.%s" % (new_table,
field_name)] = invalid
invalid_msg = invalid_msg + "\n" + "\n".join(["%s\n" % inv.msg for inv in invalid])
if error_dict:
raise fe.Invalid(invalid_msg, data, record, None, error_dict)
session.close()
def get_table_from_instance(instance, database):
return database.tables[instance.__class__.__name__]
## {{{ http://code.activestate.com/recipes/576669/ (r18)
## Raymond Hettinger's proposal to go into Python 2.7
from collections import MutableMapping
class OrderedDict(dict, MutableMapping):
# Methods with direct access to underlying attributes
def __init__(self, *args, **kwds):
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
if not hasattr(self, '_keys'):
self._keys = []
self.update(*args, **kwds)
def clear(self):
del self._keys[:]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __iter__(self):
return iter(self._keys)
def __reversed__(self):
return reversed(self._keys)
def popitem(self):
if not self:
raise KeyError
key = self._keys.pop()
value = dict.pop(self, key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
inst_dict.pop('_keys', None)
return (self.__class__, (items,), inst_dict)
# Methods with indirect access via the above methods
setdefault = MutableMapping.setdefault
update = MutableMapping.update
pop = MutableMapping.pop
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
def __repr__(self):
pairs = ', '.join(map('%r: %r'.__mod__, self.items()))
return '%s({%s})' % (self.__class__.__name__, pairs)
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
## end of http://code.activestate.com/recipes/576669/ }}}
## From Evan Fosmark found on http://www.evanfosmark.com/2009/01/cross-platform-file-locking-support-in-python/
class FileLockException(Exception):
pass
class FileLock(object):
""" A file locking mechanism that has context-manager support so
you can use it in a with statement. This should be relatively cross
compatible as it doesn't rely on msvcrt or fcntl for the locking.
"""
def __init__(self, file_name, timeout=10, delay=.05):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = file_name + ".lock"
self.file_name = file_name
self.timeout = timeout
self.delay = delay
def acquire(self):
""" Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.
"""
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)
                break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= self.timeout:
raise FileLockException("Timeout occured.")
time.sleep(self.delay)
self.is_locked = True
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
It automatically releases the lock if it isn't locked.
"""
if self.is_locked:
self.release()
def __del__(self):
""" Make sure that the FileLock instance doesn't leave a lockfile
lying around.
"""
self.release()
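# Illustrative use as a context manager (the file name is made up):
#   with FileLock("schema.py", timeout=5) as lock:
#       pass  # read or write schema.py while holding schema.py.lock
# The lock file is created on entry and deleted again when the block exits.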
###
class SchemaLock(FileLock):
def __init__(self, database, timeout=10, delay=.05):
self.database = database
self.schema_path = database.get_file_path()
self.uuid_path = database.get_file_path(True)
self.is_locked = False
self.lockfile = self.schema_path + ".lock"
self.timeout = timeout
self.delay = delay
self.export_uuid = False
def export(self, uuid = False):
if uuid:
self.database.code_repr_export(self.uuid_path)
self.export_uuid = True
else:
self.database.code_repr_export(self.schema_path)
def __enter__(self):
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
It automatically releases the lock if it isn't locked.
"""
if self.is_locked:
self.release()
if not traceback and self.export_uuid:
os.rename(self.uuid_path, self.uuid_path[:-3] + "-complete.py")
| gpl-2.0 | -1,778,376,202,833,671,700 | 27.950508 | 124 | 0.577828 | false | 3.762038 | false | false | false |
google-research/recsim_ng | recsim_ng/applications/recsys_partially_observable_rl/interest_evolution_simulation_demo.py | 1 | 1402 | # coding=utf-8
# Copyright 2021 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Train a recommender with the interest_evolution_simulation."""
from absl import app
from recsim_ng.applications.recsys_partially_observable_rl import interest_evolution_simulation
from recsim_ng.applications.recsys_partially_observable_rl import simulation_config
def main(argv):
del argv
num_users = 1000
variables, trainable_variables = (
simulation_config.create_interest_evolution_simulation_network(
num_users=num_users))
interest_evolution_simulation.run_simulation(
num_training_steps=100,
horizon=100,
global_batch=num_users,
learning_rate=1e-4,
simulation_variables=variables,
trainable_variables=trainable_variables,
metric_to_optimize='cumulative_reward')
if __name__ == '__main__':
app.run(main)
| apache-2.0 | 5,941,458,815,071,267,000 | 33.195122 | 95 | 0.742511 | false | 3.809783 | false | false | false |
specify/specify7 | specifyweb/specify/api.py | 1 | 30535 | from urllib.parse import urlencode
import json
import re
import logging
logger = logging.getLogger(__name__)
from django import forms
from django.db import transaction
from django.http import (HttpResponse, HttpResponseBadRequest,
Http404, HttpResponseNotAllowed, QueryDict)
from django.core.exceptions import ObjectDoesNotExist, FieldError
from django.db.models.fields.related import ForeignKey
from django.db.models.fields import DateTimeField, FieldDoesNotExist, FloatField, DecimalField
# from django.utils.deprecation import CallableBool
from . import models
from .autonumbering import autonumber_and_save, AutonumberOverflowException
from .filter_by_col import filter_by_collection
from .auditlog import auditlog
# Regex matching api uris for extracting the model name and id number.
URI_RE = re.compile(r'^/api/specify/(\w+)/($|(\d+))')
class JsonEncoder(json.JSONEncoder):
"""Augmented JSON encoder that handles datetime and decimal objects."""
def default(self, obj):
from decimal import Decimal
# if isinstance(obj, CallableBool):
# return obj()
if isinstance(obj, Decimal):
return str(obj)
if hasattr(obj, 'isoformat'):
return obj.isoformat()
if isinstance(obj, bytes):
# assume byte data is utf encoded text.
# this works for things like app resources.
return obj.decode()
return json.JSONEncoder.default(self, obj)
def toJson(obj):
return json.dumps(obj, cls=JsonEncoder)
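# Example of the extended encoding (values are illustrative):
#   toJson({'timestamp': datetime.datetime(2020, 1, 2, 3, 4, 5),
#           'total': decimal.Decimal('9.95')})
#   -> '{"timestamp": "2020-01-02T03:04:05", "total": "9.95"}'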
class RecordSetException(Exception):
"""Raised for problems related to record sets."""
pass
class OptimisticLockException(Exception):
"""Raised when there is a problem related to optimistic locking."""
pass
class MissingVersionException(OptimisticLockException):
"""Raised when an object is expected to have an optimistic locking
version number, but none can be determined from the request.
"""
pass
class StaleObjectException(OptimisticLockException):
"""Raised when attempting to mutate a resource with a newer
version than the client has supplied.
"""
pass
class FilterError(Exception):
"""Raised when filter a resource collection using a bad value."""
pass
class OrderByError(Exception):
"""Raised for bad fields in order by clause."""
pass
class HttpResponseCreated(HttpResponse):
"""Returned to the client when a POST request succeeds and a new
resource is created.
"""
status_code = 201
def resource_dispatch(request, model, id):
"""Handles requests related to individual resources.
Determines the client's version of the resource.
Determines the logged-in user and collection from the request.
Dispatches on the request type.
De/Encodes structured data as JSON.
"""
request_params = QueryDict(request.META['QUERY_STRING'])
# Get the version the client has, if it is given
# in URL query string or in the HTTP if-match header.
try:
version = request_params['version']
except KeyError:
try:
version = request.META['HTTP_IF_MATCH']
except KeyError:
version = None
# Dispatch on the request type.
if request.method == 'GET':
data = get_resource(model, id, request.GET.get('recordsetid', None))
resp = HttpResponse(toJson(data), content_type='application/json')
elif request.method == 'PUT':
data = json.load(request)
# Look for a version field in the resource data itself.
try:
version = data['version']
except KeyError:
pass
obj = put_resource(request.specify_collection,
request.specify_user_agent,
model, id, version, data)
resp = HttpResponse(toJson(obj_to_data(obj)),
content_type='application/json')
elif request.method == 'DELETE':
delete_resource(request.specify_user_agent, model, id, version)
resp = HttpResponse('', status=204)
else:
# Unhandled request type.
resp = HttpResponseNotAllowed(['GET', 'PUT', 'DELETE'])
return resp
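# Sketch of the requests this handles (the table name and id below are illustrative):
#   GET    /api/specify/collectionobject/1234/            -> JSON for one resource
#   PUT    /api/specify/collectionobject/1234/?version=7  -> update with optimistic lock check
#   DELETE /api/specify/collectionobject/1234/            -> 204 on success
# The client's version can come from ?version=, the If-Match header, or the PUT body.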
class GetCollectionForm(forms.Form):
# Use the logged_in_collection to limit request
# to relevant items.
domainfilter = forms.ChoiceField(choices=(('true', 'true'), ('false', 'false')),
required=False)
# Return at most 'limit' items.
# Zero for all.
limit = forms.IntegerField(required=False)
# Return items starting from 'offset'.
offset = forms.IntegerField(required=False)
orderby = forms.CharField(required=False)
defaults = dict(
domainfilter=None,
limit=0,
offset=0,
orderby=None,
)
def clean_limit(self):
limit = self.cleaned_data['limit']
return 20 if limit is None else limit
def clean_offset(self):
offset = self.cleaned_data['offset']
return 0 if offset is None else offset
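# Example of a query string these fields are parsed from (values are illustrative):
#   ?domainfilter=true&limit=20&offset=40&orderby=timestampcreated
# A missing limit defaults to 20, and limit=0 means "return everything".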
def collection_dispatch(request, model):
"""Handles requests related to collections of resources.
Dispatches on the request type.
Determines the logged-in user and collection from the request.
De/Encodes structured data as JSON.
"""
if request.method == 'GET':
control_params = GetCollectionForm(request.GET)
if not control_params.is_valid():
return HttpResponseBadRequest(toJson(control_params.errors),
content_type='application/json')
try:
data = get_collection(request.specify_collection, model,
control_params.cleaned_data, request.GET)
except (FilterError, OrderByError) as e:
return HttpResponseBadRequest(e)
resp = HttpResponse(toJson(data), content_type='application/json')
elif request.method == 'POST':
obj = post_resource(request.specify_collection,
request.specify_user_agent,
model, json.load(request),
request.GET.get('recordsetid', None))
resp = HttpResponseCreated(toJson(obj_to_data(obj)),
content_type='application/json')
else:
# Unhandled request type.
resp = HttpResponseNotAllowed(['GET', 'POST'])
return resp
def get_model_or_404(name):
"""Lookup a specify model by name. Raise Http404 if not found."""
try:
return getattr(models, name.capitalize())
except AttributeError as e:
raise Http404(e)
def get_object_or_404(model, *args, **kwargs):
"""A version of get_object_or_404 that can accept a model name
in place of the model class."""
from django.shortcuts import get_object_or_404 as get_object
if isinstance(model, str):
model = get_model_or_404(model)
return get_object(model, *args, **kwargs)
def get_resource(name, id, recordsetid=None):
"""Return a dict of the fields from row 'id' in model 'name'.
If given a recordset id, the data will be suplemented with
data about the resource's relationship to the given record set.
"""
obj = get_object_or_404(name, id=int(id))
data = obj_to_data(obj)
if recordsetid is not None:
data['recordset_info'] = get_recordset_info(obj, recordsetid)
return data
def get_recordset_info(obj, recordsetid):
"""Return a dict of info about how the resource 'obj' is related to
the recordset with id 'recordsetid'.
"""
# Queryset of record set items in the given record set with
# the additional condition that they match the resource's table.
rsis = models.Recordsetitem.objects.filter(
recordset__id=recordsetid, recordset__dbtableid=obj.specify_model.tableId)
# Get the one which points to the resource 'obj'.
try:
rsi = rsis.get(recordid=obj.id)
except models.Recordsetitem.DoesNotExist:
return None
# Querysets for the recordset items before and after the one in question.
prev_rsis = rsis.filter(recordid__lt=obj.id).order_by('-recordid')
next_rsis = rsis.filter(recordid__gt=obj.id).order_by('recordid')
# Build URIs for the previous and the next recordsetitem, if present.
try:
prev = uri_for_model(obj.__class__, prev_rsis[0].recordid)
except IndexError:
prev = None
try:
next = uri_for_model(obj.__class__, next_rsis[0].recordid)
except IndexError:
next = None
return {
'recordsetid': rsi.recordset_id,
'total_count': rsis.count(),
'index': prev_rsis.count(),
'previous': prev,
'next': next
}
@transaction.atomic
def post_resource(collection, agent, name, data, recordsetid=None):
"""Create a new resource in the database.
collection - the collection the client is logged into.
agent - the agent associated with the specify user logged in.
name - the model name of the resource to be created.
data - a dict of the data for the resource to be created.
recordsetid - created resource will be added to the given recordset (optional)
"""
obj = create_obj(collection, agent, name, data)
if recordsetid is not None:
# add the resource to the record set
try:
recordset = models.Recordset.objects.get(id=recordsetid)
except models.Recordset.DoesNotExist as e:
raise RecordSetException(e)
if recordset.dbtableid != obj.specify_model.tableId:
# the resource is not of the right kind to go in the recordset
            raise RecordSetException(
                "expected %s, got %s when adding object to recordset" %
                (models.models_by_tableid[recordset.dbtableid], obj.__class__))
recordset.recordsetitems.create(recordid=obj.id)
return obj
def set_field_if_exists(obj, field, value):
"""Where 'obj' is a Django model instance, a resource object, check
if a field named 'field' exists and set it to 'value' if so. Do nothing otherwise.
"""
try:
f = obj._meta.get_field(field)
except FieldDoesNotExist:
return
if f.concrete:
setattr(obj, field, value)
def cleanData(model, data, agent):
"""Returns a copy of data with only fields that are part of model, removing
metadata fields and warning on unexpected extra fields."""
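    # For example (hypothetical input), {'catalognumber': '123', 'resource_uri': '...',
    # 'notafield': 1} yields {'catalognumber': '123'}: metadata keys are skipped
    # silently and unknown fields are dropped with a warning.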
cleaned = {}
for field_name in list(data.keys()):
if field_name in ('resource_uri', 'recordset_info'):
# These fields are meta data, not part of the resource.
continue
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
logger.warn('field "%s" does not exist in %s', field_name, model)
else:
cleaned[field_name] = data[field_name]
if model is models.Agent and not agent.specifyuser.is_admin():
# only admins can set the user field on agents
try:
del cleaned['specifyuser']
except KeyError:
pass
# guid should only be updatable for taxon and geography
if model not in (models.Taxon, models.Geography):
try:
del cleaned['guid']
except KeyError:
pass
# timestampcreated should never be updated.
try:
del cleaned['timestampcreated']
except KeyError:
pass
return cleaned
def create_obj(collection, agent, model, data, parent_obj=None):
"""Create a new instance of 'model' and populate it with 'data'."""
logger.debug("creating %s with data: %s", model, data)
if isinstance(model, str):
model = get_model_or_404(model)
data = cleanData(model, data, agent)
obj = model()
handle_fk_fields(collection, agent, obj, data)
set_fields_from_data(obj, data, False)
set_field_if_exists(obj, 'createdbyagent', agent)
set_field_if_exists(obj, 'collectionmemberid', collection.id)
try:
autonumber_and_save(collection, agent.specifyuser, obj)
except AutonumberOverflowException as e:
logger.warn("autonumbering overflow: %s", e)
if obj.id is not None: # was the object actually saved?
auditlog.insert(obj, agent, parent_obj)
handle_to_many(collection, agent, obj, data)
return obj
def should_audit(field):
if field.name == 'timestampmodified':
return False
else:
return True
def fld_change_info(obj, field, val):
if should_audit(field):
value = prepare_value(field, val)
if isinstance(field, FloatField) or isinstance(field, DecimalField):
value = value and float(value)
old_value = getattr(obj, field.name)
if str(old_value) != str(value):
return {'field_name': field.name, 'old_value': old_value, 'new_value': value}
else:
return None
def set_fields_from_data(obj, data, audit):
"""Where 'obj' is a Django model instance and 'data' is a dict,
set all fields provided by data that are not related object fields.
"""
dirty_flds = []
for field_name, val in list(data.items()):
field = obj._meta.get_field(field_name)
if not field.is_relation:
if audit:
fld_change = fld_change_info(obj, field, val)
if fld_change is not None:
dirty_flds.append(fld_change)
setattr(obj, field_name, prepare_value(field, val))
return dirty_flds
def is_dependent_field(obj, field_name):
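    # Summary (explanatory comment): a field is dependent when the Specify
    # datamodel marks it as such, or in the embedded-collectingevent and
    # embedded-paleocontext special cases handled below.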
return (
obj.specify_model.get_field(field_name).dependent
or (obj.__class__ is models.Collectionobject and
field_name == 'collectingevent' and
obj.collection.isembeddedcollectingevent)
or (field_name == 'paleocontext' and (
(obj.__class__ is models.Collectionobject and
obj.collection.discipline.paleocontextchildtable == "collectionobject" and
obj.collection.discipline.ispaleocontextembedded)
or (obj.__class__ is models.Collectingevent and
obj.discipline.paleocontextchildtable == "collectingevent" and
obj.discipline.ispaleocontextembedded)
or (obj.__class__ is models.Locality and
obj.discipline.paleocontextchildtable == "locality" and
obj.discipline.ispaleocontextembedded))))
def get_related_or_none(obj, field_name):
try:
return getattr(obj, field_name)
except ObjectDoesNotExist:
return None
def reorder_fields_for_embedding(cls, data):
"""For objects which can have embedded collectingevent or
paleocontext, we have to make sure the domain field gets set
first so that is_dependent_field will work.
"""
put_first = {
models.Collectionobject: 'collection',
models.Collectingevent: 'discipline',
models.Locality: 'discipline',
}.get(cls, None)
if put_first in data:
yield (put_first, data[put_first])
for key in data.keys() - {put_first}:
yield (key, data[key])
def handle_fk_fields(collection, agent, obj, data, checkchanges = False):
"""Where 'obj' is a Django model instance and 'data' is a dict,
set foreign key fields in the object from the provided data.
"""
items = reorder_fields_for_embedding(obj.__class__, data)
dependents_to_delete = []
dirty = []
for field_name, val in items:
field = obj._meta.get_field(field_name)
if not field.many_to_one: continue
old_related = get_related_or_none(obj, field_name)
dependent = is_dependent_field(obj, field_name)
old_related_id = None if old_related is None else old_related.id
new_related_id = None
if val is None:
setattr(obj, field_name, None)
if dependent and old_related:
dependents_to_delete.append(old_related)
elif isinstance(val, field.related_model):
# The related value was patched into the data by a parent object.
setattr(obj, field_name, val)
new_related_id = val.id
elif isinstance(val, str):
# The related object is given by a URI reference.
assert not dependent, "didn't get inline data for dependent field %s in %s: %r" % (field_name, obj, val)
fk_model, fk_id = parse_uri(val)
assert fk_model == field.related_model.__name__.lower()
assert fk_id is not None
setattr(obj, field_name, get_object_or_404(fk_model, id=fk_id))
new_related_id = fk_id
elif hasattr(val, 'items'): # i.e. it's a dict of some sort
# The related object is represented by a nested dict of data.
assert dependent, "got inline data for non dependent field %s in %s: %r" % (field_name, obj, val)
rel_model = field.related_model
if 'id' in val:
# The related object is an existing resource with an id.
# This should never happen.
rel_obj = update_obj(collection, agent,
rel_model, val['id'],
val['version'], val,
parent_obj=obj)
else:
# The related object is to be created.
rel_obj = create_obj(collection, agent,
rel_model, val,
parent_obj=obj)
setattr(obj, field_name, rel_obj)
if dependent and old_related and old_related.id != rel_obj.id:
dependents_to_delete.append(old_related)
new_related_id = rel_obj.id
data[field_name] = obj_to_data(rel_obj)
else:
raise Exception('bad foreign key field in data')
if checkchanges and str(old_related_id) != str(new_related_id):
dirty.append({'field_name': field_name, 'old_value': old_related_id, 'new_value': new_related_id})
return dependents_to_delete, dirty
def handle_to_many(collection, agent, obj, data):
"""For every key in the dict 'data' which is a *-to-many field in the
Django model instance 'obj', if nested data is provided, use it to
update the set of related objects.
The assumption is that provided data represents ALL related objects for
'obj'. Any existing related objects not in the nested data will be deleted.
Nested data items with ids will be updated. Those without ids will be
created as new resources.
"""
for field_name, val in list(data.items()):
field = obj._meta.get_field(field_name)
        if not field.is_relation or (field.many_to_one or field.one_to_one): continue # Skip non-relation and *-to-one fields.
if isinstance(val, list):
assert obj.specify_model.get_field(field_name).dependent, \
"got inline data for non dependent field %s in %s: %r" % (field_name, obj, val)
else:
# The field contains something other than nested data.
# Probably the URI of the collection of objects.
assert not obj.specify_model.get_field(field_name).dependent, \
"didn't get inline data for dependent field %s in %s: %r" % (field_name, obj, val)
continue
rel_model = field.related_model
ids = [] # Ids not in this list will be deleted at the end.
for rel_data in val:
rel_data[field.field.name] = obj
if 'id' in rel_data:
# Update an existing related object.
rel_obj = update_obj(collection, agent,
rel_model, rel_data['id'],
rel_data['version'], rel_data,
parent_obj=obj)
else:
# Create a new related object.
rel_obj = create_obj(collection, agent, rel_model, rel_data, parent_obj=obj)
ids.append(rel_obj.id) # Record the id as one to keep.
# Delete related objects not in the ids list.
# TODO: Check versions for optimistic locking.
to_delete = getattr(obj, field_name).exclude(id__in=ids)
for rel_obj in to_delete:
auditlog.remove(rel_obj, agent, obj)
to_delete.delete()
@transaction.atomic
def delete_resource(agent, name, id, version):
"""Delete the resource with 'id' and model named 'name' with optimistic
locking 'version'.
"""
obj = get_object_or_404(name, id=int(id))
return delete_obj(agent, obj, version)
def delete_obj(agent, obj, version=None, parent_obj=None):
# need to delete dependent -to-one records
# e.g. delete CollectionObjectAttribute when CollectionObject is deleted
# but have to delete the referring record first
dependents_to_delete = [_f for _f in (
get_related_or_none(obj, field.name)
for field in obj._meta.get_fields()
if (field.many_to_one or field.one_to_one) and is_dependent_field(obj, field.name)
) if _f]
auditlog.remove(obj, agent, parent_obj)
if version is not None:
bump_version(obj, version)
obj.delete()
for dep in dependents_to_delete:
delete_obj(agent, dep, parent_obj=obj)
@transaction.atomic
def put_resource(collection, agent, name, id, version, data):
return update_obj(collection, agent, name, id, version, data)
def update_obj(collection, agent, name, id, version, data, parent_obj=None):
"""Update the resource with 'id' in model named 'name' with given
'data'.
"""
obj = get_object_or_404(name, id=int(id))
data = cleanData(obj.__class__, data, agent)
fk_info = handle_fk_fields(collection, agent, obj, data, auditlog.isAuditingFlds())
dependents_to_delete = fk_info[0]
dirty = fk_info[1] + set_fields_from_data(obj, data, auditlog.isAuditingFlds())
try:
obj._meta.get_field('modifiedbyagent')
except FieldDoesNotExist:
pass
else:
obj.modifiedbyagent = agent
bump_version(obj, version)
obj.save(force_update=True)
auditlog.update(obj, agent, parent_obj, dirty)
for dep in dependents_to_delete:
delete_obj(agent, dep, parent_obj=obj)
handle_to_many(collection, agent, obj, data)
return obj
def bump_version(obj, version):
"""Implements the optimistic locking mechanism.
If the Django model resource 'obj' has a version field and it
does not match 'version' which comes from the client, an
OptimisticLockingException is raised. Otherwise the version
is incremented.
"""
# If the object has no version field, there's nothing to do.
try:
obj._meta.get_field('version')
except FieldDoesNotExist:
return
try:
version = int(version)
except (ValueError, TypeError):
raise MissingVersionException("%s object cannot be updated without version info" % obj.__class__.__name__)
# Try to update a row with the PK and the version number we have.
# If our version is stale, the rows updated will be 0.
logger.info("Incrementing version of %s object %d from %d.", obj.__class__.__name__, obj.id, version)
manager = obj.__class__._base_manager
updated = manager.filter(pk=obj.pk, version=version).update(version=version+1)
if not updated:
raise StaleObjectException("%s object %d is out of date" % (obj.__class__.__name__, obj.id))
obj.version = version + 1
def prepare_value(field, val):
if isinstance(field, DateTimeField) and isinstance(val, str):
return val.replace('T', ' ')
return val
def parse_uri(uri):
"""Return the model name and id from a resource or collection URI."""
match = URI_RE.match(uri)
if match is not None:
groups = match.groups()
return (groups[0], groups[2])
def obj_to_data(obj):
"""Return a (potentially nested) dictionary of the fields of the
Django model instance 'obj'.
"""
# Get regular and *-to-one fields.
fields = obj._meta.get_fields()
if isinstance(obj, models.Specifyuser):
# block out password field from users table
fields = [f for f in fields if f.name != 'password']
data = dict((field.name, field_to_val(obj, field))
for field in fields
if not (field.auto_created or field.one_to_many or field.many_to_many))
# Get *-to-many fields.
data.update(dict((ro.get_accessor_name(), to_many_to_data(obj, ro))
for ro in obj._meta.get_fields()
if ro.one_to_many))
# Add a meta data field with the resource's URI.
data['resource_uri'] = uri_for_model(obj.__class__.__name__.lower(), obj.id)
# Special cases
if isinstance(obj, models.Preparation):
data['isonloan'] = obj.isonloan()
elif isinstance(obj, models.Specifyuser):
data['isadmin'] = obj.is_admin()
elif isinstance(obj, models.Collectionobject):
dets = data['determinations']
currDets = [det['resource_uri'] for det in dets if det['iscurrent']] if dets is not None else []
        data['currentdetermination'] = currDets[0] if len(currDets) > 0 else None
elif isinstance(obj, models.Loan):
preps = data['loanpreparations']
items = 0
quantities = 0
unresolvedItems = 0
unresolvedQuantities = 0
        for prep in preps:
            items = items + 1
            prep_quantity = prep['quantity'] if prep['quantity'] is not None else 0
            prep_quantityresolved = prep['quantityresolved'] if prep['quantityresolved'] is not None else 0
            quantities = quantities + prep_quantity
            if not prep['isresolved']:
                unresolvedItems = unresolvedItems + 1
                unresolvedQuantities = unresolvedQuantities + (prep_quantity - prep_quantityresolved)
data['totalPreps'] = items
data['totalItems'] = quantities
data['unresolvedPreps'] = unresolvedItems
data['unresolvedItems'] = unresolvedQuantities
data['resolvedPreps'] = items - unresolvedItems
data['resolvedItems'] = quantities - unresolvedQuantities
return data
def to_many_to_data(obj, rel):
"""Return the URI or nested data of the 'rel' collection
depending on if the field is included in the 'inlined_fields' global.
"""
parent_model = rel.model.specify_model
field_name = rel.get_accessor_name()
field = parent_model.get_field(field_name)
if field is not None and field.dependent:
objs = getattr(obj, field_name)
return [obj_to_data(o) for o in objs.all()]
collection_uri = uri_for_model(rel.related_model)
return collection_uri + '?' + urlencode([(rel.field.name.lower(), str(obj.id))])
def field_to_val(obj, field):
"""Return the value or nested data or URI for the given field which should
be either a regular field or a *-to-one field.
"""
if field.many_to_one or (field.one_to_one and not field.auto_created):
if is_dependent_field(obj, field.name):
related_obj = getattr(obj, field.name)
if related_obj is None: return None
return obj_to_data(related_obj)
related_id = getattr(obj, field.name + '_id')
if related_id is None: return None
return uri_for_model(field.related_model, related_id)
else:
return getattr(obj, field.name)
def get_collection(logged_in_collection, model, control_params=GetCollectionForm.defaults, params={}):
"""Return a list of structured data for the objects from 'model'
subject to the request 'params'."""
if isinstance(model, str):
model = get_model_or_404(model)
filters = {}
for param, val in list(params.items()):
if param in control_params:
# filter out control parameters
continue
if param.endswith('__in'):
# this is a bit kludgy
val = val.split(',')
filters.update({param: val})
try:
objs = model.objects.filter(**filters)
except (ValueError, FieldError) as e:
raise FilterError(e)
if control_params['domainfilter'] == 'true':
objs = filter_by_collection(objs, logged_in_collection)
if control_params['orderby']:
try:
objs = objs.order_by(control_params['orderby'])
except FieldError as e:
raise OrderByError(e)
try:
return objs_to_data(objs, control_params['offset'], control_params['limit'])
except FieldError as e:
raise OrderByError(e)
def objs_to_data(objs, offset=0, limit=20):
"""Return a collection structure with a list of the data of given objects
and collection meta data.
"""
offset, limit = int(offset), int(limit)
total_count = objs.count()
if limit == 0:
objs = objs[offset:]
else:
objs = objs[offset:offset + limit]
return {'objects': [obj_to_data(o) for o in objs],
'meta': {'limit': limit,
'offset': offset,
'total_count': total_count}}
def uri_for_model(model, id=None):
"""Given a Django model and optionally an id, return a URI
for the collection or resource (if an id is given).
"""
if not isinstance(model, str):
model = model.__name__
uri = '/api/specify/%s/' % model.lower()
if id is not None:
uri += '%d/' % int(id)
return uri
class RowsForm(forms.Form):
fields = forms.CharField(required=True)
distinct = forms.CharField(required=False)
limit = forms.IntegerField(required=False)
def rows(request, model_name):
form = RowsForm(request.GET)
if not form.is_valid():
return HttpResponseBadRequest(toJson(form.errors), content_type='application/json')
try:
model = getattr(models, model_name.capitalize())
except AttributeError as e:
raise Http404(e)
query = model.objects.all()
fields = form.cleaned_data['fields'].split(',')
try:
query = query.values_list(*fields).order_by(*fields)
except FieldError as e:
return HttpResponseBadRequest(e)
query = filter_by_collection(query, request.specify_collection)
if form.cleaned_data['distinct']:
query = query.distinct()
limit = form.cleaned_data['limit']
if limit:
query = query[:limit]
data = list(query)
return HttpResponse(toJson(data), content_type='application/json')
| gpl-2.0 | -844,554,643,283,933,000 | 37.073566 | 116 | 0.628132 | false | 3.973842 | false | false | false |
danielkitta/libsigrokdecode | decoders/adns5020/pd.py | 4 | 3372 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Karl Palsson <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
regs = {
0: 'Product_ID',
1: 'Revision_ID',
2: 'Motion',
3: 'Delta_X',
4: 'Delta_Y',
5: 'SQUAL',
6: 'Shutter_Upper',
7: 'Shutter_Lower',
8: 'Maximum_Pixel',
9: 'Pixel_Sum',
0xa: 'Minimum_Pixel',
0xb: 'Pixel_Grab',
0xd: 'Mouse_Control',
0x3a: 'Chip_Reset',
0x3f: 'Inv_Rev_ID',
0x63: 'Motion_Burst',
}
class Decoder(srd.Decoder):
api_version = 2
id = 'adns5020'
name = 'ADNS-5020'
longname = 'Avago ADNS-5020 optical mouse sensor'
desc = 'Bidirectional command and data over an SPI-like protocol.'
license = 'gplv2'
inputs = ['spi']
outputs = ['adns5020']
annotations = (
('read', 'Register read commands'),
('write', 'Register write commands'),
('warning', 'Warnings'),
)
annotation_rows = (
('read', 'Read', (0,)),
('write', 'Write', (1,)),
('warnings', 'Warnings', (2,)),
)
def __init__(self, **kwargs):
self.ss_cmd, self.es_cmd = 0, 0
self.mosi_bytes = []
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putx(self, data):
self.put(self.ss_cmd, self.es_cmd, self.out_ann, data)
def put_warn(self, pos, msg):
self.put(pos[0], pos[1], self.out_ann, [2, [msg]])
def decode(self, ss, es, data):
ptype = data[0]
if ptype == 'CS-CHANGE':
# If we transition high mid-stream, toss out our data and restart.
cs_old, cs_new = data[1:]
if cs_old is not None and cs_old == 0 and cs_new == 1:
if len(self.mosi_bytes) not in [0, 2]:
self.put_warn([self.ss_cmd, es], 'Misplaced CS#!')
self.mosi_bytes = []
return
# Don't care about anything else.
if ptype != 'DATA':
return
mosi, miso = data[1:]
self.ss, self.es = ss, es
if len(self.mosi_bytes) == 0:
self.ss_cmd = ss
self.mosi_bytes.append(mosi)
# Writes/reads are mostly two transfers (burst mode is different).
if len(self.mosi_bytes) != 2:
return
self.es_cmd = es
cmd, arg = self.mosi_bytes
write = cmd & 0x80
reg = cmd & 0x7f
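        # Bit 7 of the command byte selects write (1) vs. read (0); the low 7
        # bits are the register address, e.g. cmd 0x82 -> write to reg 0x02
        # ('Motion') and cmd 0x02 -> read of the same register (illustrative).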
reg_desc = regs.get(reg, 'Reserved %#x' % reg)
if reg > 0x63:
reg_desc = 'Unknown'
if write:
self.putx([1, ['%s: %#x' % (reg_desc, arg)]])
else:
self.putx([0, ['%s: %d' % (reg_desc, arg)]])
self.mosi_bytes = []
| gpl-3.0 | 6,443,116,752,135,753,000 | 28.840708 | 78 | 0.571471 | false | 3.205323 | false | false | false |
EyuelAbebe/pygame_pluralsight | Game/Breakout.py | 1 | 1711 | __author__ = 'eyuelabebe'
import pygame
from Game import *
from Game.Scenes import *
from Game.Shared import GameConstants
class Breakout(object):
def __init__(self):
self.__lives = 5
self.__score = 0
self.__level = Level(self)
self.__level.load(0)
self.__pad = Pad((0, 0), 0)
self.__balls = [
Ball((0, 0), 0, self)
]
pygame.init()
pygame.mixer.init()
pygame.display.set_caption("Game Programming with Python and Pygame")
self.__clock = pygame.time.Clock()
self.screen = pygame.display.set_mode(GameConstants.SCREEN_SIZE, pygame.DOUBLEBUF, 32)
pygame.mouse.set_visible(0)
self.__scenes = (
PlayingGameScene(self),
MenuScene(self),
HighScoreScene(self),
GameOverScene(self)
)
self.__currentScene = 0
self.__sounds = ()
def start(self):
while 1:
self.__clock.tick(60)
self.screen.fill((0, 0, 0))
currentScene = self.__scenes[self.__currentScene]
currentScene.handleEvents(pygame.event.get())
currentScene.render()
pygame.display.update()
def changeScene(self, scene):
pass
def getLevel(self):
pass
def getScore(self):
pass
def increaseScore(self, score):
pass
def getLives(self):
pass
def getBalls(self):
pass
def getPad(self):
pass
def playSound(self, soundClip):
pass
def reduceLives(self):
pass
def increaseLives(self):
pass
def reset(self):
pass
Breakout().start() | mit | 1,009,826,851,908,065,300 | 18.235955 | 94 | 0.542957 | false | 3.933333 | false | false | false |
pycassa/pycassa | tests/test_pool_logger.py | 1 | 4992 | from unittest import TestCase
from nose.tools import assert_equal, assert_raises
from pycassa.logging.pool_stats_logger import StatsLogger
from pycassa.pool import ConnectionPool, NoConnectionAvailable, InvalidRequestError
__author__ = 'gilles'
_credentials = {'username': 'jsmith', 'password': 'havebadpass'}
class TestStatsLogger(TestCase):
def __init__(self, methodName='runTest'):
super(TestStatsLogger, self).__init__(methodName)
def setUp(self):
super(TestStatsLogger, self).setUp()
self.logger = StatsLogger()
def test_empty(self):
assert_equal(self.logger.stats, self.logger._stats)
def test_connection_created(self):
self.logger.connection_created({'level': 'info'})
self.logger.connection_created({'level': 'error'})
stats = self.logger.stats
assert_equal(stats['created']['success'], 1)
assert_equal(stats['created']['failure'], 1)
def test_connection_checked(self):
self.logger.connection_checked_out({})
self.logger.connection_checked_out({})
self.logger.connection_checked_in({})
stats = self.logger.stats
assert_equal(stats['checked_out'], 2)
assert_equal(stats['checked_in'], 1)
assert_equal(stats['opened'], {'current': 1, 'max': 2})
def test_connection_disposed(self):
self.logger.connection_disposed({'level': 'info'})
self.logger.connection_disposed({'level': 'error'})
stats = self.logger.stats
assert_equal(stats['disposed']['success'], 1)
assert_equal(stats['disposed']['failure'], 1)
def test_connection_recycled(self):
self.logger.connection_recycled({})
stats = self.logger.stats
assert_equal(stats['recycled'], 1)
def test_connection_failed(self):
self.logger.connection_failed({})
stats = self.logger.stats
assert_equal(stats['failed'], 1)
def test_obtained_server_list(self):
self.logger.obtained_server_list({})
stats = self.logger.stats
assert_equal(stats['list'], 1)
def test_pool_at_max(self):
self.logger.pool_at_max({})
stats = self.logger.stats
assert_equal(stats['at_max'], 1)
class TestInPool(TestCase):
def __init__(self, methodName='runTest'):
super(TestInPool, self).__init__(methodName)
def test_pool(self):
listener = StatsLogger()
pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
prefill=True, pool_timeout=0.1, timeout=1,
keyspace='PycassaTestKeyspace', credentials=_credentials,
listeners=[listener], use_threadlocal=False)
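        # Note (explanatory): pool_size=5 plus max_overflow=5 allows at most 10
        # connections to be checked out at once, which the assertions below
        # exercise before expecting NoConnectionAvailable.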
conns = []
for i in range(10):
conns.append(pool.get())
assert_equal(listener.stats['created']['success'], 10)
assert_equal(listener.stats['created']['failure'], 0)
assert_equal(listener.stats['checked_out'], 10)
assert_equal(listener.stats['opened'], {'current': 10, 'max': 10})
# Pool is maxed out now
assert_raises(NoConnectionAvailable, pool.get)
assert_equal(listener.stats['created']['success'], 10)
assert_equal(listener.stats['checked_out'], 10)
assert_equal(listener.stats['opened'], {'current': 10, 'max': 10})
assert_equal(listener.stats['at_max'], 1)
for i in range(0, 5):
pool.return_conn(conns[i])
assert_equal(listener.stats['disposed']['success'], 0)
assert_equal(listener.stats['checked_in'], 5)
assert_equal(listener.stats['opened'], {'current': 5, 'max': 10})
for i in range(5, 10):
pool.return_conn(conns[i])
assert_equal(listener.stats['disposed']['success'], 5)
assert_equal(listener.stats['checked_in'], 10)
conns = []
# These connections should come from the pool
for i in range(5):
conns.append(pool.get())
assert_equal(listener.stats['created']['success'], 10)
assert_equal(listener.stats['checked_out'], 15)
# But these will need to be made
for i in range(5):
conns.append(pool.get())
assert_equal(listener.stats['created']['success'], 15)
assert_equal(listener.stats['checked_out'], 20)
assert_equal(listener.stats['disposed']['success'], 5)
for i in range(10):
conns[i].return_to_pool()
assert_equal(listener.stats['checked_in'], 20)
assert_equal(listener.stats['disposed']['success'], 10)
assert_raises(InvalidRequestError, conns[0].return_to_pool)
assert_equal(listener.stats['checked_in'], 20)
assert_equal(listener.stats['disposed']['success'], 10)
print "in test:", id(conns[-1])
conns[-1].return_to_pool()
assert_equal(listener.stats['checked_in'], 20)
assert_equal(listener.stats['disposed']['success'], 10)
pool.dispose()
| mit | -3,283,971,811,469,260,000 | 36.533835 | 87 | 0.611979 | false | 3.842956 | true | false | false |
leouieda/funghi | setup.py | 1 | 1575 | import os
from setuptools import setup, find_packages
NAME = "cryptococcus"
# Get version and release info
with open(os.path.join(NAME, '_version.py')) as f:
exec(f.read())
# Use the README for the long description
with open('README.rst') as f:
long_description = f.read()
VERSION = __version__
AUTHOR = "Leonardo Uieda"
AUTHOR_EMAIL = "[email protected]"
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
DESCRIPTION = ''
LONG_DESCRIPTION = long_description
URL = "http://github.com/leouieda/{}".format(NAME)
LICENSE = "BSD 3-clause"
PLATFORMS = "OS Independent"
PACKAGES = find_packages()
PACKAGE_DATA = {'{}.datasets.data'.format(NAME): ['*']}
REQUIRES = ["numpy"]
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {} License".format(LICENSE),
"Operating System :: {}".format(PLATFORMS),
"Programming Language :: Python 3.5",
"Topic :: Scientific/Engineering"]
if __name__ == '__main__':
setup(name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
version=VERSION,
packages=PACKAGES,
package_data=PACKAGE_DATA,
install_requires=REQUIRES)
| bsd-3-clause | 4,163,296,218,932,432,400 | 30.5 | 71 | 0.622857 | false | 3.767943 | false | false | false |
proversity-org/edx-platform | cms/djangoapps/contentstore/tests/test_transcripts_utils.py | 1 | 27421 | # -*- coding: utf-8 -*-
""" Tests for transcripts_utils. """
import copy
import ddt
import json
import textwrap
import unittest
from uuid import uuid4
from django.conf import settings
from django.test.utils import override_settings
from django.utils import translation
from mock import Mock, patch
from nose.plugins.skip import SkipTest
from six import text_type
from contentstore.tests.utils import mock_requests_get
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.video_module import transcripts_utils
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
class TestGenerateSubs(unittest.TestCase):
"""Tests for `generate_subs` function."""
def setUp(self):
super(TestGenerateSubs, self).setUp()
self.source_subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
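        # generate_subs(speed, source_speed, subs) scales every timestamp by
        # speed / source_speed; e.g. (2, 1) doubles the start/end times while
        # (0.5, 1) and (1, 2) halve them, as the tests below assert
        # (explanatory comment, not part of the original test data).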
def test_generate_subs_increase_speed(self):
subs = transcripts_utils.generate_subs(2, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [200, 400, 480, 780, 2000],
'end': [400, 480, 760, 2000, 3000],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_1(self):
subs = transcripts_utils.generate_subs(0.5, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_2(self):
"""Test for correct devision during `generate_subs` process."""
subs = transcripts_utils.generate_subs(1, 2, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestSaveSubsToStore(SharedModuleStoreTestCase):
"""Tests for `save_subs_to_store` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_subs_content(self):
"""Remove, if subtitles content exists."""
for content_location in [self.content_location, self.content_copied_location]:
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
@classmethod
def sub_id_to_location(cls, sub_id):
"""
A helper to compute a static file location from a subtitle id.
"""
return StaticContent.compute_location(cls.course.id, u'subs_{0}.srt.sjson'.format(sub_id))
@classmethod
def setUpClass(cls):
super(TestSaveSubsToStore, cls).setUpClass()
cls.course = CourseFactory.create(
org=cls.org, number=cls.number, display_name=cls.display_name)
cls.subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
# Prefix it to ensure that unicode filenames are allowed
cls.subs_id = u'uniçøde_{}'.format(uuid4())
cls.subs_copied_id = u'cøpy_{}'.format(uuid4())
cls.content_location = cls.sub_id_to_location(cls.subs_id)
cls.content_copied_location = cls.sub_id_to_location(cls.subs_copied_id)
# incorrect subs
cls.unjsonable_subs = {1} # set can't be serialized
cls.unjsonable_subs_id = str(uuid4())
cls.content_location_unjsonable = cls.sub_id_to_location(cls.unjsonable_subs_id)
def setUp(self):
super(TestSaveSubsToStore, self).setUp()
self.addCleanup(self.clear_subs_content)
self.clear_subs_content()
def test_save_unicode_filename(self):
# Mock a video item
item = Mock(location=Mock(course_key=self.course.id))
transcripts_utils.save_subs_to_store(self.subs, self.subs_id, self.course)
transcripts_utils.copy_or_rename_transcript(self.subs_copied_id, self.subs_id, item)
self.assertTrue(contentstore().find(self.content_copied_location))
def test_save_subs_to_store(self):
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location)
result_location = transcripts_utils.save_subs_to_store(
self.subs,
self.subs_id,
self.course)
self.assertTrue(contentstore().find(self.content_location))
self.assertEqual(result_location, self.content_location)
def test_save_unjsonable_subs_to_store(self):
"""
Ensures that subs, that can't be dumped, can't be found later.
"""
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
with self.assertRaises(TypeError):
transcripts_utils.save_subs_to_store(
self.unjsonable_subs,
self.unjsonable_subs_id,
self.course)
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
class TestYoutubeSubsBase(SharedModuleStoreTestCase):
"""
Base class for tests of Youtube subs. Using override_settings and
a setUpClass() override in a test class which is inherited by another
test class doesn't work well with pytest-django.
"""
@classmethod
def setUpClass(cls):
super(TestYoutubeSubsBase, cls).setUpClass()
cls.course = CourseFactory.create(
org=cls.org, number=cls.number, display_name=cls.display_name)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestDownloadYoutubeSubs(TestYoutubeSubsBase):
"""Tests for `download_youtube_subs` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_sub_content(self, subs_id):
"""
Remove, if subtitle content exists.
"""
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def clear_subs_content(self, youtube_subs):
"""
Remove, if subtitles content exists.
youtube_subs: dict of '{speed: youtube_id}' format for different speeds.
"""
for subs_id in youtube_subs.values():
self.clear_sub_content(subs_id)
def test_success_downloading_subs(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_2'})
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
def test_subs_for_html5_vid_with_periods(self):
"""
        This is to verify a fix whereby subtitle files uploaded against
        an HTML5 video whose name contains periods caused incorrect
        parsing of the subs name.
"""
html5_ids = transcripts_utils.get_html5_ids(['foo.mp4', 'foo.1.bar.mp4', 'foo/bar/baz.1.4.mp4', 'foo'])
self.assertEqual(4, len(html5_ids))
self.assertEqual(html5_ids[0], 'foo')
self.assertEqual(html5_ids[1], 'foo.1.bar')
self.assertEqual(html5_ids[2], 'baz.1.4')
self.assertEqual(html5_ids[3], 'foo')
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_fail_downloading_subs(self, mock_get):
mock_get.return_value = Mock(status_code=404, text='Error 404')
bad_youtube_sub = 'BAD_YOUTUBE_ID2'
self.clear_sub_content(bad_youtube_sub)
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.download_youtube_subs(bad_youtube_sub, self.course, settings)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(bad_youtube_sub)
content_location = StaticContent.compute_location(
self.course.id, filename
)
with self.assertRaises(NotFoundError):
contentstore().find(content_location)
self.clear_sub_content(bad_youtube_sub)
def test_success_downloading_chinese_transcripts(self):
# Disabled 11/14/13
# This test is flakey because it performs an HTTP request on an external service
# Re-enable when `requests.get` is patched using `mock.patch`
raise SkipTest
good_youtube_sub = 'j_jEn79vS3g' # Chinese, utf-8
self.clear_sub_content(good_youtube_sub)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
# Check assets status after importing subtitles.
        filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
        content_location = StaticContent.compute_location(
            self.course.id, filename
        )
        self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_success(self, mock_get):
"""
Get transcript name from transcript_list fetch from youtube server api
depends on language code, default language in YOUTUBE Text Api is "en"
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
response_success = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertEqual(transcript_name, 'Custom')
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_no_transcripts(self, mock_get):
"""
When there are no transcripts of video transcript name will be None
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
response_success = "<transcript_list></transcript_list>"
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertIsNone(transcript_name)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_language_not_exist(self, mock_get):
"""
When the language does not exist in transcript_list transcript name will be None
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
youtube_text_api['params']['lang'] = 'abc'
response_success = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertIsNone(transcript_name)
@patch('xmodule.video_module.transcripts_utils.requests.get', side_effect=mock_requests_get)
def test_downloading_subs_using_transcript_name(self, mock_get):
"""
Download transcript using transcript name in url
"""
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call(
'http://video.google.com/timedtext',
params={'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}
)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
class TestGenerateSubsFromSource(TestDownloadYoutubeSubs):
"""Tests for `generate_subs_from_source` function."""
def test_success_generating_subs(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
self.clear_subs_content(youtube_subs)
# Check transcripts_utils.TranscriptsGenerationException not thrown.
# Also checks that uppercase file extensions are supported.
transcripts_utils.generate_subs_from_source(youtube_subs, 'SRT', srt_filedata, self.course)
# Check assets status after importing subtitles.
for subs_id in youtube_subs.values():
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(
self.course.id, filename
)
self.assertTrue(contentstore().find(content_location))
self.clear_subs_content(youtube_subs)
def test_fail_bad_subs_type(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
transcripts_utils.generate_subs_from_source(youtube_subs, 'BAD_FORMAT', srt_filedata, self.course)
exception_message = text_type(cm.exception)
self.assertEqual(exception_message, "We support only SubRip (*.srt) transcripts format.")
def test_fail_bad_subs_filedata(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = """BAD_DATA"""
with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
transcripts_utils.generate_subs_from_source(youtube_subs, 'srt', srt_filedata, self.course)
exception_message = text_type(cm.exception)
self.assertEqual(exception_message, "Something wrong with SubRip transcripts file during parsing.")
class TestGenerateSrtFromSjson(TestDownloadYoutubeSubs):
"""Tests for `generate_srt_from_sjson` function."""
def test_success_generating_subs(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,100 --> 00:00:00,200\nsubs #1',
'00:00:00,200 --> 00:00:00,240\nsubs #2',
'00:00:00,240 --> 00:00:00,380\nsubs #3',
'00:00:00,390 --> 00:00:01,000\nsubs #4',
'00:00:54,000 --> 00:01:18,400\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_success_generating_subs_speed_up(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 0.5)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,050 --> 00:00:00,100\nsubs #1',
'00:00:00,100 --> 00:00:00,120\nsubs #2',
'00:00:00,120 --> 00:00:00,190\nsubs #3',
'00:00:00,195 --> 00:00:00,500\nsubs #4',
'00:00:27,000 --> 00:00:39,200\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_success_generating_subs_speed_down(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 2)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,200 --> 00:00:00,400\nsubs #1',
'00:00:00,400 --> 00:00:00,480\nsubs #2',
'00:00:00,480 --> 00:00:00,760\nsubs #3',
'00:00:00,780 --> 00:00:02,000\nsubs #4',
'00:01:48,000 --> 00:02:36,800\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_fail_generating_subs(self):
sjson_subs = {
'start': [100, 200],
'end': [100],
'text': [
'subs #1',
'subs #2'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
self.assertFalse(srt_subs)
class TestYoutubeTranscripts(unittest.TestCase):
"""
Tests for checking right datastructure returning when using youtube api.
"""
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_youtube_bad_status_code(self, mock_get):
mock_get.return_value = Mock(status_code=404, text='test')
youtube_id = 'bad_youtube_id'
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_youtube_empty_text(self, mock_get):
mock_get.return_value = Mock(status_code=200, text='')
youtube_id = 'bad_youtube_id'
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
def test_youtube_good_result(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
expected_transcripts = {
'start': [270, 2720, 5430],
'end': [2720, 2720, 7160],
'text': ['Test text 1.', 'Test text 2.', 'Test text 3.']
}
youtube_id = 'good_youtube_id'
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
transcripts = transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
self.assertEqual(transcripts, expected_transcripts)
mock_get.assert_called_with('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_youtube_id'})
class TestTranscript(unittest.TestCase):
"""
Tests for Transcript class e.g. different transcript conversions.
"""
def setUp(self):
super(TestTranscript, self).setUp()
self.srt_transcript = textwrap.dedent("""\
0
00:00:10,500 --> 00:00:13,000
Elephant's Dream
1
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
self.sjson_transcript = textwrap.dedent("""\
{
"start": [
10500,
15000
],
"end": [
13000,
18000
],
"text": [
"Elephant's Dream",
"At the left we can see..."
]
}
""")
self.txt_transcript = u"Elephant's Dream\nAt the left we can see..."
def test_convert_srt_to_txt(self):
"""
Tests that the srt transcript is successfully converted into txt format.
"""
expected = self.txt_transcript
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'txt')
self.assertEqual(actual, expected)
def test_convert_srt_to_srt(self):
"""
Tests that srt to srt conversion works as expected.
"""
expected = self.srt_transcript
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'srt')
self.assertEqual(actual, expected)
def test_convert_sjson_to_txt(self):
"""
Tests that the sjson transcript is successfully converted into txt format.
"""
expected = self.txt_transcript
actual = transcripts_utils.Transcript.convert(self.sjson_transcript, 'sjson', 'txt')
self.assertEqual(actual, expected)
def test_convert_sjson_to_srt(self):
"""
Tests that the sjson transcript is successfully converted into srt format.
"""
expected = self.srt_transcript
actual = transcripts_utils.Transcript.convert(self.sjson_transcript, 'sjson', 'srt')
self.assertEqual(actual, expected)
def test_convert_srt_to_sjson(self):
"""
Tests that the srt transcript is successfully converted into sjson format.
"""
expected = json.loads(self.sjson_transcript)
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'sjson')
self.assertDictEqual(actual, expected)
def test_convert_invalid_srt_to_sjson(self):
"""
Tests that TranscriptsGenerationException was raises on trying
to convert invalid srt transcript to sjson.
"""
invalid_srt_transcript = 'invalid SubRip file content'
with self.assertRaises(transcripts_utils.TranscriptsGenerationException):
transcripts_utils.Transcript.convert(invalid_srt_transcript, 'srt', 'sjson')
def test_dummy_non_existent_transcript(self):
"""
Test `Transcript.asset` raises `NotFoundError` for dummy non-existent transcript.
"""
with self.assertRaises(NotFoundError):
transcripts_utils.Transcript.asset(None, transcripts_utils.NON_EXISTENT_TRANSCRIPT)
with self.assertRaises(NotFoundError):
transcripts_utils.Transcript.asset(None, None, filename=transcripts_utils.NON_EXISTENT_TRANSCRIPT)
class TestSubsFilename(unittest.TestCase):
"""
Tests for subs_filename funtion.
"""
def test_unicode(self):
name = transcripts_utils.subs_filename(u"˙∆©ƒƒƒ")
self.assertEqual(name, u'subs_˙∆©ƒƒƒ.srt.sjson')
name = transcripts_utils.subs_filename(u"˙∆©ƒƒƒ", 'uk')
self.assertEqual(name, u'uk_subs_˙∆©ƒƒƒ.srt.sjson')
@ddt.ddt
class TestVideoIdsInfo(unittest.TestCase):
"""
Tests for `get_video_ids_info`.
"""
@ddt.data(
{
'edx_video_id': '000-000-000',
'youtube_id_1_0': '12as34',
'html5_sources': [
'www.abc.com/foo.mp4', 'www.abc.com/bar.webm', 'foo/bar/baz.m3u8'
],
'expected_result': (False, ['000-000-000', '12as34', 'foo', 'bar', 'baz'])
},
{
'edx_video_id': '',
'youtube_id_1_0': '12as34',
'html5_sources': [
'www.abc.com/foo.mp4', 'www.abc.com/bar.webm', 'foo/bar/baz.m3u8'
],
'expected_result': (True, ['12as34', 'foo', 'bar', 'baz'])
},
{
'edx_video_id': '',
'youtube_id_1_0': '',
'html5_sources': [
'www.abc.com/foo.mp4', 'www.abc.com/bar.webm',
],
'expected_result': (True, ['foo', 'bar'])
},
)
@ddt.unpack
def test_get_video_ids_info(self, edx_video_id, youtube_id_1_0, html5_sources, expected_result):
"""
Verify that `get_video_ids_info` works as expected.
"""
actual_result = transcripts_utils.get_video_ids_info(edx_video_id, youtube_id_1_0, html5_sources)
self.assertEqual(actual_result, expected_result)
| agpl-3.0 | 4,344,171,855,453,367,300 | 36.883817 | 119 | 0.589741 | false | 3.69237 | true | false | false |
mkuron/espresso | testsuite/scripts/tutorials/test_07-electrokinetics__scripts.py | 1 | 2205 | # Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper as iw
import numpy as np
import sys
# these tutorials need to be executed sequentially
tutorial_simulation, skipIfMissingFeatures_simulation = iw.configure_and_import(
"@TUTORIALS_DIR@/07-electrokinetics/scripts/eof_electrokinetics.py",
gpu=True, integration_length=600, dt=0.5)
# use importlib directly to avoid an error for some myconfig.hpp configurations
sys.path.insert(0, "@TUTORIALS_DIR@/07-electrokinetics/scripts/")
tutorial_analytical = iw.importlib.import_module("eof_analytical")
tutorial_plot, skipIfMissingFeatures_plot = iw.configure_and_import(
"@TUTORIALS_DIR@/07-electrokinetics/scripts/plot.py")
@skipIfMissingFeatures_simulation
@skipIfMissingFeatures_plot
class Tutorial(ut.TestCase):
system = tutorial_simulation.system
def normalize_two_datasets(self, a, b):
offset = min(np.min(a), np.min(b))
a -= offset
b -= offset
scale = max(np.max(a), np.max(b))
a /= scale
b /= scale
def test_simulation(self):
for varname in ("density", "velocity", "pressure_xz"):
sim = np.array(tutorial_simulation.__dict__[varname + "_list"])
ana = np.array(tutorial_analytical.__dict__[varname + "_list"])
self.normalize_two_datasets(sim, ana)
accuracy = np.max(np.abs(sim - ana))
# expecting at most 3% deviation
self.assertLess(accuracy, 3.0 / 100)
if __name__ == "__main__":
ut.main()
| gpl-3.0 | -4,796,694,920,548,650,000 | 37.017241 | 80 | 0.69932 | false | 3.585366 | false | false | false |
demianbrecht/django-sanction | example/core/models.py | 1 | 4373 | from datetime import datetime, timedelta
from time import mktime
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, UserManager
from lazy import lazy
from sanction.client import Client as SanctionClient
class User(AbstractBaseUser):
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = []
username = models.CharField(_('username'), max_length=30, unique=True)
first_name = models.CharField(_('first name'), max_length=100, blank=True)
last_name = models.CharField(_('last name'), max_length=100, blank=True)
objects = UserManager()
@lazy
def providers(self):
return dict((p.name, p) for p in Provider.objects.filter(user=self))
def current_provider(self, request):
return self.providers[request.session['__sp']]
@staticmethod
def fetch_user(provider, client):
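        # Dispatches by provider name, e.g. provider='google' calls
        # User.fetch_google(client) (illustrative example).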
return getattr(User, 'fetch_{}'.format(provider))(client)
@staticmethod
def get_user(user_id):
return User.objects.get(id=user_id)
@staticmethod
def fetch_google(client):
resp = client.request('/userinfo')
normalized = {
'id': resp['id'],
'provider': 'google',
'email': resp['email'],
'first_name': resp['given_name'],
'last_name': resp['family_name'],
'access_token': client.access_token,
'token_expires': client.token_expires,
'refresh_token': client.refresh_token,
}
return User._get(normalized)
@staticmethod
def fetch_facebook(client):
resp = client.request('/me')
normalized = {
'id': resp['id'],
'provider': 'facebook',
'email': resp['email'],
'first_name': resp['first_name'],
'last_name': resp['last_name'],
'access_token': client.access_token,
# fb doesn't use the RFC-defined expires_in field, but uses
# expires. as such, we have to set this manually
'token_expires': mktime((datetime.utcnow() +
timedelta(seconds=int(client.expires))).timetuple()),
}
return User._get(normalized)
@staticmethod
def _get(data):
try:
provider = Provider.objects.get(name=data['provider'],
pid=data['id'])
except Provider.DoesNotExist:
user = User.objects.create(username='{}_{}'.format(
data['provider'], data['id']),
first_name=data['first_name'],
last_name=data['last_name'])
user.save()
provider = Provider()
provider.name = data['provider']
provider.user = user
provider.pid = data['id']
provider.email = data['email']
provider.access_token = data['access_token']
provider.token_expires = data['token_expires']
provider.refresh_token = data.get('refresh_token', None)
provider.save()
return provider.user
class Provider(models.Model):
name = models.CharField(_('provider'), max_length=50)
user = models.ForeignKey(User)
email = models.EmailField(_('email address'), blank=True)
pid = models.CharField(_('provider id'), max_length=50)
access_token = models.CharField(_('access token'), max_length=100,
blank=True)
refresh_token = models.CharField(_('refresh token'), max_length=100,
blank=True, null=True)
token_expires = models.FloatField(default=-1)
@lazy
def resource(self):
provider = settings.SANCTION_PROVIDERS[self.name]
c = SanctionClient(auth_endpoint=provider['auth_endpoint'],
token_endpoint=provider['token_endpoint'],
resource_endpoint=provider['resource_endpoint'],
client_id=provider['client_id'],
client_secret=provider['client_secret'])
c.refresh_token = self.refresh_token
c.access_token = self.access_token
c.token_expires = self.token_expires
return c
def refresh(self):
assert self.refresh_token is not None
self.resource.refresh()
self.access_token = self.resource.access_token
self.token_expires = self.resource.token_expires
self.save()
| mit | 3,216,017,726,208,436,700 | 34.844262 | 78 | 0.603933 | false | 4.216972 | false | false | false |
CivicKnowledge/metaeditor | editor/migrations/0009_auto_20150316_1454.py | 1 | 1774 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
import editor.models
class Migration(migrations.Migration):
dependencies = [
('editor', '0008_auto_20150315_1847'),
]
operations = [
migrations.AddField(
model_name='datafile',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 3, 16, 14, 54, 3, 122690, tzinfo=utc), help_text=b'Creation date and time', auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='documentfile',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 3, 16, 14, 54, 14, 307008, tzinfo=utc), help_text=b'Creation date and time', auto_now_add=True),
preserve_default=False,
),
migrations.AlterField(
model_name='datafile',
name='f',
field=models.FileField(upload_to='files'),
preserve_default=True,
),
migrations.AlterField(
model_name='datafile',
name='file_format',
field=models.ForeignKey(blank=True, to='editor.Format', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='documentfile',
name='f',
field=models.FileField(upload_to='files'),
preserve_default=True,
),
migrations.AlterField(
model_name='documentfile',
name='file_format',
field=models.ForeignKey(blank=True, to='editor.Format', null=True),
preserve_default=True,
),
]
| mit | -751,359,076,486,165,200 | 32.471698 | 167 | 0.580045 | false | 4.274699 | false | false | false |
pwong-mapr/private-hue | desktop/core/ext-py/ctypes-1.0.2/ctypes/test/test_memfunctions.py | 14 | 2317 | import sys
import unittest
from ctypes import *
class MemFunctionsTest(unittest.TestCase):
def test_memmove(self):
# large buffers apparently increase the chance that the memory
# is allocated in high address space.
a = create_string_buffer(1000000)
p = "Hello, World"
result = memmove(a, p, len(p))
self.failUnlessEqual(a.value, "Hello, World")
self.failUnlessEqual(string_at(result), "Hello, World")
self.failUnlessEqual(string_at(result, 5), "Hello")
self.failUnlessEqual(string_at(result, 16), "Hello, World\0\0\0\0")
self.failUnlessEqual(string_at(result, 0), "")
def test_memset(self):
a = create_string_buffer(1000000)
result = memset(a, ord('x'), 16)
self.failUnlessEqual(a.value, "xxxxxxxxxxxxxxxx")
self.failUnlessEqual(string_at(result), "xxxxxxxxxxxxxxxx")
self.failUnlessEqual(string_at(a), "xxxxxxxxxxxxxxxx")
self.failUnlessEqual(string_at(a, 20), "xxxxxxxxxxxxxxxx\0\0\0\0")
def test_cast(self):
a = (c_ubyte * 32)(*map(ord, "abcdef"))
self.failUnlessEqual(cast(a, c_char_p).value, "abcdef")
self.failUnlessEqual(cast(a, POINTER(c_byte))[:7],
[97, 98, 99, 100, 101, 102, 0])
def test_string_at(self):
s = string_at("foo bar")
# XXX The following may be wrong, depending on how Python
# manages string instances
self.failUnlessEqual(2, sys.getrefcount(s))
self.failUnless(s, "foo bar")
self.failUnlessEqual(string_at("foo bar", 8), "foo bar\0")
self.failUnlessEqual(string_at("foo bar", 3), "foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_wstring_at(self):
p = create_unicode_buffer("Hello, World")
a = create_unicode_buffer(1000000)
result = memmove(a, p, len(p) * sizeof(c_wchar))
self.failUnlessEqual(a.value, "Hello, World")
self.failUnlessEqual(wstring_at(a), "Hello, World")
self.failUnlessEqual(wstring_at(a, 5), "Hello")
self.failUnlessEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
self.failUnlessEqual(wstring_at(a, 0), "")
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 1,945,446,036,713,943,300 | 36.983607 | 75 | 0.604661 | false | 3.701278 | true | false | false |
proxeIO/fast-lattice | addon/properties.py | 1 | 1359 | import bpy
from bpy.types import PropertyGroup
from bpy.props import FloatProperty, BoolProperty, EnumProperty
from .config import defaults as default
class fast_lattice(PropertyGroup):
method = EnumProperty(
name = 'Conforming Method',
description = 'Method to use when conforming the lattice to your selection',
items = [
('WORLD', 'World Aligned', 'The world aligned method that only produces a lattice that fits around the selection without alignment'),
            ('ALIGN', 'Fit To Selection', 'Try to fit the lattice to the selection'),
],
default = default['method']
)
accuracy = FloatProperty(
name = 'Accuracy',
description = 'How accurate the lattice will conform to the selection (Increasing this value takes longer to calculate)',
min = 0.001,
max = 5.0,
soft_max = 1.0,
default = default['accuracy']
)
interpolation_type = EnumProperty(
name = 'Interpolation Type',
description = 'Interpolation type to use for the created lattice',
items = [
('KEY_BSPLINE', 'BSpline', ''),
('KEY_CATMULL_ROM', 'Catmull-Rom', ''),
('KEY_CARDINAL', 'Cardinal', ''),
('KEY_LINEAR', 'Linear', '')
],
default = default['interpolation_type']
)
| gpl-3.0 | 864,349,192,912,093,400 | 33.846154 | 145 | 0.6078 | false | 4.398058 | false | false | false |
chenyujie/hybrid-murano | murano/tests/functional/common/utils.py | 1 | 6917 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import os
import time
import zipfile
from heatclient import client as heatclient
from keystoneclient import exceptions as ks_exceptions
from keystoneclient.v2_0 import client as ksclient
from muranoclient import client as mclient
import muranoclient.common.exceptions as exceptions
from murano.services import states
import murano.tests.functional.engine.config as cfg
CONF = cfg.cfg.CONF
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
def memoize(f):
"""Decorator, which saves result of a decorated function
to cache.
TTL for cache is 1800 sec
:param f: decorated function
:return: saved result of a decorated function
"""
cache = {}
def decorated_function(*args):
if args in cache:
if time.time() - cache[args][1] < 1800:
return cache[args][0]
else:
cache[args] = (f(*args), time.time())
return cache[args][0]
else:
cache[args] = (f(*args), time.time())
return cache[args][0]
return decorated_function
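# Illustrative behaviour of @memoize: decorated helpers such as keystone_client()
# below return the cached object for repeated calls with the same arguments made
# within 1800 seconds, instead of rebuilding the client each time.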
class ZipUtilsMixin(object):
@staticmethod
def zip_dir(parent_dir, dir):
abs_path = os.path.join(parent_dir, dir)
path_len = len(abs_path) + 1
zip_file = abs_path + ".zip"
with zipfile.ZipFile(zip_file, "w") as zf:
for dir_name, _, files in os.walk(abs_path):
for filename in files:
fn = os.path.join(dir_name, filename)
zf.write(fn, fn[path_len:])
return zip_file
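    # Example call (hypothetical paths): ZipUtilsMixin.zip_dir('/tmp/pkgs', 'MyApp')
    # writes /tmp/pkgs/MyApp.zip with entries stored relative to the MyApp folder
    # and returns the path of the created archive.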
class DeployTestMixin(ZipUtilsMixin):
cfg.load_config()
@staticmethod
@memoize
def keystone_client():
return ksclient.Client(username=CONF.murano.user,
password=CONF.murano.password,
tenant_name=CONF.murano.tenant,
auth_url=CONF.murano.auth_url)
@classmethod
@memoize
def heat_client(cls):
heat_url = cls.keystone_client().service_catalog.url_for(
service_type='orchestration', endpoint_type='publicURL')
return heatclient.Client('1',
endpoint=heat_url,
token=cls.keystone_client().auth_token)
@classmethod
def get_murano_url(cls):
try:
url = cls.keystone_client().service_catalog.url_for(
service_type='application_catalog', endpoint_type='publicURL')
except ks_exceptions.EndpointNotFound:
url = CONF.murano.murano_url
LOG.warning("Murano endpoint not found in Keystone. Using CONF.")
return url if 'v1' not in url else "/".join(
url.split('/')[:url.split('/').index('v1')])
@classmethod
@memoize
def murano_client(cls):
murano_url = cls.get_murano_url()
return mclient.Client('1',
endpoint=murano_url,
token=cls.keystone_client().auth_token)
@classmethod
def init_list(cls, list_name):
if not hasattr(cls, list_name):
setattr(cls, list_name, [])
@classmethod
def upload_package(cls, package_name, body, app):
files = {'%s' % package_name: open(app, 'rb')}
package = cls.murano_client().packages.create(body, files)
cls.init_list("_packages")
cls._packages.append(package)
return package
@classmethod
def environment_delete(cls, environment_id, timeout=180):
cls.murano_client().environments.delete(environment_id)
start_time = time.time()
while time.time() - start_time < timeout:
try:
cls.murano_client().environments.get(environment_id)
except exceptions.HTTPNotFound:
return
err_msg = ('Environment {0} was not deleted in {1} seconds'.
format(environment_id, timeout))
LOG.error(err_msg)
raise RuntimeError(err_msg)
@classmethod
def deploy_apps(cls, name, *apps):
environment = cls.murano_client().environments.create({'name': name})
cls.init_list("_environments")
cls._environments.append(environment)
session = cls.murano_client().sessions.configure(environment.id)
for app in apps:
cls.murano_client().services.post(
environment.id,
path='/',
data=app,
session_id=session.id)
cls.murano_client().sessions.deploy(environment.id, session.id)
return environment
@staticmethod
def wait_for_final_status(environment, timeout=300):
start_time = time.time()
status = environment.manager.get(environment.id).status
while states.SessionState.DEPLOYING == status:
if time.time() - start_time > timeout:
err_msg = ('Deployment not finished in {0} seconds'
.format(timeout))
LOG.error(err_msg)
raise RuntimeError(err_msg)
time.sleep(5)
status = environment.manager.get(environment.id).status
dep = environment.manager.api.deployments.list(environment.id)
reports = environment.manager.api.deployments.reports(environment.id,
dep[0].id)
return status, ", ".join([r.text for r in reports])
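    # wait_for_final_status polls every 5 seconds until the environment leaves the
    # DEPLOYING state (or the timeout expires) and returns the final status plus
    # the concatenated deployment report texts.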
@classmethod
def purge_environments(cls):
cls.init_list("_environments")
try:
for env in cls._environments:
with ignored(Exception):
cls.environment_delete(env.id)
finally:
cls._environments = []
@classmethod
def purge_uploaded_packages(cls):
cls.init_list("_packages")
try:
for pkg in cls._packages:
with ignored(Exception):
cls.murano_client().packages.delete(pkg.id)
finally:
cls._packages = []
cls.init_list("_package_files")
try:
for pkg_file in cls._package_files:
os.remove(pkg_file)
finally:
cls._package_files = []
| apache-2.0 | -3,509,607,711,950,462,000 | 32.906863 | 78 | 0.590574 | false | 4.161853 | false | false | false |
tomer8007/kik-bot-api-unofficial | kik_unofficial/datatypes/xmpp/auth_stanza.py | 1 | 6407 | import base64
import bs4
import hashlib
import hmac
import logging
import pyDes
import rsa
import time
from kik_unofficial.device_configuration import device_id
from kik_unofficial.utilities.cryptographic_utilities import CryptographicUtils
log = logging.getLogger(__name__)
identifierHex = "30820122300d06092a864886f70d01010105000382010f00"
class AuthStanza():
client = None
keyBytes: bytes = None
secretKey: bytes = None
public_key: rsa.key.PublicKey = None
private_key: rsa.key.PrivateKey = None
encrypted_public_key: bytes = None
decrypted_public_key: bytes = None
revalidate_time: int = None
cert_url: str = None
def __init__(self, client):
self.client = client
def send_stanza(self) -> None:
"""
Send the outgoing auth stanza
"""
stanza = self.searlize()
log.info('[+] Sending authentication certificate')
self.client.loop.call_soon_threadsafe(self.client.connection.send_raw_data, stanza)
def revalidate(self) -> None:
"""
Revalidates the keys after n amount of time which is provided by Kik
"""
if time.time() < self.revalidate_time:
return
stanza = self.searlize()
log.info('[+] Revalidating the authentication certificate')
self.client.loop.call_soon_threadsafe(self.client.connection.send_raw_data, stanza)
def searlize(self) -> bytes:
"""
        Gets (or generates) the public key and builds the auth stanza
"""
UUID = CryptographicUtils.make_kik_uuid()
der = self.get_public_key_base64()
signature = self.get_signature()
urlattr = f' url="{self.cert_url}"' if self.cert_url else ''
query = (
'<iq type="set" id="{}"><query xmlns="kik:auth:cert"{}>'
'<key type="rsa">'
'<der>{}</der><signature>{}</signature>'
'</key>'
'</query></iq>'
).format(UUID, urlattr, der, signature)
return query.encode()
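    # Shape of the payload emitted by searlize() above (values are illustrative):
    # <iq type="set" id="..."><query xmlns="kik:auth:cert">
    #   <key type="rsa"><der>...</der><signature>...</signature></key>
    # </query></iq>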
def generate_keys(self) -> None:
"""
        Generate a new 2048-bit RSA key pair; this can take from about one to six seconds
"""
(pubkey, privkey) = rsa.newkeys(2048)
self.public_key = bytes.fromhex(identifierHex) + pubkey.save_pkcs1('DER')
self.private_key = bytes.fromhex(identifierHex) + privkey.save_pkcs1('DER')
def get_key_phrase(self) -> bytes:
"""
Calculates salted username passkey
"""
username = self.client.username
password = self.client.password
return CryptographicUtils.key_from_password(username, password).encode()
def get_des_secret(self) -> bytes:
"""
The secret Kik uses for the DESKeySpec
"""
username = self.client.username
device = self.client.device_id_override or device_id
data = (device + '-' + username).encode()
return hashlib.sha1(data).digest()
def get_public_key_bytes(self) -> bytes:
"""
Generates all the secrets then encrypts and decrypts the public key
"""
if not self.public_key:
self.generate_keys()
if not (self.keyBytes and self.secretKey):
key = self.get_des_key(self.get_des_secret())
self.get_parity_bit(key, 0)
if not self.decrypted_public_key:
des = pyDes.des(self.secretKey, mode=pyDes.ECB, padmode=pyDes.PAD_PKCS5)
self.encrypted_public_key = des.encrypt(self.public_key)
self.decrypted_public_key = des.decrypt(self.encrypted_public_key)
return self.decrypted_public_key
def get_public_key_base64(self) -> str:
"""
Base64 encodes the encrypted and decrypted data
"""
return base64.urlsafe_b64encode(self.get_public_key_bytes()).decode()
def get_des_key(self, key) -> bytes:
"""
Equivalent to new DESKeySpec(key).getKey() in Java
"""
if not isinstance(key, bytes):
key = bytes(key)
self.keyBytes = key[:8] # DES keys are only 8 bytes in length
return self.keyBytes
def get_key(self) -> bytes:
"""
Returns the normal DESKey bytes
"""
return self.keyBytes
def get_secret_key(self) -> bytes:
"""
Returns the secret of the DESKey
"""
return self.secretKey
def get_parity_bit(self, bArr: bytes, i: int = 0) -> bytes:
"""
Same as calling generateSecret(DESKeySpec).getEncoded() in Java
"""
tmp = list(bArr)
for _ in range(8):
b = tmp[i] & 254
tmp[i] = (((bin(b).count('1') & 1) ^ 1) | b)
i = i + 1
self.secretKey = bytes(tmp)
return self.secretKey
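    # Worked example for get_parity_bit above (illustrative byte): for 0b10101010
    # the top seven bits contain four ones (even), so the low bit is set to 1 and
    # the byte becomes 0b10101011, the odd-parity form that DESKeySpec expects.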
def get_signature(self) -> str:
"""
        Base64 of the HMAC-SHA1 over the public key bytes, keyed with the username passkey
"""
msg = self.get_public_key_bytes()
key = self.get_key_phrase()
digest = hashlib.sha1
signature = hmac.new(key, msg, digest).digest()
return base64.urlsafe_b64encode(signature).decode()
def handle(self, data: bs4.BeautifulSoup):
"""
Handles the auth response (result/error) sent by Kik
"""
if data.error:
log.error('[!] kik:auth:cert [' + data.error.get('code') + '] ' + data.error.get_text())
log.debug(str(data))
return
if data.find_all('regenerate-key', recursive=True):
log.info('[!] Regenerating the keys for certificate authentication')
self.teardown()
self.send_stanza()
return
current = round(time.time() * 1000)
revalidate = int(data.certificate.revalidate.text)
self.cert_url = data.certificate.url.text
self.revalidate_time = current + (revalidate * 1000)
self.client.loop.call_later(revalidate, self.revalidate)
log.info('[+] Successfully validated the authentication certificate')
def teardown(self):
"""
Removes all the generated data to build a new Key
"""
self.keyBytes = None
self.secretKey = None
self.public_key = None
self.private_key = None
self.encrypted_public_key = None
self.decrypted_public_key = None
self.revalidate_time = None
self.cert_url = None
| mit | 1,200,259,331,266,545,000 | 33.079787 | 100 | 0.59435 | false | 3.852676 | false | false | false |
Tjorriemorrie/trading | 01_fibo/analyze_one.py | 1 | 5284 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from random import choice
from features import FeatureFactory
from types import *
def addMaxMins(df, n=2):
'''
get highs and lows for n periods backwards
1 min = 1m
2 min = 2m
4 min = 4m
8 min = 8m
16 min = 16m
32 min = 32m
64 min = 1h 4m
128 min = 2h 8m
256 min = 4h 16m
512 min = 8h 32m
1024 min = 17h 4m
'''
print 'retrieving entry features...'
columns = {}
for i, row in df.iterrows():
# row['max_1'] = row['high']
# row['min_1'] = row['low']
# print '\n\ni', i
x = 1
for y in xrange(1, n):
# print 'x', x
start = max(0, i+1 - x)
# print 'start', start, ':', i+1
slice = df[start:i+1]
# print slice
# print 'len', len(slice)
if 'max_' + str(x) not in columns:
columns['max_' + str(x)] = []
if 'min_' + str(x) not in columns:
columns['min_' + str(x)] = []
columns['max_' + str(x)].append(max(slice['high']) / row['close'])
columns['min_' + str(x)].append(min(slice['low']) / row['close'])
# print '\n'
x *= 2
# break
# break
for name, vector in columns.iteritems():
df[name] = vector
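# addMaxMins adds look-back features over doubling windows (1, 2, 4, ... bars):
# for each window x it stores max_x = rolling high / current close and
# min_x = rolling low / current close.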
def processMaxMins(df):
print 'processing max and mins...'
def addTradeSetUps(df):
print 'adding trade set ups...'
currencies = [
'AUDUSD',
'EURGBP',
'EURJPY',
'EURUSD',
'GBPJPY',
'GBPUSD',
'NZDUSD',
'USDCAD',
'USDCHF',
'USDJPY',
]
intervals = [
# '60',
'1440',
]
if __name__ == '__main__':
# randomly select currency (due to high iteration)
currency = choice(currencies)
interval = choice(intervals)
print currency, interval
print 'loading dataframe...'
df = pd.read_csv(
r'../data/' + currency + interval + '.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
# parse_dates=[[0, 1]],
# index_col=0,
)
print df.tail()
# trades
trades = []
# entry
entry = None
iEntry = None
# low - start
low = None
iLow = None
# high - mid
high = None
iHigh = None
# exit
exit = None
iExit = None
# process
trade = {}
for i, row in df.iterrows():
# look for entry
if not entry:
# LOW
# if none: assign
if isinstance(low, NoneType):
low = row
iLow = i
print 'low new', low['low']
# else: if lower than low: new low & invalid high
elif row['low'] < low['low']:
low = row
iLow = i
high = None
iHigh = None
print 'low lower', low['low']
# else if lower too old then update
# HIGH
# if we have a low, and it's the lowest thus far, then get the high
# high must be at least 5 ticks later
elif i - iLow < 5:
trade['low'] = low
trade['iLow'] = iLow
continue
# if none: assign
elif isinstance(high, NoneType):
high = row
iHigh = i
print 'high new', high['high']
# else: if higher than high: new high
elif row['high'] > high['high']:
high = row
iHigh = i
print 'high higher', high['high']
# CURRENT
# can enter if close is at least 5 ticks later
elif i - iHigh < 5:
trade['high'] = high
trade['iHigh'] = iHigh
continue
# if we have a low, and if we have a high, then we can look for entry
else:
range = abs(high['high'] - low['low'])
m50 = high['high'] - (range * 0.500)
m61 = high['high'] - (range * 0.618)
t38 = high['high'] + (range * 0.382)
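                # Illustrative numbers: with low = 1.1000 and high = 1.2000 the
                # range is 0.1000, so m50 = 1.1500, m61 = 1.1382 and t38 = 1.2382;
                # a close of 1.1450 would then sit inside the entry zone.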
print '\nclose', row['close']
print 'low', low['low']
print 'high', high['high']
print 'range', range
print 'm50', m50
print 'm61', m61
print 't38', t38
                # if close is in the retracement zone between the 61.8% and 50% levels, enter
                if m61 < row['close'] < m50:
entry = row
iEntry = i
print 'entry', entry['close']
# look for exit
else:
# let it turn for at least 5 ticks
if i - iEntry < 5:
trade['entry'] = entry
trade['iEntry'] = iEntry
continue
# stop loss if lower than m61
# take profit if higher than t38
elif row['close'] > t38 or row['close'] < m61:
                trade['exit'] = row
                trade['iExit'] = i
                # 'exit' is never assigned, so compute the profit from the current row
                trade['profit'] = row['close'] - entry['close']
                trades.append(trade)
                trade = {}
                # reset the set-up state so the next trade can be detected
                entry, iEntry = None, None
                low, iLow = None, None
                high, iHigh = None, None
print len(trades)
print trades | mit | -1,934,741,996,207,708,000 | 25.557789 | 81 | 0.448524 | false | 3.931548 | false | false | false |
adamchainz/lifelogger | lifelogger/commands/local.py | 1 | 6070 | # coding=utf-8
"""
All commands that create & use the local copies of the Google Calendar data.
"""
from __future__ import absolute_import, print_function
import requests
from icalendar import Calendar
from ..config import config, ICAL_PATH
from ..utils import nice_format
from .parser import subparsers
import six
from six.moves import input
def download(reset=None):
if reset:
config.pop('ical_url', None)
try:
ical_url = config['ical_url']
except KeyError:
print("To download the iCal file for analysis, you must give me the "
"public URL for it.")
print("Please go to the Google Calendar web interface "
", 'Calendar Settings', and then copy the link address from "
"the ICAL button under 'Calendar Address'")
ical_url = input("Paste --> ")
config['ical_url'] = ical_url
print("Downloading private iCal file...")
req = requests.get(ical_url, stream=True)
if req.status_code != 200:
print("Could not fetch iCal url - has it expired? ")
print("To change, run download --reset")
print(ical_url)
return False
with open(ICAL_PATH, 'wb') as f:
for chunk in req.iter_content():
f.write(chunk)
print("Download successful!")
make_db()
return True
download.parser = subparsers.add_parser(
'download',
description="Downloads the iCal that contains the whole of your Google "
"Calendar, and then parses it into the local database"
)
download.parser.add_argument(
'-r',
'--reset',
const=True,
default=False,
nargs='?',
help="Pass this in to force re-pasting in the iCal url, if e.g. the url "
" stored in lifelogger is no longer valid."
)
download.parser.set_defaults(func=download)
def make_db():
from ..database import Event, db
print("Converting iCal file into sqlite database...")
with open(ICAL_PATH, 'rb') as f:
ical_data = f.read()
cal = Calendar.from_ical(ical_data)
try:
Event.drop_table()
except Exception:
pass
try:
Event.create_table()
except Exception:
pass
with db.atomic():
for event in cal.walk("VEVENT"):
Event.create_from_ical_event(event)
print("Imported {} events.".format(
Event.select().count()
))
return True
make_db.parser = subparsers.add_parser(
'make_db',
description="Parses the downloaded iCal file into the local sqlite "
"database. Normally done when the download command is run, "
"but may need re-running on changes to lifelogger."
)
make_db.parser.set_defaults(func=make_db)
def shell():
from datetime import datetime, date # noqa
from ..database import Event, regexp # noqa
from IPython import embed
embed()
shell.parser = subparsers.add_parser(
'shell',
description="Loads the local database and an IPython shell so you can "
"manually search around the events using the 'peewee' ORM."
)
shell.parser.set_defaults(func=shell)
def sql(statement, separator):
from ..database import conn
statement = ' '.join(statement)
cursor = conn.cursor()
cursor.execute(statement)
separator = {
'comma': ',',
'semicolon': ';',
'tab': '\t',
}[separator]
# Header
print(separator.join([d[0] for d in cursor.description]))
# Data
for row in cursor.fetchall():
print(separator.join([str(v) for v in row]))
sql.parser = subparsers.add_parser(
'sql',
description="Execute a SQL statement direct on the db and output results "
"as csv."
)
sql.parser.add_argument(
'statement',
nargs="+",
type=six.text_type,
help="The SQL statement."
)
sql.parser.add_argument(
'-s',
'--separator',
nargs="?",
type=six.text_type,
default="comma",
choices=['comma', 'semicolon', 'tab'],
help="Separator for the output - default comma."
)
sql.parser.set_defaults(func=sql)
def list_command(filter_re):
filter_re = ' '.join(filter_re)
from ..database import Event, regexp
events = Event.select().where(regexp(Event.summary, filter_re))
for event in events:
print(event.display())
return True
list_command.parser = subparsers.add_parser(
'list',
description="Lists the events that match a given regex."
)
list_command.parser.add_argument(
'filter_re',
nargs="+",
type=six.text_type,
help="The regex to filter events by."
)
list_command.parser.set_defaults(func=list_command)
def csv(filter_re, separator, varnames):
filter_re = ' '.join(filter_re)
varnames = varnames.split(',')
separator = {
'comma': ',',
'semicolon': ';',
'tab': '\t',
}[separator]
from ..database import Event, regexp
events = Event.select().where(regexp(Event.summary, filter_re))
# Header
print(separator.join(varnames))
# Data
for event in events:
print(separator.join([
nice_format(event.get_var(varname)) for varname in varnames
]))
csv.parser = subparsers.add_parser(
'csv',
description="Used to output properties of events that a given filter as "
"CSV data."
)
csv.parser.add_argument(
'-s',
'--separator',
nargs="?",
type=six.text_type,
default="comma",
choices=['comma', 'semicolon', 'tab'],
help="Separator for the output - default comma."
)
csv.parser.add_argument(
'-v',
'--varnames',
nargs="?",
type=six.text_type,
default="start,end,summary",
help="A comma-separated list of the Event variables to output (options: "
"start, end, summary, duration_seconds, duration_minutes, "
"duration_hours, units, percentage, kg, mg). "
"Defaults to 'start,end,summary'."
)
csv.parser.add_argument(
'filter_re',
nargs="+",
type=six.text_type,
help="The regex to filter events by."
)
csv.parser.set_defaults(func=csv)
| mit | -724,857,612,454,011,900 | 23.087302 | 78 | 0.620264 | false | 3.800877 | false | false | false |
dhanunjaya/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py | 4 | 6991 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.agent.common import config
from neutron.common import constants as n_const
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []
ovs_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("Integration bridge to use.")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("Tunnel bridge to use.")),
cfg.StrOpt('int_peer_patch_port', default='patch-tun',
help=_("Peer patch port in integration bridge for tunnel "
"bridge.")),
cfg.StrOpt('tun_peer_patch_port', default='patch-int',
help=_("Peer patch port in tunnel bridge for integration "
"bridge.")),
cfg.IPOpt('local_ip', version=4,
help=_("Local IP address of tunnel endpoint.")),
cfg.ListOpt('bridge_mappings',
default=DEFAULT_BRIDGE_MAPPINGS,
help=_("List of <physical_network>:<bridge>. "
"Deprecated for ofagent.")),
cfg.BoolOpt('use_veth_interconnection', default=False,
help=_("Use veths instead of patch ports to interconnect the "
"integration bridge to physical bridges.")),
cfg.StrOpt('of_interface', default='ovs-ofctl',
choices=['ovs-ofctl', 'native'],
help=_("OpenFlow interface to use.")),
cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM,
choices=[constants.OVS_DATAPATH_SYSTEM,
constants.OVS_DATAPATH_NETDEV],
help=_("OVS datapath to use.")),
cfg.IPOpt('of_listen_address', default='127.0.0.1',
help=_("Address to listen on for OpenFlow connections. "
"Used only for 'native' driver.")),
cfg.IntOpt('of_listen_port', default=6633,
help=_("Port to listen on for OpenFlow connections. "
"Used only for 'native' driver.")),
cfg.IntOpt('of_connect_timeout', default=30,
help=_("Timeout in seconds to wait for "
"the local switch connecting the controller. "
"Used only for 'native' driver.")),
cfg.IntOpt('of_request_timeout', default=10,
help=_("Timeout in seconds to wait for a single "
"OpenFlow request. "
"Used only for 'native' driver.")),
]
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
cfg.BoolOpt('minimize_polling',
default=True,
help=_("Minimize polling by monitoring ovsdb for interface "
"changes.")),
cfg.IntOpt('ovsdb_monitor_respawn_interval',
default=constants.DEFAULT_OVSDBMON_RESPAWN,
help=_("The number of seconds to wait before respawning the "
"ovsdb monitor after losing communication with it.")),
cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
help=_("Network types supported by the agent "
"(gre and/or vxlan).")),
cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
help=_("The UDP port to use for VXLAN tunnels.")),
cfg.IntOpt('veth_mtu',
help=_("MTU size of veth interfaces")),
cfg.BoolOpt('l2_population', default=False,
help=_("Use ML2 l2population mechanism driver to learn "
"remote MAC and IPs and improve tunnel scalability.")),
cfg.BoolOpt('arp_responder', default=False,
help=_("Enable local ARP responder if it is supported. "
"Requires OVS 2.1 and ML2 l2population driver. "
"Allows the switch (when supporting an overlay) "
"to respond to an ARP request locally without "
"performing a costly ARP broadcast into the overlay.")),
cfg.BoolOpt('prevent_arp_spoofing', default=True,
help=_("Enable suppression of ARP responses that don't match "
"an IP address that belongs to the port from which "
"they originate. Note: This prevents the VMs attached "
"to this agent from spoofing, it doesn't protect them "
"from other devices which have the capability to spoof "
"(e.g. bare metal or VMs attached to agents without "
"this flag set to True). Spoofing rules will not be "
"added to any ports that have port security disabled. "
"For LinuxBridge, this requires ebtables. For OVS, it "
"requires a version that supports matching ARP "
"headers.")),
cfg.BoolOpt('dont_fragment', default=True,
help=_("Set or un-set the don't fragment (DF) bit on "
"outgoing IP packet carrying GRE/VXLAN tunnel.")),
cfg.BoolOpt('enable_distributed_routing', default=False,
help=_("Make the l2 agent run in DVR mode.")),
cfg.IntOpt('quitting_rpc_timeout', default=10,
help=_("Set new timeout in seconds for new rpc calls after "
"agent receives SIGTERM. If value is set to 0, rpc "
"timeout won't be changed")),
cfg.BoolOpt('drop_flows_on_start', default=False,
help=_("Reset flow table on start. Setting this to True will "
"cause brief traffic interruption.")),
cfg.BoolOpt('tunnel_csum', default=False,
help=_("Set or un-set the tunnel header checksum on "
"outgoing IP packet carrying GRE/VXLAN tunnel.")),
cfg.StrOpt('agent_type', default=n_const.AGENT_TYPE_OVS,
deprecated_for_removal=True,
help=_("Selects the Agent Type reported"))
]
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
| apache-2.0 | -4,177,901,443,156,568,600 | 50.785185 | 79 | 0.587184 | false | 4.29159 | false | false | false |
tensorflow/tpu | models/official/detection/dataloader/factory.py | 1 | 7775 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader factory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dataloader import classification_parser
from dataloader import extract_objects_parser
from dataloader import maskrcnn_parser
from dataloader import maskrcnn_parser_with_copy_paste
from dataloader import mode_keys as ModeKeys
from dataloader import retinanet_parser
from dataloader import segmentation_parser
from dataloader import shapemask_parser
def parser_generator(params, mode):
"""Generator function for various dataset parser."""
if params.architecture.parser == 'classification_parser':
parser_params = params.classification_parser
parser_fn = classification_parser.Parser(
output_size=parser_params.output_size,
aug_rand_hflip=parser_params.aug_rand_hflip,
use_bfloat16=params.architecture.use_bfloat16,
mode=mode)
elif params.architecture.parser == 'retinanet_parser':
anchor_params = params.anchor
parser_params = params.retinanet_parser
parser_fn = retinanet_parser.Parser(
output_size=parser_params.output_size,
min_level=params.architecture.min_level,
max_level=params.architecture.max_level,
num_scales=anchor_params.num_scales,
aspect_ratios=anchor_params.aspect_ratios,
anchor_size=anchor_params.anchor_size,
match_threshold=parser_params.match_threshold,
unmatched_threshold=parser_params.unmatched_threshold,
aug_rand_hflip=parser_params.aug_rand_hflip,
aug_scale_min=parser_params.aug_scale_min,
aug_scale_max=parser_params.aug_scale_max,
aug_policy=parser_params.aug_policy,
skip_crowd_during_training=parser_params.skip_crowd_during_training,
max_num_instances=parser_params.max_num_instances,
use_bfloat16=params.architecture.use_bfloat16,
mode=mode,
regenerate_source_id=parser_params.regenerate_source_id)
elif params.architecture.parser == 'maskrcnn_parser':
anchor_params = params.anchor
parser_params = params.maskrcnn_parser
parser_fn = maskrcnn_parser.Parser(
output_size=parser_params.output_size,
min_level=params.architecture.min_level,
max_level=params.architecture.max_level,
num_scales=anchor_params.num_scales,
aspect_ratios=anchor_params.aspect_ratios,
anchor_size=anchor_params.anchor_size,
rpn_match_threshold=parser_params.rpn_match_threshold,
rpn_unmatched_threshold=parser_params.rpn_unmatched_threshold,
rpn_batch_size_per_im=parser_params.rpn_batch_size_per_im,
rpn_fg_fraction=parser_params.rpn_fg_fraction,
aug_rand_hflip=parser_params.aug_rand_hflip,
aug_scale_min=parser_params.aug_scale_min,
aug_scale_max=parser_params.aug_scale_max,
skip_crowd_during_training=parser_params.skip_crowd_during_training,
max_num_instances=parser_params.max_num_instances,
include_mask=params.architecture.include_mask,
mask_crop_size=parser_params.mask_crop_size,
use_bfloat16=params.architecture.use_bfloat16,
mode=mode)
if mode == ModeKeys.TRAIN and parser_params.copy_paste:
parser_fn = maskrcnn_parser_with_copy_paste.Parser(
output_size=parser_params.output_size,
min_level=params.architecture.min_level,
max_level=params.architecture.max_level,
num_scales=anchor_params.num_scales,
aspect_ratios=anchor_params.aspect_ratios,
anchor_size=anchor_params.anchor_size,
rpn_match_threshold=parser_params.rpn_match_threshold,
rpn_unmatched_threshold=parser_params.rpn_unmatched_threshold,
rpn_batch_size_per_im=parser_params.rpn_batch_size_per_im,
rpn_fg_fraction=parser_params.rpn_fg_fraction,
aug_rand_hflip=parser_params.aug_rand_hflip,
aug_scale_min=parser_params.aug_scale_min,
aug_scale_max=parser_params.aug_scale_max,
skip_crowd_during_training=parser_params.skip_crowd_during_training,
max_num_instances=parser_params.max_num_instances,
include_mask=params.architecture.include_mask,
mask_crop_size=parser_params.mask_crop_size,
use_bfloat16=params.architecture.use_bfloat16,
mode=mode)
elif params.architecture.parser == 'extract_objects_parser':
parser_params = params.maskrcnn_parser
parser_fn = extract_objects_parser.Parser(
output_size=parser_params.output_size,
min_level=params.architecture.min_level,
max_level=params.architecture.max_level,
aug_rand_hflip=parser_params.aug_rand_hflip,
aug_scale_min=parser_params.aug_scale_min,
aug_scale_max=parser_params.aug_scale_max,
skip_crowd_during_training=parser_params.skip_crowd_during_training,
include_mask=params.architecture.include_mask,
mask_crop_size=parser_params.mask_crop_size)
elif params.architecture.parser == 'shapemask_parser':
anchor_params = params.anchor
parser_params = params.shapemask_parser
parser_fn = shapemask_parser.Parser(
output_size=parser_params.output_size,
min_level=params.architecture.min_level,
max_level=params.architecture.max_level,
num_scales=anchor_params.num_scales,
aspect_ratios=anchor_params.aspect_ratios,
anchor_size=anchor_params.anchor_size,
use_category=parser_params.use_category,
outer_box_scale=parser_params.outer_box_scale,
box_jitter_scale=parser_params.box_jitter_scale,
num_sampled_masks=parser_params.num_sampled_masks,
mask_crop_size=parser_params.mask_crop_size,
mask_min_level=parser_params.mask_min_level,
mask_max_level=parser_params.mask_max_level,
upsample_factor=parser_params.upsample_factor,
match_threshold=parser_params.match_threshold,
unmatched_threshold=parser_params.unmatched_threshold,
aug_rand_hflip=parser_params.aug_rand_hflip,
aug_scale_min=parser_params.aug_scale_min,
aug_scale_max=parser_params.aug_scale_max,
skip_crowd_during_training=parser_params.skip_crowd_during_training,
max_num_instances=parser_params.max_num_instances,
use_bfloat16=params.architecture.use_bfloat16,
mask_train_class=parser_params.mask_train_class,
mode=mode)
elif params.architecture.parser == 'segmentation_parser':
parser_params = params.segmentation_parser
parser_fn = segmentation_parser.Parser(
output_size=parser_params.output_size,
resize_eval=parser_params.resize_eval,
ignore_label=parser_params.ignore_label,
aug_rand_hflip=parser_params.aug_rand_hflip,
aug_scale_min=parser_params.aug_scale_min,
aug_scale_max=parser_params.aug_scale_max,
aug_policy=parser_params.aug_policy,
use_bfloat16=params.architecture.use_bfloat16,
mode=mode)
else:
raise ValueError('Parser %s is not supported.' % params.architecture.parser)
return parser_fn
| apache-2.0 | -7,713,678,602,931,322,000 | 47.59375 | 80 | 0.705209 | false | 3.553473 | false | false | false |
pingwin/lexsocial | main.py | 1 | 3949 | #!./.virtenv/bin/python
from flask import (
Flask,
request,
jsonify,
render_template,
url_for,
send_from_directory
)
import datetime
import logging
from dateutil import tz
from flask.ext.pymongo import PyMongo
from flask.ext.script import Manager
from flask_admin import Admin
from models import (
EventsView,
GroupsView
)
##------------------------------------------------------------------------------
## Bootstraping Flask
##------------------------------------------------------------------------------
app = Flask(
'lexsocial',
static_folder = 'static',
template_folder = 'templates'
)
app.config.from_object('settings')
with app.app_context():
app.logger.setLevel(level=
getattr(logging, app.config.get('LOG_LEVEL', 'DEBUG')))
mongo = PyMongo(app)
manager = Manager(app)
##------------------------------------------------------------------------------
## Admin Panel
##------------------------------------------------------------------------------
admin = Admin(app, url=app.config.get('ADMIN_URL', '/admin/'), name='LexSocial', template_mode='bootstrap3')
with app.app_context():
admin.add_view(EventsView(mongo.db.events, 'Events'))
admin.add_view(GroupsView(mongo.db.groups, 'Groups'))
##------------------------------------------------------------------------------
## Application Commands
##------------------------------------------------------------------------------
@manager.command
def sync_calendars():
from sync import sync
return sync(app, mongo)
##------------------------------------------------------------------------------
## Views
##------------------------------------------------------------------------------
@app.route("/", methods=['GET'])
def main():
return render_template(
'index.html',
groups = mongo.db.groups.find()
)
@app.route("/events", methods=['GET'])
def events_json():
browser_tz = tz.gettz(request.args.get('browser_timezone', app.config.get('TIMEZONE')))
From = datetime.datetime.fromtimestamp((int(request.args.get('from'))/1000)-86400, tz=browser_tz)
To = datetime.datetime.fromtimestamp((int(request.args.get('to') )/1000)+86400, tz=browser_tz)
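    # 'from'/'to' arrive as JavaScript millisecond timestamps; dividing by 1000
    # and padding by 86400 s (one day) on each side keeps events that straddle
    # the requested window in the result.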
return jsonify(
success = 1,
result =
map(lambda x:
{'id' : str(x['_id']),
'title' : x['summary'],
'description' : x['desc'],
'url' : '/event/'+str(x['_id']),
'bg_color' : mongo.db.groups.find_one({'_id': x['group_id']}).get('bg_color', None),
'start' : int(x['start'].replace(tzinfo=browser_tz).strftime("%s")) * 1000,
'end' : int(x['end'].replace(tzinfo=browser_tz).strftime("%s")) * 1000
},
mongo.db.events.find({'start' : {'$gte' : From},
'end' : {'$lte': To}
})))
@app.route('/event/<ObjectId:event_id>', methods=['GET'])
def event_details(event_id):
return render_template(
'event_details.html',
event = mongo.db.events.find_one_or_404(
{'_id' : event_id}))
@app.route('/<any(tmpls,components,img,js,css):subdir>/<path:filename>', methods=['GET'])
def static_files(subdir, filename):
app.logger.debug("subdir:'%s' filename:'%s'" % (subdir, filename))
return send_from_directory('static/'+subdir, filename)
##------------------------------------------------------------------------------
## Now let'r rip!
##------------------------------------------------------------------------------
if __name__ == '__main__':
manager.run()
| bsd-2-clause | -8,028,019,517,174,003,000 | 35.906542 | 108 | 0.425424 | false | 4.565318 | false | false | false |
titilambert/harbour-squilla | squilla/lib/server.py | 1 | 5879 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# server.py
#
# This file is part of Squilla
#
# Copyright (C) 2014 Thibault Cohen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import socketserver
import threading
from xml.sax.saxutils import unescape
import pyotherside
from bs4 import BeautifulSoup
from squilla.lib.logger import logger
from squilla.lib import get_presence_auth_user
from squilla.lib.scheduler import send_sms_q
class PresenceServer():
def __init__(self, auth_user=None):
self.host = '0.0.0.0'
self.port = 5299
self.auth_user = auth_user
self.server = None
def shutdown(self):
self.server.shutdown()
del(self.server)
self.server = None
logger.debug("Presence server stopped")
def restart(self):
# Kill old server
if self.server is not None:
self.shutdown()
# Start new one
self.server = self.ThreadedTCPServer((self.host, self.port), self.ThreadedTCPRequestHandler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def get_user(self, soup):
""" Get target user ( friend cell phone number )
"""
attrs = dict(soup.first().attrs)
if 'to' in attrs.keys():
if attrs['to'] != '':
return attrs['to']
return False
def handle(self):
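            # Minimal XMPP-over-Bonjour exchange: answer the initial stream header,
            # reply to the <stream:features/> probe, then queue the body of an
            # authorized <message> stanza as an outgoing SMS.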
presence_auth_user = get_presence_auth_user()
if presence_auth_user is None:
return
auth_user = presence_auth_user.get("name", None)
recvData = self.request.recv(1024)
# First message
if recvData.startswith(b"<?xml version"):
                # Check that the sender is the authorized user
soup = BeautifulSoup(recvData)
sender = soup.find("stream:stream").get("from", None)
if auth_user is None or not sender.startswith(auth_user + "@"):
logger.debug("Bonjour message received "
"from unauthorized user")
self.request.close()
return
# Get target user ( friend cell phone number)
user = soup.find("stream:stream").get("to", None).rsplit("@", 1)[0]
if not user:
# User not found
self.request.close()
return
logger.debug("Bonjour message received "
"from authorized user: %s" % user)
# First reply
sendData = (u"""<?xml version='1.0' encoding='UTF-8'?>"""
u"""<stream:stream xmlns='jabber:client' """
u"""xmlns:stream='http://etherx.jabber.org/streams'"""
u""" to="%s" from="%s" version="1.0">"""
% (sender, user))
self.request.sendall(sendData.encode('utf-8'))
recvData = self.request.recv(1024)
###### Second msg ######
if recvData == "<stream:features/>":
sendData = """<stream:features />"""
self.request.sendall(sendData.encode('utf-8'))
recvData = self.request.recv(1024)
if recvData.startswith(b"<message"):
soup = BeautifulSoup(recvData)
if not hasattr(soup, "message"):
self.request.close()
return
sender = soup.message.get("from")
if auth_user is None or not sender.startswith(auth_user + "@"):
logger.debug("Bonjour message received "
"from unauthorized user")
self.request.close()
return
if not hasattr(soup, "body"):
self.request.close()
return
# Get target user ( friend cell phone number)
friend = soup.message.get("to", None)
if not friend:
# User not found
self.request.close()
return
# Get Message
message = soup.message.body.text
                    message = unescape(message, {"&apos;": "'", "&quot;": '"'})
logger.debug("New sms for %s queued" % friend)
logger.debug("New sms content %s" % message)
# Put message in sms queue
if message:
send_sms_q.put({'to': friend,
'message': message
},
block=False,
)
else:
# NOTIFY ???
                        # message empty ????
pass
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
| gpl-3.0 | 7,752,167,135,796,823,000 | 34.847561 | 100 | 0.519136 | false | 4.603759 | false | false | false |
coll-gate/collgate | server/geonames/management/commands/city.py | 1 | 9190 | # -*- coding: utf-8; -*-
#
# @file city.py
# @brief
# @author Medhi BOULNEMOUR (INRA UMR1095)
# @date 2017-01-03
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
"""
Install City
"""
from django.db import transaction
from django.core.management import BaseCommand
from geonames.appsettings import CITY_SOURCES, IGeoname, DATA_DIR
from geonames import instance
from geonames.models import City, Country
from geonames.geonames import Geonames
import progressbar
import resource
import sys
import os
from django.utils import timezone
from colorama import Fore, Style
class MemoryUsageWidget(progressbar.widgets.WidgetBase):
def __call__(self, progress, data):
if sys.platform != 'win32':
return '%s kB' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
return '?? kB'
class Command(BaseCommand):
help = """Download all files in GEONAMES_CITY_SOURCES if they were updated or if
--force option was used.
And Import city data if they were downloaded."""
def __init__(self):
super(Command, self).__init__()
self.progress_enabled = False
self.progress_widgets = None
self.progress = 0
self.force = False
self.export_file = None
self.export = False
self.delete = False
self.verbosity = None
self.no_color = None
self.cities_bulk = []
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
'-f', '--force',
action='store_true',
dest='force',
default=False,
help='Download and import even if matching files are up-to-date',
)
parser.add_argument(
'-np', '--no-progress',
action='store_true',
dest='no-progress',
default=False,
help='Hide progress bar'
)
parser.add_argument(
'-e', '--export',
dest='export',
action='store',
default=False,
nargs='?',
help='Export files with matching data only. Absolute path to export file'
)
parser.add_argument(
'-d', '--delete',
dest='delete',
action='store_true',
default=False,
help='Delete local source files after importation'
)
def progress_init(self):
"""Initialize progress bar."""
if self.progress_enabled:
self.progress = 0
self.progress_widgets = [
Fore.LIGHTCYAN_EX,
'RAM used: ',
MemoryUsageWidget(),
' ',
progressbar.ETA(),
' Done: ',
progressbar.Percentage(),
' ',
progressbar.Bar(
marker='▓',
fill='░'
),
' ',
progressbar.AnimatedMarker(markers='⎺⎻⎼⎽⎼⎻'),
' ',
Style.RESET_ALL,
]
def progress_start(self, max_value):
"""Start progress bar."""
if self.progress_enabled:
self.progress = progressbar.ProgressBar(
max_value=max_value,
widgets=self.progress_widgets
).start()
def progress_update(self, value):
"""Update progress bar."""
if self.progress_enabled:
self.progress.update(value)
def progress_finish(self):
"""Finalize progress bar."""
if self.progress_enabled:
self.progress.finish()
@transaction.atomic
def handle(self, *args, **options):
self.city_manager(args, options)
def city_manager(self, args, options):
self.progress_enabled = not options.get('no-progress')
self.export = options.get('export')
self.force = options.get('force')
self.verbosity = options.get('verbosity')
self.no_color = options.get('no_color')
if self.export is None:
self.export = '%s/city_light_%s.txt' % (DATA_DIR,
timezone.now().isoformat('_')
.replace(':', '-')
.replace('.', '-'))
self.delete = options.get('delete')
self.progress_init()
if self.export:
file_path = self.export
if os.path.exists(file_path):
os.remove(file_path)
else:
print('Creating %s' % file_path)
self.export_file = open(file_path, 'a')
for source in CITY_SOURCES:
geonames = Geonames(source, force=self.force)
if not geonames.need_run:
continue
i = 0
nb_lines = geonames.num_lines()
refresh_tx = int(nb_lines / 100) if (nb_lines / 100) >= 1 else 1
self.progress_start(nb_lines)
if not self.progress_enabled:
print('Importing...')
cities_to_check = []
for items in geonames.parse():
current_city = self.city_check(items)
if current_city:
cities_to_check.append(current_city)
if len(cities_to_check) >= 500:
self.city_bulk(cities_to_check)
cities_to_check = []
i += 1
if i % refresh_tx == 0:
self.progress_update(i)
if cities_to_check:
self.city_bulk(cities_to_check)
self.progress_finish()
if self.export:
self.export_file.close()
geonames.finish(delete=self.delete)
def city_check(self, items):
if not items[IGeoname.featureCode] in instance.geonames_include_city_types:
return False
return {
'geoname_id': int(items[IGeoname.geonameid]),
'name': items[IGeoname.name],
'country_code': items[IGeoname.countryCode],
'country_id': self._get_country_id(items[IGeoname.countryCode]),
'latitude': items[IGeoname.latitude],
'longitude': items[IGeoname.longitude],
'population': items[IGeoname.population],
'feature_code': items[IGeoname.featureCode]
}
def city_bulk(self, cities_to_check):
bulk = []
for city in cities_to_check:
result = City.objects.filter(geoname_id=city.get('geoname_id'))
if result:
result[0].name = city.get('name')
result[0].country_id = city.get('country_id')
result[0].latitude = city.get('latitude')
result[0].longitude = city.get('longitude')
result[0].population = city.get('population')
result[0].feature_code = city.get('feature_code')
result[0].save()
town = result[0]
else:
town = City(
geoname_id=city.get('geoname_id'),
name=city.get('name'),
country_id=city.get('country_id'),
latitude=city.get('latitude'),
longitude=city.get('longitude'),
population=city.get('population'),
feature_code=city.get('feature_code')
)
bulk.append(town)
if self.export:
r = [""] * 18
r[IGeoname.name] = city.get('name')
r[IGeoname.countryCode] = city.get('country_code')
r[IGeoname.latitude] = city.get('latitude')
r[IGeoname.longitude] = city.get('longitude')
r[IGeoname.population] = city.get('population')
r[IGeoname.featureCode] = city.get('feature_code')
r[IGeoname.geonameid] = str(city.get('geoname_id'))
self.export_file.write('\t'.join(r) + '\n')
self.display_entry_message(town, True if result else False)
if bulk:
City.objects.bulk_create(bulk)
self.display_bulk_message(len(bulk))
def _get_country_id(self, code2):
"""
Simple lazy identity map for code2->country
"""
if not hasattr(self, '_country_codes'):
self._country_codes = {}
if code2 not in self._country_codes.keys():
self._country_codes[code2] = Country.objects.get(code2=code2).pk
return self._country_codes[code2]
def display_bulk_message(self, bulk_size):
if not self.progress_enabled and self.verbosity:
print('BULK INSERT!\tNb_entries:%s' % bulk_size)
def display_entry_message(self, city, state):
if not self.progress_enabled and self.verbosity:
display_state = "UPDATED" if state else "ADD"
if not self.no_color:
display_state = (Fore.BLUE if state else Fore.GREEN) + display_state + Style.RESET_ALL
print('[%s] %s' % (display_state, city))
| mit | 9,152,917,883,174,326,000 | 31.416961 | 102 | 0.523981 | false | 4.123146 | false | false | false |
yxd-hde/lambda-poll-update-delete | py-asyncio-boto3/poll-test.py | 1 | 1045 | import trollius as asyncio
import boto3
from concurrent.futures import ThreadPoolExecutor
from poll import Poll
import logging
logging.getLogger(
'botocore.vendored.requests.packages.urllib3.connectionpool'
).setLevel(logging.CRITICAL)
logging.getLogger('boto3.resources.action').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
session = boto3.session.Session(region_name='ap-northeast-1')
sqs = session.resource('sqs')
def handler(event, context):
logger.info("Start!")
executor = ThreadPoolExecutor(max_workers=1000)
main_loop = asyncio.new_event_loop()
main_loop.set_default_executor(executor)
asyncio.set_event_loop(main_loop)
poll = Poll(main_loop)
queue_url = event['queueUrl']
message_count = event['messageCount']
poll.messages(sqs, queue_url, message_count)
logger.info("Receive API count: {}".format(poll.fetch_count))
logger.info("Fetched messages: {}".format(poll.message_count))
main_loop.close()
executor.shutdown()
| mit | 8,745,223,813,832,195,000 | 25.125 | 70 | 0.734928 | false | 3.554422 | false | false | false |
igor-toga/local-snat | neutron/agent/l2/extensions/qos.py | 4 | 11496 | # Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from neutron_lib import exceptions
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from neutron._i18n import _LW, _LI
from neutron.agent.l2 import l2_agent_extension
from neutron.agent.linux import tc_lib
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron import manager
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
"""Defines stable abstract interface for QoS Agent Driver.
QoS Agent driver defines the interface to be implemented by Agent
for applying QoS Rules on a port.
"""
# Each QoS driver should define the set of rule types that it supports, and
# corresponding handlers that has the following names:
#
# create_<type>
# update_<type>
# delete_<type>
#
# where <type> is one of VALID_RULE_TYPES
SUPPORTED_RULES = set()
@abc.abstractmethod
def initialize(self):
"""Perform QoS agent driver initialization.
"""
def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('create', port, qos_policy)
def consume_api(self, agent_api):
"""Consume the AgentAPI instance from the QoSAgentExtension class
This allows QosAgentDrivers to gain access to resources limited to the
NeutronAgent when this method is overridden.
:param agent_api: An instance of an agent specific API
"""
def update(self, port, qos_policy):
"""Apply QoS rules on port.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('update', port, qos_policy)
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
if qos_policy is None:
rule_types = self.SUPPORTED_RULES
else:
rule_types = set(
[rule.rule_type
for rule in self._iterate_rules(qos_policy.rules)])
for rule_type in rule_types:
self._handle_rule_delete(port, rule_type)
def _iterate_rules(self, rules):
for rule in rules:
rule_type = rule.rule_type
if rule_type in self.SUPPORTED_RULES:
yield rule
else:
LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
'%(rule_type)s; skipping'),
{'rule_id': rule.id, 'rule_type': rule_type})
def _handle_rule_delete(self, port, rule_type):
handler_name = "".join(("delete_", rule_type))
handler = getattr(self, handler_name)
handler(port)
def _handle_update_create_rules(self, action, port, qos_policy):
for rule in self._iterate_rules(qos_policy.rules):
if rule.should_apply_to_port(port):
handler_name = "".join((action, "_", rule.rule_type))
handler = getattr(self, handler_name)
handler(port, rule)
else:
LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
{'port': port, 'rule': rule.id})
def _get_egress_burst_value(self, rule):
"""Return burst value used for egress bandwidth limitation.
Because Egress bw_limit is done on ingress qdisc by LB and ovs drivers
so it will return burst_value used by tc on as ingress_qdisc.
"""
return tc_lib.TcCommand.get_ingress_qdisc_burst_value(
rule.max_kbps, rule.max_burst_kbps)
class PortPolicyMap(object):
def __init__(self):
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_policies = {}
self.port_policies = {}
def get_ports(self, policy):
return self.qos_policy_ports[policy.id].values()
def get_policy(self, policy_id):
return self.known_policies.get(policy_id)
def update_policy(self, policy):
self.known_policies[policy.id] = policy
def has_policy_changed(self, port, policy_id):
return self.port_policies.get(port['port_id']) != policy_id
def get_port_policy(self, port):
policy_id = self.port_policies.get(port['port_id'])
if policy_id:
return self.get_policy(policy_id)
def set_port_policy(self, port, policy):
"""Attach a port to policy and return any previous policy on port."""
port_id = port['port_id']
old_policy = self.get_port_policy(port)
self.known_policies[policy.id] = policy
self.port_policies[port_id] = policy.id
self.qos_policy_ports[policy.id][port_id] = port
if old_policy and old_policy.id != policy.id:
del self.qos_policy_ports[old_policy.id][port_id]
return old_policy
def clean_by_port(self, port):
"""Detach port from policy and cleanup data we don't need anymore."""
port_id = port['port_id']
if port_id in self.port_policies:
del self.port_policies[port_id]
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
if not port_dict:
self._clean_policy_info(qos_policy_id)
return
raise exceptions.PortNotFound(port_id=port['port_id'])
def _clean_policy_info(self, qos_policy_id):
del self.qos_policy_ports[qos_policy_id]
del self.known_policies[qos_policy_id]
class QosAgentExtension(l2_agent_extension.L2AgentExtension):
SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY]
def initialize(self, connection, driver_type):
"""Initialize agent extension."""
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self.qos_driver = manager.NeutronManager.load_class_for_provider(
'neutron.qos.agent_drivers', driver_type)()
self.qos_driver.consume_api(self.agent_api)
self.qos_driver.initialize()
self.policy_map = PortPolicyMap()
self._register_rpc_consumers(connection)
def consume_api(self, agent_api):
"""Allows an extension to gain access to resources internal to the
neutron agent and otherwise unavailable to the extension.
"""
self.agent_api = agent_api
def _register_rpc_consumers(self, connection):
"""Allows an extension to receive notifications of updates made to
items of interest.
"""
endpoints = [resources_rpc.ResourcesPushRpcCallback()]
for resource_type in self.SUPPORTED_RESOURCE_TYPES:
# We assume that the neutron server always broadcasts the latest
# version known to the agent
registry.subscribe(self._handle_notification, resource_type)
topic = resources_rpc.resource_type_versioned_topic(resource_type)
connection.create_consumer(topic, endpoints, fanout=True)
@lockutils.synchronized('qos-port')
def _handle_notification(self, qos_policies, event_type):
        # The server does not allow removing a policy that is attached to
        # any port, so we ignore DELETED events. Also, if we receive a
        # CREATED event for a policy, it means that no ports are attached
        # to it yet. That's why we only care about UPDATED events.
if event_type == events.UPDATED:
for qos_policy in qos_policies:
self._process_update_policy(qos_policy)
@lockutils.synchronized('qos-port')
def handle_port(self, context, port):
"""Handle agent QoS extension for port.
This method applies a new policy to a port using the QoS driver.
Update events are handled in _handle_notification.
"""
port_id = port['port_id']
port_qos_policy_id = port.get('qos_policy_id')
network_qos_policy_id = port.get('network_qos_policy_id')
qos_policy_id = port_qos_policy_id or network_qos_policy_id
if qos_policy_id is None:
self._process_reset_port(port)
return
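        # Skip the RPC pull when the port is already bound to this policy.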
if not self.policy_map.has_policy_changed(port, qos_policy_id):
return
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
if qos_policy is None:
LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
"%(port_id)s is not available on server, "
"it has been deleted. Skipping."),
{'qos_policy_id': qos_policy_id, 'port_id': port_id})
self._process_reset_port(port)
else:
old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
if old_qos_policy:
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
else:
self.qos_driver.create(port, qos_policy)
def delete_port(self, context, port):
self._process_reset_port(port)
def _policy_rules_modified(self, old_policy, policy):
return not (len(old_policy.rules) == len(policy.rules) and
all(i in old_policy.rules for i in policy.rules))
def _process_update_policy(self, qos_policy):
old_qos_policy = self.policy_map.get_policy(qos_policy.id)
if old_qos_policy:
if self._policy_rules_modified(old_qos_policy, qos_policy):
for port in self.policy_map.get_ports(qos_policy):
#NOTE(QoS): for now, just reflush the rules on the port.
# Later, we may want to apply the difference
# between the old and new rule lists.
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
self.policy_map.update_policy(qos_policy)
def _process_reset_port(self, port):
try:
self.policy_map.clean_by_port(port)
self.qos_driver.delete(port)
except exceptions.PortNotFound:
LOG.info(_LI("QoS extension did have no information about the "
"port %s that we were trying to reset"),
port['port_id'])
| apache-2.0 | -7,872,976,652,774,280,000 | 38.369863 | 79 | 0.61952 | false | 3.946447 | false | false | false |
Luxoft/Twister | server/CentralEngine.py | 1 | 5887 | #!/usr/bin/env python2.7
# version: 3.011
# File: CentralEngine.py ; This file is part of Twister.
# Copyright (C) 2012-2014 , Luxoft
# Authors:
# Andrei Costachi <[email protected]>
# Cristi Constantin <[email protected]>
# Daniel Cioata <[email protected]>
# Mihai Tudoran <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file starts the Twister Server.
"""
# Patch for _DummyThread __stop error
import threading
threading._DummyThread._Thread__stop = lambda x: 1
import cherrypy
cherrypy.log.access_log.propagate = False
cherrypy.log.error_log.setLevel(10)
import os
import sys
import time
import thread
from rpyc.utils.server import ThreadPoolServer
if not sys.version.startswith('2.7'):
print 'Python version error! Central Engine must run on Python 2.7!'
exit(1)
TWISTER_PATH = os.getenv('TWISTER_PATH')
if not TWISTER_PATH:
print 'TWISTER_PATH environment variable is not set! Exiting!'
exit(1)
if TWISTER_PATH not in sys.path:
sys.path.append(TWISTER_PATH)
from common.tsclogging import logDebug, logInfo, logWarning, logError, logCritical
from common.tsclogging import setLogLevel
from server.CeProject import Project
from server.CeXmlRpc import CeXmlRpc
from server.CeRpyc import CeRpycService
from common import iniparser
#
if __name__ == "__main__":
if os.getuid() != 0:
logWarning('Twister Server should run as ROOT! If it doesn\'t, '
'it won\'t be able to read config files and write logs for all users!')
SERVER_PORT = sys.argv[1:2]
if not SERVER_PORT:
logCritical('Twister Server: Must start with parameter PORT number!')
exit(1)
else:
try:
SERVER_PORT = int(SERVER_PORT[0])
except Exception:
logCritical('Twister Server: Must start with parameter PORT number!')
exit(1)
# Read verbosity from configuration
CFG_PATH = '{}/config/server_init.ini'.format(TWISTER_PATH)
VERBOSITY = 20
if os.path.isfile(CFG_PATH):
CFG = iniparser.ConfigObj(CFG_PATH)
VERBOSITY = CFG.get('verbosity', 20)
del CFG
RET = setLogLevel(VERBOSITY)
if not RET:
logError('Log: The Log level will default to INFO.')
# RPyc config
CONFIG = {
'allow_pickle': True,
'allow_getattr': True,
'allow_setattr': True,
'allow_delattr': True,
'allow_all_attrs': True,
}
# CherryPy config
CONF = {
'global': {
'server.socket_host': '0.0.0.0',
'server.socket_port': SERVER_PORT,
'server.thread_pool': 90,
'engine.autoreload.on': False,
'log.screen': False,
'tools.sessions.on': True,
'tools.sessions.timeout': 60*24*365,
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'Twister Server',
'tools.auth_basic.checkpassword': Project.check_passwd
},
'/static': {
'tools.sessions.on': False,
'tools.auth_basic.on': False,
'tools.auth_digest.on': False,
'tools.auth.on': False,
'tools.staticdir.on': True,
'tools.staticdir.dir': TWISTER_PATH + '/server/static'
}
}
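    # Main serve loop: each pass builds fresh RPyc and CherryPy servers, serves
    # until CherryPy exits, then tears everything down before restarting.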
while 1:
        # The RPyc service listens on its own port, offset from the web port by 10.
RPYC_PORT = SERVER_PORT + 10
try:
RPYC_SERVER = ThreadPoolServer(CeRpycService, port=RPYC_PORT, protocol_config=CONFIG)
RPYC_SERVER.logger.setLevel(30)
except Exception:
logCritical('Twister Server: Cannot launch the RPyc server on port `{}`!'.format(RPYC_PORT))
exit(1)
# Project manager does everything
PROJ = Project()
PROJ.rsrv = RPYC_SERVER
# CE is the XML-RPC interface
CE = CeXmlRpc(PROJ)
def close():
""" Close server. """
RPYC_SERVER.close()
logDebug('---- RPYC CLOSED')
del PROJ.manager
logDebug('---- PROJ MANAGER CLOSED')
del PROJ.testbeds
logDebug('---- PROJ TESTBEDS CLOSED')
del PROJ.sut
logDebug('---- PROJ SUTS CLOSED')
del PROJ.rsrv
logDebug('---- PROJ RSRV CLOSED')
time.sleep(2)
del PROJ.clearFs
logDebug('---- PROJ CLEARFS CLOSED')
del PROJ.localFs
logDebug('---- PROJ LOCALFS CLOSED')
PROJ.ip_port = ('127.0.0.1', SERVER_PORT)
CE.web = PROJ.web
CE.tb = PROJ.testbeds
CE.sut = PROJ.sut
CE.report = PROJ.report
# EE Manager is the helper for EPs and Clients
# Inject the project as variable for EE
RPYC_SERVER.service.inject_object('project', PROJ)
RPYC_SERVER.service.inject_object('cherry', CE)
# Start rpyc server
thread.start_new_thread(RPYC_SERVER.start, ())
logInfo('RPYC Serving on 0.0.0.0:{}'.format(RPYC_PORT))
# Start !
# cherrypy.engine.signal_handler.handlers['SIGTERM'] = close
# cherrypy.engine.subscribe('exit', close)
cherrypy.quickstart(CE, '/', config=CONF)
time.sleep(30)
        cherrypy.server.stop()
time.sleep(10)
cherrypy.engine.exit()
time.sleep(30)
close()
time.sleep(5)
# del PROJ
#
| apache-2.0 | -5,693,460,127,002,823,000 | 29.661458 | 104 | 0.606421 | false | 3.531494 | true | false | false |
OpenNetworkingFoundation/libfluid | examples/python/of13switch.py | 1 | 3264 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from fluid.base import OFServer, OFServerSettings, OFConnection
from fluid.msg import of13
table = {}
class LearningSwitch(OFServer):
def __init__(self):
OFServer.__init__(self, "0.0.0.0", 6653, 1)
config = OFServerSettings()
config.supported_version(4)
self.set_config(config)
def message_callback(self, conn, type_, data, length):
try:
if type_ == 10: # Packet in
conn_id = conn.get_id()
pi = of13.PacketIn()
pi.unpack(data)
in_port = pi.match().in_port().value()
packet = of13.cdata(pi.data(), pi.data_len())
dl_dst, dl_src = packet[:6], packet[6:12]
# Flood broadcast
if dl_dst == '\xff\xff\xff\xff\xff\xff':
self.flood(conn, pi, in_port)
else:
if conn_id not in table:
table[conn_id] = {}
# Learn that dl_src is at in_port
table[conn_id][dl_src] = in_port
# If we don't know where the destination is, flood
if dl_dst not in table[conn_id]:
self.flood(conn, pi, in_port)
# Otherwise, install a new flow connecting src and dst
else:
port = table[conn_id][dl_dst]
fm = of13.FlowMod(pi.xid(), 123, 0xffffffffffffffff,
0, 0, 5, 60, 300, pi.buffer_id(),
0, 0, 0);
fsrc = of13.EthSrc(of13.EthAddress(of13.btom(dl_src)))
fdst = of13.EthDst(of13.EthAddress(of13.btom(dl_dst)))
fm.add_oxm_field(fsrc);
fm.add_oxm_field(fdst);
act = of13.OutputAction(port, 1024);
inst = of13.ApplyActions();
inst.add_action(act);
fm.add_instruction(inst);
buff = fm.pack();
conn.send(buff, fm.length());
elif type_ == 6: # Features reply
print "New datapath! Installing default flow."
self.install_default_flow_mod(conn);
        except Exception as e:
            print "Exception", e
def flood(self, conn, pi, port):
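        # 0xfffffffb is OFPP_FLOOD: output on all ports except the one the packet arrived on.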
act = of13.OutputAction(0xfffffffb, 1024)
msg = of13.PacketOut(pi.xid(), pi.buffer_id(), port)
msg.add_action(act)
if (pi.buffer_id() == -1):
msg.data(pi.data(), pi.data_len());
buff = msg.pack()
conn.send(buff, msg.length())
def install_default_flow_mod(self, conn):
fm = of13.FlowMod(42, 0, 0xffffffffffffffff, 0, 0, 0, 0, 0,
0xffffffff, 0, 0, 0);
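        # 0xfffffffd is OFPP_CONTROLLER; max_len 0xffff asks for the full packet.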
act = of13.OutputAction(0xfffffffd, 0xffff);
inst = of13.ApplyActions();
inst.add_action(act);
fm.add_instruction(inst);
buff = fm.pack();
conn.send(buff, fm.length());
c = LearningSwitch()
c.start(False)
raw_input("Press Enter to stop.")
c.stop()
| apache-2.0 | -6,113,062,158,835,330,000 | 39.296296 | 79 | 0.472426 | false | 3.786543 | false | false | false |
matt-gardner/deep_qa | tests/data/datasets/snli_dataset_test.py | 1 | 1987 | # pylint: disable=no-self-use,invalid-name
from deep_qa.data.datasets import SnliDataset
from deep_qa.data.instances.entailment.snli_instance import SnliInstance
from tests.common.test_case import DeepQaTestCase
class TestSnliDataset(DeepQaTestCase):
def setUp(self):
super(TestSnliDataset, self).setUp()
self.write_original_snli_data()
def test_read_from_file(self):
dataset = SnliDataset.read_from_file(self.TRAIN_FILE, SnliInstance)
instance1 = SnliInstance("A person on a horse jumps over a broken down airplane.",
"A person is training his horse for a competition.",
"neutral")
instance2 = SnliInstance("A person on a horse jumps over a broken down airplane.",
"A person is at a diner, ordering an omelette.",
"contradicts")
instance3 = SnliInstance("A person on a horse jumps over a broken down airplane.",
"A person is outdoors, on a horse.",
"entails")
assert len(dataset.instances) == 3
instance = dataset.instances[0]
assert instance.index == instance1.index
assert instance.first_sentence == instance1.first_sentence
assert instance.second_sentence == instance1.second_sentence
assert instance.label == instance1.label
instance = dataset.instances[1]
assert instance.index == instance2.index
assert instance.first_sentence == instance2.first_sentence
assert instance.second_sentence == instance2.second_sentence
assert instance.label == instance2.label
instance = dataset.instances[2]
assert instance.index == instance3.index
assert instance.first_sentence == instance3.first_sentence
assert instance.second_sentence == instance3.second_sentence
assert instance.label == instance3.label
| apache-2.0 | -6,493,186,564,760,148,000 | 46.309524 | 90 | 0.640161 | false | 4.310195 | true | false | false |
sparkslabs/kamaelia_ | Sketches/MPS/HTTP/michael.py | 3 | 4406 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import Axon
from Kamaelia.Chassis.ConnectedServer import SimpleServer
from Kamaelia.Protocol.HTTP.HTTPServer import HTTPServer
from Kamaelia.Protocol.HTTP.Handlers.Minimal import Minimal
import Kamaelia.Protocol.HTTP.ErrorPages as ErrorPages
from Kamaelia.Chassis.Pipeline import Pipeline
homedirectory = "/srv/www/htdocs/"
indexfilename = "index.html"
def requestHandlers(URLHandlers):
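    # createRequestHandler picks the first handler whose URL prefix matches the request's raw URI.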
def createRequestHandler(request):
if request.get("bad"):
return ErrorPages.websiteErrorPage(400, request.get("errormsg",""))
else:
for (prefix, handler) in URLHandlers:
if request["raw-uri"][:len(prefix)] == prefix:
request["uri-prefix-trigger"] = prefix
request["uri-suffix"] = request["raw-uri"][len(prefix):]
return handler(request)
return ErrorPages.websiteErrorPage(404, "No resource handlers could be found for the requested URL.")
return createRequestHandler
class HelloHandler(Axon.Component.component):
def __init__(self, request):
super(HelloHandler, self).__init__()
self.request = request
def main(self):
resource = {
"type" : "text/html",
"statuscode" : "200",
"length": len("<html><body><h1>Hello World</h1><P> Game Over!! </body></html>"),
}
self.send(resource, "outbox"); yield 1
page = {
"data" : "<html><body><h1>Hello World</h1><P> Game Over!! </body></html>"
}
self.send(page, "outbox"); yield 1
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
class Cat(Axon.Component.component):
def __init__(self, *args):
super(Cat, self).__init__()
self.args = args
def main(self):
self.send(self.args, "outbox")
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
class ExampleWrapper(Axon.Component.component):
def main(self):
# Tell the browser the type of data we're sending!
resource = {
"type" : "text/html",
"statuscode" : "200",
}
self.send(resource, "outbox"); yield 1
# Send the header
header = {
"data" : "<html><body>"
}
self.send(header, "outbox"); yield 1
# Wait for it....
while not self.dataReady("inbox"):
self.pause()
yield 1
        # Send the data we receive as the page body
while self.dataReady("inbox"):
pageData = {
"data" : str(self.recv("inbox"))
}
self.send(pageData, "outbox"); yield 1
# send a footer
footer = {
"data" : "</body></html>"
}
self.send(footer, "outbox"); yield 1
# and shutdown nicely
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
def EchoHandler(request):
return Pipeline ( Cat(request), ExampleWrapper() )
def servePage(request):
return Minimal(request=request,
homedirectory=homedirectory,
indexfilename=indexfilename)
def HTTPProtocol():
return HTTPServer(requestHandlers([
["/echo", EchoHandler ],
["/hello", HelloHandler ],
["/", servePage ],
]))
SimpleServer(protocol=HTTPProtocol,
port=8082,
socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ).run()
| apache-2.0 | -2,036,248,962,736,340,500 | 32.12782 | 109 | 0.595778 | false | 4.016408 | false | false | false |
simsong/grr-insider | lib/fuse_mount_test.py | 1 | 11559 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for grr.tools.fuse_mount.py."""
import datetime
import os
import threading
import time
# pylint: disable=unused-import, g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import, g-bad-import-order
from grr.lib import aff4
from grr.lib import flow_utils
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.tools import fuse_mount
class MockFuseOSError(OSError):
"""A class to mock the fuse module's special OSError."""
pass
class MockFuse(object):
"""A class to mock the entire fuse module, if it is not present."""
# We rely on fuse.FuseOSError, so we add a mock of
# it to this mock module.
class FuseOSError(MockFuseOSError):
pass
# pylint: disable=invalid-name
# If fuse is not installed, replace the None returned by utils.ConditionalImport
# with our MockFuse object.
if fuse_mount.fuse is None:
fuse = MockFuse()
fuse_mount.fuse = fuse
else:
# If fuse IS installed, we refer to MockFuseOSError in our tests, so let's
# make that point to the real FuseOSError class.
MockFuseOSError = fuse_mount.fuse.FuseOSError
# pylint: enable=invalid-name
class GRRFuseDatastoreOnlyTest(test_lib.GRRBaseTest):
def setUp(self):
super(GRRFuseDatastoreOnlyTest, self).setUp()
self.client_name = "C." + "1" * 16
test_lib.ClientFixture(self.client_name, token=self.token)
self.root = "/"
self.passthrough = fuse_mount.GRRFuseDatastoreOnly(
self.root,
token=self.token)
def testInvalidAFF4Root(self):
with self.assertRaises(IOError):
fuse_mount.GRRFuseDatastoreOnly("not_a_valid_path",
token=self.token)
def _TestReadDir(self, directory):
contents = list(self.passthrough.readdir(directory))
for item in contents:
# All the filenames should be unicode strings.
self.assertTrue(isinstance(item, unicode))
self.assertTrue("." in contents and ".." in contents)
contents.remove(".")
contents.remove("..")
for child in contents:
child = os.path.join(directory, child)
# While checking if each child is a directory, we perform a stat on it in
# the _IsDir method. So this test ensures we can stat every valid path
# in the filesystem.
if self.passthrough._IsDir(child):
self._TestReadDir(child)
def testReadDir(self):
"""Recursively reads directories, making sure they exist."""
# Read everything the filesystem says is under the root.
self._TestReadDir(self.root)
def testReadExistingDir(self):
# In case the files reported were wrong, try and find this particular
# directory, which should exist.
existing_dir = os.path.join(self.root, self.client_name, "fs/os/c/bin/")
self._TestReadDir(existing_dir)
def testReadDirFile(self):
# We can't ls a file.
with self.assertRaises(MockFuseOSError):
file_path = os.path.join(self.root, self.client_name, "fs/os/c/bin/bash")
# We iterate through the generator so the error actually gets thrown.
list(self.passthrough.readdir(file_path))
def testAccessingDirThatDoesNotExist(self):
with self.assertRaises(MockFuseOSError):
list(self.passthrough.getattr("aff4:/This string is so silly",
"that it probably is not a directory"))
def testAccessingBlankDir(self):
with self.assertRaises(MockFuseOSError):
list(self.passthrough.getattr(""))
def testAccessingUnicodeDir(self):
with self.assertRaises(MockFuseOSError):
list(self.passthrough.getattr("ಠ_ಠ"))
def testGetAttrDir(self):
path = "/"
fd = aff4.FACTORY.Open(path, token=self.token)
self.assertEqual(self.passthrough.getattr("/"),
self.passthrough.MakePartialStat(fd))
def testGetAttrFile(self):
path = "/foreman"
fd = aff4.FACTORY.Open(path, token=self.token)
self.assertEqual(self.passthrough.getattr("/foreman"),
self.passthrough.MakePartialStat(fd))
def testExistingFileStat(self):
bash_stat = {
"st_ctime": rdfvalue.RDFDatetimeSeconds(1299502221),
"st_rdev": 0,
"st_mtime": rdfvalue.RDFDatetimeSeconds(1284154642),
"st_blocks": 16,
"st_nlink": 1,
"st_gid": 0,
"st_blksize": 4096,
"pathspec": rdfvalue.PathSpec(
path="/bin/bash",
pathtype="OS",
path_options="CASE_LITERAL"),
"st_dev": 51713,
"st_size": 4874,
"st_ino": 1026148,
"st_uid": 0,
"st_mode": rdfvalue.StatMode(33261),
"st_atime": rdfvalue.RDFDatetimeSeconds(1299502220)
}
bash_path = os.path.join("/", self.client_name, "fs/os/c/bin/bash")
self.assertItemsEqual(self.passthrough.getattr(bash_path), bash_stat)
def testReadNotFile(self):
with self.assertRaises(MockFuseOSError):
existing_dir = os.path.join(self.root, self.client_name, "/fs/os/c/bin")
self.passthrough.Read(existing_dir)
class GRRFuseTest(test_lib.FlowTestsBaseclass):
# Whether the tests are done and the fake server can stop running.
done = False
def __init__(self, method_name=None):
super(GRRFuseTest, self).__init__(method_name)
# Set up just once for the whole test suite, since we don't have any
# per-test setup to do.
super(GRRFuseTest, self).setUp()
self.client_name = str(self.client_id)[len("aff4:/"):]
with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
fd.Set(kb)
# Ignore cache so our tests always get client side updates.
self.grr_fuse = fuse_mount.GRRFuse(root="/", token=self.token,
ignore_cache=True)
self.action_mock = test_lib.ActionMock("TransferBuffer", "StatFile", "Find",
"HashFile", "HashBuffer",
"UpdateVFSFile",
"EnumerateInterfaces",
"EnumerateFilesystems",
"GetConfiguration",
"GetConfig", "GetClientInfo",
"GetInstallDate", "GetPlatformInfo",
"EnumerateUsers", "ListDirectory")
client_mock = test_lib.MockClient(self.client_id, self.action_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True, token=self.token)
# All the flows we've run so far. We'll check them for errors at the end of
# each test.
self.total_flows = set()
# We add the thread as a class variable since we'll be referring to it in
# the tearDownClass method, and we want all tests to share it.
self.__class__.fake_server_thread = threading.Thread(
target=self.RunFakeWorkerAndClient,
args=(client_mock,
worker_mock))
self.fake_server_thread.start()
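    # The background thread keeps servicing the fake client and worker queues
    # until tearDownClass sets done.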
@classmethod
def tearDownClass(cls):
cls.done = True
cls.fake_server_thread.join()
def tearDown(self):
super(GRRFuseTest, self).tearDown()
# Make sure all the flows finished.
test_lib.CheckFlowErrors(self.total_flows, token=self.token)
def ClientPathToAFF4Path(self, client_side_path):
return "/%s/fs/os%s" % (self.client_name, client_side_path)
def ListDirectoryOnClient(self, path):
# NOTE: Path is a client side path, so does not have a leading
# /<client name>/fs/os
pathspec = rdfvalue.PathSpec(path=path, pathtype="OS")
# Decrease the max sleep time since the test flows are pretty fast.
flow_utils.StartFlowAndWait(self.client_id, token=self.token,
flow_name="ListDirectory",
pathspec=pathspec)
def testReadDoesNotTimeOut(self):
# Make sure to use the least topical meme we can think of as dummy data.
self.WriteFile("password.txt", "hunter2")
filename = os.path.join(self.temp_dir, "password.txt")
self.assertEqual(self.grr_fuse.Read(self.ClientPathToAFF4Path(filename),
length=len("hunter2"), offset=0),
"hunter2")
def WriteFile(self, filename, contents):
path = os.path.join(self.temp_dir, filename)
with open(path, "w") as f:
f.write(contents)
self.ListDirectoryOnClient(self.temp_dir)
return path
def testCacheExpiry(self):
cache_expiry_seconds = 5
# For this test only, actually set a cache expiry.
self.grr_fuse.cache_expiry = datetime.timedelta(
seconds=cache_expiry_seconds)
# Make a new, uncached directory.
new_dir = os.path.join(self.temp_dir, "new_caching_dir")
os.mkdir(new_dir)
start_time = time.time()
# Access it, caching it.
self.grr_fuse.readdir(new_dir)
# If we took too long to read the directory, make sure it expired.
if time.time() - start_time > cache_expiry_seconds:
self.assertTrue(self.grr_fuse.DataRefreshRequired(new_dir))
else:
# Wait for the cache to expire.
time.sleep(cache_expiry_seconds - (time.time() - start_time))
# Make sure it really expired.
self.assertTrue(self.grr_fuse.DataRefreshRequired(new_dir))
# Remove the temp cache expiry we set earlier.
self.grr_fuse.cache_expiry = datetime.timedelta(seconds=0)
def testClientSideUpdateDirectoryContents(self):
self.ListDirectoryOnClient(self.temp_dir)
contents = self.grr_fuse.Readdir(self.ClientPathToAFF4Path(self.temp_dir))
self.assertNotIn("password.txt", contents)
self.WriteFile("password.txt", "hunter2")
contents = self.grr_fuse.Readdir(self.ClientPathToAFF4Path(self.temp_dir))
self.assertIn("password.txt", contents)
def testClientSideUpdateFileContents(self):
filename = self.WriteFile("password.txt", "password1")
self.assertEqual(self.grr_fuse.Read(self.ClientPathToAFF4Path(filename)),
"password1")
filename = self.WriteFile("password.txt", "hunter2")
self.assertEqual(self.grr_fuse.Read(self.ClientPathToAFF4Path(filename)),
"hunter2")
def testReadNonzeroOffset(self):
filename = self.WriteFile("password.txt", "password1")
self.assertEqual(self.grr_fuse.Read(self.ClientPathToAFF4Path(filename),
length=5, offset=3),
"sword")
def RunFakeWorkerAndClient(self, client_mock, worker_mock):
"""Runs a fake client and worker until both have empty queues.
Args:
client_mock: The MockClient object whose queue we'll grab tasks from.
worker_mock: Used to mock run the flows.
This function will run in a background thread while the tests run, and
will end when self.done is True.
"""
# Run the client and worker until nothing changes any more.
while True:
if self.done:
break
if client_mock:
client_processed = client_mock.Next()
else:
client_processed = 0
flows_run = []
for flow_run in worker_mock.Next():
self.total_flows.add(flow_run)
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
        # If there's nothing left in the queue, keep running until the
        # tests are done.
if self.done:
break
| apache-2.0 | 966,797,122,362,237,000 | 32.985294 | 80 | 0.643185 | false | 3.705901 | true | false | false |
jschaub30/workload_monitor | workload_monitor.py | 1 | 6005 | #!/usr/bin/python
'''
Usage:
workload_monitor.py CONFIG_FILENAME [-h | --help]
'''
import sys
import os
import re
import socket
from docopt import docopt
import shutil
from datetime import datetime
def main(config_fn):
    '''Run the configured workload across all parameter values and iterations.'''
# TODO: Do I need to pass around config?
config = read_config(config_fn)
setup_directories(config)
sys.stdout.write('Run directory created at %s\n' % config.run_directory)
for parameter1 in config.parameter1_vals:
for iteration in xrange(int(config.num_iterations)):
create_run_id(config, parameter1, iteration)
execute_local_command(config.setup_command)
start_monitors(config)
# Now run the workload
val = parameter1
if config.parameter1_factor:
try:
val = float(parameter1) * int(config.parameter1_factor)
                    # rstrip('.0') would also strip significant trailing zeros
                    # (e.g. '100.0' -> '1'), so only drop an exact '.0' suffix.
                    val = str(val)
                    if val.endswith('.0'):
                        val = val[:-2]
except ValueError:
msg = 'Problem multiplying parameter (%s) by factor (%s)' % (
parameter1, config.parameter1_factor)
msg += '\nContinuing...\n'
sys.stderr.write(msg)
command = '%s %s' % (config.workload_command, val)
execute_local_command(command)
stop_monitors(config)
# tidy_results
# return run_directory
def create_run_id(config, value, iteration):
name = config.parameter1_name
clean_name = re.sub(r'\W', '', name.replace(' ', '_'))
clean_name = re.sub('_+', '_', clean_name).strip('_')
if value:
run_id = '%s=%s.%d' % (clean_name, value, iteration)
else:
run_id = '%s=0.%d' % (clean_name, iteration)
if not config.run_ids:
config.run_ids = [run_id]
else:
config.run_ids.append(run_id)
return
def execute_local_command(cmd):
'''Execute a command on the local machine'''
if cmd:
print 'Now executing: ' + cmd
rc = os.system(cmd)
assert rc==0
return
def launch_dstat(host, config, run_id):
'''Launch the dstat monitoring utility on host'''
fn = '/tmp/workload_monitor/%s/%s.%s.dstat.csv' % (config.run_directory,
run_id, host.split('.')[0])
remote_command = 'mkdir -p /tmp/workload_monitor/%s; ' % (
config.run_directory)
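    # dstat appends one CSV sample to fn every measurement_delay_sec seconds;
    # its stdout is discarded.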
remote_command += 'dstat --time -v --net --output %s %s 1>/dev/null' % (
fn, config.measurement_delay_sec)
rc = os.system('ssh %s %s&' % (host, remote_command))
assert rc==0
return
def start_monitors(config):
'''Launch all system monitors on all machines in the cluster'''
for slave in config.slaves:
launch_dstat(slave, config, config.run_ids[-1])
return
def kill_dstat(host):
'''Kill the dstat monitoring utility on host'''
remote_command = 'killall dstat'
print 'ssh %s "%s"' % (host, remote_command)
rc = os.system('ssh %s "%s"' % (host, remote_command))
assert rc==0
return
def stop_monitors(config):
'''
Stop all system monitors on all machines in the cluster, then
copies output files from each slave to run directory
'''
for slave in config.slaves:
kill_dstat(slave)
command = 'scp %s:/tmp/workload_monitor/%s/* %s/data/raw/.' % (
slave, config.run_directory, config.run_directory)
print 'Executing: ' + command
rc = os.system(command)
assert rc==0
return
class Config:
def __init__(self):
self.workload_name = None
self.workload_description = None
self.workload_command = None
self.setup_command = None
self.parameter1_name = "Parameter"
self.parameter1_vals = ['']
self.parameter1_factor = None
self.num_iterations = '1'
self.slaves = None
self.run_ids = None
self.measurement_delay_sec = '1'
self.run_directory = None
self.run_ids = None
def read_config(config_filename):
with open(config_filename, 'r') as fid:
lines = fid.readlines()
config = Config()
for line in lines:
line = line.strip().split('#')[0]
if len(line) < 3 or line[0] == '#':
pass
else:
argument = line.split()[0]
value = ' '.join(line.split()[1:]).strip('\'"')
if argument == 'parameter1_vals':
setattr(config, argument, [value])
else:
setattr(config, argument, value)
if not config.slaves:
config.slaves = [socket.gethostname()]
return config
def setup_directories(config):
'''
Create these directories:
./rundir/[WORKLOAD_NAME]/[TIMESTAMP]/data/raw # Raw data files
./rundir/[WORKLOAD_NAME]/[TIMESTAMP]/data/final # Final (parsed) CSV files
    ./rundir/[WORKLOAD_NAME]/[TIMESTAMP]/scripts # Measurement and analysis scripts
And copy the html_source directory to:
./rundir/[WORKLOAD_NAME]/[TIMESTAMP]/html # For interactive charts
'''
workload = config.workload_name.replace(' ', '_').upper()
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
run_directory = os.path.join('rundir', workload, timestamp)
for sub_directory in ['data/raw', 'data/final', 'scripts']:
full_path = os.path.join(run_directory, sub_directory)
os.makedirs(full_path)
# Now copy html directory, if one exists
if os.path.exists('html_source'):
        shutil.copytree('html_source',
                        os.path.join(run_directory, 'html'))
# Create 'latest' symbolic link
cwd = os.getcwd()
os.chdir(os.path.join('rundir', workload))
try:
os.unlink('latest')
except OSError:
pass
os.symlink(timestamp, 'latest')
config.run_directory = run_directory
os.chdir(cwd)
return
if __name__ == '__main__':
arguments = docopt(__doc__, version='1.0')
# print(arguments)
main(arguments['CONFIG_FILENAME'])
| apache-2.0 | 1,273,994,088,550,940,000 | 30.772487 | 87 | 0.584846 | false | 3.727498 | true | false | false |
liulion/mayavi | mayavi/modules/generic_module.py | 3 | 9695 | """
Defines a GenericModule which is a collection of mayavi
filters/components put together. This is very convenient and useful to
create new modules.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Bool, Enum, Instance
from traitsui.api import Item, Group, View, ListEditor
from apptools.persistence import state_pickler
# Local imports.
from mayavi.core.module import Module
from mayavi.core.common import handle_children_state
from mayavi.components.actor import Actor
################################################################################
# Utility function.
################################################################################
def find_object_given_state(needle, haystack, object):
"""
    Find the object which corresponds to the given state instance (`needle`)
in the given state (`haystack`) and object representing that
haystack.
Parameters
----------
`needle` -- The `State` instance to find
haystack -- The source State in which we are to find the state
`object` -- the object corresponding to the `haystack`
"""
if needle is haystack:
return object
if hasattr(object, 'filter'):
return find_object_given_state(needle,
haystack.filter,
object.filter)
elif hasattr(object, 'filters'):
for h, obj in zip(haystack.filters, object.filters):
r = find_object_given_state(needle, h, obj)
if r is not None:
return r
return None
################################################################################
# `GenericModule` class.
################################################################################
class GenericModule(Module):
"""
Defines a GenericModule which is a collection of mayavi
filters/components put together. This is very convenient and useful
to create new modules.
Note that all components including the actor must be passed as a
list to set the components trait.
"""
# The *optional* Contour component to which we must listen to if
# any. This is needed for modules that use a contour component
# because when we turn on filled contours the mapper must switch to
# use cell data.
contour = Instance('mayavi.components.contour.Contour',
allow_none=True)
# The *optional* Actor component for which the LUT must be set. If
# None is specified here, we will attempt to automatically determine
# it.
actor = Instance(Actor, allow_none=True)
# Should we use the scalar LUT or the vector LUT?
lut_mode = Enum('scalar', 'vector')
########################################
# Private traits.
# Is the pipeline ready? Used internally.
_pipeline_ready = Bool(False)
######################################################################
# `object` interface.
######################################################################
def __get_pure_state__(self):
# Need to pickle the components.
d = super(GenericModule, self).__get_pure_state__()
d['components'] = self.components
d.pop('_pipeline_ready', None)
return d
def __set_pure_state__(self, state):
        # If we are already running, there is a problem since the
        # components will be started automatically in the module's
        # handle_components call even though their state is not yet set,
        # so we disable it here and restart it later.
running = self.running
self.running = False
# Remove the actor states since we don't want these unpickled.
actor_st = state.pop('actor', None)
contour_st = state.pop('contour', None)
# Create and set the components.
handle_children_state(self.components, state.components)
components = self.components
# Restore our state using set_state.
state_pickler.set_state(self, state)
        # Now set our actor and contour traits by finding the matching
        # objects in the restored state.
if actor_st is not None:
for cst, c in zip(state.components, components):
actor = find_object_given_state(actor_st, cst, c)
if actor is not None:
self.actor = actor
break
if contour_st is not None:
for cst, c in zip(state.components, components):
contour = find_object_given_state(contour_st, cst, c)
if contour is not None:
self.contour = contour
break
# Now start all components if needed.
self._start_components()
self.running = running
######################################################################
# `HasTraits` interface.
######################################################################
def default_traits_view(self):
"""Returns the default traits view for this object."""
le = ListEditor(use_notebook=True,
deletable=False,
export='DockWindowShell',
page_name='.name')
view = View(Group(Item(name='components',
style='custom',
show_label=False,
editor=le,
resizable=True),
show_labels=False),
resizable=True)
return view
######################################################################
# `Module` interface.
######################################################################
def setup_pipeline(self):
"""Setup the pipeline."""
        # Needed because a user may have set up the components by setting
        # the default value of the trait in the subclass, in which case
        # the _components_changed handler will never be called, leading to
        # problems.
if len(self.components) > 0 and not self._pipeline_ready:
self._components_changed([], self.components)
def update_pipeline(self):
"""This method *updates* the tvtk pipeline when data upstream is
known to have changed.
This method is invoked (automatically) when the input fires a
`pipeline_changed` event.
"""
mm = self.module_manager
if mm is None:
return
self._setup_pipeline()
# Propagate the event.
self.pipeline_changed = True
def update_data(self):
"""This method does what is necessary when upstream data
changes.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
# Propagate the data_changed event.
self.data_changed = True
######################################################################
# Private interface.
######################################################################
def _setup_pipeline(self):
"""Sets up the objects in the pipeline."""
mm = self.module_manager
if mm is None or len(self.components) == 0:
return
# Our input.
my_input = mm.source
components = self.components
if not self._pipeline_ready:
# Hook up our first component.
first = self.components[0]
first.inputs = [my_input]
# Hook up the others to each other.
for i in range(1, len(components)):
component = components[i]
component.inputs = [components[i-1]]
self._pipeline_ready = True
# Start components.
self._start_components()
# Setup the LUT of any actors.
self._lut_mode_changed(self.lut_mode)
def _handle_components(self, removed, added):
super(GenericModule, self)._handle_components(removed, added)
for component in added:
if len(component.name) == 0:
component.name = component.__class__.__name__
if self.actor is None:
if isinstance(component, Actor):
self.actor = component
if len(self.components) == 0:
self.input_info.datasets = ['none']
else:
self.input_info.copy_traits(self.components[0].input_info)
self._pipeline_ready = False
self._setup_pipeline()
def _lut_mode_changed(self, value):
"""Static traits listener."""
mm = self.module_manager
if mm is None:
return
lm = mm.scalar_lut_manager
if value == 'vector':
lm = mm.vector_lut_manager
if self.actor is not None:
self.actor.set_lut(lm.lut)
def _actor_changed(self, old, new):
self._lut_mode_changed(self.lut_mode)
def _filled_contours_changed_for_contour(self, value):
"""When filled contours are enabled, the mapper should use the
the cell data, otherwise it should use the default scalar
mode.
"""
if self.actor is None:
return
if value:
self.actor.mapper.scalar_mode = 'use_cell_data'
else:
self.actor.mapper.scalar_mode = 'default'
self.render()
def _start_components(self):
for component in self.components:
if len(component.inputs) > 0 and \
len(component.inputs[0].outputs) > 0:
component.start()
| bsd-3-clause | 6,321,323,747,296,528,000 | 36.145594 | 80 | 0.533058 | false | 4.768815 | false | false | false |