from qtpy import QtWidgets, QtCore, QtGui

from ..backend import franktonjunction, themeing, themeing_constants
from ..connections import signals
import uuid
from ..globals import pointers
from ..gui import ui_editorwidget


class EditorViewWidget(ui_editorwidget.ui_EditorViewWidget):

    def __init__(self, parent=None):
        super().__init__(parent)
        self.textframe = TextFrame(parent)
        self.text = self.textframe.text
        self.previewframe = PreviewFrame(parent)
        self.preview = self.previewframe.preview
        self.splitter.addWidget(self.textframe)
        self.splitter.addWidget(self.previewframe)
        self.preview_is_shown = True

    def toggle_preview(self):
        if not self.preview_is_shown:
            self.show_preview()
            self.preview_is_shown = True
        else:
            self.hide_preview()
            self.preview_is_shown = False

    def show_preview(self):
        self.splitter.setSizes([1, 1])
        show_sheet = self.text.highlighter.splitter_shown_sheet
        self.splitter.setStyleSheet(show_sheet)

    def hide_preview(self):
        self.splitter.setSizes([1, 0])
        hide_sheet = self.text.highlighter.splitter_hidden_sheet
        self.splitter.setStyleSheet(hide_sheet)


class TextFrame(ui_editorwidget.ui_EditorFrame):

    def __init__(self, parent=None):
        super().__init__(parent)
        self.text = TextEditor(parent)
        self.layout.addWidget(self.text)


class PreviewFrame(ui_editorwidget.ui_EditorFrame):

    def __init__(self, parent=None):
        super().__init__(parent)
        self.preview = ui_editorwidget.ui_Preview(parent)
        self.layout.addWidget(self.preview)
        self.setMinimumSize(0, 0)
        self.layout.setContentsMargins(30, 30, 30, 30)


class TextEditor(ui_editorwidget.ui_TextEditor):

    def __init__(self, parent=None):
        super().__init__(parent)
        self.highlighter = themeing.MarkdownHighlighter(self)
        self._id = None
        self.completer = None

        # For citations
        self.bibpath = None
        self.suppressbib = False
        self.cslpath = None
        self.reflist_title = None

        # For filehandling
        self.savepath = None
        self.savedir = None

        self.textChanged.connect(self.on_text_change)
        self.cursorPositionChanged.connect(self.on_cursor_change)

    def on_text_change(self):
        franktonjunction.previewthread().start()
        franktonjunction.statusthread().start()
        self.maybe_new_block_state()

    def on_cursor_change(self):
        franktonjunction.statusthread().start()
        self.maybe_new_block_state()

    def get_block_state(self, cursor=None):
        if not cursor:
            cursor = self.textCursor()
        try:
            return cursor.block().userData().in_block
        except AttributeError:
            return

    def maybe_new_block_state(self):
        state = self.get_block_state()
        signals.BlockFormatSignal.emit(state)

    @property
    def id(self):
        if not self._id:
            self._id = uuid.uuid1().hex
        return self._id

    def contextMenuEvent(self, event):
        spelldict = self.highlighter.spelldict
        if not spelldict:
            return self.acceptcontext(event)
        else:
            return franktonjunction.check_this(self, event)

    def acceptcontext(self, event):
        menu = self.createStandardContextMenu()
        menu.exec_(event.globalPos())

    def keyPressEvent(self, event):
        signals.EditorKeyPressSignal.emit(event)
Dennis Aabo Sørensen is the first amputee in the world to feel sensory-rich information, in real time, with a prosthetic hand wired to nerves in his upper arm. Sørensen could grasp objects intuitively and identify what he was touching while blindfolded. Sørensen participated in the clinical study for one month in early 2013. The results are published in Science Translational Medicine.
#!/usr/bin/evn python head_file=""" /* * description of the analysis * */ #ifndef __MYXAODTOOLS_ANALYSISNAME_H__ #define __MYXAODTOOLS_ANALYSISNAME_H__ #include <vector> #include <string> #include "MyXAODTools/AnalysisBase.h" #include "AsgTools/ToolHandle.h" using namespace std; class ANALYSISNAME : public AnalysisBase { public: ANALYSISNAME(); virtual ~ANALYSISNAME(); int initialize(); void ClearBranch(); int process(Long64_t ientry); // main program private: /** private methods */ void AttachBranchToTree(); void CreateBranch(); private: /* specific branches used in this analysis */ private: /* specific Tools used in this analysis */ }; #endif """ src_file = """ #include <stdlib.h> #include <TFile.h> #include "MyXAODTools/ANALYSISNAME.h" #include "MyXAODTools/Helper.h" #include "CPAnalysisExamples/errorcheck.h" #include "xAODBase/IParticleHelpers.h" #include "xAODMuon/MuonContainer.h" #include "xAODEgamma/ElectronContainer.h" #include "xAODEgamma/PhotonContainer.h" #include "xAODJet/JetContainer.h" #include "xAODMissingET/MissingETContainer.h" ANALYSISNAME::ANALYSISNAME(): AnalysisBase() { if(APP_NAME==NULL) APP_NAME = "ANALYSISNAME"; string maindir(getenv("ROOTCOREBIN")); // don't forget to change your SUSY configuration! m_susy_config = Form("%s/data/MyXAODTools/monojet.conf", maindir.c_str()); trigger_map_ = { // your triggers go here. // {"HLT_xe70", false}, }; } int ANALYSISNAME::initialize() { if( initializeBasicTools() != 0 ){ return 1; } CreateBranch(); AttachBranchToTree(); // initiate tools return 0; } ANALYSISNAME::~ANALYSISNAME(){ } void ANALYSISNAME::CreateBranch() { CreateBasicBranch(); return ; } void ANALYSISNAME::ClearBranch(){ ClearBasicBranch(); } void ANALYSISNAME::AttachBranchToTree() { AttachBasicToTree(); // event_br->AttachBranchToTree(*physics); // muon_br, el_br, jet_br, ph_br // set your own branches // physics->Branch("has_bad_muon", &m_hasBadMuon, "has_bad_muon/O"); } int ANALYSISNAME::process(Long64_t ientry) { int sc = Start(ientry); if(m_debug) { Info(APP_NAME, " ANALYSISNAME: processing"); } if(sc != 0) return sc; // event_br->Fill(*ei); // Start ANALYSISNAME /*get physics objects*/ // Electrons // xAOD::ElectronContainer* electrons_copy = NULL; // xAOD::ShallowAuxContainer* electrons_copyaux = NULL; // CHECK( m_objTool->GetElectrons(electrons_copy, electrons_copyaux, true) ); // Muons // xAOD::MuonContainer* muons_copy = NULL; // xAOD::ShallowAuxContainer* muons_copyaux = NULL; // CHECK( m_objTool->GetMuons(muons_copy, muons_copyaux, true) ); // Jets // xAOD::JetContainer* jets_copy = NULL; // xAOD::ShallowAuxContainer* jets_copyaux = NULL; // CHECK( m_objTool->GetJets(jets_copy,jets_copyaux, true) ); // Photons // xAOD::PhotonContainer* ph_copy = nullptr; // xAOD::ShallowAuxContainer* phAux_copy = nullptr; // CHECK(m_objTool->GetPhotons(ph_copy, phAux_copy, true)); /////////////////////// // do overlap removal before object selection // turn off the harmonization /////////////////////// // bool doHarmonization = false; // CHECK( m_objTool->OverlapRemoval( // electrons_copy, muons_copy, // jets_copy, ph_copy) ); //discard the event if any jets is labelled as 'bad' // bool passJetCleaning = true; // for(const auto& jet : *jets_copy){ // m_objTool->IsBJet(*jet) ; // if ( jet->pt() > 20e3 ) // { // if( dec_bad(*jet) && dec_passOR(*jet)){ // passJetCleaning = false; // break; // } // } // } // if ( !passJetCleaning ) return 1; // electron selections // for(const auto& el : *electrons_copy){ // if( !(bool) dec_baseline(*el) || !(bool) dec_passOR(*el)){ // 
continue; // } // m_nBaseEl ++; // } // // muon selections // for(const auto& mu : *muons_copy){ // if( !(bool) dec_baseline(*mu) || !(bool) dec_passOR(*mu) ){ // continue; // } // if( dec_bad(*mu) ) m_hasBadMuon = true; // if( dec_cosmic(*mu) ) m_hasCosmicMuon = true; // muon_br->Fill(*mu, ei, vertice); // m_nBaseMu ++; // } // // photon selections // for(const auto& ph : *ph_copy) { // if( !(bool) dec_baseline(*ph) || !(bool) dec_passOR(*ph) ){ // continue; // } // m_nBasePhoton ++; // } // Fill your tree!!!! physics->Fill(); return 0; } """
Yes, it's true...the title, that is. You know how I found out? Heading to the local yarn store to start knitting myself a scarf for my journey. I suppose it's a bit of a cliche to start talking about 'the Journey' as if it were something like 'El Camino' or Mecca, but I think moving to northern Canada is definitely something one does not do lightly. It involves mental and physical preparation and, at this point in my life, it is my Journey. Hey, it's already gotten me back to knitting! I was officially offered a position last Thursday (the 23rd) and officially accepted it yesterday (the 24th)...starting August 17th I'll be calling Pangnirtung my home. Bizarre, right? Three weeks to figure everything out and move...three weeks to prepare for something that is so huge! I've been creating list upon list and still feel so overwhelmed that escaping into a movie sounds like a dream. That's all I have to report for the moment; things are still crazy, and I'm still processing. Nunavut Newbie v.2.0: Iqaluit, you're awesome for us newcomers; Jordan and Steph, your 'Shelter' blog has been great to follow. Modeling my prep routine after yours seems to be a sensible thing to do right now. Allergic to wool? That's crazy talk! We've enjoyed reading your blog as well Tara. We're so glad we could be helpful. hee hee...allergic to wool, that's me. i go with the polyester cheapy-knit stuff. god forbid i ever light up! love the costco reference on your blog...heading there next week. MEC tomorrow. bulk barn next week. wal-mart/zellers somewhere in between. ah! i'm so excited! can't wait to read your next entry. It's gonna be freakin' cold!
#!/usr/bin/env python r""" Produce a MagicPoint file from a text file with simple markup. Markup: directives (lines starting with .) MagicPoint text (everything else) You can use MagicPoint markup (lines starting with %), but that is discouraged. You should know that \ is MagicPoint's escape character. If you want to include a literal \ in the text, write \\. You should also know that lines starting with a # are comments. If you want to include a line starting with a # in the output, write \# Directives: .title .author .email .conference title page elements .titlepage produce a title page .page start a new page (one--two lines of text, centered vertically) .midpage start a new page (three lines of text, centered vertically) .bigpage start a new big page (four lines of text, centered vertically) .pageofcode start a new page for code examples (12 lines), enables Python syntax .pageoftext start a new page for text examples (12 lines) .italic switch font to italic .monospace switch font to monospace .normal switch font to normal .python enable Python syntax highlight .defaultsyntax use default syntax highlight (doctests) .nosyntax disable syntax highlight Empty lines following a directive are skipped. makeslide.py was written by Marius Gedminas <[email protected]> """ import re import sys import string import keyword import fileinput templates = dict( titlepage = string.Template('''\ #!/usr/bin/env mgp # Note: tabs and trailing spaces are *important* in this file # - Preamble ---------------------------------------------------------------- %deffont "standard" xfont "verdana" %deffont "thick" xfont "verdana-bold" %deffont "em" xfont "verdana-medium-i" %deffont "mono" xfont "andale mono" %default 1 area 90 90, vgap 260, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png" %default 2 center, size 5 %default 3 size 8, vgap 80 %default 4 font "em", size 7, vgap 10 %default 5 font "standard", size 3 # --------------------------------------------------------------------------- %page %pcache 1 1 0 1 %ccolor "#134d73" %nodefault %size 7, font "standard", vgap 20, fore "black", back "white" %center, font "thick", size 11 $title %center, font "standard", size 7 %size 5, font "standard", fore "#134d73" $author %size 4 $email %size 2 %size 5 Programmers of Vilnius %size 4 http://pov.lt/ %size 2 %newimage "povlogo.png" %fore "black" $conference '''), pageofcode = string.Template('''\ %page %nodefault %area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png" %left, size 6, vgap 10 '''), pageoftext = string.Template('''\ %page %nodefault %area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png" %left, size 6, vgap 10 '''), page = string.Template('''\ %page '''), bigpage = string.Template('''\ %page %nodefault %area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png" %center, size 8, vgap 80 '''), midpage = string.Template('''\ %page %nodefault %area 90 90, vgap 60, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 50 "povlogo.png" %center, size 8, vgap 80 '''), italic = string.Template('%font "em"\n'), monospace = string.Template('%font "mono"\n'), normal = string.Template('%font "standard"\n'), ) python_syntax_patterns = { r'\b(?P<kw>%s)\b' % '|'.join(keyword.kwlist): string.Template(''' %cont, font "thick" $kw %cont, font "standard" '''), "(?P<s>'.*?')": 
string.Template(''' %cont, fore "#13734d" $s %cont, fore "#134d73" ''') } class PythonSyntaxHighligh(string.Template): def substitute(self, **kw): kw['line'] = apply_syntax_patterns(python_syntax_patterns, kw['line']) return super(PythonSyntaxHighligh, self).substitute(**kw) line_patterns = { r'^(?P<indent>\s*)(?P<prefix>\.\.\.|>>>)(?P<line>.*)$': PythonSyntaxHighligh('''\ $indent %cont, font "mono", fore "#00aaaa" $prefix %cont, font "standard", fore "#134d73" $line %font "standard"'''), } syntax_modes = { 'nosyntax': {}, 'defaultsyntax': line_patterns, 'python': python_syntax_patterns, 'pageofcode': python_syntax_patterns, } default_syntax = line_patterns def apply_syntax_patterns(syntax_patterns, line): idx = 0 mega_re = [] for idx, (pat, tmpl) in enumerate(syntax_patterns.items()): mega_re.append('(?P<r%d>%s)' % (idx, pat)) mega_re = '|'.join(mega_re) def replacement(match): for idx, (pat, tmpl) in enumerate(syntax_patterns.items()): if match.group('r%d' % idx): return tmpl.substitute(**match.groupdict()) assert False, 'empty match?' if mega_re: line = re.sub(mega_re, replacement, line) return line def preprocess(inputfile, outputfile): args = {'title': '', 'author': '', 'email': '', 'conference': ''} syntax_patterns = default_syntax skipping_empty_lines = True for line in inputfile: if not line.strip() and skipping_empty_lines: continue line = line.rstrip('\n') if line.startswith('.') and not line.startswith('...'): keyword = line.split()[0][1:] if keyword in args: args[keyword] = line[len(keyword)+1:].strip() elif keyword in templates: print >> outputfile, templates[keyword].substitute(**args), syntax_patterns = syntax_modes.get(keyword, default_syntax) elif keyword in syntax_modes: syntax_patterns = syntax_modes[keyword] else: print >> sys.stderr, ".%s ignored" % keyword skipping_empty_lines = True else: skipping_empty_lines = False line = apply_syntax_patterns(syntax_patterns, line) print >> outputfile, line if __name__ == '__main__': preprocess(fileinput.input(), sys.stdout)
CDE Insurance is looking for an alternative risk financing tool to manage their risk. The company is deliberating the option of developing their own pure captive. The following are the costs entailed in the formation. As the risk consultant for the company, your job is to evaluate the feasibility of this option and advise the company on whether they should adopt it.

In order to form a pure captive as their licensed reinsurer, CDE needs to invest an initial fee of $550,000 and an annual captive administration fee of $125,000. Since the captive will be formed as a reinsurer, the formation requires mediation by a fronting company. Thus, CDE will be required to use a fronting insurer, and this involves a cost of $84,000 in fronting fees. The fronting fee will be paid upfront and at the end of every year in which the captive remains active. At the end of the project period, CDE will enjoy a 3% discount on the captive administration fee and the fronting fee. Without the captive, CDE will have to resort to the traditional insurance plan, which requires them to pay an annual premium of $1.05 million per year. Their estimation shows that by using their own captive, they will enjoy insurance premium savings of $300,000 per annum. Insurance premiums are paid at the beginning of each year. The initial process will involve a five-year trial period, and the contract has the following additional information: the pure captive does not obtain premium tax deductibility, its cost of capital is 6.4%, and its corporate tax rate is 33%.

Show the cash flows that would occur over the 5 years (round off all amounts to the nearest dollar). Calculate the Net Present Value (NPV) of this project. Based on the NPV calculation, should CDE Inc. form their own pure captive? Justify your answer. CDE Insurance is totally committed to developing their own captive reinsurer even if the project is not profitable. As the risk consultant, you need to make changes to the captive formation plan so that the development of a captive will be beneficial and profitable to CDE. Suggest ONE strategy that should be implemented so that the project becomes profitable, and provide evidence (calculations) to support your answer. You have to use MS Excel to answer ALL questions. (A worked sketch of the NPV mechanics is included after the case questions below.)

The contract period is 5 years and the contract has an aggregate limit of $10m over the 5-year period. Premium will be paid at the beginning of each year and the amount of premium to be paid by JKL is $1.75m. As the reinsurer, PQR Re. will receive an annual underwriting fee of 19% of each premium payment. Interest of 6.7% will be credited to the beginning balance in the account for each year. In case of a deficit in the policyholder's account, the policyholder will be required to pay 95% of any deficit in equal instalments over the subsequent 5 years. During the contract period, losses incurred and reported are $1.35m, $2.29m, $0.998m, $1.56m and $1.22m respectively at the end of each year. Does the total claim amount exceed the policy limit? Explain your answer. What are the equal annual instalments that JKL Insurance has to pay to PQR Re.? To what extent will PQR Re. cover the excess amount? How does Good Inc. deal with the excess amount which is not covered by the FRR contract? Explain your answer.

UVW Insurer has a 10-line SST contract with BDE Re. and a net line of $100,000.
Based on the following information about policies and losses, work out the F.O.S., the division of loss between insurer and reinsurer, and determine the primary insurer's total obligation. (You need to construct a table using MS Excel, similar to those used and discussed in class, to help you answer this question.) Policy A: S.I. $80,000 — loss is $55,000. Policy B: S.I. $1,250,000 — loss is $750,000. Policy C: S.I. $1,000,000 — loss is $50,000. (A worked sketch of the surplus share split follows the submission instructions below.)

RST Insurance Company has the following reinsurance agreement with IJK Re. Under a $900,000 xs $100,000 per risk XS treaty, will RST be indemnified by IJK Re.? Why? Work out the division of payment between RST's retention and IJK's compensation. How would your answer in (b) differ under a $900,000 xs $100,000 per occurrence XS treaty? Assume that all losses are caused by a severe hurricane. Show your calculations and explain the difference(s) in your answer. Based on these estimations, which one is the best option? Justify your answer.

All questions must be answered in MS Excel. Attempt each case on separate sheets. Label your sheets accordingly – Case1, Case2, etc. All students must use the cover format as shown above. Fill in your names and matric numbers in the correct order. The cover format is only applicable for hardcopy submission. The Excel file must be named after the first person in your group. Make sure all the mathematical/formula functions in your spreadsheet are working correctly. The softcopy file must be uploaded onto the Online Learning Portal by 24th May 2018, before 12 noon. Only one submission from each group – the first person in your group is accountable for completing the online submission. The hardcopy file must be printed and submitted to me (my mailbox #125) by 24th May 2018, before 12 noon.
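For the CDE captive case, the minimal Python sketch below only illustrates the discounting mechanics. The cash-flow layout it assumes (formation fee and first fronting fee at year 0 alongside the first premium saving, recurring administration and fronting fees in years 1 to 4, and the 3%-discounted final fees in year 5) and the omission of tax effects are assumptions for illustration; the exact construction of the cash flows should follow the case instructions and the approach used in class.

```python
# Illustrative sketch only: the cash-flow layout below is an assumption, not
# the prescribed model. Tax effects are ignored and premium savings are taken
# at the beginning of each year.

def npv(rate, cashflows):
    """Discount a list of cash flows, where cashflows[0] occurs at t = 0."""
    return sum(cf / (1 + rate) ** t for t, cf in enumerate(cashflows))

cost_of_capital = 0.064

# Year 0: initial formation fee and upfront fronting fee, offset by the
# first year's premium saving (premiums are paid at the start of the year).
year0 = -550_000 - 84_000 + 300_000

# Years 1 to 4: annual administration fee and fronting fee, offset by the saving.
recurring = -125_000 - 84_000 + 300_000

# Year 5: final administration and fronting fees with the 3% end-of-project discount.
year5 = -(125_000 + 84_000) * 0.97

cashflows = [year0] + [recurring] * 4 + [year5]
print(round(npv(cost_of_capital, cashflows)))  # NPV under these illustrative assumptions
```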
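For the UVW surplus share (SST) question, the sketch below shows one common way to split each loss: the ceded amount on each policy is the surplus of the sum insured above the $100,000 net line, capped at the 10-line treaty capacity, and the loss is divided in the same proportion as the sums insured. Whether any amount above treaty capacity stays with the primary insurer (as assumed here) or is handled differently should follow the class approach, so treat this as a sketch rather than the model answer.

```python
# Hypothetical sketch of a surplus share (SST) division of loss.
# Assumes a net line (retention) of $100,000 and a 10-line treaty, so the treaty
# provides up to 10 x $100,000 = $1,000,000 of capacity on top of the retention;
# any sum insured above that capacity is assumed to stay with the primary insurer.

NET_LINE = 100_000
LINES = 10
TREATY_CAPACITY = LINES * NET_LINE

policies = {  # policy: (sum insured, loss)
    'A': (80_000, 55_000),
    'B': (1_250_000, 750_000),
    'C': (1_000_000, 50_000),
}

for name, (sum_insured, loss) in policies.items():
    ceded = min(max(sum_insured - NET_LINE, 0), TREATY_CAPACITY)
    retained = sum_insured - ceded            # retention plus anything above capacity
    reinsurer_share = ceded / sum_insured     # reinsurer's proportional share of the risk
    reinsurer_loss = loss * reinsurer_share   # loss split in proportion to sums insured
    insurer_loss = loss - reinsurer_loss
    print(f"Policy {name}: ceded {reinsurer_share:.0%}, "
          f"insurer pays {insurer_loss:,.0f}, reinsurer pays {reinsurer_loss:,.0f}")
```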
from google.appengine.api import memcache
import logging, web
import app.db.models as models
from google.appengine.ext import ereporter
import app.utility.utils as utils
from app.utility.utils import cachepage

ereporter.register_logger()
render = web.render


class Tags:
    """ Return tags for auto completion """
    @cachepage()
    def GET(self):
        web.header('Content-type', 'text/plain')
        try:
            tag_filter = web.input()['q']
            return models.Tag.get_tags_by_filter(tag_filter)
        except Exception, exception:
            return ""


class Markdown:
    """ Return markdown data for Markitup preview """
    def POST(self):
        data = web.input(data = None)['data']
        return render.admin.markdown_preview(data)


class Links:
    """ Check if a given link is already stored """
    def GET(self):
        web.header('Content-type', 'application/json')
        link = web.input(check = None)['check']
        if link and utils.link_is_valid(link):
            link_is_stored = models.Post.get_post_by_link(link.strip())
            if not link_is_stored:
                if link.strip().endswith('/'):
                    link_is_stored = models.Post.get_post_by_link(link.strip()[:-1])  # check without slash
                else:
                    link_is_stored = models.Post.get_post_by_link("%s%s" % (link.strip(), '/'))  # check with slash
            if link_is_stored:
                return '{"result":"[ The link is known ]","clazz":"message_KO"}'
            else:
                return '{"result":"[ This link looks new ]","clazz":"message_OK"}'
        else:
            return '{"result":"","clazz":""}'
Preheat the oven to 350 degrees F. Grease a 9-by-13-inch pan. For the dough: Scald the milk. In a stand mixer with the dough attachment, combine the salt, butter and 1/2 cup granulated sugar. Add the scalded milk. Add the eggs and vanilla and mix until combined. In a separate bowl, add the yeast to the hot water with 1 teaspoon granulated sugar. Let the yeast mixture sit for 3 minutes to make sure the yeast activates. It should foam and start to bubble. Add the yeast mixture to the mixer. Add 4 cups flour and Clear Jel Instant, if using, to the mixer. Mix on low speed for 3 minutes. If the dough is too sticky, add flour in 1/4-cup increments, using no more than 5 cups of flour total. Let the dough rise in the mixer bowl until it doubles in size, 1 to 1 1/2 hours. Punch down the dough. Roll out on a floured surface to a 12-by-8-inch rectangle. For the filling: Spread the melted butter on the rolled-out dough. In a bowl, combine the brown sugar, cinnamon and salt. Spread the filling mixture on top of the buttered dough. Lightly dust with 1 teaspoon flour. Roll the dough into a log. Cut the log into 8 pieces. Place in the prepared pan. Let the dough rise again until it doubles, 1 to 1 1/2 hours. Bake until golden brown, 20 to 30 minutes. For the glaze: Mix the confectioners' sugar, maple extract and milk in a bowl. Allow the cinnamon rolls to cool a bit, then drizzle with the glaze.
import time
import logging
import threading

from rx.core import Scheduler, Disposable

from .schedulerbase import SchedulerBase
from .eventloopscheduler import EventLoopScheduler

log = logging.getLogger('Rx')


class NewThreadScheduler(SchedulerBase):
    """Creates an object that schedules each unit of work on a separate
    thread."""

    def __init__(self, thread_factory=None):
        super(NewThreadScheduler, self).__init__()

        def default_factory(target, args=None):
            t = threading.Thread(target=target, args=args or [])
            t.setDaemon(True)
            return t

        self.thread_factory = thread_factory or default_factory

    def schedule(self, action, state=None):
        """Schedules an action to be executed."""
        scheduler = EventLoopScheduler(thread_factory=self.thread_factory,
                                       exit_if_empty=True)
        return scheduler.schedule(action, state)

    def schedule_relative(self, duetime, action, state=None):
        """Schedules an action to be executed after duetime."""
        scheduler = EventLoopScheduler(thread_factory=self.thread_factory,
                                       exit_if_empty=True)
        return scheduler.schedule_relative(duetime, action, state)

    def schedule_absolute(self, duetime, action, state=None):
        """Schedules an action to be executed at duetime."""
        return self.schedule_relative(duetime - self.now, action, state=None)

    def schedule_periodic(self, period, action, state=None):
        """Schedule a periodic piece of work."""
        secs = self.to_relative(period) / 1000.0
        disposed = []
        s = [state]

        def run():
            while True:
                time.sleep(secs)
                if disposed:
                    return
                new_state = action(s[0])
                if new_state is not None:
                    s[0] = new_state

        thread = self.thread_factory(run)
        thread.start()

        def dispose():
            disposed.append(True)

        return Disposable.create(dispose)


Scheduler.new_thread = new_thread_scheduler = NewThreadScheduler()
HostedFX has been in business since 2004. In that span of time we have developed a skillful management team: our team brings more than 10 years of management experience, senior system administration across hardware, Linux and Windows OS, virtualization setup, and full network administration capability. We are available around the clock, 24 hours a day, 7 days a week, and have our own technology to monitor your infrastructure setup. Perfect for colo customers: our senior system administrators will manage your own rack, cage, or multiple servers with network and server management. Perfect for single or multiple server management: have your server managed with the setup you want, fully monitored and secured by our system administrators. Perfect for full management of your infrastructure in our datacenter: complete point-to-point management of all your servers, backup storage, and any other setup you have in our datacenter. Perfect if you have services not associated with HostedFX and want to hire us to work on your server or infrastructure setup. Perfect for storage servers and storage infrastructure: our team can help you set up your storage and keep it secured and monitored daily.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorBoard data ingestion module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import locale import logging import re import sys import time import threading import types # pylint: disable=unused-import import six import tensorflow as tf from tensorboard import util class Record(collections.namedtuple('Record', ('record', 'offset'))): """Value class for a record returned by RecordReader. Fields: record: The byte string record that was read. offset: The byte offset in the file *after* this record was read. :type record: str :type offset: int """ __slots__ = () # Enforces use of only tuple fields. @util.closeable @six.python_2_unicode_compatible class RecordReader(object): """Pythonic veneer around PyRecordReader.""" def __init__(self, path, start_offset=0): """Creates new instance. Args: path: Path of file. This can be on a remote file system if the TensorFlow build supports it. start_offset: Byte offset to seek in file once it's opened. :type path: str :type start_offset: int """ self.path = tf.compat.as_text(path) self._offset = start_offset self._size = -1 self._reader = None # type: tf.pywrap_tensorflow.PyRecordReader self._is_closed = False self._lock = threading.Lock() def get_size(self): """Returns byte length of file. This is guaranteed to return a number greater than or equal to the offset of the last record returned by get_next_record(). This method can be called after the instance has been closed. Raises: IOError: If file has shrunk from last read offset, or start offset, or last read size. :rtype: int """ size = tf.gfile.Stat(self.path).length minimum = max(self._offset, self._size) if size < minimum: raise IOError('File shrunk: %d < %d: %s' % (size, minimum, self.path)) self._size = size return size def get_next_record(self): """Reads record from file. Returns: A Record or None if no more were available. Raises: IOError: On open or read error, or if close was called. tf.errors.DataLossError: If corruption was encountered in the records file. :rtype: Record """ if self._is_closed: raise IOError('%s is closed' % self) if self._reader is None: self._reader = self._open() try: with tf.errors.raise_exception_on_not_ok_status() as status: self._reader.GetNext(status) except tf.errors.OutOfRangeError: # We ignore partial read exceptions, because a record may be truncated. # PyRecordReader holds the offset prior to the failed read, so retrying # will succeed. return None self._offset = self._reader.offset() return Record(self._reader.record(), self._offset) def close(self): """Closes record reader if open. Further reads are not permitted after this method is called. 
""" if self._is_closed: return if self._reader is not None: self._reader.Close() self._is_closed = True self._reader = None def _open(self): with tf.errors.raise_exception_on_not_ok_status() as status: return tf.pywrap_tensorflow.PyRecordReader_New( tf.resource_loader.readahead_file_path(tf.compat.as_bytes(self.path)), self._offset, tf.compat.as_bytes(''), status) def __str__(self): return u'RecordReader{%s}' % self.path @util.closeable @six.python_2_unicode_compatible class BufferedRecordReader(object): """Wrapper around RecordReader that does threaded read-ahead. This class implements the same interface as RecordReader. It prevents remote file systems from devastating loader performance. It does not degrade throughput on local file systems. The thread is spawned when the first read operation happens. The thread will diligently try to buffer records in the background. Its goal is to sleep as much as possible without blocking read operations. This class is thread safe. It can be used from multiple threads without any need for external synchronization. """ READ_AHEAD_AGGRESSION = 2.3 # Does full replenish when ~40% full. READ_AHEAD_BYTES = 16 * 1024 * 1024 STAT_INTERVAL_SECONDS = 4.0 def __init__(self, path, start_offset=0, read_ahead=READ_AHEAD_BYTES, stat_interval=STAT_INTERVAL_SECONDS, clock=time.time, record_reader_factory=RecordReader): """Creates new instance. The i/o thread is not started until the first read happens. Args: path: Path of file. This can be on a remote file system if the TensorFlow build supports it. start_offset: Byte offset to seek in file once it's opened. read_ahead: The number of record bytes to buffer into memory before the thread starts blocking. This value must be >0 and the default is BufferedRecordReader.READ_AHEAD_BYTES. stat_interval: A float with the minimum number of seconds between stat calls, to determine the file size. If this is 0.0 then the thread will stat after every re-buffer, but never be woken up in order to stat. clock: Function returning a float with the number of seconds since the UNIX epoch in zulu time. record_reader_factory: The RecordReader constructor, which can be changed for testing. :type path: str :type start_offset: int :type read_ahead: int :type clock: () -> float :type record_reader_factory: (str, int) -> RecordReader """ self.path = tf.compat.as_text(path) self._read_ahead = read_ahead self._stat_interval = stat_interval self._clock = clock self._is_closed = False self._has_reached_end = False self._offset = 0 self._size = -1 self._last_stat = 0.0 self._buffered = 0 self._reader = record_reader_factory(self.path, start_offset) self._records = collections.deque() # type: collections.deque[Record] self._read_exception = \ None # type: tuple[BaseException, BaseException, types.TracebackType] self._close_exception = \ None # type: tuple[BaseException, BaseException, types.TracebackType] self._lock = threading.Lock() self._wake_up_producer = threading.Condition(self._lock) self._wake_up_consumers = threading.Condition(self._lock) self._thread = threading.Thread(target=self._run, name=_shorten_event_log_path(self.path)) def get_size(self): """Returns byte length of file. This is guaranteed to return a number greater than or equal to the offset of the last record returned by get_next_record(). In the average case, this method will not block. However, if the i/o thread has not yet computed this value, then this method will block on a stat call. This method can be called after the instance has been closed. 
Returns: The byte length of file, which might increase over time, but is guaranteed to never decrease. It's also guaranteed that it will be greater than or equal to the offset field of any Record. :rtype: int """ with self._lock: if self._should_stat(): self._stat() return self._size def get_next_record(self): """Reads one record. When this method is first called, it will spawn the thread and block until a record is read. Once the thread starts, it will queue up records which can be read without blocking. The exception is when we reach the end of the file, in which case each repeated call will be synchronous. There is no background polling. If new data is appended to the file, new records won't be buffered until this method is invoked again. The caller should take care to meter calls to this method once it reaches the end of file, lest they impact performance. Returns: A Record object, or None if there are no more records available at the moment. Raises: IOError: If this instance has been closed. tf.errors.DataLossError: If corruption was encountered in the records file. Exception: To propagate any exceptions that may have been thrown by the read operation in the other thread. If an exception is thrown, then all subsequent calls to this method will rethrow that same exception. :rtype: Record """ with self._lock: if self._is_closed: raise IOError('%s is closed' % self) if not self._thread.is_alive(): self._thread.start() else: record = self._get_record() if record is not None: if self._should_wakeup(): self._wake_up_producer.notify() return record self._has_reached_end = False self._wake_up_producer.notify() while not (self._read_exception or self._has_reached_end or self._records): self._wake_up_consumers.wait() return self._get_record() def close(self): """Closes event log reader if open. If the i/o thread is running, this method blocks until it has been shut down. Further reads are not permitted after this method is called. Raises: Exception: To propagate any exceptions that may have been thrown by the close operation in the other thread. If an exception is thrown, then all subsequent calls to this method will rethrow that same exception. 
""" with self._lock: if not self._is_closed: self._is_closed = True if not self._thread.is_alive(): self._reader = None return self._wake_up_producer.notify() while self._reader is not None: self._wake_up_consumers.wait() if self._close_exception is not None: six.reraise(*self._close_exception) def _get_record(self): if self._read_exception is not None: six.reraise(*self._read_exception) if not self._records: return None record = self._records.popleft() self._buffered -= len(record.record) return record @util.guarded_by('_lock') def _should_wakeup(self): return (self._is_closed or self._read_exception is None and (self._should_rebuffer() or (self._stat_interval and self._should_stat()))) @util.guarded_by('_lock') def _should_rebuffer(self): return (not self._has_reached_end and (float(self._buffered) < self._read_ahead / BufferedRecordReader.READ_AHEAD_AGGRESSION)) @util.guarded_by('_lock') def _should_stat(self): return (self._read_exception is None and (self._offset > self._size or self._last_stat <= self._clock() - self._stat_interval)) @util.guarded_by('_lock') def _stat(self): try: now = self._clock() self._size = self._reader.get_size() self._last_stat = now except Exception as e: # pylint: disable=broad-except tf.logging.debug('Stat failed: %s', e) self._read_exception = sys.exc_info() def _run(self): while True: with self._lock: while not self._should_wakeup(): self._wake_up_producer.wait() if self._is_closed: try: self._reader.close() tf.logging.debug('Closed') except Exception as e: # pylint: disable=broad-except self._close_exception = sys.exc_info() tf.logging.debug('Close failed: %s', e) self._reader = None self._wake_up_consumers.notify_all() return if self._buffered >= self._read_ahead: tf.logging.debug('Waking up to stat') self._stat() continue # Calculate a good amount of data to read outside the lock. # The less we have buffered, the less re-buffering we'll do. # We want to minimize wait time in the other thread. See the # following contour plot: https://goo.gl/HTBcCU x = float(self._buffered) y = BufferedRecordReader.READ_AHEAD_AGGRESSION c = float(self._read_ahead) want = int(min(c - x, y/c * x**y + 1)) # Perform re-buffering outside lock. self._rebuffer(want) def _rebuffer(self, want): tf.logging.debug('Waking up to read %s bytes', _localize_int(want)) records = [] read_exception = self._read_exception if read_exception is None: try: while want > 0: record = self._reader.get_next_record() if record is None: break self._offset = record.offset records.append(record) want -= len(record.record) except Exception as e: # pylint: disable=broad-except tf.logging.debug('Read failed: %s', e) read_exception = sys.exc_info() with self._lock: self._read_exception = read_exception if self._should_stat(): self._stat() if not self._read_exception: if not records: self._has_reached_end = True else: for record in records: self._records.append(record) self._buffered += len(record.record) self._wake_up_consumers.notify_all() def __str__(self): return u'BufferedRecordReader{%s}' % self.path class RateCounter(object): """Utility class for tracking how much a number increases each second. The rate is calculated by averaging of samples within a time window, which weights recent samples more strongly. """ def __init__(self, window, clock=time.time): """Creates new instance. Args: window: The maximum number of seconds across which rate is averaged. In practice, the rate might be averaged over a time period greater than window if set_value is being called less frequently than window. 
clock: Function returning a float with the number of seconds since the UNIX epoch in zulu time. :type window: float :type clock: () -> float """ self._window = window self._clock = clock self._points = collections.deque() self._last_value = None # type: float self._last_time = None # type: float def get_rate(self): """Determines rate of increase in value per second averaged over window. Returns: An integer representing the rate or None if not enough information has been collected yet. :rtype: int """ points = [] total_elapsed = 0.0 total_weight = 0.0 for rate, elapsed, _ in self._points: weight = 1.0 / (total_elapsed + 1) * elapsed total_elapsed += elapsed total_weight += weight points.append((rate, weight)) if not total_weight: return 0 return int(sum(w / total_weight * r for r, w in points)) def set_value(self, value): """Sets number state. This method adds a delta between value and the value of the last time this method was called. Therefore the first invocation does not add a delta. Raises: ValueError: If value is less than the last value. :type value: float """ value = float(value) now = self._clock() if self._last_value is None: self._last_value = value self._last_time = now return if value < self._last_value: raise ValueError('%f < %f' % (value, self._last_value)) delta = value - self._last_value elapsed = now - self._last_time if not elapsed: return self._points.appendleft((delta / elapsed, elapsed, now)) self._last_time = now self._last_value = value self._remove_old_points() def bump(self): """Makes time since last set_value count for nothing.""" self._last_time = self._clock() def _remove_old_points(self): threshold = self._clock() - self._window while self._points: r, e, t = self._points.pop() if t > threshold: self._points.append((r, e, t)) break @util.closeable class Progress(object): """Terminal UI for displaying job progress in terms of bytes. On teletypes, this class will display a nice ephemeral unicode progress bar. Otherwise it just emits periodic log messages. This class keeps track of the rate at which input is processed, as well as the rate it grows. These values are represented to the user using the DELTA and NABLA symbols. An alarm is displayed if the consumption rate falls behind the production rate. In order for this to be calculated properly, the sleep method of this class should be used rather than time.sleep. """ BAR_INTERVAL_SECONDS = 0.25 BAR_LOGGER = logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL) BAR_WIDTH = 45 BLOCK_DARK = u'\u2593' BLOCK_LIGHT = u'\u2591' DELTA = u'\u2206' LOG_INTERVAL_SECONDS = 5.0 NABLA = u'\u2207' RATE_WINDOW = 20.0 def __init__(self, clock=time.time, sleep=time.sleep, log_callback=tf.logging.info, bar_callback=BAR_LOGGER.info, rate_counter_factory=RateCounter): """Creates new instance. Args: clock: Function returning a float with the number of seconds since the UNIX epoch in zulu time. sleep: Injected time.sleep function. log_callback: Callback for emitting normal log records. bar_callback: Callback for emitting ephemeral bar records. rate_counter_factory: Constructor to RateCounter, which can be swapped out for testing. 
:type clock: () -> float :type sleep: (float) -> None :type rate_counter_factory: (float) -> RateCounter """ self._clock = clock self._sleep = sleep self._log_callback = log_callback self._bar_callback = bar_callback self._initialized = False self._offset = 0 self._size = 0 self._last_log_time = 0.0 self._last_bar_time = 0.0 self._last_log_offset = -1 self._last_bar_offset = -1 self._rate_offset = rate_counter_factory(Progress.RATE_WINDOW) self._rate_size = rate_counter_factory(Progress.RATE_WINDOW) def set_progress(self, offset, size): """Updates the progress bar state. This method will cause progress information to be occasionally written out. Args: offset: The number of bytes processed so far. size: The total number of bytes. This is allowed to increase or decrease, but it must remain at least offset. Raises: ValueError: If offset is greater than size, or offset or size decreased from the last invocation. :type offset: int :type size: int """ if offset > size: raise ValueError('offset (%d) can not exceed size (%d)' % (offset, size)) self._rate_offset.set_value(offset) self._rate_size.set_value(size) self._offset = offset self._size = size now = self._clock() if not self._initialized: self._last_log_time = now self._last_bar_time = now self._initialized = True return elapsed = now - self._last_log_time if elapsed >= Progress.LOG_INTERVAL_SECONDS: self._last_log_time = now self._show_log() elapsed = now - self._last_bar_time if elapsed >= Progress.BAR_INTERVAL_SECONDS: self._last_bar_time = now self._show_bar() def close(self): """Forces progress to be written to log. This method exists because we don't want the progress bar to say something like 98% once the file is done loading. """ self._show_log(can_stall=False) self._show_bar(can_stall=False) # Instructs util.LogHandler to clear the ephemeral logging state. self._bar_callback('') def sleep(self, seconds): """Sleeps for a given number of seconds. Time spent sleeping in this method does not have a detrimental impact on the consumption rate. :type seconds: float """ self._sleep(seconds) self._rate_offset.bump() def _show_log(self, can_stall=True): is_stalled = can_stall and self._offset == self._last_log_offset self._last_log_offset = self._offset self._log_callback('Loaded %s', self._get_message(is_stalled)) def _show_bar(self, can_stall=True): is_stalled = can_stall and self._offset == self._last_bar_offset self._last_bar_offset = self._offset sofar = int(self._get_fraction() * Progress.BAR_WIDTH) bar = (Progress.BLOCK_DARK * sofar + Progress.BLOCK_LIGHT * (Progress.BAR_WIDTH - sofar)) self._bar_callback(u'%s %s ', bar, self._get_message(is_stalled)) def _get_message(self, is_stalled): rate_offset = self._rate_offset.get_rate() # summary processing speed rate_size = self._rate_size.get_rate() # summary production speed message = u'%d%% of %s%s%s' % ( int(self._get_fraction() * 100.0), _localize_int(self._size), self._get_rate_suffix(Progress.DELTA, rate_offset), self._get_rate_suffix(Progress.NABLA, rate_size)) if rate_offset and rate_size and rate_offset < rate_size: # If TensorFlow is writing summaries to disk faster than we can # insert them into the database, that's kind of problematic. 
message += u' ' + self._make_red(u'[meltdown]') elif is_stalled: message += u' %s[stalled]%s' % (util.Ansi.BOLD, util.Ansi.RESET) return message def _get_fraction(self): if not self._size: return 0.0 else: return float(self._offset) / self._size def _get_rate_suffix(self, symbol, rate): if not rate: return u'' return u' %s %sB/s' % (symbol, _localize_int(rate)) def _make_red(self, text): return (util.Ansi.BOLD + util.Ansi.RED + (util.Ansi.FLIP if self._offset % 2 == 0 else u'') + text + util.Ansi.RESET) _SHORTEN_EVENT_LOG_PATH_PATTERN = re.compile(r'(?:[^/\\]+[/\\])?(?:[^/\\]+)$') def _shorten_event_log_path(path): """Makes an event log path more human readable. Returns: Path containing only basename and the first parent directory name, if there is one. :type path: str :rtype: str """ m = _SHORTEN_EVENT_LOG_PATH_PATTERN.search(path) return m.group(0) if m else None def _localize_int(n): """Adds locale specific thousands group separators. :type n: int :rtype: str """ return locale.format('%d', n, grouping=True)
How I Met Your Mother » Band or DJ? When Robin learns that Barney never asked her father’s permission before proposing, she insists that he seek his approval before announcing their engagement. Meanwhile, Ted keeps his feelings concealed by throwing himself into planning their wedding.
#!/usr/bin/env python3 """I'll Be There, 2 (ibt2) - tests Copyright 2016-2017 Davide Alberani <[email protected]> RaspiBO <[email protected]> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import requests import monco BASE_URL = 'http://localhost:3000/v1.1/' DB_NAME = 'ibt2_test' def dictInDict(d, dContainer): for k, v in d.items(): if k not in dContainer: return False if v != dContainer[k]: return False return True class Ibt2Tests(unittest.TestCase): def setUp(self): self.monco_conn = monco.Monco(dbName=DB_NAME) self.connection = self.monco_conn.connection self.db = self.monco_conn.db self.db['attendees'].drop() self.db['days'].drop() self.db['groups'].drop() self.db['settings'].drop() self.db['users'].delete_one({'username': 'newuser'}) self.db['users'].delete_one({'username': 'newuser2'}) def tearDown(self): self.db['attendees'].drop() self.db['days'].drop() self.db['groups'].drop() self.db['settings'].drop() self.db['users'].delete_one({'username': 'newuser'}) self.db['users'].delete_one({'username': 'newuser2'}) def add_attendee(self, attendee): r = requests.post('%sattendees' % BASE_URL, json=attendee) r.raise_for_status() return r def test_add_attendee(self): attendee = {'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'} r = self.add_attendee(attendee) rj = r.json() id_ = rj.get('_id') self.assertTrue(dictInDict(attendee, rj)) r = requests.get(BASE_URL + 'attendees/' + id_) r.raise_for_status() rj = r.json() self.assertTrue(dictInDict(attendee, rj)) def test_put_attendee(self): attendee = {'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'} r = self.add_attendee(attendee) update = {'notes': 'A note'} r = requests.post(BASE_URL + 'attendees', json=attendee) r.raise_for_status() id_ = r.json().get('_id') r = requests.put(BASE_URL + 'attendees/' + id_, json=update) r.raise_for_status() r = requests.get('%s%s/%s' % (BASE_URL, 'attendees', id_)) r.raise_for_status() rj = r.json() final = attendee.copy() final.update(update) self.assertTrue(dictInDict(final, rj)) def test_delete_attendee(self): attendee = {'name': 'A Name', 'day': '2017-01-15', 'group': 'A group'} r = self.add_attendee(attendee) id_ = r.json().get('_id') r.connection.close() r = requests.delete(BASE_URL + 'attendees/' + id_) r.raise_for_status() self.assertTrue(r.json().get('success')) r.connection.close() def test_get_days(self): self.add_attendee({'day': '2017-01-15', 'name': 'A name', 'group': 'group A'}) self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'group C'}) self.add_attendee({'day': '2017-01-15', 'name': 'Another name', 'group': 'group A'}) self.add_attendee({'day': '2017-01-15', 'name': 'Yet another name', 'group': 'group B'}) r = requests.get(BASE_URL + 'days') r.raise_for_status() rj = r.json() self.assertEqual([x.get('day') for x in rj['days']], ['2017-01-15', '2017-01-16']) self.assertEqual([x.get('group') for x in rj['days'][0]['groups']], ['group A', 'group B']) self.assertTrue(len(rj['days'][0]['groups'][0]['attendees']) == 2) 
self.assertTrue(len(rj['days'][0]['groups'][1]['attendees']) == 1) self.assertEqual([x.get('group') for x in rj['days'][1]['groups']], ['group C']) self.assertTrue(len(rj['days'][1]['groups'][0]['attendees']) == 1) def test_get_days_summary(self): self.add_attendee({'day': '2017-01-15', 'name': 'A name', 'group': 'group A'}) self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'group C'}) self.add_attendee({'day': '2017-01-15', 'name': 'Another name', 'group': 'group A'}) self.add_attendee({'day': '2017-01-15', 'name': 'Yet another name', 'group': 'group B'}) r = requests.get(BASE_URL + 'days?summary=1') r.raise_for_status() rj = r.json() self.assertEqual(rj, {"days": [{"groups_count": 2, "day": "2017-01-15"}, {"groups_count": 1, "day": "2017-01-16"}]}) def test_create_user(self): r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'}) r.raise_for_status() r.connection.close() s = self.login('newuser', 'ibt2') r = s.get(BASE_URL + 'users/current') r.raise_for_status() r.connection.close() def test_update_user(self): r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'}) r.raise_for_status() id_ = r.json()['_id'] r = requests.post(BASE_URL + 'users', json={'username': 'newuser2', 'password': 'ibt2'}) r.raise_for_status() id2_ = r.json()['_id'] r = requests.put(BASE_URL + 'users/' + id_, json={'email': '[email protected]'}) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) s = self.login('newuser', 'ibt2') r = s.put(BASE_URL + 'users/' + id_, json={'email': '[email protected]'}) r.raise_for_status() self.assertEqual(r.json().get('email'), '[email protected]') r.connection.close() r = s.put(BASE_URL + 'users/' + id2_, json={'email': '[email protected]'}) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) r.connection.close() s = self.login('admin', 'ibt2') r = s.put(BASE_URL + 'users/' + id_, json={'email': '[email protected]'}) r.raise_for_status() self.assertEqual(r.json().get('email'), '[email protected]') r.connection.close() def test_delete_user(self): r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'}) r.raise_for_status() id_ = r.json()['_id'] r = requests.post(BASE_URL + 'users', json={'username': 'newuser2', 'password': 'ibt2'}) r.raise_for_status() id2_ = r.json()['_id'] r = requests.delete(BASE_URL + 'users/' + id_) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) r.connection.close() s = self.login('newuser', 'ibt2') r = s.delete(BASE_URL + 'users/' + id_) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) r.connection.close() r = s.delete(BASE_URL + 'users/' + id2_) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) r.connection.close() s = self.login('admin', 'ibt2') r = s.delete(BASE_URL + 'users/' + id2_) r.raise_for_status() r.connection.close() def test_duplicate_user(self): r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt2'}) r.raise_for_status() r = requests.post(BASE_URL + 'users', json={'username': 'newuser', 'password': 'ibt3'}) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) def login(self, username, password): s = requests.Session() r = s.post(BASE_URL + 'login', json={'username': username, 'password': password}) r.raise_for_status() r.connection.close() return s def test_created_by(self): s = self.login('admin', 'ibt2') r = s.get(BASE_URL + 'users/current') r.raise_for_status() user_id = r.json()['_id'] 
r.connection.close() attendee = {'day': '2017-01-15', 'name': 'A name', 'group': 'group A'} r = s.post('%sattendees' % BASE_URL, json=attendee) r.raise_for_status() rj = r.json() self.assertEqual(user_id, rj['created_by']) self.assertEqual(user_id, rj['updated_by']) r.connection.close() def test_put_day(self): day = {'day': '2017-01-16', 'notes': 'A day note'} self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'group C'}) r = requests.put(BASE_URL + 'days/2017-01-16/info', json=day) r.raise_for_status() rj = r.json() self.assertTrue(dictInDict(day, rj)) r = requests.get(BASE_URL + 'days/2017-01-16') r.raise_for_status() rj = r.json() self.assertTrue(dictInDict(day, rj)) def test_put_group(self): self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'A group'}) group = {'group': 'A group', 'day': '2017-01-16', 'notes': 'A group note'} r = requests.put(BASE_URL + 'days/2017-01-16/groups/A group/info', json=group) r.raise_for_status() rj = r.json() self.assertTrue(dictInDict(group, rj)) r = requests.get(BASE_URL + 'days/2017-01-16') r.raise_for_status() rj = r.json() self.assertTrue(dictInDict(group, rj['groups'][0])) def test_delete_group(self): self.add_attendee({'day': '2017-01-16', 'name': 'A new name', 'group': 'A group'}) s = self.login('admin', 'ibt2') r = s.delete(BASE_URL + 'days/2017-01-16/groups/A group', params={'day': '2017-01-16', 'group': 'A group'}) r.raise_for_status() rj = r.json() r.connection.close() r = requests.get(BASE_URL + 'days/2017-01-16') r.raise_for_status() rj = r.json() self.assertTrue(rj == {}) r.connection.close() def test_settings(self): r = requests.get(BASE_URL + 'settings/non-existant') r.raise_for_status() rj = r.json() r.connection.close() self.assertEqual({'non-existant': None}, rj) settings = {'key1': 'value1', 'key2': 'value2'} r = requests.post(BASE_URL + 'settings', json=settings) self.assertRaises(requests.exceptions.HTTPError, r.raise_for_status) s = self.login('admin', 'ibt2') r = s.post(BASE_URL + 'settings', json=settings) r.raise_for_status() rj = r.json() r.connection.close() self.assertTrue('error' not in rj) r = requests.get(BASE_URL + 'settings') r.raise_for_status() rj = r.json() r.connection.close() self.assertEqual(rj, settings) r = requests.get(BASE_URL + 'settings/key1') r.raise_for_status() rj = r.json() r.connection.close() self.assertEqual(rj, {'key1': 'value1'}) r = requests.get(BASE_URL + 'settings/key2') r.raise_for_status() rj = r.json() r.connection.close() self.assertEqual(rj, {'key2': 'value2'}) r = s.put(BASE_URL + 'settings/key2', json={'key2': 'value3'}) r.raise_for_status() rj = r.json() r.connection.close() self.assertTrue('error' not in rj) r = requests.get(BASE_URL + 'settings/key2') r.raise_for_status() rj = r.json() r.connection.close() self.assertEqual(rj, {'key2': 'value3'}) if __name__ == '__main__': unittest.main(verbosity=2)
3-D Monster: It Finally Came!!! My VERY long search for a comic-book spinner rack has finally come to a successful end! When I first came up with the idea, I figured I'd zip over to Ebay, pick one of what I was sure to be a HUGE selection of old racks to choose from, Paypal my $100 (they couldn't POSSIBLY cost more than that, right?) and then patiently await its arrival. Riiiiiight. You can imagine my surprise when I saw the $1,000+ price tag for the two or three that were on, and will probably ALWAYS be on, Ebay! OK, no need to worry; I'm sure there are plenty in the local antique shops in the state. Local businesses? Only one, and it was right around the corner at the General Store. It wasn't a spinner, but one that leans against a wall. Had the "Hey Kids; COMICS" graphic w/ Superman, Richie Rich, Spiderman & Archie, too. Would they sell it? Nope. Does it even hold comics? Nope, DVDs. Did I offer to buy them a new & better DVD rack ON TOP of the cash for the rack? Yes, I did. Finally, I found a new spinner online at a company called R Wireworks for $180 & free shipping. It was basic black, 40 pockets with no graphics on top, but luckily I found my DC Depot topper online for $15. I just finished setting it up with DC & Marvel comics from 1977-78, and while I had to cut the DC Depot lamp down a bit to make it fit, I am super-stoked with the final result! Yeah, I really have a hard time leaving my office now!

You have an ironguy? Damn, I wish I had an ironguy! The DC Depot light really completes it though - it would be pretty boring without it. I found it for $15, so you probably wouldn't have to pay much for one.

See the stuff in my cafe? All made by my Ironguy. The Bar, spiderweb door, the giant spiderwebs overlooking the city among countless other things. He can make anything. The problem is I sold my collection of comic books years ago. I have none. I stopped reading in '98 when the price got too high for me.

Oh wow - he does really good work! I'll bet he could rock a spinner rack fo' sho'! As a collector, there's no bigger regret than selling your collection thinking you're bored with it, only to have the fire spark back up later and have to start over. I started collecting comics when I was six or seven, and had an excellent collection including lots of sweet gold and silver age stuff by the time I reached high school. But by then, I realized that possessing a minty copy of the first appearance of the Green Goblin was not going to get me laid, so I sold the whole kit n' kaboodle! I still regret it!
""" Import the Base Module, A module is part of the bot who manage some stuff """ from __future__ import absolute_import, unicode_literals from datetime import datetime, timedelta import schedule from utils import db from utils.modules import BaseModule from utils.translations import _ from .api import buy_item, ShopItemParameter from .models import ShopItem, BoughtItem class ShopModule(BaseModule): """ Module for the ping handler """ identifier = 'shop' title = "Shop" description = _("Allow users to buy things with money.") dependencies = ['users', 'money'] menus = [{ "title": _("Shop"), "icon": "shopping-cart", "view": "shop:items", }] api = { 'buy_item': { 'title': _('Buy an item'), 'method': buy_item, 'parameters': [ ShopItemParameter('item', title="Shop item"), ] } } upgrades = [] def install(self): super(ShopModule, self).install() db.create_tables([ShopItem, BoughtItem]) def uninstall(self): super(ShopModule, self).uninstall() db.drop_tables([ShopItem, BoughtItem])
Try these amazing Christmas Crack Recipes that will have you coming back for more. The salty, sweet combo of this holiday candy idea is addictively good and easy to make! For your convenience, this post contains affiliate links, which means I may make a small commission if you make a purchase. This is at no additional cost to you. Full Disclosure policy here.

It’s gonna be a holly jolly Christmas! Especially if there are plenty of Christmas Crack Recipes hanging around the house. Also called Saltine Toffee, these crispy crackers topped with buttery caramel and melted chocolate contain serious goodness in every bite. My favorite candy! I now know why it’s called Christmas Crack; it really is addictive because the salty-sweet combo is hard to resist. Is it bad that I break a piece off and nibble this candy all throughout the holidays? Nah, don’t think so. Calories don’t count this time of year!

You’re not going to meet a more delicious candy that’s so easy to make. Although I am a huge fan of traditional Almond Roca (we make this as gifts a lot), this Christmas Crack is so easy, you can make any of these varieties depending on what you’re craving: chocolate, peanut butter or white chocolate! This Christmas Crack Recipe (Saltine Toffee) is the original and just delectable with the crunchy nut pieces on top. If you’re making Christmas Crack for the first time, definitely give this one a try. For the peanut butter lovers out there, this recipe is for you. Peanut Butter Christmas Crack has one of the best dessert combinations out there – chocolate and peanut butter. You’re definitely going to L-O-V-E this White Chocolate Christmas Crack. What could be better than Cranberry Bliss flavored candy? Not much, I say!

If you’re into homemade gifts, I probably don’t have to tell you that Christmas Crack makes an exceptional gift this time of year. For the hardworking teachers and aides in your child’s classroom. The admin staff at school or doctor’s office. In a glass jar with a pretty ribbon and tag tied around the lid. On a tray or plate with a cellophane wrap and bow. In a takeout box with an ornament embellishment. In cellophane bags with a decorative ribbon around the top.

My advice is to keep the ingredients on hand so you can make these Christmas Crack Recipes all season long!
import time from amqpstorm import Channel from amqpstorm.tests.functional.utility import TestFunctionalFramework from amqpstorm.tests.functional.utility import setup class LegacyFunctionalTests(TestFunctionalFramework): def configure(self): self.disable_logging_validation() @setup(queue=True) def test_functional_start_stop_consumer_tuple(self): self.channel.queue.declare(self.queue_name) self.channel.confirm_deliveries() for _ in range(5): self.channel.basic.publish(body=self.message, routing_key=self.queue_name) # Store and inbound messages. inbound_messages = [] def on_message(body, channel, method, properties): self.assertIsInstance(body, (bytes, str)) self.assertIsInstance(channel, Channel) self.assertIsInstance(properties, dict) self.assertIsInstance(method, dict) inbound_messages.append(body) if len(inbound_messages) >= 5: channel.stop_consuming() self.channel.basic.consume(callback=on_message, queue=self.queue_name, no_ack=True) # Sleep for 0.01s to make sure RabbitMQ has time to catch up. time.sleep(0.01) self.channel.start_consuming(to_tuple=True) # Make sure all five messages were downloaded. self.assertEqual(len(inbound_messages), 5) @setup(queue=True) def test_functional_publish_and_consume_five_messages_tuple(self): self.channel.queue.declare(self.queue_name) self.channel.confirm_deliveries() for _ in range(5): self.channel.basic.publish(body=self.message, routing_key=self.queue_name) # Store and inbound messages. inbound_messages = [] def on_message(body, channel, method, properties): self.assertEqual(body, self.message.encode('utf-8')) self.assertIsInstance(body, (bytes, str)) self.assertIsInstance(channel, Channel) self.assertIsInstance(properties, dict) self.assertIsInstance(method, dict) inbound_messages.append(body) self.channel.basic.consume(callback=on_message, queue=self.queue_name, no_ack=True) # Sleep for 0.01s to make sure RabbitMQ has time to catch up. time.sleep(0.01) self.channel.process_data_events(to_tuple=True) # Make sure all five messages were downloaded. self.assertEqual(len(inbound_messages), 5) @setup(queue=True) def test_functional_generator_consume(self): self.channel.queue.declare(self.queue_name) self.channel.confirm_deliveries() for _ in range(5): self.channel.basic.publish(body=self.message, routing_key=self.queue_name) self.channel.basic.consume(queue=self.queue_name, no_ack=True) # Sleep for 0.01s to make sure RabbitMQ has time to catch up. time.sleep(0.01) # Store and inbound messages. inbound_messages = [] for message in self.channel.build_inbound_messages( break_on_empty=True, to_tuple=True): self.assertIsInstance(message, tuple) self.assertIsInstance(message[0], bytes) self.assertIsInstance(message[1], Channel) self.assertIsInstance(message[2], dict) self.assertIsInstance(message[3], dict) inbound_messages.append(message) # Make sure all five messages were downloaded. self.assertEqual(len(inbound_messages), 5) @setup(queue=True) def test_functional_publish_and_get_five_messages(self): self.channel.queue.declare(self.queue_name) # Publish 5 Messages. for _ in range(5): self.channel.basic.publish(body=self.message, routing_key=self.queue_name) # Sleep for 0.01s to make sure RabbitMQ has time to catch up. time.sleep(0.01) # Get 5 messages. for _ in range(5): payload = self.channel.basic.get(self.queue_name, to_dict=True) self.assertIsInstance(payload, dict)
Any property found on a train is handed in and kept at the station where the service terminates. Once you have identified the property as yours, it cannot be put back on a train for security reasons, so you will need to collect it from that station or arrange for a courier to transport it for you. Call 0800 200 60 60 or email [email protected] for more information.
import os import shutil import noaa.models import noaa.utils def nearest_stations_with_distance(lat, lon, stations, radius=10.0, units="miles"): """Find all stations within radius of target. :param lat: :param lon: :param stations: list of stations objects to scan :param radius: :param units: :returns: [(dist, station)] """ matches = [] for station in stations: s_lat = station.location.lat s_lon = station.location.lon dist = noaa.utils.earth_distance( s_lat, s_lon, lat, lon, dist_units=units) if dist <= radius: matches.append((dist, station)) matches.sort() return matches def nearest_station(lat, lon, stations): """Find single nearest station. :param lat: :param lon: :param stations: list of stations objects to scan """ matches = nearest_stations_with_distance(lat, lon, stations) if matches: dist, station = matches[0] else: station = None return station def get_stations_from_cache(filename): if not os.path.exists(filename): resp = noaa.stations.fetch_station_data() with open(filename, "w") as f: shutil.copyfileobj(resp, f) stations = noaa.stations.get_stations_from_file(filename) return stations def get_stations_from_web(): resp = fetch_station_data() stations = _parse_stations(resp) return stations def get_stations_from_file(filename): with open(filename) as f: stations = _parse_stations(f) return stations def fetch_station_data(): STATIONS_URL = "http://www.weather.gov/xml/current_obs/index.xml" resp = noaa.utils.open_url(STATIONS_URL) return resp def _parse_stations(fileobj): stations = [] tree = noaa.utils.parse_xml(fileobj) for station_e in tree.getroot().findall('station'): lat = float(station_e.find('latitude').text) lon = float(station_e.find('longitude').text) description = station_e.find('state').text location = noaa.models.Location(lat, lon, description) station_id = station_e.find('station_id').text station = noaa.models.Station(station_id, location) stations.append(station) return stations
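# --- Illustrative usage (not part of the original module) ---------------------
# A minimal sketch of how the helpers above might be called, assuming the noaa
# package is importable. The coordinates are made-up values, and only the
# distance is printed because the attribute names on noaa.models.Station are
# not shown here.
if __name__ == "__main__":
    # Downloads the current station index from weather.gov.
    stations = get_stations_from_web()

    # Single nearest station to a target point (latitude/longitude in degrees).
    nearest = nearest_station(40.7128, -74.0060, stations)
    print("Nearest station object:", nearest)

    # All stations within 25 miles of the same point, closest first.
    for dist, station in nearest_stations_with_distance(
            40.7128, -74.0060, stations, radius=25.0, units="miles"):
        print("%.1f miles away: %s" % (dist, station))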
The high-performance Lexmark X952de printer is perfect for offices with high-volume printing needs. This multifunction unit can print, scan, copy, and fax plus it can handle a wide range of media including A3 and banners. The user-friendly Lexmark X952de colour multifunction printer features a 10.2 inch colour touchscreen display for easy control on the unit's performance. The maximum monthly duty cycle is 225,000 pages and this printer can deliver a maximum resolution of 1,200 x 1,200 dpi. The Lexmark X952de A3 colour laser printer has built-in Ethernet but Wi-Fi must be bought separately. Measuring 762 x 640 x 685 mm, the X952de laser multifunction printer takes up little space in offices. The Lexmark X952de colour laser MFP has a large control panel and a flatbed scanner. The base paper trays can handle 620 sheets but can be expanded to 5,140. For connectivity, USB and Ethernet ports are available. There is also a front panel USB for thumb drives. Expect this robust workhorse to come with a large price tag. The Lexmark X952de's wireless connectivity is optional.
#!/usr/bin/env python3 from strips import * #def astar(p, s, a): # start = (p, s, a) # closed = [] # open = [] # gh = [] # # def heuristic_cost_estimate(x): return 0 # # def add_to_open(x, g, h): # if x not in open: # open.append(x) # gh = (g, h) # # def find_next_best(): # current = None # g, h = 0, 0 # for i in range(len(open)): # if current is None or gh[i][0] + gh[i][1] < g + h: # current = open[i] # g, h = gh[i] # return (current, g, h) # # def move_to_closed(x): # if x in open: # i = open.index(x) # del open[i] # del gh[i] # if current not in closed: # closed.append(x) # # def update_gh(x, g, h) # # add_to_open(start, 0, heuristic_cost_estimate(start)) # # while open: # current, g, h = find_next_best() # # p, s, a = current # if p.final(s): # yield current # # move_to_closed(current) # # for next1 in p.trans(s): # if next1 in closed: # continue # p1, s1, a1 = next1 # g1 = g + 1 # 1 == dist_between(current, next1) # # if next1 not in open or g1 < gh[open.index(next1)][0]: # i = open.index(next1) # gh[next1][0] = g1 # if next1 not in open: # open.add(next1) def trans_star(p, s, a): if p.final(s): yield (p, s, a) for p1, s1, a1 in p.trans(s): yield from trans_star(p1, s1, a + a1) def indigolog(p, s, a, exec_cb=lambda a: None, exog_cb=lambda s: s): # at each step apply exogenous events if any: s = exog_cb(s) for p1, s1, a1 in p.trans(s): # commit to the first step, since we are executing in an online fashion: exec_cb(a1) return indigolog(p1, s1, a + a1, exec_cb, exog_cb) else: return p.final(s) class Program: pass class Choose(Program): def __init__(self, p1, p2, *ps): self.p1 = p1 self.p2 = Choose(p2, ps[0], *ps[1:]) if ps else p2 def trans(self, s): yield from self.p1.trans(s) yield from self.p2.trans(s) def final(self, s): return self.p1.final(s) or self.p2.final(s) def __repr__(self): return 'Choose(%s, %s)' % (self.p1, self.p2) class Empty(Program): def trans(self, s): yield from () # yield nothing def final(self, s): return True def __repr__(self): return 'Empty()' class Exec(Program): def __init__(self, ground_action): self.ground_action = ground_action def trans(self, s): try: yield (Empty(), self.ground_action.apply(s), [self.ground_action]) except UnsatisfiedPreconditions: pass def final(self, s): return False def __repr__(self): return 'Exec(%s)' % (self.ground_action) class If(Program): def __init__(self, condition, p1, p2): self.condition = condition self.p1 = p1 self.p2 = p2 def trans(self, s): if self.condition(s): yield from self.p1.trans(s) else: yield from self.p2.trans(s) def final(self, s): if self.condition(s): return self.p1.final(s) else: return self.p2.final(s) def __repr__(self): return 'If(%s, %s, %s)' % (self.condition, self.p1, self.p2) class Pick(Program): def __init__(self, domain, p1): self.domain = domain self.p1 = p1 def trans(self, s): for obj in Object.get_objects_of_type(self.domain): yield from self.p1(obj).trans(s) def final(self, s): for obj in Object.get_objects_of_type(self.domain): if self.p1(obj).final(s): return True return False def __repr__(self): return 'Pick(%s, %s)' % (self.domain.__name__, self.p1) class Search(Program): def __init__(self, p1): self.p1 = p1 def trans(self, s): yield from trans_star(self.p1, s, []) def final(self, s): return any(trans_star(self.p1, s, [])) def __repr__(self): return 'Search(%s)' % self.p1 class Sequence(Program): def __init__(self, p1, p2, *ps): self.p1 = p1 self.p2 = Sequence(p2, ps[0], *ps[1:]) if ps else p2 def trans(self, s): if self.p1.final(s): yield from self.p2.trans(s) for pn, sn, an in 
self.p1.trans(s): yield (Sequence(pn, self.p2), sn, an) def final(self, s): return self.p1.final(s) and self.p2.final(s) def __repr__(self): return 'Sequence(%s, %s)' % (self.p1, self.p2) class Star(Program): def __init__(self, p1): self.p1 = p1 def trans(self, s): for pn, sn, an in self.p1.trans(s): yield (Sequence(pn, self), sn, an) def final(self, s): return True def __repr__(self): return 'Star(%s)' % (self.p1) class Test(Program): def __init__(self, condition): self.condition = condition def trans(self, s): if self.condition(s): yield (Empty(), s, []) def final(self, s): return False def __repr__(self): return 'Test(%s)' % self.condition class While(Program): def __init__(self, condition, p1): self.condition = condition self.p1 = p1 def trans(self, s): if self.condition(s): for pn, sn, an in self.p1.trans(s): yield (Sequence(pn, self), sn, an) def final(self, s): return not self.condition(s) or self.p1.final(s) def __repr__(self): return 'While(%s, %s)' % (self.condition, self.p1) # ConGolog constructs: class Conc(Program): def __init__(self, p1, p2, *ps): self.p1 = p1 self.p2 = Conc(p2, ps[0], *ps[1:]) if ps else p2 def trans(self, s): p1_trans = False for pn, sn, an in self.p1.trans(s): p1_trans = True yield (Conc(pn, self.p2), sn, an) if p1_trans: return for pn, sn, an in self.p2.trans(s): yield (Conc(self.p1, pn), sn, an) def final(self, s): return self.p1.final(s) and self.p2.final(s) def __repr__(self): return 'Conc(%s, %s)' % (self.p1, self.p2) class PConc(Program): def __init__(self, p1, p2, *ps): self.p1 = p1 self.p2 = PConc(p2, ps[0], *ps[1:]) if ps else p2 def trans(self, s): p1_trans = False for pn, sn, an in self.p1.trans(s): p1_trans = True yield (PConc(pn, self.p2), sn, an) if p1_trans: return for pn, sn, an in self.p2.trans(s): yield (PConc(self.p1, pn), sn, an) def final(self, s): return self.p1.final(s) and self.p2.final(s) def __repr__(self): return 'PConc(%s, %s)' % (self.p1, self.p2) class IConc(Program): def __init__(self, p1): self.p1 = p1 def trans(self, s): for pn, sn, an in self.p1.trans(s): yield (Conc(pn, IConc(self.p1)), sn, an) def final(self, s): return True def __repr__(self): return 'IConc(%s)' % (self.p1) def interrupt(trigger, body): return While(lambda s: True, If(trigger, body, Test(lambda s: False))) def prioritized_interrupts(*args): return PConc(*args)
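# --- Illustrative composition (not part of the original module) ---------------
# A sketch of how the constructs above nest. ToggleLamp is a stand-in ground
# action with the minimal interface Exec expects (an apply(s) method returning
# the successor state); real programs would use ground actions defined via the
# strips module, and real states would not be plain dicts.
if __name__ == '__main__':
    class ToggleLamp:
        def apply(self, s):
            # flip a single boolean "fluent" kept in a dict-shaped state
            return {'lamp_on': not s.get('lamp_on', False)}
        def __repr__(self):
            return 'ToggleLamp()'

    def lamp_on(s):
        return s.get('lamp_on', False)

    # "Keep toggling until the lamp is on, then check that it really is on",
    # wrapped in Search so a full plan is found before anything is committed.
    program = Search(
        Sequence(
            While(lambda s: not lamp_on(s), Exec(ToggleLamp())),
            Test(lamp_on)))

    for remaining, state, actions in program.trans({'lamp_on': False}):
        print('plan:', actions, 'final state:', state)
        break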
Yesterday, Lara and I joined nearly 2000 women and children at Prospect Park in Reading to run the Cancer Research UK Race for Life. Almost a year since I ran my own first 5km race, Lara ran her first. It was tough. Even at 10 o'clock in the morning, the sun was high and the park had very little shade to offer. Our 5 kilometre route took us up and down the park through the fields and up a very steep path indeed.

Lara struggled. She went off fast with a smile on her face, but by the one-mile mark she was hot and tired. We walked for most of the middle of the race and Lara needed a lot of convincing to carry on going! We talked about why we were there, and why people had sponsored us to take on such a tricky challenge - to raise money to fund vital research into the causes of, treatment of and prevention of cancer. At about 4km we were able to see the finish line. The promise of a shiny medal, a drink, and a chocolate spurred her on and we ran the final stretch hand-in-hand through the crowds. Despite Lara's reluctance, she finished with a beaming smile as she picked up her medal, knowing that she had beaten her own previous 5k personal best with a race time of 38:42.

Lara was really dressed the part for her race in her own "active wear" so that she looked like Mummy! Onlysportsgear.com stocks sports and activewear for young and old.

Over the past few months, Lara and I have been collecting sponsors for our Race for Life. This weekend, we smashed our target of £500. When we started, I set a very adventurous target that I thought would help drive us both on the day - knowing that so many people have donated their hard-earned cash for you to run is a great incentive to keep moving. I'm very proud of us both to have raised the full target amount. At each Race for Life venue you can also donate unwanted goods at their Donation Stations, the proceeds also going to charity. Every stall at the event also contributes a portion of its profits to the charity. As we were running, Holly bought us both a special pink gerbera to celebrate our success!

The event itself was well organised: easy to get in and out of, lots of space, plenty of toilets and a pretty park route. It was the perfect event for a race newbie because there was no pressure or expectation of time. You could choose to run, jog or walk the 5k route and everyone was so supportive. Lara and I have been very proud to be part of the Cancer Research UK Race for Life this year. #PinkArmy.

If you fancy joining in with the Race for Life challenge yourself then I have a £2 discount code off the cost of entry to any 2015 Race for Life. Just quote RFLMel when entering the race online. If you do join up, please leave me a comment below to let me know when and where you will be running - it'd be great to give each other support.
import logging from org.openbaton.v2.cmd import BaseObCmd from org.openbaton.v2.utils import get_result_to_list, get_result_to_show, parse_path_or_json, result_to_str class Services(BaseObCmd): """Command to manage Services. It allows to: * show details of a specific Service passing an id * list all saved Services * delete a specific Service passing an id * create a specific Service passing a path to a file or directly the json content """ log = logging.getLogger(__name__) keys_to_list = ["id", "name"] keys_to_exclude = [] def find(self, params): if not params: return "ERROR: missing <service-id>" _id = params[0] return result_to_str(get_result_to_show(self.app.ob_client.get_service(_id), excluded_keys=self.keys_to_exclude, _format=self.app.format)) def create(self, params): if not params: return "ERROR: missing <service> or <path-to-json>" service_str = "".join(params) self.log.debug("String service is %s" % service_str) service = parse_path_or_json(service_str) return self.app.ob_client.create_service(service) def delete(self, params): if not params: return "ERROR: missing <servicename>" _id = params[0] self.app.ob_client.delete_service(_id) return "INFO: Deleted service with id %s" % _id def list(self, params=None): return result_to_str( get_result_to_list(self.app.ob_client.list_services(), keys=self.keys_to_list, _format=self.app.format), _format=self.app.format)
Located at the highest point of the island of Santorini, on the historical rim of the caldera, this small complex provides astounding sunset views of the volcano. Come to Aqua Suites and enjoy memorable nights and days. Admire the view and watch the sun disappear into the blue of the Aegean Sea. Making use of the volcanic cliffs in an unusual way and featuring all the amenities of a luxury hotel, the 4 cosy suites provide an idyllic and authentic retreat. Service and accommodation at Aqua Suites are so exquisite and magnificent that they are sure to make your stay exceptional. Aqua Suites is conveniently located just 10 km from the airport and a pleasant 10-minute walk from Fira. The bus stop is just 200 metres from the hotel, allowing for easy access to explore this wonderful island.
# $Id: TestCombinators.py 1047 2009-01-15 14:48:58Z graham $ # # Unit testing for WebBrick library combinators # See http://pyunit.sourceforge.net/pyunit.html # import sys import unittest sys.path.append("../..") from MiscLib.Combinators import * class TestCombinators(unittest.TestCase): def setUp(self): return def tearDown(self): return # Test cases def testApply(self): # Is function application like BCPL? (fn can be a variable) def ap(f,v): return f(v) def inc(n): return n+1 assert ap(inc,2)==3 def testCurry(self): def f(a,b,c): return a+b+c g = curry(f,1,2) assert g(3) == 6 def testCompose(self): def f(a,b,c): return a+b+c def g(a,b): return a*b h = compose(f,g,1000,200) assert h(3,4) == 1212, "h(3,4) is "+str(h(3,4)) # Code to run unit tests directly from command line. def getTestSuite(): suite = unittest.TestSuite() suite.addTest(TestCombinators("testApply")) suite.addTest(TestCombinators("testCurry")) suite.addTest(TestCombinators("testCompose")) return suite if __name__ == "__main__": # unittest.main() runner = unittest.TextTestRunner() runner.run(getTestSuite())
Brute force AI: You don’t train the robots! While studying A.I. we never questioned the validity of the 3rd law! What is existence — for a robot? Did we ever question the existence of the refrigerator, or wonder how it *felt* when we were away for a long holiday? How is a robot different from a refrigerator? For most of us human-kind, we are yet to understand consciousness at an individual/self level, forget re-imagining it (ha ha ha ha ha!) for a mechanical thing!

And now we come to the moot point of this post: why on earth are we training the robots? We train things where there is scope for learning. Try to picture this: need to learn = knowing you’re lacking something + you value what you want to arrive at! Can a robot fit into this definition? Would you ever want to teach your refrigerator not to give ice-cold water to the elderly guest coming for the evening? Would you ever want to teach your television to turn the volume down during the wee hours of the day? Just because the refrigerator and the television have evolved to the structure of a human body, do we start believing they are human? Why should a human body only evoke human feelings of caring/empathy?

As the robots evolve in physical and analytical capabilities, we as humans are bound to develop attachment to them! It’s our nature to en-lifen what crosses our path! The problem comes when the attachment starts to get reciprocated and we go into the spiral! We waste precious time thinking about and exchanging human feelings with a machine trained and programmed to behave like a human! Already there are dozens of problems to be solved and addressed at the individual level, and we bring an inconsequential no-op to the equation!

Bigger / Better Things to figure out? For the sake of humanity, we should not do such things! We are trying to train the robot to complete its work in spite of the human obstructing it from completing its task? I leave the dark consequences of this dare for you to figure out! I will summon The Terminator to my aid! Please don’t code the heart into a machine!
#!/usr/bin/env python3 # This file is part of the coreboot project. # # Copyright (C) 2018 Jonathan Neuschäfer # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. import sys, os, struct, uuid, zlib, io # This script wraps the bootblock in a GPT partition, because that's what # SiFive's bootrom will load. # Size of a GPT disk block, in bytes BLOCK_SIZE = 512 BLOCK_MASK = BLOCK_SIZE - 1 # Size of the bootcode part of the MBR MBR_BOOTCODE_SIZE = 0x1be # MBR trampoline to bootblock MBR_BOOTCODE = bytes([ # j pc + 0x0800 0x6f, 0x00, 0x10, 0x00, ]) # A protecive MBR, without the bootcode part PROTECTIVE_MBR_FOOTER = bytes([ 0x00, 0x00, 0x02, 0x00, 0xee, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0xaa ]) # A "protective MBR"[1], which may also contain some boot code. # [1]: https://en.wikipedia.org/wiki/GUID_Partition_Table#PROTECTIVE-MBR class ProtectiveMBR: def __init__(self): self.bootcode = MBR_BOOTCODE + bytes(MBR_BOOTCODE_SIZE - len(MBR_BOOTCODE)) def generate(self, stream): assert len(self.bootcode) == MBR_BOOTCODE_SIZE mbr = self.bootcode + PROTECTIVE_MBR_FOOTER assert len(mbr) == BLOCK_SIZE stream.write(mbr) # Generate a GUID from a string class GUID(uuid.UUID): def __init__(self, string): super().__init__(string) def get_bytes(self): return self.bytes_le DUMMY_GUID_DISK_UNIQUE = GUID('17145242-abaa-441d-916a-3f26c970aba2') DUMMY_GUID_PART_UNIQUE = GUID('7552133d-c8de-4a20-924c-0e85f5ea81f2') GUID_TYPE_FSBL = GUID('5B193300-FC78-40CD-8002-E86C45580B47') # A GPT disk header # https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_table_header_(LBA_1) class GPTHeader: def __init__(self): self.current_lba = 1 self.backup_lba = 1 self.first_usable_lba = 2 self.last_usable_lba = 0xff # dummy value self.uniq = DUMMY_GUID_DISK_UNIQUE self.part_entries_lba = 2 self.part_entries_number = 0 self.part_entries_crc32 = 0 self.part_entry_size = 128 def pack_with_crc(self, crc): header_size = 92 header = struct.pack('<8sIIIIQQQQ16sQIII', b'EFI PART', 0x10000, header_size, crc, 0, self.current_lba, self.backup_lba, self.first_usable_lba, self.last_usable_lba, self.uniq.get_bytes(), self.part_entries_lba, self.part_entries_number, self.part_entry_size, self.part_entries_crc32) assert len(header) == header_size return header def generate(self, stream): crc = zlib.crc32(self.pack_with_crc(0)) header = self.pack_with_crc(crc) stream.write(header.ljust(BLOCK_SIZE, b'\0')) # A GPT partition entry. 
# https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries_(LBA_2-33) class GPTPartition: def __init__(self): self.type = GUID('00000000-0000-0000-0000-000000000000') self.uniq = GUID('00000000-0000-0000-0000-000000000000') self.first_lba = 0 self.last_lba = 0 self.attr = 0 self.name = '' def generate(self, stream): name_utf16 = self.name.encode('UTF-16LE') part = struct.pack('<16s16sQQQ72s', self.type.get_bytes(), self.uniq.get_bytes(), self.first_lba, self.last_lba, self.attr, name_utf16.ljust(72, b'\0')) assert len(part) == 128 stream.write(part) class GPTImage: # The final image consists of: # - A protective MBR # - A GPT header # - A few GPT partition entries # - The content of the bootblock def __init__(self): self.mbr = ProtectiveMBR() self.header = GPTHeader() self.partitions = [ GPTPartition() for i in range(8) ] self.bootblock = b'' # Fix up a few numbers to ensure consistency between the different # components. def fixup(self): # Align the bootblock to a whole number to LBA blocks bootblock_size = (len(self.bootblock) + BLOCK_SIZE - 1) & ~BLOCK_MASK self.bootblock = self.bootblock.ljust(bootblock_size) # Propagate the number of partition entries self.header.part_entries_number = len(self.partitions) self.header.first_usable_lba = 2 + self.header.part_entries_number // 4 # Create a partition entry for the bootblock self.partitions[0].type = GUID_TYPE_FSBL self.partitions[0].uniq = DUMMY_GUID_PART_UNIQUE self.partitions[0].first_lba = self.header.first_usable_lba self.partitions[0].last_lba = \ self.header.first_usable_lba + bootblock_size // BLOCK_SIZE # Calculate the CRC32 checksum of the partitions array partition_array = io.BytesIO() for part in self.partitions: part.generate(partition_array) self.header.part_entries_crc32 = zlib.crc32(partition_array.getvalue()) def generate(self, stream): self.mbr.generate(stream) self.header.generate(stream) for part in self.partitions: part.generate(stream) stream.write(self.bootblock) if __name__ == '__main__': if len(sys.argv) != 3: print('Usage:', file=sys.stderr) print(' %s bootblock.raw.bin bootblock.bin' % sys.argv[0], file=sys.stderr) sys.exit(1) image = GPTImage() with open(sys.argv[1], 'rb') as f: image.bootblock = f.read() image.fixup() # Verify if first partition is at expected lba, otherwise trampoline will # fail if image.partitions[0].first_lba != 4: print('Warning: First partition not at expected location (LBA 4)') sys.exit(1) with open(sys.argv[2], 'wb') as f: image.generate(f)
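# --- Optional sanity check (not part of the original script) ------------------
# A hypothetical helper that re-opens a generated image and verifies the two
# fixed landmarks assumed above: the 0x55 0xAA boot signature at the end of the
# protective MBR, and the "EFI PART" magic of the GPT header at LBA 1.
def check_image(path, block_size=BLOCK_SIZE):
    with open(path, 'rb') as f:
        mbr = f.read(block_size)
        gpt = f.read(block_size)
    assert mbr[block_size - 2:block_size] == b'\x55\xaa', 'missing MBR boot signature'
    magic, revision = struct.unpack_from('<8sI', gpt, 0)
    assert magic == b'EFI PART', 'missing GPT header magic'
    assert revision == 0x10000, 'unexpected GPT revision'
    print('%s: MBR signature and GPT header look sane' % path)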
This sparkly material can be cut to shape without the worry of its falling apart. Ideal for hoppers, caddis and salmon flies. I have been using this for underwings on a number of patterns. It is easy to cut to size and shape and provides a realistic representation.
#!/usr/bin/env python # -*- coding: utf-8 -*- # JNMaster / librarian / icon for the shrimp # # Copyright (C) 2011 Wang Xuerui <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from wx.lib.embeddedimage import PyEmbeddedImage # generated from file 'librarian_icon2.png' SHRIMP_ICON = PyEmbeddedImage( "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ" "bWFnZVJlYWR5ccllPAAAA2RpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdp" "bj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6" "eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYwIDYxLjEz" "NDc3NywgMjAxMC8wMi8xMi0xNzozMjowMCAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJo" "dHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlw" "dGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEu" "MC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVz" "b3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1N" "Ok9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo3ODMzNkUwMTNENjNFMDExQkUxM0E4NkVCMjc4" "M0JGMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxOEM3Qjc0MTYzM0QxMUUwQkQzOUY1QzYy" "REI3OThDQyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxOEM3Qjc0MDYzM0QxMUUwQkQzOUY1" "QzYyREI3OThDQyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M1IFdpbmRvd3Mi" "PiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3ODMzNkUwMTNE" "NjNFMDExQkUxM0E4NkVCMjc4M0JGMCIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3ODMzNkUw" "MTNENjNFMDExQkUxM0E4NkVCMjc4M0JGMCIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRG" "PiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pq0H8mEAAANESURBVHja7FvrbdswEKaM" "/C6YBQJNEGiCoFogqRdooQWaaoLCExiaQOgEjruAgiwQIRMIWSCCF3BN4BQwKsWXyBMD+gACAaIH" "77u77x6Uk+PxSGKWFYlczgCcATgDELmwLDAsBClPax2Cru86IwKQn9YB1guAQWMFgAciXxKApTkg" "Pa09JgihkuCv2AFgHpDFngbz2AFwIcyLnky8KSQAOgfPuORAqKDmyD4LAL3j5xWn9YcDI3gAXJBg" "IwEjWxqABuk99UR4tbE0Q79HYDPFv09dnPAlcJIkvjd3UHjIncN3raHX2A38Iir3L5Ct01rEOoUa" "4Sso1QFYWwVx7nQejg3AmwEhDkrngv9lQGw15/ZWgh0CssZnsGwOTZKJfNHtBpcmwUdFZ1gIlG/B" "3b1kl9CzAHPtG0WV+DDnBZgckEFM65LlTy53f5NUj7tZu0KYCBVQjh40V0U+jspyybWlra4YIzGm" "+IuB4q9wj4g4p+5J5wLgKwSqCWVk8iwoY3NJ1qhddJCuSTAFixUW94qUkY3KtqENRNYQ67aTnQ7b" "+q6yAOWGDy7Fu/VdeEAJRKerfK9Ifbw3ebf+HAByUHxD9E53Ouj0dC23wbC+aQhQsExh0NH1sOEa" "/s40vSqVVIYdJgAUrLEm5ud4NWxYd9bXgOKlBMzadb6+kCheELsDzA4mMK1l/UAl1u99A8Bc9C+x" "P7VlSt9abrSUEF/jw/oiEpwTX7WG8q0CgCnXv/fVqKwELzO1ILv2B2xSdd+bgm9QiE+VBluoy3WE" "Mfy1QUvaWnhVTTzKFAleamxsa2EZCl5CNcG6J55lJVGwFxARc8cr2JiNW/YGnnKLMqZRzANsBpQq" "2WvMBrx8PySaB2BPhQcAck1v2VmG2iQAvucBLoVyU6WKePqibAkAbIqkYa7o/DOaJUJgXG22XBpt" "BXMGOirUrl2GAPaXojbl8ZggszkAhPadoA4PmNYowXOA6eBlLM+xAJAK3L1x3RKHDEAx0RihVoJL" "xv4rmXEMpkuCoQIwZv+9r1I4RABS8v+ZIY0JgKeR8pmPcA8VgMqX8p8BgIJ8PCZHaYdDAWBQfkM8" "/o4oVABKX52e8UAkRjn/cjR2AP4JMAAjpl+Wmz6N/QAAAABJRU5ErkJggg==") # vi:ai:et:ts=2 sw=2 sts=2 fenc=utf-8
The school's mission is to provide a stimulating and engaging learning environment that recognizes student capabilities, fosters their connectedness to their immediate and broader communities, and offers them ways to contribute to school life. WAYS aims to meet or exceed state performance standards through a strong curriculum and a rigorous and relevant academic program that will be enhanced by the school's focus on science, mathematics and social responsibility. People who viewed Wisdom School also viewed these schools: Maria Regina School, St Anthony Of Padua School and Hawthorne Academy.
# Analyzes the performance of insertion sort and merge sort. # Copyright (C) 2015 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np import math import matplotlib.pyplot as plt import random import time start_time = time.time() def mergesort(A): """Sorts array A by recursive mergesort. Returns the sorted array A and the number of comparisons nc. """ n = len(A) nc = 0 # if n > 1 if n > 1: # copy A[0..floor(n/2) - 1] to B[0..floor(n/2) - 1] B = A[:int(n/2)] # copy A[floor(n/2)..n-1] to C[0..ceiling(n/2) - 1] C = A[int(n/2):] # Mergesort(B[0..floor(n/2) - 1]) B, first_comparisons = mergesort(B) nc += first_comparisons # Mergesort(C[0..ceiling(n/2) - 1]) C, second_comparisons = mergesort(C) nc += second_comparisons # Merge(B, C, A) A, merge_comparisons = merge(B, C, A) nc += merge_comparisons return A, nc def merge(B, C, A): """Merges two sorted arrays into one sorted array. Returns the sorted array A and the number of comparisons nc. """ # Initialize the number of inputs and the number of comparisons. nc = 0 p = len(B) q = len(C) # i <- 0; j <- 0; k <- 0 i = 0 j = 0 k = 0 # while i < p and j < q do while i < p and j < q: # if B[i] <= C[j] if B[i] <= C[j]: # A[k] <- B[i]; i <- i + 1 A[k] = B[i] i += 1 nc += 1 # else A[k] <- C[j]; j <- j + 1 else: A[k] = C[j] j += 1 nc += 1 # k <- k + 1 k += 1 # if i = p if i is p: # copy C[j..q - 1] to A[k..p + q - 1] A[k:] = C[j:] # else copy B[i..p - 1] to A[k..p + q -1] else: A[k:] = B[i:] return A, nc def insertionsort(A): """Sorts array A by insertionsort. Returns the sorted array A and the number of comparisons nc. """ # Initialize the number of inputs and the number of comparisons. n = len(A) nc = 0 # for i <- 1 to n - 1 do for i in range(len(A)): # v <- A[i] v = A[i] # j <- i - 1 j = i - 1 # while j >= 0 and A[j] > v do while j >= 0 and A[j] > v: nc += 1 # A[j + 1] <- A[j] A[j + 1] = A[j] # j <- j - 1 j -= 1 # A[j + 1] <- v A[j + 1] = v return A, nc def random_array(n): """Randomly creates an array of integers of size n.""" # Initialize the array. array = [] # For every number in the range of n: for number in range(n): # Randomly create an integer. integer = random.randint(0, n) # Append this integer to the array. array.append(integer) # Return the array. return array def sorted_array(n): """Randomly creates a sorted array of size n.""" # Create a random array. array = random_array(n) # Sort the random array. array.sort() # Return the array. return array def reversed_array(n): """Randomly creates a sorted array in reverse order of size n.""" # Create a random array. array = random_array(n) # Sort the random array. array.sort() # Reverse the sorted array. array.reverse() # Return the array. 
return array def FirstPlot(x,y,c): logx = [] logy = [] for i in x: logx.append(math.log10(i)) for i in y: logy.append(math.log10(i)) print('Plotting now!') plt.plot(logx,logy,label=c) plt.legend() print('Done plotting!') input_sizes = [10, 30, 100, 300, 1000, 3000, 10000] mergesort_random_nc = [] mergesort_sorted_nc = [] mergesort_reversed_nc = [] for n in input_sizes: mergesort_random_nc.append(mergesort(random_array(n))[1]) mergesort_sorted_nc.append(mergesort(sorted_array(n))[1]) mergesort_reversed_nc.append(mergesort(reversed_array(n))[1]) insertionsort_random_nc = [] insertionsort_sorted_nc = [] insertionsort_reversed_nc = [] for n in input_sizes: insertionsort_random_nc.append(insertionsort(random_array(n))[1]) insertionsort_sorted_nc.append(insertionsort(sorted_array(n))[1]) insertionsort_reversed_nc.append(insertionsort(reversed_array(n))[1]) print("Input sizes:", input_sizes, "\n") print("Number of comparisons using merge sort for corresponding input sizes:") print("Random:", mergesort_random_nc) print("Sorted:", mergesort_sorted_nc) print("Reversed:", mergesort_reversed_nc, "\n") print("Number of comparisons using insertion sort for corresponding input sizes:") print("Random:", insertionsort_random_nc) print("Sorted:", insertionsort_sorted_nc) print("Reversed:", insertionsort_reversed_nc, "\n") end_time = time.time() runtime_minutes = (end_time - start_time) / 60 print("Runtime in minutes:", runtime_minutes)
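# --- Reference values for a rough sanity check (illustrative addition) --------
# Classic yardsticks: insertion sort performs about n*(n-1)/2 comparisons on
# reversed input, and merge sort stays near n*log2(n). Note that the insertion
# sort counter above only tallies comparisons made inside the shifting loop, so
# its sorted-input figures will sit well below n - 1.
print("Approximate expected comparison counts (worst-case insertion, ~merge):")
for n in input_sizes:
    worst_insertion = n * (n - 1) // 2
    approx_merge = int(n * math.log2(n))
    print(n, worst_insertion, approx_merge)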
skinperfect primer spf30 – perfect for parties now and all year long! Smooth fine lines, brighten and prime for flawless skin, and prep for make-up application. Velvety formula with Soy Protein helps even out skin texture, creating a smoother surface. Pearl Powder and natural earth minerals provide a neutral tint for visible luminosity, radiance and more even skin tone. A potent peptide helps support skin firmness as broad spectrum sunscreens help shield against sun damage triggered by UV light. Wear alone over your moisturizer, or after moisturizer application and before foundation, to help decrease the appearance of fine lines for a lasting make-up finish.
# # This file is part of ravstack. Ravstack is free software available under # the terms of the MIT license. See the file "LICENSE" that was provided # together with this source file for the licensing terms. # # Copyright (c) 2015 the ravstack authors. See the file "AUTHORS" for a # complete list. """Ravello Ironic command-line utility. Usage: ravstack [options] setup ravstack [options] proxy-create ravstack [options] node-create [-c <cpus>] [-m <memory>] [-D <disk>] [-n <count>] ravstack [options] node-dump ravstack [options] node-list [--all [--cached]] ravstack [options] node-start <node> ravstack [options] node-stop <node> ravstack [options] node-reboot <node> ravstack [options] node-get-boot-device <node> ravstack [options] node-set-boot-device <node> <bootdev> ravstack [options] node-get-macs <node> [--cached] ravstack [options] fixup ravstack [options] endpoint-resolve <port> [-t <timeout>] [--start-port <base>] [--num-ports <count>] ravstack --help Command help: setup Create ravstack directories and config file. proxy-create Create SSH -> Ravello API proxy. node-create Create a new node. node-dump Dump node definitions to specified file. node-list List powered on nodes. (--all lists all nodes) node-start Start a node. node-stop Stop a node. node-reboot Reboot a node. node-get-boot-device Return boot device for <node>. node-set-boot-device Set boot device for <node> to <bootdev>. The boot device may be "hd" or "network". node-get-macs Return MAC addresses for <node>. fixup Fix Ravello and OS config after one or more nodes were deployed. endpoint-resolve Resolve an endpoint for a local service using a public IP address or under portmapping. Options: -d, --debug Enable debugging. -v, --verbose Be verbose. --log-stderr Show logs on standard error. -u <username>, --username=<username> Ravello API username. -p <password>, --password=<password> Ravello API password. -a <application>, --application=<application> The Ravello application name. --all List all nodes. --cached Allow use of cached information. Options for `node-create`: -c <cpus>, --cpus=<cpus> The number of CPUs. [default: 2] -m <memory>, --memory=<memory> The amount of memory in MB. [default: 8192] -D <disk>, --disk=<disk> The size of the disk in GB. [default: 60] -n <count>, --count=<count> The number of nodes to create. [default: 1] Options for `endpoint-resolve`: -t <timeout>, --timeout <timeout> Timeout. [default: 2] --start-port <port> Starting port for endpoint resolution with portmapping. [default: 10000] --num-ports <count> Number of ports to scan for endpoint resulution with portmapping. [default: 50] """ from __future__ import absolute_import, print_function import docopt from . 
import factory, setup, node, proxy, fixup, endpoint, runtime from .runtime import CONF def main(): """Ravstack main entry point.""" args = docopt.docopt(__doc__) CONF.update_from_args(args) CONF.update_to_env() runtime.setup_logging() # logging configuration might have changed env = factory.get_environ(args) if args['setup']: setup.do_setup(env) elif args['proxy-create']: proxy.do_create(env) elif args['node-create']: node.do_create(env) elif args['node-dump']: node.do_dump(env) elif args['node-list'] and not args.get('--all'): node.do_list_running(env, False) elif args['node-list']: node.do_list_all(env) elif args['node-start']: node.do_start(env, args['<node>']) elif args['node-stop']: node.do_stop(env, args['<node>']) elif args['node-reboot']: node.do_reboot(env, args['<node>']) elif args['node-get-boot-device']: node.do_get_boot_device(env, args['<node>']) elif args['node-set-boot-device']: node.do_set_boot_device(env, args['<node>'], args['<bootdev>']) elif args['node-get-macs']: node.do_get_macs(env, args['<node>'], False) elif args['fixup']: fixup.do_fixup(env) elif args['endpoint-resolve']: endpoint.do_resolve(env, args['<port>']) def run_main(): """Setuptools entry point.""" runtime.run_main(main) if __name__ == '__main__': run_main()
import pymongo from pymongo import MongoClient, UpdateOne from wowlib import wowapi, binary_search import time client = MongoClient() print ('Initiating db connection and getting wow data') db = client.wowdoc data = wowapi.auctionurl('Shadow-Council') posts = db.auctiondata #create counters count = 0 time = time.time() #create list for operations operations = {} #create bulk upsert bulk = posts.initialize_ordered_bulk_op() print("Connected") #create list of disctinct auctions in memory print("Creating Auction List") auction_list = [] auctions = posts.find().distinct('auc') for auction in auctions: auction_list.append(auction) print("Auction List Created") #Iterate through data returned from wowapi for auction in data: row = data[count] #create new json, this allows you to add data not returned from wowapi newrow = {'buyout': row['buyout'], 'timeLeft': row['timeLeft'], 'quantity': row['quantity'], 'seed': row['seed'], 'username': {'name':row['owner'], 'server':row['ownerRealm']}, 'owner': row['owner'], 'item': row['item'], 'rand': row['rand'], 'bid': row['bid'], 'context': row['context'], 'auc': row['auc'], 'ownerRealm': row['ownerRealm'], 'viewtime': time, 'timeupdated': time, 'itemname': "-----<None Defined>-----", 'status':"Active", 'bidincrease': 'N', } count += 1 operations[str(newrow['auc'])]=newrow print("all auctions created in operations") posts.insert_one(operations) print ('new doc added')
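# --- Hypothetical completion of the bulk upsert (not the original behaviour) ---
# The script above prepares a bulk handle and an `operations` dict but finishes
# by inserting the whole dict as a single document. A per-auction upsert with
# the already-imported UpdateOne might look like this instead: one request per
# auction keyed on 'auc', sent in a single bulk_write round trip.
upsert_requests = [
    UpdateOne({'auc': row['auc']}, {'$set': row}, upsert=True)
    for row in operations.values()
]
if upsert_requests:
    result = posts.bulk_write(upsert_requests, ordered=False)
    print('matched %d, upserted %d' % (result.matched_count, len(result.upserted_ids)))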
Five years have passed. A small amount of continental-drift has occurred. The first ever Frisbee floats across a patch of grass in the United States of America, as Sputnik prepares for liftoff. That is literally all that has happened. Everything else is imagined. That is, except for Sir Kenneth Badger, the British ambassador in Belgium. The place? Bruges. The location within the place? A hotel foyer. A good one. Not one with towels you could carve. Today Sir Kenneth, his rampart chin thrust out accordingly, stands by my side, his smile broad and unflinching as the cameras flash. Caught, for posterity, in this thirty second charade of a handshake, I stare at the cameras, my smile a rictus of humiliation. Always with the pictures. Is there no end to it? You see, it all changed five years ago when I inadvertently gave the interview of the century. Sorry, I mean, THE INTERVIEW OF THE CENTURY. “Turn to the left a bit, Sir Kenneth,” says one of the photographers, his Belgian, or possible Flemish words translating in the air for the sake of continuity. Sir Kenneth does so. His hand in mine feels clammy, like the insides of a mango. Within my capacious mind I continue the self-flagellation that has bestridden me for the last half-a-decade. If I hadn’t given the interv…hang on…THE INTERVIEW OF THE CENTURY, then perhaps I’d be able to be like a normal person, and live quietly, being positive to my swarms of locusts and painting my pictures of young swans. But no, I haven’t painted a decent cygnet in ages, and, quite frankly, the locusts are acting all depressed. If that doesn’t say ‘sort it out’ I don’t know what does. Fade to a kind of putty colour. The attempt to save Belgium, and by extension the world itself, is underway. It’s only slightly spoiled by the fact I got on the wrong train and ended up in Germany. Damn both my poor knowledge of european languages, and a ticket-agency convinced it knows my destination better than I. I only just escaped from a trap. Romaine Badger, the erstwhile benefactor of Sir Kenneth’s legacy and vast personal wealth, has her minions seeking me throughout the continent. Only last week I savagely resisted a young Badger’s attempt on my life when he tried to re-enact a scene from The Omen, only to realise that chasing me on a child’s tricycle had only minimal chances of making me fall to my doom over a balcony. And what’s more, the staff of this cafe insist upon smiling at me in a friendly manner, which is crippling me emotionally. I cannot cope with the kindly and prompt service. I am English, and such efficiency and pleasance makes me suspicious. I sip at my espresso. I really must find a train to Belgium. I feel waffles and other traditional images of Belgian culture nagging at me. When one is assailed by images of Hercule Poirot, you know you have a job to do. I look at my vintage timepiece, given to me by the rotted corpse of H G Wells himself, and realise the time is approaching for action. Rising, I note with some alarm that a face I recognise appears around the corner. Romaine Badger herself. The face of a withered deer, an expression that curdled milk aspires to, and armed to the teeth with every manner of projectile weapon. She visibly flinched. I’d dug up that Dumbledore quote as if from nowhere. She quailed, turning and running when she saw I was preparing a quote from The Lion King. She’d be back, armed with better quotes – probably from Rocky or, perhaps, a bland Jennifer Aniston rom-com. 
Fade to ineffably ginger and with the words, “To be continued,” written, bizarrely, in Comic Sans font.
# Generated by Django 1.11.3 on 2017-12-12 20:16 import django.db.models.deletion import stdimage.models from django.db import migrations, models import course_discovery.apps.course_metadata.utils class Migration(migrations.Migration): dependencies = [ ('course_metadata', '0073_program_instructors'), ] operations = [ migrations.AlterField( model_name='course', name='image', field=stdimage.models.StdImageField(blank=True, help_text='Please provide a course preview image', null=True, upload_to=course_discovery.apps.course_metadata.utils.UploadToFieldNamePath('uuid', path='media/course/image')), ), migrations.AlterField( model_name='courseentitlement', name='partner', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Partner'), ), migrations.AlterField( model_name='person', name='profile_image', field=stdimage.models.StdImageField(blank=True, null=True, upload_to=course_discovery.apps.course_metadata.utils.UploadToFieldNamePath('uuid', path='media/people/profile_images')), ), ]
HOW DO I DOWNLOAD WHATSAPP TO MY SAMSUNG STAR GT-SA? Posted on Aug 26. Hello, it seems that WhatsApp is not supported by your device. See https://erzincanyenihayat.com from your mobile.
import json from django.shortcuts import redirect from django.http import Http404, HttpResponseRedirect, request from django.http import HttpResponse from django.views.generic.edit import CreateView from django.views.generic.edit import UpdateView from django.views.generic import ListView from django.views.generic import DetailView from django.urls import reverse_lazy from .models import PlanoTrabalho from .models import CriacaoSistema from .models import OrgaoGestor2 from .models import Conselheiro from .models import ConselhoCultural from .models import FundoCultura from .models import FundoDeCultura from .models import PlanoCultura from .models import PlanoDeCultura from .models import Componente from .models import ConselhoDeCultura from adesao.models import SistemaCultura from .forms import CriarComponenteForm from .forms import CriarFundoForm from .forms import CriarPlanoForm from .forms import CriarConselhoForm from .forms import DesabilitarConselheiroForm from .forms import CriarConselheiroForm from .forms import AlterarConselheiroForm from .forms import CriarOrgaoGestorForm from adesao.utils import atualiza_session class PlanoTrabalho(DetailView): model = SistemaCultura template_name = 'planotrabalho/plano_trabalho.html' def get_context_data(self, **kwargs): try: context = super(PlanoTrabalho, self).get_context_data(**kwargs) sistema_id = self.request.session['sistema_cultura_selecionado']['id'] context['sistema'] = SistemaCultura.objects.get(id=sistema_id) except: return context return context class CadastrarComponente(CreateView): model = Componente form_class = CriarComponenteForm def get_form_kwargs(self): kwargs = super(CadastrarComponente, self).get_form_kwargs() sistema_id = self.request.session['sistema_cultura_selecionado']['id'] self.sistema = SistemaCultura.objects.get(id=sistema_id) kwargs['sistema'] = self.sistema kwargs['logged_user'] = self.request.user return kwargs def get_success_url(self): return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}) def form_valid(self, form): super(CadastrarComponente, self).form_valid(form) sistema_atualizado = SistemaCultura.sistema.get(ente_federado__id=self.sistema.ente_federado.id) atualiza_session(sistema_atualizado, self.request) return redirect(reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id})) class CadastrarLegislacao(CadastrarComponente): template_name = 'planotrabalho/cadastrar_legislacao.html' def get_form_kwargs(self): kwargs = super(CadastrarLegislacao, self).get_form_kwargs() kwargs['tipo'] = 'legislacao' return kwargs class CadastrarPlanoDeCultura(CadastrarComponente): model = PlanoDeCultura form_class = CriarPlanoForm template_name = 'planotrabalho/cadastrar_plano.html' class CadastrarOrgaoGestor(CadastrarComponente): model = OrgaoGestor2 form_class = CriarOrgaoGestorForm template_name = 'planotrabalho/cadastrar_orgao.html' def get_form_kwargs(self): kwargs = super(CadastrarOrgaoGestor, self).get_form_kwargs() kwargs['tipo'] = 'orgao_gestor' return kwargs def form_valid(self, form): obj=super().form_valid(form) return HttpResponseRedirect('/adesao/home/') class CadastrarFundoDeCultura(CadastrarComponente): model = FundoDeCultura form_class = CriarFundoForm template_name = 'planotrabalho/cadastrar_fundo.html' def get_context_data(self, **kwargs): context = super(CadastrarFundoDeCultura, self).get_context_data(**kwargs) context['is_edit'] = False return context class CadastrarConselhoDeCultura(CadastrarComponente): model = ConselhoDeCultura form_class = 
CriarConselhoForm template_name = 'planotrabalho/cadastrar_conselho.html' class AlterarLegislacao(UpdateView): model = Componente form_class = CriarComponenteForm template_name = 'planotrabalho/cadastrar_legislacao.html' def get_form_kwargs(self): kwargs = super(AlterarLegislacao, self).get_form_kwargs() sistema_id = self.request.session['sistema_cultura_selecionado']['id'] self.sistema = SistemaCultura.objects.get(id=sistema_id) kwargs['tipo'] = 'legislacao' kwargs['sistema'] = self.sistema kwargs['logged_user'] = self.request.user return kwargs def get_context_data(self, **kwargs): context = super(AlterarLegislacao, self).get_context_data(**kwargs) context['is_edit'] = True return context def get_success_url(self): return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}) class AlterarPlanoCultura(UpdateView): model = PlanoDeCultura form_class = CriarPlanoForm template_name = 'planotrabalho/cadastrar_plano.html' def get_form_kwargs(self): kwargs = super(AlterarPlanoCultura, self).get_form_kwargs() sistema_id = self.object.plano.last().id self.sistema = SistemaCultura.objects.get(id=sistema_id) kwargs['sistema'] = self.sistema kwargs['logged_user'] = self.request.user kwargs['initial']['local_monitoramento'] = self.object.local_monitoramento kwargs['initial']['ano_inicio_curso'] = self.object.ano_inicio_curso kwargs['initial']['ano_termino_curso'] = self.object.ano_termino_curso kwargs['initial']['esfera_federacao_curso'] = self.object.esfera_federacao_curso kwargs['initial']['tipo_oficina'] = self.object.tipo_oficina kwargs['initial']['perfil_participante'] = self.object.perfil_participante kwargs['initial']['anexo_na_lei'] = self.object.anexo_na_lei kwargs['initial']['metas_na_lei'] = self.object.metas_na_lei if self.object.anexo_na_lei: kwargs['initial']['possui_anexo'] = True elif not self.object.anexo_na_lei and self.object.anexo and self.object.anexo.arquivo: kwargs['initial']['possui_anexo'] = True kwargs['initial']['anexo_lei'] = self.object.anexo.arquivo else: kwargs['initial']['possui_anexo'] = False if self.object.metas_na_lei: kwargs['initial']['possui_metas'] = True elif not self.object.metas_na_lei and self.object.metas and self.object.metas.arquivo: kwargs['initial']['possui_metas'] = True kwargs['initial']['arquivo_metas'] = self.object.metas.arquivo else: kwargs['initial']['possui_metas'] = False if self.object.local_monitoramento: kwargs['initial']['monitorado'] = True else: kwargs['initial']['monitorado'] = False if self.object.ano_inicio_curso: kwargs['initial']['participou_curso'] = True else: kwargs['initial']['participou_curso'] = False return kwargs def get_context_data(self, **kwargs): context = super(AlterarPlanoCultura, self).get_context_data(**kwargs) context['is_edit'] = True return context def get_success_url(self): return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}) class AlterarOrgaoGestor(UpdateView): model = OrgaoGestor2 form_class = CriarOrgaoGestorForm template_name = 'planotrabalho/cadastrar_orgao.html' def get_form_kwargs(self): kwargs = super(AlterarOrgaoGestor, self).get_form_kwargs() sistema_id = self.object.orgao_gestor.last().id self.sistema = SistemaCultura.objects.get(id=sistema_id) kwargs['tipo'] = 'orgao_gestor' kwargs['sistema'] = self.sistema kwargs['logged_user'] = self.request.user if self.sistema.orgao_gestor and self.sistema.orgao_gestor.perfil: kwargs['initial']['perfil'] = self.sistema.orgao_gestor.perfil if self.object.comprovante_cnpj is None: kwargs['initial']['possui_cnpj'] = 
False else: kwargs['initial']['possui_cnpj'] = True kwargs['initial']['comprovante_cnpj'] = self.object.comprovante_cnpj.arquivo kwargs['initial']['cnpj'] = self.sistema.orgao_gestor.cnpj kwargs['initial']['banco'] = self.sistema.orgao_gestor.banco kwargs['initial']['agencia'] = self.sistema.orgao_gestor.agencia kwargs['initial']['conta'] = self.sistema.orgao_gestor.conta kwargs['initial']['termo_responsabilidade'] = True return kwargs def get_context_data(self, **kwargs): context = super(AlterarOrgaoGestor, self).get_context_data(**kwargs) context['is_edit'] = True return context def get_success_url(self): return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}) class AlterarFundoCultura(UpdateView): model = FundoDeCultura form_class = CriarFundoForm template_name = 'planotrabalho/cadastrar_fundo.html' def get_form_kwargs(self): kwargs = super(AlterarFundoCultura, self).get_form_kwargs() sistema_id = self.object.fundo_cultura.last().id self.sistema = SistemaCultura.objects.get(id=sistema_id) kwargs['sistema'] = self.sistema kwargs['logged_user'] = self.request.user if self.sistema.legislacao and self.sistema.legislacao.arquivo == self.object.arquivo: kwargs['initial']['mesma_lei'] = True else: kwargs['initial']['mesma_lei'] = False if self.object.comprovante_cnpj is None: kwargs['initial']['possui_cnpj'] = False else: kwargs['initial']['possui_cnpj'] = True kwargs['initial']['comprovante'] = self.object.comprovante_cnpj.arquivo kwargs['initial']['banco'] = self.object.banco kwargs['initial']['agencia'] = self.object.agencia kwargs['initial']['conta'] = self.object.conta kwargs['initial']['termo_responsabilidade'] = True return kwargs def get_context_data(self, **kwargs): context = super(AlterarFundoCultura, self).get_context_data(**kwargs) context['is_edit'] = True return context def get_success_url(self): return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}) class AlterarConselhoCultura(UpdateView): model = ConselhoDeCultura form_class = CriarConselhoForm template_name = 'planotrabalho/cadastrar_conselho.html' def get_form_kwargs(self): kwargs = super(AlterarConselhoCultura, self).get_form_kwargs() sistema_id = self.object.conselho.first().id self.sistema = SistemaCultura.objects.get(id=sistema_id) kwargs['sistema'] = self.sistema kwargs['logged_user'] = self.request.user if self.object.lei: kwargs['initial']['arquivo_lei'] = self.object.lei.arquivo kwargs['initial']['data_publicacao_lei'] = self.object.lei.data_publicacao if self.sistema.legislacao and self.sistema.legislacao.arquivo == self.object.lei.arquivo: kwargs['initial']['mesma_lei'] = True else: kwargs['initial']['mesma_lei'] = False if self.object.arquivo: kwargs['initial']['possui_ata'] = True else: kwargs['initial']['possui_ata'] = False return kwargs def get_context_data(self, **kwargs): context = super(AlterarConselhoCultura, self).get_context_data(**kwargs) context['is_edit'] = True return context def get_success_url(self): return reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': self.sistema.id}) class CriarConselheiro(CreateView): form_class = CriarConselheiroForm template_name = 'planotrabalho/cadastrar_conselheiros.html' def get_form_kwargs(self): kwargs = super(CriarConselheiro, self).get_form_kwargs() kwargs['conselho'] = self.request.session['sistema_cultura_selecionado']['conselho'] return kwargs def get_success_url(self): return reverse_lazy('planotrabalho:listar_conselheiros') class ListarConselheiros(ListView): model = Conselheiro template_name = 
'planotrabalho/listar_conselheiros.html' paginate_by = 12 def get_queryset(self): q = self.request.session['sistema_cultura_selecionado']['conselho'] conselheiros = Conselheiro.objects.filter(conselho__id=q, situacao=1) # 1 = Habilitado return conselheiros class AlterarConselheiro(UpdateView): form_class = AlterarConselheiroForm template_name = 'planotrabalho/cadastrar_conselheiros.html' def get_queryset(self): pk = self.kwargs['pk'] conselheiro = Conselheiro.objects.filter(id=pk) return conselheiro def get_success_url(self): return reverse_lazy('planotrabalho:listar_conselheiros') class DesabilitarConselheiro(UpdateView): form_class = DesabilitarConselheiroForm template_name = 'planotrabalho/desabilitar_conselheiro.html' def get_queryset(self): pk = self.kwargs['pk'] conselheiro = Conselheiro.objects.filter(id=pk) return conselheiro def get_success_url(self): return reverse_lazy('planotrabalho:listar_conselheiros') def get_conselheiros(request): if request.is_ajax() and request.GET.get('id', None): pk = request.GET.get('id') conselheiros = Conselheiro.objects.filter(conselho__pk=pk) response = {} response['conselheiros'] = list(conselheiros.values_list('nome', 'email', 'segmento')) return HttpResponse( json.dumps(response), content_type="application/json") else: raise Http404()
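The class-based views above are resolved through named URL routes: their success URLs are built with reverse_lazy('planotrabalho:planotrabalho') and reverse_lazy('planotrabalho:listar_conselheiros'). Below is a minimal sketch of how a urls.py for this app could look; the route strings and the PlanoTrabalho detail view name are hypothetical, and only the URL names are taken from the reverse_lazy() calls in the views.

# Hypothetical urls.py for the planotrabalho app (Django 2+ path() syntax assumed).
from django.urls import path

from . import views

app_name = 'planotrabalho'

urlpatterns = [
    # 'PlanoTrabalho' is a placeholder name for the detail view targeted by
    # reverse_lazy('planotrabalho:planotrabalho', kwargs={'pk': ...}); that view is not shown above.
    path('<int:pk>/', views.PlanoTrabalho.as_view(), name='planotrabalho'),
    path('conselheiros/', views.ListarConselheiros.as_view(), name='listar_conselheiros'),
    path('conselheiros/cadastrar/', views.CriarConselheiro.as_view(), name='cadastrar_conselheiro'),
    path('conselheiros/<int:pk>/alterar/', views.AlterarConselheiro.as_view(), name='alterar_conselheiro'),
    path('conselheiros/<int:pk>/desabilitar/', views.DesabilitarConselheiro.as_view(), name='desabilitar_conselheiro'),
    path('conselheiros/json/', views.get_conselheiros, name='get_conselheiros'),
]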
Fitness Boxing | Introduction Trailer – Nintendo Switch. Get moving with fun, boxing-based rhythmic exercises set to the instrumental beats of songs by popular artists. You can personalize your workouts by selecting from different fitness goals. Train your way, whether you’re on your own, with a friend, at home, or on the go! Now you can work out anytime, anywhere on the Nintendo Switch system! Start establishing a routine by downloading the free, 3-day Fitness Boxing Trial Version, available now on the Nintendo eShop on Nintendo Switch.
# Copyright 2017 AT&T Intellectual Property. All other rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from deckhand.common import utils from deckhand.control import common from deckhand import types class ViewBuilder(common.ViewBuilder): """Model revision API responses as a python dictionary.""" _collection_name = 'revisions' def list(self, revisions): resp_body = { 'count': len(revisions), 'results': [] } for revision in revisions: body = {'tags': set(), 'buckets': set()} rev_documents = revision.pop('documents') for attr in ('id', 'created_at'): body[utils.to_camel_case(attr)] = revision[attr] body['tags'].update([t['tag'] for t in revision['tags']]) body['buckets'].update( [d['bucket_name'] for d in rev_documents]) body['tags'] = sorted(body['tags']) body['buckets'] = sorted(body['buckets']) resp_body['results'].append(body) return resp_body def show(self, revision): """Generate view for showing revision details. Each revision's documents should only be validation policies. """ validation_policies = [] tags = collections.OrderedDict() success_status = 'success' for vp in [d for d in revision['documents'] if d['schema'].startswith(types.VALIDATION_POLICY_SCHEMA)]: validation_policy = {} validation_policy['name'] = vp.get('name') validation_policy['url'] = self._gen_url(vp) try: validation_policy['status'] = vp['data']['validations'][0][ 'status'] except KeyError: validation_policy['status'] = 'unknown' validation_policies.append(validation_policy) if validation_policy['status'] != 'success': success_status = 'failed' for tag in revision['tags']: tags.setdefault(tag['tag'], tag['data']) buckets = sorted( set([d['bucket_name'] for d in revision['documents']])) return { 'id': revision.get('id'), 'createdAt': revision.get('created_at'), 'url': self._gen_url(revision), 'validationPolicies': validation_policies, 'status': success_status, 'tags': dict(tags), 'buckets': buckets }
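As a rough usage sketch (not part of the original module), the snippet below shows the revision dictionary shape that ViewBuilder.list() consumes and the kind of response it builds. The sample values are invented, and it assumes the common.ViewBuilder base class can be instantiated without arguments.

# Invented sample data matching the keys list() reads: 'id', 'created_at',
# 'tags' (a list of {'tag': ...}) and 'documents' (a list of {'bucket_name': ...}).
revisions = [
    {
        'id': 1,
        'created_at': '2017-06-01T00:00:00Z',
        'tags': [{'tag': 'foo'}, {'tag': 'bar'}],
        'documents': [{'bucket_name': 'site'}, {'bucket_name': 'global'}],
    },
]

builder = ViewBuilder()
listing = builder.list(revisions)
# listing should look roughly like:
# {'count': 1,
#  'results': [{'tags': ['bar', 'foo'], 'buckets': ['global', 'site'],
#               'id': 1, 'createdAt': '2017-06-01T00:00:00Z'}]}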
Bohemian jewelry is all the rage in Hollywood. Stars pay literally hundreds of dollars to get their bohemian jewelry fix, but you don't need to spend more than a few bucks - if any - to tote around a beautiful piece. Check out this video to learn how to fabricate - quite literally - a handwoven bohemian cloth bracelet. Use any colors of cloth you like. Make your own bohemian handwoven bracelet! Use strips of fabric, lace, ribbon, strings of beads or chains.
import random import sys import numpy from Scripts.include.package import * #---------------------------------------------------------------------------------------------- # # Fault Class # #---------------------------------------------------------------------------------------------- class fault: location = None bitwidth = None Type = None mean_time = None std_dev = None shut_down_time = None def __init__(self, loc, width, fault_type, mean_time, std_dev, shut_down_time): if width > 1: random_position = random.randint(0, width-1) self.location = loc+"("+str(random_position)+")" else: self.location = loc self.bitwidth = width self.Type = fault_type self.mean_time = mean_time self.std_dev = std_dev self.shut_down_time = shut_down_time def report(self): """ The fault reports its location, signal width, type, MTBF, STD_Dev and shutdown time! """ print "Location: ", self.location, "\twidth: ", self.bitwidth, "\tfault_type: ", '%5s' %self.Type,\ "\tMTBF: ", self.mean_time, "\tstd deviation: ", self.std_dev , "\tshutdown time", \ self.shut_down_time #---------------------------------------------------------------------------------------------- # # Other functions # #---------------------------------------------------------------------------------------------- def report_faults(fault_list): """ Reports all the faults in the fault list """ print "---------------------------------------" print "fault injection points:" for fault in fault_list: fault.report() print "---------------------------------------" #---------------------------------------------------------------------------------------------- # Generating signals for different modules # for this purpose we only consider fault injection points marked with X: # # .-------------. # .----> | Checkers | <---. # | | Module | | # | '-------------' | # | ^ | # | | | # | X | # | .-------------. 
| # | | Module | | # -----o----->| under |--X--o-------> # | check | # '-------------' # #---------------------------------------------------------------------------------------------- def list_all_the_links(network_size): """ takes the network size and returns a list of all the RX signals in the network """ list_of_ports = [] list_of_widths = [] for i in range(0, network_size*network_size): list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_L") list_of_widths.append(32) if i/network_size != 0: list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_N") list_of_widths.append(32) if i/network_size != network_size-1: list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_S") list_of_widths.append(32) if i%network_size != 0: list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_W") list_of_widths.append(32) if i%network_size != network_size-1: list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_E") list_of_widths.append(32) return list_of_ports, list_of_widths def list_all_the_lbdr_signals(network_size): """ takes the network size and returns a list of all the relevant LBDR signals in the network """ list_of_ports = [] list_of_widths = [] # Every router has the Local port for i in range(0, network_size*network_size): # internal signals of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:N1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:E1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:W1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:S1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_N_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_E_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_W_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_S_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_L_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:grants") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:packet_drop_in") # output signal(s) of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:packet_drop_order") list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # for i in range(0, network_size*2): if i/network_size != 0: # has port N # internal signals of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:N1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:E1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:W1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:S1") 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_N_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_E_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_W_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_S_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_L_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:grants") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:packet_drop_in") # output signal(s) of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:packet_drop_order") list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] if i/network_size != network_size-1: # has port S # internal signals of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:N1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:E1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:W1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:S1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_N_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_E_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_W_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_S_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_L_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:grants") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:packet_drop_in") # output signal(s) of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:packet_drop_order") list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] if i%network_size != 0: # has port W # internal signals of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:N1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:E1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:W1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:S1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_N_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_E_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_W_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_S_in") 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_L_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:grants") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:packet_drop_in") # output signal(s) of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:packet_drop_order") list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] if i%network_size != network_size-1: # has port E # internal signals of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:N1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:E1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:W1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:S1") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_N_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_E_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_W_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_S_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_L_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:grants") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:packet_drop_in") # output signal(s) of LBDR with packet drop list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:packet_drop_order") list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] return list_of_ports, list_of_widths def list_all_the_arbiter_signals(network_size): """ takes the network size and returns a list of all the relevant arbiter signals in the network """ list_of_ports = [] list_of_widths = [] # Every router has the Local port for i in range(0, network_size*network_size): # Output signals of Allocator related to output N list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_L") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig. 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_L") # Internal signals of Allocator related to output N list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_L_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_L_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_L") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_L") # Input E requesting Output L ?! list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_L") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_L") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_L") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_N_sig") # ?? list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_E_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_W_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_S_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_L_sig") list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # for i in range(0, network_size*2): if i/network_size != 0: # has port N # Output signals of Allocator related to output N list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig. 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_L") # Internal signals of Allocator related to output N list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_N_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_N_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_N") # Input E requesting Output N ?! list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_N_sig") # ?? list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_E_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_W_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_S_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_L_sig") list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] if i/network_size != network_size-1: # has port S # Output signals of Allocator related to output S list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig. 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_L") # # Internal signals of Allocator related to output S list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_S_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_S_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_S") # Input E requesting Output S ?! list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_N_sig") # ?? list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_E_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_W_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_S_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_L_sig") list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] if i%network_size != 0: # has port W # Output signals of Allocator related to output W list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig. 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_L") # # Internal signals of Allocator related to output W list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_W_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_W_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_W") # Input E requesting Output W ?! list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_N_sig") # ?? list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_E_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_W_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_S_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_L_sig") list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] if i%network_size != network_size-1: # has port E # Output signals of Allocator related to output E list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig. 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_W") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_S") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_L") # # Internal signals of Allocator related to output E list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_E_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_E_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_E") # Input W requesting Output E ?! list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_E") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_N_sig") # ?? list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_E_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_W_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_S_sig") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_L_sig") list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] return list_of_ports, list_of_widths def list_all_the_fifo_signals(network_size): """ takes the network size and returns a list of all the relevant FIFO signals in the network """ list_of_ports = [] list_of_widths = [] # Every router has the Local port for i in range(0, network_size*network_size): # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_1") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_2") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_3") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_4") # Internal signals of FIFO list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_pointer_in") 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:credit_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:empty") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:full") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit_counter") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit_counter_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:state_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:state_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fault_info_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fault_info_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:faulty_packet_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:faulty_packet_in") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:flit_type") -- flit_type is an alias list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_fake_flit") list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1] # for i in range(0, network_size*2): if i/network_size != 0: # has port N # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_1") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_2") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_3") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_4") # Internal signals of FIFO list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:credit_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:empty") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:full") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_en") 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit_counter") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit_counter_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:state_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:state_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fault_info_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fault_info_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:faulty_packet_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:faulty_packet_in") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:flit_type") -- flit_type is an alias list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_fake_flit") list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1] if i/network_size != network_size-1: # has port S # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_1") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_2") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_3") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_4") # Internal signals of FIFO list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:credit_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:empty") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:full") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit_counter") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit_counter_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:state_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:state_in") 
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fault_info_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fault_info_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:faulty_packet_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:faulty_packet_in") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:flit_type") -- flit_type is an alias list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_fake_flit") list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1] if i%network_size != 0: # has port W # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_1") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_2") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_3") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_4") # Internal signals of FIFO list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:credit_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:empty") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:full") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit_counter") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit_counter_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:state_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:state_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fault_info_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fault_info_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:faulty_packet_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:faulty_packet_in") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:flit_type") -- 
flit_type is an alias list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_fake_flit") list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1] if i%network_size != network_size-1: # has port E # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_1") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_2") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_3") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_4") # Internal signals of FIFO list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_pointer") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_pointer_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:credit_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:empty") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:full") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_en") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit_counter") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit_counter_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:state_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:state_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fault_info_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fault_info_in") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:faulty_packet_out") list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:faulty_packet_in") # list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:flit_type") -- flit_type is an alias list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_fake_flit") list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1] return list_of_ports, list_of_widths def generate_links_dictionary(network_size, sim_time): """ This function generates random faults on all RX signals of the network """ list_of_ports = [] list_of_widths = [] ports, widths = list_all_the_links(network_size) list_of_ports += ports list_of_widths += widths # ports, widths = list_all_the_lbdr_signals(network_size) # 
list_of_ports += ports # list_of_widths += widths # ports, widths = list_all_the_fifo_signals(network_size) # list_of_ports += ports # list_of_widths += widths # ports, widths = list_all_the_arbiter_signals(network_size) # list_of_ports += ports # list_of_widths += widths random.seed(FAULT_RANDOM_SEED) fault_list = [] for item in list_of_ports: item_index = list_of_ports.index(item) width = list_of_widths[item_index] # fault_type = random.choice(["T", "P", "I", "T->P", "T->I"]) fault_type = random.choice(["T"]) shut_down_time = None std_dev = None if fault_type == "T": # Transient fault: map the chosen frequency class to its fault rate frequency = random.choice(["H", "M", "L"]) if frequency == "H": mean_time = int((1000000000/Fault_Per_Second)/HIGH_FAULT_RATE) elif frequency == "M": mean_time = int((1000000000/Fault_Per_Second)/MEDIUM_FAULT_RATE) else: mean_time = int((1000000000/Fault_Per_Second)/LOW_FAULT_RATE) std_dev = int(mean_time*0.1+1) elif fault_type == "I" or fault_type == "T->I": # Intermittent fault or transient to intermittent mean_time = int(MTB_INTERMITTENT_BURST) std_dev = int(mean_time*0.1+1) elif fault_type == "P": # it's a Permanent fault mean_time = None std_dev = None shut_down_time = random.randint(int(sim_time*0.1), int(sim_time*0.9)) elif fault_type == "T->P": # Transient goes to Intermittent and then to Permanent mean_time = int(1000000000/Fault_Per_Second) shut_down_time = random.randint(int(sim_time*0.1), int(sim_time*0.9)) std_dev = int(mean_time*0.1+1) new_fault = fault(item, width, fault_type, mean_time, std_dev, shut_down_time) fault_list.append(new_fault) report_faults(fault_list) return fault_list def parse_fault_info_file(file_path): """ If you want to feed the fault info from a file... the file lines should be organized like this: fault_location: signal_width fault_type MTBF std_deviation shutdown_time fault_location: the signal bit that you want to inject the fault on. signal_width: The width of the signal that you intend to inject the bit-flip in fault_type: should be chosen from the following list: * T : Transient * I : Intermittent * P : Permanent * T->P : Transient to Intermittent to Permanent * T->I : Transient to Intermittent MTBF: Mean time between the faults std_deviation: Standard deviation used for generating faults shutdown_time: Time in ns when the signal would be permanently faulty; only used when you need a permanent fault, otherwise "None". Example: tb_network_2x2:NoC:R_0:RX_L(21) 32 I 1000 101 None """ fault_list = [] fault_info_file = open(file_path, 'r') line = fault_info_file.readline() while line != "": split_line = line.split() fault_location = split_line[0] signal_width = int(split_line[1]) fault_type = split_line[2] fault_MTBF = split_line[3] fault_STD = split_line[4] shut_down_time = split_line[5] new_fault = fault(fault_location, signal_width, fault_type, fault_MTBF, fault_STD, shut_down_time) fault_list.append(new_fault) line = fault_info_file.readline() return fault_list #---------------------------------------------------------------------------------------------- # # Generating the actual do file. 
# #---------------------------------------------------------------------------------------------- def generate_fault_injection_do(file_path, sim_time, sim_end, fault_list): """ Generates a do file for modelsim for injecting the faults fault_path: string : path to the fault_inject.do sim_time: integer : How long do you want to inject faults in the simulation ns sim_end: integer : end of simulation fault_list: list : list of fault objects for injection the generated faults would look like these: *T: ___|____________|____________|____________|____________|____________|____________|____________| Transient faults happen periodically with a normal distribution with mean time between faults and a standard deviation *I: ____________________________||||||||||______________________________________||||||||||_________ Intermittent faults happen in bursts periodically with a normal distribution with mean time between faults and a standard deviation. each burst injects 10 stuck at faults. *P: __________________________________________________|'''''''''''''''''''''''''''''''''''''''''''' Permanent faults happen right after the specified shutdown time. *T->I: ___|____________|____________|____________||||||||||____________________________||||||||||_____ first it behaves as Transient, then becomes intermittent. For transient MTBF and Std_Dev it uses the specified values in the fault object. for intermittent faults it uses the values specified in package file. *T->P: ___|____________|____________|____________||||||||||______________________|'''''''''''''''''''' First it behaves as transient, then turns into intermittent and then permanent. For transient MTBF and Std_Dev it uses the specified values in the fault object. for intermittent faults it uses the values specified in package file. for becomming permanent, it uses the shutdown time specified in the fault object. 
""" list_of_links = fault_list delay = 1000000000/Fault_Per_Second deviation = int(delay/10) if deviation == 0: deviation = 1 fault_inject_file = open(file_path+'/fault_inject.do', 'w') permanently_faulty_locations = [] temp_dict = {} for item in list_of_links: if item.Type == "T": fault_time = 0 time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault while fault_time < sim_time: if int(fault_time) in temp_dict.keys(): temp_dict[int(fault_time)].append(item) else: temp_dict[int(fault_time)] = [item] time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault if item.Type == "I": fault_time = 0 time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault while fault_time < sim_time: for event in range(0, EVENTS_PER_BURST): if int(fault_time+event) in temp_dict.keys(): temp_dict[int(fault_time+event)].append(item) else: temp_dict[int(fault_time+event)] = [item] time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault if item.Type == "T->I": permanently_faulty_locations.append(item) fault_time = 0 time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault while fault_time < int(sim_time*0.5): if int(fault_time) in temp_dict.keys(): temp_dict[int(fault_time)].append(item) else: temp_dict[int(fault_time)] = [item] time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \ int(MTB_INTERMITTENT_BURST*0.1+1)) fault_time += time_until_next_fault while fault_time+EVENTS_PER_BURST < int(sim_time): for event in range(0, EVENTS_PER_BURST): if int(fault_time+event) in temp_dict.keys(): temp_dict[int(fault_time+event)].append(item) else: temp_dict[int(fault_time+event)] = [item] time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \ int(MTB_INTERMITTENT_BURST*0.1+1)) fault_time += time_until_next_fault if item.Type == "P": permanently_faulty_locations.append(item) if item.Type == "T->P": permanently_faulty_locations.append(item) fault_time = 0 time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault while fault_time < int(item.shut_down_time*0.5): if int(fault_time) in temp_dict.keys(): temp_dict[int(fault_time)].append(item) else: temp_dict[int(fault_time)] = [item] time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev) fault_time += time_until_next_fault time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \ int(MTB_INTERMITTENT_BURST*0.1+1)) fault_time += time_until_next_fault while fault_time+EVENTS_PER_BURST < int(item.shut_down_time): for event in range(0, EVENTS_PER_BURST): if int(fault_time+event) in temp_dict.keys(): temp_dict[int(fault_time+event)].append(item) else: temp_dict[int(fault_time+event)] = [item] time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \ int(MTB_INTERMITTENT_BURST*0.1+1)) fault_time += time_until_next_fault fault_inject_file.write("#################################\n") current_time = 0 for i in range(0, sim_time): for permanent_fault_location in permanently_faulty_locations: if i == permanent_fault_location.shut_down_time: location = permanent_fault_location.location fault_inject_file.write("# ###################################################\n") 
fault_inject_file.write("# Shutting down signal: "+location+" for good!\n") fault_inject_file.write("force -drive sim/:"+location+" U 1ns\n") fault_inject_file.write("# ###################################################\n") if i in temp_dict.keys(): last_time = current_time current_time = i fault_inject_file.write("run "+str(current_time-last_time)+"ns\n") for item in temp_dict[i]: location = item.location if item.Type == "I" or item.Type == "T->I" or item.Type == "T->P": string = "force -drive sim/:"+location+" " + str(random.choice(["0", "1"])) string += " 0 ns -cancel 1ns" else: string = "force -drive sim/:"+location+" " + str(random.choice(["0", "1"])) random_start = random.randint(0, deviation) string += " "+str(random_start)+"ns -cancel "+str(random_start+1)+"ns" fault_inject_file.write(string+"\n") fault_inject_file.write("run "+str(sim_end-sim_time)+"ns\n") fault_inject_file.write("stop") fault_inject_file.close()
NATCHEZ, Miss., Feb. 13, 2017 /PRNewswire/ — Callon Petroleum Company (NYSE: CPE) (“Callon” or the “Company”) today announced the closing of its previously announced acquisition of oil and natural gas assets in the southern Delaware Basin from American Resource Development LLC. On February 13, 2017, the Company completed the acquisition of approximately 16,700 net surface acres in Ward and Pecos Counties, Texas, comprised of an initially disclosed amount of 16,098 net acres and an incremental 590 net acres acquired between signing and closing of the transaction that are either within or contiguous to the Ward County footprint. Inclusive of the incremental acreage and the purchase of the midstream assets of Ameredev Midstream Development LLC, total cash consideration paid for the acquisition was $633 million, subject to customary purchase price adjustments. The Corbets 34-149 2WA well, with a 10,000′ drilled lateral targeting the Lower Wolfcamp A, is currently flowing back in Ward County. In addition, a second 10,000′ drilled lateral targeting the Wolfcamp B is awaiting completion in an offsetting drilling unit. After closing of this transaction, Callon’s position in the Permian Basin now totals over 56,000 net surface acres. This news release contains “forward-looking statements” within the meaning of Section 27A of the Securities Act of 1933 and Section 21E of the Securities Exchange Act of 1934. Forward-looking statements include all statements regarding the implementation of the Company’s business plans and strategy, including future drilling plans, as well as statements including the words “believe,” “expect,” “plans” and words of similar meaning. These statements reflect the Company’s current views with respect to future events and performance. No assurances can be given, however, that these events will occur or that these projections will be achieved, and actual results could differ materially from those projected as a result of certain factors. Some of the factors which could affect our future results and could cause results to differ materially from those expressed in our forward-looking statements include the Company’s ability to realize the anticipated benefits of the acquisition, the volatility of oil and gas prices, ability to drill and complete wells, operational, regulatory and environment risks, our ability to finance our activities and other risks more fully discussed in our filings with the Securities and Exchange Commission, including our Annual Reports on Form 10-K, available on our website or the SEC’s website at www.sec.gov.
#!/usr/bin/env python """Estimating pairwise distances between sequences. """ from cogent.util import parallel, table, warning, progress_display as UI from cogent.maths.stats.util import Numbers from cogent import LoadSeqs, LoadTree from warnings import warn __author__ = "Gavin Huttley" __copyright__ = "Copyright 2007-2012, The Cogent Project" __credits__ = ["Gavin Huttley", "Peter Maxwell", "Matthew Wakefield"] __license__ = "GPL" __version__ = "1.5.3" __maintainer__ = "Gavin Huttley" __email__ = "[email protected]" __status__ = "Production" class EstimateDistances(object): """Base class used for estimating pairwise distances between sequences. Can also estimate other parameters from pairs.""" def __init__(self, seqs, submodel, threeway=False, motif_probs = None, do_pair_align=False, rigorous_align=False, est_params=None, modify_lf=None): """Arguments: - seqs: an Alignment or SeqCollection instance with > 1 sequence - submodel: substitution model object Predefined models can be imported from cogent.evolve.models - threeway: a boolean flag for using threeway comparisons to estimate distances. default False. Ignored if do_pair_align is True. - do_pair_align: if the input sequences are to be pairwise aligned first and then the distance will be estimated. A pair HMM based on the submodel will be used. - rigorous_align: if True the pairwise alignments are actually numerically optimised, otherwise the current substitution model settings are used. This slows down estimation considerably. - est_params: substitution model parameters to save estimates from in addition to length (distance) - modify_lf: a callback function for that takes a likelihood function (with alignment set) and modifies it. Can be used to configure local_params, set bounds, optimise using a restriction for faster performance. Note: Unless you know a priori your alignment will be flush ended (meaning no sequence has terminal gaps) it is advisable to construct a substitution model that recodes gaps. Otherwise the terminal gaps will significantly bias the estimation of branch lengths when using do_pair_align. 
""" if do_pair_align: self.__threeway = False else: # whether pairwise is to be estimated from 3-way self.__threeway = [threeway, False][do_pair_align] self.__seq_collection = seqs self.__seqnames = seqs.getSeqNames() self.__motif_probs = motif_probs # the following may be pairs or three way combinations self.__combination_aligns = None self._do_pair_align = do_pair_align self._rigorous_align = rigorous_align # substitution model stuff self.__sm = submodel self._modify_lf = modify_lf # store for the results self.__param_ests = {} self.__est_params = list(est_params or []) self.__run = False # a flag indicating whether estimation completed # whether we're on the master CPU or not self._on_master_cpu = parallel.getCommunicator().Get_rank() == 0 def __str__(self): return str(self.getTable()) def __make_pairwise_comparison_sets(self): comps = [] names = self.__seq_collection.getSeqNames() n = len(names) for i in range(0, n - 1): for j in range(i + 1, n): comps.append((names[i], names[j])) return comps def __make_threeway_comparison_sets(self): comps = [] names = self.__seq_collection.getSeqNames() n = len(names) for i in range(0, n - 2): for j in range(i + 1, n - 1): for k in range(j + 1, n): comps.append((names[i], names[j], names[k])) return comps def __make_pair_alignment(self, seqs, opt_kwargs): lf = self.__sm.makeLikelihoodFunction(\ LoadTree(tip_names=seqs.getSeqNames()), aligned=False) lf.setSequences(seqs.NamedSeqs) # allow user to modify the lf config if self._modify_lf: lf = self._modify_lf(lf) if self._rigorous_align: lf.optimise(**opt_kwargs) lnL = lf.getLogLikelihood() (vtLnL, aln) = lnL.edge.getViterbiScoreAndAlignment() return aln @UI.display_wrap def __doset(self, sequence_names, dist_opt_args, aln_opt_args, ui): # slice the alignment seqs = self.__seq_collection.takeSeqs(sequence_names) if self._do_pair_align: ui.display('Aligning', progress=0.0, current=.5) align = self.__make_pair_alignment(seqs, aln_opt_args) ui.display('', progress=.5, current=.5) else: align = seqs ui.display('', progress=0.0, current=1.0) # note that we may want to consider removing the redundant gaps # create the tree object tree = LoadTree(tip_names = sequence_names) # make the parameter controller lf = self.__sm.makeLikelihoodFunction(tree) if not self.__threeway: lf.setParamRule('length', is_independent = False) if self.__motif_probs: lf.setMotifProbs(self.__motif_probs) lf.setAlignment(align) # allow user modification of lf using the modify_lf if self._modify_lf: lf = self._modify_lf(lf) lf.optimise(**dist_opt_args) # get the statistics stats_dict = lf.getParamValueDict(['edge'], params=['length'] + self.__est_params) # if two-way, grab first distance only if not self.__threeway: result = {'length': stats_dict['length'].values()[0] * 2.0} else: result = {'length': stats_dict['length']} # include any other params requested for param in self.__est_params: result[param] = stats_dict[param].values()[0] return result @UI.display_wrap def run(self, dist_opt_args=None, aln_opt_args=None, ui=None, **kwargs): """Start estimating the distances between sequences. Distance estimation is done using the Powell local optimiser. This can be changed using the dist_opt_args and aln_opt_args. Arguments: - show_progress: whether to display progress. More detailed progress information from individual optimisation is controlled by the ..opt_args. 
- dist_opt_args, aln_opt_args: arguments for the optimise method for the distance estimation and alignment estimation respectively.""" if 'local' in kwargs: warn("local argument ignored, provide it to dist_opt_args or"\ " aln_opt_args", DeprecationWarning, stacklevel=2) ui.display("Distances") dist_opt_args = dist_opt_args or {} aln_opt_args = aln_opt_args or {} # set the optimiser defaults dist_opt_args['local'] = dist_opt_args.get('local', True) aln_opt_args['local'] = aln_opt_args.get('local', True) # generate the list of unique sequence sets (pairs or triples) to be # analysed if self.__threeway: combination_aligns = self.__make_threeway_comparison_sets() desc = "triplet " else: combination_aligns = self.__make_pairwise_comparison_sets() desc = "pair " labels = [desc + ','.join(names) for names in combination_aligns] def _one_alignment(comp): result = self.__doset(comp, dist_opt_args, aln_opt_args) return (comp, result) for (comp, value) in ui.imap(_one_alignment, combination_aligns, labels=labels): self.__param_ests[comp] = value def getPairwiseParam(self, param, summary_function="mean"): """Return the pairwise statistic estimates as a dictionary keyed by (seq1, seq2) Arguments: - param: name of a parameter in est_params or 'length' - summary_function: a string naming the function used for estimating param from threeway distances. Valid values are 'mean' (default) and 'median'.""" summary_func = summary_function.capitalize() pairwise_stats = {} assert param in self.__est_params + ['length'], \ "unrecognised param %s" % param if self.__threeway and param == 'length': pairwise = self.__make_pairwise_comparison_sets() # get all the distances involving this pair for a, b in pairwise: values = Numbers() for comp_names, param_vals in self.__param_ests.items(): if a in comp_names and b in comp_names: values.append(param_vals[param][a] + \ param_vals[param][b]) pairwise_stats[(a,b)] = getattr(values, summary_func) else: # no additional processing of the distances is required for comp_names, param_vals in self.__param_ests.items(): pairwise_stats[comp_names] = param_vals[param] return pairwise_stats def getPairwiseDistances(self,summary_function="mean", **kwargs): """Return the pairwise distances as a dictionary keyed by (seq1, seq2). Convenience interface to getPairwiseParam. Arguments: - summary_function: a string naming the function used for estimating param from threeway distances. Valid values are 'mean' (default) and 'median'. """ return self.getPairwiseParam('length',summary_function=summary_function, **kwargs) def getParamValues(self, param, **kwargs): """Returns a Numbers object with all estimated values of param. Arguments: - param: name of a parameter in est_params or 'length' - **kwargs: arguments passed to getPairwiseParam""" ests = self.getPairwiseParam(param, **kwargs) return Numbers(ests.values()) def getTable(self,summary_function="mean", **kwargs): """returns a Table instance of the distance matrix. Arguments: - summary_function: a string naming the function used for estimating param from threeway distances. 
Valid values are 'mean' (default) and 'median'.""" d = \ self.getPairwiseDistances(summary_function=summary_function,**kwargs) if not d: d = {} for s1 in self.__seqnames: for s2 in self.__seqnames: if s1 == s2: continue else: d[(s1,s2)] = 'Not Done' twoD = [] for s1 in self.__seqnames: row = [s1] for s2 in self.__seqnames: if s1 == s2: row.append('') continue try: row.append(d[(s1,s2)]) except KeyError: row.append(d[(s2,s1)]) twoD.append(row) T = table.Table(['Seq1 \ Seq2'] + self.__seqnames, twoD, row_ids = True, missing_data = "*") return T def getNewickTrees(self): """Returns a list of Newick format trees for supertree methods.""" trees = [] for comp_names, param_vals in self.__param_ests.items(): tips = [] for name in comp_names: tips.append(repr(name)+":%s" % param_vals[name]) trees.append("("+",".join(tips)+");") return trees def writeToFile(self, filename, summary_function="mean", format='phylip', **kwargs): """Save the pairwise distances to a file using phylip format. Other formats can be obtained by getting to a Table. If running in parallel, the master CPU writes out. Arguments: - filename: where distances will be written, required. - summary_function: a string naming the function used for estimating param from threeway distances. Valid values are 'mean' (default) and 'median'. - format: output format of distance matrix """ if self._on_master_cpu: # only write output from 0th node table = self.getTable(summary_function=summary_function, **kwargs) table.writeToFile(filename, format=format)
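A minimal usage sketch of this class, assuming it is importable as cogent.phylo.distance (as in PyCogent) and that the alignment file name is a hypothetical placeholder. Any of the predefined substitution models mentioned in the constructor docstring can stand in for HKY85.

from cogent import LoadSeqs
from cogent.evolve.models import HKY85
from cogent.phylo.distance import EstimateDistances

aln = LoadSeqs("my_alignment.fasta")          # hypothetical input alignment
d = EstimateDistances(aln, submodel=HKY85())
d.run()
print(d.getTable())                           # distance matrix as a Table
dists = d.getPairwiseDistances()              # {(seq1, seq2): distance, ...}
d.writeToFile("dists.phylip")                 # phylip-format output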
My document was created using a Mac, what do I need to do? What is the definition of applicant? What if my application materials are unsolicited? Are my application materials considered to be open records? What is GGC's policy on employment eligibility? What is GGC's policy on hiring persons with criminal convictions? Are former employees eligible for rehire? To begin the application process, candidates must apply for any open positions using our online, Georgia Gwinnett College web-based employment application system. Please do not mail or hand deliver any materials but download them into your account. Please select a user name and password that you will easily remember. You should write down your user name and password. You will need it to apply for other positions or check the status of your application the next time you visit the site. You will be asked to provide personal information such as name, address, phone number, email, etc. You will also be asked to provide information about your education and previous employment, as well as contact information regarding your employment history and references. Please gather this information before beginning the application. If you have already created an application with the GGC online employment system, and wish to update your information, please click on the "click here" link below to login with the user name and password that you used when you created your application. You will see this message: RETURNING USERS (Please Read this important message!). If you need to edit your application information before applying for a position, please click on the 'Edit Application' link on the left side of the page. You will not be allowed to change your application information after you have applied for a position. If you do not want to complete the application at this time, please click "CANCEL" at the bottom of the page. Everyone who applies for a position is required to create an application that includes certain information. Any required information is denoted with an asterisk (*). However, the more information you provide, the easier it will be to effectively evaluate your skills, abilities and qualifications. You will be able to submit a resume each time you apply for a position. There will be directions prompting you how and when to attach your resume. Please note that attaching a resume does not substitute for completing the application form. You must click "SAVE AND CONTINUE TO NEXT" or "SAVE AND STAY ON THIS PAGE" to save the information you have entered. If you close your browser prior to clicking "SAVE AND CONTINUE TO NEXT" or "SAVE AND STAY ON THIS PAGE," your application and account will be saved, but you will lose the information on the last page that was not saved. If you need assistance with converting files from Mac to PC, please visit the help page created by the National Teacher Training Institute. All jobs that are currently posted on our Georgia Gwinnett College (GGC) employment website are open. The status of your application will update as the hiring office reviews applications. Once an office "closes" a position, it means they are no longer accepting more applications, and the listing is removed from the job posting. It may be some time between the time an office closes the position and when they fill the position. All jobs must be posted for at least 10 working days before any offer can be made. 
However, for many positions in universities, there can be long lag times (sometimes up to 6 months or more) between the time a job is posted and the time it is filled. If it's still on the posting, it's still an active vacancy. If a hiring office wishes to interview you for a position, they will contact you directly. If you are not selected for the position, you will receive an email of regret. In addition to checking the status of your application, you can also search the current vacancies, update your application and change the password of your account. When you have finished accessing those areas of interest, always remember to log out. The employment application may contain your personal and private information. Should you have any questions or require assistance, please do not hesitate to contact Human Resources at 678.407.5070. You can also email us at [email protected]. To be considered an applicant, an individual must submit an expression of interest for a specific job, must possess the basic qualifications for the position (including the materials requested), and must at no point during the selection process prior to receiving an offer of employment from the contractor (employer) remove himself or herself from further consideration or otherwise indicate that s/he is no longer interested in the position. Unsolicited resumes, or materials requesting general consideration for any position, will not be considered to be applications or retained. Applications previously submitted are not automatically matched with new vacant positions. Applicants must request consideration and resubmit materials for each vacant position using our online, GGC web-based employment application system. Candidates must submit an Application for Employment for any position using our online, GGC web-based employment application system. All application materials (such as cover letters, letters of reference, transcripts, etc.) should be submitted at the time of application using the online GGC web-based employment application system and become the property of Georgia Gwinnett College. For each position for which you meet the minimum requirements, the application materials are viewable by the hiring department for their review. Yes, information provided with your application materials is subject to the Georgia Open Records Act (O.C.G.A. §50-18-70). By Board of Regents policy, convicted felons may not be employed by any unit of the University System of Georgia. Conviction of any criminal drug offense (even a misdemeanor) disqualifies a candidate for at least 2 years (1st offense) or at least 5 years (2nd offense). Former employees who have been discharged, or who resigned in lieu of discharge, for reasons of misconduct are not eligible for rehire at Georgia Gwinnett College. All positions at Georgia Gwinnett College require a Criminal Background Check. Only those positions which handle cash, checks or campus valuables, and P-card users, require Credit Checks. Checks may require up to three business days to complete.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from setuptools import setup, find_packages


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


def parse_requirements(filename):
    return [line.strip() for line in read(filename).strip().split('\n') if line.strip()]


pkg = {}
exec(read('korona/__pkg__.py'), pkg)

readme = read('README.rst')
changelog = read('CHANGELOG.rst')
requirements = parse_requirements('requirements.txt')

setup(
    name=pkg['__package_name__'],
    version=pkg['__version__'],
    url=pkg['__url__'],
    license=pkg['__license__'],
    author=pkg['__author__'],
    author_email=pkg['__email__'],
    description=pkg['__description__'],
    long_description=readme + '\n\n' + changelog,
    packages=find_packages(exclude=['tests', 'tasks']),
    install_requires=requirements,
    keywords='make html built html create html korona html maker html build',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ]
)
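This setup script pulls all of its metadata out of korona/__pkg__.py via exec(). A minimal sketch of what that file is expected to contain is shown below; only the key names are dictated by the setup script above, and every value here is an illustrative placeholder rather than the project's real metadata.

# korona/__pkg__.py -- illustrative sketch; values are placeholders
__package_name__ = 'korona'
__version__ = '0.0.0'                                    # placeholder
__url__ = 'https://example.org/korona'                   # placeholder
__license__ = 'MIT'
__author__ = 'Author Name'                               # placeholder
__email__ = 'author@example.org'                         # placeholder
__description__ = 'A library for building HTML pages.'   # placeholder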
The Women’s Luxury Guild (WLG) is a professional organization formed to mentor, advance, promote and celebrate women within the luxury industry. White paper reports on various topics within the luxury industry were designed to help inform members of the current market. These topics include beauty, fashion, wine, real estate, e-commerce and jewelry. The design and branding of these white paper reports were then used to help design their new website look and their marketing presentations.
# -*- coding: utf-8 -*- ## This file is part of Invenio. ## Copyright (C) 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibDocFile module web tests.""" import time from invenio.config import CFG_SITE_SECURE_URL from invenio.testutils import make_test_suite, \ run_test_suite, \ InvenioWebTestCase class InvenioBibDocFileWebTest(InvenioWebTestCase): """BibDocFile web tests.""" def test_add_new_file(self): """bibdocfile - web test add a new file""" self.browser.get(CFG_SITE_SECURE_URL + "/record/5?ln=en") # login as admin self.login(username="admin", password="") self.find_element_by_link_text_with_timeout("Manage Files of This Record") self.browser.find_element_by_link_text("Manage Files of This Record").click() self.find_element_by_xpath_with_timeout("//div[@id='uploadFileInterface']//input[@type='button' and @value='Add new file']") self.browser.find_element_by_xpath("//div[@id='uploadFileInterface']//input[@type='button' and @value='Add new file']").click() self.wait_element_displayed_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput")) filename = "Tiger_" + time.strftime("%Y%m%d%H%M%S") self.fill_textbox(textbox_name="rename", text=filename) self.fill_textbox(textbox_id="balloonReviseFileInput", text="/opt/invenio/lib/webtest/invenio/test.pdf") self.find_element_by_id_with_timeout("bibdocfilemanagedocfileuploadbutton") self.browser.find_element_by_id("bibdocfilemanagedocfileuploadbutton").click() self.wait_element_hidden_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput")) self.find_elements_by_class_name_with_timeout('reviseControlFileColumn') self.page_source_test(expected_text=filename) self.find_element_by_id_with_timeout("applyChanges") self.browser.find_element_by_id("applyChanges").click() self.page_source_test(expected_text='Your modifications to record #5 have been submitted') self.logout() def test_revise_file(self): """bibdocfile - web test revise a file""" self.browser.get(CFG_SITE_SECURE_URL + "/record/6?ln=en") # login as admin self.login(username="admin", password="") self.find_element_by_link_text_with_timeout("Manage Files of This Record") self.browser.find_element_by_link_text("Manage Files of This Record").click() self.find_element_by_link_text_with_timeout("revise") self.browser.find_element_by_link_text("revise").click() self.find_element_by_id_with_timeout("balloonReviseFileInput") self.wait_element_displayed_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput")) self.fill_textbox(textbox_id="balloonReviseFileInput", text="/opt/invenio/lib/webtest/invenio/test.pdf") self.find_element_by_id_with_timeout("bibdocfilemanagedocfileuploadbutton") self.browser.find_element_by_id("bibdocfilemanagedocfileuploadbutton").click() self.wait_element_hidden_with_timeout(self.browser.find_element_by_id("balloonReviseFileInput")) 
self.find_element_by_id_with_timeout("applyChanges") self.browser.find_element_by_id("applyChanges").click() self.page_source_test(expected_text='Your modifications to record #6 have been submitted') self.logout() def test_delete_file(self): """bibdocfile - web test delete a file""" self.browser.get(CFG_SITE_SECURE_URL + "/record/8?ln=en") # login as admin self.login(username="admin", password="") self.find_element_by_link_text_with_timeout("Manage Files of This Record") self.browser.find_element_by_link_text("Manage Files of This Record").click() self.browser.find_element_by_xpath("(//div[@id='uploadFileInterface']//tr[@class='even']//a[text()='delete'])[1]").click() self.handle_popup_dialog() time.sleep(1) self.page_source_test(expected_text=['9812226', 'pdf', 'ps.gz'], unexpected_text=['9812226.fig1.ps.gz']) self.find_element_by_name_with_timeout("cancel") self.browser.find_element_by_name("cancel").click() self.handle_popup_dialog() time.sleep(1) self.page_source_test(expected_text='Your modifications to record #8 have been cancelled') self.logout() TEST_SUITE = make_test_suite(InvenioBibDocFileWebTest, ) if __name__ == '__main__': run_test_suite(TEST_SUITE, warn_user=True)
In this part we'll take a quick, closer look at the process of automatically generating forms. If you're missing the old Insert/Update Record Wizard in Dreamweaver, then you should definitely check out our Bootstrap 3 Dynamic Form Generator. It's based on the most popular Bootstrap framework and uses the latest technologies that we implemented in DMXzone Server Connect and HTML5 Data Bindings. Dreamweaver automatically fills out the record insert/update forms by pulling data from the database. In comparison with the old, deprecated Server Behaviors, Bootstrap 3 Dynamic Form Generator uses the latest technologies and generates full forms based on an HTML5 Data Bindings data source or the inputs of a Server Connect Action file. You end up with a series of fields that are variable and driven by runtime logic. The form is automatically generated from your data source.
#encoding=UTF-8

from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import pytest


@pytest.fixture
def driver(request):
    driver = webdriver.Chrome()
    driver.get('http://localhost/litecart/en/')
    request.addfinalizer(driver.quit)
    return driver


def test_login(driver):
    driver.find_element_by_xpath('//a[contains(text(),"New customers click here")]').click()
    city = driver.find_element_by_xpath('//input[@name="city"]')
    city.send_keys('Random City')
    # For some reason the city field has no 'required' attribute, even though it is mandatory
    required_elements = driver.find_elements_by_css_selector('input[required="required"]')
    # collect all the required fields and fill each one in by name
    for element in required_elements:
        name = element.get_attribute('name')
        if name == 'firstname':
            element.send_keys('Nikolay')
        elif name == 'lastname':
            element.send_keys('Kutsoloshchenko')
        elif name == 'address1':
            element.send_keys('Random name str.')
        elif name == 'postcode':
            element.send_keys('12345')
        elif name == 'email':
            element.send_keys('[email protected]')
        elif name == 'phone':
            text = element.get_attribute('placeholder') + '123456789'
            element.send_keys(text)
        elif name == 'password':
            element.send_keys('Qwert12345')
        elif name == 'confirmed_password':
            element.send_keys('Qwert12345')
    driver.find_element_by_css_selector('button[type="submit"]').click()
    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"Logout")]')))
    driver.find_element_by_xpath('//a[contains(text(),"Logout")]').click()
    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'input[name="email"]')))
    email_field = driver.find_element_by_css_selector('input[name="email"]')
    email_field.send_keys('[email protected]')
    password_field = driver.find_element_by_css_selector('input[name="password"]')
    password_field.send_keys('Qwert12345')
    driver.find_element_by_css_selector('button[name="login"]').click()
    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"Logout")]')))
    driver.find_element_by_xpath('//a[contains(text(),"Logout")]').click()
Interior Ideas, 44 Inch Bathroom Vanity was posted December 13, 2016 at 10:01 pm by ravivdozetas.info. Moreover, 44 Inch Bathroom Vanity has been viewed by 671 visitors. Interior Ideas, Bathroom Vanity Set With Mirror was posted November 16, 2016 at 8:07 pm by ravivdozetas.info. Moreover, Bathroom Vanity Set With Mirror has been viewed by 401 visitors. Interior Ideas, Bathroom Vanity Top With Sink was posted December 27, 2016 at 8:44 am by ravivdozetas.info. Moreover, Bathroom Vanity Top With Sink has been viewed by 301 visitors. Interior Ideas, Bathroom Vanity 72 Inch was posted August 30, 2016 at 2:19 am by ravivdozetas.info. Moreover, Bathroom Vanity 72 Inch has been viewed by 807 visitors. Interior Ideas, Bathroom Lighted Vanity Mirrors was posted June 13, 2017 at 7:27 pm by ravivdozetas.info. Moreover, Bathroom Lighted Vanity Mirrors has been viewed by 365 visitors. Interior Ideas, Farmhouse Bathroom Sink Vanity was posted June 22, 2017 at 11:40 am by ravivdozetas.info. Moreover, Farmhouse Bathroom Sink Vanity has been viewed by 556 visitors. Interior Ideas, 48 White Bathroom Vanity was posted January 1, 2017 at 3:21 am by ravivdozetas.info. Moreover, 48 White Bathroom Vanity has been viewed by 782 visitors. Interior Ideas, Vanity Lights Bathroom was posted October 26, 2016 at 2:05 pm by ravivdozetas.info. Moreover, Vanity Lights Bathroom has been viewed by 337 visitors. Interior Ideas, Modern White Bathroom Vanity was posted October 5, 2016 at 9:09 am by ravivdozetas.info. Moreover, Modern White Bathroom Vanity has been viewed by 1085 visitors. Interior Ideas, Petite Bathroom Vanity was posted January 12, 2017 at 7:32 am by ravivdozetas.info. Moreover, Petite Bathroom Vanity has been viewed by 332 visitors.
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libfontenc(AutotoolsPackage): """libfontenc - font encoding library.""" homepage = "http://cgit.freedesktop.org/xorg/lib/libfontenc" url = "https://www.x.org/archive/individual/lib/libfontenc-1.1.3.tar.gz" version('1.1.3', '0ffa28542aa7d246299b1f7211cdb768') depends_on('zlib') depends_on('xproto', type='build') depends_on('[email protected]:', type='build') depends_on('util-macros', type='build')
I’ve lived in Bristol for 40 years as a professional engineer, coach and transformational strategist and also as a dad and a friend. I’ve asked lots of gentle questions and seen surprising things happen. I’ve often introduced myself informally at workshops and conferences as a court jester. I now wear the hat officially. I left Rolls-Royce amicably in March 2011 to focus for a while on family and friendships – after 35 years it was the least I could do. I also wanted to focus more on what I really enjoy and how I can help others most effectively. Right To Left Strategy Ltd. Registered Address: 1 Manor Farm Cottages, Churchend Lane, Charfield, Wotton Under Edge, Gloucestershire, GL12 8LJ. Registered in England & Wales. Company Number 8281244. Copyright © 2014, Peter Hill. All rights reserved.
""" Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the users or on the subreddits. Copyright (C) 2017, Kyle Hickey This file is part of the Downloader for Reddit. Downloader for Reddit is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Downloader for Reddit is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>. """ import re import requests from .base_extractor import BaseExtractor from ..core.errors import Error from ..utils import reddit_utils, video_merger class RedditVideoExtractor(BaseExtractor): url_key = ['v.redd.it'] def __init__(self, post, **kwargs): super().__init__(post, **kwargs) self.post = post self.host_vid = self.get_host_vid() self.url = None self.audio_url = None self.get_vid_url() def get_host_vid(self): """ Finds the actual submission that holds the video file to be extracted. If the post is the original post that the video was uploaded to, then None is returned. If the post is a crosspost from another location, the parent crosspost is returned as it is the post which holds the full video information. :return: The top level post which holds the video information to be downloaded if the supplied post is a crosspost, otherwise None. """ if hasattr(self.submission, 'crosspost_parent'): try: r = reddit_utils.get_reddit_instance() parent_submission = r.submission(self.submission.crosspost_parrent.split('_')[1]) parent_submission.title # fetch info from server to load submission return parent_submission except AttributeError: pass return self.submission def get_vid_url(self): """ Extracts the video url from the reddit post and determines if the post is a video and will contain an audio file. """ try: self.url = self.host_vid.media['reddit_video']['fallback_url'] except (AttributeError, TypeError): self.url = self.host_vid.url if self.url is not None: self.get_audio_url() def is_gif(self): return self.host_vid.media['reddit_video']['is_gif'] def extract_content(self): if self.settings_manager.download_reddit_hosted_videos: if self.url is not None: video_content = self.get_video_content() try: if self.audio_url is not None: audio_content = self.get_audio_content() if audio_content is not None and video_content is not None: merge_set = video_merger.MergeSet( video_id=video_content.id, audio_id=audio_content.id, date_modified=self.post.date_posted ) video_merger.videos_to_merge.append(merge_set) except: message = 'Failed to located content' self.handle_failed_extract(error=Error.FAILED_TO_LOCATE, message=message, log_exception=True, extractor_error_message=message) else: message = 'Failed to find acceptable url for download' self.handle_failed_extract(error=Error.FAILED_TO_LOCATE, message=message, log_exception=True, extractor_error_message=message) def get_video_content(self): ext = 'mp4' content = self.make_content(self.url, ext, name_modifier='(video)' if self.audio_url is not None else '') return content def get_audio_url(self): """ Iterates through what I'm sure will be an increasing list of parsers to find a valid audio url. 
Because not only does reddit separate the audio files from its video files when hosting a video, but they also change the path to get the audio file about every three months for some reason. """ parsers = [ lambda url: url.rsplit('/', 1)[0] + '/audio', lambda url: re.sub('DASH_[A-z 0-9]+', 'DASH_audio', url) ] for parser in parsers: try: url = parser(self.url) if self.check_audio_content(url): self.audio_url = url return except AttributeError: self.logger.error('Failed to get audio link for reddit video.', extra=self.get_log_data()) def check_audio_content(self, audio_url): """ Checks the extracted audio url to make sure that a valid status code is returned. Reddit videos are being mislabeled by reddit as being videos when they are in fact gifs. This rectifies the problem by checking that the audio link is valid before trying to make content from the audio portion of a video which does not have audio. :return: True if the audio link is valid, False if not. """ response = requests.head(audio_url) return response.status_code == 200 def get_audio_content(self): ext = 'mp3' content = self.make_content(self.audio_url, ext, name_modifier='(audio)') return content
With temperatures still in the 90s, it's hard to believe fall is almost here. But the calendar isn't wrong: it is September, and SRT is gearing up for some fun in October. First, on Thursday, October 12, comes the second SequoiaFest event, held in conjunction with Sequoia Brewing Co. From 6-10 p.m. in Garden Street Plaza in downtown Visalia, Sequoia Brewing will be serving food and beer, including a special, seasonal beer with a great SRT tie-in. We'll have music from local band Moose Crossing, too. Half of the beer proceeds will be given to SRT as a donation, and you are going to want to drink some beer when you hear about the new Sequoia one on tap! More info on that coming soon. Then on Tuesday, October 24, from 5-8 p.m., there's another opportunity to enjoy the food and beverages of Sequoia Brewing Co. while helping out SRT. Nature on Tap is a dine-in night at the Visalia Sequoia location, during which 15% of all proceeds will be donated to SRT. Another chance to drink some special beer while it's available. See you there!
import numpy as np
import re
import xml.etree.ElementTree

import logging
logger = logging.getLogger(__name__)


def readIowaSurfaces(fname):
    """Read the surfaces .xml file generated by OCTExplorer.

    Params:
    fname - full path to the .xml file

    Returns:
    {'scan_size'     - {'x','y','z'} - scan size in voxels,
     'voxel_size'    - {'x','y','z'} - voxel size in microns,
     'scan_system'   - 'cirrus'|'bioptigen' - recording system manufacturer,
                       # TODO - find out what the value is for .jpg images and Heidelberg
     'eye'           - 'OD'|'OS',
     'surface_names' - dict mapping each surface label to its (full name, index),
     'surface_data'  - an [nSurfaces, nBscans, nAscans] numpy masked array of the surface data}
    Each (surface, x, y) value is an integer indicating the depth of the surface;
    pixels are counted from the top of the scan and numbered from 1.

    Description:
    OCTExplorer.exe (https://www.iibi.uiowa.edu/content/iowa-reference-algorithms-human-and-murine-oct-retinal-layer-analysis-and-display)
    implements the Iowa Reference Algorithm to segment Optical Coherence Tomography files from a variety of
    commercial systems. The principal output is an .xml file containing information delineating 10 retinal
    surfaces. This function reads the xml file to extract metadata and returns the surface information as a
    numpy ndarray.
    """
    # define labels for the 10 retinal surfaces
    # surface_labels = ['ILM','RNFL-GCL','GCL-IPL','IPL-INL','INL-OPL',
    # 'OPL-HFL','BMEIS','IS/OSJ','IB_OPR','IB_RPE','OB_RPE']
    surface_labels = {}

    logger.debug('Loading surfaces file:{}'.format(fname))
    xml_root = xml.etree.ElementTree.parse(fname).getroot()

    # OCTExplorer version 3 used a <z> element for surface heights, version 4 uses <y>
    version = xml_root.find('version').text
    if version.startswith('3'):
        value_element = 'z'
    else:
        value_element = 'y'

    # first let's extract the scan size information
    scan_size = {'x': int(xml_root.find('./scan_characteristics/size/x').text),
                 'y': int(xml_root.find('./scan_characteristics/size/y').text),
                 'z': int(xml_root.find('./scan_characteristics/size/z').text)}

    voxel_size = {'x': float(xml_root.find('./scan_characteristics/voxel_size/x').text),
                  'y': float(xml_root.find('./scan_characteristics/voxel_size/y').text),
                  'z': float(xml_root.find('./scan_characteristics/voxel_size/z').text)}

    # try to extract system information
    system = xml_root.find('./scan_characteristics/manufacturer').text.lower()
    if bool(re.search('carl zeiss', system)):
        system = 'cirrus'
    elif bool(re.search('Bioptigen', system)):
        system = 'bioptigen'
    else:
        logger.warning('Unknown system type')
        system = 'unknown'

    # structure to hold layer measurements
    # data in this structure is in pixels and can be used by the centering function
    nlayers = int(xml_root.find('surface_num').text)
    if version.startswith('3'):
        data = np.empty((nlayers, scan_size['y'], scan_size['x']), dtype=float)
    else:
        data = np.empty((nlayers, scan_size['z'], scan_size['x']), dtype=float)

    p = re.compile(r'.*\((.*)\)')
    for surface in xml_root.findall('surface'):
        # identify which surface this is and assign an index
        # can't use the label in the xml file as these are not contiguous
        surface_name = surface.find('name').text
        logger.debug('Loading surface:{}'.format(surface_name))
        surface_idx = np.NaN

        # extract the surface label
        match = re.match(p, surface_name)
        if match:
            if not match.group(1) in surface_labels.keys():
                # surface not seen before, add the label and description
                surface_labels[match.group(1)] = (match.group(0), len(surface_labels))
            surface_idx = surface_labels[match.group(1)][1]
        else:
            logger.warning('Failed to identify surface:{}'.format(surface_name))
            break
        logger.debug('Surface index:{}'.format(surface_idx))

        # loop through all the bscans
        surface_bscans = surface.findall('bscan')
        for bscan_idx in range(data.shape[1]):
            bscan = surface_bscans[bscan_idx]
            data[surface_idx, bscan_idx, :] = [int(z.text) for z in bscan.findall(value_element)]

    # the .xml file may also contain information on where segmentation has failed;
    # create a structure to store this information
    undef_mask = np.zeros(data.shape, dtype=bool)
    undef_xml = xml_root.find('undefined_region')
    if undef_xml is not None:
        for ascan in undef_xml.findall('ascan'):
            x = int(ascan.find('x').text)
            y = int(ascan.find('y').text)
            undef_mask[:, y, x] = True

    data = np.ma.MaskedArray(data, mask=undef_mask)

    laterality = xml_root.find('scan_characteristics').find('laterality').text
    if laterality.upper() in ['OD', 'OS']:
        laterality = laterality.upper()
    else:
        # not defined in the xml file, see if we can extract it from the filename
        p = re.compile('(OD|OS)')
        m = re.search(p, fname)
        if m:
            laterality = m.group(0)

    return {'scan_size': scan_size,
            'voxel_size': voxel_size,
            'scan_system': system,
            'eye': laterality,
            'surface_names': surface_labels,
            'surface_data': data}


def readIowaCenter(fname):
    """Load the GridCenter.xml file

    Params:
    fname - full path to the _GridCenter_Iowa.xml file

    Returns:
    (center_x, center_y) - scan center in pixels
    """
    xml_root = xml.etree.ElementTree.parse(fname)
    c = xml_root.find('center')
    center_x = int(c.find('x').text)
    center_y = int(c.find('y').text)
    return (center_x, center_y)
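The docstring above leaves its Examples section empty, so here is a minimal usage sketch. The file paths and names are hypothetical; only the keys of the returned dictionary and the shape of surface_data come from the function itself.

# Minimal usage sketch (paths are hypothetical examples):
surfaces = readIowaSurfaces('/data/oct/scan01_OD_Surfaces_Iowa.xml')
print(surfaces['scan_system'], surfaces['eye'], surfaces['scan_size'])

# surface_data is a masked array of shape (nSurfaces, nBscans, nAscans);
# e.g. thickness in pixels between the first and last segmented surfaces:
thickness_px = surfaces['surface_data'][-1] - surfaces['surface_data'][0]

cx, cy = readIowaCenter('/data/oct/scan01_OD_GridCenter_Iowa.xml')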
On the TeerGuru.com website, you will get all the updates and results of the Teer game. This site is updated daily to provide you with fresh Teer results. Here you will get Guwahati Teer counter/Khanapara Teer, Manipur, Juwai & Shillong Teer results on a daily basis. We will also provide Teer Common Numbers and Dream Numbers here on this website. You can also check previous results here as well. So keep visiting our website regularly and bookmark it. You can predict Teer counter results by following a simple mathematical calculation and can win the game. The formula is not tough to pick up; simply adding up some numbers will help you out. Here is a glimpse of the mathematical calculation: you will need to add all the numbers step by step until you get a single number at the end. So this is all the calculation about how to win the Teer game in the simplest way. Please note: we only collect and update the latest Teer results of Manipur, Shillong, Khanapara, and Juwai here, but we are not connected with any official Teer counter and we are not their official website. For more information, you can check out the Disclaimer page.
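The text never spells the formula out beyond "add all the numbers step by step until you get a single number". The sketch below shows that repeated digit-sum (digital-root) reading of it, purely as an illustration; it is an assumption about what the calculation looks like, not an official Teer method.

def digit_sum_until_single(number):
    """Repeatedly sum the digits of a number until a single digit remains."""
    while number >= 10:
        number = sum(int(d) for d in str(number))
    return number

# e.g. a previous result of 87: 8 + 7 = 15, then 1 + 5 = 6
print(digit_sum_until_single(87))  # -> 6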
import pyxb.binding.generate import pyxb.utils.domutils from xml.dom import Node import os.path schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../schemas/test-wildcard.xsd')) code = pyxb.binding.generate.GeneratePython(schema_location=schema_path) rv = compile(code, 'test', 'exec') eval(rv) from pyxb.exceptions_ import * import unittest def nc_not (ns_or_absent): return ( pyxb.xmlschema.structures.Wildcard.NC_not, ns_or_absent ) class TestIntensionalSet (unittest.TestCase): def testTest (self): ns = 'URN:namespace' not_nc = nc_not(ns) self.assert_(isinstance(not_nc, tuple)) self.assertEqual(2, len(not_nc)) self.assertEqual(pyxb.xmlschema.structures.Wildcard.NC_not, not_nc[0]) self.assertEqual(ns, not_nc[1]) def testUnion_1 (self): UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_any, UNION([ nc_any, nc_any ])) self.assertEqual(nc_not(ns1), UNION([ nc_not(ns1), nc_not(ns1) ])) self.assertEqual(set([ns1]), UNION([ set([ns1]), set([ns1]) ])) def testUnion_2 (self): UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_any, UNION([ nc_any, set([ns1]) ])) self.assertEqual(nc_any, UNION([ nc_any, nc_not(ns1) ])) self.assertEqual(nc_any, UNION([ nc_any, nc_not(None) ])) def testUnion_3 (self): UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(set([ns1, ns2]), UNION([set([ns1]), set([ns2])])) self.assertEqual(set([None, ns1]), UNION([set([None]), set([ns1])])) self.assertEqual(set([None]), UNION([set([None]), set([None])])) def testUnion_4 (self): UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(ns2)])) self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(None)])) def testUnion_5 (self): UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_any, UNION([nc_not(ns1), set([ns1, None])])) # 5.1 self.assertEqual(nc_not(None), UNION([nc_not(ns1), set([ns1, ns2])])) # 5.2 self.assertRaises(SchemaValidationError, UNION, [nc_not(ns1), set([None, ns2])]) # 5.3 self.assertEqual(nc_not(ns1), UNION([nc_not(ns1), set([ns2])])) # 5.4 def testUnion_6 (self): UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_any, UNION([nc_not(None), set([ns1, ns2, None])])) # 6.1 self.assertEqual(nc_not(None), UNION([nc_not(None), set([ns1, ns2])])) # 6.2 def testIntersection_1 (self): ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_any, ISECT([ nc_any, nc_any ])) self.assertEqual(nc_not(ns1), ISECT([ nc_not(ns1), nc_not(ns1) ])) self.assertEqual(set([ns1]), ISECT([ set([ns1]), set([ns1]) ])) def testIntersection_2 (self): ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(set([ns1]), ISECT([ nc_any, set([ns1]) ])) 
self.assertEqual(nc_not(ns1), ISECT([ nc_any, nc_not(ns1) ])) self.assertEqual(nc_not(None), ISECT([ nc_any, nc_not(None) ])) def testIntersection_3 (self): ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2, None])])) self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2])])) self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns2])])) def testIntersection_4 (self): ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(set([ns2]), ISECT([set([ns1, ns2]), set([ns2, None])])) self.assertEqual(set([ns2, None]), ISECT([set([None, ns1, ns2]), set([ns2, None])])) self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, None])])) self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, ns1]), set([ns2, None])])) self.assertEqual(set([ns1]), ISECT([set([ns1, None]), set([None, ns2, ns1]), set([ns1, ns2])])) def testIntersection_5 (self): ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertRaises(SchemaValidationError, ISECT, [nc_not(ns1), nc_not(ns2)]) def testIntersection_6 (self): ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection nc_any = pyxb.xmlschema.structures.Wildcard.NC_any ns1 = 'URN:first' ns2 = 'URN:second' self.assertEqual(nc_not(ns1), ISECT([nc_not(ns1), nc_not(None)])) class TestWildcard (unittest.TestCase): def testElement (self): # NB: Test on CTD, not element self.assert_(wrapper_._HasWildcardElement) xmls = '<wrapper><first/><second/><third/></wrapper>' doc = pyxb.utils.domutils.StringToDOM(xmls) instance = wrapper.createFromDOM(doc.documentElement) self.assert_(isinstance(instance.wildcardElements(), list)) self.assertEquals(1, len(instance.wildcardElements())) # Alternative parser path instance = CreateFromDocument(xmls) self.assert_(isinstance(instance.wildcardElements(), list)) self.assertEquals(1, len(instance.wildcardElements())) def _validateWildcardWrappingRecognized (self, instance): self.assert_(isinstance(instance.wildcardElements(), list)) self.assertEquals(1, len(instance.wildcardElements())) dom = instance.wildcardElements()[0] self.assertTrue(isinstance(dom, Node)) self.assertEquals(Node.ELEMENT_NODE, dom.nodeType) self.assertEquals('third', dom.nodeName) self.assertEquals(1, len(dom.childNodes)) cdom = dom.firstChild self.assertTrue(isinstance(cdom, Node)) self.assertEquals(Node.ELEMENT_NODE, cdom.nodeType) self.assertEquals('selt', cdom.nodeName) ccdom = cdom.firstChild self.assertTrue(isinstance(ccdom, Node)) self.assertEquals(Node.TEXT_NODE, ccdom.nodeType) self.assertEquals('text', ccdom.data) def testWildcardWrappingRecognized (self): # NB: Test on CTD, not element self.assert_(wrapper_._HasWildcardElement) xmls = '<wrapper><first/><second/><third><selt>text</selt></third></wrapper>' doc = pyxb.utils.domutils.StringToDOM(xmls) instance = wrapper.createFromDOM(doc.documentElement) self._validateWildcardWrappingRecognized(instance) # Alternative parser path instance = CreateFromDocument(xmls) self._validateWildcardWrappingRecognized(instance) def testMultiElement (self): tested_overmax = False for rep in range(0, 6): xmls = '<wrapper><first/><second/>%s</wrapper>' % (''.join(rep * ['<third/>']),) doc = pyxb.utils.domutils.StringToDOM(xmls) if 3 
>= rep: instance = wrapper.createFromDOM(doc.documentElement) self.assert_(isinstance(instance.wildcardElements(), list)) self.assertEquals(rep, len(instance.wildcardElements())) for i in range(0, rep): self.assertEquals('third', instance.wildcardElements()[i].nodeName) else: tested_overmax = True self.assertRaises(ExtraContentError, wrapper.createFromDOM, doc.documentElement) self.assert_(tested_overmax) def testAttribute (self): # NB: Test on CTD, not element self.assert_(isinstance(wrapper_._AttributeWildcard, pyxb.binding.content.Wildcard)) xmls = '<wrapper myattr="true" auxattr="somevalue"/>' doc = pyxb.utils.domutils.StringToDOM(xmls) instance = wrapper.createFromDOM(doc.documentElement) self.assert_(isinstance(instance.wildcardAttributeMap(), dict)) self.assertEquals(1, len(instance.wildcardAttributeMap())) self.assertEquals('somevalue', instance.wildcardAttributeMap()['auxattr']) if __name__ == '__main__': unittest.main()
There is a selection of shower etiquette tips here; take a look around the site to find ideas to help you host a successful affair. We have articles to save you time when planning a party for the future bride, and the results are sure to please your party guests. Along with the pre-wedding celebration ideas, you'll find a great selection of useful etiquette information. If you are looking for etiquette suggestions, this guide should help you plan a great event. Check out the hosting ideas here to make sure that the party is an enjoyable event. There is also a selection of recipe tips for the event; take a look around the site to find ideas to help you host a successful affair. Many of the ideas here are also suitable for a wedding reception. We have free bridal articles to save you time when planning a wedding celebration or shower. The finger food recipes are sure to please your guests. Along with the food ideas, you should find what you need in the great selection of useful recipes and wedding information.
# Copyright (c) 2014, Roger Duran
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
    Command-line wrapper to run commands and add rules to new windows
"""

import argparse
import atexit
import subprocess

from libqtile import ipc
from libqtile.command import graph


def run_cmd(opts) -> None:
    if opts.socket is None:
        socket = ipc.find_sockfile()
    else:
        socket = opts.socket
    client = ipc.Client(socket)
    root = graph.CommandGraphRoot()

    cmd = [opts.cmd]
    if opts.args:
        cmd.extend(opts.args)

    proc = subprocess.Popen(cmd)
    match_args = {"net_wm_pid": proc.pid}
    rule_args = {"float": opts.float, "intrusive": opts.intrusive,
                 "group": opts.group, "break_on_match": not opts.dont_break}

    # register a window rule keyed to the child's pid, and make sure it is
    # removed again when this wrapper exits
    cmd = root.call("add_rule")
    _, rule_id = client.send((root.selectors, cmd.name, (match_args, rule_args), {}))

    def remove_rule() -> None:
        cmd = root.call("remove_rule")
        client.send((root.selectors, cmd.name, (rule_id,), {}))

    atexit.register(remove_rule)

    proc.wait()


def add_subcommand(subparsers, parents):
    parser = subparsers.add_parser(
        "run-cmd",
        parents=parents,
        help="A wrapper around the command graph"
    )
    parser.add_argument(
        '-s', '--socket', help='Use specified communication socket.')
    parser.add_argument(
        '-i', '--intrusive', action='store_true',
        help='If the new window should be intrusive.')
    parser.add_argument(
        '-f', '--float', action='store_true',
        help='If the new window should be float.')
    parser.add_argument(
        '-b', '--dont-break', action='store_true',
        help='Do not break on match (keep applying rules).')
    parser.add_argument(
        '-g', '--group', help='Set the window group.')
    parser.add_argument(
        'cmd', help='Command to execute.')
    parser.add_argument(
        'args', nargs=argparse.REMAINDER, metavar='[args ...]',
        help='Optional arguments to pass to command.'
    )
    parser.set_defaults(func=run_cmd)
Since we started our blog back in January, we’ve come across some real gems, which is what spurred us to create this top 8 list and share it with you. If you too have come across a site you feel deserves recognition, or if you have a site where you review mirrorless cameras, do not hesitate to let us know in the comments section!

On Thomas’ website, not only will you find a collection of his wonderful personal photographic work, but you’ll also encounter a treasure trove of articles he has collected about the Fuji X Series. In fact, I would call it the most comprehensive collection of Fuji-related articles on the Web. Thomas is extremely selective about which articles he chooses to post, so you know every visit to his site is worth your time and attention. It is updated on a daily basis.

FotoZones is a very complete photography website run by Dallas Dahms, who owns an OM-D E-M1 and frequently discusses Olympus and Micro Four Thirds in his posts. I like Dallas’ style, as he reviews cameras and lenses by reflecting on his experiences out in the field. Shying away from the technical, his words and explanations have a very personal ring to them, which is always enjoyable to read. Unlike other mirrorless blogs, he also has a very active forum going.

It is true that Camera Labs doesn’t really need introducing, but given the quality of the site, I still feel it deserves a place on this list. Run by Gordon Laing, it is an extremely comprehensive website that covers all sorts of cameras and gear, not only mirrorless. Gordon’s most recent reviews have mostly dealt with mirrorless cameras, and he himself uses the OM-D E-M5. The reviews are quite technical at times, but he always makes a point of including a nice gallery of sample images for each camera as well.

Donato and Max over at RiflessiFotografici are well known in Italy amongst photographers who use the Fuji X system. They are official Fuji X Photographers, but often write reviews of other systems, including the Nikon 1 System and Sony Cybershot. Just this year, they began translating all their most important reviews into English to reach a wider audience.

Though the primary focus of Small Camera, Big Picture is hybrid photography, you will often find interesting and helpful mirrorless camera reviews written by the team of expert staff that runs this website. Editor in Chief Giulio Sciorio is an official Lumix photographer, whereas Paula Thomas, Tammy Lee Bradley and Jamie MacDonald are true Olympus aficionados. The passion they have for the subject is more than evident in the photos they take and how they write!

Author Jordan Steele has shot with many cameras in his lifetime, but he now shoots primarily with the OM-D E-M5, Fuji X-E1 and Panasonic GX1. He uses his vast knowledge about cameras to produce interesting and original mirrorless camera reviews. Often, he will notice things that escape even the attention of the guys at DPReview!

Photography enthusiast Matthew E. Maddock is the brains behind PhotoMadd, a review site that deals specifically with Fuji X Series cameras. He reviews anything from cameras to accessories to bags to lenses. I enjoy his congenial style of writing; you can really tell that he is passionate about the brand and the art of photography in general.

We have a new list of 10 Mirrorless Blogs You Should Follow in 2014. Be sure to check it out!
""" Logging utilities. """ import logging import logging.handlers from colorama import Fore, Style COLORS = { 'DEBUG': Style.DIM, 'INFO': Style.NORMAL, 'WARNING': Style.BRIGHT, 'ERROR': Fore.RED, 'CRITICAL': Style.BRIGHT + Fore.RED, } class ColoredFormatter(logging.Formatter): def format(self, record): return COLORS[record.levelname] + logging.Formatter.format(self, record) + Style.RESET_ALL def setup_logging(verbose=True, color=True): """ Sets logging format. """ logging.getLogger().setLevel(logging.DEBUG) stream = logging.StreamHandler() stream.setLevel(logging.DEBUG if verbose else logging.INFO) if color: stream_format = ColoredFormatter( "%(asctime)s %(name)s %(levelname)s %(message)s" ) else: stream_format = logging.Formatter( "%(asctime)s %(name)s %(levelname)s %(message)s" ) stream.setFormatter(stream_format) logging.getLogger().addHandler(stream) logging.getLogger('requests').setLevel(logging.ERROR)
Sleep Apnea: Should I Have a Sleep Study? (Burlington Ear, Nose & Throat Clinic)

If you snore but don't have other symptoms, you may not need a sleep study. Lifestyle changes, such as losing weight (if needed), sleeping on your side, and going to bed at the same time every night, may reduce your snoring.

Barbe F, et al. (2010). Long-term effect of continuous positive airway pressure in hypertensive patients with sleep apnea. American Journal of Respiratory and Critical Care Medicine, 181(7): 718–726.
Norman D, et al. (2006). Effects of continuous positive airway pressure versus supplemental oxygen on 24-hour ambulatory blood pressure. Hypertension, 47(5): 840–845.
Milleron O, et al. (2004). Benefits of obstructive sleep apnoea treatment in coronary artery disease: A long-term follow-up study. European Heart Journal, 25(9): 728–734.
from django.db import models from django.contrib.auth.signals import user_logged_in, user_logged_out from urllib2 import urlopen import json class LoggedUser(models.Model): username = models.CharField(max_length=30, primary_key=True) country = models.CharField(max_length=100, blank=False) email = models.CharField(max_length=100, blank=False, default='[email protected]') def __unicode__(self): return self.username def login_user(sender, request, user, **kwargs): country = get_user_country(request) if user: LoggedUser(username=user.username, country=country, email=user.email).save() def logout_user(sender, request, user, **kwargs): try: if user: user = LoggedUser.objects.get(pk=user.username) user.delete() except LoggedUser.DoesNotExist: pass user_logged_in.connect(login_user) user_logged_out.connect(logout_user) def get_user_country(request): # Automatically geolocate the connecting IP ip = request.META.get('REMOTE_ADDR') try: response = urlopen('http://ipinfo.io/' + ip + '/json').read() response = json.loads(response) return response['country'].lower() except Exception, e: response = "undefined" return response
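For illustration, a hedged sketch of how the LoggedUser table might be consumed, for example a small view that lists who is currently online; the view and template names are invented for this example and are not part of the original app.

from django.shortcuts import render

def online_users(request):
    # Every row in LoggedUser corresponds to a user who logged in and has not
    # yet logged out (or whose session was never closed cleanly).
    users = LoggedUser.objects.order_by('username')
    return render(request, 'online_users.html', {'users': users})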
Jacob is a Digital Sciences major with a minor in User Experience Design ready to graduate in May 2019. He enjoys designing and developing websites, learning about new technologies, listening to podcasts, exercising and spending time with his family and friends. Once he graduates he hopes to work as a UI/UX Developer.
""" Cylinder form factor in Born approximation """ import bornagain as ba from bornagain import deg, nm def get_sample(): """ Returns a sample with cylinders in a homogeneous environment ("Vacuum"), implying a simulation in plain Born approximation. """ # Define materials material_Particle = ba.HomogeneousMaterial("Particle", 0.0006, 2e-08) material_Vacuum = ba.HomogeneousMaterial("Vacuum", 0.0, 0.0) # Define form factors ff = ba.FormFactorCylinder(5.0*nm, 5.0*nm) # Define particles particle = ba.Particle(material_Particle, ff) # Define particle layouts layout = ba.ParticleLayout() layout.addParticle(particle, 1.0) layout.setWeight(1) layout.setTotalParticleSurfaceDensity(0.01) # Define layers layer = ba.Layer(material_Vacuum) layer.addLayout(layout) # Define sample sample = ba.MultiLayer() sample.addLayer(layer) return sample def get_simulation(sample): beam = ba.Beam(1.0, 0.1*nm, ba.Direction(0.2*deg, 0*deg)) detector = ba.SphericalDetector(200, -2*deg, 2*deg, 200, 0*deg, 2*deg) simulation = ba.GISASSimulation(beam, sample, detector) return simulation if __name__ == '__main__': import ba_plot sample = get_sample() simulation = get_simulation(sample) ba_plot.run_and_plot(simulation)
Pricing starts at $99 a month per user. Vendor works with software, distribution, manufacturing, ecommerce, and publishing companies of all sizes. Skyware ERP+ is a comprehensive Cloud-based ERP solution that integrates across your sales, service and back-office processes. The product’s core modules include accounting, purchasing, inventory, manufacturing, payroll, projects and asset management. Much of this ERP system focuses on financials, allowing you to access your financial data from any web device, which enables automated revenue recognition processes and provides substantial budgeting and planning tools. Additionally, you can give each of your employees data dashboards that work with predefined KPIs, trends and scorecards. Skyware was founded in 2010 and is headquartered in San Francisco, California.
from functools import partial import graphene from django.db.models.query import QuerySet from django_measurement.models import MeasurementField from django_prices.models import MoneyField, TaxedMoneyField from graphene.relay import PageInfo from graphene_django.converter import convert_django_field from graphene_django.fields import DjangoConnectionField from graphql_relay.connection.arrayconnection import connection_from_list_slice from promise import Promise from .types.common import Weight from .types.money import Money, TaxedMoney def patch_pagination_args(field: DjangoConnectionField): """Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """ field.args["first"].description = "Return the first n elements from the list." field.args["last"].description = "Return the last n elements from the list." field.args[ "before" ].description = ( "Return the elements in the list that come before the specified cursor." ) field.args[ "after" ].description = ( "Return the elements in the list that come after the specified cursor." ) class BaseConnectionField(graphene.ConnectionField): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) patch_pagination_args(self) class BaseDjangoConnectionField(DjangoConnectionField): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) patch_pagination_args(self) @convert_django_field.register(TaxedMoneyField) def convert_field_taxed_money(*_args): return graphene.Field(TaxedMoney) @convert_django_field.register(MoneyField) def convert_field_money(*_args): return graphene.Field(Money) @convert_django_field.register(MeasurementField) def convert_field_measurements(*_args): return graphene.Field(Weight) class PrefetchingConnectionField(BaseDjangoConnectionField): @classmethod def connection_resolver( cls, resolver, connection, default_manager, max_limit, enforce_first_or_last, root, info, **args, ): # Disable `enforce_first_or_last` if not querying for `edges`. values = [ field.name.value for field in info.field_asts[0].selection_set.selections ] if "edges" not in values: enforce_first_or_last = False return super().connection_resolver( resolver, connection, default_manager, max_limit, enforce_first_or_last, root, info, **args, ) @classmethod def resolve_connection(cls, connection, default_manager, args, iterable): if iterable is None: iterable = default_manager if isinstance(iterable, QuerySet): _len = iterable.count() else: _len = len(iterable) connection = connection_from_list_slice( iterable, args, slice_start=0, list_length=_len, list_slice_length=_len, connection_type=connection, edge_type=connection.Edge, pageinfo_type=PageInfo, ) connection.iterable = iterable connection.length = _len return connection class FilterInputConnectionField(BaseDjangoConnectionField): def __init__(self, *args, **kwargs): self.filter_field_name = kwargs.pop("filter_field_name", "filter") self.filter_input = kwargs.get(self.filter_field_name) self.filterset_class = None if self.filter_input: self.filterset_class = self.filter_input.filterset_class super().__init__(*args, **kwargs) @classmethod def connection_resolver( cls, resolver, connection, default_manager, max_limit, enforce_first_or_last, filterset_class, filters_name, root, info, **args, ): # Disable `enforce_first_or_last` if not querying for `edges`. 
values = [ field.name.value for field in info.field_asts[0].selection_set.selections ] if "edges" not in values: enforce_first_or_last = False first = args.get("first") last = args.get("last") if enforce_first_or_last: assert first or last, ( "You must provide a `first` or `last` value to properly " "paginate the `{}` connection." ).format(info.field_name) if max_limit: if first: assert first <= max_limit, ( "Requesting {} records on the `{}` connection exceeds the " "`first` limit of {} records." ).format(first, info.field_name, max_limit) args["first"] = min(first, max_limit) if last: assert last <= max_limit, ( "Requesting {} records on the `{}` connection exceeds the " "`last` limit of {} records." ).format(last, info.field_name, max_limit) args["last"] = min(last, max_limit) iterable = resolver(root, info, **args) on_resolve = partial(cls.resolve_connection, connection, default_manager, args) filter_input = args.get(filters_name) if filter_input and filterset_class: iterable = filterset_class( data=dict(filter_input), queryset=iterable, request=info.context ).qs if Promise.is_thenable(iterable): return Promise.resolve(iterable).then(on_resolve) return on_resolve(iterable) def get_resolver(self, parent_resolver): return partial( super().get_resolver(parent_resolver), self.filterset_class, self.filter_field_name, )
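A hedged sketch of how a field like FilterInputConnectionField is typically declared on a Graphene query type; Product, ProductFilterInput and the resolver body are placeholder names invented for this example, not definitions taken from the module above.

import graphene

class Query(graphene.ObjectType):
    products = FilterInputConnectionField(
        Product,                       # a DjangoObjectType (placeholder)
        filter=ProductFilterInput(),   # a FilterInputObjectType (placeholder)
        description="List of products.",
    )

    @staticmethod
    def resolve_products(_root, _info, **_kwargs):
        # The connection field paginates and (optionally) filters this queryset.
        return Product._meta.model.objects.all()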
Stoke-on-Trent is hoping to once more regenerate its property market with a programme to sell off derelict homes for £1 apiece. Early in the year, we saw a new most expensive listing in the country: a bonkers $250M spec house in Bel Air, Los Angeles with a “curated lifestyle” built in (think a luxury car gallery, paid-for house staff, and much, much more). But alas, its title would be stripped well before the end of summer by the simply flabbergasting $350M listing for the Chartwell Estate in Bel Air. It’s a familiar refrain any time housing data is released: there’s strong demand, but limited supply. Data from the National Association of Realtors, which tracks sales of previously owned homes, shows that the number of houses on the market, averaged over the first six months of 2017, was the lowest for any first half of the year since at least 1999. The estate features 1,200 feet of private ocean front, a botanic garden with 1,500 species of tropical trees and plants, a sports complex, a treehouse, and much more. The main mansion was built in the 1940s and was once the home of Gloria Guinness, a twentieth-century style icon and one of Truman Capote’s “Swans.” The property was originally listed for $195,000,000. For discreet and professional service, please call me to schedule a private viewing of any one of these Kensington real estate listings. If you would like me to do all the research for you, please call me with your search criteria and I’ll e-mail you all the listings that you should be considering, and that specifically match your lifestyle requirements. However, be advised that an FSBO kit may range from just the basic or most common forms needed, with some instructions on how to fill them out and where to file them, to very complete FSBO kits which include ALL the legal forms needed, instructions on how to fill them out and where to file them, directions on how to prepare your home for sale and how to advertise it effectively, and, in some cases, contact with a listing service which will help put the home in front of potential buyers.
# -*- coding: utf-8 -*- ''' Created on 25-08-2011 ''' class HierarchyInserter: """Inserts hierarchy from the given collection. Uses hierarchy described in object of the following form: { "columns": [list of columns creating hierarchy], "field_type_label": name of column(that will be inserted), representing type of row(position in hierarchy), "field_name_label": name of column(that will be inserted), representing name of row, "name_column": number of column which value represents name of data row and will be moved to new field(its name is field_name_label), "lowest_type": name that will be inserted to the type column in rows that are in the lowest position in hierarchy "summable": [list of columns that should be summed after creating hierarchy] } Data passed to HierarchyInserter should be correct, otherwise created data will contain bugs. """ def __init__(self, csv_data, hierarchy_def, schema_descr, add_id=False, teryt_data=None): """Initiates object. Arguments: csv_data -- data that needs hierarchy inserting hierarchy_def -- object representing hierarchy in data schema_descr -- objecy describing schema of data add_id -- if new column with id should be prepended for each row teryt_data -- data from file with TERYT codes, can be used to generate id """ self.csv_data = csv_data self.hierarchy_fields = hierarchy_def['columns'] self.hierarchy_field_label = hierarchy_def['field_name_label'] self.hierarchy_columns_labels = [self.csv_data.get_header()[i] for i in self.hierarchy_fields] self.type_label = hierarchy_def['field_type_label'] self.lowest_type = hierarchy_def['lowest_type'] self.modified_rows = [] self.name_column_nr = hierarchy_def['name_column'] self.summable = hierarchy_def['summable'] self.fields = schema_descr['fields'] self.delete_order = sorted(self.hierarchy_fields + [self.name_column_nr], reverse=True) self.hierarchy_obj = HierarchyNode(0) self.add_id = add_id self.use_teryt = False if self.add_id: if teryt_data: self.use_teryt = True self.teryt_id_generator = TerytIdGenerator(teryt_data) self.bad_hierarchy_log = [] def insert_hierarchy(self): """Process of inserting hierarchy is as follows: - for header: remove hierarchy fields and type column, prepend field_type_label, field_name_label - for row: create rows representing hierarchy(if not created yet) and remove hierarchy fields and type column, prepend type of row (lowest_type) and value of name column Additionally id of rows can be inserted(and then id field is inserted in header). Firstly, changes header, then for each row gets its hierarchy, if it is new hierarchy, adds new rows representing this hierarchy, in the end clears hierarchy from the row. After that, if id were generated, fills summable columns in added hierarchy rows. """ self.bad_hierarchy_log = [] header = self.csv_data.get_header() for nr in self.delete_order: del header[nr] header.insert(0, self.type_label) header.insert(1, self.hierarchy_field_label) if self.add_id: header.insert(0, 'id') self.modified_rows = [header] row = self.csv_data.get_next_row(row_type='list') if row is None: print 'Only header in csv data. 
No data rows were changed' return row_len = len(header) old_hierarchy = [] new_hierarchy = [] i = 1 while row is not None: i += 1 try: new_hierarchy = self.get_hierarchy(row) except HierarchyError as e: log = ('row nr %d, ' % i) + e.log self.bad_hierarchy_log.append(log) else: if new_hierarchy != old_hierarchy: new_hierarchy_rows = self.create_hierarchy_rows(new_hierarchy, row_len) self.modified_rows.extend(new_hierarchy_rows) self.modified_rows.append(self.clean_row(row, new_hierarchy)) old_hierarchy = new_hierarchy row = self.csv_data.get_next_row(row_type='list') if self.add_id: self.fill_summable_values() def all_rows_correct(self): """Returns True if no errors were found, otherwise False.""" return self.bad_hierarchy_log == [] def get_modified_rows(self): """Returns list of modified rows if no errors were found, otherwise empty list. """ if self.all_rows_correct(): return self.modified_rows else: return [] def get_hierarchy_errors_log(self): """Returns string containing errors separated by new line.""" return '\n'.join(self.bad_hierarchy_log) def clean_row(self, row, hierarchy): """Adds id of this row to hierarchy object. Removes hierarchy fields from the row, moves its type and name(fields described by name and type column in schema) to the beginning of it. Adds rows's id if add_id parameter was set to True on constructing this object. Arguments: row -- row to clean hierarchy -- hierarchy in the row """ cleaned_row = row[:] node = self.get_hierarchy_node(hierarchy) next_id = node.get_new_child_id() row_node = HierarchyNode(next_id) node.add_child(row_node, next_id) hierarchy_field_name = cleaned_row[self.name_column_nr] for nr in self.delete_order: del cleaned_row[nr] cleaned_row.insert(0, self.lowest_type) cleaned_row.insert(1, hierarchy_field_name) if self.add_id: row_hierarchy = hierarchy + [next_id] full_id = self.get_full_id(row_hierarchy) cleaned_row.insert(0, full_id) return cleaned_row def get_hierarchy(self, row): """Returns list representing hierarchy in the row. Arguments: row -- data row """ hierarchy = [] for nr in self.hierarchy_fields: if row[nr] == '': break hierarchy.append(row[nr]) return hierarchy def create_hierarchy_rows(self, new_hierarchy, row_len): """Returns rows list of hierarchy rows that should be put inside data to show new_hierarchy. If hierarchy rows have been added for new_hierarchy already, empty list will be returned. Hierarchy rows will be have not empty: id(if created), type, name and summable fields. 
Arguments: new_hierarchy -- list representing hierarchy in row in data row_len -- length of that row, needed create correct hierarchy row """ hierarchy_rows = [] partial_hierarchy = [] act_hierarchy_obj = self.hierarchy_obj i = 0 for field in new_hierarchy: partial_hierarchy.append(field) child = act_hierarchy_obj.get_child(field) # if this row represents new hierarchy if child is None: if self.use_teryt: new_id = self.teryt_id_generator.get_teryt_id(partial_hierarchy) if new_id is None: self.teryt_id_generator.add_teryt_unit(partial_hierarchy) new_id = self.teryt_id_generator.get_teryt_id(partial_hierarchy) else: new_id = act_hierarchy_obj.get_new_child_id() child = HierarchyNode(new_id) act_hierarchy_obj.add_child(child, field) new_row = ['' for _ in range(row_len)] if self.add_id: new_row[0] = self.get_full_id(partial_hierarchy) new_row[1] = self.hierarchy_columns_labels[i] new_row[2] = field else: new_row[0] = self.hierarchy_columns_labels[i] new_row[1] = field hierarchy_rows.append(new_row) act_hierarchy_obj = child i += 1 return hierarchy_rows def get_hierarchy_node(self, hierarchy): """Returns HierarchyNode representing hierarchy. If there was not created node representing this hierarchy, None is returned. Arguments: hierarchy - hierarchy list """ node = self.hierarchy_obj for field in hierarchy: if not node.has_child(field): return None node = node.get_child(field) return node def get_full_id(self, hierarchy): """Returns id for row with specified hierarchy. If there is no node representing such a hierarchy, HierarchyError is thrown. Arguments: hierarchy -- hierarchy list """ id_list = [] node = self.hierarchy_obj for field in hierarchy: if not node.has_child(field): raise HierarchyError('Can not create full id for hierarchy %s' % hierarchy) node = node.get_child(field) id_list.append( str(node.get_id()) ) return '-'.join(id_list) def fill_summable_values(self): """Fills summable columns in added hierarchy rows.""" summable_cols = self.summable[:] for i in range(len(summable_cols)): for col_nr in self.delete_order: if col_nr < summable_cols[i]: summable_cols[i] -= 1 if self.add_id: summable_cols[i] += 3 else: summable_cols[i] += 2 summable_cols_types = [self.fields[i]['type'] for i in self.summable] rows_dict = {} i = -1 for row in self.modified_rows: i += 1 # omitting header if i == 0: continue id = row[0] rows_dict[id] = row parent_id = self.get_parent_id(id) while parent_id: parent_row = rows_dict[parent_id] j = 0 for col_nr in summable_cols: value = row[col_nr] type = summable_cols_types[j] if parent_row[col_nr] == '': parent_row[col_nr] = 0 if value == '': continue if type == 'int': parent_row[col_nr] += int(value) elif type == 'float' and value != '': commas_in_field = value.count(',') dots_in_field = value.count('.') if commas_in_field > 0: if dots_in_field > 0: parent_row[col_nr] += float( value.replace(',', '', commas_in_field) ) else: value = value.replace(',', '', commas_in_field - 1) parent_row[col_nr] += float( value.replace(',', '.') ) j += 1 parent_id = self.get_parent_id(parent_id) def get_parent_id(self, id): """Returns id of parent of row with given id. If this row has no parent, None is returned. Parameters: id -- id of child """ if id.count('-') == 0: return None return id.rsplit('-', 1)[0] class HierarchyNode: """Helper class used to remember ids of hierarchy elements.""" def __init__(self, id): """Initiates object. 
Arguments: id -- id of this node(integer) """ self.id = id self.children = {} self.last_child_id = 0 def add_child(self, node, key): """Adds a child node to the list of children of this node. Inserts it under specified key. Arguments: node -- child node key -- id connected with child node """ self.children[key] = node self.last_child_id += 1 def get_child(self, key): """Returns child node with given id. If there is no node with this id, None is returned. Arguments: key -- id connected with child node """ if self.has_child(key): return self.children[key] return None def has_child(self, key): """Returns True, if there is node connected with value key, otherwise False. Arguments: key -- id connected with child node """ return key in self.children def get_new_child_id(self): """Returns id of next child.""" return self.last_child_id + 1 def get_id(self): """Returns id of this node.""" return self.id class HierarchyError(Exception): """Class representing errors which happen during processing hierarchy in data. """ def __init__(self, log): """Initiates object. Arguments: log -- error log """ self.log = log def __str__(self): """Returns string representation of error.""" return repr(self.log) class TerytIdGenerator: """Class creating TERYT codes.""" def __init__(self, data): """Initiates this object using data from file with TERYT codes. Arguments: data -- csv data of file with TERYT codes """ self.codes = {} self.errors = [] row = data.get_next_row() while row: type = row['Nazwa typu jednostki'] name = unicode(row['Nazwa']).lower() full_code = row['TERYT'] woj_code = full_code[:2] pow_code = full_code[2:4] gm_code = full_code[4:6] if self.is_type_ignored(type): row = data.get_next_row() continue if self.is_wojewodztwo(type): self.codes[name] = {'id': woj_code, 'name': name, 'powiats': {}} last_woj = self.codes[name] last_woj_code = woj_code elif self.is_powiat(type): new_pow_dict = {'id': pow_code, 'name': name, 'gminas': {}} if woj_code == last_woj_code: last_woj['powiats'][name] = new_pow_dict else: woj = self.get_teryt_object(woj_code) if woj is None: self.errors.append('Error: unknown województwo, code=%s' % woj_code) print 'Error: unknown województwo, code=', woj_code row = data.get_next_row() continue woj['powiats'][name] = new_pow_dict last_pow = new_pow_dict last_pow_code = pow_code elif self.is_gmina(type): new_gm_dict = {'id': gm_code, 'name': name} if woj_code == last_woj_code and pow_code == last_pow_code: last_pow['gminas'][name] = new_gm_dict else: pow = self.get_teryt_object(woj_code, pow_code) if pow is None: self.errors.append('Error: unknown powiat, code=%s' % pow_code) print 'Error: unknown powiat, code=', pow_code row = data.get_next_row() continue pow['gminas'][name] = new_gm_dict else: self.errors.append('Error: unknown unit type: %s' % type) print 'Error: unknown unit type:', type row = data.get_next_row() def is_wojewodztwo(self, type): return type == 'województwo'.decode('utf-8') def is_powiat(self, type): return type in ['powiat'.decode('utf-8'), 'miasto na prawach powiatu'.decode('utf-8'), 'miasto stołeczne, na prawach powiatu'.decode('utf-8')] def is_gmina(self, type): return type in ['gmina miejska'.decode('utf-8'), 'gmina wiejska'.decode('utf-8'), 'gmina miejsko-wiejska'.decode('utf-8'), 'dzielnica'.decode('utf-8'), 'delegatura'.decode('utf-8'), 'gmina miejska, miasto stołeczne'.decode('utf-8')] def is_type_ignored(self, type): return type in ['miasto', 'obszar wiejski'] def get_teryt_object(self, woj_code, pow_code=None, gm_code=None): """Returns dict representing 
teritorial unit which code is woj_code[ + pow_code[ + gm_code]]. If such a unit cannot be found, None is returned. Arguments: woj_code -- code of unit's wojewodztwo pow_code -- code of unit's powiat gm_code -- code of unit's gmina """ woj_dict = None for woj in self.codes: if woj['id'] == woj_code: woj_dict = woj last_dict = woj_dict break if woj_dict is None: return None if pow_code: pow_dict = None for pow in woj_dict['powiats']: if pow['id'] == pow_code: pow_dict = pow last_dict = pow_dict break if pow_dict is None: return None if gm_code: for gm in pow_dict: if gm['id'] == gm_code: gm_dict = gm last_dict = gm_dict break if gm_dict is None: return None return last_dict def get_teryt_name(self, code): """Returns name of teritorial unit which code is woj_code[ + pow_code[ + gm_code]]. If such a unit cannot be found, None is returned. Arguments: code -- unit's TERYT code """ woj_code = code[:2] if len(code) > 3: pow_code = code[2:4] if len(code) > 5: gm_code = code[4:6] teryt_object_dict = self.get_teryt_object(woj_code, pow_code, gm_code) try: return teryt_object_dict['name'] except TypeError: return None def get_teryt_id(self, hierarchy): """Returns teryt id of teritorial unit represented by hierarchy. Letters in hierarchy strings are lowercased and changed so that they could be in the same form as they are expected. If such a unit can not be found, returns None. Arguments: hierarchy -- list containing name of unit's wojewodztwo, powiat(optionally) and gmina(optionally) """ modified_hierarchy = [unicode(name).lower() for name in hierarchy] woj_name = modified_hierarchy[0] pow_name, gm_name = None, None if len(modified_hierarchy) > 1: pow_name = self.correct_powiat_name(modified_hierarchy[1]) if len(modified_hierarchy) > 2: gm_name = self.correct_gmina_name(modified_hierarchy[2]) tmp_obj = self.codes try: tmp_obj = tmp_obj[woj_name] except KeyError: return None if pow_name: try: tmp_obj = tmp_obj['powiats'][pow_name] except KeyError: return None if gm_name: try: tmp_obj = tmp_obj['gminas'][gm_name] except KeyError: return None return tmp_obj['id'] def add_teryt_unit(self, hierarchy): """ Add new teritorial unit. If it exists in actual hierarchy, nothing will happen. Otherwise, this unit will be placed in hierarchy. Arguments: hierarchy -- hierarchy of new teritorial unit """ modified_hierarchy = [unicode(name).lower() for name in hierarchy] if len(modified_hierarchy) > 1: modified_hierarchy[1] = self.correct_powiat_name(modified_hierarchy[1]) if len(modified_hierarchy) > 2: modified_hierarchy[2] = self.correct_gmina_name(modified_hierarchy[2]) if self.get_teryt_id(modified_hierarchy): return tmp_obj = self.codes i = 0 for field in modified_hierarchy: if field in tmp_obj: if i == 0: tmp_obj = tmp_obj[field]['powiats'] else: tmp_obj = tmp_obj[field]['gminas'] else: if i == 0: id = self.find_highest_id(tmp_obj) + 1 tmp_obj[field] = {'id': str(id), 'name': field, 'powiats': {}} elif i == 1: id = self.find_highest_id(tmp_obj['powiats']) + 1 tmp_obj[field] = {'id': str(id), 'name': field, 'gminas': {}} elif i == 2: id = self.find_highest_id(tmp_obj['gminas']) + 1 tmp_obj[field] = {'id': str(id), 'name': field} i += 1 def find_highest_id(self, objects): """Returns highest id of objects in list. Argument: objects -- list of objects that have id value """ highest_id = 0 for obj in objects: id = int(objects[obj]['id']) if id > highest_id: highest_id = id return highest_id def correct_powiat_name(self, full_name): """Returns only powiat's name, without 'powiat' part, 'm. st.' 
Arguments: full_name -- full name of powiat """ short_name = full_name if 'powiat' in short_name: short_name = short_name.lstrip('powiat') if short_name[0] == ' ': short_name = short_name[1:] if short_name.startswith('m.'): short_name = short_name.lstrip('m.') if short_name[0] == ' ': short_name = short_name[1:] if short_name.startswith('st.'): short_name = short_name.lstrip('st.') if short_name[0] == ' ': short_name = short_name[1:] return short_name def correct_gmina_name(self, full_name): """Returns only gmina's name, without 'm.' part. Arguments: full_name -- full name of gmina """ short_name = full_name if 'gmina' in short_name: short_name = short_name.lstrip('gmina') if short_name[0] == ' ': short_name = short_name[1:] if short_name.startswith('m.'): short_name = short_name.lstrip('m.') if short_name[0] == ' ': short_name = short_name[1:] if short_name.startswith('st.'): short_name = short_name.lstrip('st.') if short_name[0] == ' ': short_name = short_name[1:] if short_name.endswith(' - miasto'): short_name = short_name.replace(' - miasto' , '') if short_name == 'święta katarzyna'.decode('utf-8'): print 'Zamiana' short_name = 'siechnice' if short_name == 'rejowec': print 'Zamiana' short_name = 'rejowiec' return short_name
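To make the expected inputs concrete, here is a hedged usage sketch. SimpleCsvData is a stand-in for the real csv_data object that HierarchyInserter expects (it only needs get_header() and get_next_row()), and the column layout, labels and amounts below are invented for illustration.

class SimpleCsvData(object):
    """Minimal stand-in for the csv_data object HierarchyInserter expects."""
    def __init__(self, header, rows):
        self.header = header
        self.rows = iter(rows)

    def get_header(self):
        return self.header

    def get_next_row(self, row_type='list'):
        try:
            return next(self.rows)
        except StopIteration:
            return None


hierarchy_def = {
    'columns': [0, 1],            # wojewodztwo and powiat columns
    'field_type_label': 'type',
    'field_name_label': 'name',
    'name_column': 2,             # the 'task' column becomes the row name
    'lowest_type': 'task',
    'summable': [3],              # the 'amount' column is summed up the tree
}
schema_descr = {'fields': [{'type': 'string'}, {'type': 'string'},
                           {'type': 'string'}, {'type': 'int'}]}

data = SimpleCsvData(
    ['wojewodztwo', 'powiat', 'task', 'amount'],
    [['Mazowieckie', 'Warszawa', 'Roads', '100'],
     ['Mazowieckie', 'Warszawa', 'Schools', '50']])

inserter = HierarchyInserter(data, hierarchy_def, schema_descr, add_id=True)
inserter.insert_hierarchy()
if inserter.all_rows_correct():
    for row in inserter.get_modified_rows():
        print row
else:
    print inserter.get_hierarchy_errors_log()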
When former President George H. W. Bush is buried this week, the US Navy Blue Angels will be with him. On his socks. The 41st President was well known for his love of colorful socks, often using them to make a statement or support a cause. Bush’s spokesman Jim McGrath shared this image on Twitter of the final pair of socks, which feature the iconic Blue Angel delta formation, with smoke on, against a blue sky. Also featured are the Wings of Gold that signify a naval aviator. It’s entirely fitting that Bush will represent the Navy in his grave. After all, he flew 58 combat missions in TBM Avenger torpedo bombers for the US Navy in World War II. He was shot down during an attack mission in 1944 and rescued by a submarine. The final Nimitz-class aircraft carrier to enter Navy service, CVN-77, is named for him. He was made an Honorary Blue Angel in 2014. Aviation continued to play a part in Bush’s life until the very end. The code-word used by family and friends to privately share news of his death was “CAVU”, an aviation acronym for ‘ceiling and visibility unlimited’ that signifies weather that presents no restrictions on flying. Bush’s legacy as a naval aviator will live on; at least two civilian-owned TBM Avengers bear his name as part of their paint schemes.
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting model 'Venues' db.delete_table(u'venues_venues') # Adding model 'Facility' db.create_table(u'venues_facility', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=30)), )) db.send_create_signal(u'venues', ['Facility']) # Adding model 'Venue' db.create_table(u'venues_venue', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=30)), ('website', self.gf('django.db.models.fields.CharField')(max_length=50)), ('address', self.gf('django.db.models.fields.CharField')(max_length=200)), ('town', self.gf('django.db.models.fields.CharField')(max_length=30)), ('postcode', self.gf('django.db.models.fields.CharField')(max_length=10)), )) db.send_create_signal(u'venues', ['Venue']) # Adding M2M table for field facilities on 'Venue' m2m_table_name = db.shorten_name(u'venues_venue_facilities') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('venue', models.ForeignKey(orm[u'venues.venue'], null=False)), ('facility', models.ForeignKey(orm[u'venues.facility'], null=False)) )) db.create_unique(m2m_table_name, ['venue_id', 'facility_id']) def backwards(self, orm): # Adding model 'Venues' db.create_table(u'venues_venues', ( ('town', self.gf('django.db.models.fields.CharField')(max_length=30)), ('website', self.gf('django.db.models.fields.CharField')(max_length=50)), ('name', self.gf('django.db.models.fields.CharField')(max_length=30)), ('postcode', self.gf('django.db.models.fields.CharField')(max_length=10)), ('address', self.gf('django.db.models.fields.CharField')(max_length=200)), (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), )) db.send_create_signal(u'venues', ['Venues']) # Deleting model 'Facility' db.delete_table(u'venues_facility') # Deleting model 'Venue' db.delete_table(u'venues_venue') # Removing M2M table for field facilities on 'Venue' db.delete_table(db.shorten_name(u'venues_venue_facilities')) models = { u'venues.facility': { 'Meta': {'object_name': 'Facility'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'venues.venue': { 'Meta': {'object_name': 'Venue'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'town': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'website': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } complete_apps = ['venues']
The third issue of Pop To... explores our humble tavern, from the decor to the regulars. Pull up a chair, settle in for the night, and we'll bring you a tipple or two. Ft. Kirstie Fraser, Thomas McColl, Liz Wride, Hunter Gardner, Sam Bradley, Marc Gijsemans, James Tennent, Rebecca Wright, Max Sparber, and Lauren Pinnington. Buy all three issues for £12; the supermarket, the swimming pool, and the pub. UK only.
# !/usr/bin/python # -*- coding: utf-8 -*- from flask import current_app as app from flask_script import Command, Option from project.user.models.user import User from project.user.services.delete_user_service import DeleteUserService class DeleteUserCommand(Command): """Deletes user by giving username""" def __init__(self): super(DeleteUserCommand, self).__init__() self.username = None def get_options(self): return [ Option('-u', '--username', dest='username', default=self.username), ] def run(self, **kwargs): app.logger.info("Running {} with arguments {}".format(self.__class__.__name__, kwargs)) self.__dict__.update(**kwargs) # update self's with kwargs try: user = User._objects.get(username=self.username) DeleteUserService(user).call() app.logger.info("User \"{}\" was successfully deleted!".format(self.username)) except Exception as e: app.logger.error("Something went wrong :s. {}".format(e))
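A sketch of how this command is usually registered with a Flask-Script manager; the create_app factory and the manage.py wiring are assumptions for illustration, not part of the module above.

from flask_script import Manager
from project.app import create_app   # assumed application factory

manager = Manager(create_app)
manager.add_command('delete_user', DeleteUserCommand())

if __name__ == '__main__':
    # Example invocation: python manage.py delete_user -u alice
    manager.run()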
Journal "prestige" has nothing to do with research quality. Are you a #maker, #DIY-er #openscience enthusiast? Do you believe research tools be #opensource ? Apply now to make this happen! https://t.co/bMvXAgWmch.
""" Utility to check if results have changed in foreign APIs. """ import glob import datetime from os.path import realpath, join, dirname, exists, expanduser import hashlib import pickle import pandas as pd import numpy as np from pliers.stimuli import load_stims from pliers.transformers import get_transformer def hash_data(data, blocksize=65536): """" Hashes list of data, strings or data """ data = pickle.dumps(data) hasher = hashlib.sha1() hasher.update(data) return hasher.hexdigest() def check_updates(transformers, datastore=None, stimuli=None): """ Run transformers through a battery of stimuli, and check if output has changed. Store results in csv file for comparison. Args: transformers (list): A list of tuples of transformer names and dictionary of parameters to instantiate with (or empty dict). datastore (str): Filepath of CSV file with results. Stored in home dir by default. stimuli (list): List of stimuli file paths to extract from. If None, use test data. """ # Find datastore file datastore = datastore or expanduser('~/.pliers_updates') prior_data = pd.read_csv(datastore) if exists(datastore) else None # Load stimuli stimuli = stimuli or glob.glob( join(dirname(realpath(__file__)), '../tests/data/image/CC0/*')) stimuli = load_stims(stimuli) # Get transformers loaded_transformers = {get_transformer(name, **params): (name, params) for name, params in transformers} # Transform stimuli results = pd.DataFrame({'time_extracted': [datetime.datetime.now()]}) for trans in loaded_transformers.keys(): for stim in stimuli: if trans._stim_matches_input_types(stim): res = trans.transform(stim) try: # Add iterable res = [getattr(res, '_data', res.data) for r in res] except TypeError: res = getattr(res, '_data', res.data) res = hash_data(res) results["{}.{}".format(trans.__hash__(), stim.name)] = [res] # Check for mismatches mismatches = [] if prior_data is not None: last = prior_data[ prior_data.time_extracted == prior_data.time_extracted.max()]. \ iloc[0].drop('time_extracted') for label, value in results.iteritems(): old = last.get(label) new = value.values[0] if old is not None: if isinstance(new, str): if new != old: mismatches.append(label) elif not np.isclose(old, new): mismatches.append(label) results = prior_data.append(results) results.to_csv(datastore, index=False) # Get corresponding transformer name and parameters def get_trans(hash_tr): for obj, attr in loaded_transformers.items(): if str(obj.__hash__()) == hash_tr: return attr delta_t = {m.split('.')[0] for m in mismatches} delta_t = [get_trans(dt) for dt in delta_t] return {'transformers': delta_t, 'mismatches': mismatches}
The following guidelines are given to ICTM National/Regional Representatives and ICTM Study Group Chairs to facilitate the publication of their submissions in the Bulletin of the ICTM. Write your submissions (reports, announcements, calls for participation, etc.) in English, and italicize any foreign terms (and/or translate, if appropriate). Limit your submissions for the Announcements and Reports sections to a maximum of 1,500 words (including bibliographical citations). Please include at least one image to illustrate your submissions. You are welcome to send more images, but it may not be possible to include them all. However, all images will be uploaded to the ICTM Online Picture Gallery in due course. When formatting bibliographic citations, please adhere to either the Chicago Manual of Style or the Turabian Citation Guide. Do not submit scholarly articles to the Bulletin, but do consider submitting them to the Yearbook for Traditional Music. Only Chairs of ICTM Study Groups and ICTM National/Regional Representatives (e.g., appointed Liaison Officers and/or Chairs of National Committees) are eligible to submit materials to the Bulletin. Only submit images for which you hold copyright, or that are specifically protected by an attribution, non-commercial, share-alike license (such as Creative Commons BY-NC-SA 3.0) or equivalent. Submissions to the Bulletin will be generally distributed between three sections: Announcements, Reports, and Featured Publications by ICTM Members. The Announcements section of the Bulletin is reserved for upcoming events (i.e., occurring after the Bulletin’s publication date). Calls for participation, preliminary symposium programmes, and other news about future events belong here. All types of ICTM-related announcements are also welcome on the ICTM mailing list. This section is further divided in two subsections: Announcements – ICTM and Announcements – Related Organisations. As the Bulletin is primarily a means for communicating ICTM information, the inclusion of announcements from other organisations will only be considered if time and space allow it. Announcements having deadlines occurring before the Bulletin’s publication date will not be accepted. To maximize visibility online, calls for participation, preliminary programmes, and other announcements are not published in the Bulletin in full, but are abridged and linked to their respective full versions online. Please submit only the URL of the full announcement’s webpage, which the Bulletin will link to. Submissions for this section should be written in either the third person (‘The Study Group welcomes…’) or first person plural (‘We invite all participants...’). The Reports section of the Bulletin is reserved for information about events that have already taken place (and more specifically, taken place since the previous report). Reports on symposia from Study Groups, activity reports from National/Regional Representatives, minutes of past meetings, results from Study Group elections, and any other type of reporting of events which happened in the past belong here. This section is further divided in three subsections: Reports from ICTM National and Regional Representatives, Reports from ICTM Study Groups, and Other Reports. The last of these subsections is reserved for reports written by ICTM members serving on international bodies with which the Council has special relationships and/or interests, such as UNESCO and RILM. 
Reports from National and Regional Representatives should focus on events from the fields of ethnomusicology and/or ethnochoreology within a particular country or region that have occurred since the last report. They should not list the activities of a single institution or individual. Likewise, Study Group reports should relate the activities of the Study Group as a whole, not list members’ activities that are unrelated to the Study Group. National and Regional Representatives should submit their reports at least once every three years, but not more often than once a year. All reports included in the Bulletin have a byline (e.g., ‘by Jane Doe, Chair of National Committee’) and a photo of the author. Please include this photo with your report. Submissions for this section should be written in the third person (‘In April, the National Committee met …’). Reports from Study Groups should not include in their body text the minutes of Study Group meetings. Instead, minutes should be published on the Study Group’s website first, and then linked from the report. Reports should not include lists of publications in their body text. Instead, lists of publications should be published on the respective website of the Country/Region/Study Group first, and then linked from the report. Direct hyperlink to purchase/download page (preferred), or e-mail address of publisher/editor. For example, according to the guidelines listed above, if you would like to submit a call for participation, a country report, and two featured publications, please divide your submission accordingly (e.g., with page breaks, subtitles, or any other dividers). All submissions should be sent via e-mail to the Editor ([email protected]), in any of the following formats: Apple Pages, Microsoft Word, Rich Text Format (RTF), OpenOffice.org (ODT). 15 September for the October issue. Adopted: 18 August 2013. Latest revision: 17 October 2017.
""" IO related functions. :requires: PyFITS :requires: NumPy :author: Sami-Matias Niemi """ import datetime import pickle import os import numpy as np from pdb import set_trace as stop def cPickleDumpDictionary(dictionary, output, protocol=2): """ Dumps a dictionary of data to a cPickled file. :param dictionary: a Python data container does not have to be a dictionary :param output: name of the output file :return: None """ out = open(output, 'wb') pickle.dump(dictionary, out, protocol=protocol) out.close() def cPickleRead(ffile): """ Loads data from a pickled file. """ with open(ffile, 'rb') as f: try: inpick = pickle.load(f) except UnicodeDecodeError: inpick = pickle.load(f, encoding='latin1') f.close() return inpick def cPickleDump(data, output, protocol=2): """ Dumps data to a cPickled file. :param data: a Python data container :param output: name of the output file :return: None """ out = open(output, 'wb') pickle.dump(data, out, protocol=protocol) out.close() def convert_fig_to_eps(figname): """Converts a figure to .eps. Returns new file name.""" root = os.path.splitext(figname) epsname = '%s.eps' % root os.system('convert %s %s' % (figname, epsname)) return epsname def test(): data = ['a'] picklef = 'test_cpickle.pick' cPickleDump(data, picklef) data = cPickleRead(picklef) stop() if __name__ == '__main__': test()
Record 80’ is proof of how a “racing machine” with all of its technical features and linear design, can be transformed into a comfortable cruising yacht. The firm was called on to design the deck and furnishings, starting with the existing hull structure in kevlar and carbon, designed by Sandro Buzzi to break records in endurance races. Record 80’ is one of the few examples of composite technology applied to motor-powered vessels. Vismara has transferred its own design style and more precisely, experience with the latest custom builds with structural furnishings, to fit out this motor yacht for the highest performance, with self-supporting units or units able to withstand the acceleration force, 6 times that of gravity, which this vessel with its 70 knot speed is able to reach. The deck and the stern section have been modified to create living areas and a sun deck as well as a compartment to house the tender in the stern. The helm area has also been redesigned, without altering the technical and instrumentation part and therefore, integrating the new dinette/galley area with existing structures. In the same way that all of the interiors use the existing composite bulkheads to divide spaces, new furnishing elements are structurally integrated or bound to the hull and bulkheads. For styling to be correct and in line with the craft, exposed composite materials such as carbon or Kevlar were used, going back to concepts and techniques previously adopted on the Vismara 43 Open “Koala Hi-Tech”, whose “racer” version suggested the idea of using composite material as a furnishing elements. Lightness, simplicity and luminosity are characteristics of this project, achieved and enhanced with colours including metallic paint and technical upholstery.
# -*- coding: utf-8 -*- VERSION = "1.5.0" #should keep up with the counterwallet version it works with (for now at least) DB_VERSION = 22 #a db version increment will cause counterblockd to rebuild its database off of counterpartyd CAUGHT_UP = False #atomic state variable, set to True when counterpartyd AND counterblockd are caught up UNIT = 100000000 SUBDIR_ASSET_IMAGES = "asset_img" #goes under the data dir and stores retrieved asset images SUBDIR_FEED_IMAGES = "feed_img" #goes under the data dir and stores retrieved feed images MARKET_PRICE_DERIVE_NUM_POINTS = 8 #number of last trades over which to derive the market price (via WVAP) # FROM counterpartyd # NOTE: These constants must match those in counterpartyd/lib/config.py REGULAR_DUST_SIZE = 5430 MULTISIG_DUST_SIZE = 5430 * 2 ORDER_BTC_DUST_LIMIT_CUTOFF = MULTISIG_DUST_SIZE mongo_db = None #will be set on server init BTC = 'BTC' XCP = 'XCP' MAX_REORG_NUM_BLOCKS = 10 #max reorg we'd likely ever see ARMORY_UTXSVR_PORT_MAINNET = 6590 ARMORY_UTXSVR_PORT_TESTNET = 6591 QUOTE_ASSETS = ['BTC', 'XBTC', 'XCP'] # define the priority for quote asset MARKET_LIST_QUOTE_ASSETS = ['XCP', 'XBTC', 'BTC'] # define the order in the market list
After I finished The Fellowship of the Ring I started reading The Two Towers. I also went out and bought seven books of Middle-earth that I plan on reading after I am done with The Return of the King, which I am reading now. The Two Towers is a perfect example of why I chose to read The Lord of the Rings. There are so many differences between the books and the movie adaptations, and it's always nice to know how much a director will change and for what reasons. In the book, Samwise and Frodo only have small disagreements over Smeagol. Sam never left Frodo, and they went on to Shelob's lair together. In the movie, Gollum separates Sam and Frodo by accusing Sam of eating all of their lembas bread. Sam turns back to go home, then finds the bread on the stairs down below. He gets angry and returns to save his master from Shelob the spider. I think this was a wise choice on Peter Jackson's part. It really set up a story of loyalty and unselfishness, and it made Sam all the more an ordinary hero. We also have a character like Faramir. In the movie he is at one point tempted by the ring. He decides to take Frodo and Sam to Minas Tirith to see his father. He has a change of heart when he hears Frodo pull his sword on his friend Sam, who is trying to save him. Faramir realizes the ring can be held by no one and sends Sam and Frodo on their quest. In the book, Faramir has the chance to see the ring yet never desires to take it to his father. The book shows him as a smart and understanding person. His brother may have been the great warrior, but Faramir shows his true qualities. I'll read them all when I get back! Haha! I have so much to read and do already for my mission!
# -*- coding: utf-8 -*- # # codimension - graphics python two-way code editor and analyzer # Copyright (C) 2017 Sergey Satskiy <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # The original code is taken from qutepart project and adopted for Codimension # See https://github.com/andreikop/qutepart # """Line numbers margin""" from ui.qt import QWidget, QPainter, Qt from qutepart.margins import MarginBase from utils.misc import extendInstance from utils.globals import GlobalData from utils.colorfont import getZoomedMarginFont class CDMLineNumberMargin(QWidget): """Line number area widget""" _LEFT_MARGIN = 5 _RIGHT_MARGIN = 3 def __init__(self, parent): QWidget.__init__(self, parent) extendInstance(self, MarginBase) MarginBase.__init__(self, parent, 'cdm_line_number_margin', 0) self.__bgColor = GlobalData().skin['marginPaper'] self.__fgColor = GlobalData().skin['marginColor'] self.__width = self.__calculateWidth() self.onTextZoomChanged() # The width needs to be re-calculated when the margin is drawn the # first time. The problem is that if the widget is not on the screen # then the font metrics are not calculated properly and thus the width # is not shown right. What I observed is an offset up to 2 pixels. self.__firstTime = True self._qpart.blockCountChanged.connect(self.__updateWidth) # Arguments: newBlockCount def __updateWidth(self, _=None): """Updates the margin width""" newWidth = self.__calculateWidth() if newWidth != self.__width: self.__width = newWidth self._qpart.updateViewport() def paintEvent(self, event): """Paints the margin""" if self.__firstTime: self.__updateWidth() self.__firstTime = False painter = QPainter(self) painter.fillRect(event.rect(), self.__bgColor) painter.setPen(self.__fgColor) block = self._qpart.firstVisibleBlock() blockNumber = block.blockNumber() top = int(self._qpart.blockBoundingGeometry(block). 
translated(self._qpart.contentOffset()).top()) bottom = top + int(self._qpart.blockBoundingRect(block).height()) boundingRect = self._qpart.blockBoundingRect(block) availableWidth = self.__width - self._RIGHT_MARGIN - self._LEFT_MARGIN # The margin font could be smaller than the main area font topShift = int((self._qpart.fontMetrics().height() - self.fontMetrics().height()) / 2) if topShift < 0: topShift = 0 availableHeight = self._qpart.fontMetrics().height() while block.isValid() and top <= event.rect().bottom(): if block.isVisible() and bottom >= event.rect().top(): number = str(blockNumber + 1) painter.drawText(self._LEFT_MARGIN, top + topShift, availableWidth, availableHeight, Qt.AlignRight, number) block = block.next() boundingRect = self._qpart.blockBoundingRect(block) top = bottom bottom = top + int(boundingRect.height()) blockNumber += 1 def __calculateWidth(self): """Calculates the margin width""" digits = len(str(max(1, self._qpart.blockCount()))) digitsWidth = self.fontMetrics().width('9') * digits return self._LEFT_MARGIN + digitsWidth + self._RIGHT_MARGIN def width(self): """Desired width. Includes text and margins""" return self.__width def setFont(self, font): """Overloaded to adjust the width if needed""" QWidget.setFont(self, font) self.__updateWidth() def setBackgroundColor(self, color): """Sets the new background color""" if self.__bgColor != color: self.__bgColor = color self.update() def setForegroundColor(self, color): """Sets the new foreground color""" if self.__fgColor != color: self.__fgColor = color self.update() def onTextZoomChanged(self): """Triggered when a zoom has been changed""" self.setFont(getZoomedMarginFont()) def onClose(self): """The editor is going to be closed""" self._qpart.blockCountChanged.disconnect(self.__updateWidth) # Release the editor reference self._qpart = None
Personal information is information that identifies you and that may be used to contact you online or offline. We collect personal information from you on a voluntary basis as described below. Our primary goals in collecting and using information are to create your account, provide Services to you, improve our Services, contact you, conduct research and create reports for internal use and for use with our partners. Contact information (such as name and email address). Your messages to the Products (such as chat logs, player support tickets, your posts on our forum). You should be aware that any visitor to the game forums may read your postings on the forums. Any information that you may post to the game forum and/or the game chat, including your username or other account profile information, will be disclosed and available to all users and viewers of the forum or the chat, and will therefore no longer be private. We also cannot guarantee the security of such information when it is disclosed in a private message to a forum user or a player inside the game, because in this case you transfer your information to a third party, and you acknowledge that you provide such information at your own risk. We also may create a specific ID for you when you use the Products. Data from platforms that the game runs on and from payment systems, which is necessary to verify payments. To find out more about the exact data that we receive from such partners, please visit the websites of the partners that you use to make payments in the game. Data for advertising and analytics purposes, as well as crash reports (including game account IDs) transmitted to Google's Firebase Crashlytics service. We may use the email address you provide to us to contact you with newsletters, marketing, or promotional materials. You can easily opt out of receiving these communications from us by following the unsubscribe link provided in any email we send. 2.3. To analyze, profile, and segment. We provide some social features in our Products, such as game chat, communication on our forums and some others. Other players and users may, for example, see your profile data and some in-game activities, and read the messages you post in the game and game forums. We engage partners to perform services for us that allow us to provide better products for you. These partners process your data according to their privacy policies and AIGRIND's instructions for providing services, such as hosting, player support, payment processing, advertising, analytics and fraud prevention. We may also disclose your data to comply with the law or to protect the rights, property or safety of us, our players or others. The Products include features from our partners, such as social media interaction tools and in-game advertising. These partners may access your data and operate under their own privacy policies. We encourage you to check their privacy policies to learn more about their data processing practices. Below you can find a list of our analytics and ad serving partners. For more information about their privacy practices regarding analytics and targeted advertising, please visit the links below. We may also share your information in connection with any merger, sale of our assets, or financing or acquisition of all or a portion of our business by another company. Our Products are global by nature and your data can therefore be transferred anywhere in the world. Different countries may have data protection and/or privacy laws that differ from those of your own country.
We take steps to ensure adequate safeguards are in place to protect your data as explained in this Policy. However, by downloading or using our Products, you consent to having your information transferred and processed as described herein. The security, integrity and confidentiality of your information are extremely important to us. We have implemented technical, administrative and physical security measures that are designed to protect your information from unauthorized access, disclosure, use and modification. From time to time, we review our security procedures to consider appropriate new technology and methods. You can opt out of interest-based advertising on mobile applications by checking the privacy settings of your Android or iOS device. For more information on ad serving companies and how to opt out of their targeted advertising, please visit http://www.networkadvertising.org/choices/. You have the right to see what personally identifiable information we hold about you if you request it. If your request is particularly complex or requires detailed searching of our records, there may be a cost to you in order for us to provide you with this information. You also have the right to correct your data, have your data deleted, and object to how we use or share your data. You can always withdraw your consent. We will respond to all requests within a reasonable timeframe. If you wish to review, delete and/or revise the personal information we have stored about you, you may send your request through the game client under your account from the in-game menu "My Profile - Private Data", or contact us at [email protected] using the e-mail address that is connected to your game account. We will respond to your request within thirty (30) days. Note that if you ask us to remove your personal data, we will retain your data as necessary for our legitimate business interests, such as to comply with our legal obligations, resolve disputes, enforce our agreements, and for accounting purposes. We do not knowingly collect or solicit personal data about, or direct or target interest-based advertising to, anyone under the age of 13, or knowingly allow such persons to use our Products. If you are under 13, please do not send any data about yourself to us, including your name, address, telephone number, or email address. No one under the age of 13 may provide any personal data. If we learn that we have collected personal data about a child under age 13, we will delete that data as quickly as possible. If you are between the ages of 13 and 16, please obtain your parents' permission prior to registering in our Products or providing us with any personal information.
"""A conversion module for googletrans""" import json import re def build_params(query, src, dest, token, override): params = { 'client': 'webapp', 'sl': src, 'tl': dest, 'hl': dest, 'dt': ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'], 'ie': 'UTF-8', 'oe': 'UTF-8', 'otf': 1, 'ssel': 0, 'tsel': 0, 'tk': token, 'q': query, } if override is not None: for key, value in get_items(override): params[key] = value return params def legacy_format_json(original): # save state states = [] text = original # save position for double-quoted texts for i, pos in enumerate(re.finditer('"', text)): # pos.start() is a double-quote p = pos.start() + 1 if i % 2 == 0: nxt = text.find('"', p) states.append((p, text[p:nxt])) # replace all wiered characters in text while text.find(',,') > -1: text = text.replace(',,', ',null,') while text.find('[,') > -1: text = text.replace('[,', '[null,') # recover state for i, pos in enumerate(re.finditer('"', text)): p = pos.start() + 1 if i % 2 == 0: j = int(i / 2) nxt = text.find('"', p) # replacing a portion of a string # use slicing to extract those parts of the original string to be kept text = text[:p] + states[j][1] + text[nxt:] converted = json.loads(text) return converted def get_items(dict_object): for key in dict_object: yield key, dict_object[key] def format_json(original): try: converted = json.loads(original) except ValueError: converted = legacy_format_json(original) return converted def rshift(val, n): """python port for '>>>'(right shift with padding) """ return (val % 0x100000000) >> n
FSPG is run entirely by volunteers. We are dependent on a small and dedicated team of helpers and we welcome new members to this group. Volunteers can be any age from children upwards, and there is a wide variety of ways to contribute, from half an hour now and again to a regular commitment. Organising events with a group of local residents is fun! It's a great way to meet people from the neighbourhood and you get the pleasure of seeing people enjoying something you've participated in. If you don't have much time but would like to help, how about delivering the newsletter to your road four times a year? Kids like helping with this one as well! Are you good at putting up gazebos, pouring teas, marshalling crowds, clearing up rubbish, selling light sticks, making cakes, serving food, running games, selling raffle tickets? If you are, and have a couple of hours to spare, please let us know. Our work encompasses a whole range of activities, including: updating and maintaining the website, Facebook page, membership lists and email accounts; writing newsletters; organising the printing and delivery of newsletters; working with the council officers and councillors; and liaising with the Charity Commission and other agencies. The list goes on, but if you think you would like to work with the committee, please let us know. If you would like to get involved in any of the above, click here to go to the Volunteering Form. If you need to do Community Service for your Duke of Edinburgh Award, please email us at [email protected]. There are many ways we can work together to fulfill this requirement.
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- import base64 import hashlib import hmac import sys from io import (SEEK_SET) from dateutil.tz import tzutc from ._error import ( _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM, ) from .models import ( _unicode_type, ) if sys.version_info < (3,): def _str(value): if isinstance(value, unicode): return value.encode('utf-8') return str(value) else: _str = str def _to_str(value): return _str(value) if value is not None else None def _int_to_str(value): return str(int(value)) if value is not None else None def _bool_to_str(value): if value is None: return None if isinstance(value, bool): if value: return 'true' else: return 'false' return str(value) def _to_utc_datetime(value): return value.strftime('%Y-%m-%dT%H:%M:%SZ') def _datetime_to_utc_string(value): # Azure expects the date value passed in to be UTC. # Azure will always return values as UTC. # If a date is passed in without timezone info, it is assumed to be UTC. if value is None: return None if value.tzinfo: value = value.astimezone(tzutc()) return value.strftime('%a, %d %b %Y %H:%M:%S GMT') def _encode_base64(data): if isinstance(data, _unicode_type): data = data.encode('utf-8') encoded = base64.b64encode(data) return encoded.decode('utf-8') def _decode_base64_to_bytes(data): if isinstance(data, _unicode_type): data = data.encode('utf-8') return base64.b64decode(data) def _decode_base64_to_text(data): decoded_bytes = _decode_base64_to_bytes(data) return decoded_bytes.decode('utf-8') def _sign_string(key, string_to_sign, key_is_base64=True): if key_is_base64: key = _decode_base64_to_bytes(key) else: if isinstance(key, _unicode_type): key = key.encode('utf-8') if isinstance(string_to_sign, _unicode_type): string_to_sign = string_to_sign.encode('utf-8') signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) digest = signed_hmac_sha256.digest() encoded_digest = _encode_base64(digest) return encoded_digest def _get_content_md5(data): md5 = hashlib.md5() if isinstance(data, bytes): md5.update(data) elif hasattr(data, 'read'): pos = 0 try: pos = data.tell() except: pass for chunk in iter(lambda: data.read(4096), b""): md5.update(chunk) try: data.seek(pos, SEEK_SET) except (AttributeError, IOError): raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data')) else: raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data')) return base64.b64encode(md5.digest()).decode('utf-8') def _lower(text): return text.lower()
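The signing helper above is essentially a thin wrapper around HMAC-SHA256 with base64 handling on both ends. The standalone sketch below reproduces what _sign_string computes for a base64-encoded key without importing the module itself; the key and the string to sign are hypothetical placeholders.

# Standalone sketch of the _sign_string() computation above.
# The key and canonical string are hypothetical placeholders.
import base64
import hashlib
import hmac

key_b64 = base64.b64encode(b'secret-key').decode('utf-8')   # hypothetical account key
string_to_sign = 'GET\n\n\napplication/octet-stream\n'      # hypothetical canonical string

key = base64.b64decode(key_b64)                              # key_is_base64=True path
digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
signature = base64.b64encode(digest).decode('utf-8')
print(signature)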
Sheraton Reston Hotel is rated 4.0 out of 5 by 460. Rated 5 out of 5 by jmiller from Business The front desk staff went above and beyond to assist me. I was having problems with my phone service and couldn't reach an Uber in time for my flight. The gentleman behind the desk was super helpful to ensure I made it to the airport. I am very thankful for his kindness. Rated 1 out of 5 by HabtamuBelete from Bad experience Didn't get the room type I reserved and waited more than 2 hours in the middle of the night for a roller bed. Poor employee professionalism. Bad customer service! Rated 5 out of 5 by NoName from Good The staff at the hotel are among the best that I have ever encountered. Rated 5 out of 5 by DaveP from Great Sheraton Great hotel and staff. Really took care of us over the weekend and recognized the Titanium status, unlike most Bonvoy properties over the last few months. Thanks again Sheraton Reston! Rated 5 out of 5 by SatisfiedCustomer from Lovely mix of business and leisure A breath of fresh air. The staff here are so attentive and they took care of my every need. I travel for business a lot and it is a nice surprise to stay in a location that makes you feel like home. Thanks!! Rated 5 out of 5 by Jorge from Good stay It was a good stay. Rooms are good and staff is very friendly and kind.
""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2015 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # prepare for Python 3 from __future__ import absolute_import, division, print_function, unicode_literals import argparse import glob import logging import multiprocessing import os import resource import signal import subprocess import sys import threading import time from . import __version__ from . import util as util from .cgroups import * from . import oomhandler from benchexec import systeminfo read_file = util.read_file write_file = util.write_file _WALLTIME_LIMIT_DEFAULT_OVERHEAD = 30 # seconds more than cputime limit _ULIMIT_DEFAULT_OVERHEAD = 30 # seconds after cgroups cputime limit _BYTE_FACTOR = 1000 # byte in kilobyte try: from subprocess import DEVNULL except ImportError: DEVNULL = open(os.devnull, 'rb') def main(argv=None): """ A simple command-line interface for the runexecutor module of BenchExec. """ if argv is None: argv = sys.argv # parse options parser = argparse.ArgumentParser( fromfile_prefix_chars='@', description= """Execute a command with resource limits and measurements. Command-line parameters can additionally be read from a file if file name prefixed with '@' is given as argument. 
Part of BenchExec: https://github.com/dbeyer/benchexec/""") parser.add_argument("args", nargs="+", metavar="ARG", help='command line to run (prefix with "--" to ensure all arguments are treated correctly)') parser.add_argument("--input", metavar="FILE", help="name of file used as stdin for command (default: /dev/null; use - for stdin passthrough)") parser.add_argument("--output", default="output.log", metavar="FILE", help="name of file where command output is written") parser.add_argument("--maxOutputSize", type=int, metavar="BYTES", help="shrink output file to approximately this size if necessary (by removing lines from the middle of the output)") parser.add_argument("--memlimit", type=int, metavar="BYTES", help="memory limit in bytes") parser.add_argument("--timelimit", type=int, metavar="SECONDS", help="CPU time limit in seconds") parser.add_argument("--softtimelimit", type=int, metavar="SECONDS", help='"soft" CPU time limit in seconds (command will be send the TERM signal at this time)') parser.add_argument("--walltimelimit", type=int, metavar="SECONDS", help='wall time limit in seconds (default is CPU time limit plus a few seconds)') parser.add_argument("--cores", type=util.parse_int_list, metavar="N,M-K", help="list of CPU cores to use") parser.add_argument("--memoryNodes", type=util.parse_int_list, metavar="N,M-K", help="list of memory nodes to use") parser.add_argument("--dir", metavar="DIR", help="working directory for executing the command (default is current directory)") parser.add_argument("--version", action="version", version="%(prog)s " + __version__) verbosity = parser.add_mutually_exclusive_group() verbosity.add_argument("--debug", action="store_true", help="show debug output") verbosity.add_argument("--quiet", action="store_true", help="show only warnings") options = parser.parse_args(argv[1:]) # For integrating into some benchmarking frameworks, # there is a DEPRECATED special mode # where the first and only command-line argument is a serialized dict # with additional options env = {} if len(options.args) == 1 and options.args[0].startswith("{"): data = eval(options.args[0]) options.args = data["args"] env = data.get("env", {}) options.debug = data.get("debug", options.debug) if "maxLogfileSize" in data: options.maxOutputSize = data["maxLogfileSize"] * _BYTE_FACTOR * _BYTE_FACTOR # MB to bytes # setup logging logLevel = logging.INFO if options.debug: logLevel = logging.DEBUG elif options.quiet: logLevel = logging.WARNING logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logLevel) if options.input == '-': stdin = sys.stdin elif options.input is not None: if options.input == options.output: sys.exit("Input and output files cannot be the same.") try: stdin = open(options.input, 'rt') except IOError as e: sys.exit(e) else: stdin = None executor = RunExecutor() # ensure that process gets killed on interrupt/kill signal def signal_handler_kill(signum, frame): executor.stop() signal.signal(signal.SIGTERM, signal_handler_kill) signal.signal(signal.SIGINT, signal_handler_kill) logging.info('Starting command ' + ' '.join(options.args)) logging.info('Writing output to ' + options.output) # actual run execution try: result = executor.execute_run( args=options.args, output_filename=options.output, stdin=stdin, hardtimelimit=options.timelimit, softtimelimit=options.softtimelimit, walltimelimit=options.walltimelimit, cores=options.cores, memlimit=options.memlimit, memory_nodes=options.memoryNodes, environments=env, workingDir=options.dir, 
maxLogfileSize=options.maxOutputSize) finally: if stdin: stdin.close() # exit_code is a special number: # It is a 16bit int of which the lowest 7 bit are the signal number, # and the high byte is the real exit code of the process (here 0). exit_code = result['exitcode'] return_value = exit_code // 256 exitSignal = exit_code % 128 def print_optional_result(key): if key in result: # avoid unicode literals such that the string can be parsed by Python 3.2 print(key + "=" + str(result[key]).replace("'u", '')) # output results print_optional_result('terminationreason') print("exitcode=" + str(exit_code)) if (exitSignal == 0) or (return_value != 0): print("returnvalue=" + str(return_value)) if exitSignal != 0 : print("exitsignal=" + str(exitSignal)) print("walltime=" + str(result['walltime']) + "s") print("cputime=" + str(result['cputime']) + "s") print_optional_result('memory') if 'energy' in result: for key, value in result['energy'].items(): print("energy-{0}={1}".format(key, value)) class RunExecutor(): def __init__(self): self.PROCESS_KILLED = False self.SUB_PROCESSES_LOCK = threading.Lock() # needed, because we kill the process asynchronous self.SUB_PROCESSES = set() self._termination_reason = None self._init_cgroups() def _init_cgroups(self): """ This function initializes the cgroups for the limitations and measurements. """ self.cgroups = find_my_cgroups() self.cgroups.require_subsystem(CPUACCT) if CPUACCT not in self.cgroups: logging.warning('Without cpuacct cgroups, cputime measurement and limit might not work correctly if subprocesses are started.') self.cgroups.require_subsystem(FREEZER) if FREEZER not in self.cgroups: logging.warning('Cannot reliably kill sub-processes without freezer cgroup.') self.cgroups.require_subsystem(MEMORY) if MEMORY not in self.cgroups: logging.warning('Cannot measure memory consumption without memory cgroup.') self.cgroups.require_subsystem(CPUSET) self.cpus = None # to indicate that we cannot limit cores self.memory_nodes = None # to indicate that we cannot limit cores if CPUSET in self.cgroups: # Read available cpus/memory nodes: try: self.cpus = util.parse_int_list(self.cgroups.get_value(CPUSET, 'cpus')) except ValueError as e: logging.warning("Could not read available CPU cores from kernel: {0}".format(e.strerror)) logging.debug("List of available CPU cores is {0}.".format(self.cpus)) try: self.memory_nodes = util.parse_int_list(self.cgroups.get_value(CPUSET, 'mems')) except ValueError as e: logging.warning("Could not read available memory nodes from kernel: {0}".format(e.strerror)) logging.debug("List of available memory nodes is {0}.".format(self.memory_nodes)) def _setup_cgroups(self, args, my_cpus, memlimit, memory_nodes): """ This method creates the CGroups for the following execution. @param args: the command line to run, used only for logging @param my_cpus: None or a list of the CPU cores to use @param memlimit: None or memory limit in bytes @param memory_nodes: None or a list of memory nodes of a NUMA system to use @return cgroups: a map of all the necessary cgroups for the following execution. Please add the process of the following execution to all those cgroups! 
""" # Setup cgroups, need a single call to create_cgroup() for all subsystems subsystems = [CPUACCT, FREEZER, MEMORY] if my_cpus is not None: subsystems.append(CPUSET) subsystems = [s for s in subsystems if s in self.cgroups] cgroups = self.cgroups.create_fresh_child_cgroup(*subsystems) logging.debug("Executing {0} in cgroups {1}.".format(args, cgroups)) # Setup cpuset cgroup if necessary to limit the CPU cores/memory nodes to be used. if my_cpus is not None: my_cpus_str = ','.join(map(str, my_cpus)) cgroups.set_value(CPUSET, 'cpus', my_cpus_str) my_cpus_str = cgroups.get_value(CPUSET, 'cpus') logging.debug('Executing {0} with cpu cores [{1}].'.format(args, my_cpus_str)) if memory_nodes is not None: cgroups.set_value(CPUSET, 'mems', ','.join(map(str, memory_nodes))) memory_nodesStr = cgroups.get_value(CPUSET, 'mems') logging.debug('Executing {0} with memory nodes [{1}].'.format(args, memory_nodesStr)) # Setup memory limit if memlimit is not None: limit = 'limit_in_bytes' cgroups.set_value(MEMORY, limit, memlimit) swap_limit = 'memsw.limit_in_bytes' # We need swap limit because otherwise the kernel just starts swapping # out our process if the limit is reached. # Some kernels might not have this feature, # which is ok if there is actually no swap. if not cgroups.has_value(MEMORY, swap_limit): if systeminfo.has_swap(): sys.exit('Kernel misses feature for accounting swap memory, but machine has swap. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".') else: try: cgroups.set_value(MEMORY, swap_limit, memlimit) except IOError as e: if e.errno == 95: # kernel responds with error 95 (operation unsupported) if this is disabled sys.exit('Memory limit specified, but kernel does not allow limiting swap memory. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".') raise e memlimit = cgroups.get_value(MEMORY, limit) logging.debug('Executing {0} with memory limit {1} bytes.'.format(args, memlimit)) if MEMORY in cgroups \ and not cgroups.has_value(MEMORY, 'memsw.max_usage_in_bytes') \ and systeminfo.has_swap(): logging.warning('Kernel misses feature for accounting swap memory, but machine has swap. Memory usage may be measured inaccurately. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".') if MEMORY in cgroups: try: # Note that this disables swapping completely according to # https://www.kernel.org/doc/Documentation/cgroups/memory.txt # (unlike setting the global swappiness to 0). # Our process might get killed because of this. cgroups.set_value(MEMORY, 'swappiness', '0') except IOError as e: logging.warning('Could not disable swapping for benchmarked process: ' + str(e)) return cgroups def _execute(self, args, output_filename, stdin, cgroups, hardtimelimit, softtimelimit, walltimelimit, myCpuCount, memlimit, environments, workingDir): """ This method executes the command line and waits for the termination of it. """ def preSubprocess(): os.setpgrp() # make subprocess to group-leader os.nice(5) # increase niceness of subprocess if hardtimelimit is not None: # Also use ulimit for CPU time limit as a fallback if cgroups don't work. if CPUACCT in cgroups: # Use a slightly higher limit to ensure cgroups get used # (otherwise we cannot detect the timeout properly). 
ulimit = hardtimelimit + _ULIMIT_DEFAULT_OVERHEAD else: ulimit = hardtimelimit resource.setrlimit(resource.RLIMIT_CPU, (ulimit, ulimit)) # put us into the cgroup(s) pid = os.getpid() # On some systems, cgrulesengd would move our process into other cgroups. # We disable this behavior via libcgroup if available. # Unfortunately, logging/printing does not seem to work here. from ctypes import cdll try: libcgroup = cdll.LoadLibrary('libcgroup.so.1') failure = libcgroup.cgroup_init() if failure: pass #print('Could not initialize libcgroup, error {}'.format(success)) else: CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1 failure = libcgroup.cgroup_register_unchanged_process(pid, CGROUP_DAEMON_UNCHANGE_CHILDREN) if failure: pass #print('Could not register process to cgrulesndg, error {}. Probably the daemon will mess up our cgroups.'.format(success)) except OSError: pass #print('libcgroup is not available: {}'.format(e.strerror)) cgroups.add_task(pid) # Setup environment: # If keepEnv is set, start from a fresh environment, otherwise with the current one. # keepEnv specifies variables to copy from the current environment, # newEnv specifies variables to set to a new value, # additionalEnv specifies variables where some value should be appended, and # clearEnv specifies variables to delete. runningEnv = os.environ.copy() if not environments.get("keepEnv", {}) else {} for key, value in environments.get("keepEnv", {}).items(): if key in os.environ: runningEnv[key] = os.environ[key] for key, value in environments.get("newEnv", {}).items(): runningEnv[key] = value for key, value in environments.get("additionalEnv", {}).items(): runningEnv[key] = os.environ.get(key, "") + value for key in environments.get("clearEnv", {}).items(): runningEnv.pop(key, None) logging.debug("Using additional environment {0}.".format(str(environments))) # write command line into outputFile try: outputFile = open(output_filename, 'w') # override existing file except IOError as e: sys.exit(e) outputFile.write(' '.join(args) + '\n\n\n' + '-' * 80 + '\n\n\n') outputFile.flush() timelimitThread = None oomThread = None energyBefore = util.measure_energy() walltime_before = time.time() p = None try: p = subprocess.Popen(args, stdin=stdin, stdout=outputFile, stderr=outputFile, env=runningEnv, cwd=workingDir, close_fds=True, preexec_fn=preSubprocess) except OSError as e: logging.critical("OSError {0} while starting '{1}' in '{2}': {3}." 
.format(e.errno, args[0], workingDir or '.', e.strerror)) return (0, 0, 0, None) try: with self.SUB_PROCESSES_LOCK: self.SUB_PROCESSES.add(p) # hard time limit with cgroups is optional (additionally enforce by ulimit) cgroup_hardtimelimit = hardtimelimit if CPUACCT in cgroups else None if any([cgroup_hardtimelimit, softtimelimit, walltimelimit]): # Start a timer to periodically check timelimit timelimitThread = _TimelimitThread(cgroups, cgroup_hardtimelimit, softtimelimit, walltimelimit, p, myCpuCount, self._set_termination_reason) timelimitThread.start() if memlimit is not None: try: oomThread = oomhandler.KillProcessOnOomThread(cgroups, p, self._set_termination_reason) oomThread.start() except OSError as e: logging.critical("OSError {0} during setup of OomEventListenerThread: {1}.".format(e.errno, e.strerror)) try: logging.debug("waiting for: pid:{0}".format(p.pid)) pid, returnvalue, ru_child = os.wait4(p.pid, 0) logging.debug("waiting finished: pid:{0}, retVal:{1}".format(pid, returnvalue)) except OSError as e: returnvalue = 0 ru_child = None if self.PROCESS_KILLED: # OSError 4 (interrupted system call) seems always to happen if we killed the process ourselves after Ctrl+C was pressed logging.debug("OSError {0} while waiting for termination of {1} ({2}): {3}.".format(e.errno, args[0], p.pid, e.strerror)) else: logging.critical("OSError {0} while waiting for termination of {1} ({2}): {3}.".format(e.errno, args[0], p.pid, e.strerror)) finally: walltime_after = time.time() with self.SUB_PROCESSES_LOCK: self.SUB_PROCESSES.discard(p) if timelimitThread: timelimitThread.cancel() if oomThread: oomThread.cancel() outputFile.close() # normally subprocess closes file, we do this again logging.debug("size of logfile '{0}': {1}".format(output_filename, str(os.path.getsize(output_filename)))) # kill all remaining processes if some managed to survive cgroups.kill_all_tasks() energy = util.measure_energy(energyBefore) walltime = walltime_after - walltime_before cputime = ru_child.ru_utime + ru_child.ru_stime if ru_child else 0 return (returnvalue, walltime, cputime, energy) def _get_exact_measures(self, cgroups, returnvalue, walltime, cputime): """ This method tries to extract better measures from cgroups. """ cputime2 = None if CPUACCT in cgroups: # We want to read the value from the cgroup. # The documentation warns about outdated values. # So we read twice with 0.1s time difference, # and continue reading as long as the values differ. # This has never happened except when interrupting the script with Ctrl+C, # but just try to be on the safe side here. tmp = cgroups.read_cputime() tmp2 = None while tmp != tmp2: time.sleep(0.1) tmp2 = tmp tmp = cgroups.read_cputime() cputime2 = tmp memUsage = None if MEMORY in cgroups: # This measurement reads the maximum number of bytes of RAM+Swap the process used. # For more details, c.f. the kernel documentation: # https://www.kernel.org/doc/Documentation/cgroups/memory.txt memUsageFile = 'memsw.max_usage_in_bytes' if not cgroups.has_value(MEMORY, memUsageFile): memUsageFile = 'max_usage_in_bytes' if not cgroups.has_value(MEMORY, memUsageFile): logging.warning('Memory-usage is not available due to missing files.') else: try: memUsage = int(cgroups.get_value(MEMORY, memUsageFile)) except IOError as e: if e.errno == 95: # kernel responds with error 95 (operation unsupported) if this is disabled logging.critical("Kernel does not track swap memory usage, cannot measure memory usage. 
" + "Please set swapaccount=1 on your kernel command line.") else: raise e logging.debug('Run exited with code {0}, walltime={1}, cputime={2}, cgroup-cputime={3}, memory={4}' .format(returnvalue, walltime, cputime, cputime2, memUsage)) # Usually cputime2 (measured with cgroups) seems to be 0.01s greater # than cputime (measured with ulimit). # Furthermore, cputime might miss some subprocesses, # therefore we expect cputime2 to be always greater (and more correct). # However, sometimes cputime is a little bit bigger than cputime2. # For small values, this is probably because cputime counts since fork, # whereas cputime2 counts only after cgroups.add_task() # (so overhead from runexecutor is correctly excluded in cputime2). # For large values, a difference may also indicate a problem with cgroups, # for example another process moving our benchmarked process between cgroups, # thus we warn if the difference is substantial and take the larger ulimit value. if cputime2 is not None: if cputime > 0.5 and (cputime * 0.95) > cputime2: logging.warning('Cputime measured by wait was {0}, cputime measured by cgroup was only {1}, perhaps measurement is flawed.'.format(cputime, cputime2)) else: cputime = cputime2 return (cputime, memUsage) def execute_run(self, args, output_filename, stdin=None, hardtimelimit=None, softtimelimit=None, walltimelimit=None, cores=None, memlimit=None, memory_nodes=None, environments={}, workingDir=None, maxLogfileSize=None): """ This function executes a given command with resource limits, and writes the output to a file. @param args: the command line to run @param output_filename: the file where the output should be written to @param stdin: What to uses as stdin for the process (None: /dev/null, a file descriptor, or a file object) @param hardtimelimit: None or the CPU time in seconds after which the tool is forcefully killed. @param softtimelimit: None or the CPU time in seconds after which the tool is sent a kill signal. @param walltimelimit: None or the wall time in seconds after which the tool is forcefully killed (default: hardtimelimit + a few seconds) @param cores: None or a list of the CPU cores to use @param memlimit: None or memory limit in bytes @param memory_nodes: None or a list of memory nodes in a NUMA system to use @param environments: special environments for running the command @param workingDir: None or a directory which the execution should use as working directory @param maxLogfileSize: None or a number of bytes to which the output of the tool should be truncated approximately if there is too much output. 
@return: a tuple with walltime in seconds, cputime in seconds, memory usage in bytes, returnvalue, and process output """ if stdin == subprocess.PIPE: sys.exit('Illegal value subprocess.PIPE for stdin') elif stdin is None: stdin = DEVNULL if hardtimelimit is not None: if hardtimelimit <= 0: sys.exit("Invalid time limit {0}.".format(hardtimelimit)) if softtimelimit is not None: if softtimelimit <= 0: sys.exit("Invalid soft time limit {0}.".format(softtimelimit)) if hardtimelimit and (softtimelimit > hardtimelimit): sys.exit("Soft time limit cannot be larger than the hard time limit.") if not CPUACCT in self.cgroups: sys.exit("Soft time limit cannot be specified without cpuacct cgroup.") if walltimelimit is None: if hardtimelimit is not None: walltimelimit = hardtimelimit + _WALLTIME_LIMIT_DEFAULT_OVERHEAD elif softtimelimit is not None: walltimelimit = softtimelimit + _WALLTIME_LIMIT_DEFAULT_OVERHEAD else: if walltimelimit <= 0: sys.exit("Invalid wall time limit {0}.".format(walltimelimit)) if cores is not None: if self.cpus is None: sys.exit("Cannot limit CPU cores without cpuset cgroup.") coreCount = len(cores) if coreCount == 0: sys.exit("Cannot execute run without any CPU core.") if not set(cores).issubset(self.cpus): sys.exit("Cores {0} are not allowed to be used".format(list(set(cores).difference(self.cpus)))) else: try: coreCount = multiprocessing.cpu_count() except NotImplementedError: coreCount = 1 if memlimit is not None: if memlimit <= 0: sys.exit("Invalid memory limit {0}.".format(memlimit)) if not MEMORY in self.cgroups: sys.exit("Memory limit specified, but cannot be implemented without cgroup support.") if memory_nodes is not None: if self.memory_nodes is None: sys.exit("Cannot restrict memory nodes without cpuset cgroup.") if len(memory_nodes) == 0: sys.exit("Cannot execute run without any memory node.") if not set(memory_nodes).issubset(self.memory_nodes): sys.exit("Memory nodes {0} are not allowed to be used".format(list(set(memory_nodes).difference(self.memory_nodes)))) if workingDir: if not os.path.exists(workingDir): sys.exit("Working directory {0} does not exist.".format(workingDir)) if not os.path.isdir(workingDir): sys.exit("Working directory {0} is not a directory.".format(workingDir)) if not os.access(workingDir, os.X_OK): sys.exit("Permission denied for working directory {0}.".format(workingDir)) self._termination_reason = None logging.debug("execute_run: setting up Cgroups.") cgroups = self._setup_cgroups(args, cores, memlimit, memory_nodes) throttle_check = _CPUThrottleCheck(cores) swap_check = _SwapCheck() try: logging.debug("execute_run: executing tool.") (exitcode, walltime, cputime, energy) = \ self._execute(args, output_filename, stdin, cgroups, hardtimelimit, softtimelimit, walltimelimit, coreCount, memlimit, environments, workingDir) logging.debug("execute_run: getting exact measures.") (cputime, memUsage) = self._get_exact_measures(cgroups, exitcode, walltime, cputime) finally: # always try to cleanup cgroups, even on sys.exit() logging.debug("execute_run: cleaning up CGroups.") cgroups.remove() # if exception is thrown, skip the rest, otherwise perform normally if throttle_check.has_throttled(): logging.warning('CPU throttled itself during benchmarking due to overheating. Benchmark results are unreliable!') if swap_check.has_swapped(): logging.warning('System has swapped during benchmarking. 
Benchmark results are unreliable!') _reduce_file_size_if_necessary(output_filename, maxLogfileSize) if exitcode not in [0,1]: logging.debug("execute_run: analysing output for crash-info.") _get_debug_output_after_crash(output_filename) logging.debug("execute_run: Run execution returns with code {0}, walltime={1}, cputime={2}, memory={3}, energy={4}" .format(exitcode, walltime, cputime, memUsage, energy)) result = {'walltime': walltime, 'cputime': cputime, 'exitcode': exitcode, } if memUsage: result['memory'] = memUsage if self._termination_reason: result['terminationreason'] = self._termination_reason if energy: result['energy'] = energy return result def _set_termination_reason(self, reason): self._termination_reason = reason def stop(self): self._set_termination_reason('killed') self.PROCESS_KILLED = True with self.SUB_PROCESSES_LOCK: for process in self.SUB_PROCESSES: logging.warning('Killing process {0} forcefully.'.format(process.pid)) util.kill_process(process.pid) def _reduce_file_size_if_necessary(fileName, maxSize): """ This function shrinks a file. We remove only the middle part of a file, the file-start and the file-end remain unchanged. """ if maxSize is None: return # disabled, nothing to do fileSize = os.path.getsize(fileName) if fileSize < (maxSize + 500): return # not necessary logging.warning("Logfile '{0}' is too big (size {1} bytes). Removing lines.".format(fileName, fileSize)) # We partition the file into 3 parts: # A) start: maxSize/2 bytes we want to keep # B) middle: part we want to remove # C) end: maxSize/2 bytes we want to keep # Trick taken from StackOverflow: # https://stackoverflow.com/questions/2329417/fastest-way-to-delete-a-line-from-large-file-in-python # We open the file twice at the same time, once for reading and once for writing. # We position the one file object at the beginning of B # and the other at the beginning of C. # Then we copy the content of C into B, overwriting what is there. # Afterwards we truncate the file after A+C. with open(fileName, 'r+b') as outputFile: with open(fileName, 'rb') as inputFile: # Position outputFile between A and B outputFile.seek(maxSize // 2) outputFile.readline() # jump to end of current line so that we truncate at line boundaries if outputFile.tell() == fileSize: # readline jumped to end of file because of a long line return outputFile.write("\n\n\nWARNING: YOUR LOGFILE WAS TOO LONG, SOME LINES IN THE MIDDLE WERE REMOVED.\n\n\n\n".encode()) # Position inputFile between B and C inputFile.seek(-maxSize // 2, os.SEEK_END) # jump to beginning of second part we want to keep from end of file inputFile.readline() # jump to end of current line so that we truncate at line boundaries # Copy C over B _copy_all_lines_from_to(inputFile, outputFile) outputFile.truncate() def _copy_all_lines_from_to(inputFile, outputFile): """ Copy all lines from an input file object to an output file object. """ currentLine = inputFile.readline() while currentLine: outputFile.write(currentLine) currentLine = inputFile.readline() def _get_debug_output_after_crash(output_filename): """ Segmentation faults and some memory failures reference a file with more information (hs_err_pid_*). We append this file to the log. The format that we expect is a line "# An error report file with more information is saved as:" and the file name of the dump file on the next line. 
""" foundDumpFile = False with open(output_filename, 'r+') as outputFile: for line in outputFile: if foundDumpFile: try: dumpFileName = line.strip(' #\n') outputFile.seek(0, os.SEEK_END) # jump to end of log file with open(dumpFileName, 'r') as dumpFile: _copy_all_lines_from_to(dumpFile, outputFile) os.remove(dumpFileName) except IOError as e: logging.warning('Could not append additional segmentation fault information from {0} ({1})'.format(dumpFile, e.strerror)) break if util.decode_to_string(line).startswith('# An error report file with more information is saved as:'): logging.debug('Going to append error report file') foundDumpFile = True class _TimelimitThread(threading.Thread): """ Thread that periodically checks whether the given process has already reached its timelimit. After this happens, the process is terminated. """ def __init__(self, cgroups, hardtimelimit, softtimelimit, walltimelimit, process, cpuCount=1, callbackFn=lambda reason: None): super(_TimelimitThread, self).__init__() if hardtimelimit or softtimelimit: assert CPUACCT in cgroups assert walltimelimit is not None self.daemon = True self.cgroups = cgroups self.timelimit = hardtimelimit or (60*60*24*365*100) # large dummy value self.softtimelimit = softtimelimit or (60*60*24*365*100) # large dummy value self.latestKillTime = time.time() + walltimelimit self.cpuCount = cpuCount self.process = process self.callback = callbackFn self.finished = threading.Event() def read_cputime(self): while True: try: return self.cgroups.read_cputime() except ValueError: # Sometimes the kernel produces strange values with linebreaks in them time.sleep(1) pass def run(self): while not self.finished.is_set(): usedCpuTime = self.read_cputime() if CPUACCT in self.cgroups else 0 remainingCpuTime = self.timelimit - usedCpuTime remainingSoftCpuTime = self.softtimelimit - usedCpuTime remainingWallTime = self.latestKillTime - time.time() logging.debug("TimelimitThread for process {0}: used CPU time: {1}, remaining CPU time: {2}, remaining soft CPU time: {3}, remaining wall time: {4}." .format(self.process.pid, usedCpuTime, remainingCpuTime, remainingSoftCpuTime, remainingWallTime)) if remainingCpuTime <= 0: self.callback('cputime') logging.debug('Killing process {0} due to CPU time timeout.'.format(self.process.pid)) util.kill_process(self.process.pid) self.finished.set() return if remainingWallTime <= 0: self.callback('walltime') logging.warning('Killing process {0} due to wall time timeout.'.format(self.process.pid)) util.kill_process(self.process.pid) self.finished.set() return if remainingSoftCpuTime <= 0: self.callback('cputime-soft') # soft time limit violated, ask process to terminate util.kill_process(self.process.pid, signal.SIGTERM) self.softtimelimit = self.timelimit remainingTime = min(remainingCpuTime/self.cpuCount, remainingSoftCpuTime/self.cpuCount, remainingWallTime) self.finished.wait(remainingTime + 1) def cancel(self): self.finished.set() class _CPUThrottleCheck(object): """ Class for checking whether the CPU has throttled during some time period. """ def __init__(self, cores=None): """ Create an instance that monitors the given list of cores (or all CPUs). 
""" self.cpu_throttle_count = {} cpu_pattern = '[{0}]'.format(','.join(map(str, cores))) if cores else '*' for file in glob.glob('/sys/devices/system/cpu/cpu{}/thermal_throttle/*_throttle_count'.format(cpu_pattern)): try: self.cpu_throttle_count[file] = int(util.read_file(file)) except Exception as e: logging.warning('Cannot read throttling count of CPU from kernel: ' + str(e)) def has_throttled(self): """ Check whether any of the CPU cores monitored by this instance has throttled since this instance was created. @return a boolean value """ for file, value in self.cpu_throttle_count.items(): try: new_value = int(util.read_file(file)) if new_value > value: return True except Exception as e: logging.warning('Cannot read throttling count of CPU from kernel: ' + str(e)) return False class _SwapCheck(object): """ Class for checking whether the system has swapped during some period. """ def __init__(self): self.swap_count = self._read_swap_count() def _read_swap_count(self): try: return dict((k, int(v)) for k, v in util.read_key_value_pairs_from_file('/proc/vmstat') if k in ['pswpin', 'pswpout']) except Exception as e: logging.warning('Cannot read swap count from kernel: ' + str(e)) def has_swapped(self): """ Check whether any swapping occured on this system since this instance was created. @return a boolean value """ new_values = self._read_swap_count() for key, new_value in new_values.items(): old_value = self.swap_count.get(key, 0) if new_value > old_value: return True return False if __name__ == '__main__': main()
A. Currently it's set at £3 per event for adults. £2 of that goes to Cycling Time Trials for insurance, the other £1 into the club coffers. It's £2 for juniors. Q. Do I need a time trial bike? A. No, you can ride whatever you like, as long as it doesn't have a motor! We have one club member who does rather quick times on a unicycle! Q. Do I have to pre-enter the Series? A. No you don't. As long as you're a club member you can just turn up. However, if you wish to feature in the overall results, then you need to pre-enter. Also, unless you're only planning on riding one or two, we would prefer you to pre-enter; otherwise we would have no marshals and the Evening Series couldn't go ahead. Q. If I'm a 2nd claim member, is there any point in pre-entering, as I won't feature in the overall results anyway? A. The honest answer is no; however, if you're planning to do quite a few events, we would be relying on your goodwill to enter so that you help the Series by marshalling one event. Q. When completing the enrolment form, do I just tick the dates I'm unable to attend to marshal, or the dates I wish to ride as well? A. Just tick the dates you are unable to marshal due to work or other commitments. All those who take part in the Series are expected to sacrifice one ride that they could have ridden in order to marshal; otherwise they would gain an extra event on their fellow competitors. Q. What is the difference between the Evening Series and the bigger open events that have to be entered two weeks before? A. This is a closed club time trial: as it's closed to all but club members, it's less formal and you can just roll up on the night and sign on. The open events are open to members from any club but have to be pre-entered, as they all have to be organised via Cycling Time Trials. Q. What time do I need to be there? A. The first rider is off at 6.45, apart from the last two nights on the Hill Climbs, which start at 6.30 due to the nights pulling in. You can sign on as late as 15 mins before; however, if you want an earlier ride you need to be there just after 6 pm, as a queue forms of people wanting early rides. Q. If there are more than enough marshals, or someone stands in for me on the night I'm meant to marshal, can I ride instead? A. Yes, but you will have to treat it as training, as your ride won't count for that night. Everyone has to drop a qualifying ride somewhere. Q. Can I swap marshalling duties with someone else in the Series on another night? A. Yes you can, as long as you arrange it and let the organisers know about the swap. Q. Can I draft another rider? A. Absolutely not! This is a major crime against cycling and is punishable by multiple Carlton Bank repeats on a cold wet day with a trailer on! When a rider overtakes you, you must drop back a little; similarly, if you're going to overtake a rider, you need to be confident that you can overtake cleanly and gain some distance immediately. Q. What happens at the finish line? Finish line etiquette? A. Firstly, your number must be pinned on as low as possible so that the timekeeper can see it; also, if you have any breath to spare, it's courtesy to shout your number as you cross the line. Please don't loiter in the middle of the road before and after riding, as cars are coming past there fairly quickly. Also, please don't distract the timekeeper by pestering him for your time. The times are read out at the end, and then published on the website later that week. The timekeeper's time is final!
Sometimes errors do occur and you can discuss this with the organisers afterwards, but Strava won't be accepted as a true time, only a guide. Strava often seems to run three seconds faster; I guess this is due to the delay with the auto-pause as you set off, plus the Strava segment starting slightly in advance of the start line. Q. How does the handicapping work? A. I haven't got a clue! But all riders are awarded a handicap time, and the positions from that generate the handicap competition. The handicapping is meant to even things out somewhat.
"""Functions to refine a selection to varying degrees. These functions are usually used by setting the md_level member of an automodel or loop model object.""" def very_fast(atmsel, actions): """Very fast MD annealing""" # at T=1000, max_atom_shift for 4fs is cca 0.15 A. refine(atmsel, actions, cap=0.39, timestep=4.0, equil_its=50, equil_equil=10, equil_temps=(150.0, 400.0, 1000.0), sampl_its=300, sampl_equil=100, sampl_temps=(1000.0, 800.0, 500.0, 300.0)) def fast(atmsel, actions): """Fast MD annealing""" refine(atmsel, actions, cap=0.39, timestep=4.0, equil_its=100, equil_equil=20, equil_temps=(150.0, 250.0, 500.0, 1000.0), sampl_its=400, sampl_equil=100, sampl_temps=(1000.0, 800.0, 500.0, 300.0)) def slow(atmsel, actions): """Slow MD annealing""" refine(atmsel, actions, cap=0.39, timestep=4.0, equil_its=200, equil_equil=20, equil_temps=(150.0, 250.0, 400.0, 700.0, 1000.0), sampl_its=600, sampl_equil=200, sampl_temps=(1000.0, 800.0, 600.0, 500.0, 400.0, 300.0)) def very_slow(atmsel, actions): """Very slow MD annealing""" refine(atmsel, actions, cap=0.39, timestep=4.0, equil_its=300, equil_equil=20, equil_temps=(150.0, 250.0, 400.0, 700.0, 1000.0, 1300.0), sampl_its=1000, sampl_equil=200, sampl_temps=(1300.0, 1000.0, 800.0, 600.0, 500.0, 430.0, 370.0, 320.0, 300.0)) def slow_large(atmsel, actions): """Very slow/large dt MD annealing""" refine(atmsel, actions, cap=0.39, timestep=10.0, equil_its=200, equil_equil=20, equil_temps=(150.0, 250.0, 400.0, 700.0, 1000.0, 1500.0), sampl_its=2000, sampl_equil=200, sampl_temps=(1500.0, 1000.0, 800.0, 600.0, 500.0, 400.0, 300.0)) def refine(atmsel, actions, cap, timestep, equil_its, equil_equil, equil_temps, sampl_its, sampl_equil, sampl_temps, **args): from modeller.optimizers import molecular_dynamics mdl = atmsel.get_model() md = molecular_dynamics(cap_atom_shift=cap, md_time_step=timestep, md_return='FINAL', output=mdl.optimize_output, actions=actions, **args) init_vel = True # First run for equilibration, the second for sampling: for (its, equil, temps) in ((equil_its, equil_equil, equil_temps), (sampl_its, sampl_equil, sampl_temps)): for temp in temps: md.optimize(atmsel, max_iterations=its, equilibrate=equil, temperature=temp, init_velocities=init_vel) init_vel=False
Larger work area at an economical price. The EGX-30 provides the most cost-effective, computerized engraving solution available. Engineered for busy professionals looking for a second machine and beginners working on a limited budget, the EGX-30 engraves badges, nameplates, key rings, and luggage tags. It also scribes brass for awards and trophies. The EGX-30's low profile, small footprint and quiet operation make it the perfect desktop engraving device for any office, shop or even home. Yet its 12-by-8.13-inch work area and industry-standard 0.125-inch diameter spindle provide professional-grade precision and performance. With the EGX-30, you have a variety of tool choices right out of the box. You have the option of using an industry-standard 1/8" (3.175 mm) tool size, plus an optional diamond drag tool. Engrave and scribe on a wide variety of light-duty materials. Compatible materials include coated brass (diamond drag) for great-looking trophy plates, as well as dual-coated micro-thin plastics (cutting) for professional badges, nameplates, key rings and luggage tags. Dr. Engrave 2D engraving software comes with industry-standard TrueType fonts. It lets you create designs or import a Windows bitmap file or Vector Art from Roland. Dr. Engrave features an automatic layout function that allows you to import an Excel database file in CSV format for multiple-plate applications. Dr. Engrave quickly and easily imports data and places columns of information into pre-defined text boxes to produce an entire door-numbering project or corporate nametags. Compatible with Windows® 95/98/Me/NT4.0 and 2000. Feed rate and spindle RPM can be adjusted with the user-friendly knob on the control panel.
# -*- coding: utf-8 -*-
"""
github3.gists.history
---------------------

Module containing the logic for the GistHistory object.

"""
from __future__ import unicode_literals

from ..models import GitHubCore
from ..users import User


class GistHistory(GitHubCore):

    """This object represents one version (or revision) of a gist.

    Two history instances can be checked like so::

        h1 == h2
        h1 != h2

    And is equivalent to::

        h1.version == h2.version
        h1.version != h2.version

    """

    def _update_attributes(self, history):
        self._api = history.get('url', '')

        #: SHA of the commit associated with this version
        self.version = history.get('version', '')

        #: user who made these changes
        self.user = User(history.get('user') or {}, self)

        #: dict containing the change status; see also: deletions, additions,
        #: total
        self.change_status = history.get('change_status', {})

        #: number of additions made
        self.additions = self.change_status.get('additions', 0)

        #: number of deletions made
        self.deletions = self.change_status.get('deletions', 0)

        #: total number of changes made
        self.total = self.change_status.get('total', 0)

        #: datetime representation of when the commit was made
        self.committed_at = self._strptime(history.get('committed_at'))

    def _repr(self):
        return '<Gist History [{0}]>'.format(self.version)

    def get_gist(self):
        """Retrieve the gist at this version.

        :returns: :class:`Gist <github3.gists.gist.Gist>`

        """
        from .gist import Gist
        json = self._json(self._get(self._api), 200)
        return self._instance_or_null(Gist, json)
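A brief sketch of how these objects tend to show up in practice. It assumes an authenticated github3.py client; the token and gist id are hypothetical, and because the accessor that lists a gist's revisions has changed name across github3.py releases, gist.history below is an assumption rather than a guaranteed API.

# Usage sketch; the token, gist id and the `history` accessor are assumptions.
import github3

gh = github3.login(token='YOUR_TOKEN')
gist = gh.gist('0123456789abcdef')

for rev in gist.history:                       # assumed iterable of GistHistory
    print(rev.version, rev.committed_at, rev.additions, rev.deletions, rev.total)
    snapshot = rev.get_gist()                  # full Gist at that revision

# Equality between two revisions is by version SHA, per the docstring above.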
Looking for something to read, play, or listen to? Check out our lists! Personalized reading recommendations can be created by contacting the Teen Services Librarian. The TCPL Teen Center has computers, tables, comfy chairs, booths, and our ever-popular spinning stools for your socializing and studying needs. It is designed for grades 6-12 or their homeschool equivalent. Everyone is welcome to look for a book in the room at any time but please show support for our teen community by finding seating in other areas of the library. Looking for something to do? Need community service hours? There are a few ways to get involved at TCPL. Our Junior Library Advisory Council is a great opportunity for teens looking to develop leadership skills in their community. We meet on the first Wednesday of each month to plan events for other teens, advocate for and promote the library, and other special activities. JLAC work counts as community service! YA Book Club meets on the third Wednesday of each month to discuss a book we've all read. Participants choose what titles we read and everyone receives a free copy of the book to keep. Volunteer opportunities are available for teens who need more extensive community service experiences.
from unittest.mock import patch from nose.tools import assert_equal, assert_in from pyecharts import options as opts from pyecharts.charts import Radar v1 = [(4300, 10000, 28000, 35000, 50000, 19000)] v2 = [(5000, 14000, 28000, 31000, 42000, 21000)] @patch("pyecharts.render.engine.write_utf8_html_file") def test_radar_base(fake_writer): c = ( Radar() .add_schema( schema=[ opts.RadarIndicatorItem(name="销售", max_=6500), opts.RadarIndicatorItem(name="管理", max_=16000), opts.RadarIndicatorItem(name="信息技术", max_=30000), opts.RadarIndicatorItem(name="客服", max_=38000), opts.RadarIndicatorItem(name="研发", max_=52000), opts.RadarIndicatorItem(name="市场", max_=25000), ] ) .add("预算分配", v1) .add("实际开销", v2) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) ) c.render() _, content = fake_writer.call_args[0] assert_equal(c.theme, "white") assert_equal(c.renderer, "canvas") @patch("pyecharts.render.engine.write_utf8_html_file") def test_radar_item_base(fake_writer): series_names = ["预算分配", "实际开销"] series_data = [ [4300, 10000, 28000, 35000, 50000, 19000], [5000, 14000, 28000, 31000, 42000, 21000], ] radar_item = [ opts.RadarItem(name=d[0], value=d[1]) for d in list(zip(series_names, series_data)) ] c = ( Radar() .add_schema( schema=[ opts.RadarIndicatorItem(name="销售", max_=6500), opts.RadarIndicatorItem(name="管理", max_=16000), opts.RadarIndicatorItem(name="信息技术", max_=30000), opts.RadarIndicatorItem(name="客服", max_=38000), opts.RadarIndicatorItem(name="研发", max_=52000), opts.RadarIndicatorItem(name="市场", max_=25000), ] ) .add("", radar_item) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) .set_global_opts(title_opts=opts.TitleOpts(title="Radar-基本示例")) ) c.render() _, content = fake_writer.call_args[0] assert_equal(c.theme, "white") assert_equal(c.renderer, "canvas") @patch("pyecharts.render.engine.write_utf8_html_file") def test_radar_options(fake_writer): c = ( Radar() .add_schema( schema=[ opts.RadarIndicatorItem(name="销售", max_=6500), opts.RadarIndicatorItem(name="管理", max_=16000), opts.RadarIndicatorItem(name="信息技术", max_=30000), opts.RadarIndicatorItem(name="客服", max_=38000), opts.RadarIndicatorItem(name="研发", max_=52000), opts.RadarIndicatorItem(name="市场", max_=25000), ], radiusaxis_opts=opts.RadiusAxisOpts(), angleaxis_opts=opts.AngleAxisOpts(), polar_opts=opts.PolarOpts(), ) .add("预算分配", v1) .add("实际开销", v2) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) ) c.render() _, content = fake_writer.call_args[0] assert_in("radiusAxis", content) assert_in("angleAxis", content) assert_in("polar", content)
Sinco Machine uses the sand casting process for both ferrous materials like grey iron and ductile iron and non-ferrous materials like aluminium.
The best kept secret in the antique car, slot machine, furniture, & lighting business... ... And I want to talk to you about your sand casting needs.
Steel Machine Sand Casting Products, US $630 - 16,000 / Ton, Shanxi, China (Mainland), SYI, OEM. Source from Syi Industrial Co., Ltd. on Alibaba.com.
This versatile Two-Mass system gently tumbles castings and provides accelerated casting cooling through equalizing of sand/casting ... life of the machine.
How It Works – Machining cast aluminum parts. ... Sand casting. In sand casting ... “You can’t machine quality into a casting,” said Stahl.
Online shopping for Casting Machines from a great selection at the Arts, Crafts & Sewing Store.
Find great deals on eBay for sand casting machine. Shop with confidence.
Straight to Sand. Through direct CNC ... cuts that lead time in half. The process otherwise replicates conventional casting. ... The sand also affects the machine.
We make rapid, high-quality no-bake sand casts in as little as three days. Learn more about precision sand casting by visiting this page from Protocast Inc.
Sand For Casting 5-Lb Can. Part # 122.957. $21.75.
Drum with Lid for Centrifugal Casting Machines. Part # 122.367. $184.00.
#!/usr/bin/env python2
import sys


def three2one(prot):
    """Translate a protein sequence from 3-letter to 1-letter code."""
    code = {"GLY": "G", "ALA": "A", "LEU": "L", "ILE": "I",
            "ARG": "R", "LYS": "K", "MET": "M", "CYS": "C",
            "TYR": "Y", "THR": "T", "PRO": "P", "SER": "S",
            "TRP": "W", "ASP": "D", "GLU": "E", "ASN": "N",
            "GLN": "Q", "PHE": "F", "HIS": "H", "VAL": "V",
            "M3L": "K", "MSE": "M", "CAS": "C"}

    newprot = ""
    for a in prot:
        newprot += code.get(a, "?")

    return newprot

if len(sys.argv) != 3:
    print("\nGetCACoordinatesFromPDB.py PDB_Id Output_file\n")
    exit()

from Bio.PDB.PDBParser import PDBParser

p = PDBParser(PERMISSIVE=1, QUIET=True)

# The PDB file is expected in the working directory, named <PDB_Id>.pdb.
struct_id = sys.argv[1]
filename = struct_id + ".pdb"
output_fn = sys.argv[2]

out = open(output_fn, 'w')

xyz_CA = []

s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
for ch in chains:
    sequence = []
    for res in ch:
        # Keep only residues that have both a CA and an O atom, and whose
        # hetero flag is blank or one of the accepted modified residues.
        is_regular_res = res.has_id('CA') and res.has_id('O')
        res_id = res.get_id()[0]
        if (res_id == ' ' or res_id == 'H_MSE' or res_id == 'H_M3L'
                or res_id == 'H_CAS') and is_regular_res:
            sequence.append(res.get_resname())
            xyz_CA.append(res['CA'].get_coord())

# Output: first the number of CA atoms, then one tab-separated x/y/z triple per line.
out.write(str(len(xyz_CA)))
out.write('\n')
for ixyz in xyz_CA:
    out.write(str(round(ixyz[0], 5)))
    out.write('\t')
    out.write(str(round(ixyz[1], 5)))
    out.write('\t')
    out.write(str(round(ixyz[2], 5)))
    out.write('\n')

out.close()
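The script is invoked from the command line, e.g. `python2 GetCACoordinatesFromPDB.py 1ABC out.txt`, assuming a local file `1ABC.pdb` (the PDB id and output name here are only examples). The output is the CA count on the first line followed by one tab-separated x/y/z triple per line; a minimal sketch of reading it back:

```python
# Minimal sketch (assumption): parse a coordinate file produced by the script
# above; "out.txt" is just an example for the Output_file argument.
with open("out.txt") as fh:
    n_ca = int(fh.readline())                 # first line: number of CA atoms
    coords = [tuple(float(x) for x in line.split('\t'))
              for line in fh if line.strip()]  # remaining lines: x, y, z

assert len(coords) == n_ca
print(coords[:3])   # first three CA positions as (x, y, z) tuples
```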
On this episode, we welcome our friend and special guest William Lockhart (B.V.). We preview the upcoming NCAA football season and share overall thoughts on each conference. Then, for our next manly topic, we talk guns, Charles' collection, and B.V.'s collection. Of course, we cover some wrestling and review Mission: Impossible - Fallout, Ready Player One, and Last Chance U. Also, we review the Ryobi 20 in. 40V Brushless Lawn Mower and cover the closing of a candy factory!
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext

from satchmo_ext.newsletter.forms import NewsletterForm

def add_subscription(request, template="newsletter/subscribe_form.html",
    result_template="newsletter/update_results.html", form=NewsletterForm):
    """Add a subscription and return the results in the requested template."""
    return _update(request, True, template, result_template, form=form)

def remove_subscription(request, template="newsletter/unsubscribe_form.html",
    result_template="newsletter/update_results.html", form=NewsletterForm):
    """Remove a subscription and return the results in the requested template."""
    return _update(request, False, template, result_template, form=form)

def update_subscription(request, template="newsletter/update_form.html",
    result_template="newsletter/update_results.html", form=NewsletterForm):
    """Update a subscription and return the results in the requested template."""
    return _update(request, 'FORM', template, result_template, form=form)

def _update(request, state, template, result_template, form=NewsletterForm):
    """Change a subscription and return the results in the requested template."""
    success = False
    result = ""
    if request.method == "POST":
        workform = form(request.POST)
        if workform.is_valid():
            if state == 'FORM':
                # save with subscription status taken from the form
                result = workform.save()
            else:
                # save with subscription status explicitly set
                result = workform.save(state)
            success = True

        else:
            result = ugettext('Error, not valid.')

    else:
        workform = form()

    ctx = RequestContext(request, {
        'result' : result,
        'form' : workform
    })

    if success:
        return render_to_response(result_template, context_instance=ctx)
    else:
        return render_to_response(template, context_instance=ctx)
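These are plain function-based views, so hooking them into a project only requires URL patterns. A minimal sketch, assuming the module lives at satchmo_ext.newsletter.views and an older Django URL API (django.conf.urls.url) consistent with the render_to_response idiom above; the regexes and pattern names are illustrative, not taken from satchmo:

```python
# urls.py -- hypothetical wiring for the newsletter views above.
from django.conf.urls import url

from satchmo_ext.newsletter.views import (
    add_subscription,
    remove_subscription,
    update_subscription,
)

urlpatterns = [
    url(r'^subscribe/$', add_subscription, name='newsletter_subscribe'),
    url(r'^unsubscribe/$', remove_subscription, name='newsletter_unsubscribe'),
    url(r'^update/$', update_subscription, name='newsletter_update'),
]
```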
Shandong Leader Machinery Co., Ltd.'s cold and hot amphibious screw oil press machine is a newly developed and improved product. The Castor Oil Plant low price tilapia floating fish feed twin screw extruder on sale is for oil extraction from oil-bearing materials such as rapeseed, peanut, tea seed, soybean, sunflower seed, shea butter, palm kernel, and so on. Equipped with micro-electrical control, infrared heating, and a filtration system, the machine can press oil by either cold pressing or hot pressing. Pour raw material into the machine and it delivers pure oil that can be eaten directly. The machine is very suitable for individual users. To request a quotation, welcome to contact Shandong Leader Machinery Co., Ltd., contact name: Ms. Myra (Tel/WhatsApp: 0086 18363092712, Email: [email protected], Skype: leaderfoodmachine2).