#! /usr/bin/python
# -*- coding: UTF-8 -*-

from __future__ import print_function

import subprocess
import threading
import locale
import sys
import os

locale.setlocale(locale.LC_ALL, '')


class ExecuteBackground(threading.Thread):  # inherits threading.Thread
    """
    Run a process in the background.

    Defined as a subclass of Thread (override __init__() and run()).
    The call to threading.Thread.__init__(self) is mandatory.
    """

    def __init__(self, **dic):
        """Initialize the object."""
        threading.Thread.__init__(self)  # must always be called
        self._id = dic['id']
        self._args = dic['cmd']
        self._subproc_args = {
            'stdin': subprocess.PIPE,
            'stdout': subprocess.PIPE,
            'stderr': subprocess.STDOUT,
            'cwd': dic['cwd'],
            'close_fds': True,
        }

    def run(self):
        """Work performed inside the thread."""
        try:
            p = subprocess.Popen(self._args, **self._subproc_args)
        except OSError as e:
            print('Failed to execute command "{0}": [{1}] {2}'.format(
                self._args[0], e.errno, e.strerror), file=sys.stderr)
            return
        (stdouterr, stdin) = (p.stdout, p.stdin)
        print('-- output [{0}] begin --'.format(self._id))
        if sys.version_info.major == 3:
            while True:
                line = str(stdouterr.readline(), encoding='utf-8')
                #line = stdouterr.readline().decode('utf-8')  # when using decode()
                if not line:
                    break
                print(line.rstrip())
        else:
            while True:
                line = stdouterr.readline()
                if not line:
                    break
                print(line.rstrip())
        print('-- output [{0}] end --'.format(self._id))
        ret = p.wait()
        print('[{0}] Return code: {1}'.format(self._id, ret))
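For context, a minimal usage sketch follows; the ids, commands, and working directory here are hypothetical, and any argv list would do:

if __name__ == '__main__':
    jobs = [
        ExecuteBackground(id=1, cmd=['ls', '-l'], cwd='.'),
        ExecuteBackground(id=2, cmd=['date'], cwd='.'),
    ]
    for job in jobs:
        job.start()   # run() executes in its own thread
    for job in jobs:
        job.join()    # wait for all subprocesses to finish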
Residents of Ellenton who are facing financial problems often choose to take a Payday Loan, Installment Loan, or another type of Cash Advance. Can you get $300, $500 or $1000 cash in Ellenton, Florida? We serve not only Ellenton but also the other big and small cities of Manatee County, as well as other states. We can match you with any of the 300+ direct lenders who offer $100-$1000 on good conditions, with the best rates and terms. Are Payday Loans legal in Ellenton, FL? In Ellenton, as in other cities of Florida State, short-term lending is absolutely legal and not subject to any restrictions. Furthermore, it is under the control of the Consumer Financial Protection Bureau, which ensures that your rights are protected. Any questions and complaints will be answered if you e-mail or call them.
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y.', '%d.%m.%y.',          # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',      # '25. 10. 2006.', '25. 10. 06.'
    # '%d. %b %y.', '%d. %B %y.',      # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.',  # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.',      # '25. Oct 2006.', '25. October 2006.'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y. %H:%M:%S',        # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M:%S.%f',     # '25.10.2006. 14:30:59.000200'
    '%d.%m.%Y. %H:%M',           # '25.10.2006. 14:30'
    '%d.%m.%Y.',                 # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',        # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M:%S.%f',     # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M',           # '25.10.06. 14:30'
    '%d.%m.%y.',                 # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',      # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M:%S.%f',   # '25. 10. 2006. 14:30:59.000200'
    '%d. %m. %Y. %H:%M',         # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',               # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',      # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M:%S.%f',   # '25. 10. 06. 14:30:59.000200'
    '%d. %m. %y. %H:%M',         # '25. 10. 06. 14:30'
    '%d. %m. %y.',               # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
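For intuition, here is a sketch (plain Python, outside Django) of roughly how a form field consumes DATE_INPUT_FORMATS: each pattern is tried in order until strptime succeeds. The sample value is taken from the comments above.

from datetime import datetime

value = '25. 10. 2006.'
parsed = None
for fmt in DATE_INPUT_FORMATS:
    try:
        parsed = datetime.strptime(value, fmt).date()
        break                      # first matching pattern wins
    except ValueError:
        continue                   # try the next accepted format
print(parsed)                      # -> 2006-10-25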
One of the single most essential principles in business is the sales funnel. Sales funnels can increase your earnings online by automating your business. They offer a path of least resistance to growing and scaling your business on auto-pilot. After spending time with some of the world's best online marketers, I can tell you with near certainty not only how sales funnels will let you make more money online, but which specific funnels you should implement. Now, if you're anything like me, you might be averse to certain funnels. For instance, I'm by no means an ecommerce guy, yet I do know that free-plus-shipping funnels are absolutely crushing it. In fact, just this past week, while attending the Closing Table Mastermind with Perry Belcher and Roland Frasier, I gained more insight into the alluring "tripwire" funnel, supposedly created by Perry Belcher. However, Belcher didn't invent the concept. He drew it from an old Columbia House ad that offered 13 CDs or tapes for the grand price of $1.00. Remember it? Most of us recognize that this tempting offer (aka tripwire) had far more to it than just the $1.00 price. In the background, a continuity program was at work. In Columbia House's model, this was called negative option billing. That means you automatically receive regular monthly shipments (together with a bill) unless you expressly state otherwise. So why is this so powerful, and what does it have to do with sales funnels? What you'll notice is that Columbia House's sales funnel was quite simple. You picked the CDs or tapes you wanted for a dollar, then got a regular monthly shipment at a pre-defined price. They would continue to bill you for those CDs or tapes every single month. There were variations of this deal as well, where you could get 11 CDs or tapes for one penny and then get the 12th free. Either way you look at it, these were all irresistible offers, too good to pass up. What Columbia House and other firms realized was that, although they would lose money out of the gate, they would, on average and over time, make money hand over fist. And that is the power of the sales funnel. Before automation, this is how sales funnels worked. And if you called in, you would get a live person expertly walking you through the funnel. The Columbia House example offers deep insight into the psychology of an offer that's irresistible. By first figuring out the lifetime value of a customer and their retention rate, Columbia House determined that it could spend a certain amount of money acquiring customers and still be highly profitable. That's essentially what's done with sales funnels today. And what really makes those sales funnels so effective are tripwires. Today, it's the tripwire that creates the irresistible offer. Although Columbia House didn't quite have significant up-sells or down-sells in its funnels, it produced a simple two-step funnel that worked. It worked because the tripwire was so alluring. It appealed deeply to the consumer. After all, how could you resist an offer of 12 or 13 CDs or tapes for $1 or one cent when they retailed at around $10 each at the time? Another powerful instance of this was Sports Illustrated. In fact, Sports Illustrated was effectively in decline at the time it introduced its revolutionary tripwire.
There's something about tangible products that people just connect with. Even when you're offering a digital information product on the back end, by sweetening the deal with a tangible good, you can quite literally send your conversions through the roof. In the example of Sports Illustrated, take yourself back to 1990, when magazine subscription rates were starting to slide. In that period, Sports Illustrated saw a 7% decline in its circulation in the first half of the year. Whatever they did, they couldn't get subscription rates up. Even by offering deep discounts on subscriptions, consumers weren't buying. Plus, if they discounted too steeply, it would hurt their ability to get advertisers paying premium rates without showing that consumers were paying close to the full cover price of the magazine. Sports Illustrated, owned by Time, Inc., wasn't the only magazine suffering under the company's umbrella. People magazine had also seen a serious decline of 5%. Time, Inc. knew it needed to do something fast. So it began to offer a tripwire that included sports bloopers on VHS. However, the cost was prohibitive, and there were only so many blooper reels it could offer. So they turned to the football phone as the new tripwire. Why the football phone? First, it appealed to the target audience. And it was unique and exciting. Far more exciting than a boring magazine subscription. The football-shaped phone concept was conceived by Sports Illustrated's director of circulation at the time, Michael Loeb. Back then, advertising on cable television was also very inexpensive. And the phone, which was sourced from China, only cost a couple of dollars "landed". So it all made sense. Loeb worked on the logistics of this offer for several months. However, that wasn't the only offer. They also prepared one that included a sneaker phone. If you think back to 1990-1991, both offers were running almost simultaneously. Talk about an early split-test, right? So, what were the results of all this? Over 1 million new subscribers. That's right. And those offers only ran for a limited time. Yet it effectively saved the company. That's the power of a tripwire offer. It's a tangible product that gets people excited about whatever the core offer is. In fact, some people will buy what you're selling purely for the novelty of the tangible tripwire offer. Sales funnels are among the single most powerful concepts in business today for a very specific reason. It's not just the example of the tripwire that's effective. It's the whole kit and caboodle, so to speak. There are so many parts and elements to what makes a sales funnel work so well that I wanted to break it down and give some more examples, to offer a deeper understanding of how you can use them to increase your revenue online. Now, if you're not using a sales funnel today, or if you're just discovering what a sales funnel is, there are a few ways you can quickly apply this powerful principle in your business to create explosive results. First of all, if you don't have a ClickFunnels account, go here and sign up for the 14-day free trial. Once you've set up your account, grab a custom domain name and start building your funnel.
If you need help, watch some of the on-boarding videos included with ClickFunnels. It doesn't matter whether you have an existing business or you're working to set up a new one; funnels are the key to growing and scaling regardless of where you're starting from. There are five specific ways you can use sales funnels to boost your revenue right now. It doesn't matter what business you're in or what you're selling; you can leverage these today to create explosive results. At the Inner Circle mastermind today, Russell Brunson dissected the hook-story-offer method that precedes building the funnel. Can you build a sales funnel before you have a hook and a story? Yes. But you might have trouble gaining traction, especially if you're competing in a red ocean. Red oceans are entrepreneur-infested waters where everyone competes for the same pool of consumers, whereas blue oceans are places where few entrepreneurs are swimming. What does that tell you? Spend the time on the hook and the story. Yes, the offer is also vital. But it's the hook and the story that are going to reel prospects in. For example, Brunson used the narrative of fit-to-fat-to-fit founder Drew Manning. Manning, who was in excellent shape, went out and gained a bunch of weight over a six-month period to better understand what it feels like to be overweight. Then he went out and lost all the weight again. That right there is an extraordinary hook and story. It's unique. And because of that, Manning has seen a meteoric rise, despite the fact that his offer isn't that remarkable. When the hook and the story are so good, the offer almost doesn't matter. Effectively, you can sell almost anything when the hook and the story crush it. Another example is the story of Robert Allen, who would always say he could be dropped in any city in the country and buy a home with no money down within 60 days. And that's exactly what he did. His brand has done over a billion dollars in sales because of it. Those are all wonderful hooks and stories. Without that, your offer will struggle. If you can find a distinctive way to craft your hook and story, the rest will fall into place. Since there is so much red ocean out there, this is probably one of the most crucial points to make before you launch any kind of sales funnel. Tripwires are wonderful low-barrier-of-entry products that make prospects take out their credit card. Frequently, these are too-good-to-pass-up products offered at an amazing price. Just look at the tripwires offered by Columbia House or Sports Illustrated, for example. Tripwires help make it easier to get new customers, and then to ascend them up your value ladder. Anything that makes customer acquisition harder, such as a high-priced product or service out of the gate, is going to make growing and scaling your company harder. The reality is that it costs five times as much to attract a new customer as it does to retain an existing one. That's why tripwires are so effective. They lower the barrier of entry into your business. And anything that lowers the barrier of entry is always good. Another great way to launch a sales funnel and quickly raise your earnings, no matter what business you're in, is through a lead magnet. The lead magnet needs to deliver real value.
It can come in different formats, such as a PDF e-book, a checklist, a cheat sheet and so on. After the download, you can present a one-time offer. That makes a three-step funnel. You can also create an up-sell if they take the one-time offer, or even a payment-plan down-sell if they decline it. Why do a lead magnet loaded with value? First, you're establishing yourself as an expert. If that content is genuinely actionable, and it adds an enormous amount of value to the prospect's life, then they're going to regard you as the authority. But it does something more than that. It's a matter of micro-commitments. The theory? Get someone to agree to do three small things for you, and the fourth one is instantly a yes. One of the most powerful sales funnels you can build is based around the webinar. In fact, it was the webinar that saved Brunson's business from insolvency a couple of times. It was a combination and culmination of knowledge that led to the perfect webinar script, which forms the basis for the ultimate sales funnel. Some of it came from Jason Fladlien, while other pieces originated elsewhere in Brunson's journey. All that information, combined into the format of a perfect webinar and funnel, has genuinely transformed the businesses of plenty of entrepreneurs who have taken their expertise and monetized it with the rapid force of webinars. Building a webinar is one of the surest paths to success with a sales funnel. In fact, the webinar funnel is quite possibly the most transformative funnel you could launch for any business, new or old. Another way to raise your revenue with a sales funnel is to build a high-ticket coaching funnel that goes from application to call to close. The application lets you weed out those who aren't serious. And on the call, you can close the deal. Now, this type of sales funnel works if you've already had results with clients. If not, work for free until you get results for people. Then collect testimonials and use those as part of the pitch for the coaching. Possibly the biggest and most popular sales funnel these days is the free-plus-shipping offer funnel. It makes a great entry point into any funnel, with a one-time offer or up-sell after the offer, similar to a tripwire funnel. However, the difference here is the power of the word free. Yes, tripwires, which are very low-priced products or services, work well. But there is real power in the word free. As a matter of fact, people will jump up and down for a free offer, even if they have to cover the shipping and handling, rather than take an offer they have to pay even a small amount of money for.
import string, cgi
from datetime import datetime
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn

CONDOR_URL = 'http://www.cs.wisc.edu/condor/glidein/binaries/'
CORRAL_URL = 'http://www-rcf.usc.edu/~juve/glidein/'

class PackageMapper(ThreadingMixIn, HTTPServer):
    # ThreadingMixIn must come before HTTPServer in the MRO so that
    # requests are actually handled in separate threads.
    pass

class PackageMapperHandler(BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        pass

    def do_GET(self):
        # Get requested package name
        package = self.path[1:]

        # Log the request
        date = datetime.utcnow().isoformat() + 'Z'
        print date, self.client_address[0], package

        # Perform the mapping
        urls = self.map(package)

        # Send URLs back to client
        self.send_response(200)
        self.send_header('Content-type', 'text/plain')
        self.end_headers()
        for url in urls:
            self.wfile.write(url + '\n')

    def map(self, package):
        mappings = []

        # Process package
        if package.endswith('.tar.gz'):
            comp = package[:-7].split('-')
            if len(comp) == 5:
                # Parse the package components
                condor_version = comp[0]
                arch = comp[1]
                opsys = comp[2]
                opsys_version = comp[3]
                glibc_version = comp[4][5:]
                print condor_version, arch, opsys, opsys_version, glibc_version

        # Add default mappings
        mappings.append(CORRAL_URL + package)
        mappings.append(CONDOR_URL + package)

        return mappings

def main():
    try:
        server = PackageMapper(('', 10960), PackageMapperHandler)
        print 'Started Package Mapper on port 10960...'
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down server'
        server.socket.close()

if __name__ == '__main__':
    main()
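As a quick smoke test, a client sketch against a locally running mapper (Python 2, to match the server; the package filename is invented to fit the five-component naming scheme the parser expects):

import urllib2

# Hypothetical name: <version>-<arch>-<opsys>-<opsys_version>-glibc<N>.tar.gz
resp = urllib2.urlopen('http://localhost:10960/7.4.0-x86_64-Linux-RHEL5-glibc2.5.tar.gz')
for line in resp:
    print line.strip()   # expect the CORRAL_URL and CONDOR_URL mappings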
Welcome to the library of the INTRA project! As you would in any library, here you will find an archive of the knowledge generated, a collection of news gathered, and many useful bits of information that project INTRA has been producing. Feel free to browse through our online library, and don't hesitate to contact us if you're interested in more news, information or anything else INTRA-connected.

INTRA partners prepared action plans for improving internationalization support.
Published summary of joint EU Policy Recommendations.
A collection of good practices identified and verified within the INTRA project.
INTRA prepared a joint and regional overview of internationalization support.
A collection of INTRA project implementation templates.
INTRA partners organize public events for internationalization stakeholders.
Follow the stories of what was learned within the INTRA project.
Various documents prepared within the INTRA project, stored for posterity.
A quick overview of how each INTRA region will improve their targeted policies.
Recommendations for improving SME internationalization policies.
Overview of validated good practices collected within the INTRA project.
An overview of the good practices collected during the INTRA project (Slovenian version).
A joint State of Affairs document covering all 6 project partner regions.
Regional State of Affairs analysis for the East Slovenia region.
Regional State of Affairs analysis for the North-East region in Bulgaria.
Regional State of Affairs analysis for the Spanish region of Extremadura.
Regional State of Affairs analysis for the Italian region of Abruzzo.
Regional State of Affairs analysis for the West Midlands region in Great Britain.
A template helping PPs to evaluate the good practices observed within INTRA study visits.
A guideline for the preparation of regional policy recommendations.
6th issue of the INTRA project E-Newsletter in English.
5th issue of the INTRA project E-Newsletter in English.
5th issue of the INTRA project E-Newsletter in Slovenian.
4th issue of the INTRA E-Newsletter in Spanish.
4th issue of the INTRA project E-Newsletter in Italian.
Third issue of the INTRA E-Newsletter in Slovenian.
The second issue of the INTRA newsletter in English.
A perfect tool for internationalization overview presented!
Describing the importance of the Interreg Europe programme in the upcoming 2021-2027 period!
Export Guide for SMEs: Are you export ready?
Find out the Slovenian experts' choice in financial services for internationalization!
On September 13th and 14th, 2018, INTRA participated in the exhibition at the "II Regional Bioeconomy Meeting: Eurocasta 2018" in Spain.
The Regional Government of Extremadura invited FUNDECYT-PCTEX to present INTRA success cases.
INTRA partner CUE presented INTRA at a stand at Venturefest 2017 (Birmingham).
Informative leaflet about the INTRA project in English.
Informative leaflet about the INTRA project in Bulgarian.
Informative leaflet about the INTRA project in Spanish.
Informative leaflet about the INTRA project in Portuguese.
Informative leaflet about the INTRA project in Italian.
Informative leaflet about the INTRA project in Slovenian.
Members of the INTRA steering committee (updated).
The list of intraregional task force (ITF) members within INTRA.
Two possible outcomes for an SME that wants to become international.
How a Spanish producer of pork from Extremadura entered the Japanese market.
Algarve Cooking Vacations attracting international tourists from all over the globe.
A collection of 18 good practice cases observed during the SV to North-East Bulgaria.
A collection of 21 good practice cases observed during the SV to East Slovenia.
A collection of 17 good practice cases observed during the SV to Extremadura, Spain.
A collection of 5 good practice cases observed during the SV to the Algarve, Portugal.
A collection of 20 good practice cases observed during the SV to the West Midlands, UK.
The report on the 6th intraorganizational event in Maribor.
The report on the 6th RSG meeting in the West Midlands region.
Information gathered throughout the project implementation is structured into knowledge generation according to the Knowledge Sharing Strategy.
Report on the 2nd INTRA project intraorganizational meeting, held at the Maribor Development Agency on February 13th, 2017.
The report on the 2nd meeting of regional stakeholders in Maribor (Slovenia).
Report on the 3rd regional stakeholder meeting in Extremadura.
The methodology used to conduct the foresight exercise in Varna (2016).
Report on the 3rd INTRA project intraorganizational meeting, held at the Maribor Development Agency on September 6th, 2017.
Report on the 4th intraorganizational meeting in Maribor (February 12th, 2018).
This is the base methodological document for the organization of INTRA study visits.
On December 13th, 2017, an ITW meeting was held in Birmingham (UK).
Report on the 5th intraorganizational meeting in Maribor, held on September 27th, 2018.
# -------------------------------------------------------------------------
# Copyright (C) 2005-2013 Martin Strohalm <www.mmass.org>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# Complete text of GNU GPL can be found in the file LICENSE.TXT in the
# main directory of the program.
# -------------------------------------------------------------------------

# load libs
import sys
import platform
import numpy
import wx

# load modules
import mwx
import config
import images


# SYSTEM ERROR
# ------------

class dlgError(wx.Dialog):
    """Show exception message."""

    def __init__(self, parent, exception=''):
        wx.Dialog.__init__(self, parent, -1, 'Application Error',
            style=wx.DEFAULT_DIALOG_STYLE|wx.STAY_ON_TOP)

        # get system information
        self.exception = ''
        self.exception += exception
        self.exception += '\n-------------------------'
        self.exception += '\nmMass: %s' % (config.version)
        self.exception += '\nPython: %s' % str(platform.python_version_tuple())
        self.exception += '\nwxPython: %s' % str(wx.version())
        self.exception += '\nNumPy: %s' % str(numpy.version.version)
        self.exception += '\n-------------------------'
        self.exception += '\nArchitecture: %s' % str(platform.architecture())
        self.exception += '\nMachine: %s' % str(platform.machine())
        self.exception += '\nPlatform: %s' % str(platform.platform())
        self.exception += '\nProcessor: %s' % str(platform.processor())
        self.exception += '\nSystem: %s' % str(platform.system())
        self.exception += '\nMac: %s' % str(platform.mac_ver())
        self.exception += '\nMSW: %s' % str(platform.win32_ver())
        self.exception += '\nLinux: %s' % str(platform.dist())
        self.exception += '\n-------------------------\n'
        self.exception += 'Add your comments:\n'

        # make GUI
        sizer = self.makeGUI()

        # fit layout
        self.Layout()
        sizer.Fit(self)
        self.SetSizer(sizer)
        self.SetMinSize(self.GetSize())
        self.Centre()
    # ----

    def makeGUI(self):
        """Make GUI elements."""

        # make elements
        self.exception_value = wx.TextCtrl(self, -1, self.exception,
            size=(400,250), style=wx.TE_MULTILINE)
        self.exception_value.SetFont(wx.SMALL_FONT)

        message_label = wx.StaticText(self, -1, "Uups, another one...\nUnfortunately, you have probably found another bug in mMass.\nPlease send me this error report to [email protected] and I will try to fix it.\nI apologize for any inconvenience due to this bug.\nI strongly recommend to restart mMass now.")
        message_label.SetFont(wx.SMALL_FONT)

        icon = wx.StaticBitmap(self, -1, images.lib['iconError'])

        quit_butt = wx.Button(self, -1, "Quit mMass")
        quit_butt.Bind(wx.EVT_BUTTON, self.onQuit)

        cancel_butt = wx.Button(self, wx.ID_CANCEL, "Try to Continue")

        # pack elements
        messageSizer = wx.BoxSizer(wx.HORIZONTAL)
        messageSizer.Add(icon, 0, wx.RIGHT, 10)
        messageSizer.Add(message_label, 0, wx.ALIGN_LEFT)

        buttSizer = wx.BoxSizer(wx.HORIZONTAL)
        buttSizer.Add(quit_butt, 0, wx.RIGHT, 15)
        buttSizer.Add(cancel_butt, 0)

        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(self.exception_value, 0, wx.EXPAND|wx.CENTER|wx.ALL, mwx.PANEL_SPACE_MAIN)
        mainSizer.Add(messageSizer, 0, wx.ALIGN_LEFT|wx.LEFT|wx.RIGHT|wx.BOTTOM, mwx.PANEL_SPACE_MAIN)
        mainSizer.Add(buttSizer, 0, wx.ALIGN_RIGHT|wx.LEFT|wx.RIGHT|wx.BOTTOM, mwx.PANEL_SPACE_MAIN)

        return mainSizer
    # ----

    def onQuit(self, evt):
        """Quit application."""
        sys.exit()
    # ----
Credit repair companies have helped millions around the country toward a better financial future. They eliminate the hours and hours of research and writing/data entry that come with disputing negative credit marks by yourself. Going through a credit repair company will typically be the better route, as they are well versed in federal and state credit law and how it can be used to remove those hard-to-fix negative marks on your credit report. The three companies I'm going to talk about have helped either me or someone I know personally with their credit challenges. Even if you are part of the 0.01% of the U.S. population that has a 300 credit score (the lowest credit score possible), these companies can and will help you drastically improve your credit score. The list is relatively short, as we at The Credit Dojo only want to focus on the best of the best, with real-world experience to back up the companies' claims. Lexington Law is the complete package when it comes to credit monitoring and repair. They are always consistent in the level of service they offer and do a thorough job of handling any type of credit problem that can typically be found on your credit report. The firm has been around for decades, so they are not a fly-by-night operation. They advertise that you will see results within 60 days, but in almost all cases people saw results MUCH sooner than that (real-world results from people I know, not based on Yelp reviews). That is very impressive, seeing as the typical length of time being advertised today is around 3 to 4 months. The initial fee for Lexington Law is $89.95, and you will be charged $89.95 monthly. Lexington Law is an excellent choice to repair your credit. They do offer higher-tiered service, but for the needs of most people, the $89.95 service (named the "Concord" service) should be sufficient. The website is pretty easy to navigate. They have the phone number listed on the main page if you like talking to a real person. There is also a Credit Education feature on the page that will walk you through how credit works, what credit repair consists of, and why having a professional credit repair representative working for you may be the best way to get results fast. They also have some tools on their site that show what effect a specific credit score range can have on a home loan, auto loan, credit card application or loan refinancing. Anything you can think of related to credit, Lexington Law appears to have covered in depth. They have a long history of dealing with all types of credit problems. The reviews for Lexington Law all over the internet, and reviews from my peers, confirm their quality-over-quantity approach. Other benefits of working with Lexington Law include dealing with attorneys that have handled credit/debt law for decades. They aren't novices; you will know how competent they are after speaking with them for the first time. They are well versed in the Fair Credit Reporting Act, Fair Credit Billing Act and Fair Debt Collections Practices Act. This allows them to view your credit/debt situation and attack it from every legal angle possible. You can sign up by clicking on the company logo above. You can also CLICK HERE to learn more. CreditRepair.com has been around for more than a decade and has a lot of experience dealing with credit problems of all types. They take your credit report, evaluate it, then lay out their game plan for addressing the negative items on your credit report.
Then they will reach out to the creditors on your behalf to address the credit problem you're facing. This will save you both time and energy if you are not up for the task of learning credit repair. It sounds so simple, but believe me when I say this is a lot of work! The initial fee to get things started is only $14.99. After that, you will pay $99.95 a month for their services. The website is well laid out and easy to navigate. The menu items are straightforward and simple. You can choose to learn "How it Works", read "Reviews", or click "Who We Are" to learn more about the company's mission and what services they offer their clients. On the main page, they also have a section called "What You Get" that states exactly what comes with their package (which is a lot for the price). While they don't have their own internal "Education" section like Lexington Law, they do offer links to some of their partners who offer in-depth knowledge about credit. Their partners are quality companies such as Credit Karma and LendingTree. As you can see from this list, you get quite a bit for the money. All bases are covered with CreditRepair.com, which is why we recommend them and included them in our top 3. CreditRepair.com offers the consumer a lot for the money. The turnaround time from consultation to results is fairly quick. I only have feedback from one individual who has used this service, and they describe it as "extremely thorough". They removed all but one item within the first 2 months, and the last within 3 months. That is VERY impressive and fast when it comes to removing negative items from a credit report. The Personal Member Dashboard is also useful, as you are able to see the changes to your credit in real time. They also offer identity monitoring with their package. CreditRepair.com sets you up for a future of financial success in a very timely manner. You can sign up by clicking on the company logo above, or by clicking HERE. The Credit Pros is a complete package. They offer much of the same services as Lexington Law. One of the cool features that sets them apart is their Pay for Deletion service. With this, you only pay for the items that are deleted from your credit report ($50 per negative mark and $75 per public record removed). That means if nothing can be removed from your report, you don't pay a dime. This is great for those who don't have too many blemishes on their credit report and need professional assistance removing items from it. In the long run, this has the potential to save you a TON of money. Their monthly individual service is $179 for the initial fee, then $99 a month after that. For couples, it is $279 for the initial fee, then $149 a month. The Pay for Deletion option is a nice way to show appreciation for the customer while also showing just how frequently their service works for the consumer. It's a win for everyone. The Credit Pros are not as established as Lexington Law, since they have only been around since 2009. That doesn't mean you shouldn't give them a try. From the people I know who have worked directly with them, and from online reviews, I can say with confidence they do not have any major blemishes on their record in regard to the services they offer consumers. They use tons of different strategies to improve credit score problems of all types. For this reason, and for their honest Pay for Deletion service, they make The Credit Dojo's top list. You can sign up by clicking on the company logo above, or by visiting The Credit Pros HERE.
While there are many companies out there that can help you, it is your credit we are talking about. Do you need professional assistance? Can you fix it yourself? For me, I chose to do it myself, as I was a broke college student and couldn't afford a professional. While it did work in my favor (luckily), there were still a few items I could have had removed sooner had I been more knowledgeable. This is where a professional comes in handy and can really help you. I know, we typically flock to whatever is cheapest. But cheaper is not always better in this case, especially when it directly impacts your finances. If you follow these articles, you will gain at the very least a basic understanding of credit and what goes into your report and score. It will empower you to take control of your credit. I will say this is not for everyone, as depending on how many items you have on your credit report, it can be VERY time consuming. Always remember to stay on top of your credit and make sure you are spending responsibly. Thank you for stopping by The Credit Dojo!
from fabric.api import *
from fabric.contrib import files
import platform
from os.path import join
from argparse import ArgumentError

env.hosts = ['your_username@yourhost:yourport']
env.web_path = '/var/www/django'
env.log_root = '/var/log/apache'
env.project = 'example_project'

@task
def dev_serve():
    with prefix('Scripts\\activate.bat' if platform.system() == "Windows" else 'source bin/activate'):
        local(join('compass watch %(project)s', 'static &') % env)
        local(join('python %(project)s', 'manage.py runserver') % env)

@task
def bootstrap_dev():
    """ Bootstrap your local development environment """
    local('git clone https://github.com/twoolie/ProjectNarwhal.git')
    with lcd('ProjectNarwhal'):
        local('virtualenv --distribute .')
        with prefix('Scripts\\activate.bat' if platform.system() == "Windows" else 'source bin/activate'):
            local('pip install -r requirements.txt')
            local(join('python %(project)s', 'manage.py syncdb --all') % env)
            local(join('python %(project)s', 'manage.py migrate --fake') % env)

@task
def bootstrap(hostname, path=env.web_path, **kwargs):
    """ Creates a virtualhost instance on the box you specify
        `fab -H server1,server2 bootstrap:narwhal.example.com[,deploy_options...]`
    """
    run('mkdir -p %(path)s/%(hostname)s/' % locals())
    with cd('%(path)s/%(hostname)s/' % locals()):
        run('git init .')
    kwargs.update(hostname=hostname, path=path)
    deploy(**kwargs)  # deploy script takes care of the rest

@task
def deploy(hostname, ref='master', path=env.web_path, apache_conf_path=None, distro=None,
           log_root=env.log_root, thread_count=2, process_count=4):
    """ `fab -H server1,server2 deploy:narwhal.example.com` """
    if not apache_conf_path:
        apache_conf_path = find_apache_path(distro)
    local('git push -f ssh://%(host_string)s/%(path)s/%(hostname)s/ %(ref)s' % locals())
    with cd('%(path)s/%(hostname)s' % locals()):
        run('git checkout -f %(ref)s' % locals())
        run('pip install -r requirements.txt')
        with cd(env.project):
            files.upload_template('apache.conf', apache_conf_path + hostname,
                                  context=locals(), mode=0755, use_sudo=True)
            run('./manage.py collectstatic --noinput')
            run('./manage.py syncdb')
            run('./manage.py migrate')
            run('touch serve.wsgi')  # restart the wsgi process

@task
def deploy_epio(appname):
    """ fab deploy_epio:appname """
    with lcd(env.project), prefix('Scripts\\activate.bat' if platform.system() == "Windows" else 'source bin/activate'):
        local('python manage.py collectstatic --noinput')
        local(('mklink /D %(link)s %(target)s' if platform.system() == "Windows" else 'ln -s %(target)s %(link)s')
              % {'link': 'narwhal', 'target': '../narwhal'})
        local('python manage.py build_solr_schema > solr_schema.xml')
        local('epio upload -a %(appname)s' % locals())
        local('epio django syncdb -a %(appname)s' % locals())
        local('epio django migrate -a %(appname)s' % locals())
        local('epio django rebuild_index')

#-------- Utils ----------

def _join(*args):
    return "/".join(args)

def find_apache_path(distro):
    if not distro:
        distro = run('python -c "import platform; print platform.dist()[0]"')
    if distro in ('debian', 'ubuntu'):
        return '/etc/apache2/sites-enabled/'
    else:
        raise ArgumentError('Cannot automatically determine apache_conf_path')
The Onda X20 is a cost-effective tablet that supports 4G networks. Equipped with a 10.1 inch 10-point IPS touch display, it brings your photos and videos to life. The Android 8.0 OS ensures a more enjoyable user experience. Powered by an MT6797 (Helio X20) deca-core processor, it offers smooth operation and a stable multitasking experience. Rear-facing and front dual cameras capture memorable moments and high-quality pictures in daily life. Dual-band 2.4GHz / 5.0GHz WiFi is supported, ensuring high-speed surfing. The 3.8V / 6600mAh battery monitors current, power, and temperature, and adjusts performance dynamically.

Amazing sound, screen, and build quality. The tablet is very well built and very sturdy. If you drop the tablet on the floor, please make sure the floor didn't get damaged. This tablet is much better, in every aspect, than I expected to get at this price. The screen is wonderful, with very even backlighting and good color. The fingerprint sensor works quite fast. The video display is very good and bright. This tablet was sufficient for my needs. Perfect value, excellent performance. Beautifully crafted with precise execution in every aspect.

Which frequencies does the Onda X20 4G Phablet support? Does the Onda X20 have a Miracast function? Does the Onda X20 support playing 4K video? Which frequencies and bands are supported? It needs to run on the T-Mobile USA network; can it? Do you know if it can?
# -*- coding: utf-8 -*-

from gluon import current

def config(settings):
    """
        Template settings for Myanmar
        - designed to be used in a Cascade with an application template
    """

    #T = current.T

    # Pre-Populate
    settings.base.prepopulate.append("locations/MM")

    # Restrict to specific country/countries
    settings.gis.countries.append("MM")

    # Disable the Postcode selector in the LocationSelector
    settings.gis.postcode_selector = False

    # L10n (Localization) settings
    settings.L10n.languages["my"] = "Burmese"
    # Default Language (put this in custom template if-required)
    #settings.L10n.default_language = "my"
    # Default timezone for users
    settings.L10n.timezone = "Asia/Rangoon"
    # Default Country Code for telephone numbers
    settings.L10n.default_country_code = 95

    settings.fin.currencies["MMK"] = "Myanmar Kyat"
    settings.fin.currency_default = "MMK"

# END =========================================================================
Mt. Rundle is a mountain in Canada's Banff National Park overlooking the towns of Banff and Canmore, Alberta. The easiest route up Mt. Rundle ascends the gently sloping SW, right-hand profile of the mountain, as viewed from Banff. This is a non-technical climb if you are on the correct route and in the right season. Aside from sturdy hiking boots, no special climbing gear is required. Most of the route to the summit entails very steep, strenuous hiking. On much of the climb, the sloped footing can be awkward and tiresome, and can be very slippery in wet conditions. The standard route is straightforward, but there are several places where mistakes can lead to disaster.
#!/usr/bin/env python
from spider import *
import re
sys.path.append("..")
from record import Record

class VideolecturesSpider(Spider):
    def __init__(self):
        Spider.__init__(self)
        self.school = 'videolectures'
        self.type_map = {'Lecture ' : 'vl',
                         'Tutorial' : 'vtt',
                         'Keynote' : 'vkn',
                         'Interview' : 'viv',
                         'Other' : '__'}
        self.subject_cid_map = {'Machine Learning' : '16',
                                'Data Mining' : '36',
                                'Computer Vision' : '71',
                                'Network Analysis' : '28',
                                'Data Visualisation' : '41',
                                'Natural Language Processing' : '144',
                                'Pattern Recognition' : '395',
                                'Text Mining' : '37',
                                'Web Mining' : '127',
                                'Robotics' : '69',
                                'Artificial Intelligence' : '136',
                                'Big Data' : '602',
                                'Semantic Web' : '27',
                                'Web Search' : '163',
                                'Optimization Methods' : '232'}

    def findLastPage(self, soup):
        max_page = 1
        for a in soup.find_all('a'):
            if a.text == ' Last ':
                max_page = int(a['href'][a['href'].find('(') + 1 : a['href'].find(')')])
                break
        return max_page

    def processEventData(self, subject):
        r = requests.get('http://videolectures.net/site/ajax/drilldown/?t=evt&cid=13&w=5')
        soup = BeautifulSoup(r.text)
        max_page = self.findLastPage(soup)
        file_name = self.get_file_name('eecs/' + self.school + '/' + subject, self.school)
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        urls_list = []
        for page in range(1, max_page + 1):
            r = requests.get('http://videolectures.net/site/ajax/drilldown/?o=top&t=evt&p=' + str(page) + '&cid=13&w=5')
            soup = BeautifulSoup(r.text)
            for a in soup.find_all('a'):
                if a.attrs.has_key('lang'):
                    urls_list.append('http://videolectures.net' + a['href'])
            i = 0
            title = ''
            desc = ''
            for span in soup.find_all('span'):
                i += 1
                if i == 1:
                    print title
                    title = span.text.strip()
                if i == 2:
                    desc = 'description:' + span.text.strip() + ' '
                if i == 3:
                    desc += span.text.strip()
                    self.count += 1
                    self.write_db(f, subject + '-' + str(self.count), title,
                                  urls_list[self.count - 1], desc)
                    i = 0
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"

    def processData(self, subject):
        file_name = self.get_file_name('eecs/' + self.school + '/' + subject, self.school)
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        print 'processing ' + subject
        for s in self.type_map.keys():
            r = requests.get('http://videolectures.net/site/ajax/drilldown/?t=' + self.type_map.get(s) + '&cid=' + self.subject_cid_map.get(subject) + '&w=5')
            soup = BeautifulSoup(r.text)
            max_page = self.findLastPage(soup)
            for page in range(1, max_page + 1):
                r = requests.get('http://videolectures.net/site/ajax/drilldown/?o=top&t=' + self.type_map.get(s) + '&p=' + str(page) + '&cid=' + self.subject_cid_map.get(subject) + '&w=5')
                soup = BeautifulSoup(r.text)
                for div in soup.find_all('div', class_='lec_thumb'):
                    instructors = ''
                    title = div.a.span.span.text.strip()
                    url = 'http://videolectures.net' + div.a['href']
                    soup1 = BeautifulSoup(div.prettify())
                    div = soup1.find('div', class_='author')
                    if div != None and div.span != None:
                        instructors = 'instructors:' + div.span.text.strip()
                    self.count += 1
                    vl_num = 'vl-' + str(self.subject_cid_map.get(subject)) + '-' + str(self.count)
                    print vl_num + ' ' + title
                    self.write_db(f, vl_num, title, url, instructors)
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"

    def upFirstChar(self, text):
        result = ''
        for i in range(0, len(text)):
            if (i > 0 and text[i - 1] == ' ') or i == 0:
                result += str(text[i]).upper()
            else:
                result += text[i]
        return result.strip()

    def getNameAndDescription(self, url):
        name = ''
        homepage = ''
        desc = ''
        r = requests.get(url)
        soup = BeautifulSoup(r.text)
        span_name = soup.find('span', class_='auth_name')
        span_desc = soup.find("span", id="auth_desc_edit")
        if span_name != None and span_name.a != None:
            name = span_name.a.text.replace(' ', ' ').strip()
            homepage = span_name.a['href']
            desc += 'homepage:' + homepage + ' '
        if span_desc != None:
            desc += 'description:' + span_desc.text.replace('\n', ' ').strip()
        return name, desc

    def processUserData(self):
        print 'processing user data'
        file_name = self.get_file_name('eecs/' + self.school + '/user', self.school)
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        user_dict = {}
        for page in range(1, 24):
            r = requests.get('http://videolectures.net/site/list/authors/?page=' + str(page))
            soup = BeautifulSoup(r.text)
            for tr in soup.find_all('tr'):
                if tr.text.find('Author') == -1:
                    soup1 = BeautifulSoup(tr.prettify())
                    video_pos = tr.text.find('video')
                    views_pos = tr.text.find('views')
                    url = 'http://videolectures.net' + soup1.find('a')['href']
                    desc = ''
                    vl_id = ''
                    title = self.upFirstChar(soup1.find('a')['href'][1:].replace('/', '').replace('_', ' '))
                    self.count += 1
                    if tr.text.find('videos') != -1:
                        vl_id = str(tr.text[video_pos + 6 : views_pos].strip()) + '-' + str(self.count)
                    else:
                        vl_id = str(tr.text[video_pos + 5 : views_pos].strip()) + '-' + str(self.count)
                    desc = 'organization:' + tr.text[views_pos + 5 :]
                    if views_pos == -1:
                        vl_id = '0' + '-' + str(self.count)
                        desc = 'organization:' + tr.text[video_pos + 5 :]
                    print vl_id + ' ' + title
                    user_dict[vl_id] = Record(self.get_storage_format(vl_id, title, url, desc))
        self.count = 0
        for item in sorted(user_dict.items(),
                           key=lambda user_dict: int(user_dict[1].get_id()[0 : user_dict[1].get_id().find('-')].strip()),
                           reverse=True):
            self.count += 1
            name = ''
            desc = ''
            if self.count <= 100 and item[1].get_url().strip().startswith('http'):
                name, desc = self.getNameAndDescription(item[1].get_url().strip())
            uid = 'vl-' + item[1].get_id()[0 : item[1].get_id().find('-')] + '-' + str(self.count)
            if name == '':
                name = item[1].get_title().strip()
            #print uid + ' ' + name
            self.write_db(f, uid, name, item[1].get_url().strip(),
                          item[1].get_describe().strip() + ' ' + desc)
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"

    def doWork(self):
        self.processEventData('event')
        for subject in self.subject_cid_map.keys():
            self.processData(subject)
        self.processUserData()

start = VideolecturesSpider()
start.doWork()
They named the star in Ukrainian (“Putin-Huilo!”), and Travis Metcalfe, the organization’s founder, says he didn’t realize what it meant until the name started making the social media rounds. Nevertheless, he’s keeping it. Proclaiming that “free speech is now written in the stars,” Metcalfe said the Ukrainians’ interstellar insult is fair game, and that the name won’t be changing anytime soon. The phrase “Putin huilo,” which can also be translated as “Putin is a dipshit,” became a popular slogan amongst Ukrainians after then-Ukrainian Foreign Minister Andriy Deshchytsia used it to describe the Russian President at a protest outside of the Russian embassy in Kiev earlier this summer. It’s since become embedded in Ukrainian pop culture and has found its way onto t-shirts and into music videos. It’s worth noting that these aren’t “official” star names; the actual, historical database of star names is in the hands of the International Astronomical Union. Pale Blue Dot launched this project in 2008, and it’s only one of many groups that allow people to “adopt a star.” However, as Metcalfe points out, most of those organizations do this on a for-profit basis, while Pale Blue Dot is using the money to fund space research. It’s working — in 2013, the organization discovered the smallest exoplanet yet.
#!/usr/bin/env python
#
# Copyright (c) 2010, Ryan Marquardt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#   1. Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#   2. Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#   3. Neither the name of the project nor the names of its contributors
#      may be used to endorse or promote products derived from this software
#      without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import collections
import os.path
import re
import sys
import threading

DEBUG = True
debug_out_lock = threading.Lock()
def debug(*args):
    if DEBUG:
        with debug_out_lock:
            print >>sys.stderr, threading.currentThread().name + ':', ' '.join(map(str, args))

VERBOSE = True
def verbose(*args):
    if VERBOSE:
        print >>sys.stderr, ' '.join(map(str, args))

TIME_FORMAT = 'hms'
SECOND = 1e9
NETPORT = 8145
BUFSIZE = 1 << 12  # 4096

class Time(long):
    @classmethod
    def FromNS(cls, ns):
        return Time(ns)

    @classmethod
    def FromSec(cls, s):
        return Time(s * SECOND)

    def __repr__(self):
        return self.format('s.')

    def __str__(self):
        return self.format('hms')

    def format(self, f):
        if f == 'hms':
            m, s = divmod(self / SECOND, 60)
            h, m = divmod(m, 60)
            return '%d:%02d:%02d' % (h, m, s) if h else '%d:%02d' % (m, s)
        elif f == 's.':
            return '%f' % (self / float(SECOND))

def uri(path):
    return path if re.match('[a-zA-Z0-9]+://.*', path) else 'file://' + os.path.abspath(path)

class PObject(object):
    def connect(self, which, func, *args, **kwargs):
        try:
            self.__callbacks
        except AttributeError:
            self.__callbacks = collections.defaultdict(list)
        self.__callbacks[which].append((func, args, kwargs))

    def emit(self, signal, *args):
        try:
            cbs = iter(self.__callbacks[signal])
        except AttributeError:
            pass
        else:
            for f, a, k in cbs:
                f(*(args + a), **k)

def hasattrs(obj, attrs):
    return all(hasattr(obj, a) for a in attrs)
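A minimal sketch of how PObject's connect/emit mechanism might be used; the Player class and the 'eos' signal name are made up for illustration:

class Player(PObject):
    """Hypothetical emitter that signals end-of-stream."""
    def finish(self):
        self.emit('eos')   # fires every callback connected to 'eos'

def on_eos(source):
    print 'finished playing', source

p = Player()
# Extra positional args given to connect() are appended to emit()'s args.
p.connect('eos', on_eos, uri('song.mp3'))
p.finish()   # -> finished playing file:///.../song.mp3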
Stewardship is recognizing your life and talents as important gifts from God, to be accepted with an attitude of gratitude, in a responsible and accountable manner, and returned with increase to God. As stewards, we follow the principles of the Bible and the teachings of Jesus by making God an active priority in our lives. How have I used my gifts? Do I yearn to go deeper in my faith? Is there an area of parish life that I am called to? Have I given of my 'first fruits'? In giving of our first fruits, the parish and its members support various local community organizations that do work in social outreach and justice. For the fiscal year 2015-2016, St. Joe's donated over $55,000, plus countless food, clothing, quilts, gifts, school supplies, cell phones, personal items, and much more! We are given 24 hours each day, 7 days a week, 52 weeks in the year. How will you spend yours? Can you help with parish youth programs? Can you help minister to those in need in our greater community? Can you assist at weekend liturgies or during the week at the parish office? The benefits of volunteering are many.
- Serving your faith community.
- Feeling more a part of the parish.
We all have money to live on. Each of us has organizations we like to support. A 10% tithe can be broken into 5% for my parish and 5% for other organizations. How much am I able to contribute to support my parish? What are the advantages of the Electronic Funds Transfer Program?
- It gives you an easy and direct way to make your financial stewardship commitment.
- It saves you and the parish time and money.
- It allows the parish to more confidently provide funds for various ministries.
- It automatically deducts the amount you want to give to St. Joseph on a weekly, monthly, quarterly or annual basis.
You may give toward the general fund or the building fund. Or both! The Electronic Funds Transfer Program sheet can be downloaded on the left, under documents. Thank you for supporting the many ministries here at St. Joseph!
and the mystery of each human life.
belongs to You and is Your gift.
and our hearts with Your love.
of the gifts You entrust to us.
and care for each other as Jesus taught us.
import json

import responses

from twitch.client import TwitchClient
from twitch.constants import BASE_URL
from twitch.resources import Clip

example_clip = {
    "broadcast_id": "25782478272",
    "title": "cold ace",
    "tracking_id": "102382269",
    "url": "https://clips.twitch.tv/OpenUglySnoodVoteNay?tt_medium=clips_api&tt_content=url",
}

example_clips = {"clips": [example_clip]}


@responses.activate
def test_get_by_slug():
    slug = "OpenUglySnoodVoteNay"
    responses.add(
        responses.GET,
        "{}clips/{}".format(BASE_URL, slug),
        body=json.dumps(example_clip),
        status=200,
        content_type="application/json",
    )

    client = TwitchClient("client id")
    clip = client.clips.get_by_slug(slug)

    assert isinstance(clip, Clip)
    assert clip.broadcast_id == example_clip["broadcast_id"]


@responses.activate
def test_get_top():
    params = {"limit": 1, "period": "month"}
    responses.add(
        responses.GET,
        "{}clips/top".format(BASE_URL),
        body=json.dumps(example_clips),
        status=200,
        content_type="application/json",
    )

    client = TwitchClient("client id")
    clips = client.clips.get_top(**params)

    assert len(clips) == len(example_clips)
    assert isinstance(clips[0], Clip)
    assert clips[0].broadcast_id == example_clips["clips"][0]["broadcast_id"]
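As a supplementary sketch, the responses library also records outgoing requests in responses.calls (part of its public API), which lets a test pin down the exact URL the client hit. The test name here is my own addition:

@responses.activate
def test_get_by_slug_request_logging():
    slug = "OpenUglySnoodVoteNay"
    responses.add(
        responses.GET,
        "{}clips/{}".format(BASE_URL, slug),
        body=json.dumps(example_clip),
        status=200,
        content_type="application/json",
    )

    client = TwitchClient("client id")
    client.clips.get_by_slug(slug)

    # One outgoing call was made, and it targeted the Twitch API base URL.
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(BASE_URL)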
I'm kicking 2017 off with a month-long fitness challenge, courtesy of TruFusion Summerlin. I was one of the lucky recipients of a month-long pass to their newest facility. Check out my winning photo! My goal for the year is to receive POP Pilates certification, and I'm determined to be as strong as possible. To get this journey in gear, I am bound and determined to overcome my hatred of mornings and will be working out BEFORE work. "Allergic To Mornings" is probably the best description of my life. But seeing as I get off at peak traffic times, those 6 a.m. morning classes are calling my name. So check out my journey on my Instagram. I will be posting a photo every day and updating the blog periodically with the different classes I am experiencing.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import re
import xml.etree.ElementTree as ET

selectors = {'ServiceItem': ['descriptiveName', 'mode', 'path', 'pathmd5sum', 'status', 'name'],
             'RegistryItem': ['KeyPath', 'ValueName'],
             'FileItem': ['FilePath', 'FullPath', 'FileExtension', 'FileName'],
             'ArpEntryItem': ['Interface', 'IPv4Address', 'PhysicalAddress', 'CacheType'],
             'DnsEntryItem': ['RecordName', 'RecordType', 'TimeToLive', 'DataLength', 'RecordData/Host',
                              'RecordData/IPv4Address'],
             'PortItem': ['protocol', 'localIP', 'localPort', 'remoteIP', 'remotePort', 'state', 'pid'],
             'PrefetchItem': ['PrefetchHash', 'ApplicationFileName', 'ReportedSizeInBytes', 'SizeInBytes',
                              'TimesExecuted', 'FullPath'],
             'ProcessItem': ['pid', 'parentpid', 'UserSID', 'Username', 'name', 'path', 'HandleList/Handle/Type',
                             'HandleList/Handle/Name'],
             'MemoryItem': ['pid', 'parentpid', 'name', 'page_addr', 'page_size', 'access_read', 'access_write',
                            'access_execute', 'access_copy_on_write']
             }


def multiprompt(options, all=False):
    # Anchor the pattern and allow multi-digit choices; '*' is only valid when all=True
    regex = re.compile(r'^(\d+|\*)$' if all else r'^(\d+)$')
    for counter, opt in enumerate(options):
        print '({})\t{}'.format(counter + 1, opt)
    if all:
        print '(*)\tAll of them'
    user_input = raw_input('> ')
    if not regex.search(user_input):
        print '\n[>] Please enter a valid value.'
        # keep the 'all' flag on retry (it was dropped before)
        return multiprompt(options, all)
    if user_input != '*' and not (1 <= int(user_input) <= len(options)):
        print '\n[>] Please enter a value within range.'
        return multiprompt(options, all)
    return user_input if user_input == '*' else int(user_input) - 1


def setSelectAttribute(items, chosenItem, chosenSelector):
    context = items[chosenItem].find('Context')
    document = context.get('document')
    context.set('select', '{}/{}'.format(document, selectors[document][chosenSelector]))


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'Usage : python ioc-selector.py [ioc path]'
        exit(1)

    try:
        tree = ET.parse(sys.argv[1])
    except IOError:
        print 'Your IOC file was not found.'
        exit(1)

    # Stripping IOC namespaces
    for el in tree.getroot().iter():
        if '}' in el.tag:
            el.tag = el.tag.split('}', 1)[1]

    root = tree.getroot()

    # Getting all indicator items elements
    items = root.findall('.//IndicatorItem')
    itemsList = []
    for i in items:
        itemsList.append('{} {} {}'.format(i.find('Context').get('search'), i.get('condition'),
                                           i.find('Content').text))

    print '[>] Which indicator item would you like to edit?'
    choice = multiprompt(itemsList, True)

    print '\n[>] Which attribute would you like to select?'
    if choice == '*':
        print '[!] All the indicators will get the same \'select\' attribute.'
        document = items[0].find('Context').get('document')
        selec = multiprompt(selectors[document])
        for nb in range(len(items)):
            setSelectAttribute(items, nb, selec)
    else:
        document = items[choice].find('Context').get('document')
        selec = multiprompt(selectors[document])
        setSelectAttribute(items, choice, selec)

    try:
        filename = sys.argv[1] + '-select'
        tree.write(filename)
        print '[>] File successfully saved as ' + filename
    except Exception as e:
        print '[X] Something happened: ' + str(e)
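For readers who have never seen the XML this script edits, here is a minimal, hypothetical OpenIOC-style fragment inferred from the XPath queries above (`.//IndicatorItem`, `Context`, `Content`) — not a real IOC file — together with the `select` rewrite the script performs:

import xml.etree.ElementTree as ET

# Assumed shape of an indicator, matching the paths the script queries.
SAMPLE = """<ioc><criteria>
  <IndicatorItem condition="is">
    <Context document="FileItem" search="FileItem/FileName"/>
    <Content type="string">evil.exe</Content>
  </IndicatorItem>
</criteria></ioc>"""

root = ET.fromstring(SAMPLE)
context = root.find('.//IndicatorItem').find('Context')
# Mirror setSelectAttribute: select = "<document>/<selector>"
context.set('select', '{}/{}'.format(context.get('document'), 'FullPath'))
print(ET.tostring(root))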
For the past few years, both AMD and Nvidia have leaned heavily on rebadged products when they introduced new hardware. This has led to widespread confusion and made the relationship between model number, performance, and power consumption even harder to track than it already is. With the HD 8000 family, AMD is taking steps to simplify its product lineup, standardize on the Graphics Core Next (GCN) architecture, and set the stage for further advances in 2013. Let’s tackle those one at a time. Up until now, the HD 7000M midrange parts have been rebadged 40nm hardware. The new HD 8500 and HD 8600 chips will resolve that, and give AMD’s midrange a 28nm footing. That’s good news for the company, particularly given the emphasis on ultra-thin form factors and long battery life. In the midrange, this is an across-the-board upgrade. AMD’s performance figures predict a 20-30% gain over the company’s previous midrange parts — again, a nice boost. The HD 8700 and 8800M, however, are a bit of a different story. Their counterparts in the HD 7700 and HD 7800 family are already based on 28nm GCN hardware — so what new capabilities do the updates bring to the table? Here, AMD was a bit cagey. According to Jay Marsden, the product marketing manager for AMD’s Notebook Graphics, the 8700 and 8800 tweak the underlying GCN architecture, optimize software code, improve PowerTune features, and generally improve performance per watt. AMD says these are new cores, but it’s not calling them Gen 2 of Graphics Core Next, either. AMD’s repositioning of the HD 8800M as a midrange part is, I think, partly an admission of the uphill fight it faces against Nvidia’s Kepler. It also suggests that the company may have pursued a strategy similar to the one it used for the HD 6000 family. Then, TSMC’s 40nm troubles necessitated a split GPU strategy: the HD 6800 family on the desktop was an improved version of the HD 5000 cards, while the HD 6900 series was something different. That GPU, Cayman, was a VLIW4 design that debuted a bit later and bridged the engineering gap between the HD 5000 series and GCN. For AMD’s midrange chips, this is a definite upgrade, and the HD 7700M and 7800M should still see benefits. A few months into 2013 we’ll likely see a further follow-up that takes the title of GCN 2. In the meantime, this is a move in the right direction. Cleaning up product SKUs and rolling out 28nm-era technology is the right move, though we’d feel more comfortable if AMD’s published benchmarks included more direct comparisons with Nvidia hardware.
from api.handler.APIHandler import APIHandler

import asyncio
import tornado
import hashlib
import tasks
import api.web
import base64


class index(APIHandler):
    def get(self):
        pass

encrypt_key = 'api.encrypt.{text}'
decrypt_key = 'api.decrypt.{text}'

class encrypt(APIHandler):
    '''
    Hashing API. Example request: uri/encrypt?type='md5|sha256|sha512'&text=hello
    Parameters:
        type : the hash type; separate multiple types with |
        text : the source data to hash
    '''
    # Hash algorithms supported by this API, used to filter the type parameter
    TYPE = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'blake2b')

    @tornado.web.authenticated
    async def get(self, text):
        # If no type is given, return hashes for every supported type
        types = (self.get_argument('type', '|'.join(self.TYPE))).split('|')
        result = dict()
        for type_ in types:
            if type_ in self.TYPE:
                tmp = self.cache.exists(encrypt_key.format(text=text))
                if tmp and self.cache.hexists(encrypt_key.format(text=text), type_):
                    cache = self.cache.hget(encrypt_key.format(text=text), type_)
                    result[type_] = bytes.decode(cache)
                else:
                    print("not in cache")
                    result[type_] = await self.encrypt(type_, text)
                    await self.update_cache(type_, {'text': text, 'result': result})
            else:
                result[type_] = 'The encryption algorithm is not supported at this time'
        data = dict(query=text, result=result)
        self.write_json(data)

    async def encrypt(self, type_, text, charset='utf-8'):
        '''
        Generic hashing helper: uses Python reflection (getattr) to call the
        matching hashlib function and update the records in the hash database.
        Parameters:
            type_ : hash type
            text  : source data to hash
        '''
        if hasattr(hashlib, type_):
            result = getattr(hashlib, type_)(text.encode(charset)).hexdigest()
            return result

    async def update_cache(self, type_, data):
        '''
        Asynchronously update the cache and the database.
        '''
        text = data.get('text', '')
        result = data.get('result', '')
        self.cache.hmset(encrypt_key.format(text=text), {type_: result[type_]})
        self.cache.hmset(decrypt_key.format(text=result[type_]), {type_: text})
        tmp = {'text': text, 'result': data['result'][type_]}
        await self.update(tmp, type_)


class decrypt(APIHandler):
    '''
    Lookup ("decryption") API. Example request: uri/decrypt?type='md5|sha256|sha512'&text=hello
    Parameters:
        type : the digest type; separate multiple types with |; may be left empty if unknown
        text : the digest to look up
    '''
    # Hash algorithms supported by this API, used to filter the type parameter
    TYPE = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'blake2b')

    async def get(self, text):
        types = (self.get_argument('type', '|'.join(self.TYPE))).split('|')
        result = dict()
        for type_ in types:
            if type_ in self.TYPE:
                if self.cache.hexists(decrypt_key.format(text=text), type_):
                    # cache hit
                    cache = self.cache.hget(decrypt_key.format(text=text), type_)
                    result[type_] = bytes.decode(cache)
                else:
                    result[type_] = await self.decrypt(type_, text)
            else:
                result[type_] = 'The encryption algorithm is not supported at this time'
        data = dict(
            query = text,
            result = result
        )
        self.write_json(data)

    async def decrypt(self, type_, text, charset='utf-8'):
        '''
        Generic lookup helper: searches the stored records for a plaintext whose
        hash of the given type matches the digest.
        Parameters:
            type_ : digest type
            text  : digest to look up
        '''
        result = self.find({'result': text}, type_)
        if result:
            return result


class encode(APIHandler):
    '''
    Encoding API. Example request: uri/encode?type=base64&text=hello
    Parameters:
        type : the encoding type; separate multiple types with |
        text : the source data to encode
    '''
    # Encodings supported by this API, used to filter the type parameter
    TYPE = ('base16', 'base32', 'base64', 'base85')

    async def get(self, text):
        types = (self.get_argument('type', '|'.join(self.TYPE))).split('|')
        result = dict()
        for type_ in types:
            if type_ in self.TYPE:
                result[type_] = await self.encode(type_, text)
            else:
                result[type_] = 'The encoding algorithm is not supported at this time'
        data = dict(
            query = text,
            result = result
        )
        self.write_json(data)

    async def encode(self, type_, text, charset='utf-8'):
        '''
        Generic encoding helper: uses reflection (getattr) to call the matching
        function from the base64 module.
        Parameters:
            type_ : encoding type
            text  : source data to encode
        '''
        # Build the function name the base64 module expects, e.g. 'base64' -> 'b64encode'
        types = (type_[0:1] + type_[-2:] + 'encode')
        if hasattr(base64, types):
            result = getattr(base64, types)(text.encode()).decode()
            return result
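The cache-miss path above boils down to one reflective hashlib call; a standalone sketch of just that core, with the Tornado handler, cache, and database stripped away (those attributes are specific to this project):

import hashlib

SUPPORTED = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'blake2b')

def hash_text(type_, text, charset='utf-8'):
    # Same reflection trick as encrypt.encrypt(): look the hash constructor up by name.
    if type_ in SUPPORTED and hasattr(hashlib, type_):
        return getattr(hashlib, type_)(text.encode(charset)).hexdigest()
    return None

print(hash_text('sha256', 'hello'))
# -> 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824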
Big thanks to WNYT NewsChannel 13 and Meteorologist Paul Caiano for using my image of yesterday's shelf cloud on this morning's broadcast! I love getting messages like this one. Footage from various locations around the Northeast, New York, and New Jersey. Late summer sunlight stitches all these clips together. Summer fading is one of the most visually stunning times of the year in Upstate New York. The color at sunset always seems just a little deeper. The conditions produced by cool mornings and warm afternoons are so beautiful because they are a limited edition, soon to be replaced by the equally beautiful but monotone color palette of winter. Sunset on the Boardwalk in Wildwood, New Jersey. John Bulmer Photography and Groundswell Conservation are proud partners of the Rensselaer Land Trust. Learn more about the Rensselaer Land Trust at www.renstrust.org. Groundswell Conservation: Photography is a powerful tool for change. It's imperative that open spaces and fragile environments be protected. Groundswell Conservation raises awareness and promotes advocacy through the documentation of the endangered ecosystems of New England and beyond. Powerful images and video are one of the most effective ways to raise awareness of the natural spaces that are in peril from both climate change and overdevelopment. From the summits of the Adirondack Mountains to the watersheds of the Hudson River Valley to the coast of Southern New England, Groundswell Conservation produces dynamic media for environmental advocacy, open space preservation and conservation organizations. Contact Groundswell Conservation if your environmental organization is in need of media to help spread its message to a wider audience. Pro bono work available for qualified organizations. This is my favorite time of the year: cool foggy mornings, warm afternoons and Halloween.
import json
from decimal import Decimal

from django.http import HttpResponse

from statistics.models import Session, Committee, Point, ContentPoint


def session_api(request, session_id):
    # Since the graphs on the session page need to be able to livereload, we need to create
    # a custom "API" that outputs the necessary JSON to keep the graph alive
    session = Session.objects.get(pk=session_id)

    # First we need all the committees registered for that session
    committees = Committee.objects.filter(session__id=session_id).order_by('name')

    # Then we need all the available points and direct responses
    if session.session_statistics != 'C':
        all_points = Point.objects.filter(session_id=session_id).order_by('timestamp')
        points = Point.objects.filter(session_id=session_id).filter(point_type='P')
        drs = Point.objects.filter(session_id=session_id).filter(point_type='DR')
    else:
        all_points = ContentPoint.objects.filter(session_id=session_id).order_by('timestamp')
        points = ContentPoint.objects.filter(session_id=session_id).filter(point_type='P')
        drs = ContentPoint.objects.filter(session_id=session_id).filter(point_type='DR')

    # Then we need a list of each of them.
    committee_list = []
    points_list = []
    drs_list = []

    if not all_points:
        # Use the same 'mpp' key as the populated case so the consumer
        # can rely on one schema
        session_json = json.dumps({
            'committees': '',
            'points': '',
            'drs': '',
            'total_points': '0',
            'type_point': '',
            'type_dr': '',
            'mpp': '',
        })
    else:
        total_points = all_points.count()
        type_point = points.count()
        type_dr = drs.count()

        first_point = all_points.first().timestamp
        latest_point = all_points.last().timestamp
        time_diff = latest_point - first_point
        minutes = (time_diff.days * 1440) + (time_diff.seconds / 60)

        if total_points > 0:
            mpp = Decimal(minutes) / Decimal(total_points)
        else:
            mpp = 0

        # For each committee,
        for committee in committees:
            # Let c be the name
            c = committee.name
            # p be the count of points
            p = points.filter(committee_by=committee).count()
            # and d be the count of DRs.
            d = drs.filter(committee_by=committee).count()

            # Append each newly made variable to our nice lists.
            committee_list.append(c)
            points_list.append(p)
            drs_list.append(d)

        # Finally output the result as JSON
        session_json = json.dumps({
            'committees': committee_list,
            'points': points_list,
            'drs': drs_list,
            'total_points': total_points,
            'type_point': type_point,
            'type_dr': type_dr,
            'mpp': str(round(mpp, 3)),
        })

    return HttpResponse(session_json, content_type='application/json')
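For completeness, a hypothetical URLconf entry wiring the view up — the module path and route shape are assumptions matching the Django 1.x era of this code, not the project's actual urls.py:

# urls.py (hypothetical)
from django.conf.urls import url

from statistics.views import session_api

urlpatterns = [
    url(r'^session/(?P<session_id>\d+)/api/$', session_api, name='session_api'),
]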
As yesterday’s news of the upcoming Wood Elf DLC revealed, Creative Assembly are branching out into a whole new Total War: Warhammer faction on 8 December. Not content to leaf things at that, they’ve released a new video about Treemen. They have their roots in the spirits of Athel Loren, and get barking mad at anybody (or anything) that threatens the forests. The video below has an early look at some Treemen animations, which are sure to leave prospective foes chloro-phylled with fear. Or soiling themselves. You choose. Plant yourself down for a couple of minutes of Total War: Warhammer‘s Treemen, because I’ve run out of awful tree puns.
# Copyright 2018 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, cast, Iterable, List, Optional, Set, TYPE_CHECKING, FrozenSet import cirq from cirq_google.optimizers import convert_to_xmon_gates if TYPE_CHECKING: import cirq @cirq.value_equality class XmonDevice(cirq.Device): """A device with qubits placed in a grid. Neighboring qubits can interact.""" def __init__( self, measurement_duration: cirq.DURATION_LIKE, exp_w_duration: cirq.DURATION_LIKE, exp_11_duration: cirq.DURATION_LIKE, qubits: Iterable[cirq.GridQubit], ) -> None: """Initializes the description of an xmon device. Args: measurement_duration: The maximum duration of a measurement. exp_w_duration: The maximum duration of an ExpW operation. exp_11_duration: The maximum duration of an ExpZ operation. qubits: Qubits on the device, identified by their x, y location. """ self._measurement_duration = cirq.Duration(measurement_duration) self._exp_w_duration = cirq.Duration(exp_w_duration) self._exp_z_duration = cirq.Duration(exp_11_duration) self.qubits = frozenset(qubits) def qubit_set(self) -> FrozenSet[cirq.GridQubit]: return self.qubits def decompose_operation(self, operation: cirq.Operation) -> cirq.OP_TREE: return convert_to_xmon_gates.ConvertToXmonGates().convert(operation) def neighbors_of(self, qubit: cirq.GridQubit): """Returns the qubits that the given qubit can interact with.""" possibles = [ cirq.GridQubit(qubit.row + 1, qubit.col), cirq.GridQubit(qubit.row - 1, qubit.col), cirq.GridQubit(qubit.row, qubit.col + 1), cirq.GridQubit(qubit.row, qubit.col - 1), ] return [e for e in possibles if e in self.qubits] def duration_of(self, operation): if isinstance(operation.gate, cirq.CZPowGate): return self._exp_z_duration if isinstance(operation.gate, cirq.MeasurementGate): return self._measurement_duration if isinstance(operation.gate, (cirq.XPowGate, cirq.YPowGate, cirq.PhasedXPowGate)): return self._exp_w_duration if isinstance(operation.gate, cirq.ZPowGate): # Z gates are performed in the control software. return cirq.Duration() raise ValueError(f'Unsupported gate type: {operation!r}') @classmethod def is_supported_gate(cls, gate: cirq.Gate): """Returns true if the gate is allowed.""" return isinstance( gate, ( cirq.CZPowGate, cirq.XPowGate, cirq.YPowGate, cirq.PhasedXPowGate, cirq.MeasurementGate, cirq.ZPowGate, ), ) def validate_gate(self, gate: cirq.Gate): """Raises an error if the given gate isn't allowed. Raises: ValueError: Unsupported gate. 
""" if not self.is_supported_gate(gate): raise ValueError(f'Unsupported gate type: {gate!r}') def validate_operation(self, operation: cirq.Operation): if not isinstance(operation, cirq.GateOperation): raise ValueError(f'Unsupported operation: {operation!r}') self.validate_gate(operation.gate) for q in operation.qubits: if not isinstance(q, cirq.GridQubit): raise ValueError(f'Unsupported qubit type: {q!r}') if q not in self.qubits: raise ValueError(f'Qubit not on device: {q!r}') if len(operation.qubits) == 2 and not isinstance(operation.gate, cirq.MeasurementGate): p, q = operation.qubits if not cast(cirq.GridQubit, p).is_adjacent(q): raise ValueError(f'Non-local interaction: {operation!r}.') def _check_if_exp11_operation_interacts_with_any( self, exp11_op: cirq.GateOperation, others: Iterable[cirq.GateOperation] ) -> bool: return any(self._check_if_exp11_operation_interacts(exp11_op, op) for op in others) def _check_if_exp11_operation_interacts( self, exp11_op: cirq.GateOperation, other_op: cirq.GateOperation ) -> bool: if isinstance( other_op.gate, ( cirq.XPowGate, cirq.YPowGate, cirq.PhasedXPowGate, cirq.MeasurementGate, cirq.ZPowGate, ), ): return False return any( cast(cirq.GridQubit, q).is_adjacent(cast(cirq.GridQubit, p)) for q in exp11_op.qubits for p in other_op.qubits ) def validate_circuit(self, circuit: cirq.Circuit): super().validate_circuit(circuit) _verify_unique_measurement_keys(circuit.all_operations()) def validate_moment(self, moment: cirq.Moment): super().validate_moment(moment) for op in moment.operations: if isinstance(op.gate, cirq.CZPowGate): for other in moment.operations: if other is not op and self._check_if_exp11_operation_interacts( cast(cirq.GateOperation, op), cast(cirq.GateOperation, other) ): raise ValueError(f'Adjacent Exp11 operations: {moment}.') def can_add_operation_into_moment(self, operation: cirq.Operation, moment: cirq.Moment) -> bool: self.validate_moment(moment) if not super().can_add_operation_into_moment(operation, moment): return False if isinstance(operation.gate, cirq.CZPowGate): return not self._check_if_exp11_operation_interacts_with_any( cast(cirq.GateOperation, operation), cast(Iterable[cirq.GateOperation], moment.operations), ) return True def at(self, row: int, col: int) -> Optional[cirq.GridQubit]: """Returns the qubit at the given position, if there is one, else None.""" q = cirq.GridQubit(row, col) return q if q in self.qubits else None def row(self, row: int) -> List[cirq.GridQubit]: """Returns the qubits in the given row, in ascending order.""" return sorted(q for q in self.qubits if q.row == row) def col(self, col: int) -> List[cirq.GridQubit]: """Returns the qubits in the given column, in ascending order.""" return sorted(q for q in self.qubits if q.col == col) def __repr__(self) -> str: return ( 'XmonDevice(' f'measurement_duration={self._measurement_duration!r}, ' f'exp_w_duration={self._exp_w_duration!r}, ' f'exp_11_duration={self._exp_z_duration!r} ' f'qubits={sorted(self.qubits)!r})' ) def __str__(self) -> str: diagram = cirq.TextDiagramDrawer() for q in self.qubits: diagram.write(q.col, q.row, str(q)) for q2 in self.neighbors_of(q): diagram.grid_line(q.col, q.row, q2.col, q2.row) return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True) def _value_equality_values_(self) -> Any: return (self._measurement_duration, self._exp_w_duration, self._exp_z_duration, self.qubits) def _verify_unique_measurement_keys(operations: Iterable[cirq.Operation]): seen: Set[str] = set() for op in operations: if 
cirq.is_measurement(op): key = cirq.measurement_key(op) if key in seen: raise ValueError(f'Measurement key {key} repeated') seen.add(key)
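A small usage sketch (not from the original test suite; the import location is an assumption) showing how a device built this way rejects non-adjacent two-qubit operations:

import cirq
from cirq_google import XmonDevice  # import path assumed

qubits = [cirq.GridQubit(r, c) for r in range(2) for c in range(2)]
device = XmonDevice(
    measurement_duration=cirq.Duration(nanos=1000),
    exp_w_duration=cirq.Duration(nanos=20),
    exp_11_duration=cirq.Duration(nanos=50),
    qubits=qubits,
)

# An adjacent CZ validates fine; a diagonal pair would raise
# ValueError('Non-local interaction: ...').
device.validate_operation(cirq.CZ(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)))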
Education Secretary Betsy DeVos has reportedly opened an investigation into several universities at the center of a college admissions bribery scandal. Politico, citing individuals familiar with the investigation, reported Monday that the Department of Education is looking into whether the universities broke laws or rules "governing the Federal student financial aid programs" or "any other applicable laws." Earlier this month, federal prosecutors charged dozens of people allegedly implicated in a scheme to buy admission to eight universities including Yale University, Stanford University and UCLA. The scandal involved several celebrities and CEOs, who allegedly paid money to ensure their children were accepted into the schools. The Education Department sent letters to the presidents of Yale, UCLA, Stanford, Wake Forest University, the University of San Diego, Georgetown University, the University of Texas at Austin and the University of Southern California informing them that the universities faced a "preliminary investigation" stemming from the scandal, according to Politico. Politico also reported that the universities could face penalties if the department concludes that they violated federal education regulations. Those penalties could include eliminating a school's ability to access Pell Grants and federal student loans, according to Politico. "The allegations made and evidence cited by the Department of Justice raise questions about whether your institution is fully meeting its obligations," an official with the department wrote in the letters, per Politico.
# -*- coding: utf8 -*-
# The comment above marks this .py file as containing non-ASCII text


def dot(a, b):
    """Dot product of two vectors a, b of the same length"""
    # length of vector a.
    # we assume vector b has the same length
    # (in which cases could this cause an error?)
    n = len(a)

    result = 0.0
    for i in xrange(n):
        result += a[i] * b[i]

    return result


def multiply_matrix_vector(A, x):
    n_row = len(A)
    n_column = len(A[0])

    result = [0.0] * n_row

    for i in xrange(n_row):
        result[i] = dot(A[i], x)

    return result


def multiply_matrix_matrix(A, B):
    n_row = len(A)
    n_column = len(B[0])
    n_dummy = len(A[0])
    n_dummy2 = len(B)

    # check the matrix sizes
    if n_dummy != n_dummy2:
        print "Incorrect Matrix Size"
        return None

    # allocate space for the result matrix
    result = []
    for i_row in xrange(n_row):
        # allocate space for each row
        result.append([0.0] * n_column)

    # loop over rows
    for i in xrange(n_row):
        # loop over columns
        for j in xrange(n_column):
            result[i][j] = 0.0
            # dummy index
            for k in xrange(n_dummy):
                result[i][j] += A[i][k] * B[k][j]

    return result


def main():
    a_vector = [1.0, 0.0]
    b_vector = [3.0, 4.0]
    a_dot_b = dot(a_vector, b_vector)
    print "a =", a_vector
    print "b =", b_vector
    print "a dot b =", a_dot_b

    A_matrix = [[0.0, 1.0], [1.0, 0.0]]
    x_vector = [3.0, 4.0]
    A_x = multiply_matrix_vector(A_matrix, x_vector)
    print "A =", A_matrix
    print "x =", x_vector
    print "A*x =", A_x

    A_matrix2 = [[0.0, 1.0], [1.0, 0.0]]
    x_vector2T = [[3.0, 4.0]]
    x_vector2 = zip(*x_vector2T)
    A_x2 = multiply_matrix_matrix(A_matrix2, x_vector2)
    print "A2 =", A_matrix2
    print "x2 =", x_vector2
    print "A2*x2 =", A_x2

    B_matrix = [[100, 101], [110, 111]]
    print "A =", A_matrix
    print "B =", B_matrix
    print "A*B =", multiply_matrix_matrix(A_matrix, B_matrix)


if "__main__" == __name__:
    main()
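As a quick sanity check, the hand-rolled routine can be compared with NumPy — a cross-check added here for illustration only, assuming NumPy is installed and that this snippet is appended to the module above:

# Cross-check against NumPy (not part of the original lesson).
import numpy as np

A = [[0.0, 1.0], [1.0, 0.0]]
B = [[100, 101], [110, 111]]

expected = np.dot(np.array(A), np.array(B))
ours = np.array(multiply_matrix_matrix(A, B))
assert np.allclose(expected, ours)
print("hand-rolled multiply_matrix_matrix agrees with numpy.dot")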
Ingrown toenails, also known as onychocryptosis, are usually caused by trimming toenails too short, particularly on the sides of the big toes. They may also be caused by shoe pressure (from shoes that are too tight or short), injury, fungus infection, heredity, or poor foot structure. Ingrown toenails occur when the corners or sides of the toenail dig into the skin, often causing infection. A common ailment, ingrown toenails can be painful. Ingrown toenails start out hard, swollen, and tender. Left untreated, they may become sore, red, and infected, and the skin may start to grow over the ingrown toenail. You can help prevent ingrown toenails by:
* Trimming toenails straight across with no rounded corners.
* Ensuring that shoes and socks are not too tight.
* Keeping feet clean at all times.
import sys
import os
from cx_Freeze import setup, Executable

os.environ['TCL_LIBRARY'] = r"C:\Users\Sasha\AppData\Local\Programs\Python\Python36-32\tcl\tcl8.6"
os.environ['TK_LIBRARY'] = r"C:\Users\Sasha\AppData\Local\Programs\Python\Python36-32\tcl\tk8.6"

# Dependencies are automatically detected, but some need fine tuning.
# (The extra "os" and "platform" entries were previously listed in an unused
# build_exe_options dict; they are folded in here so they actually take effect.)
includes = ["tkinter", "os", "platform"]
include_files = [r"C:\Users\Sasha\AppData\Local\Programs\Python\Python36-32\DLLs\tcl86t.dll",
                 r"C:\Users\Sasha\AppData\Local\Programs\Python\Python36-32\DLLs\tk86t.dll",
                 r"C:\Users\Sasha\AppData\Local\Programs\Python\Python36-32\DLLs\sqlite3.dll",
                 r"C:\Users\Sasha\Desktop\DebiaTranslator-master\zoom_logo.jpg"]

# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
    base = "Win32GUI"

setup(
    name="Devia Translator",
    version="0.1",
    description="Devia Translator application!",
    options={"build_exe": {"includes": includes, "include_files": include_files}},
    executables=[Executable("devia_translator.py", base=base)],
)
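The build is then run with `python setup.py build`. As a sketch of one possible cleanup — assuming the standard Windows CPython layout applies, which is untested here — the hardcoded per-user paths could be derived from the running interpreter instead:

import os
import sys

PYTHON_DIR = sys.base_prefix  # e.g. C:\Users\...\Python36-32

os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_DIR, 'tcl', 'tk8.6')

include_files = [
    os.path.join(PYTHON_DIR, 'DLLs', 'tcl86t.dll'),
    os.path.join(PYTHON_DIR, 'DLLs', 'tk86t.dll'),
    os.path.join(PYTHON_DIR, 'DLLs', 'sqlite3.dll'),
    "zoom_logo.jpg",  # assumes the logo sits next to setup.py
]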
More 1961 alumni from Aledo HS in Aledo, IL have posted profiles on Classmates.com®. Click here to register for free at Classmates.com® and view other 1961 alumni. Missing some friends from Aledo that graduated with you in 1961? Check the list below that shows the Aledo, IL class of '61.
from __future__ import unicode_literals from django.db import models from datetime import datetime from django.utils import timezone from django.db.models import Model, CharField, ForeignKey, IntegerField, DecimalField, BooleanField, DateTimeField class School(Model): ''' Stores information on a single school. ''' cno = CharField(max_length=10, null=False) short_name = CharField(max_length=50, null=False, unique=True, db_index=True) long_name = CharField(max_length=100, null=False) gender = CharField(max_length=2, null=False, default="MF") stype = CharField(max_length=2, null=False, default="U") level = CharField(max_length=2, null=False, default="U") def __str__(self): return self.short_name def __unicode__(self): return self.short_name class SchoolStatistics(Model): ''' Stores the years for which data is available for a specific school. Relates to :model:`api.School`. ''' school = ForeignKey(School) exam_year = IntegerField(db_index=True) div_name = CharField(max_length=10, null=False) div_count = IntegerField() def __str__(self): return str(self.school)
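A hypothetical shell/ORM session using the two models above — the data values are made up for illustration:

school = School.objects.create(
    cno='0001',
    short_name='stmarys',
    long_name="St. Mary's Senior Secondary School",
)
SchoolStatistics.objects.create(school=school, exam_year=2015,
                                div_name='I', div_count=120)

# All years with data for this school, newest first:
years = (SchoolStatistics.objects
         .filter(school=school)
         .order_by('-exam_year')
         .values_list('exam_year', flat=True))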
In a large soup pot, heat oil over medium heat. Add pork tenderloin (in batches if necessary) and brown on all sides. Using a slotted spoon transfer pork to a dish and set aside. Add onions and garlic to pot and sauté over medium-low heat, stirring occasionally, until onions are golden, about 6 minutes. Stir in the flour, paprika, and caraway seeds. Cook, stirring, for 2 minutes. Stir in wine and bring the mixture to a boil. Add tomato paste, bell pepper, potato, tomatoes, and chicken broth. Return pork to the pot and stir to blend. Reduce heat and simmer, partially covered, until pork is tender, about 1 hour. Season with salt (if using) and pepper to taste. Meanwhile, cook noodles according to package directions until al dente. Drain and divide noodles between soup bowls. Top with hot soup. If desired, garnish each serving with a dollop of sour cream.
#!/usr/bin/env python # # @file insideJSBML_parser.py # @brief JSBML classes parser using javap for GSoC 2016 # @author Hovakim Grabski # # <!-------------------------------------------------------------------------- # # Copyright (c) 2013-2015 by the California Institute of Technology # (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK) # and the University of Heidelberg (Germany), with support from the National # Institutes of Health (USA) under grant R01GM070923. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Neither the name of the California Institute of Technology (Caltech), nor # of the European Bioinformatics Institute (EMBL-EBI), nor of the University # of Heidelberg, nor the names of any contributors, may be used to endorse # or promote products derived from this software without specific prior # written permission. 
# ------------------------------------------------------------------------ -->

import os
import sys
import time
import subprocess as sub

file_path = os.path.dirname(os.path.abspath(__file__))
jsbml_jar = 'jsbml-1.1-with-dependencies.jar'
curr_dir = os.getcwd()


def print_output(output):
    for line in output:
        print(line)


# Clean string line from '' and return list
def clean_line(data):
    temp = []
    for i in data:
        if i != '':
            temp.append(i)
    return temp


def extract_data(temp_data):
    # print('temp_data ',temp_data)
    # function_name_step1 = temp_data[-1].split(');')
    # print(function_name_step1)
    function_name = ''
    access_type = None
    is_abstract = False
    return_type = []
    arguments = []
    of_type = ''
    of_type_args = []

    # TODO this is the part that includes extends module
    if len(temp_data) == 1 and temp_data[-1] == '}':
        return

    for i in range(len(temp_data)):
        if temp_data[0] == 'Compiled':
            return
        if len(temp_data) == 1 and temp_data[-1] == '}':
            return

        # Function Arguments extracter
        if '(' in temp_data[i]:
            # print('i is ',i)
            function_name_step1 = temp_data[i].split('(')
            # print('function_name_step1 ',function_name_step1)
            function_name = function_name_step1[0]
            function_index = i
            if function_name_step1[-1] != ');':
                if ');' in function_name_step1[-1]:
                    arg = function_name_step1[-1].split(');')[0]
                    arguments.append(arg)
                else:
                    arg = function_name_step1[-1].split(',')[0]
                    arguments.append(arg)
                for y in range(function_index, len(temp_data)):
                    # print('y ',temp_data[y])
                    if ',' in temp_data[y]:
                        arg = function_name_step1[-1].split(',')[0]
                        arguments.append(arg)
                    elif ');' in function_name_step1[-1]:
                        arg = function_name_step1[-1].split(');')[0]
                        arguments.append(arg)
                    elif function_name_step1[-1] == ');':
                        break
        elif '<' in temp_data[i]:
            type_of_name_step1 = temp_data[i].split('<')
            of_type = type_of_name_step1[0]
            type_index = i
            if type_of_name_step1[-1] != '>':
                if '>' in type_of_name_step1[-1]:
                    arg = type_of_name_step1[-1].split('>')[0]
                    of_type_args.append(arg)
                else:
                    arg = type_of_name_step1[-1].split(',')[0]
                    of_type_args.append(arg)
                for y in range(type_index, len(temp_data)):
                    # print('y ',temp_data[y])
                    if ',' in temp_data[y]:
                        arg = type_of_name_step1[-1].split(',')[0]
                        of_type_args.append(arg)
                    elif '>' in type_of_name_step1[-1]:
                        arg = type_of_name_step1[-1].split('>')[0]
                        of_type_args.append(arg)

    if len(temp_data) > 0:
        if temp_data[0] in ['public', 'private', 'protected']:
            access_type = temp_data[0]

    if len(temp_data) > 1 and temp_data[1] == 'abstract':
        is_abstract = True
        return_type = temp_data[2]
    elif len(temp_data) > 1:
        if temp_data[1] == 'void':
            return_type = temp_data[1]
        else:
            # return_type = temp_data[1]
            return_type = None

    if function_name == '':
        return

    return {'accessType': access_type, 'isAbstract': is_abstract,
            'returnType': return_type, 'functionName': function_name,
            'functionArgs': arguments,
            'of_type': of_type,
            'of_type_args': of_type_args,
            'originalData': temp_data}


def parse_extends(extends):
    data_extends = {}
    data_extends.update({'accessType': extends[0]})

    if extends[1] == 'interface':
        is_interface = True
        data_extends.update({'extendsOriginal': extends[2]})
    else:
        is_interface = False
        data_extends.update({'extendsOriginal': extends[3]})
    data_extends.update({'isInterface': is_interface})

    if extends[1] == 'class':
        is_class = True
    else:
        is_class = False
    data_extends.update({'isClass': is_class})

    data_extends.update({'extendsFull': extends[-2]})
    extend_short = extends[-2].split('.')[-1]
    data_extends.update({'extendsShort': extend_short})
    data_extends.update({'fullText': extends})
    return data_extends


def parse_output(output):
    final_data = {}
    output_data = []
    for line in output:
        # print(line)
        data_stage1 = line.split('\n')
        # print(data_stage1)
        data_stage2 = data_stage1[0].split(' ')
        # Need to catch extend here
        if 'extends' in data_stage2:
            final_data.update({'extends': parse_extends(data_stage2)})
        temp_data = clean_line(data_stage2)
        data = extract_data(temp_data)
        if data is not None:
            output_data.append(data)
    final_data.update({'modules': output_data})
    return final_data  # output_data


def get_class_information(class_name=None, individual_run=False, extract_data=False):
    if class_name == 'AbstractSBasePlugin':
        # class_name = 'org.sbml.jsbml.ext.{0}'.format(class_name)
        return
    else:
        class_name = 'org.sbml.jsbml.{0}'.format(class_name)

    # Old version
    # command = 'javap -cp {0}{1}{2} -package {3}'.format(file_path, os.sep, jsbml_jar, class_name)

    # TODO inside JSBML parser debugging test
    # comm1 = 'javap_wrong'
    comm1 = 'javap'
    comm2 = '-cp'
    comm3 = '{0}{1}{2}'.format(file_path, os.sep, jsbml_jar)
    comm4 = '-package'
    comm5 = '{0}'.format(class_name)

    total_command = [comm1, comm2, comm3, comm4, comm5]

    try:
        class_info = sub.Popen(total_command, stdout=sub.PIPE, stderr=sub.PIPE)
        stdout, stderr = class_info.communicate()

        if stdout:
            # For debugging purposes
            # print(stdout)
            stdout_value = stdout.decode()  # decode("utf-8")
            class_output = stdout_value.split('\n')
            dict_data = parse_output(class_output)
            return dict_data
        elif stderr:
            error_txt = stderr.decode()
            # print('ERROR is', error_txt)
            if 'Error: class not found:' in error_txt:
                return
            else:
                if extract_data is False:
                    print('Check if Java SDK is installed, deviser requires javap')
                    sys.exit(0)
                else:
                    return
    except Exception as error:
        if extract_data is False:
            print('Error is ', error)
            print('Check if Java SDK is installed, deviser requires javap')
            sys.exit(0)

# For testing purposes
# class_name = 'org.sbml.jsbml.AbstractNamedSBase'
# class_name = 'CompartmentalizedSBase'
# class_name = 'Compartment'
# class_name = 'SBaseWithDerivedUnit'
# class_name = 'NamedSBaseWithDerivedUnit'
# class_name = 'UniqueNamedSBase'

# TODO for individual tests of javap parser
# #Exist but no data
# class_name = 'AbstractSBasePlugin'
# data = get_class_information(class_name, individual_run=True)
# print(data)

# data = get_class_information(class_name, individual_run=True)
# print(data)
Please note that these are the notes for the right hand only. ↑ before a note means move up one octave, while ↓ means move down one octave; otherwise, stay at the same octave. # after a note means move a half step higher, so C# means play the black key after the white C key, and we call it C sharp. b after a note means move a half step lower, so Bb means play the black key before the white B key, and we call it B flat.
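If it helps to see the sharp/flat rule concretely, it maps cleanly onto semitone numbers; the little sketch below is illustrative only (the note-to-semitone table is the standard one):

# Illustrative only: map note names to semitones within an octave so that
# sharps (#) add one half step and flats (b) subtract one.
SEMITONES = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}

def semitone(note):
    base = SEMITONES[note[0]]
    if note.endswith('#'):
        base += 1          # C# is the black key just above C
    elif note.endswith('b'):
        base -= 1          # Bb is the black key just below B
    return base % 12

print(semitone('C#'))  # 1
print(semitone('Bb'))  # 10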
#!/usr/bin/env python ######################################################################## # $HeadURL$ # File : dirac-wms-get-queue-normalization.py # Author : Ricardo Graciani ######################################################################## """ Report Normalization Factor applied by Site to the given Queue """ __RCSID__ = "$Id$" import DIRAC from DIRAC.Core.Base import Script from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import getQueueNormalization Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1], 'Usage:', ' %s [option|cfgfile] ... Queue ...' % Script.scriptName, 'Arguments:', ' Queue: GlueCEUniqueID of the Queue (ie, juk.nikhef.nl:8443/cream-pbs-lhcb)' ] ) ) Script.parseCommandLine( ignoreErrors = True ) args = Script.getPositionalArgs() if len( args ) < 1: Script.showHelp() exitCode = 0 for ceUniqueID in args: cpuNorm = getQueueNormalization( ceUniqueID ) if not cpuNorm['OK']: print 'ERROR %s:' % ceUniqueID, cpuNorm['Message'] exitCode = 2 continue print ceUniqueID, cpuNorm['Value'] DIRAC.exit( exitCode )
Can anyone still be taking Palin seriously? Came across this article this morning (here) where Palin admits that, growing up in Alaska, her family used to sneak across the border for some of that single payer health care in Canada!! Wow, this lady is something else: she derides single payer as if it were the devil incarnate, but she used it plenty of times. Obviously you should choose to measure someone by their actions and not their words!!
#!/usr/bin/env python3
#
# Copyright 2014-2017 Simon Edwards <[email protected]>
#
# This source code is licensed under the MIT license which is detailed in the LICENSE.txt file.
#
import argparse
import atexit
import base64
import hashlib
import os
import os.path
import sys
import termios

##@inline
from extratermclient import extratermclient

MAX_CHUNK_BYTES = 3 * 1024  # This is kept a multiple of 3 to avoid padding in the base64 representation.


def SendMimeTypeDataFromFile(filename, mimeType, charset, filenameMeta=None, download=False):
    filesize = os.path.getsize(filename)
    with open(filename, 'rb') as fhandle:
        SendMimeTypeData(fhandle, filename if filenameMeta is None else filenameMeta, mimeType, charset,
                         filesize=filesize, download=download)


def SendMimeTypeDataFromStdin(mimeType, charset, filenameMeta=None, download=False):
    # Pass download as a keyword argument: positionally it would land in the
    # filesize parameter of SendMimeTypeData.
    SendMimeTypeData(sys.stdin.buffer, filenameMeta, mimeType, charset, download=download)


def SendMimeTypeData(fhandle, filename, mimeType, charset, filesize=-1, download=False):
    TurnOffEcho()
    extratermclient.startFileTransfer(mimeType, charset, filename, filesize=filesize, download=download)
    contents = fhandle.read(MAX_CHUNK_BYTES)
    previousHash = b""
    previousHashHex = ""
    while len(contents) != 0:
        hash = hashlib.sha256()
        hash.update(previousHash)
        hash.update(contents)
        print("D:", end='')
        print(base64.b64encode(contents).decode(), end='')
        print(":", end='')
        previousHashHex = hash.hexdigest()
        print(previousHashHex)
        previousHash = hash.digest()
        contents = fhandle.read(MAX_CHUNK_BYTES)
    print("E::", end='')
    hash = hashlib.sha256()
    hash.update(previousHash)
    print(hash.hexdigest())
    extratermclient.endFileTransfer()


def ShowFile(filename, mimeType=None, charset=None, filenameMeta=None, download=False):
    if os.path.exists(filename):
        SendMimeTypeDataFromFile(filename, mimeType, charset, filenameMeta, download)
        return 0
    else:
        print("Unable to open file {0}.".format(filename))
        return 3


def ShowStdin(mimeType=None, charset=None, filenameMeta=None, download=False):
    SendMimeTypeDataFromStdin(mimeType, charset, filenameMeta, download)
    return 0


def TurnOffEcho():
    # Turn off echo on the tty.
    fd = sys.stdin.fileno()
    if not os.isatty(fd):
        return
    old_settings = termios.tcgetattr(fd)
    new_settings = termios.tcgetattr(fd)
    new_settings[3] = new_settings[3] & ~termios.ECHO  # lflags
    termios.tcsetattr(fd, termios.TCSADRAIN, new_settings)

    # Set up a hook to restore the tty settings at exit.
    def restoreTty():
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        sys.stderr.flush()

    atexit.register(restoreTty)


def main():
    parser = argparse.ArgumentParser(prog='show', description='Show a file inside Extraterm.')
    parser.add_argument('--charset', dest='charset', action='store', default=None,
                        help='the character set of the input file (default: UTF8)')
    parser.add_argument('-d', '--download', dest='download', action='store_true', default=None,
                        help='download the file and don\'t show it')
    parser.add_argument('--mimetype', dest='mimetype', action='store', default=None,
                        help='the mime-type of the input file (default: auto-detect)')
    parser.add_argument('--filename', dest='filename', action='store', default=None,
                        help='sets the file name in the metadata sent to the terminal (useful when reading from stdin).')
    parser.add_argument('-t', '--text', dest='text', action='store_true', default=None,
                        help='Treat the file as plain text.')
    parser.add_argument('files', metavar='file', type=str, nargs='*',
                        help='file name. The file data is read from stdin if no files are specified.')
    args = parser.parse_args()

    if not extratermclient.isExtraterm():
        print("Sorry, you're not using Extraterm as your terminal.")
        return 1

    mimetype = args.mimetype
    if args.text:
        mimetype = "text/plain"

    if len(args.files) != 0:
        for filename in args.files:
            result = ShowFile(filename, mimeType=mimetype, charset=args.charset, filenameMeta=args.filename,
                              download=args.download)
            if result != 0:
                return result
        return 0
    else:
        return ShowStdin(mimeType=mimetype, charset=args.charset, filenameMeta=args.filename,
                         download=args.download)

main()
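On the receiving end, the chained-hash framing emitted by SendMimeTypeData can be checked like this — a rough sketch inferred from the sender code above, not Extraterm's actual decoder:

import base64
import hashlib

def verify_frames(frames):
    """frames: 'D:<base64>:<hexdigest>' lines followed by one 'E::<hexdigest>' line."""
    previous = b""
    payload = b""
    for line in frames:
        kind, b64, hexdigest = line.split(":", 2)
        h = hashlib.sha256()
        h.update(previous)          # each frame's hash chains in the previous digest
        if kind == "D":
            chunk = base64.b64decode(b64)
            h.update(chunk)
            payload += chunk
        if h.hexdigest() != hexdigest:
            raise ValueError("hash chain broken at a %r frame" % kind)
        previous = h.digest()
    return payload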
bwah! that looks cool. Nice photoshop work, too. Yay mass shifting!
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2018 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Object used to handle operation that takes a long time to complete.

"""
import time
from typing import Callable, Optional


class InstrJob(object):
    """Object returned by an instrument starting a long-running job.

    This object can also be used inside a method to handle the waiting of a
    condition.

    Parameters
    ----------
    condition_callable : Callable
        Callable taking no argument and indicating if the job is complete.

    expected_waiting_time : float
        Expected waiting time for the task to complete in seconds.

    cancel : Callable, optional
        Function to cancel the task. The job will pass it all the arguments
        it is called with and the function return value will be returned.

    """
    def __init__(self, condition_callable: Callable[[], bool],
                 expected_waiting_time: float,
                 cancel: Optional[Callable] = None) -> None:
        self.condition_callable = condition_callable
        self.expected_waiting_time = expected_waiting_time
        self._cancel = cancel
        self._start_time = time.time()

    def wait_for_completion(self,
                            break_condition_callable: Optional[Callable[[], bool]] = None,
                            timeout: float = 15,
                            refresh_time: float = 1) -> bool:
        """Wait for the task to complete.

        Parameters
        ----------
        break_condition_callable : Callable, optional
            Callable indicating that we should stop waiting.

        timeout : float, optional
            Time to wait in seconds in addition to the expected condition time
            before breaking.

        refresh_time : float, optional
            Time interval at which to check the break condition.

        Returns
        -------
        result : bool
            Boolean indicating if the wait succeeded or was interrupted.

        """
        if break_condition_callable is None:
            def no_check():
                return False
            break_condition_callable = no_check

        while True:
            remaining_time = (self.expected_waiting_time -
                              (time.time() - self._start_time))
            if remaining_time <= 0:
                break
            time.sleep(min(refresh_time, remaining_time))
            if break_condition_callable():
                return False

        if self.condition_callable():
            return True

        timeout_start = time.time()
        while True:
            remaining_time = (timeout -
                              (time.time() - timeout_start))
            if remaining_time < 0:
                return False
            time.sleep(min(refresh_time, remaining_time))
            if self.condition_callable():
                return True
            if break_condition_callable():
                return False

    def cancel(self, *args, **kwargs):
        """Cancel the long running job.

        """
        if not self._cancel:
            raise RuntimeError('No callable was provided to cancel the task.')
        return self._cancel(*args, **kwargs)
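A minimal usage sketch (hypothetical, not from the package's docs): poll a condition that flips after roughly 0.2 seconds.

import time

start = time.time()
job = InstrJob(condition_callable=lambda: time.time() - start > 0.2,
               expected_waiting_time=0.2)

done = job.wait_for_completion(timeout=1, refresh_time=0.05)
print(done)  # True once the condition reports completion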
Due to your phenomenal generosity last year, we started with an expected deficit of £70,000 and ended the year with a surplus of £50,000! Both regular and one-off gifts were significantly more than we expected, meaning that our total income (£1,050,000) was 16% higher than budget (+£140,000). On behalf of our mission partners, Vestry and staff, can I start this financial year by thanking you for your faithful and generous giving! The breakdown by ministry is in the Giving Leaflet, please take a look! Whilst we never hope to end the year with a deficit, due to your faithful giving over many years, we are in the fortunate position of having reserves to accommodate a deficit of this size. One of the reasons for the deficit is that regular giving has been decreasing month on month for the last 6 months. If you consider St Mungo’s to be your church, then at the start of this new financial year, please could I ask you to review your financial support? If you have any questions on the finances of the church, please e-mail me on [email protected].
""" @Author: Rui Shu @Date: 4/21/15 Performs sequential optimization. """ import time from learning_objective.hidden_function import evaluate, true_evaluate, get_settings import matplotlib.pyplot as plt import utilities.optimizer as op import numpy as np # Open file to write times for comparison file_record = open("data/seq_time_data.csv", "a") # Freeze plotting plot_it = False print_statements = False # Get settings relevant to the hidden function being used lim_domain, init_size, additional_query_size, init_query, domain, selection_size = get_settings() # Construct the dataset dataset = evaluate(init_query[0,:], lim_domain) print "Randomly query a set of initial points... ", for query in init_query[1:,:]: dataset = np.concatenate((dataset, evaluate(query, lim_domain)), axis=0) print "Complete initial dataset acquired" # Begin sequential optimization using NN-LR based query system optimizer = op.Optimizer(dataset, domain) optimizer.train() # Select a series of points to query selected_points = optimizer.select_multiple(selection_size) # (#points, m) array selection_index = 0 t0 = time.time() print "Performing optimization..." for i in range(additional_query_size): if selection_index == selection_size: # Update optimizer's dataset and retrain LR optimizer.retrain_LR() selected_points = optimizer.select_multiple(selection_size) # Select new points selection_size = selected_points.shape[0] # Get number of selected points selection_index = 0 # Restart index info = "%.3f," % (time.time()-t0) file_record.write(info) t0 = time.time() if (optimizer.get_dataset().shape[0] % 100) == 0: # Retrain the neural network optimizer.retrain_NN() new_data = evaluate(selected_points[selection_index], lim_domain) optimizer.update_data(new_data) selection_index += 1 if print_statements: string1 = "Tasks done: %3d. " % (i+1) string2 = "New data added to dataset: " + str(new_data) print string1 + string2 else: if (i+1) % (additional_query_size/10) == 0: print "%.3f completion..." % ((i+1.)/additional_query_size) info = "%.3f," % (time.time()-t0) file_record.write(info) file_record.write("NA\n") file_record.close() print "Sequential optimization task complete." print "Best evaluated point is:" dataset = optimizer.get_dataset() print dataset[np.argmax(dataset[:, -1]), :] print "Predicted best point is:" optimizer.retrain_LR() domain, pred, hi_ci, lo_ci, nn_pred, ei, gamma = optimizer.get_prediction() index = np.argmax(pred[:, 0]) print np.concatenate((np.atleast_2d(domain[index, :]), np.atleast_2d(pred[index, 0])), axis=1)[0, :]
With over 12 years of experience in some of the most renowned restaurants in Australia and the United Kingdom, Chef Shaun Langdon has recently joined Mandarin Oriental, Sanya to lead the team of chefs at the resort’s signature Fresh Restaurant & Grill. Hailing from the South-East coast of Australia, Shaun has always had a passion for cooking. Born into a family of fishermen, he was exposed to seafood from an early age, and it is seafood which remains the primary devotion in his cooking today. Determined to pursue his love for all things culinary, Shaun accepted an apprenticeship at the age of 15, first with Italian chef Danny Russo at L’Unico restaurant in Sydney, and then with English chef Matt Kemp at Restaurant Balzac in Randwick, New South Wales. His talents were soon noticed by legendary culinarian Peter Gilmore, who offered Shaun the position of Chef de Partie at his celebrated Quay Restaurant on Sydney’s picturesque harbour. Having honed his skills during two successful years at the Quay, Shaun decided to venture outside of his native Australia and moved to the United Kingdom where he worked for master Sommelier Gerard Basset at Hampshire’s Hotel Terravina and also as head pastry chef in Coombe Abbey Hotel in Warwickshire. In 2009, Shaun was tempted to return to his homeland to run the exceptional Surry Hills bistro Bird Cow Fish. Awarded with one ‘Chef Hat’, Australia’s equivalent of the Michelin star, Shaun helped the restaurant go from strength to strength during his two year tenure. In joining Mandarin Oriental, Sanya, Shaun brings his flair and enthusiasm for seafood to the shores of the South China Sea where his unique cooking style and flavours are sure to surprise and delight even the most seasoned seafood aficionados staying at the luxury resort.
# linked lists can have several nodes (these nodes contain the data)
class Node(object):

    def __init__(self, data):
        # this is where we store the data
        self.data = data
        # this is a reference to the next node in the linked list
        self.next_node = None


# implementation of the linked list data structure
class LinkedList(object):

    def __init__(self):
        # we keep a reference to the first node of the linked list
        # this is why we can get the first node in O(1)
        self.head = None
        # we track the size of the list (how many items we have inserted)
        self.size = 0

    # inserting at the beginning
    # because we store a reference to the first node (head) that's why we just
    # have to update the references [it can be done in O(1) running time]
    def insert_start(self, data):
        # we insert a new item so the size has changed
        self.size = self.size + 1
        # create a new Node
        new_node = Node(data)

        # if the head is NULL - it means it is the first item we insert
        if not self.head:
            self.head = new_node
        # if there are already items in the linked list (so not the first item)
        else:
            # we just have to update the references that's why it is fast
            new_node.next_node = self.head
            self.head = new_node

    # removing an arbitrary item from the list
    # first we have to find the item [O(N)] + update the references (so remove it) [O(1)]
    # overall running time complexity is O(N) linear running time
    def remove(self, data):
        # if the linked list is empty we return
        if self.head is None:
            return

        # first we have to find the node we want to remove. It can be done in O(N)
        # basically a simple linear search
        current_node = self.head
        previous_node = None

        # we try to find the node we want to get rid of
        # (guard against running off the end when the item is not present)
        while current_node is not None and current_node.data != data:
            previous_node = current_node
            current_node = current_node.next_node

        # the item is not in the list: nothing to remove
        if current_node is None:
            return

        # we remove an item so decrement the size
        self.size = self.size - 1

        # if we want to remove the first item (in this case the previous node is NULL)
        # NOTE: if there are no references to a given object then GC will delete that node
        # so no need to del the unnecessary nodes
        if previous_node is None:
            self.head = current_node.next_node
        else:
            # we remove an item that's not the first one
            previous_node.next_node = current_node.next_node

    # because we have a variable that's why this method has O(1) running time
    def size1(self):
        return self.size

    # we can calculate the size by iterating through the list and counting the number of nodes
    def size2(self):
        actual_node = self.head
        size = 0

        # because of this it has O(N) linear running time (we can do better!!!)
        while actual_node is not None:
            size += 1
            actual_node = actual_node.next_node

        return size

    # we want to insert data at the end of the list
    # first we have to get to the end of the list [O(N)] + insert a new node [O(1)]
    def insert_end(self, data):
        # we insert a new node so update the size
        self.size = self.size + 1
        # the new node with the data to insert
        new_node = Node(data)

        # if the list is empty the new node simply becomes the head
        if self.head is None:
            self.head = new_node
            return

        actual_node = self.head

        # we have to find the last node (the last node's next_node is NULL)
        while actual_node.next_node is not None:
            actual_node = actual_node.next_node

        # we insert the new node as the last node's next node
        actual_node.next_node = new_node

    # print the nodes in the linked list
    # we consider all the nodes one by one so it has O(N) running time
    def traverse_list(self):
        actual_node = self.head

        # we consider all the nodes in the linked list
        while actual_node is not None:
            print("%d " % actual_node.data)
            actual_node = actual_node.next_node


# if __name__ == "__main__":
#     linkedlist = LinkedList()
#     linkedlist.insert_start(12)
#     linkedlist.insert_start(122)
#     linkedlist.insert_start(3)
#     linkedlist.insert_end(31)
#     linkedlist.traverse_list()
#     linkedlist.remove(3)
#     linkedlist.remove(12)
#     linkedlist.remove(122)
#     linkedlist.remove(31)
#     linkedlist.insert_start(12)
#     linkedlist.insert_start(122)
#     linkedlist.insert_start(3)
#     linkedlist.insert_end(31)
#     linkedlist.traverse_list()
#     print(linkedlist.size1())
Many condo master policies are now using a “per unit” deductible in order to shift risk from the master policy to the HO-6 or “unit owner’s” policy. Condo owners need to be advised of the type of HO-6 policy they should carry in order to properly minimize their out of pocket exposure. The condo trust documents, or “condo docs” spell out what the association and unit owners are responsible for in the event of a loss. Under your agreement, the association is responsible for all common building elements and the entire unit, including any betterments or improvements made by owners. Unit owners should always carry enough under their HO-6 Coverage A limit to cover the master policy’s highest deductible. This limits the association’s expense of paying the deductible for a loss involving one of the units and limits the unit owner’s out of pocket expenses. Unit owners should check with their insurance agent to see that their policy has the “HO 17 34 – Unit Owner’s Modified Other Insurance & Service Agreement Condition” endorsement. Some companies use a base policy that includes this coverage automatically, but companies using newer forms can add this endorsement, to ensure there’s no issue with the HO-6 company paying the cost of the master policy deductible. The unit owner should also carry the “HO 04 35 – Loss Assessment Coverage” endorsement. This endorsement (at limits typically up to $50,000) protects the unit owner from a loss that is assessed to all unit owners, like damage to a common area, by a peril that is covered by the HO-6 policy (like fire). Finally, the unit owner should carry “HO 17 32 – Coverage A Special Form” endorsement. This endorsement broadens the types of perils that the HO-6 policy covers. Under “special form”, the HO-6 policy covers all perils except for those specifically excluded by the policy. Without this endorsement, the HO-6 covers only the perils listed on the policy. For more information on Condo Insurance, or for questions regarding your Condo coverage, contact your McSweeney & Ricci Account Manager.
# -*- coding: utf-8 -*-
import os
import csv
import shutil
import re
import sqlite3
import collections
from urlparse import urlparse, urljoin, parse_qs
from datetime import date

from bs4 import BeautifulSoup
import requests

html_path = "htmls"
image_url = "http://www.klaphio.gov.tw/uploadfiles/cd/"
base_url = "http://www.klaphio.gov.tw/receiving_notice.php"

data_schema = collections.OrderedDict((
    (u"進所日期:", "enter_date"),
    (u"進所原因:", "reason"),
    (u"性別:", "gender"),
    (u"毛色:", "color"),
    (u"品種:", "variety"),
    (u"體型:", "body_type"),
    (u"晶片號碼:", "wafer_number"),
    (u"來源地點:", "source")
))


class DB(object):

    def __init__(self, table_name=None):
        if not table_name:
            raise Exception("table name invalid")
        self.conn = sqlite3.connect('animal.db')
        self.conn.row_factory = sqlite3.Row
        self.cursor = self.conn.cursor()
        self.table_name = table_name
        self.github_photo_url = "https://g0v.github.io/animal.coa/%E5%9F%BA%E9%9A%86%E5%B8%82/"
        try:
            sql = "CREATE TABLE %s (id, photo, %s);" % (self.table_name, ",".join(data_schema.values()))
            self.cursor.execute(sql)
            print "table %s created." % table_name
        except Exception as e:
            # the table already exists on subsequent runs
            print e

    def get_animal(self, animal_id):
        sql = "SELECT * FROM %s WHERE id=?;" % self.table_name
        self.cursor.execute(sql, (animal_id,))
        return self.cursor.fetchone()

    def save(self, data):
        try:
            print "save data to db, id=", data.get("id")
            sql = "INSERT INTO %s (id, photo, color, enter_date, source, gender, reason, wafer_number, body_type, variety) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);" % self.table_name
            self.cursor.execute(sql, (
                data.get("id"),
                data.get("photo"),
                data.get("color"),
                data.get("enter_date"),
                data.get("source"),
                data.get("gender"),
                data.get("reason"),
                data.get("wafer_number"),
                data.get("body_type"),
                data.get("variety")
            ))
            self.conn.commit()
        except Exception as e:
            print e

    def to_csv(self):
        self.cursor.execute("SELECT DISTINCT(enter_date) FROM %s;" % self.table_name)
        for (day,) in self.cursor.fetchall():
            with open('%s.csv' % day, 'wb') as csvfile:
                print "Export csv = %s.csv" % day
                spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                spamwriter.writerow(["來源地點", "入園日期", "品種", "備註", "性別", "收容原因", "晶片號碼", "毛色", "體型", "相片網址"])
                sql = "SELECT * FROM %s WHERE enter_date = ? ORDER BY id;" % self.table_name
                self.cursor.execute(sql, (day,))
                for row in self.cursor.fetchall():
                    photo_url = os.path.join(self.github_photo_url, day, row["photo"].split('/')[-1].lower())
                    # convert back to the ROC (Minguo) calendar for display
                    y, m, d = tuple(map(int, row["enter_date"].split('-')))
                    enter_date = "%d年%d月%d日" % (y - 1911, m, d)
                    data = [
                        row["source"].encode('utf-8'),
                        enter_date,
                        row["variety"].encode('utf-8'),
                        u"",
                        row["gender"].encode('utf-8'),
                        row["reason"].encode('utf-8'),
                        row["wafer_number"].encode('utf-8'),
                        row["color"].encode('utf-8'),
                        row["body_type"].encode('utf-8'),
                        photo_url
                    ]
                    spamwriter.writerow(data)


def ensure_directories(path):
    if not os.path.exists(path):
        os.makedirs(path)


def save_html(path, filename, content):
    with open(os.path.join(path, filename), 'w') as f:
        f.write(content)


def fetch_page(page=1, total=None):
    page = int(page)
    print "Fetching page %d" % page
    r = requests.post(base_url, {"page": page})
    content = r.text.encode('utf-8').strip()
    if r.status_code == 200:
        save_html(html_path, "page-%d.html" % page, content)
    if total and page < total:
        fetch_page(page + 1, total=total)
    else:
        return content


def get_total_page(content):
    soup = BeautifulSoup(content)
    total_page_html = soup.find('a', href="javascript:goPage('5');").get('href')
    return int(re.match(r".+goPage\(\'(\d+)\'\)", total_page_html).group(1))


def download_image(filename, animal_id, save_path, save_name):
    if not os.path.exists(os.path.join(save_path, save_name)):
        print "downloading image, id=", animal_id
        ensure_directories(save_path)
        r = requests.get(image_url + filename, stream=True)
        if r.status_code == 200:
            with open(os.path.join(save_path, save_name), 'wb') as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
    else:
        print "photo exists, skip. %s/%s" % (save_path, save_name)


def fetch_detail_page(url, animal_id):
    try:
        with open(os.path.join(html_path, "detail-page-%d.html" % animal_id), 'r') as f:
            print "use detail-page-%d.html cached file." % animal_id
            content = f.read()
    except IOError:
        print "fetching detail page, id =", animal_id
        r = requests.get(urljoin(base_url, url))
        if r.status_code != 200:
            # fail loudly instead of falling through with `content` undefined
            raise IOError("failed to fetch detail page %d (HTTP %d)" % (animal_id, r.status_code))
        content = r.text.encode('utf-8').strip()
        save_html(html_path, 'detail-page-%d.html' % animal_id, content)
    return extract_detail_info(content, animal_id)


def extract_detail_info(content, animal_id):
    # animal_id is passed in explicitly instead of being read from a global
    soup = BeautifulSoup(content)
    data = {
        "id": animal_id
    }
    infos = soup.find("div", class_="word").find_all("li")
    for info in infos:
        title = info.find("span").contents[0]
        title = title.replace(" ", "")
        if title in data_schema.keys():
            animal_info = ""
            try:
                animal_info = info.contents[1]
            except IndexError:
                # field is present on the page but empty
                pass
            data[data_schema[title]] = animal_info

    # the site reports dates in the ROC (Minguo) calendar; convert to Gregorian
    y, m, d = tuple(map(int, data['enter_date'].split('-')))
    data['enter_date'] = date(y + 1911, m, d).strftime("%Y-%m-%d")

    # download image
    img_src = soup.find("div", class_="photo").select("img")[0].get('src').split('/')[-1]
    data["photo"] = image_url + img_src
    filename, ext = os.path.splitext(img_src)
    save_path = data['enter_date']
    save_name = filename + ext.lower()
    download_image(img_src, animal_id, save_path, save_name)
    return data


def extract_animal_id(content):
    detail_url = "%s?%s" % (base_url, content.split('?')[-1])
    qs = parse_qs(urlparse(detail_url).query)
    [animal_id] = qs.get('id')
    return int(animal_id)


if __name__ == "__main__":
    ensure_directories(html_path)
    db = DB(table_name="keelung")

    result = fetch_page()
    total_pages = get_total_page(result)
    print "Total: %d pages" % total_pages
    fetch_page(2, total=total_pages)

    count = 0
    page_files = next(os.walk(html_path))[2]
    for page_file in page_files:
        if not page_file.startswith('page'):
            continue
        with open(os.path.join(html_path, page_file), 'r') as f:
            content = f.read()
        soup = BeautifulSoup(content)
        animal_link_list = soup.find("ol", class_="search_img_list").find_all("li")
        animal_link_list = [l.find('a').get('href') for l in animal_link_list]
        for link in animal_link_list:
            count += 1
            animal_id = extract_animal_id(link)
            animal = db.get_animal(animal_id)
            if animal:
                print "animal id: %d exists, skip fetch" % animal_id
                continue
            data = fetch_detail_page(link, animal_id)
            db.save(data)
    db.to_csv()
    print "All %d items." % count
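Once the scraper has run, the collected rows can be read back from animal.db with nothing but the standard library. A minimal sketch (the table name matches the "keelung" table created above):

import sqlite3

conn = sqlite3.connect('animal.db')
conn.row_factory = sqlite3.Row
for row in conn.execute("SELECT id, enter_date, variety FROM keelung ORDER BY id;"):
    print row['id'], row['enter_date'], row['variety']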
The US has decided to punish UNESCO (the United Nations Educational, Scientific and Cultural Organisation) for admitting Palestine to its membership. The US has brought the organisation to its knees, and it must now beg Big Brother for reinstatement of its funding. Why is Palestine such a mighty threat to such a mighty nation? Because of the PLO? A tiny revolutionary movement in an occupied country where people are not free? To protect its economic interests, the US cannot allow Palestine to become an independent state. The government of the “land of the free and the home of the brave” – the champion of democracy – is unwilling to allow a small oppressed people to fight for its freedom. It will punish UNESCO for following its mandate of creating “the conditions for dialogue,” for agreeing to give a voice to a voiceless people. The demand for “a negotiated peace agreement with Israel” is a demand for Palestinian submission. Do states negotiate with the people of territories that are under their occupation? Surely negotiations happen between states, not between a state and its subject people. The failure to recognise the right of the Palestinian people to an independent state is a failure to acknowledge their human rights. Slavoj Zizek is most concerned by the European slide into fascism: the attack on Muslims in Norway and now the EDL (English Defence League) in Britain. Is the US moving towards fascism: the assassination of Bin Laden and Awlaki, and now the slashing of UNESCO’s funding? It is particularly sad to note that these happenings are occurring under the administration of the first black President of the United States.
# -*- coding: utf-8 -*- """ Python Tests ~~~~~~~~~~~~ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import pytest from pygments.lexers import PythonLexer, Python3Lexer from pygments.token import Token import re @pytest.fixture(scope='module') def lexer2(): yield PythonLexer() @pytest.fixture(scope='module') def lexer3(): yield Python3Lexer() def test_cls_builtin(lexer2): """ Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo """ fragment = 'class TestClass():\n @classmethod\n def hello(cls):\n pass\n' tokens = [ (Token.Keyword, 'class'), (Token.Text, ' '), (Token.Name.Class, 'TestClass'), (Token.Punctuation, '('), (Token.Punctuation, ')'), (Token.Punctuation, ':'), (Token.Text, '\n'), (Token.Text, ' '), (Token.Name.Decorator, '@classmethod'), (Token.Text, '\n'), (Token.Text, ' '), (Token.Keyword, 'def'), (Token.Text, ' '), (Token.Name.Function, 'hello'), (Token.Punctuation, '('), (Token.Name.Builtin.Pseudo, 'cls'), (Token.Punctuation, ')'), (Token.Punctuation, ':'), (Token.Text, '\n'), (Token.Text, ' '), (Token.Keyword, 'pass'), (Token.Text, '\n'), ] assert list(lexer2.get_tokens(fragment)) == tokens def test_needs_name(lexer3): """ Tests that '@' is recognized as an Operator """ fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n' tokens = [ (Token.Name, u'S'), (Token.Text, u' '), (Token.Operator, u'='), (Token.Text, u' '), (Token.Punctuation, u'('), (Token.Name, u'H'), (Token.Text, u' '), (Token.Operator, u'@'), (Token.Text, u' '), (Token.Name, u'beta'), (Token.Text, u' '), (Token.Operator, u'-'), (Token.Text, u' '), (Token.Name, u'r'), (Token.Punctuation, u')'), (Token.Operator, u'.'), (Token.Name, u'T'), (Token.Text, u' '), (Token.Operator, u'@'), (Token.Text, u' '), (Token.Name, u'inv'), (Token.Punctuation, u'('), (Token.Name, u'H'), (Token.Text, u' '), (Token.Operator, u'@'), (Token.Text, u' '), (Token.Name, u'V'), (Token.Text, u' '), (Token.Operator, u'@'), (Token.Text, u' '), (Token.Name, u'H'), (Token.Operator, u'.'), (Token.Name, u'T'), (Token.Punctuation, u')'), (Token.Text, u' '), (Token.Operator, u'@'), (Token.Text, u' '), (Token.Punctuation, u'('), (Token.Name, u'H'), (Token.Text, u' '), (Token.Operator, u'@'), (Token.Text, u' '), (Token.Name, u'beta'), (Token.Text, u' '), (Token.Operator, u'-'), (Token.Text, u' '), (Token.Name, u'r'), (Token.Punctuation, u')'), (Token.Text, u'\n'), ] assert list(lexer3.get_tokens(fragment)) == tokens def test_pep_515(lexer3): """ Tests that the lexer can parse numeric literals with underscores """ fragments = ( (Token.Literal.Number.Integer, u'1_000_000'), (Token.Literal.Number.Float, u'1_000.000_001'), (Token.Literal.Number.Float, u'1_000e1_000j'), (Token.Literal.Number.Hex, u'0xCAFE_F00D'), (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'), (Token.Literal.Number.Oct, u'0o_777_123'), ) for token, fragment in fragments: tokens = [ (token, fragment), (Token.Text, u'\n'), ] assert list(lexer3.get_tokens(fragment)) == tokens def test_walrus_operator(lexer3): """ Tests that ':=' is recognized as an Operator """ fragment = u'if (a := 2) > 4:' tokens = [ (Token.Keyword, 'if'), (Token.Text, ' '), (Token.Punctuation, '('), (Token.Name, 'a'), (Token.Text, ' '), (Token.Operator, ':='), (Token.Text, ' '), (Token.Literal.Number.Integer, '2'), (Token.Punctuation, ')'), (Token.Text, ' '), (Token.Operator, '>'), (Token.Text, ' '), (Token.Literal.Number.Integer, '4'), (Token.Punctuation, ':'), (Token.Text, '\n'), ] assert 
list(lexer3.get_tokens(fragment)) == tokens def test_fstring(lexer3): """ Tests that the lexer can parse f-strings """ fragments_and_tokens = ( # examples from PEP-0498 ( "f'My name is {name}, my age next year is {age+1}, my anniversary is {anniversary:%A, %B %d, %Y}.'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'My name is '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'name'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ', my age next year is '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'age'), (Token.Operator, '+'), (Token.Literal.Number.Integer, '1'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ', my anniversary is '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'anniversary'), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Single, '%A, %B %d, %Y'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, '.'), (Token.Literal.String.Single, "'"), (Token.Text, u'\n') ] ), ( "f'He said his name is {name!r}.'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'He said his name is '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'name'), (Token.Literal.String.Interpol, '!r}'), (Token.Literal.String.Single, '.'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'input={value:#06x}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'input='), (Token.Literal.String.Interpol, '{'), (Token.Name, 'value'), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Single, '#06x'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( """f'{"quoted string"}'\n""", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Double, 'quoted string'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( """f'{f"{inner}"}'\n""", # not in the PEP [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'inner'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( # SyntaxError: f-string expression part cannot include a backslash "f'{\\'quoted string\\'}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Error, '\\'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'quoted string'), (Token.Literal.String.Escape, "\\'"), (Token.Literal.String.Single, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'{{ {4*10} }}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Escape, '{{'), (Token.Literal.String.Single, ' '), (Token.Literal.String.Interpol, '{'), (Token.Literal.Number.Integer, '4'), (Token.Operator, '*'), (Token.Literal.Number.Integer, '10'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ' '), (Token.Literal.String.Escape, '}}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( 
"f'{{{4*10}}}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Escape, '{{'), (Token.Literal.String.Interpol, '{'), (Token.Literal.Number.Integer, '4'), (Token.Operator, '*'), (Token.Literal.Number.Integer, '10'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Escape, '}}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "fr'x={4*10}'\n", [ (Token.Literal.String.Affix, 'fr'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, "x="), (Token.Literal.String.Interpol, '{'), (Token.Literal.Number.Integer, '4'), (Token.Operator, '*'), (Token.Literal.Number.Integer, '10'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( """f'abc {a["x"]} def'\n""", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'abc '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'a'), (Token.Punctuation, '['), (Token.Literal.String.Double, '"'), (Token.Literal.String.Double, 'x'), (Token.Literal.String.Double, '"'), (Token.Punctuation, ']'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ' def'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'''abc {a['x']} def'''\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'''"), (Token.Literal.String.Single, 'abc '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'a'), (Token.Punctuation, '['), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'x'), (Token.Literal.String.Single, "'"), (Token.Punctuation, ']'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ' def'), (Token.Literal.String.Single, "'''"), (Token.Text, '\n') ] ), ( """f'''{x +1}'''\n""", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'''"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'x'), (Token.Text, '\n'), (Token.Operator, '+'), (Token.Literal.Number.Integer, '1'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'''"), (Token.Text, '\n') ] ), ( """f'''{d[0 ]}'''\n""", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'''"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'd'), (Token.Punctuation, '['), (Token.Literal.Number.Integer, '0'), (Token.Text, '\n'), (Token.Punctuation, ']'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'''"), (Token.Text, '\n') ] ), ( "f'result: {value:{width}.{precision}}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'result: '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'value'), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'width'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, '.'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'precision'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "'a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e'\n", [ (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'a'), (Token.Literal.String.Single, "'"), (Token.Text, ' '), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'b'), (Token.Literal.String.Single, "'"), (Token.Text, ' '), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'x'), (Token.Literal.String.Interpol, '}'), 
(Token.Literal.String.Single, "'"), (Token.Text, ' '), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{c}'), (Token.Literal.String.Single, "'"), (Token.Text, ' '), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'str<'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'y'), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Single, '^4'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, '>'), (Token.Literal.String.Single, "'"), (Token.Text, ' '), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'd'), (Token.Literal.String.Single, "'"), (Token.Text, ' '), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'e'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'{i}:{d[i]}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'i'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ':'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'd'), (Token.Punctuation, '['), (Token.Name, 'i'), (Token.Punctuation, ']'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'x = {x:+3}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, "x = "), (Token.Literal.String.Interpol, '{'), (Token.Name, 'x'), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Single, '+3'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'{fn(lst,2)} {fn(lst,3)}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'fn'), (Token.Punctuation, '('), (Token.Name, 'lst'), (Token.Punctuation, ','), (Token.Literal.Number.Integer, '2'), (Token.Punctuation, ')'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ' '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'fn'), (Token.Punctuation, '('), (Token.Name, 'lst'), (Token.Punctuation, ','), (Token.Literal.Number.Integer, '3'), (Token.Punctuation, ')'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'mapping is { {a:b for (a, b) in ((1, 2), (3, 4))} }'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'mapping is '), (Token.Literal.String.Interpol, '{'), (Token.Text, ' '), (Token.Punctuation, '{'), (Token.Name, 'a'), (Token.Punctuation, ':'), (Token.Name, 'b'), (Token.Text, ' '), (Token.Keyword, 'for'), (Token.Text, ' '), (Token.Punctuation, '('), (Token.Name, 'a'), (Token.Punctuation, ','), (Token.Text, ' '), (Token.Name, 'b'), (Token.Punctuation, ')'), (Token.Text, ' '), (Token.Operator.Word, 'in'), (Token.Text, ' '), (Token.Punctuation, '('), (Token.Punctuation, '('), (Token.Literal.Number.Integer, '1'), (Token.Punctuation, ','), (Token.Text, ' '), (Token.Literal.Number.Integer, '2'), (Token.Punctuation, ')'), (Token.Punctuation, ','), (Token.Text, ' '), (Token.Punctuation, '('), (Token.Literal.Number.Integer, '3'), (Token.Punctuation, ','), (Token.Text, ' '), (Token.Literal.Number.Integer, '4'), (Token.Punctuation, ')'), (Token.Punctuation, ')'), (Token.Punctuation, '}'), (Token.Text, ' '), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( """f'a={d["a"]}'\n""", [ (Token.Literal.String.Affix, 'f'), 
(Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'a='), (Token.Literal.String.Interpol, '{'), (Token.Name, 'd'), (Token.Punctuation, '['), (Token.Literal.String.Double, '"'), (Token.Literal.String.Double, 'a'), (Token.Literal.String.Double, '"'), (Token.Punctuation, ']'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'a={d[a]}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, 'a='), (Token.Literal.String.Interpol, '{'), (Token.Name, 'd'), (Token.Punctuation, '['), (Token.Name, 'a'), (Token.Punctuation, ']'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "fr'{header}:\\s+'\n", [ (Token.Literal.String.Affix, 'fr'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'header'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ':'), (Token.Literal.String.Single, '\\'), (Token.Literal.String.Single, 's+'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'{a!r}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'a'), (Token.Literal.String.Interpol, '!r}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'{(lambda x: x*2)(3)}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Punctuation, '('), (Token.Keyword, 'lambda'), (Token.Text, ' '), (Token.Name, 'x'), (Token.Punctuation, ':'), (Token.Text, ' '), (Token.Name, 'x'), (Token.Operator, '*'), (Token.Literal.Number.Integer, '2'), (Token.Punctuation, ')'), (Token.Punctuation, '('), (Token.Literal.Number.Integer, '3'), (Token.Punctuation, ')'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "extra = f'{extra},waiters:{len(self._waiters)}'\n", [ (Token.Name, 'extra'), (Token.Text, ' '), (Token.Operator, '='), (Token.Text, ' '), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'extra'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, ',waiters:'), (Token.Literal.String.Interpol, '{'), (Token.Name.Builtin, 'len'), (Token.Punctuation, '('), (Token.Name.Builtin.Pseudo, 'self'), (Token.Operator, '.'), (Token.Name, '_waiters'), (Token.Punctuation, ')'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( 'message.append(f" [line {lineno:2d}]")\n', [ (Token.Name, 'message'), (Token.Operator, '.'), (Token.Name, 'append'), (Token.Punctuation, '('), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Double, ' [line '), (Token.Literal.String.Interpol, '{'), (Token.Name, 'lineno'), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Double, '2d'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Double, ']'), (Token.Literal.String.Double, '"'), (Token.Punctuation, ')'), (Token.Text, '\n') ] ), # Examples from https://bugs.python.org/issue36817 ( 'f"{foo=}"\n', [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'foo'), (Token.Literal.String.Interpol, '=}'), (Token.Literal.String.Double, '"'), (Token.Text, '\n') ] ), ( "f'{foo=!s}'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), 
(Token.Literal.String.Interpol, '{'), (Token.Name, 'foo'), (Token.Literal.String.Interpol, '=!s}'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( 'f"{math.pi=!f:.2f}"\n', [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'math'), (Token.Operator, '.'), (Token.Name, 'pi'), (Token.Literal.String.Interpol, '=!f:'), (Token.Literal.String.Double, '.2f'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Double, '"'), (Token.Text, '\n') ] ), ( 'f"{ chr(65) =}"\n', [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '{'), (Token.Text, ' '), (Token.Name.Builtin, 'chr'), (Token.Punctuation, '('), (Token.Literal.Number.Integer, '65'), (Token.Punctuation, ')'), (Token.Text, ' '), (Token.Literal.String.Interpol, '=}'), (Token.Literal.String.Double, '"'), (Token.Text, '\n') ] ), ( 'f"{chr(65) = }"\n', [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Interpol, '{'), (Token.Name.Builtin, 'chr'), (Token.Punctuation, '('), (Token.Literal.Number.Integer, '65'), (Token.Punctuation, ')'), (Token.Text, ' '), (Token.Literal.String.Interpol, '= }'), (Token.Literal.String.Double, '"'), (Token.Text, '\n') ] ), ( "f'*{n=:30}*'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, '*'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'n'), (Token.Literal.String.Interpol, '=:'), (Token.Literal.String.Single, '30'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, '*'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( "f'*{n=!r:30}*'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, '*'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'n'), (Token.Literal.String.Interpol, '=!r:'), (Token.Literal.String.Single, '30'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, '*'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( """f"*{f'{n=}':30}*"\n""", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Double, '"'), (Token.Literal.String.Double, '*'), (Token.Literal.String.Interpol, '{'), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'n'), (Token.Literal.String.Interpol, '=}'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Interpol, ':'), (Token.Literal.String.Double, '30'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Double, '*'), (Token.Literal.String.Double, '"'), (Token.Text, '\n') ] ), ( "f'*{n=:+<30}*'\n", [ (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'"), (Token.Literal.String.Single, '*'), (Token.Literal.String.Interpol, '{'), (Token.Name, 'n'), (Token.Literal.String.Interpol, '=:'), (Token.Literal.String.Single, '+<30'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, '*'), (Token.Literal.String.Single, "'"), (Token.Text, '\n') ] ), ( """ f'''{foo = !s:20}'''\n""", [ (Token.Text, ' '), (Token.Literal.String.Affix, 'f'), (Token.Literal.String.Single, "'''"), (Token.Literal.String.Interpol, '{'), (Token.Name, 'foo'), (Token.Text, '\n '), (Token.Literal.String.Interpol, '= !s:'), (Token.Literal.String.Single, '20'), (Token.Literal.String.Interpol, '}'), (Token.Literal.String.Single, "'''"), (Token.Text, '\n') ] ) ) for fragment,tokens in fragments_and_tokens: assert 
list(lexer3.get_tokens(fragment)) == tokens # Now switch between single and double quotes, to cover both cases equally rep = {"'":'"', '"':"'"} pattern = re.compile("|".join(rep.keys())) for fragment,tokens in fragments_and_tokens: fragment = pattern.sub(lambda m: rep[m.group(0)], fragment) tokens = list(tokens) for i,(token,match) in enumerate(tokens): if token == Token.Literal.String.Single: token = Token.Literal.String.Double elif token == Token.Literal.String.Double: token = Token.Literal.String.Single match = pattern.sub(lambda m: rep[m.group(0)], match) tokens[i] = (token, match) assert list(lexer3.get_tokens(fragment)) == tokens
We stick to the principles of “quality first, service foremost, and continual improvement and innovation to meet our customers” in our management, and to “zero defects, zero complaints” as our quality objective. To perfect our service, we offer our products with very good quality at reasonable prices for Automatic Faucet, Automatic Sink Faucet and Automatic Basin Faucet. We warmly welcome shoppers from everywhere in the world for any form of cooperation with us, to build a mutually beneficial long-term relationship. We are devoting ourselves wholeheartedly to offering buyers the best service. With a trustworthy quality system, a good reputation and excellent customer service, the series of products made by our company, including Automatic Faucet, Automatic Sink Faucet and Automatic Basin Faucet, are exported to many countries and regions. With all these supports we can serve every customer with quality products and timely shipping, and with high responsibility. Being a young, growing company, we might not be the best, but we are trying our best to be your good partner.
# -*- coding: utf-8 -*-
"""
    flask_mosession
    ~~~~~~~~~~~~~~~~~~

    Alternative for Flask session module that uses MongoDB as main storage

    :copyright: (c) 2013 by Bayazee & Rokooie.
    :license: BSD, see LICENSE for more details.
"""

from bson import Binary
from uuid import uuid4
from flask import current_app
from flask.sessions import SessionInterface, SessionMixin
from pymongo.errors import ConnectionFailure
from werkzeug.datastructures import CallbackDict

# imported as a module: init_app() looks the configured backend class up by name
import cache_backends


__revision__ = '$Revision: e1a7ef4049fb $'


class MoSession(CallbackDict, SessionMixin):
    """
    Session replacement class.

    The session object will be an instance of this class or its children.
    By importing flask.session, you will get this class' object.

    The MoSession class will save data only when it's necessary; empty
    sessions will not be saved.
    """

    def __init__(self, initial=None):
        def _on_update(d):
            d.modified = True

        CallbackDict.__init__(self, initial, _on_update)

        if initial:
            self.modified = False
        else:
            self.generate_sid()
            self.new = True
            self.modified = True

    def generate_sid(self):
        """
        Generate a session id using UUID4 and store it under the object's _id key.

        :return: (Binary) Session id
        """
        self['_id'] = Binary(str(uuid4()))
        return self['_id']

    def remove_stored_session(self):
        current_app.extensions['mosession'].storage.collection.remove({'_id': self['_id']})
        current_app.extensions['mosession'].cache.remove(str(self['_id']))

    def destroy(self):
        """Destroy a session completely, by deleting all keys and removing it
        from the internal store immediately.

        This allows removing a session for security reasons, e.g. a login
        stored in a session will cease to exist if the session is destroyed.
        """
        self.remove_stored_session()
        self.clear()

        self.new = True
        self.generate_sid()

    def regenerate(self):
        """Generate a new session id for this session.

        To avoid vulnerabilities through `session fixation attacks
        <http://en.wikipedia.org/wiki/Session_fixation>`_, this function can
        be called after an action like a login has taken place. The session
        will be copied over to a new session id and the old one removed.
        """
        self.remove_stored_session()

        self.new = True
        self.generate_sid()

    @property
    def sid(self):
        """
        Return the session id, which is stored in the database as the
        document's _id field.

        :return: Session id
        """
        return str(self['_id'])

    def __setattr__(self, *args, **kwargs):
        return SessionMixin.__setattr__(self, *args, **kwargs)


class MoSessionInterface(SessionInterface):
    """
    MoSession interface class; Flask's session interface is replaced with this.

    MoSessionInterface lets the developer overload or change the behaviour of
    Flask's central session manager.
    """
    session_class = MoSession

    @property
    def _mosession(self):
        """
        Return the current app's MoSession extension instance.
        """
        return current_app.extensions['mosession']

    def load_session(self, sid):
        """
        Load a session from the cache or the database. If it is found in the
        database but not in the cache, it is saved to the cache as well.

        :param sid: Session ID
        :return: An instance of session_class with the session data, or None
                 if the session was not found
        """
        if not sid:
            return None

        stored_session = self._mosession.cache.get(sid)

        if not stored_session:
            stored_session = self._mosession.storage.collection.find_one({'_id': Binary(sid)})
            if stored_session:
                self._mosession.cache.set(sid, stored_session)

        return self.session_class(stored_session) if stored_session else None

    def open_session(self, app, request):
        """
        Override of the open_session interface. Tries to load the session;
        in case of failure it creates a new instance of session_class.

        :param app: Current app's instance (required to load the SESSION_COOKIE_NAME field from config)
        :param request: Current request
        :return: Session object
        """
        return self.load_session(str(request.cookies.get(app.config['SESSION_COOKIE_NAME'], ''))) or self.session_class()

    def raw_save_session(self, session):
        """
        Save the session in the database and also in the cache.

        :param session: Session object
        """
        dict_session = dict(session)
        self._mosession.storage.collection.save(dict_session)
        self._mosession.cache.set(session.sid, dict_session)

    def save_session(self, app, session, response):
        """
        Override of the save_session interface. Saves the session data if it
        has been modified, taking care of session expiration along the way.

        Operation:

        1. If the session's modified flag is not set, do nothing.
        2. Calculate the expiration time and session permanence; if the
           session is new and an expiration exists, store it on the session.
        3. Save the session to storage and cache.
        4. If the session is new, clear its new flag and set the session
           cookie with the sid and the cookie attributes.
        5. Reset the session's modified flag.

        :param app: Current app's instance (required to load the SESSION_COOKIE_NAME field from config)
        :param session: Session object
        :param response: Response object
        """
        if not session.modified:
            return

        session.permanent = not app.config['SESSION_EXPIRE_AT_BROWSER_CLOSE']
        expiration = self.get_expiration_time(app, session)

        if session.new and expiration:
            # TODO: Is this line really necessary?
            session['expire'] = expiration

        self.raw_save_session(session)

        if session.new:
            session.new = False

            response.set_cookie(
                key=app.config['SESSION_COOKIE_NAME'],
                value=session.sid,
                domain=self.get_cookie_domain(app),
                expires=expiration,
                httponly=self.get_cookie_httponly(app)
            )

        session.modified = False


class SessionStorage(object):
    """
    Wrapper around pymongo's database access that serves the sessions
    collection and adds auto-reconnect.

    :param host: MongoDB host
    :param port: MongoDB port
    :param database_name: Name of the sessions database
    :param collection_name: Name of the sessions collection
    """

    def __init__(self, host, port, database_name, collection_name):
        self.host = host
        self.port = port
        self.database_name = database_name
        self.collection_name = collection_name

        self._collection = None

    @property
    def collection(self):
        if not self._collection:
            self.connect()
        return self._collection

    def connect(self):
        """
        Try to connect to MongoDB and set self._collection to a reference to
        the sessions collection.

        It will try 5 times to connect to the database, with a 100 ms delay
        between tries.
        """
        if self._collection:
            return

        # legacy pymongo (< 3.0) API, in line with the age of this module
        from pymongo.connection import Connection
        from pymongo.errors import AutoReconnect

        for _connection_attempts in range(5):
            try:
                self._collection = Connection(self.host, self.port)[self.database_name][self.collection_name]
            except AutoReconnect:
                from time import sleep
                sleep(0.1)
            else:
                break
        else:
            raise ConnectionFailure


class MoSessionExtension(object):
    """
    MoSession extension object.
    """

    def __init__(self, app=None):
        self.app = None
        self.session_class = None
        self.storage = None

        if app:
            self.init_app(app)

    def init_app(self, app):
        """
        Register flask-mosession with Flask's app instance.

        :param app: Flask's app instance
        """
        app.extensions['mosession'] = self

        app.config.setdefault('MONGODB_SESSIONS_COLLECTION_NAME', 'sessions')
        app.config.setdefault('SESSION_EXPIRE_AT_BROWSER_CLOSE', True)
        app.config.setdefault('MOSESSION_CACHE_BACKEND', 'NoCacheBackend')

        self.cache = getattr(cache_backends, app.config['MOSESSION_CACHE_BACKEND'])(app)

        self.storage = SessionStorage(
            app.config['MONGODB_HOST'],
            app.config['MONGODB_PORT'],
            app.config['MONGODB_DATABASE'],
            app.config['MONGODB_SESSIONS_COLLECTION_NAME'],
        )

        app.session_interface = MoSessionInterface()
        if self.session_class:
            app.session_interface.session_class = self.session_class

    def cleanup_sessions(self):
        # TODO: expired session documents should be removed with a MongoDB command
        pass
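A minimal usage sketch, assuming a MongoDB instance is reachable with the config keys read by init_app() above; the host, port and database values are placeholders:

from flask import Flask, session
from flask_mosession import MoSessionExtension

app = Flask(__name__)
app.config['MONGODB_HOST'] = 'localhost'  # assumed deployment value
app.config['MONGODB_PORT'] = 27017        # assumed deployment value
app.config['MONGODB_DATABASE'] = 'myapp'  # assumed database name
MoSessionExtension(app)

@app.route('/visit')
def visit():
    # flask.session is now a MoSession backed by MongoDB
    session['visits'] = session.get('visits', 0) + 1
    return 'visit #%d' % session['visits']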
Being able to bounce back after failure, learn from your mistakes and forge ahead with resilience are vital skills both in and out of the workplace. According to one survey, 91 per cent of HR decision-makers predict that resilience will be key to employability in the next few years. For Michelle Gallaher, La Trobe alumnus and 2017 Telstra Victorian Business Woman of the Year, failing is one of the most important things you can do. Watch our video to find out what Michelle learned from failing her first degree, and what failure can teach you. Develop your resilience through La Trobe’s Career Ready Advantage program.

LBS School Manager Donna Burnett receives 2017 Award for Excellence in School and Faculty Management! The ATEM Best Practice Awards for 2017 were held at the Arts Centre, with over 150 staff from tertiary institutions throughout Australia and New Zealand attending. Recognising professional management and administration in the tertiary education sector is fundamentally important, not only to the staff recognised but to the industry as a whole. Whilst ATEM has worked extremely hard for 41 years to promote a culture where professional managers work in partnership with academics in the education enterprise, universities in general still have a long way to go to achieve the same goal. This award has sought to show that we are equal partners in the profession. I have received an incredible amount of support from the leadership team within the LBS and from managers within the College. This support has enabled me to grow and flourish in my role and to be treated as an equal partner in the operations of a large and multidisciplinary school. Working together without hierarchical boundaries has enabled effective school management, broken down many barriers and allowed professional staff to have a voice in an academic world.

To see is to believe, and to believe is to see. Malcolm Roberts’s recent citizenship troubles betray his double standards. LBS Business School Lecturer in Management Dr Angela McCabe and Dr Tom Osegowitsch from the University of Melbourne have published an article in Crikey contrasting the standards of evidence that populist politicians demand of scientists with those they adopt for themselves. Read the original article on Crikey, here. “Malcolm, you are hearing the interpretation of a highly qualified scientist and you’re saying, ‘I don’t believe that’ — is that right?” an incredulous Tony Jones asked of former One Nation Senator Malcolm Roberts during a recent Q&A episode. The senator had demanded incontrovertible proof of anthropogenic climate change from fellow panellist Professor Brian Cox, and made wild allegations about evidence having been manipulated by NASA, the Australian Bureau of Meteorology and others. Last Friday the High Court declared five current senators ineligible to sit in the Australian parliament. Among them was Malcolm Roberts, who came in for particular criticism. The former senator’s citizenship evidence to the court was summarily rejected. He was chastised by Justice Patrick Keane for his “reliance on his highly subjective appreciation of the importance of commonplace incidents of his familial experience”. His own barrister, Robert Newlinds, said of Senator Roberts that “He’s talked himself into a different belief about that [his possible UK citizenship]”. The former senator has made a name for himself challenging scientists and scientific institutions.
Ostensibly he was holding them to a higher standard of evidence, although the same rigour clearly did not apply to his own personal situation. For years, Malcolm Roberts has been bombarding scientific institutions and individual scientists, as well as journalists and parliamentarians, with strong claims rejecting climate change research. He has also made severe allegations of scientific misconduct against individual scientists, which have been dismissed after due consideration. Roberts routinely challenges climate change scientists to produce “any empirical evidence”, defined as “observations in the real world, it’s measured real world data.” When challenged by scientists such as Brian Cox insisting that there is strong empirical evidence, most prominently global warming trends, Roberts likes to maintain that the data are manipulated by NASA and the Bureau of Meteorology, abetted by the political establishment. Roberts also criticises existing climate models, questioning their projections. These “models have already been proven to be inaccurate and the IPCC (Intergovernmental Panel on Climate Change) has recognised that and admitted it.” Yet climate change predictions have worked well, tracking overall climate trends. The denialist traits in Roberts’s thinking have previously been documented. The cherry-picking of evidence, the impossible expectations raised (for instance in terms of predictive accuracy and scientific consensus), and the underlying conspiracy theories are just some of the hallmarks of denialism. Similar to democratic politics, science can experience gridlock, detours and errors. Progress is often painstakingly slow. In recent times we have also become aware of a number of serious shortcomings in the scientific process. In modern science, the process for establishing valid knowledge, or rather knowledge “warranting belief”, hinges on peer review and professional reputation. In recent times, the process has come in for sharp criticism on account of a number of practices undermining scholarship. The pressure on academics to publish (“publish or perish” in the vernacular) makes them more reliant on the scientific institutions and their gatekeepers, such as journal reviewers, editors and tenure committees. These then direct researchers by effectively sanctioning the acceptable (and therefore permissible) research methods, research questions and available data. The pressure to publish in order to secure tenure and promotion may also lead to an inflation of Type I errors. A Type I error (also known as a “false positive”) is the mistake of erroneously believing something to be true when it is not. Scientific journals tend to cherish surprising results. As a result, scientists may test more frequently for contrarian hypotheses and produce more (erroneous) chance results. As reported by Scientific American magazine, a number of fields of scientific endeavour may be plagued by a significant proportion of false positives and exaggerated results. Equally concerning, studies that fail to find novel results may not even be reported by the authors, effectively censoring the publicly reported evidence. A variety of fields such as medicine and psychology have in recent years been hit by an apparent crisis of reproducibility, and experts have pointed to the elevation of a disproportionate number of false discoveries. In the social sciences, some of these biases and problems may be even more egregious than in the natural sciences.
Overall, such institutional biases are casting a shadow over science and slowing scientific progress. But scientists are increasingly aware of these issues, and gradually the scientific process is responding. Initiatives such as the appearance of ‘minimal threshold’ journals, article retractions and campaigns to report all trials and results attest to the (albeit slow) self-correcting capacity of science. Scientific knowledge is sometimes characterised as “warranted belief”. But what exactly warrants belief? What constitutes valid knowledge? How do we believe in the authority of a particular theory, given that few of us, even scientists, are in a position to verify it independently? We do so by trusting in the institutions tasked with producing valid representations of the world around us. In light of the inevitable shortcomings of the scientific process, the scientific community as well as the wider community need to maintain a healthy degree of scepticism about scientific findings and scientific evidence. But scepticism can sometimes degenerate into paranoid denialism. Seemingly “common sense” explanations or conspiracies can provide a degree of comfort when complex and less-than-perfect expert opinions abound. Like democracy, science and the scientific process are flawed. But they are still preferable to all the other forms of generating valid knowledge that have been tried, including Malcolm Roberts’s.

Where will the tax jobs be in 2020? It’s a vexing question for those planning a career in tax. In my 30-plus years in the profession I have never seen it face so many challenges simultaneously. The most obvious change is of course digital disruption. In part this is because the automatic exchange of data is about to balloon as information is transferred in real time, with computers talking to each other in a common language using standard business reporting. But it is also because of the investment being made around the world by governments and business to effectively leverage their use of big data to make more informed decisions. This is even extending to the development of cognitive computing systems, such as IBM’s ‘Watson’, which can be applied to analyse unstructured data and provide answers to specific questions. As a corollary, much of the traditional tax compliance and process work will gradually diminish as data is collected, exchanged and analysed differently. However, there is an array of other impending changes, including, amongst others: a more informed and savvy public; greater cross-border transactions as part of a more integrated world economy; increased offshoring, especially of compliance work; more complex tax laws to prop up increasingly competitive tax regimes; a growing reliance on consumption taxes worldwide to provide a more stable revenue base; and an evolving international digital economy where labour, finance and knowhow are mobile to an unprecedented degree. Given this mix, no one can predict with absolute certainty where the tax jobs are going to be in 2020. Nonetheless, I believe there are some clear pointers as to how you can best plan a career in tax. Firstly, the ability to analyse big data in a meaningful way is rapidly becoming crucial to both revenue authorities and professional firms of all sizes. From the ATO’s perspective, it is their growth area, as witnessed by the recent creation of their Smarterdata business unit, which is focussed not only on analysing data but on challenging paradigms as to how the ATO conducts its operations.
Increased globalisation has also heightened the need for businesses of all sizes to be transfer-pricing compliant and to develop defensible positions based on finding the most comparable data. Accordingly, tax professionals wishing to augment their tax technical skills by developing business analytics expertise could well consider enrolling in a course such as the Master of Business Analytics or the Graduate Diploma in Business Analytics run by La Trobe University’s Business School, as the combination of such skills will be in high demand in coming years. Secondly, if compliance work goes down, rest assured the taxation laws will not become any easier. Whilst many talk about deregulation, the tax rules have only become more complex, especially for governments worldwide struggling to plug a revenue shortfall. One only has to witness the complexity of our general anti-avoidance provisions to realise how inordinately complex our tax system has become, particularly the recent amendments which will supposedly crack down on international profit shifting. Going forward, what clients will require of their advisers is the ability to work with them in distilling such complexity and providing viable commercial solutions. Accordingly, the way in which tax is taught at both undergraduate and postgraduate level must radically change, so that students not only absorb the complexity of the tax law but also develop the interpersonal skills to service clients and build referral networks in a more global economy. This is one of the key reasons why blended learning is being introduced by the Business School: it encourages students not only to develop better analytical capacities but also to work in teams to resolve issues collaboratively, just as they will be required to do in the workplace. Finally, whilst the future is daunting in some respects, it is critical to remember that accountants repeatedly top the list of most trusted advisers to clients. If you are overwhelmed with change, so are your clients, and if you need to adapt to changing circumstances, so will many of them. Keeping your clients close will be more important than ever before, as will the need to provide timely, accurate and value-added services and the willingness to be adaptive and agile. Mark Morris is a Professor of Practice in Taxation at La Trobe University’s Business School, where he teaches both undergraduate and postgraduate taxation and actively contributes to broader industry engagement initiatives between the Business School, the tax profession and other key stakeholders. Mark also co-chaired the ATO’s ‘Future of the Tax Profession 2016’ working group with Colin, which comprises senior representatives from the ATO, professional bodies, software developers and practitioners, concerning the implementation of the ATO’s standard business reporting initiative.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Author   Version  Date      Comments
# FQuinto  1.0.0    2015-Nov  First version from NoConName 2015 event
# Do test

# Copyright (C) 2015 Fran Quinto
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import RPi.GPIO as GPIO
import time
import os
import sys
import paho.mqtt.client as mqtt
import Adafruit_DHT

# Sensor should be set to Adafruit_DHT.DHT11,
# Adafruit_DHT.DHT22, or Adafruit_DHT.AM2302.
sensor = Adafruit_DHT.DHT22

# DHT data pin: GPIO2, physical pin 3 (Adafruit_DHT uses BCM numbering,
# independent of the RPi.GPIO mode set below)
temphum = 2

GPIO.setwarnings(False)   # Turn off warnings
GPIO.setmode(GPIO.BOARD)  # Use physical BOARD numbering for RPi.GPIO

# Pins (physical numbering)
red = 12    # RGB LED pin, GPIO 18
green = 16  # RGB LED pin, GPIO 23
blue = 18   # RGB LED pin, GPIO 24
pir = 32    # PIR sensor pin, GPIO 12
boton = 7   # physical push-button pin, GPIO 4
rel1_sirena = 35      # Relay 1 pin (left board), GPIO 19
rel2_giro = 36        # Relay 2 pin (left board), GPIO 16
rel3_luz_sirena = 37  # Relay 1 pin (right board), GPIO 26
rel4 = 38             # Relay 2 pin (right board), GPIO 20

# setup all the pins
GPIO.setup(red, GPIO.OUT)
GPIO.setup(green, GPIO.OUT)
GPIO.setup(blue, GPIO.OUT)
GPIO.setup(pir, GPIO.IN)
GPIO.setup(boton, GPIO.IN)
GPIO.setup(rel1_sirena, GPIO.OUT)
GPIO.setup(rel2_giro, GPIO.OUT)
GPIO.setup(rel3_luz_sirena, GPIO.OUT)
GPIO.setup(rel4, GPIO.OUT)

wait = 0.1

# INIT: LEDs off, relays off (the relays are active-low)
GPIO.output(red, 0)    # Turn OFF LED
GPIO.output(green, 0)  # Turn OFF LED
GPIO.output(blue, 0)   # Turn OFF LED
GPIO.output(rel1_sirena, 1)      # Turn OFF
GPIO.output(rel2_giro, 1)        # Turn OFF
GPIO.output(rel3_luz_sirena, 1)  # Turn OFF
GPIO.output(rel4, 1)             # Turn OFF


# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("orden")


# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    print(msg.topic + "\nMessage: " + str(msg.payload))
    if msg.topic == 'orden':
        if msg.payload == 'temperatura':
            humidity, temperature = Adafruit_DHT.read_retry(sensor, temphum)
            if temperature is not None:
                message = 'temperatura:{0:0.1f}'.format(temperature)
            else:
                message = 'temperatura:0'
            client.publish("temperatura", message)
        elif msg.payload == 'humedad':
            humidity, temperature = Adafruit_DHT.read_retry(sensor, temphum)
            if humidity is not None:
                message = 'humedad:{0:0.1f}'.format(humidity)
            else:
                message = 'humedad:0'
            client.publish("humedad", message)
        elif msg.payload == 'giroON':
            GPIO.output(rel2_giro, 0)  # Turn ON
        elif msg.payload == 'luzON':
            GPIO.output(rel3_luz_sirena, 0)  # Turn ON
        elif msg.payload == 'sirenaON':
            GPIO.output(rel1_sirena, 0)  # Turn ON
        elif msg.payload == 'giroOFF':
            GPIO.output(rel2_giro, 1)  # Turn OFF
        elif msg.payload == 'luzOFF':
            GPIO.output(rel3_luz_sirena, 1)  # Turn OFF
        elif msg.payload == 'sirenaOFF':
            GPIO.output(rel1_sirena, 1)  # Turn OFF
        elif msg.payload == 'dispara':
            os.system('mpg321 -g 100 -q mob_ua-gun_shoot_m_16.mp3 &')


try:
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect("localhost", 1883, 60)

    envia_mensaje_boton = True
    envia_mensaje_PIR = False

    # Blocking call that processes network traffic, dispatches callbacks and
    # handles reconnecting.
    # Other loop*() functions are available that give a threaded interface and a
    # manual interface.
    # client.loop_forever()
    while True:
        client.loop()
        # the button is active-low; publish once per press
        if not GPIO.input(boton) and envia_mensaje_boton:
            envia_mensaje_boton = False
            client.publish("boton", "ON")
        elif GPIO.input(boton):
            envia_mensaje_boton = True
        # publish once per PIR trigger
        if GPIO.input(pir) and envia_mensaje_PIR:
            envia_mensaje_PIR = False
            client.publish("PIR", "ON")
        elif not GPIO.input(pir):
            envia_mensaje_PIR = True
        time.sleep(wait)
except KeyboardInterrupt:
    pass

GPIO.output(red, 0)    # Turn OFF LED
GPIO.output(green, 0)  # Turn OFF LED
GPIO.output(blue, 0)   # Turn OFF LED
GPIO.output(rel1_sirena, 1)      # Turn OFF
GPIO.output(rel2_giro, 1)        # Turn OFF
GPIO.output(rel3_luz_sirena, 1)  # Turn OFF
GPIO.output(rel4, 1)             # Turn OFF

# Tidy up any remaining connections.
GPIO.cleanup()
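As a quick way to exercise the script, a companion publisher can push commands to the "orden" topic it subscribes to. This sketch is not part of the original program, and the broker address is a placeholder for wherever the Pi's MQTT broker runs:

import paho.mqtt.publish as publish

PI_BROKER = '192.168.1.50'  # hypothetical address of the Pi's MQTT broker

publish.single('orden', 'luzON', hostname=PI_BROKER)         # siren light relay on
publish.single('orden', 'temperatura', hostname=PI_BROKER)   # request a DHT22 reading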
-Mr. Bulky's: Bulk baking supplies, like flour, sugar, chocolates, candies, herbs, spices, and more.
-The Farmacy: According to a reader submission, this spot has bulk grains, beans, herbs, spices, honey, olive oil, and more.
-Earth Fare: You can buy bulk dry staples, with the addition of candies, maple syrup, agave nectar, and apple cider vinegar.
-Raisin Rack: Stocks baking supplies like flour and sugar, whole grains, beans and legumes, nuts and seeds, trail mix, chocolate chips, soup mixes, pasta, energy chunks, flax, chia, coffee, tea, herbs, spices, honey, and nut butters.
-Essencha Tea House: Stock up on package-free, loose-leaf teas.
-Fresh Thyme and Findlay Market: Both offer a wider variety of bulk pantry staples.
-Lucky's Market: You can buy a wide variety of dry goods here, like grains, beans, baking supplies, nuts, seeds, snacks, and more.
-Mediterranean Food Imports: This spot offers bulk herbs and spices. Ask if they'd be willing to fill your own containers!
-Fresh Thyme: Grains, beans, nuts, seeds, snacks, dried fruits, candies, honey, olive oil, and more.
-Kent Natural Foods Co-op: You can purchase bulk nuts, seeds, flour, oats, sugar, herbs, and spices.
-Phoenix Earth Food Co-op: Sells bulk herbs, spices, tea, coffee, dried foods and pantry essentials, candy, snacks, protein powder, apple cider vinegar, local honey, maple syrup, olive oil, and tamari.
-Tom’s Market: Grains, beans, nuts, seeds, snacks, trail mixes, and more.
from uuid import UUID from flask import Flask from flask_classful import FlaskView, route from nose.tools import eq_ # python3 only class TypingView(FlaskView): def index(self): return "Index" @route('/<id>', methods=['POST']) def post(self, id: str) -> str: return "Post" def patch(self, id: str) -> str: return "Patch" def int(self, arg: int): return str(arg) def float(self, arg: float): return str(arg) def uuid(self, arg: UUID): return str(arg) app = Flask('typing-app') TypingView.register(app) client = app.test_client() def test_index(): resp = client.get('/typing/') eq_(b"Index", resp.data) resp = client.get('/typing') eq_(resp.status_code, 308) def test_post(): resp = client.post('/typing/123') eq_(b"Post", resp.data) resp = client.post('/typing/123/') eq_(resp.status_code, 405) def test_patch(): resp = client.patch('/typing/123/') eq_(b"Patch", resp.data) resp = client.patch('/typing/123') eq_(resp.status_code, 308) def test_url_converter(): for type_, wrong_var, correct_var in [ ('int', 'asdfsdf', '1'), ('float', 'sdfad', '1.1'), ('uuid', '10', '1f5018ba-1a86-4f7f-a6c5-596674562f36') ]: url = '/typing/{}/{}/' resp = client.get(url.format(type_, wrong_var)) # should not match the endpoint if url variable type mismatches eq_(resp.status_code, 404) resp = client.get(url.format(type_, correct_var)) eq_(resp.status_code, 200) eq_(bytes(correct_var, 'utf-8'), resp.data)
Need an incentive to purchase an updated Mac product? For the Mac faithful who have used Macs for a decade or more, you can probably skip this article; you most likely know this already. For general information: on average, Mac hardware is only supported for a maximum of five (5) years from the date the product is discontinued. To read the full statement from Apple, which contains a detailed list of which Mac hardware falls within the Vintage and Obsolete categories, go here.
from functools import wraps
import kiwi.logger
import sys
import logging
from io import BytesIO
from mock import MagicMock, patch

# default log level, overwrite when needed
kiwi.logger.log.setLevel(logging.WARN)

# default commandline used for any test, overwrite when needed
sys.argv = [
    sys.argv[0], 'system', 'prepare',
    '--description', 'description',
    '--root', 'directory'
]
argv_kiwi_tests = sys.argv

# mock open calls
patch_open = patch("{0}.open".format(
    sys.version_info.major < 3 and "__builtin__" or "builtins")
)


class raises(object):
    """
    exception decorator as used in nose, tools/nontrivial.py
    """
    def __init__(self, *exceptions):
        self.exceptions = exceptions
        self.valid = ' or '.join([e.__name__ for e in exceptions])

    def __call__(self, func):
        name = func.__name__

        def newfunc(*args, **kw):
            try:
                func(*args, **kw)
            except self.exceptions:
                pass
            except Exception:
                raise
            else:
                message = "%s() did not raise %s" % (name, self.valid)
                raise AssertionError(message)
        newfunc = wraps(func)(newfunc)
        return newfunc


def mock_open(data=None):
    '''
    Mock "open" function.

    :param data: bytes the mocked file handle should yield
    :return: MagicMock standing in for the builtin open
    '''
    data = BytesIO(data)
    mock = MagicMock()
    handle = MagicMock()
    handle.write.return_value = None
    handle.__enter__.return_value = data or handle
    mock.return_value = handle
    return mock
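A minimal usage sketch for the @raises decorator above (the test function itself is hypothetical; the test passes because one of the listed exceptions is raised):

@raises(KeyError, IndexError)
def test_lookup_fails():
    {}['missing']  # raises KeyError, which @raises swallows as success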
Less than a decade ago, openly showing pride in being Canadian was never really a thing. Though its citizens have loved Canada for decades upon decades, only recently has Canada begun to show off its pride to the rest of the world. This will be on display when the Canada Day weekend falls upon the country in a week. Everyone in the country will hear the sounds of fireworks, and they'll experience the true joy of what it's like to be Canadian.

Instagram, an application used by hundreds of millions of people around the world, has taken it upon itself to challenge Canadians to showcase their pride in the country, in the hope that this year on Canada Day it'll be known worldwide that Canada is proud of what it has become. As part of this challenge, users are meant to describe what they love about being Canadian, what drives them to show off their pride while in another country, or how they feel when others bash our country in a negative manner. The results should be interesting, as there has never been a campaign like this one for Canadian citizens to participate in, at least not on a global scale. Canadians around the globe will also be able to showcase their love of the country. The Canadian Government also plans on going through these Instagram posts to better understand what its citizens love about this country and what this country needs to become better in the years to come.

Vancouver, one of the largest cities in Canada, is truly rising to the challenge this summer, as its goal is to turn the metropolis into a green urban environment. To do this, Vancouver has revealed its intent to work alongside multiple corporations in the city to create large flower gardens or mini forests on top of their buildings. This requires a large amount of maintenance, but the end result is that workers have a more peaceful area to go to on their lunch breaks or before work. This will also create far better air quality in the city, as most people who live in Vancouver already walk or use bicycles to travel the big city. Cars are slowly becoming less relevant as Vancouver continues to go greener. The city will also be placing small gardens all around the city along the sidewalks, much like a garden bed but slightly bigger, to create more greenery and oxygen within the city.

This is the reason Vancouver is quickly becoming one of the best places to live in Canada. Vancouver has already mastered its waterfront, and as it continues to perfect this city, it's ensured that Vancouver will become one of the most beautiful metropolises in the world. It's just a matter of whether cities such as Edmonton or Toronto can follow the same steps as their fellow Canadian city. Toronto is the largest city in Canada, but it is also by far the most disgusting and badly needs to take the same steps as its sister city Vancouver.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'metadata_version': '1.0'
}

DOCUMENTATION = '''
module: iworkflow_bigip_connector
short_description: Manipulate cloud BIG-IP connectors in iWorkflow.
description:
  - Manipulate cloud BIG-IP connectors in iWorkflow.
version_added: 2.4
options:
  name:
    description:
      - Name of the connector to create.
    required: True
  state:
    description:
      - When C(present), ensures that the cloud connector exists. When
        C(absent), ensures that the cloud connector does not exist.
    required: false
    default: present
    choices:
      - present
      - absent
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as
    pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
  - f5-sdk >= 2.3.0
  - iWorkflow >= 2.1.0
author:
  - Tim Rupp (@caphrim007)
'''

EXAMPLES = '''
- name: Create cloud connector named Private Cloud
  iworkflow_bigip_connector:
    name: "Private Cloud"
    password: "secret"
    server: "iwf.mydomain.com"
    user: "admin"
  delegate_to: localhost
'''

RETURN = '''
'''

from ansible.module_utils.f5_utils import (
    AnsibleF5Client,
    AnsibleF5Parameters,
    F5ModuleError,
    HAS_F5SDK,
    iControlUnexpectedHTTPError
)


class Parameters(AnsibleF5Parameters):
    returnables = ['name']

    api_attributes = ['description']

    updatables = ['description']

    def to_return(self):
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def api_params(self):
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result


class ArgumentSpec(object):
    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = dict(
            name=dict(required=True),
            state=dict(
                required=False,
                default='present',
                choices=['absent', 'present']
            )
        )
        self.f5_product_name = 'iworkflow'


class ModuleManager(object):
    def __init__(self, client):
        self.client = client
        self.have = None
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if changed:
            self.changes = Parameters(changed)
            return True
        return False

    def exec_module(self):
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        result.update(**self.changes.to_return())
        result.update(dict(changed=changed))
        return result

    def exists(self):
        """Checks to see if a connector exists.

        This method does not use ODATA queries because that functionality
        is broken in iWorkflow. Therefore, we iterate over all connectors
        until we find the one we're interested in.

        :return:
        """
        collection = self.client.api.cm.cloud.connectors.locals.get_collection()
        for item in collection:
            if item.displayName != "BIG-IP":
                continue
            if item.name != self.want.name:
                continue
            return True
        return False

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def create(self):
        self._set_changed_options()
        if self.client.check_mode:
            return True
        self.create_on_device()
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def update_on_device(self):
        pass

    def read_current_from_device(self):
        connector = None
        collection = self.client.api.cm.cloud.connectors.locals.get_collection()
        for item in collection:
            if item.displayName != "BIG-IP":
                continue
            if item.name != self.want.name:
                continue
            connector = item
            break
        if not connector:
            return None
        result = connector.attrs
        return Parameters(result)

    def create_on_device(self):
        params = self.want.api_params()
        self.client.api.cm.cloud.connectors.locals.local.create(
            name=self.want.name,
            **params
        )

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the BIG-IP connector")
        return True

    def remove_from_device(self):
        resource = None
        collection = self.client.api.cm.cloud.connectors.locals.get_collection()
        for item in collection:
            if item.displayName != "BIG-IP":
                continue
            if item.name != self.want.name:
                continue
            resource = item
            break
        if resource:
            resource.delete()


def main():
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()

    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        client.module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
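The diff computed by _update_changed_options boils down to a dictionary comparison. An illustrative stand-alone sketch, with plain dicts standing in for the Parameters objects:

# Only keys that are set in 'want' AND differ from 'have' count as changes.
want = {'description': 'new text'}
have = {'description': 'old text'}
changed = {k: v for k, v in want.items() if v is not None and v != have.get(k)}
print(changed)  # -> {'description': 'new text'}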
Natalie turned 3 years old this month, and we celebrated her with the very first 'Natalie Day' at the happiest place on earth: RiverTown Crossings Mall. She hasn't been to Disney World quite yet, so trust me, this totally worked. We started by building a cat, who has yet to be named, at Build-a-Bear. She picked out a toy food dish and hair dryer to go with it. Why a hair dryer? Your guess is as good as mine. Didn't matter. It was Natalie Day. Next we spent some time at the Germ Tree, aka the indoor playland in the center of the mall. After about 20 rides down the slide (with the cat) we headed towards the food court for a spin on the carousel, which was the best thing ever. We capped the day off with ice cream. A newly minted 3-year-old's dream day.

Natalie is turning into quite the little lady who loves all things girly: painted nails, princess dresses, bows, makeup, anything pink. She's gentle and detail-oriented, and the girl can't get enough hotdogs, ice cream, and candy of any kind. In the fall she'll head off to preschool, at which point I'll be home crying out all my feelings. Happy Birthday, Natalie.

This month we officially kicked off Women's Golf League with our Spring Mixer at Watermark. We met up with old friends whom we hadn't seen all winter and welcomed new faces. Some gals were tanner than others because… FLORIDA! And this week was our first time out on the course, and it felt so good! The sun was shining, and that's all that mattered; well, at least that's what I'm telling myself, because I lost 4 golf balls to water hazards.

Last weekend, for the first time, we attended Runway on the Rapids with friends for some socially acceptable people watching. Because that's what a fashion show is, right?! Before the show we grabbed dinner with friends at Osteria Rossa. I ordered the Prosciutto Wrapped Quail Gnocchi and it was fabulous. The show was super fun, and you can find out more about it HERE. Afterwards we grabbed a nightcap at Lumber Baron Bar, located inside the Amway Grand Plaza. We'd always heard good things but had never been, and it's safe to say we'll likely return.

This was our second year attending the Wine Tasting Event benefitting St. Cecilia Music Center. It's a wonderful fundraiser catered by our ever-favorite Martha's Vineyard that entails an all-you-can-taste wine selection and a generous spread of meats and cheeses. Jeff and I enjoy a good silent auction, and this event provides the best we've seen. This year Jeff took home a dry-cleaning package, which was entirely all too practical of him. It will not be wasted.

If you also follow me on Instagram, this next blurb won't be news to you, because I diligently documented (via Insta-story) our Housewives Field Trip to Saugatuck last weekend. I took my documentation seriously and even brought my selfie-stick along for extra good angles. Talk about a super fun time! It was a tamer version of 'mommas gone wild' and began with a vigorous stair climb to the top of Mount Baldhead Park. Over 300 steps (and a few mimosas) later, we retreated to Everyday People Café for bottomless mimosas (sensing a theme?) and some breakfast. We also made a stop at Virtue Cider and bought out their stock of canned Rosé. I'm not much of a cider fan, but the cans were adorable, pink, and I was in a good mood. The rest of our time was spent shopping and bar hopping our way back to our families. I fell asleep around 9:30pm. It was a really good day. A big shout out to our home-girl Hannah for being our designated driver. We promise to pay you back once that little bundle of joy arrives!

Speaking of housewives, we started what I hope continues to be a monthly supper club with our group of girlfriends, and April was my turn to plan. I'm a sucker for hibachi and dragged the ladies out to 28th Street for flying shrimp and flaming onion volcanoes at Ichiban Hibachi & Sushi. There was a koi fish pond, cocktails, and lots of laughs.

Also, if you're reading this from any place but your front porch or sunny patio... you're doing it wrong. Go get yourself some sun! Enjoy the warmth.

POSTSCRIPT – Stuff we're into lately.

FMG's Concert Series! If you have never been to an outdoor concert at Frederik Meijer Gardens, you are missing out. We've selected our summer roster and look forward to drinking wine out of plastic cups.

Westworld is back, and we're thrilled about it. It's futuristic, dramatic, and extremely thought-provoking. Beware: it's not for the faint of heart.

Costco (always Costco), but specifically Costco for its incredible deal on arborvitae trees, which are double the size of the ones at Meijer for the same steal of a price at $26. What's more, I crammed 6 of them into my mid-sized SUV (pictured as the header of this post). The guys at Costco were likely dying laughing upon my departure.
from google.appengine.ext import vendor
vendor.add('extensions')

from google.appengine.api import mail
import jinja2
import os
import premailer

_appid = os.getenv('APPLICATION_ID').replace('s~', '')
EMAIL_SENDER = 'noreply@{}.appspotmail.com'.format(_appid)


class Emailer(object):

    def __init__(self, sender=None):
        self.sender = sender or EMAIL_SENDER

    def send(self, to, subject, template_path, kwargs=None):
        html = self._render(template_path, kwargs=kwargs)
        self._send(subject, to, html)

    def _render(self, template_path, kwargs=None):
        params = {}
        if kwargs:
            params.update(kwargs)
        template = self.env.get_template(template_path)
        html = template.render(params)
        return premailer.transform(html)

    def _send(self, subject, to, html):
        message = mail.EmailMessage(sender=self.sender, subject=subject)
        message.to = to
        message.html = html
        message.send()

    @property
    def env(self):
        path = os.path.join(os.path.dirname(__file__), 'templates')
        loader = jinja2.FileSystemLoader([path])
        extensions = [
            'jinja2.ext.autoescape',
            'jinja2.ext.do',
            'jinja2.ext.loopcontrols',
            'jinja2.ext.with_',
        ]
        return jinja2.Environment(
            loader=loader,
            extensions=extensions,
            autoescape=True,
            trim_blocks=True)
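A minimal usage sketch for the class above (the recipient, subject, template path, and template variables are hypothetical; the template is resolved relative to the templates/ directory wired up in env):

emailer = Emailer()
emailer.send(
    to='user@example.com',
    subject='Welcome',
    template_path='welcome.html',
    kwargs={'name': 'Ada'},
)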
John le Bon gives a couple of Ballers a chance, and they don't do much to convince. Their numbers seem to add up (to them), and their shadow measurements of right-angled triangles make sense to me when they explain lines of latitude. I heard it repeated that the only way these numbers work is if the Earth is a ball. Maths aside, why can't we get a verifiable picture of this ball from way up? Explain the nonsense around Antarctica. Cosines and tangents don't explain those anomalies.

Join Matrix Decode, David Weiss and host John le Bon for another installment of the Ball Earth Skeptic Roundtable. This is a special episode featuring three ball earthers who will be given a fair chance to argue for and defend their ball earth beliefs. Joining us first will be Critical Unity. Later in the show we will be joined by Reds Rhetoric (plus one).

The two 'brains' seemed to set out to bombard us with maths. I was a little disappointed that John didn't get much of a chance to make his case with them; he let them bang on for ages and then he handed over to Matrix. Matrix is alright, but he's a guy I've listened to making a determined case for Judy Wood's research. Was he our very best 'brain' here, at this point? He started by accusing Ziller of presenting a 'strawman'. This clearly got Ziller's goat, as he then proceeded to accuse Matrix of presenting 'a strawman' with just about every point he made. There were so many 'strawmen' flying about, it was a bit mad. Ziller even got to the point of making an impassioned plea, to presumably the whole world, to just agree with him that the point Matrix had just made 'was a strawman'. Ziller proved himself unable to have an intelligent conversation. Ziller is a professional shill!

Here are a couple of good videos that give some pretty good evidence that the earth is a sphere: The Earth Is Flat, Rory Cooper Says So! Part I: The Polaris Conundrum.

Interesting and informative stuff from Ziller, despite the fake sardonic laughter and the condescending tone. He is THE debunker, by all accounts, after all. Here he addresses the issue of the horizon and the Bedford Level experiment. My interest in the shape of things and our perspective was piqued by the Bedford Level experiment and its apparently accepted results. I had the impression that it had proved that we can see perfectly straight over 6 miles and that Science had admitted it, with the caveat that it was due to refraction. Ziller seems to mock this notion here, as he tells how the thing Rowbotham didn't do was have 'controls', and that a clever scientist came along and added a control halfway. This marker was seen to rise, and therefore curvature was proved. This, if true, would say a lot to me. It would answer my main question regarding the dastardly word 'flat' when talking about what our model really is. And it is presumably fairly easily repeated. But surely Rowbotham used controls at every mile, didn't he…? Ziller says he didn't.
"table -- Table class" import collections from . import pg, threevl, sqparse2, sqex # errors class PgExecError(sqparse2.PgMockError): "base class for errors during table execution" class BadFieldName(PgExecError): pass class IntegrityError(PgExecError): pass # careful: pgmock_dbapi also defines this class Missing: "for distinguishing missing columns vs passed-in null" def expand_row(table_fields, fields, values): "helper for insert. turn (field_names, values) into the full-width, properly-ordered row" table_fieldnames = [f.name for f in table_fields] reverse_indexes = {table_fieldnames.index(f): i for i, f in enumerate(fields)} indexes = [reverse_indexes.get(i) for i in range(len(table_fields))] return [(Missing if i is None else values[i]) for i in indexes] def emergency_cast(colx, value): """ugly: this is a huge hack. get serious about where this belongs in the architecture. For now, most types rely on being fed in as SubbedLiteral. """ if colx.coltp.type.lower() == 'boolean': if isinstance(value, sqparse2.NameX): value = value.name if isinstance(value, bool): return value return dict(true=True, false=False)[value.lower()] # keyerror if other else: return value # todo: type check? def field_default(colx, table_name, tables_dict): "takes sqparse2.ColX, Table" if colx.coltp.type.lower() == 'serial': next_id = sqparse2.parse('select coalesce(max(%s),-1)+1 from %s' % (colx.name, table_name)) return sqex.run_select(next_id, tables_dict, Table)[0] elif colx.not_null: raise NotImplementedError('todo: not_null error') else: return toliteral(colx.default) FieldLookup = collections.namedtuple('FieldLookup', 'index type') def toliteral(probably_literal): # todo: among the exception cases are Missing, str. go through cases and make this cleaner. the test suite alone has multiple types here. if probably_literal == sqparse2.NameX('null'): return None return probably_literal.toliteral() if hasattr(probably_literal, 'toliteral') else probably_literal class Table: def __init__(self, name, fields, pkey): "fields is a list of sqparse2.ColX" self.name, self.fields, self.pkey = name, fields, (pkey or []) self.rows = [] self.child_tables = [] # tables that inherit from this one self.parent_table = None # table this inherits from def get_column(self, name): col = next((f for f in self.fields if f.name == name), None) if col is None: raise KeyError(name) return col def pkey_get(self, row): if len(self.pkey) > 0: indexes = [i for i, f in enumerate(self.fields) if f.name in self.pkey] if len(indexes) != len(self.pkey): raise ValueError('bad pkey') pkey_vals = list(map(row.__getitem__, indexes)) return next((r for r in self.rows if pkey_vals == list(map(r.__getitem__, indexes))), None) else: # warning: is this right? it's saying that if not given, the pkey is the whole row. test dupe inserts on a real DB. return row if row in self.rows else None def fix_rowtypes(self, row): if len(row) != len(self.fields): raise ValueError return list(map(toliteral, row)) def apply_defaults(self, row, tables_dict): "apply defaults to missing cols for a row that's being inserted" return [ emergency_cast(colx, field_default(colx, self.name, tables_dict) if v is Missing else v) for colx, v in zip(self.fields, row) ] def insert(self, fields, values, returning, tables_dict): nix = sqex.NameIndexer.ctor_name(self.name) nix.resolve_aonly(tables_dict, Table) expanded_row = self.fix_rowtypes(expand_row(self.fields, fields, values) if fields else values) row = self.apply_defaults(expanded_row, tables_dict) # todo: check ColX.not_null here. 
figure out what to do about null pkey field for i, elt in enumerate(row): # todo: think about dependency model if one field relies on another. (what do I mean? 'insert into t1 (a,b) values (10,a+5)'? is that valid?) row[i] = sqex.Evaluator(row, nix, tables_dict).eval(elt) if self.pkey_get(row): raise pg.DupeInsert(row) self.rows.append(row) if returning: return sqex.Evaluator((row,), nix, tables_dict).eval(returning) return None def match(self, where, tables, nix): return [r for r in self.rows if not where or threevl.ThreeVL.test(sqex.Evaluator((r,), nix, tables).eval(where))] def lookup(self, name): if isinstance(name, sqparse2.NameX): name = name.name # this is horrible; be consistent try: return FieldLookup(*next((i, f) for i, f in enumerate(self.fields) if f.name == name)) except StopIteration: # todo: confirm that next() still raises StopIteration on py3 raise BadFieldName(name) def update(self, setx, where, returning, tables_dict): nix = sqex.NameIndexer.ctor_name(self.name) nix.resolve_aonly(tables_dict, Table) if not all(isinstance(x, sqparse2.AssignX) for x in setx): raise TypeError('not_xassign', list(map(type, setx))) match_rows = self.match(where, tables_dict, nix) if where else self.rows for row in match_rows: for expr in setx: row[self.lookup(expr.col).index] = sqex.Evaluator((row,), nix, tables_dict).eval(expr.expr) if returning: # todo: write a test for the empty case, make sure this doesn't crash. Should I set row to None at the top or is it not that simple? # pylint: disable=undefined-loop-variable return sqex.Evaluator((row,), nix, tables_dict).eval(returning) return None def delete(self, where, tables_dict): # todo: what's the deal with nested selects in delete. does it get evaluated once to a scalar before running the delete? # todo: this will crash with empty where clause nix = sqex.NameIndexer.ctor_name(self.name) nix.resolve_aonly(tables_dict, Table) # todo: why 'not' below? self.rows = [r for r in self.rows if not sqex.Evaluator((r,), nix, tables_dict).eval(where)]
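To make expand_row concrete, here is a hedged sketch using namedtuple stand-ins for sqparse2.ColX (only the .name attribute matters for this helper):

import collections

Col = collections.namedtuple('Col', 'name')  # hypothetical ColX stand-in
cols = [Col('a'), Col('b'), Col('c')]
# Inserting (c, a) = (3, 1) yields the full-width row [1, Missing, 3],
# where Missing is the sentinel class defined above.
print(expand_row(cols, ['c', 'a'], [3, 1]))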
Genuine leather bags from our collection are a range of classic bags that reflect the culture of their origin. This wallet features a historical Iranian print that showcases the rich heritage of Iran: the figure of a Persian man who serves the country. The texture, shade, and colour of the wallet attest to the genuine leather. Inside are multiple compartments for convenient storage. If you like classic vintage work, check out our collection!
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris._merge._CubeSignature` class."""

from __future__ import (absolute_import, division, print_function)

# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests

import mock
import numpy as np

import iris.exceptions
from iris._merge import _CubeSignature as CubeSig


class Test_match__fill_value(tests.IrisTest):
    def setUp(self):
        self.defn = mock.Mock(standard_name=mock.sentinel.standard_name,
                              long_name=mock.sentinel.long_name,
                              var_name=mock.sentinel.var_name,
                              units=mock.sentinel.units,
                              attributes=mock.sentinel.attributes,
                              cell_methods=mock.sentinel.cell_methods)
        self.data_shape = mock.sentinel.data_shape
        self.data_type = mock.sentinel.data_type

    def test_non_nan_fill_value_equal(self):
        sig1 = CubeSig(self.defn, self.data_shape, self.data_type, 10)
        sig2 = CubeSig(self.defn, self.data_shape, self.data_type, 10)
        self.assertTrue(sig1.match(sig2, True))
        self.assertTrue(sig1.match(sig2, False))
        self.assertTrue(sig2.match(sig1, True))
        self.assertTrue(sig2.match(sig1, False))

    def test_non_nan_fill_value_unequal(self):
        sig1 = CubeSig(self.defn, self.data_shape, self.data_type, 10)
        sig2 = CubeSig(self.defn, self.data_shape, self.data_type, 20)
        with self.assertRaises(iris.exceptions.MergeError):
            sig1.match(sig2, True)
        self.assertFalse(sig1.match(sig2, False))
        with self.assertRaises(iris.exceptions.MergeError):
            sig2.match(sig1, True)
        self.assertFalse(sig2.match(sig1, False))

    def test_nan_fill_value_equal(self):
        sig1 = CubeSig(self.defn, self.data_shape, self.data_type, np.nan)
        sig2 = CubeSig(self.defn, self.data_shape, self.data_type, np.nan)
        self.assertTrue(sig1.match(sig2, True))
        self.assertTrue(sig1.match(sig2, False))
        self.assertTrue(sig2.match(sig1, True))
        self.assertTrue(sig2.match(sig1, False))

    def test_nan_fill_value_unequal(self):
        sig1 = CubeSig(self.defn, self.data_shape, self.data_type, np.nan)
        sig2 = CubeSig(self.defn, self.data_shape, self.data_type, 10)
        with self.assertRaises(iris.exceptions.MergeError):
            sig1.match(sig2, True)
        self.assertFalse(sig1.match(sig2, False))
        with self.assertRaises(iris.exceptions.MergeError):
            sig2.match(sig1, True)
        self.assertFalse(sig2.match(sig1, False))

    def test_str_fill_value_equal(self):
        sig1 = CubeSig(self.defn, self.data_shape, self.data_type, ' ')
        sig2 = CubeSig(self.defn, self.data_shape, self.data_type, ' ')
        self.assertTrue(sig1.match(sig2, True))
        self.assertTrue(sig1.match(sig2, False))
        self.assertTrue(sig2.match(sig1, True))
        self.assertTrue(sig2.match(sig1, False))

    def test_str_fill_value_unequal(self):
        sig1 = CubeSig(self.defn, self.data_shape, self.data_type, ' ')
        sig2 = CubeSig(self.defn, self.data_shape, self.data_type, '_')
        with self.assertRaises(iris.exceptions.MergeError):
            sig1.match(sig2, True)
        self.assertFalse(sig1.match(sig2, False))
        with self.assertRaises(iris.exceptions.MergeError):
            sig2.match(sig1, True)
        self.assertFalse(sig2.match(sig1, False))


if __name__ == '__main__':
    tests.main()
FPSP 2010 Best Practices Analyzer examines a server running FPSP 2010 and checks the server's system configuration as well as the FPSP 2010 product configuration. Any settings, or combinations of settings, that don't conform to FPSP 2010 best practices are summarized and displayed in a consolidated report, so that administrators can easily identify and address these issues. FPSP 2010 BPA also includes documentation that explains each best practice, as well as reasons why a setting might violate a best practice, so that admins can judge whether the best practice applies to their server. The same goes for the FPE 2010 Best Practices Analyzer, originally released last December. BPA tools have an optional setting that allows them to connect to the Microsoft Download Center in order to download updated sets of best practices. If you have already downloaded FPE BPA, it's recommended that you run a rules update to make sure you get these new best-practices rules.
"""exdb URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/dev/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import url from django.contrib.auth.views import login, logout_then_login from exdb import views urlpatterns = [ url(r'^$', views.HomeView.as_view(), name='home'), url(r'^create$', views.CreateExperienceView.as_view(), name='create_experience'), url(r'^approval/(?P<pk>\d+)$', views.ExperienceApprovalView.as_view(), name='approval'), url(r'^conclusion/(?P<pk>\d+)$', views.ExperienceConclusionView.as_view(), name='conclusion'), url(r'^view/(?P<pk>\d+)$', views.ViewExperienceView.as_view(), name='view_experience'), url(r'^edit/(?P<pk>\d+)$', views.EditExperienceView.as_view(), name='edit'), url(r'^login$', login, name='login', kwargs={'template_name': 'exdb/login.html'}), url(r'^logout$', logout_then_login, name='logout'), url(r'^list/upcoming$', views.ListExperienceByStatusView.as_view(readable_status="Upcoming"), name="upcoming_list"), url(r'^list/needs-evaluation$', views.ListExperienceByStatusView.as_view(readable_status="Needs Evaluation"), name="eval_list"), url(r'^list/(?P<status>[a-zA-Z\-]+)$', views.ListExperienceByStatusView.as_view(), name='status_list'), url(r'^experience/search/$', views.SearchExperienceResultsView.as_view(), name='search'), url(r'^experience/search/report$', views.SearchExperienceReport.as_view(), name='search_report'), url(r'^complete/(?P<pk>\d+)?$', views.CompletionBoardView.as_view(), name='completion_board'), url(r'^requirement/view/(?P<pk>\d+)$', views.ViewRequirementView.as_view(), name='view_requirement'), url(r'^section/complete/(?P<pk>\d+)?$', views.SectionCompletionBoardView.as_view(), name='section_completion_board'), ]
Online advertising is featured within select sections of the DailyBitcoinNews.com website. Advertisers must ensure their ads meet DailyBitcoinNews.com's principles for advertising. Advertising generates revenue that helps support the site's ability to provide high-quality information. We accept ads that appear in the right sidebar (desktop view only), in the left sidebar (both desktop and mobile view), and at the bottom of the page. Along with paid ads, DailyBitcoinNews.com house ads are placed on the site to help promote various DailyBitcoinNews.com services and products that are of interest to readers.

Online advertisements on DailyBitcoinNews.com do not collect personally identifiable information about individual visitors. DailyBitcoinNews.com does collect aggregate data on visitors, such as time of day and web browser type. Such information may be shared with advertisers to determine their advertising effectiveness.

DailyBitcoinNews.com may contain advertisements of third parties. The inclusion of advertisements on DailyBitcoinNews.com does not imply endorsement of the advertised products or services by DailyBitcoinNews.com. DailyBitcoinNews.com shall not be responsible for any loss or damage of any kind incurred as a result of the presence of such advertisements on the site. Further, DailyBitcoinNews.com shall not be responsible or liable for the statements or conduct of any third-party advertisers appearing on the site. You shall be solely responsible for any correspondence or transactions you have with any third-party advertisers.

DailyBitcoinNews.com may provide links (including any link through an online banner advertisement) to other sites on the Internet for your convenience. These other sites are maintained by third parties over which DailyBitcoinNews.com exercises no control. The appearance of any such third-party links is not intended to endorse any particular company or product. If you decide to access any of the third-party sites linked to DailyBitcoinNews.com, you do so entirely at your own risk.

These principles are applied by DailyBitcoinNews.com to ensure adherence to the highest ethical standards of advertising and to determine the eligibility of products and services for advertising on the site. The appearance of advertising on DailyBitcoinNews.com is neither a guarantee nor an endorsement by DailyBitcoinNews.com of the product, service, or company, or of the claims made for the product in such advertising. The fact that an advertisement for a product, service, or company has appeared on DailyBitcoinNews.com shall not be referred to in collateral advertising.

As a matter of policy, DailyBitcoinNews.com will sell advertising space on its websites when the inclusion of advertising does not interfere with the mission or objectives of DailyBitcoinNews.com or its publications. DailyBitcoinNews.com, in its sole discretion, retains the right to decline any submitted advertisement or to discontinue posting of any advertisement previously accepted.

1. Digital advertising may be placed on the DailyBitcoinNews.com.

2. Digital advertisements must be readily distinguishable from editorial content. The word "advertisement" must be placed adjacent to the advertising image.

4. Digital advertisements that are fixed in relation to the viewer's screen, or that rotate, should be placed to ensure that juxtaposition will not occur as screen content changes.

6. The DailyBitcoinNews.com logo may not appear on commercial websites as a logo or in any other form without prior written approval by the individuals responsible for the respective areas within DailyBitcoinNews.com.

7. Advertisements may link to additional promotional content that resides on the DailyBitcoinNews.com.

8. Advertisements may link off-site to a commercial website, provided that the viewer is clearly informed by the word "advertisement" adjacent to the image or link.

9. The DailyBitcoinNews.com will not link to websites that frame the DailyBitcoinNews.com content without express permission of the DailyBitcoinNews.com; prevent the viewer from returning to the DailyBitcoinNews.com or other previously viewed screens, such as by disabling the viewer's "back" button; or redirect the viewer to a website the viewer did not intend to visit.

10. The DailyBitcoinNews.com reserves the right to not link to, or to remove links to, other websites.
"""refugio URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.conf import settings from django.conf.urls.static import static from django.contrib.auth.views import login, logout_then_login, password_reset, password_reset_done, password_reset_confirm, password_reset_complete urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^mascota/', include('apps.mascota.urls', namespace="mascota")), url(r'^adopcion/', include('apps.adopcion.urls', namespace="adopcion")), url(r'^usuario/', include('apps.usuarios.urls', namespace="usuario")), url(r'^accounts/login/', login, {'template_name': 'index.html'}, name='login'), url(r'^logout/', logout_then_login, name='logout'), url(r'^reset/password_reset', password_reset, {'template_name': 'registration/password_reset_form.html', 'email_template_name': 'registration/password_reset_email.html'}, name='password_reset'), url(r'^reset/password_reset_done', password_reset_done, {'template_name': 'registration/password_reset_done.html'}, name='password_reset_done'), url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', password_reset_confirm, {'template_name': 'registration/password_reset_confirm.html'}, name='password_reset_confirm'), url(r'^reset/done', password_reset_complete, {'template_name': 'registration/password_reset_complete.html'}, name='password_reset_complete'), ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
A tram hit a parked tram at the Topçular stop in the Eyüp district of Istanbul. Fourteen passengers were injured when the tram reportedly rammed into the stationary tram at the stop on March 15, state-run Anadolu Agency has reported. Topçular is on the T-4 Topkapı-Habipler line. None of the injured, including the tram driver, were said to be in serious condition. Several ambulances and a fire brigade squad rushed to the scene soon after the incident. Tram services were reportedly disrupted between the Demirkapı and Bosna Çukurçeşme stations following the incident. Reports indicate that a probe into the incident has been launched.
""" Django settings for mitsingen project. Generated by 'django-admin startproject' using Django 1.9.1. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", '') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get("DJANGO_DEBUG", "False") == "True" ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'sing.apps.SingConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mitsingen.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mitsingen.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = 'static/' MEDIA_ROOT = os.path.join(BASE_DIR, "media") MEDIA_URL = "/media/" EMAIL_HOST = os.environ.get("DJANGO_EMAIL_HOST", '') EMAIL_PORT = 465 EMAIL_HOST_USER = os.environ.get("DJANGO_EMAIL_HOST_USER", '') EMAIL_HOST_PASSWORD = os.environ.get("DJANGO_EMAIL_HOST_PASSWORD", '') EMAIL_USE_SSL = True LOGIN_URL = "/sing"
Using subwavelength-patterned dielectrics, we have designed and implemented a wide variety of wavefront-manipulation elements. The modulation, which may be periodic or aperiodic (chirped), permits the realization of physical properties, such as refractive index profiles and molecular surface interactions, that are not easily manufacturable in natural materials. Hence, wide opportunities are presented for elements with unprecedented performance across functional domains beyond optics, with applications in imaging, telecommunications, and energy harvesting. I will describe in detail one such example: an axial gradient for broadband omnidirectional anti-reflective coatings. The same surface pattern also causes the surface to become superhydrophobic or superhydrophilic (depending on the surfactant coating), thus enabling simultaneous control of reflectivity and surface wetting.
from adhocracy4.dashboard import ProjectDashboard
from adhocracy4.dashboard import components

default_app_config = 'meinberlin.apps.dashboard.apps.Config'


class TypedProjectDashboard(ProjectDashboard):
    def __init__(self, project):
        if project.project_type == 'meinberlin_bplan.Bplan':
            project = project.externalproject.bplan
        elif (project.project_type ==
                'meinberlin_extprojects.ExternalProject'):
            project = project.externalproject
        elif (project.project_type ==
                'meinberlin_projectcontainers.ProjectContainer'):
            project = project.projectcontainer
        super().__init__(project)

    def get_project_components(self):
        if self.project.project_type == 'meinberlin_bplan.Bplan':
            return [components.projects.get('bplan'),
                    components.projects.get('plans'),
                    components.projects.get('adminlog')]
        elif (self.project.project_type ==
                'meinberlin_extprojects.ExternalProject'):
            return [components.projects.get('external'),
                    components.projects.get('topics'),
                    components.projects.get('point'),
                    components.projects.get('plans'),
                    components.projects.get('adminlog')]
        elif (self.project.project_type ==
                'meinberlin_projectcontainers.ProjectContainer'):
            return [components.projects.get('container-basic'),
                    components.projects.get('container-information'),
                    components.projects.get('topics'),
                    components.projects.get('point'),
                    components.projects.get('plans'),
                    components.projects.get('container-projects')]

        return [component for component in components.get_project_components()
                if component.is_effective(self.project)]

    def get_module_components(self):
        if self.project.project_type == 'meinberlin_bplan.Bplan':
            return []
        elif (self.project.project_type ==
                'meinberlin_extprojects.ExternalProject'):
            return []
        elif (self.project.project_type ==
                'meinberlin_projectcontainers.ProjectContainer'):
            return []

        return components.get_module_components()
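A hedged usage sketch: the wrapper is instantiated like the plain ProjectDashboard and returns the type-specific component lists (the project instance is hypothetical; the identifier attribute is assumed from the adhocracy4 component registry):

dashboard = TypedProjectDashboard(project)  # 'project' is any a4 project instance
for component in dashboard.get_project_components():
    print(component.identifier)  # assumes registered components expose an identifier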
The anyfeed SXM50 is a flexible bulk parts feeder with an integrated bulk storage bin that can hold components of arbitrary shapes and materials. When combined with flexfactory's vision solution and your robot of choice, the anyfeed SXM50 becomes a highly versatile robot feeding system that offers solutions for a wide range of application requirements. Controlled by the vision system, the anyfeed SXM50 uses its servo-electric drives to move the components into positions that can be easily picked by the robot. The anyfeed SXM line of feeders is based on a feeding method patented by flexfactory that requires neither high-maintenance conveyor belts nor expensive recirculating systems. The 'first-in-first-out' principle of the anyfeed SXM50 is an efficient and reliable handling method that ensures parts spend as little time as possible in the feeding system waiting to be picked.
""" Django settings for dondeestas project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Parse database configuration from $DATABASE_URL import dj_database_url DATABASES = {'default': dj_database_url.config(default='postgres://ukqvlsqxejpebv:BthNM1aQN2DNd8ysGvG2N2JK6j@ec2-174-129-197-200.compute-1.amazonaws.com:5432/dflgcpfmok4avu')} # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Allow all host headers ALLOWED_HOSTS = ['*'] # Static asset configuration import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) STATIC_ROOT = 'staticfiles' STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) PROJECT_PATH = os.path.dirname(__file__) PROJECT_ROOT = os.path.abspath(os.path.join(PROJECT_PATH, os.pardir)) import wsgi # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'admin_tools', 'admin_tools.theming', 'admin_tools.menu', 'admin_tools.dashboard', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_extensions', 'south', 'rest_framework', 'rest_framework.authtoken', 'api', 'misc', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS = ( "django.core.context_processors.request", "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.core.context_processors.tz", "django.contrib.messages.context_processors.messages" ) #ADMIN_TOOLS_INDEX_DASHBOARD = 'dashboard.CustomIndexDashboard' #ADMIN_TOOLS_APP_INDEX_DASHBOARD = 'dashboard.CustomAppIndexDashboard' #ADMIN_TOOLS_THEMING_CSS = 'css/admin_theming.css' TRACK_AJAX_REQUESTS=True TRACK_PAGEVIEWS=True DEBUG_TOOLBAR_PATCH_SETTINGS = False ROOT_URLCONF = 'main.urls' WSGI_APPLICATION = 'main.wsgi.application' DATE_INPUT_FORMATS=( '%d-%m-%Y', '%d/%m/%Y', '%d/%m/%y', # '25-10-2006', '25/10/2006', '25/10/06' '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ) DATETIME_INPUT_FORMATS=( '%d-%m-%Y %H:%M:%S', # '2006-10-25 14:30:59' '%d-%m-%Y %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%d-%m-%Y %H:%M', # '2006-10-25 14:30' '%d-%m-%Y', # '2006-10-25' '%d/%m/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '10/25/2006 14:30' '%d/%m/%Y', # '10/25/2006' '%d/%m/%y %H:%M:%S', # 
'10/25/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200' '%d/%m/%y %H:%M', # '10/25/06 14:30' '%d/%m/%y', # '10/25/06' ) # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Kolkata' #USE_I18N = True #USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(PROJECT_ROOT, "static") STATICFILES_DIRS = ( os.path.join(PROJECT_ROOT, "main","static"), ) CRISPY_TEMPLATE_PACK = 'bootstrap3' CRISPY_FAIL_SILENTLY = not DEBUG TEMPLATE_DIRS = ( os.path.join(PROJECT_PATH, 'templates'), ) REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny',), 'PAGINATE_BY': 10, 'DEFAULT_AUTHENTICATION_CLASSES': ( ) } try: from production_settings import * except ImportError: #If production settings are present don't import local settings try: from local_settings import * except ImportError: print "couldnt import local_settings" pass
Everyone knows "The Star-Spangled Banner". The anthem represents the United States of America and says a lot about the freedom and bravery of its people. However, besides its patriotic effect, there is another reason this beloved song is so well known: it is so hard to sing! Come to think of it, this may also be one good reason we usually hear it sung by professional vocalists during very important occasions such as major sports competitions. If nobody is around to sing it as skillfully as a professional, we typically play a recording of it to lead everybody in singing the powerful tune.

However, what happens if the recording does not work and no professional singer is around to belt it out? Well, one high school in Iowa knew just what to do. To open their high school girls' basketball game on February 1, North Polk intended to play a recording of the national anthem so people could join in and honor the country. However, an unexpected turn of events occurred when the recording did not play, and they were left to sing the song themselves. There was no professional singer around, but the game was certainly not going to start unless the national anthem was sung!

It was in this moment that everyone in the audience, the students as well as the referees and school staff, began to sing the national anthem on their own. It was such a glorious moment, because although a great many different voices were singing this difficult piece, the melody and the beauty of it were all the more powerful and moving. The emotion needed to bring the song to life was felt all the more by everyone in attendance as they belted out the beautiful lyrics together. When it reached the part proclaiming "O'er the land of the free and the home of the brave!" everyone was so caught up in the moment that it sounded like a seasoned chorale. Honestly, if you did not know that the singers were at a high school sports event, you would probably never guess that they were not professional musicians at all! It was that good.

The impromptu chorale of families, friends, and even the opposing team who gathered for a high school game ended with loud applause. It was certainly a fitting ending nobody in attendance would ever forget. What a way to start the game! You should absolutely see and hear what this Iowa high school has to offer!
import os, sys

from fig import *

config1 = r"""
[layout]
style layout

[node]
style rect
fontName $FONTNAME
fontSize $FONTSIZE
textBaselineCorrection $BASELINE_CORR
strokeWidth 3
roundingStyle screen
textPadX 22
textPadY 8

[connection]
style curve

[color]
style cycle
colorscheme "mint-examples3"
fontColorAuto no
fontColor #fff
"""

config2 = r"""
[layout]
style layout

[node]
style rect
fontName $FONTNAME
fontSize $FONTSIZE
textBaselineCorrection $BASELINE_CORR
strokeWidth 3
roundingStyle arc
cornerRadius 25
textPadX 22
textPadY 8

[connection]
style curve

[color]
style cycle
colorscheme "mint-examples"
fontColorAuto no
fontColor #fff
"""

data1 = {
    'toothsome': []
}

data2 = {
    'flittermice': []
}

scale = 0.8

trees = [
    create_tree(config1, data1),
    create_tree(config2, data2)
]

write_all_trees(trees, scale)
Forest Park's tax rate may change depending on the type of purchase. There is 1 out of 1 zip code in Forest Park that is charged city sales tax, for a ratio of 100%. There is also 1 out of 1 zip code in Forest Park that is required to charge a special sales tax, for a ratio of 100%.
# coding: utf8
# rc.py
# 10/8/2012 jichi
# Runtime resource locations
#
# All paths are using ascii encoding.
# Assume there are no unicode characters in the relative path.

from PySide.QtCore import Qt, QUrl
from PySide.QtGui import QImage, QPixmap, QIcon
from PySide.QtDeclarative import QDeclarativeImageProvider
from sakurakit.skdebug import derror
import rc

## Resource image provider ##

# See: http://www.lothlorien.com/kf6gpe/?p=234
class ResourceImageProvider(QDeclarativeImageProvider):

    PROVIDER_ID = 'rc'

    def __init__(self, type=QDeclarativeImageProvider.Pixmap):
        """
        @param type  int  QDeclarativeImageProvider.ImageType  either Pixmap or Image
        Use QPixmap as default, which renders faster than QImage
        """
        super(ResourceImageProvider, self).__init__(type)

    def requestImage(self, name, rsize, size):
        """@reimp @public
        @param[in]  providerId  unicode  unused
        @param[out]  rsize  QSize
        @param[in]  size  QSize
        @return  QImage not None

        virtual QImage requestImage(const QString &id, QSize *size, const QSize &requestedSize)
        """
        ret = QImage(rc.image_path(name))
        if ret.isNull():
            derror("failed to load image: '%s'" % name)
        elif ret.size() != size:
            ret = (ret.scaled(size, Qt.KeepAspectRatio, Qt.SmoothTransformation) if not size.isEmpty() else
                   ret.scaledToWidth(size.width(), Qt.SmoothTransformation) if size.width() > 0 else
                   ret.scaledToHeight(size.height(), Qt.SmoothTransformation) if size.height() > 0 else
                   ret)
        rsize.setWidth(ret.width())
        rsize.setHeight(ret.height())
        return ret

    def requestPixmap(self, name, rsize, size):
        """@reimp @public
        @param[in]  providerId  unicode  unused
        @param[out]  rsize  QSize
        @param[in]  size  QSize
        @return  QPixmap not None

        virtual QPixmap requestPixmap(const QString &id, QSize *size, const QSize &requestedSize)
        """
        ret = QPixmap(rc.image_path(name))
        if ret.isNull():
            derror("failed to load image: '%s'" % name)
        elif ret.size() != size:
            ret = (ret.scaled(size, Qt.KeepAspectRatio, Qt.SmoothTransformation) if not size.isEmpty() else
                   ret.scaledToWidth(size.width(), Qt.SmoothTransformation) if size.width() > 0 else
                   ret.scaledToHeight(size.height(), Qt.SmoothTransformation) if size.height() > 0 else
                   ret)
        rsize.setWidth(ret.width())
        rsize.setHeight(ret.height())
        return ret

## File image provider ##

# See: http://www.lothlorien.com/kf6gpe/?p=234
class FileImageProvider(QDeclarativeImageProvider):

    PROVIDER_ID = 'file'

    """
    Default icon size on Windows
    See: http://msdn.microsoft.com/en-us/library/ms997636.aspx
    """
    ICON_SIZE = 48, 48

    def __init__(self):
        """
        Use QPixmap as default, which renders faster than QImage
        """
        super(FileImageProvider, self).__init__(QDeclarativeImageProvider.Pixmap)

    def requestPixmap(self, path, rsize, size):
        """@reimp @public
        @param[in]  providerId  unicode  unused
        @param[out]  rsize  QSize
        @param[in]  size  QSize
        @return  QPixmap not None

        virtual QPixmap requestPixmap(const QString &id, QSize *size, const QSize &requestedSize)
        """
        icon = rc.file_icon(path)
        if icon.isNull():
            derror("failed to load image: '%s'" % path)
            ret = QPixmap()
        elif not size.isEmpty():
            ret = icon.pixmap(size)
        else:
            #sizes = icon.availableSizes(QIcon.Selected, QIcon.Off) # crash for executable
            ret = icon.pixmap(*FileImageProvider.ICON_SIZE)
        rsize.setWidth(ret.width())
        rsize.setHeight(ret.height())
        return ret

## URL image provider ##

class SpringImageProvider(QDeclarativeImageProvider):

    PROVIDER_ID = 'spring'

    def __init__(self):
        """
        Use QPixmap as default, which renders faster than QImage
        """
        super(SpringImageProvider, self).__init__(QDeclarativeImageProvider.Pixmap)

    def requestPixmap(self, path, rsize, size):
        """@reimp @public
        @param[in]  providerId  unicode  unused
        @param[out]  rsize  QSize
        @param[in]  size  QSize
        @return  QPixmap not None

        virtual QPixmap requestPixmap(const QString &id, QSize *size, const QSize &requestedSize)
        """
        ret = QPixmap(QUrl(path).toLocalFile())
        if ret.isNull():
            derror("failed to load image: '%s'" % path)
        elif size != ret.size() and not size.isEmpty() and not ret.size().isEmpty():
            if ret.width() * size.height() > ret.height() * size.width():
                ret = ret.scaledToHeight(min(800, size.height()), Qt.SmoothTransformation)
            else:
                w = 1000 if ret.width() > ret.height() else 600
                ret = ret.scaledToWidth(min(w, size.width()), Qt.SmoothTransformation)
        #elif size != ret.size():
        #    ret = (ret.scaled(size, Qt.KeepAspectRatio, Qt.SmoothTransformation) if not size.isEmpty() else
        #           ret.scaledToWidth(size.width(), Qt.SmoothTransformation) if size.width() > 0 else
        #           ret.scaledToHeight(size.height(), Qt.SmoothTransformation) if size.height() > 0 else
        #           ret)
        rsize.setWidth(ret.width())
        rsize.setHeight(ret.height())
        return ret

# EOF

## URL image provider ##
#class UrlImageProvider(QDeclarativeImageProvider):
#
#    PROVIDER_ID = 'url'
#
#    def __init__(self):
#        """
#        Use QPixmap as default, which renders faster than QImage
#        """
#        super(UrlImageProvider, self).__init__(QDeclarativeImageProvider.Pixmap)
#
#    def requestPixmap(self, path, rsize, size):
#        """@reimp @public
#        @param[in]  providerId  unicode  unused
#        @param[out]  rsize  QSize
#        @param[in]  size  QSize
#        @return  QPixmap not None
#
#        virtual QPixmap requestPixmap(const QString &id, QSize *size, const QSize &requestedSize)
#        """
#        ret = QPixmap(QUrl(path).toLocalFile())
#        if ret.isNull():
#            derror("failed to load image: '%s'" % path)
#        elif size != ret.size():
#            ret = (ret.scaled(size, Qt.KeepAspectRatio, Qt.SmoothTransformation) if not size.isEmpty() else
#                   ret.scaledToWidth(size.width(), Qt.SmoothTransformation) if size.width() > 0 else
#                   ret.scaledToHeight(size.height(), Qt.SmoothTransformation) if size.height() > 0 else
#                   ret)
#        rsize.setWidth(ret.width())
#        rsize.setHeight(ret.height())
#        return ret

## Filter image provider ##
#from qimp import qimp
#class FilterImageProvider(QDeclarativeImageProvider):
#
#    PROVIDER_ID = 'filter'
#
#    def __init__(self):
#        """
#        Use QPixmap as default, which renders faster than QImage
#        """
#        super(FilterImageProvider, self).__init__(QDeclarativeImageProvider.Pixmap)
#
#    def requestPixmap(self, path, rsize, size):
#        """@reimp @public
#        @param[in]  providerId  unicode  unused
#        @param[out]  rsize  QSize
#        @param[in]  size  QSize
#        @return  QPixmap not None
#
#        virtual QPixmap requestPixmap(const QString &id, QSize *size, const QSize &requestedSize)
#        """
#        ret = QPixmap(QUrl(path).toLocalFile())
#        if ret.isNull():
#            derror("failed to load image: '%s'" % path)
#        #elif size != ret.size():
#        elif size.width() < ret.width() or size.height() < ret.height(): # do increase size
#            ret = (ret.scaled(size, Qt.KeepAspectRatio, Qt.SmoothTransformation) if not size.isEmpty() else
#                   ret.scaledToWidth(size.width(), Qt.SmoothTransformation) if size.width() > 0 and size.width() < ret.width() else
#                   ret.scaledToHeight(size.height(), Qt.SmoothTransformation) if size.height() > 0 and size.height() < ret.height() else
#                   ret)
#        rsize.setWidth(ret.width())
#        rsize.setHeight(ret.height())
#        if ret and not ret.isNull():
#            qimp.gradientpixmap(ret)
#        return ret
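These providers only take effect once registered with a declarative engine. A minimal sketch under stated assumptions: the `view` variable and the QML snippet are illustrative and not part of the original module.

# Minimal registration sketch (illustrative, not from the original module).
# Assumes PySide 1 with the QtDeclarative/QtQuick1 stack used above.
from PySide.QtDeclarative import QDeclarativeView

view = QDeclarativeView()
engine = view.engine()

# The engine takes ownership of each provider on the C++ side.
engine.addImageProvider(ResourceImageProvider.PROVIDER_ID, ResourceImageProvider())
engine.addImageProvider(FileImageProvider.PROVIDER_ID, FileImageProvider())
engine.addImageProvider(SpringImageProvider.PROVIDER_ID, SpringImageProvider())

# QML then resolves sources of the form "image://<provider>/<id>", e.g.:
#   Image { source: "image://rc/app_icon" }
#   Image { source: "image://file/C:/Windows/notepad.exe" }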
ATTN: for the Transnistria, Tiraspol City and Cricova Winery wine-tasting excursion, the sequence of stops runs in reverse order. 2 excursions in one day! In the morning, after breakfast, we depart for one of the most fascinating wonders of the world - the Cricova cellars, founded in 1952. The basements were formed after limestone extraction from the mines. Later, winemakers turned the cellars into avenues and streets of wine, stored in the surrounding niches: Cabernet, Aligote, Sauvignon, etc. These avenues and streets have a total length of 120 km. The constant temperature of about 12 degrees and humidity that does not exceed 97-98% provide the most favorable conditions for the storage of 30 million liters of quality wine. Today, the wooden gate of the Cricova winery is open to everyone, and you have a rare chance to see the mysterious winemaking process up close and to taste the best Moldovan wines. The excursion includes: a 1.5-hour tour along the streets of the underground city, a visit to the production areas for vintage wines and classic champagne, the State Collection and the "Tasting Halls", and a tasting of 4 types of wine. Technical information: extent of the round-trip route - 170 km; duration 7-8 hours (including transfer time). If you wish, we can stop for dinner or lunch in one of the best restaurants in Tiraspol.
'''
Implementation of a Twisted Modbus Server
------------------------------------------
'''
from binascii import b2a_hex
from twisted.internet import protocol
from twisted.internet.protocol import ServerFactory

from pymodbus.constants import Defaults
from pymodbus.factory import ServerDecoder
from pymodbus.datastore import ModbusServerContext
from pymodbus.device import ModbusControlBlock
from pymodbus.device import ModbusAccessControl
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.transaction import ModbusSocketFramer, ModbusAsciiFramer
from pymodbus.interfaces import IModbusFramer
from pymodbus.exceptions import *
from pymodbus.pdu import ModbusExceptions as merror
from pymodbus.internal.ptwisted import InstallManagementConsole

#---------------------------------------------------------------------------#
# Logging
#---------------------------------------------------------------------------#
import logging
_logger = logging.getLogger(__name__)


#---------------------------------------------------------------------------#
# Modbus TCP Server
#---------------------------------------------------------------------------#
class ModbusTcpProtocol(protocol.Protocol):
    ''' Implements a modbus server in twisted '''

    def connectionMade(self):
        ''' Callback for when a client connects

        Note, since the protocol factory cannot be accessed from the
        protocol __init__, the client connection made is essentially our
        __init__ method.
        '''
        _logger.debug("Client Connected [%s]" % self.transport.getHost())
        self.framer = self.factory.framer(decoder=self.factory.decoder)

    def connectionLost(self, reason):
        ''' Callback for when a client disconnects

        :param reason: The client's reason for disconnecting
        '''
        _logger.debug("Client Disconnected: %s" % reason)

    def dataReceived(self, data):
        ''' Callback when we receive any data

        :param data: The data sent by the client
        '''
        _logger.debug(" ".join([hex(ord(x)) for x in data]))
        if not self.factory.control.ListenOnly:
            self.framer.processIncomingPacket(data, self._execute)

    def _execute(self, request):
        ''' Executes the request and returns the result

        :param request: The decoded request message
        '''
        try:
            context = self.factory.store[request.unit_id]
            response = request.execute(context)
        except Exception as ex:
            _logger.debug("Datastore unable to fulfill request %s" % ex)
            response = request.doException(merror.SlaveFailure)
        #self.framer.populateResult(response)
        response.transaction_id = request.transaction_id
        response.unit_id = request.unit_id
        self._send(response)

    def _send(self, message):
        ''' Send a request (string) to the network

        :param message: The unencoded modbus response
        '''
        self.factory.control.Counter.BusMessage += 1
        pdu = self.framer.buildPacket(message)
        _logger.debug('send: %s' % b2a_hex(pdu))
        return self.transport.write(pdu)


class ModbusServerFactory(ServerFactory):
    '''
    Builder class for a modbus server

    This also holds the server datastore so that it is persisted
    between connections
    '''

    protocol = ModbusTcpProtocol

    def __init__(self, store, framer=None, identity=None):
        ''' Overloaded initializer for the modbus factory

        If the identify structure is not passed in, the ModbusControlBlock
        uses its own empty structure.

        :param store: The ModbusServerContext datastore
        :param framer: The framer strategy to use
        :param identity: An optional identify structure
        '''
        self.decoder = ServerDecoder()
        self.framer = framer or ModbusSocketFramer
        self.store = store or ModbusServerContext()
        self.control = ModbusControlBlock()
        self.access = ModbusAccessControl()

        if isinstance(identity, ModbusDeviceIdentification):
            self.control.Identity.update(identity)


#---------------------------------------------------------------------------#
# Modbus UDP Server
#---------------------------------------------------------------------------#
class ModbusUdpProtocol(protocol.DatagramProtocol):
    ''' Implements a modbus udp server in twisted '''

    def __init__(self, store, framer=None, identity=None):
        ''' Overloaded initializer for the modbus factory

        If the identify structure is not passed in, the ModbusControlBlock
        uses its own empty structure.

        :param store: The ModbusServerContext datastore
        :param framer: The framer strategy to use
        :param identity: An optional identify structure
        '''
        framer = framer or ModbusSocketFramer
        self.framer = framer(decoder=ServerDecoder())
        self.store = store or ModbusServerContext()
        self.control = ModbusControlBlock()
        self.access = ModbusAccessControl()

        if isinstance(identity, ModbusDeviceIdentification):
            self.control.Identity.update(identity)

    def datagramReceived(self, data, addr):
        ''' Callback when we receive any data

        :param data: The data sent by the client
        '''
        _logger.debug("Client Connected [%s:%s]" % addr)
        _logger.debug(" ".join([hex(ord(x)) for x in data]))
        if not self.control.ListenOnly:
            continuation = lambda request: self._execute(request, addr)
            self.framer.processIncomingPacket(data, continuation)

    def _execute(self, request, addr):
        ''' Executes the request and returns the result

        :param request: The decoded request message
        '''
        try:
            context = self.store[request.unit_id]
            response = request.execute(context)
        except Exception as ex:
            _logger.debug("Datastore unable to fulfill request %s" % ex)
            response = request.doException(merror.SlaveFailure)
        #self.framer.populateResult(response)
        response.transaction_id = request.transaction_id
        response.unit_id = request.unit_id
        self._send(response, addr)

    def _send(self, message, addr):
        ''' Send a request (string) to the network

        :param message: The unencoded modbus response
        :param addr: The (host, port) to send the message to
        '''
        self.control.Counter.BusMessage += 1
        pdu = self.framer.buildPacket(message)
        _logger.debug('send: %s' % b2a_hex(pdu))
        return self.transport.write(pdu, addr)


#---------------------------------------------------------------------------#
# Starting Factories
#---------------------------------------------------------------------------#
def StartTcpServer(context, identity=None):
    ''' Helper method to start the Modbus Async TCP server

    :param context: The server data context
    :param identify: The server identity to use (default empty)
    '''
    from twisted.internet import reactor

    _logger.info("Starting Modbus TCP Server on %s" % Defaults.Port)
    framer = ModbusSocketFramer
    factory = ModbusServerFactory(context, framer, identity)
    InstallManagementConsole({'factory': factory})
    reactor.listenTCP(Defaults.Port, factory)
    reactor.run()


def StartUdpServer(context, identity=None):
    ''' Helper method to start the Modbus Async Udp server

    :param context: The server data context
    :param identify: The server identity to use (default empty)
    '''
    from twisted.internet import reactor

    _logger.info("Starting Modbus UDP Server on %s" % Defaults.Port)
    framer = ModbusSocketFramer
    server = ModbusUdpProtocol(context, framer, identity)
    reactor.listenUDP(Defaults.Port, server)
    reactor.run()


def StartSerialServer(context, identity=None, framer=ModbusAsciiFramer, **kwargs):
    ''' Helper method to start the Modbus Async Serial server

    :param context: The server data context
    :param identify: The server identity to use (default empty)
    :param framer: The framer to use (default ModbusAsciiFramer)
    '''
    from twisted.internet import reactor
    from twisted.internet.serialport import SerialPort

    _logger.info("Starting Modbus Serial Server on %s" % kwargs['device'])
    factory = ModbusServerFactory(context, framer, identity)
    protocol = factory.buildProtocol(None)
    handle = SerialPort(protocol, kwargs['device'], reactor, Defaults.Baudrate)
    reactor.run()


#---------------------------------------------------------------------------#
# Exported symbols
#---------------------------------------------------------------------------#
__all__ = [
    "StartTcpServer", "StartUdpServer", "StartSerialServer",
]
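A minimal sketch of serving a small register map with the helpers above, using the datastore classes from the same pymodbus generation; the register values are placeholders.

# Illustrative Modbus/TCP server setup for the helpers defined above.
from pymodbus.datastore import (ModbusSequentialDataBlock,
                                ModbusSlaveContext, ModbusServerContext)

store = ModbusSlaveContext(
    di=ModbusSequentialDataBlock(0, [0] * 100),    # discrete inputs
    co=ModbusSequentialDataBlock(0, [0] * 100),    # coils
    hr=ModbusSequentialDataBlock(0, [17] * 100),   # holding registers
    ir=ModbusSequentialDataBlock(0, [0] * 100))    # input registers
context = ModbusServerContext(slaves=store, single=True)

# Blocks inside reactor.run() until the process is interrupted.
StartTcpServer(context)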
1779 (MDCCLXXIX) was a common year starting on Friday of the Gregorian calendar and a common year starting on Tuesday of the Julian calendar, the 1779th year of the Common Era (CE) and Anno Domini (AD) designations, the 779th year of the 2nd millennium, the 79th year of the 18th century, and the 10th and last year of the 1770s decade. As of the start of 1779, the Gregorian calendar was 11 days ahead of the Julian calendar, which remained in localized use until 1923.

January 11 – British troops surrender to the Marathas in Wadgaon, India, and are forced to return all territories acquired since 1773.
January 11 – Ching-Thang Khomba is crowned King of Manipur.
January 22 – American Revolutionary War – Claudius Smith is hanged at Goshen, Orange County, New York for supposed acts of terrorism upon the people of the surrounding communities.
January 29 – After a second petition for partition from its residents, the North Carolina General Assembly abolishes Bute County, North Carolina (established 1764) by dividing it and naming the northern portion Warren County (for Revolutionary War hero Joseph Warren), and the southern portion Franklin County (for Benjamin Franklin). The General Assembly also establishes Warrenton (also named for Joseph Warren) to be the seat of Warren County, and Louisburg (named for Louis XVI of France) to be the seat of Franklin County.
February 12 – Lieutenant Colonel Francisco Bouligny arrives with Malagueño colonists at Bayou Teche, to establish the city of New Iberia, Louisiana.
February 14 – Captain James Cook is killed on the Sandwich Islands, on his third voyage.
March 10 – The Treaty of Aynalıkavak is signed between Ottoman Turkey and the Russian Empire, regarding the Crimean Khanate.
April 12 – Spain and France secretly sign the Convention of Aranjuez, with Spain joining an alliance against Great Britain in return for France's pledge to recover all Spanish territory lost to the British.
May 13 – War of the Bavarian Succession – Russian and French mediators at the Congress of Teschen negotiate an end to the war. In the agreement Austria receives a part of the Bavarian territory (the Innviertel), and relinquishes the rest.
June 1 – American Revolutionary War – Benedict Arnold is court-martialed for malfeasance in his treatment of government property.
June 16 – American Revolutionary War – In support of the U.S., Spain declares war on Britain.
July 16 – American Revolutionary War – United States forces, led by General Anthony Wayne, capture Stony Point, New York from British troops.
July 16 – The Declaratory Rescript of the Illyrian Nation is issued in order to regulate the organization of the Eastern Orthodox Church in the Habsburg Monarchy.
July 20 – Tekle Giyorgis I begins the first of his five reigns as Emperor of Ethiopia.
July 22 – Battle of Minisink: The Goshen Militia is destroyed by Joseph Brant's forces.
July 24 – American Revolutionary War – American forces, led by Commodore Dudley Saltonstall, launch the Penobscot Expedition in what is now Castine, Maine, resulting in the worst naval defeat in U.S. history until Pearl Harbor.
July – The Great Siege of Gibraltar (fourteenth and last military siege) begins: an action by French and Spanish forces to wrest control of Gibraltar from the established British garrison, and the longest siege endured by the British Armed Forces. The garrison, led by George Augustus Eliott (later 1st Baron Heathfield of Gibraltar), survives all attacks and a blockade of supplies.
Battle of Baton Rouge – Spanish troops under Bernardo de Gálvez capture the city from the British.
September 14–15 – American Revolutionary War – Little Beard's Town, a loyalist stronghold, is burnt by the Sullivan Expedition.
September 23 – American Revolutionary War – Battle of Flamborough Head – The American ship Bonhomme Richard, commanded by John Paul Jones, engages the British ship HMS Serapis. The Bonhomme Richard sinks, but the Americans board the Serapis and other vessels, and are victorious.
October 4 – The Fort Wilson Riot against James Wilson and others in Philadelphia takes place.
November 2 – The North Carolina General Assembly carves a new county from Dobbs County, North Carolina and names it Wayne County, in honor of United States General Anthony Wayne.
December 13 – Alexandre, Vicomte de Beauharnais marries Joséphine Tascher.
December 22 – American Revolutionary War – Capture of Savannah: British forces under Archibald Campbell take the city of Savannah, Georgia.
December 25 – Fort Nashborough (later to become Nashville, Tennessee) is founded by James Robertson.
December 31 – Affair of Fielding and Bylandt: Following a brief naval engagement between the British and Dutch off the Isle of Wight, the Dutch merchantmen and naval vessels are captured and taken to Portsmouth, England.
The Iron Bridge is erected across the River Severn in Shropshire, the world's first bridge built entirely of cast iron; it will open to traffic on January 1, 1781.
The spinning mule is perfected by Lancashire inventor Samuel Crompton.
Boulton and Watt's Smethwick Engine, now the oldest working engine in the world, is brought into service (May).
The city of Tampere, Finland, is founded.
A joint Spanish-Portuguese survey of the Amazon basin begins to determine the boundary between the colonial possessions in South America; it continues until 1795.
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector


def addTemplate(core):
    mobileTemplate = MobileTemplate()

    mobileTemplate.setCreatureName('tusken_war_master')
    mobileTemplate.setLevel(32)
    mobileTemplate.setDifficulty(Difficulty.ELITE)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(True)
    mobileTemplate.setScale(1)
    mobileTemplate.setSocialGroup("tusken raider")
    mobileTemplate.setAssistRange(6)
    mobileTemplate.setStalker(True)
    mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    templates = Vector()
    templates.add('object/mobile/shared_tusken_raider.iff')
    mobileTemplate.setTemplates(templates)

    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff',
                                    WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    attacks = Vector()
    mobileTemplate.setDefaultAttack('meleehit')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('tusken_warmaster', mobileTemplate)
    return
Our asbestos-free, OEM- and ISO-certified linings offer value for money in terms of cost per kilometer, great durability in severe working and temperature conditions, and excellent compatibility with drums in buses, trucks, trailers and semi-trailers. Our relining services ensure that all fitments are done with extreme care and precision, enabling the product to give its maximum life expectancy. Monitoring of drums, return springs, correct brake adjustments and associated parts is often discussed with customers to ensure the mechanisms are in perfect working condition.
import cPickle as pickle
import csv
import gzip
import numpy as np
import os


def writepickle(obj, filepath, protocol=-1):
    """
    I write your python object obj into a pickle file at filepath.
    If filepath ends with .gz, I'll use gzip to compress the pickle.
    Leave protocol = -1 : I'll use the latest binary protocol of pickle.
    """
    if os.path.splitext(filepath)[1] == ".gz":
        pkl_file = gzip.open(filepath, 'wb')
    else:
        pkl_file = open(filepath, 'wb')
    pickle.dump(obj, pkl_file, protocol)
    pkl_file.close()


def readpickle(filepath):
    """
    I read a pickle file and return whatever object it contains.
    If the filepath ends with .gz, I'll unzip the pickle file.
    """
    if os.path.splitext(filepath)[1] == ".gz":
        pkl_file = gzip.open(filepath, 'rb')
    else:
        pkl_file = open(filepath, 'rb')
    obj = pickle.load(pkl_file)
    pkl_file.close()
    return obj


def find_nearest(array, value):
    """
    Find the nearest value in an array
    """
    idx = (np.abs(array - value)).argmin()
    return idx


def mkdir(somedir):
    """
    A wrapper around os.makedirs.
    :param somedir: a name or path to a directory which I should make.
    """
    if not os.path.isdir(somedir):
        os.makedirs(somedir)


def classify(pred, threshold):
    classification = np.zeros_like(pred)
    classification[pred >= threshold] = 1
    return classification


def load_bmg(fname, main_sequence):
    data = []
    with open(fname + '.dat') as observability_file:
        observability_data = csv.reader(observability_file, delimiter="\t")
        for row in observability_data:
            # if line is empty, skip otherwise filter out the blank
            dline = row[0].split()
            if len(dline) == 17 and not dline[6].isdigit():
                dline.insert(6, '0')
            if dline[0][0] == '#':
                continue
            data.append(np.asarray(dline, dtype=np.float))
    data = np.asarray(data)
    if main_sequence:
        data = data[data[:, 2] == 5]  # Takes only main sequence stars
    return data


def rdisk(radius, norien=25, nrad=35):
    orientations = np.linspace(0., np.pi * 2., norien, endpoint=False)
    dtheta = (orientations[:2] / 2.)[-1]
    nrad = float(nrad)
    radii = (np.arange(nrad) / (nrad - 1))**2 * float(radius)

    coord = []
    seen_nought = False
    for ir, r in enumerate(radii):
        if r == 0:
            if not seen_nought:
                coord.append([0., 0.])
                seen_nought = True
            continue
        for orientation in orientations:
            x = np.cos(orientation + dtheta * (ir % 2)) * r
            y = np.sin(orientation + dtheta * (ir % 2)) * r
            coord.append([x, y])
    return np.asarray(coord)
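A quick round-trip exercise for the helpers above; the file path and toy arrays are illustrative only.

# Illustrative round-trip for the pickle helpers above (path is an example).
demo = {'stars': np.arange(5), 'label': 'demo'}
writepickle(demo, '/tmp/demo.pkl.gz')       # gzip-compressed because of the .gz suffix
restored = readpickle('/tmp/demo.pkl.gz')
assert restored['label'] == 'demo'

# find_nearest returns the index of the closest entry, not the value itself.
idx = find_nearest(np.array([0.0, 0.5, 1.0]), 0.6)   # -> 1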
Our 3-bedroom units would suit a larger family. Each has three separate bedrooms, two on the ground floor and the main bedroom on the upper floor. The units are spacious, tastefully furnished and ideally suited to family accommodation.
def check_categories(lines):
    '''
    find out how many row and col categories are available
    '''
    # count the number of row categories
    rcat_line = lines[0].split('\t')

    # calc the number of row names and categories
    num_rc = 0
    found_end = False

    # skip first tab
    for inst_string in rcat_line[1:]:
        if inst_string == '':
            if found_end is False:
                num_rc = num_rc + 1
        else:
            found_end = True

    max_rcat = 15
    if max_rcat > len(lines):
        max_rcat = len(lines) - 1

    num_cc = 0
    for i in range(max_rcat):
        ccat_line = lines[i + 1].split('\t')

        # make sure that line has length greater than one to prevent false cats from
        # trailing new lines at end of matrix
        if ccat_line[0] == '' and len(ccat_line) > 1:
            num_cc = num_cc + 1

    num_labels = {}
    num_labels['row'] = num_rc + 1
    num_labels['col'] = num_cc + 1

    return num_labels


def dict_cat(net, define_cat_colors=False):
    '''
    make a dictionary of node-category associations
    '''
    # print('---------------------------------')
    # print('---- dict_cat: before setting cat colors')
    # print('---------------------------------\n')
    # print(define_cat_colors)
    # print(net.viz['cat_colors'])

    net.persistent_cat = True

    for inst_rc in ['row', 'col']:
        inst_keys = list(net.dat['node_info'][inst_rc].keys())
        all_cats = [x for x in inst_keys if 'cat-' in x]

        for inst_name_cat in all_cats:
            dict_cat = {}
            tmp_cats = net.dat['node_info'][inst_rc][inst_name_cat]
            tmp_nodes = net.dat['nodes'][inst_rc]

            for i in range(len(tmp_cats)):
                inst_cat = tmp_cats[i]
                inst_node = tmp_nodes[i]

                if inst_cat not in dict_cat:
                    dict_cat[inst_cat] = []

                dict_cat[inst_cat].append(inst_node)

            tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
            net.dat['node_info'][inst_rc][tmp_name] = dict_cat

    # merge with old cat_colors by default
    cat_colors = net.viz['cat_colors']

    if define_cat_colors == True:
        cat_number = 0

        for inst_rc in ['row', 'col']:
            inst_keys = list(net.dat['node_info'][inst_rc].keys())
            all_cats = [x for x in inst_keys if 'cat-' in x]

            for cat_index in all_cats:
                if cat_index not in cat_colors[inst_rc]:
                    cat_colors[inst_rc][cat_index] = {}

                cat_names = sorted(list(set(net.dat['node_info'][inst_rc][cat_index])))

                # loop through each category name and assign a color
                for tmp_name in cat_names:
                    # using the same rules as the front-end to define cat_colors
                    inst_color = get_cat_color(cat_number + cat_names.index(tmp_name))

                    check_name = tmp_name

                    # check if category is string type and non-numeric
                    try:
                        float(check_name)
                        is_string_cat = False
                    except:
                        is_string_cat = True

                    if is_string_cat == True:
                        # check for default non-color
                        if ': ' in check_name:
                            check_name = check_name.split(': ')[1]

                        # if check_name == 'False' or check_name == 'false':
                        if 'False' in check_name or 'false' in check_name:
                            inst_color = '#eee'

                        if 'Not ' in check_name:
                            inst_color = '#eee'

                    # print('cat_colors')
                    # print('----------')
                    # print(cat_colors[inst_rc][cat_index])

                    # do not overwrite old colors
                    if tmp_name not in cat_colors[inst_rc][cat_index] and is_string_cat:
                        cat_colors[inst_rc][cat_index][tmp_name] = inst_color
                        # print('overwrite: ' + tmp_name + ' -> ' + str(inst_color))

                cat_number = cat_number + 1

    net.viz['cat_colors'] = cat_colors

    # print('after setting cat_colors')
    # print(net.viz['cat_colors'])
    # print('======================================\n\n')


def calc_cat_clust_order(net, inst_rc):
    '''
    cluster category subset of data
    '''
    from .__init__ import Network
    from copy import deepcopy
    from . import calc_clust, run_filter

    inst_keys = list(net.dat['node_info'][inst_rc].keys())
    all_cats = [x for x in inst_keys if 'cat-' in x]

    if len(all_cats) > 0:

        for inst_name_cat in all_cats:

            tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
            dict_cat = net.dat['node_info'][inst_rc][tmp_name]

            unordered_cats = dict_cat.keys()
            ordered_cats = order_categories(unordered_cats)

            # this is the ordering of the columns based on their category, not
            # including their clustering ordering within category
            all_cat_orders = []
            tmp_names_list = []
            for inst_cat in ordered_cats:

                inst_nodes = dict_cat[inst_cat]
                tmp_names_list.extend(inst_nodes)

                # cat_net = deepcopy(Network())
                # cat_net.dat['mat'] = deepcopy(net.dat['mat'])
                # cat_net.dat['nodes'] = deepcopy(net.dat['nodes'])
                # cat_df = cat_net.dat_to_df()
                # sub_df = {}
                # if inst_rc == 'col':
                #     sub_df['mat'] = cat_df['mat'][inst_nodes]
                # elif inst_rc == 'row':
                #     # need to transpose df
                #     cat_df['mat'] = cat_df['mat'].transpose()
                #     sub_df['mat'] = cat_df['mat'][inst_nodes]
                #     sub_df['mat'] = sub_df['mat'].transpose()
                # # filter matrix before clustering
                # ###################################
                # threshold = 0.0001
                # sub_df = run_filter.df_filter_row_sum(sub_df, threshold)
                # sub_df = run_filter.df_filter_col_sum(sub_df, threshold)
                # # load back to dat
                # cat_net.df_to_dat(sub_df)
                # cat_mat_shape = cat_net.dat['mat'].shape
                # print('***************')
                # try:
                #     if cat_mat_shape[0] > 1 and cat_mat_shape[1] > 1 and all_are_numbers == False:
                #         calc_clust.cluster_row_and_col(cat_net, 'cos')
                #         inst_cat_order = cat_net.dat['node_info'][inst_rc]['clust']
                #     else:
                #         inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
                # except:
                #     inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
                # prev_order_len = len(all_cat_orders)
                # # add prev order length to the current order number
                # inst_cat_order = [i + prev_order_len for i in inst_cat_order]
                # all_cat_orders.extend(inst_cat_order)

            # # generate ordered list of row/col names, which will be used to
            # # assign the order to specific nodes
            # names_clust_list = [x for (y, x) in sorted(zip(all_cat_orders,
            #                                                tmp_names_list))]
            names_clust_list = tmp_names_list

            # calc category-cluster order
            final_order = []

            for i in range(len(net.dat['nodes'][inst_rc])):

                inst_node_name = net.dat['nodes'][inst_rc][i]
                inst_node_num = names_clust_list.index(inst_node_name)

                final_order.append(inst_node_num)

            inst_index_cat = inst_name_cat.replace('-', '_') + '_index'
            net.dat['node_info'][inst_rc][inst_index_cat] = final_order


def order_categories(unordered_cats):
    '''
    If categories are strings, then simple ordering is fine.
    If categories are values then I'll need to order based on their values.
    The final ordering is given as the original categories (including titles)
    in an ordered list.
    '''
    no_titles = remove_titles(unordered_cats)

    all_are_numbers = check_all_numbers(no_titles)

    if all_are_numbers:
        ordered_cats = order_cats_based_on_values(unordered_cats, no_titles)
    else:
        ordered_cats = sorted(unordered_cats)

    return ordered_cats


def order_cats_based_on_values(unordered_cats, values_list):
    import pandas as pd

    try:
        # convert values_list to values
        values_list = [float(i) for i in values_list]

        inst_series = pd.Series(data=values_list, index=unordered_cats)
        inst_series.sort_values(inplace=True)

        ordered_cats = inst_series.index.tolist()

        # ordered_cats = unordered_cats
    except:
        # keep default ordering if error occurs
        print('error sorting cats based on values ')
        ordered_cats = unordered_cats

    return ordered_cats


def check_all_numbers(no_titles):
    all_numbers = True
    for tmp in no_titles:
        if is_number(tmp) == False:
            all_numbers = False

    return all_numbers


def remove_titles(cats):
    from copy import deepcopy

    # check if all have titles
    ###########################
    all_have_titles = True

    for inst_cat in cats:
        if is_number(inst_cat) == False:
            if ': ' not in inst_cat:
                all_have_titles = False
        else:
            all_have_titles = False

    if all_have_titles:
        no_titles = cats
        no_titles = [i.split(': ')[1] for i in no_titles]
    else:
        no_titles = cats

    return no_titles


def is_number(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def get_cat_color(cat_num):
    all_colors = [
        "#393b79", "#aec7e8", "#ff7f0e", "#ffbb78", "#98df8a", "#bcbd22",
        "#404040", "#ff9896", "#c5b0d5", "#8c564b", "#1f77b4", "#5254a3",
        "#FFDB58", "#c49c94", "#e377c2", "#7f7f7f", "#2ca02c", "#9467bd",
        "#dbdb8d", "#17becf", "#637939", "#6b6ecf", "#9c9ede", "#d62728",
        "#8ca252", "#8c6d31", "#bd9e39", "#e7cb94", "#843c39", "#ad494a",
        "#d6616b", "#7b4173", "#a55194", "#ce6dbd", "#de9ed6"]

    inst_color = all_colors[cat_num % len(all_colors)]

    return inst_color


def dendro_cats(net, axis, dendro_level):

    if axis == 0:
        axis = 'row'
    if axis == 1:
        axis = 'col'

    dendro_level = str(dendro_level)
    dendro_level_name = dendro_level

    if len(dendro_level) == 1:
        dendro_level = '0' + dendro_level

    df = net.export_df()

    if axis == 'row':
        old_names = df.index.tolist()
    elif axis == 'col':
        old_names = df.columns.tolist()

    if 'group' in net.dat['node_info'][axis]:
        inst_groups = net.dat['node_info'][axis]['group'][dendro_level]

        new_names = []

        for i in range(len(old_names)):
            inst_name = old_names[i]
            group_cat = 'Group ' + str(dendro_level_name) + ': cat-' + str(inst_groups[i])
            inst_name = inst_name + (group_cat,)
            new_names.append(inst_name)

        if axis == 'row':
            df.index = new_names
        elif axis == 'col':
            df.columns = new_names

        net.load_df(df)

    else:
        print('please cluster, using make_clust, to define dendrogram groups before running dendro_cats')


def add_cats(net, axis, cat_data):
    try:
        df = net.export_df()

        if axis == 'row':
            labels = df.index.tolist()
        elif axis == 'col':
            labels = df.columns.tolist()

        if 'title' in cat_data:
            inst_title = cat_data['title']
        else:
            inst_title = 'New Category'

        all_cats = cat_data['cats']

        # loop through all labels
        new_labels = []
        for inst_label in labels:

            if type(inst_label) is tuple:
                check_name = inst_label[0]
                found_tuple = True
            else:
                check_name = inst_label
                found_tuple = False

            if ': ' in check_name:
                check_name = check_name.split(': ')[1]

            # default to False for found cat, overwrite if necessary
            found_cat = inst_title + ': False'

            # check all categories in cats
            for inst_cat in all_cats:
                inst_names = all_cats[inst_cat]

                if check_name in inst_names:
                    found_cat = inst_title + ': ' + inst_cat

            # add category to label
            if found_tuple is True:
                new_label = inst_label + (found_cat,)
            else:
                new_label = (inst_label, found_cat)

            new_labels.append(new_label)

        # add labels back to DataFrame
        if axis == 'row':
            df.index = new_labels
        elif axis == 'col':
            df.columns = new_labels

        net.load_df(df)

    except:
        print('error adding new categories')
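A small illustration of how check_categories() counts categories and how get_cat_color() cycles through its palette; the tab-separated lines below are made up for the example.

# Hypothetical matrix header: one row-category column and one column-category
# row, in the tab-separated layout check_categories() expects.
lines = [
    '\t\tGene-1\tGene-2',          # blank cells reserve the row-category column
    '\tTissue\tliver\tbrain',      # a column-category row starts with a blank cell
    'Cell-A\ttype: X\t1.0\t2.0',
    'Cell-B\ttype: Y\t3.0\t4.0',
]
print(check_categories(lines))     # -> {'row': 2, 'col': 2}

# Colors repeat once the 35-color palette is exhausted.
print(get_cat_color(0))            # -> '#393b79'
print(get_cat_color(35))           # -> '#393b79' again (35 % 35 == 0)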
The OCC crew builds one of their fastest bikes ever, but a problem with the supercharger threatens the schedule. Junior races to complete a complicated bike for a client. Father and son find it hard to let go of the past while restoring a bike together.
from pypov.pov import Texture, Pigment, Object, Cylinder, Merge
from pypov.pov import Finish, Box, Cone, Sphere
from pypov.pov import Union, Difference
from pypov.colors import Colors
from lib.base import five_by_five_corner
from lib.textures import cross_hatch_2, wall_texture_1
from lib.metadata import Metadata
from lib.util import float_range


def full_5x5_010_info():
    return Metadata("Basic roundish room", "f10",
                    description="Basic four entrance room",
                    block_type="full",
                    bottom=0, top=20,
                    size="5x5",
                    repeatable=True,
                    fully_connected=True,
                    dead_ends=False,
                    entrance=False,
                    has_rooms=True,
                    passage_type="hewn",
                    wet=False,
                    multi_level=False,
                    keywords=['basic', 'room', 'roundish'])


def full_5x5_010(rotate=(0, 0, 0), translate=(0, 0, 0), detail_level=1,
                 cross_hatch_texture=cross_hatch_2):
    """Basic roundish 5x5 room geomorph with four entrances."""
    geomorph = Union(
        Difference(
            Object(five_by_five_corner(), cross_hatch_texture),
            Union(
                Box((-2.5, 10, 26), (2.5, 21, -26)),
                Box((26, 10.0001, -2.5), (-26, 21, 2.5)),
                Box((-8, 10.00001, -12), (8, 21, 12)),
                Box((-12, 10.00002, -8), (12, 21, 8)),
                Cylinder((-8, 10.0003, -8), (-8, 21, -8), 4),
                Cylinder((8, 10.0004, -8), (8, 21, -8), 4),
                Cylinder((-8, 10.0005, 8), (-8, 21, 8), 4),
                Cylinder((8, 10.0006, 8), (8, 21, 8), 4),
                wall_texture_1
            ),
        ),
        translate=translate,
        rotate=rotate
    )
    return geomorph
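One way a tile like this might be written out; pypov's exact serialization entry point is not shown in this module, so the str() call below is an assumption about its object model.

# Illustrative only: metadata lookup plus writing the tile to an include file.
info = full_5x5_010_info()
tile = full_5x5_010(rotate=(0, 90, 0))

# Assumption: pypov scene objects render themselves to POV-Ray SDL via str().
with open('full_5x5_010.inc', 'w') as f:
    f.write(str(tile))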
Our new Barnabas font is the result of our Dark Shadows font design project. It's an original font inspired by the original titles for the Dark Shadows television show, but updated for a more sophisticated modern audience with the upcoming release of Tim Burton's Dark Shadows movie in mind. It combines gothic capital letters with antiqued Latin small caps reminiscent of the original titles, but is much more stylish overall. The name of the font was picked by voters on this page, who preferred Barnabas (the first name of main character Barnabas Collins) to several alternatives. The sample graphic features a picture of the Corey Mansion, which is the model for Collinwood in the TV series. It's also the first new font to include the OpenType version as one of the standard formats at no additional charge, which will be our practice from here on out. You can try the DEMO version of Barnabas for free with a limited character set, or you can ORDER the full version for only $24 online and download it right away.
""" Extract feature batteries from gauss pyramids """ import cv from utils import saveIm from mods import * from cpy import * def stage(lum, sat, rg, by): lumc = contrast(lum) lumt = contrast(lumc, 251) sats = smooth(sat) satc = contrast(sat) satt = contrast(satc, 251) rgc = contrast(rg) rgt = contrast(rgc, 251) byc = contrast(by) byt = contrast(byc, 251) sob = sobel(lum) sobs = smooth(sob) lums = smooth(lum) rgs = smooth(rg) bys = smooth(by) id0, id1, id2 = intdim(lum) idX = add(zscale(id0), zscale(id2)) return dict(lumc=lumc, lumt=lumt, satc=satc, satt=satt, rgc=rgc, rgt=rgt, byc=byc, byt=byt, sobs=sobs, lums=lums, id0=id0, id1=id1, id2=id2, rgs=rgs, sats=sats, bys=bys, idX=idX,) def noscale(indict): return indict def zscaledict(indict): return dict((n, zscale(m)) for n, m in indict.items()) def histeqdict(indict): def eq(inmat): m = zscale(inmat) return equalize(m) return dict((n, eq(m)) for n, m in indict.items()) def pyramid(lsrb, count=3, scaler=noscale): """ run stage in a downwards pyramid for ``count`` times, scale each map with ``scaler``, return list with one dict per pyramid level """ features = [scaler(stage(*lsrb))] if count == 1: return features lsrb = list(pyrsdown(*lsrb)) features += pyramid(lsrb, count - 1, scaler) return features def base(im, layers): """make sure im's dimensions are multiples of 2**layers""" mod = 2 ** layers if im.width % mod != 0 or im.height % mod != 0: im = cv.GetSubRect(im, ( 0, 0, im.width - im.width % mod, im.height - im.height % mod,)) return cv.GetImage(im) def extract(image, pyr_levels=3, scaler=zscaledict): """extract features from ``image``""" image = base(image, pyr_levels) lsrb = colorsplit(image) return pyramid(lsrb, pyr_levels, scaler=scaler)
- Attention will likely turn towards Fri's 3Q GDP growth estimates, which could hit a 3-year high of >5%, China's manufacturing and services PMI readings, a slew of US data, FOMC meeting minutes, as well as the start of the US corporate earnings season.
- Signed agreements to sell nine Pacific Class 400 drilling rigs to Borr Drilling for US$1.3b ($1.77b) plus a market-based fee.
- The rigs include all six rigs from previously-terminated contracts, and will be delivered over a 14-month period from 4Q17 to 1Q19.
- While the transaction will result in a loss of $15m, it will significantly improve SMM's liquidity position.
- Last traded at 45.7x forward P/E.
- Acquiring a 50% stake in the proposed manager of Keppel-KBS US REIT, for US$27.5m.
- The move is in line with its plans to carry out an IPO for its portfolio of US commercial assets on SGX, comprising 11 office assets, with a mandate to invest in a diversified portfolio of income-producing assets in the US.
- Last traded at 14.8x forward P/E and 1x P/B.
- Collaborating with Air India Engineering Services to provide MRO services in India.
- Last traded at 20.9x forward P/E.
- Proposed placement of 221.6m new shares (25% of share capital) to prominent investor Sam Goi at 5¢ each.
- Net proceeds of $11m are earmarked for business development.
- Completed the disposal of the property at 16 Tuas Avenue 20 to JTC for $3.1m.
- The disposal will result in a disposal gain of $1.6m.
- Last traded at 14.9x trailing P/E.
- Entered into a conditional agreement to dispose of loss-making subsidiary Toko Construction for $1m.
- The divestment is undertaken as part of a restructuring and will result in a disposal gain of $1m, to be used for working capital.
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound
from rest_framework import viewsets, permissions, filters
from rest_framework.decorators import api_view, permission_classes
from korjournal.models import OdometerSnap, OdometerImage
from korjournal.serializers import OdometerSnapSerializer, OdometerImageSerializer
from korjournal.permissions import IsOwner, AnythingGoes, DenyAll, IsDriver
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Q
from django.http.request import RawPostDataException
from django.db import IntegrityError
from django.utils import timezone
from datetime import timedelta
from dateutil import tz, parser
import cv2
import subprocess
import sys
import os


class OdometerImageViewSet(viewsets.ModelViewSet):
    serializer_class = OdometerImageSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwner, IsDriver)

    def runtess(self, imgfile):
        ocr = subprocess.run(
            ["/usr/bin/tesseract", imgfile, "stdout", "nobatch", "digits"],
            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
            universal_newlines=True).stdout
        try:
            newodokm = int(ocr.replace(" ", ""))
            return newodokm
        except ValueError:
            return 0

    def do_ocr(self, imgfile, lim_max, lim_min=0):
        img = cv2.imread(imgfile, 0)
        height, width = img.shape
        x1 = 0
        y1 = 0
        xleft = int(width * 0.17)
        xright = int(width * 0.83)
        ybottom = int(height * 0.83)
        ytop = int(height * 0.17)
        xmiddle1 = int(width * 0.07)
        xmiddle2 = int(width * 0.93)
        ymiddle1 = int(height * 0.07)
        ymiddle2 = int(height * 0.93)
        x2 = width
        y2 = height
        crops = [
            [y1, ybottom, xleft, x2],
            [ymiddle1, ymiddle2, xleft, x2],
            [ytop, y2, xleft, x2],
            [ytop, y2, x1, xright],
            [ymiddle1, ymiddle2, x1, xright],
            [y1, ybottom, x1, xright],
            [ymiddle1, ymiddle2, xmiddle1, xmiddle2]
        ]
        guesses = [0, 0, 0, 0, 0]
        bestguess = self.runtess(imgfile)
        guesses[2] = bestguess
        cropnumber = 0
        for crop in crops:
            y1 = crop[0]
            y2 = crop[1]
            x1 = crop[2]
            x2 = crop[3]
            cropped = img[y1:y2, x1:x2]
            filename = "/tmp/ocrthis" + str(os.getpid()) + ".png"
            cv2.imwrite(filename, cropped)
            guess = self.runtess(filename)
            os.unlink(filename)
            if guess == 0 or guess in guesses:
                continue
            if guess < lim_min:
                if guesses[1] < guess:
                    guesses[0] = guesses[1]
                    guesses[1] = guess
                continue
            if guess > lim_max:
                if guesses[3] > guess:
                    guesses[4] = guesses[3]
                    guesses[3] = guess
                continue
            if guess == bestguess:
                if guesses[2] > bestguess:
                    guesses[4] = guesses[3]
                    guesses[3] = guesses[2]
                    guesses[2] = guess
                if guesses[2] < bestguess and guesses[2] != 0:
                    guesses[0] = guesses[1]
                    guesses[1] = guesses[2]
                    guesses[2] = guess
                continue
            if guess > bestguess:
                bestguess = guess
                if guesses[2] > 0:
                    guesses[0] = guesses[1]
                    guesses[1] = guesses[2]
                guesses[2] = guess
                continue
            if guess < bestguess:
                if guesses[1] > 0:
                    guesses[0] = guesses[1]
                guesses[1] = guess
        if guesses[2] == 0:
            if guesses[1] > 0:
                guesses[2] = guesses[1]
                guesses[1] = guesses[0]
                guesses[0] = 0
            elif guesses[3] > 0:
                guesses[2] = guesses[3]
                guesses[3] = guesses[4]
                guesses[4] = 0
        return guesses

    def perform_create(self, serializer):
        imgfile = self.request.FILES.get('imagefile')
        odoimage = serializer.save(driver=self.request.user, imagefile=imgfile)
        lim_min = 0
        lim_max = 9999999
        # From the last three non-null odometers, pick the second largest odometer,
        # this is our MIN
        # From the MIN date, calculate reasonable kilometers until today,
        # this is our MAX
        try:
            last_odometers = OdometerSnap.objects.filter(
                vehicle=odoimage.odometersnap.vehicle, odometer__gt=0).order_by('-when')[:3]
            prev_odometers = OdometerSnap.objects.filter(
                vehicle=odoimage.odometersnap.vehicle,
                when__gt=last_odometers[2].when).order_by('-odometer')[:2]
            lim_min = prev_odometers[1].odometer
            since_days = timezone.now() - prev_odometers[1].when
            max_km_per_day = 300
            lim_max = prev_odometers[1].odometer + since_days.days * max_km_per_day + max_km_per_day
        except IndexError:
            pass
        guesses = [0, 0, 0, 0, 0]
        if (odoimage.odometersnap.odometer < 1):
            guesses = self.do_ocr("/vagrant/www/media/" + odoimage.imagefile.name,
                                  lim_max, lim_min)
            odoimage.odometersnap.odometer = guesses[2]
            odoimage.odometersnap.save()
        odoimage.guess0 = guesses[0]
        odoimage.guess1 = guesses[1]
        odoimage.guess2 = guesses[2]
        odoimage.guess3 = guesses[3]
        odoimage.guess4 = guesses[4]
        odoimage.save()

    def get_queryset(self):
        return OdometerImage.objects.filter(
            Q(odometersnap__vehicle__owner=self.request.user) | Q(driver=self.request.user))
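Wiring the viewset into a URLconf with DRF's router; the URL prefix and basename below are illustrative and not taken from the original project (older DRF versions spell the keyword base_name).

# Hypothetical router registration for the viewset above.
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r'odometerimages', OdometerImageViewSet, basename='odometerimage')
urlpatterns = router.urls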
Control even more lights with this 48-channel DMX controller. The American DJ Scene Setter 48 has the same great features as the 24-channel Scene Setter, except it incorporates 24 more individual faders.
from collections import defaultdict
from datetime import timedelta
from itertools import chain
from operator import itemgetter
from typing import Optional, List, Tuple, Union, Iterable, Any, Callable, Dict, Mapping

from django.conf import settings
from django.conf.urls import re_path
from django.contrib.auth.base_user import AbstractBaseUser
from django.http import HttpRequest
from django.utils import timezone
from django.utils.translation import gettext as _

from .exceptions import UnknownMessengerError, UnknownMessageTypeError
from .messages.base import MessageBase
from .messages.plain import PlainTextMessage
from .models import Message, Dispatch, Subscription
from .views import mark_read, unsubscribe
# NB: Some of these unused imports are exposed as part of toolbox API.
from .messages import register_builtin_message_types  # noqa
from .utils import (  # noqa
    is_iterable, import_project_sitemessage_modules, get_site_url, recipients,
    register_messenger_objects, get_registered_messenger_object, get_registered_messenger_objects,
    register_message_types, get_registered_message_type, get_registered_message_types,
    get_message_type_for_app, override_message_type_for_app, Recipient
)

_ALIAS_SEP = '|'
_PREF_POST_KEY = 'sm_user_pref'


def schedule_messages(
        messages: Union[str, MessageBase, List[Union[str, MessageBase]]],
        recipients: Optional[Union[Iterable[Recipient], Recipient]] = None,
        sender: Optional[AbstractBaseUser] = None,
        priority: Optional[int] = None
) -> List[Tuple[Message, List[Dispatch]]]:
    """Schedules a message or messages.

    :param messages: str or MessageBase heir or list - use str to create PlainTextMessage.
    :param recipients: recipients addresses or Django User model heir instances
        If `None` Dispatches should be created before send using `prepare_dispatches()`.
    :param User|None sender: User model heir instance
    :param priority: number describing message priority. If set overrides priority provided with message type.
    """
    if not is_iterable(messages):
        messages = (messages,)

    results = []
    for message in messages:
        if isinstance(message, str):
            message = PlainTextMessage(message)

        resulting_priority = message.priority
        if priority is not None:
            resulting_priority = priority

        results.append(message.schedule(sender=sender, recipients=recipients, priority=resulting_priority))

    return results


def send_scheduled_messages(
        priority: Optional[int] = None,
        ignore_unknown_messengers: bool = False,
        ignore_unknown_message_types: bool = False
):
    """Sends scheduled messages.

    :param priority: number to limit sending message by this priority.
    :param ignore_unknown_messengers: to silence UnknownMessengerError
    :param ignore_unknown_message_types: to silence UnknownMessageTypeError
    :raises UnknownMessengerError:
    :raises UnknownMessageTypeError:
    """
    dispatches_by_messengers = Dispatch.group_by_messengers(Dispatch.get_unsent(priority=priority))

    for messenger_id, messages in dispatches_by_messengers.items():
        try:
            messenger_obj = get_registered_messenger_object(messenger_id)
            messenger_obj.process_messages(messages, ignore_unknown_message_types=ignore_unknown_message_types)

        except UnknownMessengerError:
            if ignore_unknown_messengers:
                continue
            raise


def send_test_message(messenger_id: str, to: Optional[str] = None) -> Any:
    """Sends a test message using the given messenger.

    :param messenger_id: Messenger alias.
    :param to: Recipient address (if applicable).
    """
    messenger_obj = get_registered_messenger_object(messenger_id)
    return messenger_obj.send_test_message(to=to, text='Test message from sitemessages.')


def check_undelivered(to: Optional[str] = None) -> int:
    """Sends a notification email if any undelivered dispatches.

    Returns undelivered (failed) dispatches count.

    :param to: Recipient address. If not set Django ADMINS setting is used.
    """
    failed_count = Dispatch.objects.filter(dispatch_status=Dispatch.DISPATCH_STATUS_FAILED).count()

    if failed_count:
        from sitemessage.shortcuts import schedule_email
        from sitemessage.messages.email import EmailTextMessage

        if to is None:
            admins = settings.ADMINS

            if admins:
                to = list(dict(admins).values())

        if to:
            priority = 999

            register_message_types(EmailTextMessage)

            schedule_email(
                _('You have %(count)s undelivered dispatch(es) at %(url)s') % {
                    'count': failed_count,
                    'url': get_site_url(),
                },
                subject=_('[SITEMESSAGE] Undelivered dispatches'),
                to=to, priority=priority)

            send_scheduled_messages(priority=priority)

    return failed_count


def cleanup_sent_messages(ago: Optional[int] = None, dispatches_only: bool = False):
    """Cleans up DB : removes delivered dispatches (and messages).

    :param ago: Days. Allows cleanup messages sent X days ago. Defaults to None (cleanup all sent).
    :param dispatches_only: Remove dispatches only (messages objects will stay intact).
    """
    filter_kwargs = {
        'dispatch_status': Dispatch.DISPATCH_STATUS_SENT,
    }

    objects = Dispatch.objects

    if ago:
        filter_kwargs['time_dispatched__lte'] = timezone.now() - timedelta(days=int(ago))

    dispatch_map = dict(objects.filter(**filter_kwargs).values_list('pk', 'message_id'))

    # Remove dispatches
    objects.filter(pk__in=list(dispatch_map.keys())).delete()

    if not dispatches_only:
        # Remove messages also.
        messages_ids = set(dispatch_map.values())

        if messages_ids:
            messages_blocked = set(chain.from_iterable(
                objects.filter(message_id__in=messages_ids).values_list('message_id')))

            messages_stale = messages_ids.difference(messages_blocked)

            if messages_stale:
                Message.objects.filter(pk__in=messages_stale).delete()


def prepare_dispatches() -> List[Dispatch]:
    """Automatically creates dispatches for messages without them."""
    dispatches = []
    target_messages = Message.get_without_dispatches()

    cache = {}

    for message_model in target_messages:

        if message_model.cls not in cache:
            message_cls = get_registered_message_type(message_model.cls)
            subscribers = message_cls.get_subscribers()
            cache[message_model.cls] = (message_cls, subscribers)
        else:
            message_cls, subscribers = cache[message_model.cls]

        dispatches.extend(message_cls.prepare_dispatches(message_model))

    return dispatches


def get_user_preferences_for_ui(
        user: AbstractBaseUser,
        message_filter: Optional[Callable] = None,
        messenger_filter: Optional[Callable] = None,
        new_messengers_titles: Optional[Dict[str, str]] = None
) -> Tuple[List[str], Mapping]:
    """Returns a two element tuple with user subscription preferences to render in UI.

    Message types with the same titles are merged into one row.

    First element:
        A list of messengers titles.

    Second element:
        User preferences dictionary indexed by message type titles.
        Preferences (dictionary values) are lists of tuples:
            (preference_alias, is_supported_by_messenger_flag, user_subscribed_flag)

        Example:
            {'My message type': [('test_message|smtp', True, False), ...]}

    :param user:
    :param message_filter: A callable accepting a message object to filter out message types
    :param messenger_filter: A callable accepting a messenger object to filter out messengers
    :param new_messengers_titles: Mapping of messenger aliases to a new titles.
    """
    if new_messengers_titles is None:
        new_messengers_titles = {}

    msgr_to_msg = defaultdict(set)
    msg_titles = {}
    msgr_titles = {}

    for msgr in get_registered_messenger_objects().values():
        if not (messenger_filter is None or messenger_filter(msgr)) or not msgr.allow_user_subscription:
            continue

        msgr_alias = msgr.alias
        msgr_title = new_messengers_titles.get(msgr.alias) or msgr.title

        for msg in get_registered_message_types().values():
            if not (message_filter is None or message_filter(msg)) or not msg.allow_user_subscription:
                continue

            msgr_supported = msg.supported_messengers
            is_supported = (not msgr_supported or msgr.alias in msgr_supported)

            if not is_supported:
                continue

            msg_alias = msg.alias
            msg_titles.setdefault(f'{msg.title}', []).append(msg_alias)

            msgr_to_msg[msgr_alias].update((msg_alias,))
            msgr_titles[msgr_title] = msgr_alias

    def sort_titles(titles):
        return dict(sorted([(k, v) for k, v in titles.items()], key=itemgetter(0)))

    msgr_titles = sort_titles(msgr_titles)

    user_prefs = {}

    user_subscriptions = [
        f'{pref.message_cls}{_ALIAS_SEP}{pref.messenger_cls}'
        for pref in Subscription.get_for_user(user)]

    for msg_title, msg_aliases in sort_titles(msg_titles).items():

        for __, msgr_alias in msgr_titles.items():
            msg_candidates = msgr_to_msg[msgr_alias].intersection(msg_aliases)

            alias = ''
            msg_supported = False
            subscribed = False

            if msg_candidates:
                alias = f'{msg_candidates.pop()}{_ALIAS_SEP}{msgr_alias}'
                msg_supported = True
                subscribed = alias in user_subscriptions

            user_prefs.setdefault(msg_title, []).append((alias, msg_supported, subscribed))

    return list(msgr_titles.keys()), user_prefs


def set_user_preferences_from_request(request: HttpRequest) -> bool:
    """Sets user subscription preferences using data from a request.

    Expects data sent by form built with `sitemessage_prefs_table` template tag.

    Returns a flag, whether prefs were found in the request.

    :param request:
    """
    prefs = []

    for pref in request.POST.getlist(_PREF_POST_KEY):
        message_alias, messenger_alias = pref.split(_ALIAS_SEP)

        try:
            get_registered_message_type(message_alias)
            get_registered_messenger_object(messenger_alias)

        except (UnknownMessengerError, UnknownMessageTypeError):
            pass

        else:
            prefs.append((message_alias, messenger_alias))

    Subscription.replace_for_user(request.user, prefs)

    return bool(prefs)


def get_sitemessage_urls() -> List:
    """Returns sitemessage urlpatterns, that can be attached to urlpatterns of a project:

        # Example from urls.py.

        from sitemessage.toolbox import get_sitemessage_urls

        urlpatterns = patterns('',
            # Your URL Patterns belongs here.

        ) + get_sitemessage_urls()  # Now attaching additional URLs.

    """
    url_unsubscribe = re_path(
        r'^messages/unsubscribe/(?P<message_id>\d+)/(?P<dispatch_id>\d+)/(?P<hashed>[^/]+)/$',
        unsubscribe,
        name='sitemessage_unsubscribe'
    )

    url_mark_read = re_path(
        r'^messages/ping/(?P<message_id>\d+)/(?P<dispatch_id>\d+)/(?P<hashed>[^/]+)/$',
        mark_read,
        name='sitemessage_mark_read'
    )

    return [url_unsubscribe, url_mark_read]
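A minimal sketch of the scheduling flow these helpers implement: queue a plain-text message for an 'smtp' messenger alias and flush the queue. The addresses are placeholders, and the smtp messenger is assumed to have been registered elsewhere with register_messenger_objects().

# Illustrative scheduling round, assuming an 'smtp' messenger is registered.
schedule_messages(
    'Nightly report is ready.',
    recipients=recipients('smtp', ['ops@example.com', 'dev@example.com']),
)
send_scheduled_messages()

# Periodically: alert admins about failures and prune delivered dispatches.
check_undelivered()
cleanup_sent_messages(ago=30, dispatches_only=True)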
Drawing on decades of entrepreneurial and corporate training, scientific research, and directing, producing and performing with many of the music legends of our time, Freddie Ravel is often engaged by conference planners in the program planning process to help design and incorporate their meeting objectives into a rich, harmonious and rewarding experience. His programs can be delivered in English or Spanish to bridge corporate cultures and uplift your audience to success.

Freddie is the internationally acclaimed "Keynote Maestro" who blends his infectious passion for business breakthroughs and the power of music to unlock the minds, hearts and potential of audiences around the world. Backed by #1 hits and collaborative successes with Earth, Wind & Fire, Madonna, Prince, Sergio Mendes, Quincy Jones, the Boston Pops and rock legend Carlos Santana, the #1 chart-topping pianist is the founder of The Rhythm of Success, a patented peak performance program that enhances leadership, innovation and collaboration for small, midsized and large corporations alike. Recent accolades hail from the mayor of Los Angeles, who awarded Freddie the city's Certificate of Recognition for creating a music-based system to "renew the national and international economy," while his media appearances include FOX, ABC, CBS, Universal, Business Rockstars, Clear Channel and SiriusXM. His delighted clients refer to him as "the ideal balance of entertainment and content." With rave reviews from IBM, Toyota, Red Bull, NASA, Apple, Morgan Stanley, Google and Citi, Freddie serves as a captivating access point through which to transform organizations, education and the human condition.

Increase business through the power of music! Keynote speaker Freddie Ravel helps your audience unravel obstacles, enhance listening skills and increase productivity to achieve breakthrough success. He engages and reveals the Four Foundations of music—Melody, Harmony, Rhythm and SCORE—to inspire and transform audiences around the world. In this dynamic multimedia "keynote concert," where speaking meets live masterful piano performance backed by a full orchestra and legendary music icons (he brings them to your event on a flash drive!), Freddie helps your audience unravel obstacles, enhance listening skills and increase productivity to SCORE breakthrough success. No matter what kind of organization—from small companies to large corporations—employees at all levels will tune up to a better future by becoming more collaborative, productive and ultimately more profitable. This presentation is sure to open or close your meeting on a dynamic high note.

Have you ever heard a great-sounding band or orchestra where everyone is a soloist? It simply doesn't exist. For over a decade, Gallup polls have revealed that four out of five employees are disengaged in their work, costing the U.S. economy over $500 billion annually in lost revenue. Freddie shares his personal stories of how minorities and women have opened doors to his ongoing success: from being hired by Brazilian icon Sergio Mendes at age 23 to being the only white guy in the all-black band Earth, Wind & Fire. In addition to his collaborations with Dr. Martin Luther King's daughter, Yolanda King, as guest author alongside Stevie Wonder, Muhammad Ali, Robert Kennedy Jr. and Maya Angelou, he's played the GRAMMYs with Prince, recorded and appeared with Madonna, and toured the world to packed stadiums with Latin rock legend Carlos Santana. He drives home the message that when we LISTEN UP and embrace diversity and inclusion, our team of collective Melodies, Harmonies and Rhythms achieves the ultimate SCORE—a state where organizations gain the distinct advantage needed in today's hyper-competitive marketplace.

Want to keep your meeting moving or add an extra spark to make your event unforgettable? Freddie Ravel as your emcee is the answer. He combines your business themes and the Four Foundations of Music to rev up the energy of general sessions throughout the day, your multi-day meeting or your awards ceremony. Transforming the mundane to magnificent, clients invite Freddie back year after year!

"The attendee reaction we received to Freddie Ravel's presentation at the cVent Elite Meetings Alliance was off the charts! His message was smart, clear, humorous, entertaining, energetic and provided real impactful lessons for our attendees to take back to their workplace and make an immediate difference in their creativity, collaboration and productivity."

"His message that we have music within, coupled with his fresh approach, left us with timely and powerful business tools to apply to our work."

"Freddie Ravel was fabulous! His musical talents are truly amazing, charisma oozes from him, and his kindness comes through genuinely in his presentation. As our keynote speaker, he set the perfect tone and mood for our conference. Everyone attending was very appreciative and pleased."

"His powerful presentation was pure joy and full of valuable lessons that we were able to implement immediately!"

"Mr. Ravel's gift in bringing words and music to give definition to the interplay between teams, purpose and people was fantastic and profound."

"Freddie uses the language of music to show how listening skills are critical to our business. Whether it's getting 'in tune' with our customers, associates, families or ourselves, his incredible talent delivers this message through his truly innovative and entertaining program."
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Jisho.org semi-api
"""

import re
from collections import OrderedDict
from itertools import islice

import requests
from requests import RequestException
from bs4 import BeautifulSoup


class Jisho:
    def __init__(self):
        # Exact match: term at the start of the word
        self.url = u'http://jisho.org/words?jap=%s&eng=&dict=edict'
        # Fuzzy match: term at any position
        self.fuzzy_url = u'http://jisho.org/words?jap=*%s*&eng=&dict=edict'
        # Per-kanji details page
        self.details_url = u'http://jisho.org/kanji/details/%s'

    def lookup(self, term, fuzzy=True):
        """Look up a term on Jisho and return the raw HTML."""
        url = self.fuzzy_url if fuzzy else self.url
        try:
            return requests.get(url % term).content
        except RequestException:
            return ''

    def complete(self, kanji):
        """Get words which include the specified kanji."""
        results = []
        soup = BeautifulSoup(self.lookup(kanji), 'lxml')
        for word in soup.find_all('span', {'class': 'kanji'}):
            # Get text from the HTML element, stripping spaces and tabs
            word = word.get_text().strip()
            # Skip the kanji itself
            if word != kanji:
                results.append(word)
        return results

    def define(self, kanji, limit=20, skip_same_reading=False,
               skip_same_meaning=False):
        """Get words with the specified kanji, plus meaning and kana.

        Returns an iterator of (word, {'kana': ..., 'meaning': ...}) pairs.
        """
        results = OrderedDict()
        soup = BeautifulSoup(self.lookup(kanji), 'lxml')
        # Utility function to get the text of a specific row|column
        get_row = lambda row, column: row.find('td', column).get_text().strip()
        columns = ['kanji_column', 'kana_column', 'meanings_column']
        # Find rows with classes 'odd' and 'even'
        for row in soup.find_all('tr', {'class': re.compile(r'^(odd|even)$')}):
            # Skip rows that also carry the 'lower' class
            if 'lower' in row['class']:
                continue
            # Get the columns by their class names
            word, kana, meaning = [get_row(row, column) for column in columns]
            # Append to results if not the same as the kanji itself
            if word != kanji:
                results[word] = {'kana': kana, 'meaning': meaning}
        # TODO: filter results based on the skip_same_reading and
        # skip_same_meaning flags (same meaning and same kana)
        return islice(results.items(), limit)

    def details(self, word):
        """Get info for each kanji in a word."""
        details = {}
        try:
            data = BeautifulSoup(
                requests.get(self.details_url % word).content,
                'lxml'
            )
            for div in data.find_all('div', 'kanji_result'):
                # Get the kanji, its meanings and its readings
                kanji = div.find('h1', 'literal').get_text().strip()
                meanings = div.find('div', 'english_meanings') \
                    .get_text(strip=True).replace('English meanings', '')
                try:
                    kun, on = div.find('div', 'japanese_readings') \
                        .get_text().strip().split('\n')
                    names = u''
                except ValueError:
                    # Some entries also carry a "Japanese names" line
                    kun, on, names = div.find('div', 'japanese_readings') \
                        .get_text().strip().split('\n')
                details[kanji] = {
                    'meanings': meanings.replace(';', ', '),
                    'on': on.replace('Japanese on:', '').strip(),
                    'kun': kun.replace('Japanese kun:', '').strip(),
                    'names': names.replace('Japanese names:', '').strip()
                }
        except RequestException:
            pass
        return details


if __name__ == '__main__':
    for item, value in Jisho().details(u'才凱旋').items():
        print(item)
        for key, data in value.items():
            print(key, data)
Heart surgeries are performed on a daily basis in the United States—in fact, more than 500,000 coronary bypass procedures alone are performed each year on average. Heart disease is a leading health issue in the U.S., and cardiothoracic surgeons receive extensive training in how to treat it surgically. Salinas Valley Medical Clinic (SVMC) calls on medical experts of the highest caliber to perform these operations. Catering to the safety and comfort of our patients is a major priority. First, you are administered anesthesia, meaning you will not be conscious during the operation and should not feel any pain. Once this is done, you are hooked up to a breathing machine through a tube placed in your airway. The doctor will then cut into the chest to get to the heart. What happens next depends on the type of operation you are receiving. Typically, heart surgery lasts between 3 and 6 hours. Once the procedure is finished, your chest is sewn back up. Tubes and catheters may remain in your body; do not panic if you wake up with them still inserted, as this is normal. You may be taken to the intensive care unit (ICU) after the surgery for monitoring. You will still be asleep during transport and likely will not wake up for a few hours. If you have friends or family waiting for you during the operation, the surgeon will meet with them afterwards and update them on your status. After you wake up and the monitoring period has ended, the doctor will inform you of your progress and may give you the all-clear to go home. You will also receive thorough instructions for post-operative care.
import json
import os

from flask import redirect, request, session
from flask_restplus import Namespace, Resource
from requests_oauthlib import OAuth2Session

from security.token import get_jwt_token, TokenType, get_token_redirect_response

# pylint: disable=unused-variable

# OAuth endpoints given in the Google API documentation
AUTHORIZATION_URI = 'https://accounts.google.com/o/oauth2/v2/auth'
TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
USER_PROFILE_URI = 'https://www.googleapis.com/oauth2/v1/userinfo'
SCOPE = ['https://www.googleapis.com/auth/userinfo.profile',
         'https://www.googleapis.com/auth/userinfo.email']

# OAuth application configuration created on Google
client_id = os.environ['GOOGLE_CLIENT_ID']
client_secret = os.environ['GOOGLE_CLIENT_SECRET']
redirect_uri = os.environ['HOST_NAME'] + '/mobydq/api/v1/security/oauth/google/callback'


def get_user_info(google_session: OAuth2Session):
    """Gets the user profile using the OAuth session."""
    user_profile = google_session.get(USER_PROFILE_URI).content.decode('utf-8')
    return json.loads(user_profile)


def register_google_oauth(namespace: Namespace):
    """Registers all endpoints used for Google OAuth authentication."""

    @namespace.route('/security/oauth/google')
    @namespace.doc()
    class GoogleOAuth(Resource):
        """Defines the resource that redirects the user to the Google OAuth page."""

        def get(self):
            """Redirects the user to the Google OAuth page."""
            google_session = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=SCOPE)
            url, state = google_session.authorization_url(AUTHORIZATION_URI, access_type='offline', prompt='select_account')
            # Store the state so the callback can verify it and prevent CSRF
            session['oauth_state'] = state
            return redirect(url)

    @namespace.route('/security/oauth/google/callback')
    @namespace.doc()
    class GoogleOAuthCallback(Resource):
        """Defines the resource that handles the callback from Google OAuth."""

        def get(self):
            """Handles the Google OAuth callback and fetches the user access token."""
            # Passing the stored state lets fetch_token reject responses
            # whose state does not match, closing the CSRF hole
            google_session = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=SCOPE,
                                           state=session.get('oauth_state'))
            token = google_session.fetch_token(TOKEN_URI, client_secret=client_secret, authorization_response=request.url)
            user_info = get_user_info(google_session)
            jwt = get_jwt_token(TokenType.GOOGLE, user_info['email'], user_info, token)
            return get_token_redirect_response(jwt)
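For context, a minimal sketch of how this registration function might be wired into a Flask application is shown below. The module path, namespace name, and secret-key environment variable are assumptions, not part of the original project; note that app.secret_key must be set for flask.session to work at all.

import os

from flask import Flask
from flask_restplus import Api, Namespace

# Assumed module path for the file above
from api.security.google_oauth import register_google_oauth

app = Flask(__name__)
app.secret_key = os.environ['FLASK_SECRET_KEY']  # required for flask.session
api = Api(app)

namespace = Namespace('security', description='Authentication endpoints')
register_google_oauth(namespace)
# Routes inside the namespace already start with /security/oauth/...,
# so only the API prefix is added here.
api.add_namespace(namespace, path='/mobydq/api/v1')

if __name__ == '__main__':
    app.run()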
The Digital Transformation Unit is leading digital transformation at the Foreign & Commonwealth Office, making our online public services easy to use. As Senior Product Manager you will devise and iterate our consular services product backlogs and roadmap, work with external technical suppliers, and champion user needs, as well as work closely with operational and policy teams. This is a fantastic opportunity to own the end-to-end product development process. You will have the opportunity to develop a career in government digital as part of the wider Digital, Data and Technology community. Flexible, adaptable and resilient, you are an experienced web professional with significant experience of delivering high-quality public-facing websites in a context of business, IT and digital transformation. You will have strong product and project management skills. You will be confident and experienced in data-driven design and in managing a project end to end, from gathering user needs to delivery and iteration.
from yapytex.dictutils import DictWrapper
from yapytex import latex_directives as xdir
from yapytex import styles
from yapytex import miscelanea as misc
from yapytex.abstract import YaPyTexBase
from yapytex.pieces import YaPyTexPiece, YaPyTexAppendix

# Book layout reference:
# https://en.wikipedia.org/wiki/Book_design

_d_misc_options = dict(
  numbered=r'numbered',
  pprint=r'print',
  index=r'index'
)
misc_options = DictWrapper(_d_misc_options)

_default_doc_options = [
  misc_options.numbered,
  styles.font_sizes.sz12pt,
  styles.font_families.times,
  misc_options.pprint,
  misc_options.index,
]


class Document(YaPyTexBase):
  _type = 'article'

  def __init__(self):
    # Instance-level containers: class-level mutable defaults would be
    # shared across every Document instance.
    self._appendices = []
    self._pre = []
    self._glossary = []
    self._acronym = []
    self._pieces = []
    self._title = None
    self._author = None
    self._language = xdir.default_language
    self._hook_load_packages = None

  @property
  def language(self):
    return self._language

  @language.setter
  def language(self, language):
    self._language = language

  @property
  def title(self):
    return self._title

  @title.setter
  def title(self, title):
    self._title = title

  @property
  def author(self):
    return self._author

  @author.setter
  def author(self, author):
    self._author = author

  @property
  def hook_load_packages(self):
    return self._hook_load_packages

  @hook_load_packages.setter
  def hook_load_packages(self, hook):
    self._hook_load_packages = hook

  def add(self, piece):
    if not isinstance(piece, YaPyTexPiece):
      raise TypeError('Piece argument must be a YaPyTexPiece instance.')
    self._pieces.append(piece)

  def add_appendix(self, appendix):
    if not isinstance(appendix, YaPyTexAppendix):
      raise TypeError('Appendix argument must be a YaPyTexAppendix instance.')
    self._appendices.append(appendix)

  def build(self, ttype):
    pre_header = [
      xdir.doc_class.format(','.join(_default_doc_options), ttype),
      xdir.useinputenc,
      xdir.usenumerate,
      xdir.usehyperref,
    ] + self._pre
    if self._hook_load_packages:
      self._hook_load_packages(pre_header)
    # 'is' compares identity, not equality, so '==' is required here
    if self._language == 'es_ES':
      pre_header.append(xdir.es_ES)
    if self._title:
      pre_header.append(xdir.doc_title.format(self._title))
    if self._author:
      pre_header.append(xdir.doc_author.format(self._author))
    if xdir.useglossaries in pre_header and len(self._glossary) > 0:
      pre_header.append(xdir.make_glossaries)
      pre_header.append(xdir.gls_entry_italic)
    # Document's begin
    header = [xdir.doc_begin]
    post_header = []
    if self._title:
      post_header.append(xdir.maketitle)
    post_header.append(xdir.cleardoublepage)
    post_header.append(xdir.tableofcontents)
    pieces = list(map(misc.format, self._pieces))
    backmatter = [xdir.backmatter]
    backmatter.append('\n'.join(map(misc.format, self._appendices)))
    if xdir.useglossaries in pre_header and len(self._glossary) > 0:
      backmatter.append(xdir.print_glossaries)
    if xdir.useglossaries in pre_header and len(self._acronym) > 0:
      backmatter.append(xdir.print_acronyms)
    pre_header.extend(self._glossary)
    pre_header.extend(self._acronym)
    # This directive must be the last one
    backmatter.append(xdir.doc_end)
    # Join the sections with newlines between them as well as within them
    return '\n'.join(pre_header + header + post_header + pieces + backmatter)
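A minimal usage sketch follows; the YaPyTexPiece construction shown is an assumption, since the actual constructor signature lives in the yapytex package and may require arguments.

# Hypothetical usage sketch: the piece construction below is assumed,
# not taken from the yapytex documentation.
from yapytex.pieces import YaPyTexPiece

doc = Document()
doc.title = 'My Report'
doc.author = 'Jane Doe'
doc.language = 'es_ES'       # adds the Spanish language directive
doc.add(YaPyTexPiece())      # must be a YaPyTexPiece instance
print(doc.build('report'))   # ttype feeds the \documentclass directive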
The distance between Budapest, Hungary and Jenisejsk, Russia is approximately 4,790 km or 2,976 mi. To cover this distance one would set out from Jenisejsk bearing 288.7° or WNW and follow the great circle arc, approaching Budapest bearing 227.2° or SW. Budapest has a marine west coast climate (Cfb), whereas Jenisejsk has a boreal subarctic climate with no dry season (Dfc). Budapest is in or near the cool temperate moist forest biome, whereas Jenisejsk is in or near the boreal moist forest biome. The mean temperature is 12.8 °C (23.1 °F) warmer in Budapest. Average monthly temperatures vary by 18.1 °C (32.6 °F) less in Budapest, so its continentality subtype is subcontinental as opposed to truly continental. Total annual precipitation averages 77.1 mm (3 in) more, which is equivalent to 77.1 l/m² (1.89 US gal/ft²) or 771,000 l/ha (82,425 US gal/ac) more, or about 1 1/6 times as much. The altitude of the sun at midday is overall 10.9° higher in Budapest than in Jenisejsk. The table shows values for Budapest relative to Jenisejsk. You can also view this comparison the other way around, from the perspective of Jenisejsk vs Budapest.
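The distance and bearing figures above follow from standard great-circle formulas. Below is a minimal Python sketch of that math, assuming approximate coordinates for the two cities (47.50 N, 19.04 E for Budapest; 58.45 N, 92.17 E for Jenisejsk) and a mean Earth radius of 6,371 km; it reproduces the ~4,790 km distance and the 288.7° initial bearing.

from math import radians, degrees, sin, cos, asin, atan2, sqrt

def haversine_km(lat1, lon1, lat2, lon2, radius_km=6371.0):
    """Great-circle distance between two points on a sphere, in km."""
    phi1, phi2 = radians(lat1), radians(lat2)
    dphi = radians(lat2 - lat1)
    dlam = radians(lon2 - lon1)
    a = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlam / 2) ** 2
    return 2 * radius_km * asin(sqrt(a))

def initial_bearing_deg(lat1, lon1, lat2, lon2):
    """Initial bearing (forward azimuth) from point 1 to point 2, in degrees."""
    phi1, phi2 = radians(lat1), radians(lat2)
    dlam = radians(lon2 - lon1)
    y = sin(dlam) * cos(phi2)
    x = cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(dlam)
    return (degrees(atan2(y, x)) + 360) % 360

# Approximate coordinates (assumed): Jenisejsk 58.45 N, 92.17 E; Budapest 47.50 N, 19.04 E
print(haversine_km(58.45, 92.17, 47.50, 19.04))         # ~4,790 km
print(initial_bearing_deg(58.45, 92.17, 47.50, 19.04))  # ~288.7° (WNW)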