import unittest, time, sys
sys.path.extend(['.', '..', '../..', 'py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_rf

class Basic(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        h2o.init(3, java_heap_GB=4)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_rf_parity_cmp(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        # always match the run below!
        # just using one file for now
        for x in [50000]:
            shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + \
                " 128 4 " + str(x) + " quad " + SYNDATASETS_DIR
            h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(), 4)
            csvFilename = "parity_128_4_" + str(x) + "_quad.data"

        def doBoth():
            h2o.verboseprint("Trial", trial)
            start = time.time()
            # make sure ntrees and max_depth are the same for both
            rfView = h2o_cmd.runRF(parseResult=parseResult, ntrees=ntrees,
                max_depth=40, response=response, timeoutSecs=600, retryDelaySecs=3)
            elapsed1 = time.time() - start
            (totalError1, classErrorPctList1, totalScores1) = h2o_rf.simpleCheckRFView(rfv=rfView)

            rfView = h2o_cmd.runSpeeDRF(parseResult=parseResult, ntrees=ntrees,
                max_depth=40, response=response, timeoutSecs=600, retryDelaySecs=3)
            elapsed2 = time.time() - start
            (totalError2, classErrorPctList2, totalScores2) = h2o_rf.simpleCheckRFView(rfv=rfView)

            print "Checking that results are similar (within 20%)"
            print "DRF2 then SpeeDRF"
            print "per-class variance is large..basically we can't check very well for this dataset"
            for i, (j, k) in enumerate(zip(classErrorPctList1, classErrorPctList2)):
                print "classErrorPctList[%s]:i %s %s" % (i, j, k)
                # self.assertAlmostEqual(classErrorPctList1[i], classErrorPctList2[i],
                #     delta=1 * classErrorPctList2[i],
                #     msg="Comparing RF class %s errors for DRF2 and SpeeDRF" % i)

            print "totalError: %s %s" % (totalError1, totalError2)
            self.assertAlmostEqual(totalError1, totalError2, delta=.2 * totalError2,
                msg="Comparing RF total error for DRF2 and SpeeDRF")
            print "elapsed: %s %s" % (elapsed1, elapsed2)
            self.assertAlmostEqual(elapsed1, elapsed2, delta=.5 * elapsed2,
                msg="Comparing RF times for DRF2 and SpeeDRF")

        # always match the gen above!
        for trial in range(1):
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            hex_key = csvFilename + "_" + str(trial) + ".hex"
            parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname,
                schema='put', hex_key=hex_key, timeoutSecs=30, doSummary=False)
            inspect = h2o_cmd.runInspect(key=hex_key)
            numCols = inspect['numCols']
            numRows = inspect['numRows']
            response = "C" + str(numCols)
            ntrees = 30

            doBoth()
            print "*****************************"
            print "end # %s RF compare" % trial,
            print "*****************************"

            print "Now change all cols to enums"
            for e in range(numCols):
                enumResult = h2o.nodes[0].to_enum(src_key=hex_key, column_index=(e + 1))

            doBoth()
            print "*********************************"
            print "end # %s RF compare, with enums #" % trial,
            print "*********************************"

if __name__ == '__main__':
    h2o.unit_main()
Used for the determination of material emissions: the GERSTEL TDS/TDS A2 mounted on a GC/MS system.

Chemical contaminants from indoor sources: adhesives, carpeting, upholstery, manufactured wood products, copy machines, pesticides, and cleaning agents, for example, may emit volatile organic compounds (VOCs), including formaldehyde. Bacteria, molds, pollen, and viruses are types of biological contaminants. These contaminants may breed in stagnant water that has accumulated in ducts, humidifiers and drain pans, or where water has collected on ceiling tiles, carpeting, or insulation.

LCI is an acronym for “Lowest Concentration of Interest”, i.e. the lowest concentration of toxicological relevance for a particular compound in indoor air in residential and office buildings. LCI values are not equivalent to or related to Occupational Exposure Limits (OELs) or Recommended Exposure Levels (RELs) as specified for occupational safety. OEL is an acronym for “Occupational Exposure Limit”. By definition, the occupational exposure limit is the contaminant level to which you can be exposed continually, day after day, during your whole working life without experiencing any negative health effects as a result.

The Committee for Health-related Evaluation of Construction Products (German acronym: AgBB) was formed in 1997 by a Working Group for “Environmental Health Protection” brought together under a cooperation between the Health Authorities of the German States. Among the members of the AgBB are State Health Authorities, the German Federal Environmental Protection Agency (UBA), the German Institute for Construction Technology (DIBt), the Federal Institute for Materials Research (BAM), and various other regional and federal state agencies.

Sampling, storage, and preparation of samples. Determination of VOCs in indoor air and in environmental test chambers. Sampling on Tenax TA followed by Thermal Desorption - GC/MS.

Building products used indoors in homes and offices can have a significant impact on indoor air quality (IAQ) through emission of volatile or semi-volatile organic compounds (VOCs/SVOCs). In order to protect the health and well-being of occupants in homes and company buildings from potentially toxic emissions, EU and national regulations require that products used indoors be tested following clearly defined methods. In Germany, and increasingly throughout Europe, the AgBB evaluation scheme is used. Material emission testing relies mainly on environmental test chambers combined with sampling of chamber air onto adsorbent tubes and Thermal Desorption GC/MS analysis. Tests generally take 28 days, but thermal extraction offers an easier and less expensive way of getting reliable information about product emissions, for R&D purposes or for quality control of existing approved products.

PVC, linoleum, carpeting, laminate, parquet, and cork: the choice of floor covering for homes and offices seems almost endless, and once you have chosen the type, a similarly endless choice of producers and quality levels can cause headaches even before the flooring has been installed. Unfortunately, a successful installation may not quite signal the end of your headaches. If the carefully chosen flooring, or the glue used to install it, emits VOCs/SVOCs, these could contaminate the indoor air and even cause irritation and negative health effects. And to top it off, reactions to contaminants in air are highly individual, varying significantly from person to person.
Hardened breathers of inner-city air and of the perpetually recycled indoor atmospheres in modern energy-efficient buildings may feel nothing. Others may be in for constant suffering while in the building. And the list of real or perceived symptoms is endless. If headaches, mucous membrane irritation, fatigue, allergic reactions, immune system deficiency, frequent infections, deterioration of pre-existing asthmatic conditions, depression, or simply a sudden general lack of well-being occur after moving into a new building, or after a building has been renovated or redecorated, the informed physician should not exclude a case of sick building syndrome (SBS). According to the US EPA, indicators of SBS include: building occupants complain of symptoms associated with acute discomfort, e.g., headache; eye, nose, or throat irritation; dry cough; dry or itchy skin; dizziness and nausea; difficulty in concentrating; fatigue; and sensitivity to odors. The cause of the symptoms is not known. Most of the complainants report relief soon after leaving the building.

We humans in modern society spend most of our lives indoors, depending on the season up to 80-90% of every day. This means that IAQ in homes and offices has a significant and decisive influence on our health and well-being. Temperature and relative humidity (RH) are also critical factors. In addition, VOC (C6-C16) and SVOC (>C16-C22) contamination plays a role that is increasingly in the focus of regulatory agencies. Many construction products used in buildings are potential sources of VOC or SVOC emissions. Apart from the flooring materials and the glues used to install them, some of the culprits may be paints, lacquers, varnishes, coatings, wood preservation products, wallpaper, caulks and sealants, cement, prefabricated bricks, and concrete. We are surrounded by a huge range of industrially produced materials that contain a long list of ingredients and additives to make them easy to use, low cost, and durable.

The European Union recognizes the importance of this area and is moving towards regulation of the emission of chemicals into indoor air. So far, so good. But, to paraphrase a popular saying, good intentions don't always pave the way to paradise. True to the old credo: trust, but verify. Construction products should be checked in a standardized way in order to level the playing field for producers by applying the same rules to everyone, while allowing the consumer to win by being able to live, work, and play in a healthy indoor environment. The Committee for Health-related Evaluation of Building Products produced the AgBB evaluation scheme, which is used in Germany and increasingly throughout Europe. The process enables a clear and uniform assessment of emissions of VOCs and SVOCs under standard conditions. “These test conditions for flooring have given us, for the first time, a set of standardized test conditions for approval of flooring materials that are used for an annual verification check of the emission properties of approved products”, states Gerd Bittner of the Textiles & Flooring Institute (TFI) in Aachen, Germany. Testing of flooring materials and flooring systems (i.e. including the glue used to install the flooring) is performed at the TFI using environmental test chambers based on the DIN EN ISO 16000-11, DIN EN ISO 16000-9, and DIN ISO 16000-6 standards for indoor air.
These standards specify conditions for all aspects of testing various flooring materials in environmental test chambers, as well as the analytical determination of the identity and concentration of emitted organic compounds (VOCs/SVOCs). Chamber air is collected using active pumped sampling onto a suitable adsorbent tube after three and 28 days. The tubes are typically filled with Tenax TA® and the analysis, as specified in the AgBB scheme, is performed by Thermal Desorption - Gas Chromatography combined with Mass Spectrometry Detection (GC/MS) of the analytes. A non-polar separation column is used, which means that individual analytes can be assigned to a boiling point or retention time range, C6-C16 (VOC) or >C16-C22 (SVOC), as specified in the AgBB scheme for health-related evaluation of construction products, rev. 2010. The term “individual analytes” refers to both identified and non-identified compounds. The AgBB scheme requires a limit of detection of 1 μg/m3 for each compound in order to comprehensively cover and describe the material emissions. Depending on the specific requirements, quantitative information on individual compounds must be obtained. Whenever individual compound concentrations exceed 5 μg/m3, they must be quantified both individually and in summation as part of the relevant group. Exceptions are EU category 1 and 2 carcinogens. For identified carcinogens and compounds that have an LCI value, compound-specific quantification must be performed. Unidentified compounds, as well as compounds to which no LCI value is assigned, are quantified as toluene equivalents.

The test over 28 days, as required in the AgBB scheme, results in a comprehensive and standardized emission profile, according to Gerd Bittner. Typical peak patterns can be observed and compared during data analysis, and key analytes are therefore easily found. Emissions from different materials are easily compared both quantitatively and qualitatively, and quantitation using an internal standard, typically expressed as an equivalent toluene concentration, is easily performed as specified for the unknown minor compounds and those without LCI values. However, emission chamber test results take almost a month to produce and they are highly labor- and cost-intensive. This poses a serious problem for the industry, especially during product development: test cycles of a month can cause significant project delays with serious consequences, for example in terms of development cost and loss of competitiveness. A clear indication of the emission profile of a product at every development stage, or during troubleshooting following customer complaints, can save companies both lots of time and pots of money. For these reasons, the TFI has for many years offered its customers accelerated emission tests based on thermal extraction, a dynamic headspace technique based on trapping on a standard adsorbent tube.

GERSTEL TE: Thanks to the large thermal extraction tube, a range of different sample types and amounts can be analyzed based on thermal extraction in the TE.

Industry clients come to the TFI for emission tests during product development, for regular quality control of product batches, for troubleshooting following customer complaints, as well as for sample identity verification. Testing is often performed using the GERSTEL Thermal Extractor (TE). The large extraction tube of the TE (ID 14 mm, length of heated zone 75 mm) can be loaded with much larger and more representative samples than regular thermal desorption tubes.
“We use the Thermal Extractor to test textiles, elastic flooring material, multi-layer systems, as well as glues used to install flooring material”, reports Mr. Bittner. The samples are heated in a flow of inert gas and the extracted analytes are purged onto the adsorbent tube and concentrated on Tenax TA. Thermal Desorption (TD)-GC/MS analysis is subsequently performed following the AgBB guidelines.
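The quantification rules described above are simple to express in code. The following is a minimal, illustrative sketch of AgBB-style toluene-equivalent screening; the response factor, sampled volume, and peak areas are invented for the example and are not values from the article:

# Minimal sketch (not TFI's actual pipeline): toluene-equivalent
# screening of chamber-air peaks, following the AgBB thresholds above.
# TOLUENE_RF, SAMPLE_VOLUME_L, and the peak areas are made-up numbers.

TOLUENE_RF = 1.8e-4      # hypothetical response factor: area counts -> ng
SAMPLE_VOLUME_L = 4.0    # hypothetical sampled chamber-air volume, liters

def toluene_equivalent_ug_m3(peak_area, sample_volume_l=SAMPLE_VOLUME_L):
    """Convert a GC/MS peak area to ug/m3, calibrated against toluene."""
    nanograms = peak_area * TOLUENE_RF
    return nanograms / sample_volume_l  # 1 ng/L == 1 ug/m3

peaks = {'unknown_1': 41000, 'hexanal': 310000, 'unknown_2': 9000}
for name, area in sorted(peaks.items()):
    c = toluene_equivalent_ug_m3(area)
    if c >= 5.0:
        # AgBB: compounds above 5 ug/m3 must be quantified individually
        # (a compound-specific calibration would replace the toluene RF)
        print('%s: %.1f ug/m3 -> quantify individually' % (name, c))
    elif c >= 1.0:
        print('%s: %.1f ug/m3 -> report as toluene equivalent' % (name, c))
    else:
        print('%s: below the 1 ug/m3 detection requirement' % name)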
import html
import inline
import re

def forge_line(modifiers, line):
    for modifier in modifiers:
        line = modifier(line)
    return line

class Paragraph:
    lines = []

    def __init__(self, lines):
        self.lines = lines

    def build(self):
        return self.tail(
            reduce(lambda r, line: r + [forge_line(self.modifiers(), line)],
                   self.lines,
                   self.head()))

    def head(self):
        return []

    def tail(self, result):
        return result

    def modifiers(self):
        from nijiconf import BR
        return [html.forge, inline.forge, lambda x: x + BR]

    def length(self):
        return reduce(lambda length, line: length + len(line), self.lines, 0)

class Table(Paragraph):
    def __init__(self, lines):
        Paragraph.__init__(self, lines)

    def head(self):
        from nijiconf import TABLE_BEGIN
        return [TABLE_BEGIN]

    def tail(self, result):
        from nijiconf import TABLE_END
        result.append(TABLE_END)
        return result

    def modifiers(self):
        from table import row_extract
        from nijiconf import ROW_BEGIN, ROW_END
        return [html.forge, inline.forge,
                lambda text: ROW_BEGIN + row_extract(text) + ROW_END]

class CodeBlock(Paragraph):
    def __init__(self, lines):
        Paragraph.__init__(self, lines)

    def head(self):
        from nijiconf import MONO_BLOCK_BEGIN
        return [MONO_BLOCK_BEGIN]

    def tail(self, result):
        from nijiconf import MONO_BLOCK_END
        result.append(MONO_BLOCK_END)
        return result

    def modifiers(self):
        from nijiconf import BR
        return [html.forge, inline.forge, lambda x: x + BR]

class AsciiArt(Paragraph):
    def __init__(self, lines):
        Paragraph.__init__(self, lines)

    def head(self):
        from nijiconf import AA_BEGIN
        return [AA_BEGIN]

    def tail(self, result):
        from nijiconf import AA_END
        result.append(AA_END)
        return result

    def modifiers(self):
        from nijiconf import BR
        return [html.forge, lambda x: x[2:] + BR]

class Bullets(Paragraph):
    def __init__(self, lines):
        Paragraph.__init__(self, lines)

    def head(self):
        from nijiconf import UL_BEGIN
        return [UL_BEGIN]

    def tail(self, result):
        from nijiconf import UL_END
        result.append(UL_END)
        return result

    def modifiers(self):
        from nijiconf import LI_BEGIN, LI_END
        return [html.forge, inline.forge,
                lambda text: LI_BEGIN + text[2: len(text)] + LI_END]

import nijiconf

LEVEL_2_STR = (
    (nijiconf.H1_BEGIN, nijiconf.H1_END),
    (nijiconf.H2_BEGIN, nijiconf.H2_END),
    (nijiconf.H3_BEGIN, nijiconf.H3_END),
)

class Head(Paragraph):
    level = 0

    def __init__(self, lines, level):
        Paragraph.__init__(self, lines)
        self.level = level

    def modifiers(self):
        return [html.forge, inline.forge,
                lambda text: (LEVEL_2_STR[self.level][0] +
                              text[self.level + 2: len(text)] +
                              LEVEL_2_STR[self.level][1])]

class Head1(Head):
    def __init__(self, lines):
        Head.__init__(self, lines, 0)

class Head2(Head):
    def __init__(self, lines):
        Head.__init__(self, lines, 1)

class Head3(Head):
    def __init__(self, lines):
        Head.__init__(self, lines, 2)

LINE_PATTERNS = (
    ('{{{', '}}}', CodeBlock, True),
    ('\[\[\[', ']]]', Table, True),
    ('[*][ ]', '(?![*][ ])', Bullets, False),
    ('(: |:$)', '(?!(: |:$))', AsciiArt, False),
    ('=[ ]', '', Head1, False),
    ('==[ ]', '', Head2, False),
    ('===[ ]', '', Head3, False),
)

def pattern_begin(pattern):
    return pattern[0]

def pattern_end(pattern):
    return pattern[1]

def pattern_ctor(pattern):
    return pattern[2]

def pattern_excluded(pattern):
    return pattern[3]

def search_for_para(document, begin, paragraphs):
    pattern = match_pattern_begin(document[begin])
    begin += 1 if pattern_excluded(pattern) else 0
    end = begin + 1
    while end < len(document) and not re.match(pattern_end(pattern), document[end]):
        end += 1
    paragraphs.append(pattern_ctor(pattern)(document[begin: end]))
    return end + (1 if pattern_excluded(pattern) else 0)

def normal_text_from(document, begin, paragraphs):
    if match_pattern_begin(document[begin]):
        return begin
    end = begin
    while end < len(document) and not match_pattern_begin(document[end]):
        end += 1
    paragraphs.append(Paragraph(document[begin: end]))
    return end

def match_pattern_begin(line):
    for pattern in LINE_PATTERNS:
        if re.match(pattern_begin(pattern), line):
            return pattern
    return None

def split_document(document):
    paragraphs = []
    cursor = 0
    while cursor < len(document):
        cursor = normal_text_from(document, cursor, paragraphs)
        if cursor < len(document):
            cursor = search_for_para(document, cursor, paragraphs)
    return paragraphs
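A quick usage sketch of the parser above. The input document is hypothetical, and the example assumes the project's nijiconf, html, and inline modules are importable:

# Illustrative only: split a small wiki-style document into paragraph
# objects and render each one. The markup follows LINE_PATTERNS above.
document = [
    '= Title',
    'Some introductory text.',
    '* first bullet',
    '* second bullet',
    '{{{',
    'code line, emitted verbatim inside a mono block',
    '}}}',
]

for para in split_document(document):
    print('\n'.join(para.build()))
# Paragraph types produced, in order: Head1, Paragraph, Bullets, CodeBlock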
DESCRIPTION
amcrypt-ossl uses OpenSSL to encrypt and decrypt data. OpenSSL is available from www.openssl.org. OpenSSL offers a wide variety of cipher choices (amcrypt-ossl defaults to 256-bit AES) and can use hardware cryptographic accelerators on several platforms.

PASSPHRASE MANAGEMENT
amcrypt-ossl uses the same passphrase to encrypt and decrypt data. It is very important to store and protect the passphrase properly. Encrypted backup data can only be recovered with the correct passphrase.

FILES
/var/lib/amanda/.am_passphrase
File containing the passphrase. It should not be readable by any user other than the Amanda user.
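Since the passphrase file must exist with owner-only permissions, a small sketch of how one might create it; this helper is not part of amcrypt-ossl, and the example passphrase is a placeholder:

# Hypothetical helper (not part of amcrypt-ossl): create the Amanda
# passphrase file readable only by its owner, as the FILES section advises.
import os

path = '/var/lib/amanda/.am_passphrase'
# O_EXCL refuses to overwrite an existing file; 0o600 = rw for owner only
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
with os.fdopen(fd, 'w') as f:
    f.write('example passphrase, replace me\n')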
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 16:55:14 2017

@author: ajaver
"""
import os
import warnings

import numpy as np
import tables

from .getFoodContourNN import get_food_contour_nn
from .getFoodContourMorph import get_food_contour_morph
from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name


def calculate_food_cnt(mask_file, use_nn_food_cnt, model_path,
                       _is_debug=False, solidity_th=0.98):
    if use_nn_food_cnt:
        if not os.path.exists(model_path):
            warnings.warn('The model to obtain the food contour was not found. '
                          'Nothing to do here...\n'
                          'If you do not have a valid model, you could try to set '
                          '`food_method=MORPH` to use a different algorithm.')
            return

        food_cnt, food_prob, cnt_solidity = get_food_contour_nn(
            mask_file, model_path, _is_debug=_is_debug)
        # discard contours that are not solid enough (likely a bad segmentation)
        if cnt_solidity < solidity_th:
            food_cnt = np.zeros(0)
    else:
        food_cnt = get_food_contour_morph(mask_file, _is_debug=_is_debug)

    return food_cnt


def getFoodContour(mask_file,
                   skeletons_file,
                   use_nn_food_cnt,
                   model_path,
                   solidity_th=0.98,
                   _is_debug=False):
    base_name = get_base_name(mask_file)
    progress_timer = TimeCounter('')
    print_flush("{} Calculating food contour {}".format(
        base_name, progress_timer.get_time_str()))

    food_cnt = calculate_food_cnt(mask_file,
                                  use_nn_food_cnt=use_nn_food_cnt,
                                  model_path=model_path,
                                  solidity_th=solidity_th,
                                  _is_debug=_is_debug)

    # store the contour coordinates in both the skeletons file and the mask file
    for fname in [skeletons_file, mask_file]:
        with tables.File(fname, 'r+') as fid:
            if '/food_cnt_coord' in fid:
                fid.remove_node('/food_cnt_coord')
            # only save it if it is a valid 2D contour
            if food_cnt is not None and \
               food_cnt.size >= 2 and \
               food_cnt.ndim == 2 and \
               food_cnt.shape[1] == 2:
                tab = fid.create_array('/', 'food_cnt_coord', obj=food_cnt)
                tab._v_attrs['use_nn_food_cnt'] = int(use_nn_food_cnt)
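A hypothetical invocation of the entry point above; the file names and model path are placeholders, not values from the module:

# Illustrative call only -- the HDF5 files and model are assumed to exist.
getFoodContour(
    mask_file='video_masked.hdf5',
    skeletons_file='video_skeletons.hdf5',
    use_nn_food_cnt=True,               # fall back to MORPH by passing False
    model_path='food_cnt_model.h5',
)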
Are you contemplating getting a pet but do not know how to begin training it? Do you already have a dog whose habits you need to change? This article will be perfect for your needs! Keep reading to learn easy, simple tips on how to train your dog to be your best buddy.

Make sure to let your pet know a crate is their home initially. You should put their food in the crate when they eat, but leave the door open. This will create a good association between the crate and their food.

TIP! Train your dog on a regular basis to make sure he understands the rules of the house. Dog training must be practiced to maintain obedience behavior, and that is one thing that many owners neglect to remember.

Crate training your new puppy is most effective when you take small steps. If they are uncomfortable with the door closed, then try to feed them snacks to reassure them that they are okay. Leave them in for short periods of time, as little as 10 seconds. Work up from there. If they become upset, you are moving too fast.

Only reward your dog if they are calm. When your dog responds to a command in the right way, you should reward this behavior when he is calm. While you might be pleased, acting too excited may cause the dog to get agitated, lessening your control. Maintain a calm atmosphere and provide an appropriate reward.

Do not tie up several dogs in close proximity to each other. The dogs could get wrapped up and they may be injured. In the case that a large dog and small dog become entangled, the large dog’s rope or chain could cut off air circulation to the small dog, and it could be severely injured or die.

TIP! Train your dog in a positive manner. Use a happy tone when you are rewarding your dog; pet him a lot, and give him a little treat once in a while.

The first rule to remember when you begin to house train your puppy is that what you feed them will eventually come out the other end. Feed your puppy three regular meals each day. By scheduling your feedings, you will be able to determine when you should take your dog out. This can greatly reduce the number of accidents.

When you start canine training, develop a verbal cue so that the dog knows precisely when they have completed a command correctly. For example, using the word “yes” can let your dog know they can retrieve their reward.

TIP! Once you decide to train your dog, it is very important that you continue with the training no matter what. Without reinforcement, your dog will revert to his old behaviors.

Teaching any dog how to roll over can be pretty easy. Make sure you’ve got a few tasty treats handy before beginning. First, have your pet lie down. Then, position a treat near the floor to one side of him, raise it up and move it over to the opposite side. Ideally, your dog will use his nose to follow the treat, rolling his whole body in the process. As he is rolling over, say “roll over” and praise him immediately when he does. Be patient while he masters this trick. He will be a star once he does!

TIP! There is no denying that dogs possess certain innate urges and behaviors, and therefore it is necessary that they be allowed to exhibit them on occasion. There should always be good outlets available for your dog to exercise and keep busy, and do not forget the right diet to provide this energy.

With these tips you have been given, there is no need to put off canine training anymore. Make your dog the most well-behaved dog on the block with these training tips. Dogs enjoy knowing what you expect from them.
They also enjoy following commands. It’s up to you to be firm with them and guide them. Try it now!
""" Display code blocks in collapsible sections when outputting to HTML. Usage ----- This directive takes a heading to use for the collapsible code block:: .. collapsible-code-block:: python :heading: Some Code from __future__ import print_function print("Hello, Bokeh!") Options ------- This directive is identical to the standard ``code-block`` directive that Sphinx supplies, with the addition of one new option: heading : string A heading to put for the collapsible block. Clicking the heading expands or collapses the block Examples -------- The inline example code above produces the following output: ---- .. collapsible-code-block:: python :heading: Some Code from __future__ import print_function print("Hello, Bokeh!") """ from __future__ import absolute_import from docutils import nodes from docutils.parsers.rst.directives import unchanged from os.path import basename import jinja2 from sphinx.directives.code import CodeBlock PROLOGUE_TEMPLATE = jinja2.Template(u""" <div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true"> <div class="panel panel-default"> <div class="panel-heading" role="tab" id="heading-{{ id }}"> <h4 class="panel-title"> <a class="collapsed" data-toggle="collapse" data-parent="#accordion" href="#collapse-{{ id }}" aria-expanded="false" aria-controls="collapse-{{ id }}"> {{ heading }} </a> </h4> </div> <div id="collapse-{{ id }}" class="panel-collapse collapse" role="tabpanel" aria-labelledby="heading-{{ id }}"> <div class="panel-body"> """) EPILOGUE_TEMPLATE = jinja2.Template(u""" </div> </div> </div> </div> """) class collapsible_code_block(nodes.General, nodes.Element): pass class CollapsibleCodeBlock(CodeBlock): option_spec = CodeBlock.option_spec option_spec.update(heading=unchanged) def run(self): env = self.state.document.settings.env rst_source = self.state_machine.node.document['source'] rst_filename = basename(rst_source) target_id = "%s.ccb-%d" % (rst_filename, env.new_serialno('bokeh-plot')) target_id = target_id.replace(".", "-") target_node = nodes.target('', '', ids=[target_id]) node = collapsible_code_block() node['target_id'] = target_id node['heading'] = self.options.get('heading', "Code") cb = CodeBlock.run(self) node.setup_child(cb[0]) node.children.append(cb[0]) return [target_node, node] def html_visit_collapsible_code_block(self, node): self.body.append( PROLOGUE_TEMPLATE.render( id=node['target_id'], heading=node['heading'] ) ) def html_depart_collapsible_code_block(self, node): self.body.append(EPILOGUE_TEMPLATE.render()) def setup(app): app.add_node( collapsible_code_block, html=( html_visit_collapsible_code_block, html_depart_collapsible_code_block ) ) app.add_directive('collapsible-code-block', CollapsibleCodeBlock)
That's why Zovirax is one of the most popular preparations for the medical treatment of herpes. This will be a topic of a future blog post. In this form, Zovirax prevents the virus from replicating and spreading to other cells. I am 10 minutes late. Your estimate for the voice data rate is completely wrong. Zovirax Ointment 5% is contraindicated in those patients who develop hypersensitivity or chemical intolerance to the components of the formulation. Here is my solution. Here is that plot. It should be used during pregnancy only if the potential benefit justifies the potential risk to the fetus. Verizon sort of makes this difficult as they bundle voice minutes and text. Suppose you take the voice plus text plan that gives you the best rate (Zelda on T-Mobile pays $212 per GB) and the most expensive data plan for Zelda. While you are getting treatment with the medication, try to avoid contact with other people, as you can infect them (herpes is a contagious infection). Let me create a typical person (called Joe). Zovirax represents a new era in antiviral therapy. Joe: First, for Joe. If she sends 10,000 total, she would have to pay 10 cents for the other 9,000 messages. Response: Well, I don't know if I would like that. Response: Yes, this is true. So, let me say this person talks for 3 hours a day. First, since the data is all unorganized above, let me just show some stuff. For each, I will take a typical user and estimate how much they use and how much they pay. A) $45 for 4 GB plus $10 per GB over. What is the maximum that you could use? Now for the carriers. Data: Verizon offers two options. It has no effect on the latent virus in the nerve ganglion. This is $0.067 per minute, or $1,172 per GB (a bargain, really). It is unhealthy and can be dangerous to treat herpes or any other disease improperly. Final comments: What if you lived in a house that used water and your water company had a plan like this: for drinking water, you pay $10 per month. Next, I need to ship. He didn't sleep in the night. There exists no data demonstrating that the use of Zovirax Ointment will prevent transmission of infection to other persons. Zovirax Ointment 5% is a formulation for topical administration. Zovirax Ointment is indicated for initial infections of genital herpes and for localized HSV-1 and HSV-2 infections of the skin of immune-suppressed patients. This is the treatment for herpes as we currently know it. How will intravenous Zovirax be used? It is also used to treat infections that are caused by herpes viruses, including cold sores, chicken pox, and genital herpes.
She would have almost 5,000 minutes of overage for a total cost of $2,287. And there is little likelihood of toxicity for patients. This is $0.34 per minute, or $5,952 per GB. Since there were multiple plans, I used the ones that gave the best price. How much does it cost? Before you start treatment with the cream, do not forget to inform your personal doctor about any allergies you have to medications and whether you suffer from any kidney disease. By making voice and texting so relatively expensive, they are suppressing our ability to communicate and find the truth. How much for Joe and Zelda? The herpes virus enzyme, thymidine kinase, initiates the conversion of Zovirax to stop the replication of the virus. The ways of infection. Joe B: $.67 per. I totally just made this number. It's even possible to see Mercury - how much does it cost? If you plan a purchase of Zovirax and price is a consideration, you might try calling several pharmacies in your area for price comparisons. Does Zovirax interact with other drugs? You failed to include the adjustable rate due to multiple traffic sources. Joe on both plans and Zelda on both plans. Response: You are correct. The amount of Zovirax in a 15 g tube should be more than adequate to treat a case of initial herpes. Its unique mechanism of action allows it to attack the virus selectively with little likelihood of toxicity to the patient. Total price per GB would essentially be $5,602. You didn't consider those. Occasionally, the latent virus becomes active and causes recurrent outbreaks. What is the recommended dosage? Acyclovir is a synthetic compound that is close to guanine in its structure. If you use the whole time allowed, that would be the same as AT&T ($1,172 per GB). Joe would only use 500 minutes of this. Well, $10 for texting and $.20 for data, then $.79 for the voice data ($0.010 per minute). The recommended dosage, frequency of applications, and length of treatment should not be exceeded. Who says "super awesome"? A finger cot or rubber glove should be used when applying Zovirax to prevent autoinoculation of other body sites and transmission of infection to other persons. Zovirax is virtually inactive in normal cells. Adding MMS to the calculation would make it a bit more complicated. Zelda uses a total of .309 GB for a cost of $119.99. When the virus enters a person's organism, it causes a primary inflammatory reaction in the infected area and then lies dormant in the nervous tissue. For the same dollar with the internet plan you could get 97 MB of data. The 900-minute plan charges $0.40 for each additional minute. What is the generic name for Zovirax? From the graphs above, it seems like internet data costs $0.42 per. Then might I ask, how much does it cost to join your order? Let me put this one other way. How much does it cost to raise a sunken ship? 5 GB of data on a phone seems like quite a bit. That is like 1, maybe 2 mp3 songs. It would be super awesome if Apple made this compatible with all devices.
If I assume his texting is $10, then he pays $537,000 per GB for text data. This is $0.089 per minute, or $1,558 per GB. Joe on the unlimited plan would pay $.18 per. Sure, if you are streaming videos all day. Joe sure is getting screwed on texting. I am really going to just have to take some guesses here. I think that is crazy (but what do I know?). She would have to pay $1,860, at $0.34 per minute. OK, so here is a plot of the price per month versus the data plan. The causes of appearance. This has the same slope, but an intercept $10 less. This means she would have to buy 52 "blocks of 200 MB" for a cost of $780. If full healing has still not occurred after ten days, or if your cold sore has become very severe, consult your doctor. The blisters are often very painful. The skin tingles and itches before the cold sore appears. The tingling, burning or itching sensation, on your lips or around your nose, that you get before a cold sore develops is your reminder to use Zovirax Cold Sore Cream as soon as possible. Clinically proven - this unique cream is available to cold sore sufferers without a prescription. The new Taizé CD - laudamus. Gerko Tempelman: how God disappeared from our world/my life, and why more and more philosophers say he is back. Then continue as before. Get the individualized attention you need to ship internationally. Know your options. Zovirax Cold Sore Cream has been clinically proven to be able to prevent cold sores from appearing. Similarly, you should avoid kissing if you or your partner has active cold sores. At this stage, most people know they are about to get a cold sore. Corja Bekius and Andries Govaart, among others. If treatment with Zovirax Cold Sore Cream begins at this stage, further development of a cold sore can be prevented. Select from same-day delivery, customized solutions, less-than-truckload (LTL) service options and more. When Pro Maxima automated its freight shipping process with FedEx Ship Manager, it saw a reduction in errors and an increase in shipping savings. Apply to the affected area five times daily, about every four hours. Ideally you should always keep a tube of Zovirax Cold Sore Cream with you so that you can treat your next attack in time to be able to prevent its appearance. Squeeze a small amount of cream onto your finger. Be informed. Shipping perishables: learn if your commodities are considered perishable items, get info on temperature-controlled shipping, view regulations and more. Many people find this stage the most embarrassing and unsightly. Learn more. Shipping dangerous goods: did you know many household items, like batteries, are dangerous goods? An introduction. A scab begins to form. A new text by Huub Oosterhuis about the refugee, illustrated with works by Ton Schulten. These code snippets are offered for inspiration only, and with no assertion that they are the best approaches. That's why we are launching this project as the winter days approach.
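The per-gigabyte comparisons running through the plan discussion above are easy to reproduce. A small illustrative calculation follows; the dollar rates echo examples from the text, but the bytes-per-message and codec figures are assumptions of mine, so the outputs will not match the article's exact numbers:

# Back-of-envelope price-per-GB comparison (illustrative only).
# SMS_BYTES and VOICE_BYTES_PER_MIN are assumptions, not from the text.
BYTES_PER_GB = 1e9
SMS_BYTES = 140                   # assumed payload of one text message
VOICE_BYTES_PER_MIN = 8000 * 60   # assumed ~64 kbps voice codec

def per_gb(price_dollars, total_bytes):
    return price_dollars / (total_bytes / BYTES_PER_GB)

# $10/month texting plan, assuming 1,300 messages sent in the month
print('texting: $%.0f per GB' % per_gb(10.0, 1300 * SMS_BYTES))
# $0.067 per voice minute
print('voice:   $%.0f per GB' % per_gb(0.067, VOICE_BYTES_PER_MIN))
# $45 for 4 GB of data
print('data:    $%.2f per GB' % per_gb(45.0, 4 * BYTES_PER_GB))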
Regardless of what medication you need to order, you will find it extremely easy to find different offers if you use the Internet. We will donate all the bags to local charities across Manchester who can help distribute them throughout the night. It is together that we will manage to find an answer to the question of autism in France. Today, you can find on the Internet a range of online pharmacies offering you a faster, easier and more convenient way to order and buy almost any medication you need. Not only will you be able to compare different conditions, but you will also get the medication only from reliable providers. A Manchester restaurant has been praised after launching a project to give all of its leftover food at the end of each day to the north-west city's homeless. They are also encouraging customers to fill a small bag with winter clothes they no longer need and drop it off at one of their restaurants. VBScript, SQL queries, metaverse SQL queries: various queries to run against the metaverse and connector space tables, such as finding objects joined in one or more MAs. The environment for Internet businesses tends to change very quickly, giving you better and better conditions to get the product as well as the medication that you need to buy. Bosu Body Bar started the project by leaving brown paper bags of food on the street for homeless people in need. AVS and people wishing to become AVS: an AVS (Auxiliaire de Vie Scolaire, a school support assistant) serves as a translator between the teacher and the Asperger pupil; the partner of success at school. "No one should feel cold and lonely this winter, no one should feel hungry," they added. Our website offers you the most simplified way to find the best deal to buy different medications. ...and all people of good will. They are encouraging members of the public to "fill a bag with love, fill a bag with hope". There is no guarantee that they will work unmodified in your environment. Lawyers: to defend Aspies and their families in the proceedings they may face (particularly in cases of de-schooling). Comparing the conditions offered by different online pharmacies creates great conditions for comparison, and clearly each no-prescription online pharmacy aims to provide the most attractive conditions for its customers. Psychiatrists: the only ones who can make a diagnosis of Asperger's Syndrome; unfortunately, far too few are trained and informed in France. Where is the sense in throwing away food when a few feet away a person is starving? Events and communication professionals. "When we close the doors for the day, we promise to leave bags outside full of tasty food," they said. Thanks to this, you can compare the offers of different providers within just several seconds. That is why, if you are really looking for a good price, you should pay attention to our website to find the best deal. Just several years ago, you had only one option to buy medications when you needed them, which was buying them at a land-based pharmacy.
import numpy as np
from multiprocessing import Process
from sklearn import mixture
import time
import os
import sys

# volume data order: width > depth > height
# block order: width > depth > height
# version 1.0 only for int raw data
# by gxchen


# single gauss component
class Gauss:
    def __init__(self, weight, mean, covariance):
        self.weight_ = weight
        self.mean_ = mean              # 3D array
        self.covariance_ = covariance  # 9D array


# single bin in histogram
class Bin:
    def __init__(self, probability, gauss_count, sgmm):
        self.probability_ = probability
        self.gauss_count_ = gauss_count
        self.gausses_ = []

    def add_gauss(self, gauss):
        self.gausses_.append(gauss)


# single block
class Block:
    def __init__(self):
        self.bin_num_ = 0
        self.bins_ = []
        self.bin_indexs_ = []

    def add_bin(self, bin):
        self.bins_.append(bin)


# data_source = 'Combustion'
# width = 480
# depth = 720
# height = 112
# process_num = 4
# disk_address = 'c:/train/'
#
# src_raw_name = disk_address + data_source + '.raw'
# side = 16
# zero_block_threshold = 0.003
# block_size = side * side * side
# width_num = width / side
# depth_num = depth / side
# height_num = height / side
#
# total_num = width_num * depth_num * height_num
# max_bin_num = 128
# ubg = 4  # max component number
# restore_raw = bytearray(width * depth * height)
# np.random.seed(1)
# stride = total_num / process_num
#
# f_all_data = open(src_raw_name, 'rb')
# f_all_data.seek(0, 0)
# all_data = bytearray(f_all_data.read())
# all_hit = [0] * width * depth * height


# read the index-th block of data
def read_block(index, all_data, width, depth, width_num, depth_num, block_size, side):
    height_index = index / (width_num * depth_num)
    depth_index = (index - height_index * width_num * depth_num) / width_num
    width_index = index - height_index * width_num * depth_num - depth_index * width_num
    result_data = [0] * block_size
    for z in range(0, side):          # width
        for y in range(0, side):      # depth
            for x in range(0, side):  # height
                final_index_z = height_index * side + z
                final_index_y = depth_index * side + y
                final_index_x = width_index * side + x
                final_index = final_index_z * width * depth + final_index_y * width + final_index_x
                result_data[z * side * side + y * side + x] = all_data[final_index]
    return result_data


# train the index-th block of data
def train_single_block(block_index, block_data, block_size, max_bin_num, side, ubg):
    block = Block()
    count = [0] * max_bin_num
    train_data = []  # one list of voxel coordinates per histogram bin
    for i in range(0, max_bin_num):
        train_data.append([])
    non_zero_count = 0
    for z in range(0, side):
        for y in range(0, side):
            for x in range(0, side):
                final_index = z * side * side + y * side + x
                index = block_data[final_index] / 2
                count[index] += 1  # map to value-distribution
                train_data[index].append([x, y, z])
                if block_data[final_index] != 0:
                    non_zero_count += 1

    # train SGMM
    block.bin_num_ = 0
    if non_zero_count > int(side * side * side * 0.3):  # make sure it is not an empty block
        for bin_index in range(0, max_bin_num):
            if count[bin_index] > 0:
                block.bin_indexs_.append(bin_index)
                block.bin_num_ += 1
        for bin_count in range(0, block.bin_num_):
            real_index = block.bin_indexs_[bin_count]
            # if train_data[i] is empty, skip it (raise this threshold to speed up)
            if len(train_data[real_index]) <= 0:
                continue
            g = mixture.GaussianMixture(n_components=1, tol=1e-5, max_iter=5000)
            g.fit(train_data[real_index])
            max_bic = g.bic(np.array(train_data[real_index]))
            final_g = g
            final_component_num = 1
            max_num = min(ubg, len(train_data[real_index]))
            for component_num in range(2, max_num + 1):
                g = mixture.GaussianMixture(n_components=component_num, tol=1e-5, max_iter=5000)
                g.fit(train_data[real_index])
                bic_temp = g.bic(np.array(train_data[real_index]))
                if block_index == 456:
                    print component_num, bic_temp
                if bic_temp < max_bic:
                    final_g = g
                    final_component_num = component_num
                    max_bic = bic_temp
            # already got the final SGMM for bin i
            bin = Bin(1.0 * count[real_index] / block_size, final_component_num, final_g)
            for i in range(0, final_component_num):
                gauss = Gauss(final_g.weights_[i], final_g.means_[i], final_g.covariances_[i])
                bin.add_gauss(gauss)
            block.add_bin(bin)
    print("training block index " + str(block_index) + " done, bin_num_ = " + str(block.bin_num_))
    return block


# make sure the value is not too small, else it will result in wrong input in the C++ program
def check_value(value_in):
    if value_in < 1.0e-40:
        return 1.0e-40
    else:
        return value_in


# train a part of the original data
# and save the SGMM arguments into a txt file
def train_blocks(disk_address, data_source, block_num, index, stride, src_raw_name,
                 all_data, width, depth, width_num, depth_num, max_bin_num,
                 block_size, side, ubg):
    block_sgmm = [Block()] * stride
    end_block = (index + 1) * stride
    end_index = stride
    with open(src_raw_name, 'rb') as f_src:
        for i in range(0, stride):
            if index * stride + i >= block_num:
                end_block = index * stride + i
                end_index = i
                break
            block_data = read_block(index * stride + i, all_data, width, depth,
                                    width_num, depth_num, block_size, side)
            block_sgmm[i] = train_single_block(index * stride + i, block_data,
                                               block_size, max_bin_num, side, ubg)

    sgmm_output = disk_address + data_source + '_SGMM_Result_' + str(index) + '.txt'  # only sgmm arguments
    # store block_sgmm in a txt file
    with open(sgmm_output, "w") as f_out:
        for i in range(0, end_index):
            # f_out.write(str(index * stride + i) + '###\n')  # test only
            idx = index * stride + i
            if idx == 20 or idx == 13 or idx == 6 or idx == 0:
                print("block_index:" + str(idx) + " bin num:" + str(block_sgmm[i].bin_num_))
            f_out.write(str(block_sgmm[i].bin_num_) + '\n')
            for bin_count in range(0, block_sgmm[i].bin_num_):
                real_bin_index = block_sgmm[i].bin_indexs_[bin_count]
                b = block_sgmm[i].bins_[bin_count]
                f_out.write(str(real_bin_index) + ' ' +
                            str(check_value(b.probability_)) + ' ' +
                            str(b.gauss_count_) + '\n')
                for k in range(0, b.gauss_count_):
                    gauss = b.gausses_[k]
                    f_out.write(str(check_value(gauss.weight_)) + '\n')
                    f_out.write(str(check_value(gauss.mean_[0])) + ' ' +
                                str(check_value(gauss.mean_[1])) + ' ' +
                                str(check_value(gauss.mean_[2])) + '\n')
                    # upper triangle of the symmetric 3x3 covariance matrix
                    f_out.write(str(check_value(gauss.covariance_[0][0])) + ' ' +
                                str(check_value(gauss.covariance_[0][1])) + ' ' +
                                str(check_value(gauss.covariance_[0][2])) + '\n')
                    f_out.write(str(check_value(gauss.covariance_[1][1])) + ' ' +
                                str(check_value(gauss.covariance_[1][2])) + '\n')
                    f_out.write(str(check_value(gauss.covariance_[2][2])) + '\n')
    print("----------IN FILE:" + str(index) + " training and saving blocks from " +
          str(index * stride) + " to " + str(end_block) + " done")


# train all blocks; parallel computing, assigned across `process_num` processes
if __name__ == '__main__':
    disk_address = ""
    data_source = ""
    width = 0
    depth = 0
    height = 0
    process_num = 0
    side = 0
    if len(sys.argv) == 1:
        disk_address = raw_input("input disk address:")
        data_source = raw_input('input the data name:')
        width = int(raw_input('width:'))
        depth = int(raw_input('depth:'))
        height = int(raw_input('height:'))
        side = int(raw_input('side:'))
        process_num = int(raw_input('input the process num (must be the divisor of the block number):'))
    else:
        disk_address = sys.argv[1]
        data_source = sys.argv[2]
        width = int(sys.argv[3])
        depth = int(sys.argv[4])
        height = int(sys.argv[5])
        side = int(sys.argv[6])
        process_num = int(sys.argv[7])

    if not os.path.exists(disk_address + data_source + ".raw"):
        print('file doesn\'t exist')
        exit(0)

    print("disk address:" + disk_address)
    print("data name:" + data_source)
    print("width:" + str(width) + " depth:" + str(depth) + " height:" + str(height) + " side:" + str(side))
    print("process num (file num):" + str(process_num))

    src_raw_name = disk_address + data_source + '.raw'
    zero_block_threshold = 0.003
    block_size = side * side * side
    width_num = width / side
    depth_num = depth / side
    height_num = height / side
    total_num = width_num * depth_num * height_num
    max_bin_num = 128
    ubg = 4  # max component number
    restore_raw = bytearray(width * depth * height)
    np.random.seed(1)
    stride = (total_num + process_num - 1) / process_num

    f_all_data = open(src_raw_name, 'rb')
    f_all_data.seek(0, 0)
    all_data = bytearray(f_all_data.read())
    all_hit = [0] * width * depth * height

    begin_time = time.localtime(time.time())
    cpu_time_begin = time.clock()
    proc_record = []
    for i in range(0, process_num):  # roughly a block every 3 seconds
        p = Process(target=train_blocks,
                    args=(disk_address, data_source, total_num, i, stride,
                          src_raw_name, all_data, width, depth, width_num,
                          depth_num, max_bin_num, block_size, side, ubg))
        p.start()
        proc_record.append(p)
    for p in proc_record:
        p.join()

    print("training SGMM done.")
    cpu_time_end = time.clock()
    print time.strftime('Training began at %Y-%m-%d %H:%M:%S', begin_time)
    print time.strftime('Training finished at %Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print("cpu time cost in python :" + str(cpu_time_end - cpu_time_begin) + "s.")

    # with open(src_raw_name, "rb") as f_src:
    #     single_block_data = read_block(3500)
    #     train_single_block(3500, single_block_data)
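The heart of train_single_block is choosing the number of Gaussian components per bin by minimizing the Bayesian Information Criterion (BIC). A stripped-down, standalone sketch of that selection loop, using synthetic data instead of voxel coordinates:

# Standalone sketch of the BIC-based component selection used above.
# The samples are synthetic; in the real script they are the voxel
# coordinates that fall into one histogram bin.
import numpy as np
from sklearn import mixture

rng = np.random.RandomState(1)
samples = np.vstack([rng.normal(0, 1, (50, 3)),
                     rng.normal(5, 1, (30, 3))])

best_g, best_bic = None, np.inf
for k in range(1, 5):  # ubg = 4 in the script above
    g = mixture.GaussianMixture(n_components=k, tol=1e-5, max_iter=5000)
    g.fit(samples)
    bic = g.bic(samples)
    if bic < best_bic:  # lower BIC = better fit after complexity penalty
        best_g, best_bic = g, bic

print('chosen components: %d, BIC: %.1f' % (best_g.n_components, best_bic))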
Two years ago I was in Ho Chi Minh City for Chinese New Year, on my way through Vietnam heading for Cambodia. Surfing the net today I found these photos from BBC Asia illustrating beautifully the colour and passion with which people celebrate throughout Asia: the illuminated red sails of the Sydney Opera House, people rushing to get home to family, along with many others. I was immersed immediately in those humid days, with heady scents surrounding me like a warm hug. This year celebrates the Year of the Pig, the last animal in the Chinese lunar calendar. The pig is a happy and friendly creature with a sunny disposition, and these traits, it is believed, will be part of 2019: a year full of optimism from this earth pig, which was last in the farmyard some 60 years ago. So if you were born in 1959 you will be celebrating a big birthday; congratulations! It is thought that the earth pig has a kindness about it and is honest with all those he comes into contact with. Along with a gentle nature, he has many friends and supporters. 2019 will be a year with the possibility of new wealth, not necessarily financial, more spiritual wealth. Those that are time-poor will be repaid this year, finding new friendships along the way. If you are thinking of positive colours with a nod to this fun creature, choose yellow, grey, brown and gold. Lucky numbers this year are 2, 5 and 8; for those doing the lottery or moving house, could using them change your fortune? Who knows, but I wish you luck for the year ahead. Happy Year of the Pig! Here's how to say it.
""" csvfilter.py Copyright (c) 2013 Lorcan Coyle, http://lorcancoyle.org License: MIT License Documentation: https://github.com/lorcan/CSVJoin """ import argparse import csv import sys parser = argparse.ArgumentParser(description='Takes a CSV files a column header and value and generates a new file that does not contain any rows with that column value') parser.add_argument("inputfile", help="This is the CSV file to be processed.") parser.add_argument("columnName", help="This is the name of the header to be filtered") parser.add_argument("columnValue", help="This is the value used for filtering") parser.add_argument("outputfile", help="This is the name of the file where the output is to be put.") args = parser.parse_args() outputfile = csv.writer(open(args.outputfile, 'w')) filterCount = 0 with open(args.inputfile, 'r') as csvfile: reader = csv.reader(csvfile) first = True filterColumnNumber = -1 for row in reader: if first: first = False if args.columnName not in row: print "There is no column called " + args.columnName + " in the input files's header " + str(row) + ". Unable to filter. Exiting." sys.exit() filterColumnNumber = row.index(args.columnName) outputfile.writerow(row) else: if(args.columnValue == row[filterColumnNumber]): filterCount = filterCount + 1 # Do nothing else: outputfile.writerow(row) print "Filtered " + str(filterCount) + " records."
I am trying to acquire a copy of this product for review. If you have one available, please contact me.

I recently acquired a copy of this item and would be happy to send it to you. I would be happy to make some form of exchange. Could you email me at my hotmail account? ravencrowking at dot com?

Did my package ever arrive?

I have, and I really do appreciate it. They arrived while I was moving, and I haven't gotten to them yet, but I will!
import weakref

import django
from django.db import models
from django.db.models import F, Q
from django.db.models.signals import (post_init, m2m_changed, post_delete,
                                      post_save)
from django.utils import six

try:
    from django.db.models.expressions import Combinable
    QueryExpressionType = Combinable
except ImportError:
    from django.db.models.expressions import ExpressionNode
    QueryExpressionType = ExpressionNode


class CounterField(models.IntegerField):
    """A field that provides atomic counter updating and smart initialization.

    The CounterField makes it easy to atomically update an integer,
    incrementing or decrementing it, without race conditions or conflicts.
    It can update a single instance at a time, or a batch of objects at once.

    CounterField is useful for storing counts of objects, reducing the number
    of queries performed. This requires that the calling code properly
    increments or decrements at all the right times, of course.

    This takes an optional ``initializer`` parameter that, if provided, can
    be used to auto-populate the field the first time the model instance is
    loaded, perhaps based on querying a number of related objects. The value
    passed to ``initializer`` must be a function taking the model instance
    as a parameter, and must return an integer or None. If it returns None,
    the counter will not be updated or saved.

    The model instance will gain four new functions:

        * ``increment_{field_name}`` - Atomically increment by one.
        * ``decrement_{field_name}`` - Atomically decrement by one.
        * ``reload_{field_name}`` - Reload the value in this instance from
          the database.
        * ``reinit_{field_name}`` - Re-initializes the stored field using the
          initializer function.

    The field on the class (not the instance) provides two functions for
    batch-updating models:

        * ``increment`` - Takes a queryset and increments this field for
          each object.
        * ``decrement`` - Takes a queryset and decrements this field for
          each object.
    """

    @classmethod
    def increment_many(cls, model_instance, values, reload_object=True):
        """Increments several fields on a model instance at once.

        This takes a model instance and dictionary of fields to values,
        and will increment each of those fields by that value.

        If reload_object is True, then the fields on the instance will
        be reloaded to reflect the current values.
        """
        cls._update_values(model_instance, values, reload_object, 1)

    @classmethod
    def decrement_many(cls, model_instance, values, reload_object=True):
        """Decrements several fields on a model instance at once.

        This takes a model instance and dictionary of fields to values,
        and will decrement each of those fields by that value.

        If reload_object is True, then the fields on the instance will
        be reloaded to reflect the current values.
        """
        cls._update_values(model_instance, values, reload_object, -1)

    @classmethod
    def _update_values(cls, model_instance, values, reload_object, multiplier):
        update_values = {}

        for attname, value in six.iteritems(values):
            if value != 0:
                update_values[attname] = F(attname) + value * multiplier

        cls._set_values(model_instance, update_values, reload_object)

    @classmethod
    def _set_values(cls, model_instance, values, reload_object=True):
        if values:
            queryset = model_instance.__class__.objects.filter(
                pk=model_instance.pk)
            queryset.update(**values)

            if reload_object:
                cls._reload_model_instance(model_instance,
                                           six.iterkeys(values))

    @classmethod
    def _reload_model_instance(cls, model_instance, attnames):
        """Reloads the value in this instance from the database."""
        q = model_instance.__class__.objects.filter(pk=model_instance.pk)
        values = q.values(*attnames)[0]

        for attname, value in six.iteritems(values):
            setattr(model_instance, attname, value)

    def __init__(self, verbose_name=None, name=None, initializer=None,
                 default=None, **kwargs):
        kwargs.update({
            'blank': True,
            'null': True,
        })

        super(CounterField, self).__init__(verbose_name, name,
                                           default=default, **kwargs)

        self._initializer = initializer
        self._locks = {}

    def increment(self, queryset, increment_by=1):
        """Increments this field on every object in the provided queryset."""
        queryset.update(**{self.attname: F(self.attname) + increment_by})

    def decrement(self, queryset, decrement_by=1):
        """Decrements this field on every object in the provided queryset."""
        queryset.update(**{self.attname: F(self.attname) - decrement_by})

    def contribute_to_class(self, cls, name):
        def _increment(model_instance, *args, **kwargs):
            self._increment(model_instance, *args, **kwargs)

        def _decrement(model_instance, *args, **kwargs):
            self._decrement(model_instance, *args, **kwargs)

        def _reload(model_instance):
            self._reload(model_instance)

        def _reinit(model_instance):
            self._reinit(model_instance)

        super(CounterField, self).contribute_to_class(cls, name)

        setattr(cls, 'increment_%s' % self.name, _increment)
        setattr(cls, 'decrement_%s' % self.name, _decrement)
        setattr(cls, 'reload_%s' % self.name, _reload)
        setattr(cls, 'reinit_%s' % self.name, _reinit)
        setattr(cls, self.attname, self)

        post_init.connect(self._post_init, sender=cls)

    def _increment(self, model_instance, reload_object=True, increment_by=1):
        """Increments this field by one."""
        if increment_by != 0:
            cls = model_instance.__class__
            self.increment(cls.objects.filter(pk=model_instance.pk),
                           increment_by)

            if reload_object:
                self._reload(model_instance)

    def _decrement(self, model_instance, reload_object=True, decrement_by=1):
        """Decrements this field by one."""
        if decrement_by != 0:
            cls = model_instance.__class__
            self.decrement(cls.objects.filter(pk=model_instance.pk),
                           decrement_by)

            if reload_object:
                self._reload(model_instance)

    def _reload(self, model_instance):
        """Reloads the value in this instance from the database."""
        self._reload_model_instance(model_instance, [self.attname])

    def _reinit(self, model_instance):
        """Re-initializes the value in the database from the initializer."""
        if not (model_instance.pk or self._initializer or
                six.callable(self._initializer)):
            # We don't want to end up defaulting this to 0 if creating a
            # new instance unless an initializer is provided. Instead,
            # we'll want to handle this the next time the object is
            # accessed.
            return

        value = 0

        if self._initializer:
            if isinstance(self._initializer, QueryExpressionType):
                value = self._initializer
            elif six.callable(self._initializer):
                model_instance_id = id(model_instance)
                self._locks[model_instance_id] = 1
                value = self._initializer(model_instance)
                del self._locks[model_instance_id]

        if value is not None:
            is_expr = isinstance(value, QueryExpressionType)

            if is_expr and not model_instance.pk:
                value = 0
                is_expr = False

            if is_expr:
                cls = model_instance.__class__
                cls.objects.filter(pk=model_instance.pk).update(**{
                    self.attname: value,
                })

                self._reload_model_instance(model_instance, [self.attname])
            else:
                setattr(model_instance, self.attname, value)

                if model_instance.pk:
                    model_instance.save(update_fields=[self.attname])

    def _post_init(self, instance=None, **kwargs):
        # Prevent the possibility of recursive lookups where this
        # same CounterField on this same instance tries to initialize
        # more than once. In this case, this will have the updated
        # value shortly.
        if instance:
            instance_id = id(instance)

            if instance_id not in self._locks:
                self._do_post_init(instance)

    def _do_post_init(self, instance):
        value = self.value_from_object(instance)

        if value is None:
            reinit = getattr(instance, 'reinit_%s' % self.name)
            reinit()


class RelationCounterField(CounterField):
    """A field that provides an atomic count of a relation.

    RelationCounterField is a specialization of CounterField that tracks
    how many objects there are on the other side of a ManyToManyField or
    ForeignKey relation.

    RelationCounterField takes the name of a relation (either a field name,
    for a forward ManyToManyField relation, or the "related_name" for
    the reverse relation of another model's ForeignKey or ManyToManyField).
    (Note that using a forward ForeignKey relation is considered invalid,
    as the count can only be 1 or 0.)

    The counter will be initialized with the number of objects on the
    other side of the relation, and this will be kept updated so long as
    all updates to the table are made using standard create/save/delete
    operations on models.

    Note that updating a relation outside of a model's regular API (such as
    through raw SQL or something like an update() call) will cause the
    counters to get out of sync. They would then need to be reset using
    ``reinit_{field_name}``.
    """

    # Stores state across all instances of a RelationCounterField.
    #
    # Django doesn't make it easy to track updates to the other side of a
    # relation, meaning we have to do it ourselves. This dictionary will
    # weakly track InstanceState objects (which are tied to the lifecycle of
    # a particular model instance). These objects are used to look up model
    # instances and their RelationCounterFields, given a model name, model
    # instance ID, and a relation name.
    _instance_states = weakref.WeakValueDictionary()

    # Stores instances we're tracking that haven't yet been saved.
    #
    # An unsaved instance may never be saved. We want to keep tabs on it
    # so we can disconnect any signal handlers if it ever falls out of
    # scope.
    #
    # Note that we're using a plain dictionary here, since we need to
    # control the weak references ourselves.
    _unsaved_instances = {}

    # Most of the hard work really lives in RelationTracker below. Here, we
    # store all registered instances of RelationTracker. There will be one
    # per model_cls/relation_name pair.
    _relation_trackers = {}

    class InstanceState(object):
        """Tracks state for a RelationCounterField association.

        State instances are bound to the lifecycle of a model instance.
        They keep track of the model instance (using a weak reference) and
        all RelationCounterFields tied to the relation name provided.

        These are used for looking up the proper instance and
        RelationCounterFields on the other end of a reverse relation, given
        a model, relation name, and IDs, through the _instance_states
        dictionary.
        """

        def __init__(self, model_instance, fields):
            self.model_instance_ref = weakref.ref(model_instance)
            self.fields = fields
            self.to_clear = set()

        @property
        def model_instance(self):
            return self.model_instance_ref()

        def reinit_fields(self):
            """Reinitializes all associated fields' counters."""
            model_instance = self.model_instance

            for field in self.fields:
                field._reinit(model_instance)

        def increment_fields(self, by=1):
            """Increments all associated fields' counters."""
            RelationCounterField.increment_many(
                self.model_instance,
                dict([(field.attname, by) for field in self.fields]))

        def decrement_fields(self, by=1):
            """Decrements all associated fields' counters."""
            RelationCounterField.decrement_many(
                self.model_instance,
                dict([(field.attname, by) for field in self.fields]))

        def zero_fields(self):
            """Zeros out all associated fields' counters."""
            RelationCounterField._set_values(
                self.model_instance,
                dict([(field.attname, 0) for field in self.fields]))

        def reload_fields(self):
            """Reloads all associated fields' counters."""
            RelationCounterField._reload_model_instance(
                self.model_instance,
                [field.attname for field in self.fields])

        def __repr__(self):
            return '<RelationCounterField.InstanceState for %s.pk=%s>' % (
                self.model_instance.__class__.__name__,
                self.model_instance.pk)

    class RelationTracker(object):
        """Tracks relations and updates state for all affected CounterFields.

        This class is responsible for all the hard work of updating
        RelationCounterFields referring to a relation, based on updates to
        that relation. It's really the meat of RelationCounterField.

        Each RelationTracker is responsible for a given model/relation name
        pairing, across all instances of a model and across all
        RelationCounterFields following that relation name.

        The main reason the code lives here instead of in each
        RelationCounterField is to keep state better in sync and to ensure
        we're only ever dealing with one set of queries per relation name.
        We're also simplifying signal registration, helping to make things
        less error-prone.
        """

        def __init__(self, model_cls, rel_field_name):
            self._rel_field_name = rel_field_name

            if django.VERSION >= (1, 7):
                # Django >= 1.7
                self._rel_field = model_cls._meta.get_field(rel_field_name)
                rel_model = self._rel_field.model
                is_rel_direct = (not self._rel_field.auto_created or
                                 self._rel_field.concrete)
                is_m2m = self._rel_field.many_to_many
            else:
                # Django < 1.7
                self._rel_field, rel_model, is_rel_direct, is_m2m = \
                    model_cls._meta.get_field_by_name(rel_field_name)

            self._is_rel_reverse = not is_rel_direct

            if not is_m2m and is_rel_direct:
                # This combination doesn't make any sense. There's only ever
                # one item on this side, so no point in counting. Let's just
                # complain about it.
                raise ValueError(
                    "RelationCounterField cannot work with the forward end "
                    "of a ForeignKey ('%s')" % rel_field_name)

            dispatch_uid = '%s-%s.%s-related-save' % (
                id(self),
                self.__class__.__module__,
                self.__class__.__name__)

            if is_m2m:
                # This is going to be one end or the other of a
                # ManyToManyField relation.
                if is_rel_direct:
                    # This is a ManyToManyField, and we can get the 'rel'
                    # attribute through it.
                    m2m_field = self._rel_field
                    self._related_name = m2m_field.rel.related_name
                else:
                    # This is a RelatedObject.
We need to get the field through # this. m2m_field = self._rel_field.field self._related_name = m2m_field.attname # Listen for all M2M updates on the through table for this # ManyToManyField. Unfortunately, we can't look at a # particular instance, but we'll use state tracking to do the # necessary lookups and updates in the handler. m2m_changed.connect( self._on_m2m_changed, weak=False, sender=m2m_field.rel.through, dispatch_uid=dispatch_uid) else: # This is a ForeignKey or similar. It must be the reverse end. assert not is_rel_direct model = self._get_rel_field_related_model(self._rel_field) self._related_name = self._rel_field.field.attname # Listen for deletions and saves on that model type. In the # handler, we'll look up state for the other end of the # relation (the side owning this RelationCounterField), so that # we can update the counts. # # Unfortunately, we can't listen on the particular instance, so # we use the state tracking. post_delete.connect( self._on_related_delete, weak=False, sender=model, dispatch_uid=dispatch_uid) post_save.connect( self._on_related_save, weak=False, sender=model, dispatch_uid=dispatch_uid) def _on_m2m_changed(self, instance, action, reverse, model, pk_set, **kwargs): """Handler for when a M2M relation has been updated. This will figure out the necessary operations that may need to be performed, given the update. For post_add/post_remove operations, it's pretty simple. We see if there are any instances (by way of stored state) for any of the affected IDs, and we re-initialize them. For clear operations, it's more tricky. We have to fetch all instances on the other side of the relation before any database changes are made, cache them in the InstanceState, and then update them all in post_clear. """ if reverse != self._is_rel_reverse: # This doesn't match the direction we're paying attention to. # Ignore it. return is_post_clear = (action == 'post_clear') is_post_add = (action == 'post_add') is_post_remove = (action == 'post_remove') if is_post_clear or is_post_add or is_post_remove: state = RelationCounterField._get_state( instance.__class__, instance.pk, self._rel_field_name) if state: if is_post_add: state.increment_fields(by=len(pk_set)) elif is_post_remove: state.decrement_fields(by=len(pk_set)) elif is_post_clear: state.zero_fields() if not pk_set and is_post_clear: # See the note below for 'pre_clear' for an explanation # of why we're doing this. pk_set = state.to_clear state.to_clear = set() if pk_set: # If any of the models have their own # RelationCounterFields, make sure they've been updated to # handle this side of things. if is_post_add: update_by = 1 else: update_by = -1 # Update all RelationCounterFields on the other side of the # relation that are referencing this relation. self._update_counts(model, pk_set, '_related_name', update_by) for pk in pk_set: state = RelationCounterField._get_state( model, pk, self._related_name) if state: state.reload_fields() elif action == 'pre_clear': # m2m_changed doesn't provide any information on affected IDs # for clear events (pre or post). We can, however, look up # these IDs ourselves, and if they match any existing # instances, we can re-initialize their counters in post_clear # above. # # We do this by fetching the IDs (without instantiating new # models) and storing it in the associated InstanceState. We'll # use those IDs above in the post_clear handler. 
state = RelationCounterField._get_state( instance.__class__, instance.pk, self._rel_field_name) if state: mgr = getattr(instance, self._rel_field_name) state.to_clear.update(mgr.values_list('pk', flat=True)) def _on_related_delete(self, instance, **kwargs): """Handler for when a ForeignKey relation is deleted. This will check if a model entry that has a ForeignKey relation to this field's parent model entry has been deleted from the database. If so, any associated counter fields on this end will be decremented. """ state = self._get_reverse_foreign_key_state(instance) if state: state.decrement_fields() else: self._update_unloaded_fkey_rel_counts(instance, -1) def _on_related_save(self, instance=None, created=False, raw=False, **kwargs): """Handler for when a ForeignKey relation is created. This will check if a model entry has been created that has a ForeignKey relation to this field's parent model entry. If so, any associated counter fields on this end will be decremented. """ if raw or not created: return state = self._get_reverse_foreign_key_state(instance) if state: state.increment_fields() else: self._update_unloaded_fkey_rel_counts(instance, 1) def _update_unloaded_fkey_rel_counts(self, instance, by): """Updates unloaded model entry counters for a ForeignKey relation. This will get the ID of the model being referenced by the matching ForeignKey in the provided instance. If set, it will update all RelationCounterFields on that model that are tracking the ForeignKey. """ rel_pk = getattr(instance, self._rel_field.field.attname) if rel_pk is not None: self._update_counts( self._get_rel_field_parent_model(self._rel_field), [rel_pk], '_rel_field_name', by) def _update_counts(self, model_cls, pks, rel_attname, update_by): """Updates counts on all model entries matching the given criteria. This will update counts on all RelationCounterFields on all entries of the given model in the database that are tracking the given relation. """ values = dict([ (field.attname, F(field.attname) + update_by) for field in model_cls._meta.local_fields if (isinstance(field, RelationCounterField) and (getattr(field._relation_tracker, rel_attname) == self._rel_field_name)) ]) if values: if len(pks) == 1: q = Q(pk=list(pks)[0]) else: q = Q(pk__in=pks) model_cls.objects.filter(q).update(**values) def _get_reverse_foreign_key_state(self, instance): """Return an InstanceState for the other end of a ForeignKey. This is used when listening to changes on models that establish a ForeignKey to this counter field's parent model. Given the instance on that end, we can get the state for this end. """ return RelationCounterField._get_state( self._get_rel_field_parent_model(self._rel_field), getattr(instance, self._rel_field.field.attname), self._rel_field_name) def _get_rel_field_parent_model(self, rel_field): """Return the model owning a relation field. This provides compatibility across different versions of Django. """ if hasattr(rel_field, 'parent_model'): # Django < 1.7 return rel_field.parent_model else: # Django >= 1.7 return rel_field.model def _get_rel_field_related_model(self, rel_field): """Return the model on the other side of a relation field. This provides compatibility across different versions of Django. """ if hasattr(rel_field, 'related_model'): # Django >= 1.7 return rel_field.related_model else: # Django < 1.7 return rel_field.model @classmethod def _reset_state(cls, instance): """Resets state for an instance. This will clear away any state tied to a particular instance ID. 
It's used to ensure that any old, removed entries (say, from a previous unit test) are cleared away before storing new state. """ for key, state in list(six.iteritems(cls._instance_states)): if (state.model_instance.__class__ is instance.__class__ and state.model_instance.pk == instance.pk): del cls._instance_states[key] @classmethod def _store_state(cls, instance, field): """Stores state for a model instance and field. This constructs an InstanceState instance for the given model instance and RelationCounterField. It then associates it with the model instance and stores a weak reference to it in _instance_states. """ assert instance.pk is not None key = (instance.__class__, instance.pk, field._rel_field_name) if key in cls._instance_states: cls._instance_states[key].fields.append(field) else: state = cls.InstanceState(instance, [field]) setattr(instance, '_%s_state' % field.attname, state) cls._instance_states[key] = state @classmethod def _get_state(cls, model_cls, instance_id, rel_field_name): """Returns an InstanceState instance for the given parameters. If no InstanceState instance can be found that matches the parameters, None will be returned. """ return cls._instance_states.get( (model_cls, instance_id, rel_field_name)) def __init__(self, rel_field_name=None, *args, **kwargs): def _initializer(model_instance): if model_instance.pk: return getattr(model_instance, rel_field_name).count() else: return 0 kwargs['initializer'] = _initializer super(RelationCounterField, self).__init__(*args, **kwargs) self._rel_field_name = rel_field_name self._relation_tracker = None def _do_post_init(self, instance): """Handles initialization of an instance of the parent model. This will begin the process of storing state about the model instance and listening to signals coming from the model on the other end of the relation. """ super(RelationCounterField, self)._do_post_init(instance) cls = instance.__class__ # We may not have a ID yet on the instance (as it may be a # newly-created instance not yet saved to the database). In this case, # we need to listen for the first save before storing the state. if instance.pk is None: instance_id = id(instance) dispatch_uid = '%s-%s.%s-first-save' % ( instance_id, self.__class__.__module__, self.__class__.__name__) post_save.connect( lambda **kwargs: self._on_first_save( instance_id, dispatch_uid=dispatch_uid, **kwargs), weak=False, sender=cls, dispatch_uid=dispatch_uid) self._unsaved_instances[instance_id] = weakref.ref( instance, lambda *args, **kwargs: self._on_unsaved_instance_destroyed( cls, instance_id, dispatch_uid)) else: RelationCounterField._store_state(instance, self) if not self._relation_tracker: key = (cls, self._rel_field_name) self._relation_tracker = \ RelationCounterField._relation_trackers.get(key) if not self._relation_tracker: self._relation_tracker = \ self.RelationTracker(cls, self._rel_field_name) RelationCounterField._relation_trackers[key] = \ self._relation_tracker def _on_first_save(self, expected_instance_id, instance, dispatch_uid, created=False, **kwargs): """Handler for the first save on a newly created instance. This will disconnect the signal and store the state on the instance. """ if id(instance) == expected_instance_id: assert created # Stop listening immediately for any new signals here. # The Signal stuff deals with thread locks, so we shouldn't # have to worry about reaching any of this twice. 
            post_save.disconnect(sender=instance.__class__,
                                 dispatch_uid=dispatch_uid)

            cls = self.__class__

            # Since this is a new row in the database (that is, the model
            # instance has been saved for the very first time), we need to
            # flush any existing state.
            #
            # The reason is that we may be running in a unit test situation,
            # or are dealing with code that deleted an entry and then saved
            # a new one with the old entry's PK explicitly assigned. Using
            # the old state will just cause problems.
            cls._reset_state(instance)

            # Now we can register each RelationCounterField on the instance.
            for field in instance.__class__._meta.local_fields:
                if isinstance(field, cls):
                    cls._store_state(instance, field)

    def _on_unsaved_instance_destroyed(self, cls, instance_id, dispatch_uid):
        """Handler for when an unsaved instance is destroyed.

        An unsaved instance would still have a signal connection set. We
        need to disconnect it to keep that connection from staying in memory
        indefinitely.
        """
        post_save.disconnect(sender=cls, dispatch_uid=dispatch_uid)

        del self._unsaved_instances[instance_id]
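# ---------------------------------------------------------------------------
# Usage sketch (an illustration added here, not part of the original module).
# The Thread/Comment models below are hypothetical; only the CounterField /
# RelationCounterField APIs defined above are assumed.

class Thread(models.Model):
    # Counts the Comment rows pointing here through the 'comments' reverse
    # relation; kept in sync by the signal handlers that RelationTracker
    # registers above.
    comment_count = RelationCounterField('comments')


class Comment(models.Model):
    thread = models.ForeignKey(Thread, related_name='comments')


# thread = Thread.objects.create()
# Comment.objects.create(thread=thread)   # comment_count becomes 1
# thread.increment_comment_count()        # atomic UPDATE ... SET n = n + 1
# thread.reinit_comment_count()           # recount from the database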
Rite Aid Corporation (NYSE:RAD) announced today that Mike Podgurski, Rite Aid’s vice president of pharmacy services, has been named the 2015 Honorary President by the National Association of Boards of Pharmacy (NABP). The honor is awarded each year to an individual in recognition of their commitment to supporting the Association’s mission and goals, including protecting public health and promoting the Association’s initiatives.

Podgurski, a licensed pharmacist with more than 40 years’ experience, was formally recognized during NABP’s Annual Awards Dinner yesterday, the culminating event of the Association’s 111th Annual Meeting in New Orleans. In presenting the award, the Association recognized Podgurski’s ongoing commitment through his participation on numerous NABP and industry committees and task forces, including NABP’s Task Force on Pharmacy Licensure Standards, its Community Pharmacy Accreditation Program Steering Committee and its Task Force on Pharmacy Automation; the American Pharmacists Association’s Work Group on the Future of Pharmacy; and the Food and Drug Administration’s Risk Communication Advisory Committee. He was also involved with the White House Office of National Drug Control Policy Roundtable on Health Information Technology and Prescription Drug Abuse, and with the Prescription Drug Monitoring Program Work Group of the Office of the National Coordinator for Health Information Technology and the Substance Abuse and Mental Health Services Administration.

Podgurski joined Rite Aid in 1987 through the Company’s acquisition of SupeRx. During his nearly 30-year tenure at Rite Aid, he has held a variety of positions covering several functional areas, including pharmacy operations, pharmacy services, pharmacy development, government affairs and third-party administration. Podgurski was named to his current role in June 2007.

A past member and officer of the Pennsylvania State Board of Pharmacy, which he served for 15 years, Podgurski is a member of several professional organizations, including the American Pharmacists Association; the National Community Pharmacists Association; the National Association of Chain Drug Stores; the American Society for Automation in Pharmacy and the Pennsylvania Pharmacists Association. Podgurski earned his bachelor of science degree in pharmacy from West Virginia University.
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.

import better_exchook
better_exchook.install()

import sys, os, gc

def step():
    gc.collect()
    os.system("ps up %i" % os.getpid())
    #print "\npress enter to continue"
    #sys.stdin.readline()

def progr():
    sys.stdout.write(".")
    sys.stdout.flush()

def getFileList(n):
    from RandomFileQueue import RandomFileQueue
    fileQueue = RandomFileQueue(
        rootdir=os.path.expanduser("~/Music"),
        fileexts=["mp3", "ogg", "flac", "wma"])
    return [fileQueue.getNextFile() for i in xrange(n)]

N = 10
files = getFileList(N)

from pprint import pprint
pprint(files)

import musicplayer
print "imported"
step()

for i in xrange(N):
    musicplayer.createPlayer()
print "after createPlayer"
step()

class Song:
    def __init__(self, fn):
        self.url = fn
        self.f = open(fn)

    def readPacket(self, bufSize):
        s = self.f.read(bufSize)
        return s

    def seekRaw(self, offset, whence):
        r = self.f.seek(offset, whence)
        return self.f.tell()

for f in files:
    musicplayer.getMetadata(Song(f))
    progr()
print "after getMetadata"
step()

for f in files:
    musicplayer.calcAcoustIdFingerprint(Song(f))
    progr()
print "after calcAcoustIdFingerprint"
step()

for f in files:
    musicplayer.calcBitmapThumbnail(Song(f))
    progr()
print "after calcBitmapThumbnail"
step()
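# ---------------------------------------------------------------------------
# Optional addition (a sketch, not part of the original script): sample the
# resident set size programmatically instead of eyeballing the `ps up`
# output, so growth between stages can be printed as a delta. Assumes a Unix
# platform; note ru_maxrss is reported in kB on Linux but in bytes on OS X.

import resource

_last_rss = [0]

def rss_step(stage):
    gc.collect()
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    print "%s: max RSS %i (delta %i)" % (stage, rss, rss - _last_rss[0])
    _last_rss[0] = rss

# e.g. replace a step() call with: rss_step("after getMetadata")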
""" Simple linear regression example in TensorFlow This program tries to predict the number of thefts from the number of fire in the city of Chicago Author: Chip Huyen Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research" cs20si.stanford.edu """ import os os.environ['TF_CPP_MIN_LOG_LEVEL']='2' import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import xlrd import utils DATA_FILE = 'data/fire_theft.xls' # Phase 1: Assemble the graph # Step 1: read in data from the .xls file book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8') sheet = book.sheet_by_index(0) data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)]) n_samples = sheet.nrows - 1 # Step 2: create placeholders for input X (number of fire) and label Y (number of theft) # Both have the type float32 # Step 3: create weight and bias, initialized to 0 # name your variables w and b # Step 4: predict Y (number of theft) from the number of fire # name your variable Y_predicted # Step 5: use the square error as the loss function # name your variable loss # Step 6: using gradient descent with learning rate of 0.01 to minimize loss # Phase 2: Train our model with tf.Session() as sess: # Step 7: initialize the necessary variables, in this case, w and b # TO - DO # Step 8: train the model for i in range(50): # run 100 epochs total_loss = 0 for x, y in data: # Session runs optimizer to minimize loss and fetch the value of loss. Name the received value as l # TO DO: write sess.run() total_loss += l print("Epoch {0}: {1}".format(i, total_loss/n_samples)) # plot the results # X, Y = data.T[0], data.T[1] # plt.plot(X, Y, 'bo', label='Real data') # plt.plot(X, X * w + b, 'r', label='Predicted data') # plt.legend() # plt.show()
#!/usr/bin/env python
#########################################################################################
#
# Validation script for SCAD (Spinal Cord Automatic Detection)
#
# Brainhack MTL 2015: Algorithms for automatic spinal cord detection on MR images
#
# This repository is intended to develop and test new algorithms for automatically detecting the spinal cord on
# various contrasts of MR volumes.
# The developed algorithms must satisfy the following criteria:
# - they can be coded in Python or C++
# - they must read a nifti image as input image (.nii or .nii.gz): "-i" (input file name) option
# - they have to output a binary image with the same format and orientation as the input image, containing the
#   location or the centerline of the spinal cord: "-o" (output file name) option
# - they have to be **fast**
#
# To validate a new algorithm, it must go through the validation pipeline using the following command:
#
# scad_validation.py "algo_name"
#
# The validation pipeline tests your algorithm against a testing dataset, containing many images of the spinal cord
# with various contrasts and fields of view, along with their manual segmentation.
# It tests several criteria:
# 1. if your detection is inside the spinal cord
# 2. if your detection is near the spinal cord centerline (at least near the manual centerline)
# 3. if the length of the centerline your algorithm extracted corresponds to that of the manual centerline
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-07-22
#
# About the license: see the file LICENSE
#########################################################################################
import sys
import os
import nibabel as nib
from msct_image import Image
from scad import SCAD
import numpy as np

import scad


def scadMRValidation(algorithm, isPython=False, verbose=True):
    if not isinstance(algorithm, str) or not algorithm:
        print 'ERROR: You must provide the name of your algorithm as a string.'
        usage()

    import time
    import sct_utils as sct

    # creating a new folder with the experiment
    path_experiment = 'scad-experiment.' + algorithm + '.' + time.strftime("%y%m%d%H%M%S")
    # status, output = sct.run('mkdir ' + path_experiment, verbose)

    # copying images from "data" folder into experiment folder
    sct.copyDirectory('data', path_experiment)

    # Starting validation
    os.chdir(path_experiment)

    # t1
    os.chdir('t1/')
    for subject_dir in os.listdir('./'):
        if os.path.isdir(subject_dir):
            os.chdir(subject_dir)

            # creating list of images and corresponding manual segmentation
            list_images = dict()
            for file_name in os.listdir('./'):
                if 'manual_segmentation' not in file_name:
                    for file_name_corr in os.listdir('./'):
                        if 'manual_segmentation' in file_name_corr and sct.extract_fname(file_name)[1] in file_name_corr:
                            list_images[file_name] = file_name_corr

            # running the proposed algorithm on images in the folder and analyzing the results
            for image, image_manual_seg in list_images.items():
                print image
                path_in, file_in, ext_in = sct.extract_fname(image)
                image_output = file_in + '_centerline' + ext_in
                if isPython:
                    try:
                        eval(algorithm + '(' + image + ', t1, verbose=' + str(verbose) + ')')
                    except Exception as e:
                        print 'Error during spinal cord detection on line {}:'.format(sys.exc_info()[-1].tb_lineno)
                        print 'Subject: t1/' + subject_dir + '/' + image
                        print e
                        sys.exit(2)
                else:
                    cmd = algorithm + ' -i ' + image + ' -t t1'
                    if verbose:
                        cmd += ' -v'
                    status, output = sct.run(cmd, verbose=verbose)
                    if status != 0:
                        print 'Error during spinal cord detection on Subject: t1/' + subject_dir + '/' + image
                        print output
                        sys.exit(2)

                # analyzing the resulting centerline
                from msct_image import Image
                manual_segmentation_image = Image(image_manual_seg)
                manual_segmentation_image.change_orientation()
                centerline_image = Image(image_output)
                centerline_image.change_orientation()

                from msct_types import Coordinate
                # coord_manseg = manual_segmentation_image.getNonZeroCoordinates()
                coord_centerline = centerline_image.getNonZeroCoordinates()

                # check if centerline is in manual segmentation
                result_centerline_in = True
                for coord in coord_centerline:
                    if manual_segmentation_image.data[coord.x, coord.y, coord.z] == 0:
                        result_centerline_in = False
                        print 'failed on slice #' + str(coord.z)
                        break
                if result_centerline_in:
                    print 'OK: Centerline is inside manual segmentation.'
                else:
                    print 'FAIL: Centerline is outside manual segmentation.'

                # check the length of centerline compared to manual segmentation
                # import sct_process_segmentation as sct_seg
                # length_manseg = sct_seg.compute_length(image_manual_seg)
                # length_centerline = sct_seg.compute_length(image_output)
                # if length_manseg * 0.9 <= length_centerline <= length_manseg * 1.1:
                #     print 'OK: Length of centerline corresponds to length of manual segmentation.'
                # else:
                #     print 'FAIL: Length of centerline does not correspond to length of manual segmentation.'
            os.chdir('..')

    # t2
    # t2*
    # dmri
    # gre


def validate_scad(folder_input):
    """
    Expecting folder to have the following structure :
    errsm_01:
    - t2
    -- errsm_01.nii.gz or t2.nii.gz
    :param folder_input:
    :return:
    """
    current_folder = os.getcwd()
    os.chdir(folder_input)
    try:
        patients = next(os.walk('.'))[1]
        for i in patients:
            if i != "errsm_01" and i != "errsm_02":
                directory = i + "/t2"
                os.chdir(directory)
                try:
                    if os.path.isfile(i + "_t2.nii.gz"):
                        raw_image = Image(i + "_t2.nii.gz")
                    elif os.path.isfile("t2.nii.gz"):
                        raw_image = Image("t2.nii.gz")
                    else:
                        raise Exception("t2.nii.gz or " + i + "_t2.nii.gz file is not found")

                    raw_orientation = raw_image.change_orientation()
                    SCAD(raw_image, contrast="t2", rm_tmp_file=1, verbose=1).test_debug()

                    manual_seg = Image(i + "_t2_manual_segmentation.nii.gz")
                    manual_orientation = manual_seg.change_orientation()

                    from scipy.ndimage.measurements import center_of_mass
                    # find COM
                    iterator = range(manual_seg.data.shape[2])
                    com_x = [0 for ix in iterator]
                    com_y = [0 for iy in iterator]

                    for iz in iterator:
                        com_x[iz], com_y[iz] = center_of_mass(manual_seg.data[:, :, iz])

                    #raw_image.change_orientation(raw_orientation)
                    #manual_seg.change_orientation(manual_orientation)

                    centerline_scad = Image(i + "_t2_centerline.nii.gz")
                    os.remove(i + "_t2_centerline.nii.gz")

                    centerline_scad.change_orientation()
                    distance = []
                    for iz in range(centerline_scad.data.shape[2]):
                        ind1 = np.argmax(centerline_scad.data[:, :, iz])
                        X, Y = scad.ind2sub(centerline_scad.data[:, :, iz].shape, ind1)

                        com_phys = centerline_scad.transfo_pix2phys([[com_x[iz], com_y[iz], iz]])
                        scad_phys = centerline_scad.transfo_pix2phys([[X, Y, iz]])
                        distance_magnitude = np.linalg.norm(com_phys - scad_phys)
                        distance.append(distance_magnitude)

                    os.chdir(folder_input)

                except Exception as e:
                    print e.message
                    pass

    except Exception as e:
        print e.message


def usage():
    print """
""" + os.path.basename(__file__) + """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Brainhack MTL 2015

DESCRIPTION
Validation script for SCAD (Spinal Cord Automatic Detection)

USAGE
""" + os.path.basename(__file__) + """ <algorithm_name>

MANDATORY ARGUMENTS
<algorithm_name>    name of the script you want to validate. The script must have -i, -o and -v options enabled.

OPTIONAL ARGUMENTS
ispython            Switch to python validation. It means that the algorithm will be called as a python method.
verbose             Disable display. Default: display on.
-h                  help. Show this message
"""
    sys.exit(1)


# START PROGRAM
# ==========================================================================================
if __name__ == "__main__":
    # reading the name of algorithm from arguments
    script_arguments = sys.argv[1:]
    if "-h" in script_arguments:
        usage()

    ### Start of not good code
    if "-scad" in script_arguments:
        folder = script_arguments[script_arguments.index("-i") + 1]
        if folder != "" and folder is not None:
            validate_scad(folder)

    # elif len(script_arguments) > 3:
    #     print 'ERROR: this script only accepts three arguments: the name of your algorithm, if it is a python script or' \
    #           'not and the verbose option.'
    #     usage()
    #
    # algorithm = script_arguments[0]
    # verbose = True
    # ispython = False
    # if len(script_arguments) >= 2:
    #     if 'verbose' in script_arguments[1:]:
    #         verbose = False
    #     if 'ispython' in script_arguments[1:]:
    #         ispython = True
    #
    # scadMRValidation(algorithm=algorithm, isPython=ispython, verbose=verbose)
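# ---------------------------------------------------------------------------
# Note (an added sketch, not part of the original script): scad.ind2sub is
# assumed here to behave like numpy.unravel_index, mapping the flat index
# returned by np.argmax on one axial slice back to (row, column) pixel
# coordinates:
#
# def ind2sub(array_shape, ind):
#     rows = ind // array_shape[1]   # integer row index within the 2D slice
#     cols = ind % array_shape[1]    # column index
#     return rows, cols
#
# e.g. X, Y = ind2sub(slice_2d.shape, np.argmax(slice_2d))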
I was diagnosed at 25 in March 2018 with ADHD. Before, I carried the diagnoses of Bipolar disorder, Depression, Anxiety, and OCD. My whole life I was told I was bright, but also lazy, stupid, harebrained, in a fog, etc. I almost failed out of college. I watched as people passed me by, and felt I wasn’t living up to my potential. I felt that something was wrong but my fatigue/mood swings were attributed to depression and bipolar. However, SSRIs and mood stabilizers never “fixed” me. I lost my first professional job after college and fell into a suicidal depression. I couldn’t organize a job search. I felt like a failure. I finally saw a documentary about ADHD and talked to my doctor about it. He trialed me on medication and it was like wearing glasses for the first time. I landed my dream job and am doing much better now. I never considered ADHD before because I had a misconception of what the disorder was. I now know it’s more than not paying attention and being hyper. There is hope.
import os
import sys
import uuid
import logging
import datetime
import contextlib
from typing import Any, Tuple, Iterator, Iterable

try:
    from petname import Generate as pet_generate
except ImportError:
    # Fall back to plain UUIDs when petname is not installed
    def pet_generate(_1: str, _2: str) -> str:
        return str(uuid.uuid4())

from cephlib.common import run_locally, sec_to_str


logger = logging.getLogger("wally")


STORAGE_ROLES = ['ceph-osd']


class StopTestError(RuntimeError):
    pass


class LogError:
    def __init__(self, message: str, exc_logger: logging.Logger = None) -> None:
        self.message = message
        self.exc_logger = exc_logger

    def __enter__(self) -> 'LogError':
        return self

    def __exit__(self, tp: type, value: Exception, traceback: Any) -> bool:
        if value is None or isinstance(value, StopTestError):
            return False

        if self.exc_logger is None:
            exc_logger = sys._getframe(1).f_globals.get('logger', logger)
        else:
            exc_logger = self.exc_logger

        exc_logger.exception(self.message, exc_info=(tp, value, traceback))
        raise StopTestError(self.message) from value


class TaskFinished(Exception):
    pass


def log_block(message: str, exc_logger: logging.Logger = None) -> LogError:
    logger.debug("Starts : " + message)
    return LogError(message, exc_logger)


def check_input_param(is_ok: bool, message: str) -> None:
    if not is_ok:
        logger.error(message)
        raise StopTestError(message)


def yamable(data: Any) -> Any:
    if isinstance(data, (tuple, list)):
        return map(yamable, data)

    if isinstance(data, dict):
        res = {}
        for k, v in data.items():
            res[yamable(k)] = yamable(v)
        return res

    return data


def get_creds_openrc(path: str) -> Tuple[str, str, str, str, bool]:
    fc = open(path).read()

    echo = 'echo "$OS_INSECURE:$OS_TENANT_NAME:$OS_USERNAME:$OS_PASSWORD@$OS_AUTH_URL"'

    msg = "Failed to get creds from openrc file"
    with LogError(msg):
        data = run_locally(['/bin/bash'], input_data=(fc + "\n" + echo).encode('utf8')).decode("utf8")

    msg = "Failed to get creds from openrc file: " + data
    with LogError(msg):
        data = data.strip()
        insecure_str, user, tenant, passwd_auth_url = data.split(':', 3)
        insecure = (insecure_str in ('1', 'True', 'true'))
        passwd, auth_url = passwd_auth_url.rsplit("@", 1)
        assert (auth_url.startswith("https://") or auth_url.startswith("http://"))

    return user, passwd, tenant, auth_url, insecure


@contextlib.contextmanager
def empty_ctx(val: Any = None) -> Iterator[Any]:
    yield val


def get_uniq_path_uuid(path: str, max_iter: int = 10) -> Tuple[str, str]:
    for i in range(max_iter):
        run_uuid = pet_generate(2, "_")
        results_dir = os.path.join(path, run_uuid)
        if not os.path.exists(results_dir):
            break
    else:
        run_uuid = str(uuid.uuid4())
        results_dir = os.path.join(path, run_uuid)

    return results_dir, run_uuid


def get_time_interval_printable_info(seconds: int) -> Tuple[str, str]:
    exec_time_s = sec_to_str(seconds)
    now_dt = datetime.datetime.now()
    end_dt = now_dt + datetime.timedelta(0, seconds)
    return exec_time_s, "{:%H:%M:%S}".format(end_dt)
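# ---------------------------------------------------------------------------
# Usage sketch (an illustration, not part of the module). The openrc path is
# hypothetical; any exception raised inside the block is logged through the
# "wally" logger and re-raised as StopTestError by LogError.__exit__.

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    with log_block("loading openstack credentials"):
        user, passwd, tenant, auth_url, insecure = \
            get_creds_openrc("/tmp/openrc.sh")  # path is illustrative

    results_dir, run_uuid = get_uniq_path_uuid("/tmp/wally_results")
    logger.info("Results dir %s (run id %s)", results_dir, run_uuid)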
Tata FMP-56-B-1105 D-G analysis: NAV, return, risk, and comparison with benchmark and peers. Tata FMP-56-B-1105 D-G, with a NAV of 10.55, is managed by Akhil Mittal. The fund is not rated. Tata FMP-56-B-1105 D-G is an FMP (fixed maturity plan) fund with a corpus of 29.59 crores and is benchmarked against the CRISIL Medium Term Debt Index.
# -*- coding: utf-8 -*- """ @author: Rinze de Laat Copyright © 2012 Rinze de Laat, Delmic This file is part of Odemis. Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. Content: This module contains classes describing various customized text fields used throughout Odemis. """ from __future__ import division import locale import logging import math import os import re import string import sys import wx import wx.lib.mixins.listctrl as listmix from odemis.gui import FG_COLOUR_DIS, FG_COLOUR_EDIT from odemis.util import units from odemis.util.units import decompose_si_prefix, si_scale_val # Locale is needed for correct string sorting locale.setlocale(locale.LC_ALL, "") # The SuggestTextCtrl and ChoiceListCtrl class are adaptations of the # TextCtrlAutoComplete class found at # http://wiki.wxpython.org/index.cgi/TextCtrlAutoComplete # # Adaptation for Delmic by R. de Laat # # wxPython Custom Widget Collection 20060207 # Written By: Edward Flick (eddy -=at=- cdf-imaging -=dot=- com) # Michele Petrazzo (michele -=dot=- petrazzo -=at=- unipex =dot= it) # Will Sadkin (wsadkin-=at=- nameconnector -=dot=- com) # Copyright 2006 (c) CDF Inc. ( http://www.cdf-imaging.com ) # Contributed to the wxPython project under the wxPython project's license. # class ChoiceListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin): """ Choice list used by the SuggestTextCtrl class """ def __init__(self, *args, **kwargs): wx.ListCtrl.__init__(self, *args, **kwargs) listmix.ListCtrlAutoWidthMixin.__init__(self) class SuggestTextCtrl(wx.TextCtrl, listmix.ColumnSorterMixin): def __init__(self, parent, choices=None, drop_down_click=True, col_fetch=-1, col_search=0, hide_on_no_match=True, select_callback=None, entry_callback=None, match_function=None, **text_kwargs): """ Constructor works just like wx.TextCtrl except you can pass in a list of choices. You can also change the choice list at any time by calling SetChoices. When a choice is picked, or the user has finished typing, a EVT_COMMAND_ENTER is sent. 
""" text_kwargs['style'] = wx.TE_PROCESS_ENTER | wx.BORDER_NONE | text_kwargs.get('style', 0) super(SuggestTextCtrl, self).__init__(parent, **text_kwargs) # Some variables self._drop_down_click = drop_down_click self._choices = choices self._lastinsertionpoint = 0 self._hide_on_no_match = hide_on_no_match self._select_callback = select_callback self._entry_callback = entry_callback self._match_function = match_function self._screenheight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y) # sort variable needed by listmix self.itemDataMap = dict() # Load and sort data if not self._choices: self._choices = [] # raise ValueError, "Pass me at least one of multiChoices OR choices" # widgets self.dropdown = wx.PopupWindow(self) # Control the style flags = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_SORT_ASCENDING flags = flags | wx.LC_NO_HEADER # Create the list and bind the events self.dropdownlistbox = ChoiceListCtrl(self.dropdown, style=flags, pos=wx.Point(0, 0)) ln = 1 # else: ln = len(choices) listmix.ColumnSorterMixin.__init__(self, ln) # load the data # self.SetChoices(choices) gp = self while gp is not None: gp.Bind(wx.EVT_MOVE, self.onControlChanged, gp) gp.Bind(wx.EVT_SIZE, self.onControlChanged, gp) gp = gp.GetParent() self.Bind(wx.EVT_KILL_FOCUS, self.onControlChanged, self) self.Bind(wx.EVT_TEXT, self.onEnteredText, self) self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown, self) # If need drop down on left click if drop_down_click: self.Bind(wx.EVT_LEFT_DOWN, self.onClickToggleDown, self) self.Bind(wx.EVT_LEFT_UP, self.onClickToggleUp, self) self.dropdown.Bind(wx.EVT_LISTBOX, self.onListItemSelected, self.dropdownlistbox) self.dropdownlistbox.Bind(wx.EVT_LEFT_DOWN, self.onListClick) self.dropdownlistbox.Bind(wx.EVT_LEFT_DCLICK, self.onListDClick) # This causes the text the user is typing to directly auto-fill with # the closest possibility. # self.dropdown.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onListDClick) self.dropdownlistbox.Bind(wx.EVT_LIST_COL_CLICK, self.onListColClick) # TODO: needed? 
self.il = wx.ImageList(16, 16) self.dropdownlistbox.SetImageList(self.il, wx.IMAGE_LIST_SMALL) self._ascending = True def _send_change_event(self): """ Sends an event EVT_COMMAND_ENTER to notify that the value has changed """ changeEvent = wx.CommandEvent(wx.wxEVT_COMMAND_ENTER, self.Id) wx.PostEvent(self, changeEvent) def GetListCtrl(self): return self.dropdownlistbox # -- event methods def onListClick(self, evt): toSel, dummy = self.dropdownlistbox.HitTest(evt.GetPosition()) #no values on position, return if toSel == -1: return self.dropdownlistbox.Select(toSel) def onListDClick(self, evt): self._setValueFromSelected() def onListColClick(self, evt): col = evt.GetColumn() #reverse the sort if col == self._col_search: self._ascending = not self._ascending self.SortListItems(evt.GetColumn(), ascending=self._ascending) self._col_search = evt.GetColumn() evt.Skip() def onEnteredText(self, event): text = event.GetString() if self._entry_callback: self._entry_callback() if not text: # control is empty; hide dropdown if shown: if self.dropdown.IsShown(): self._showDropDown(False) event.Skip() return found = False choices = self._choices for numCh, choice in enumerate(choices): if self._match_function and self._match_function(text, choice): found = True elif choice.lower().startswith(text.lower()): found = True if found: self._showDropDown(True) item = self.dropdownlistbox.GetItem(numCh) toSel = item.GetId() self.dropdownlistbox.Select(toSel) break if not found: self.dropdownlistbox.Select(self.dropdownlistbox.GetFirstSelected(), False) if self._hide_on_no_match: self._showDropDown(False) self._listItemVisible() event.Skip() def onKeyDown(self, event): """ Do some work when the user press on the keys: up and down: move the cursor left and right: move the search """ sel = self.dropdownlistbox.GetFirstSelected() KC = event.GetKeyCode() if KC == wx.WXK_DOWN: if sel < self.dropdownlistbox.GetItemCount() - 1: self.dropdownlistbox.Select(sel + 1) self._listItemVisible() self._showDropDown() elif KC == wx.WXK_UP: if sel > 0: self.dropdownlistbox.Select(sel - 1) self._listItemVisible() self._showDropDown() elif KC == wx.WXK_RETURN or KC == wx.WXK_NUMPAD_ENTER: visible = self.dropdown.IsShown() if visible: self._setValueFromSelected() else: self._send_change_event() elif KC == wx.WXK_ESCAPE: self._showDropDown(False) else: event.Skip() def onListItemSelected(self, event): self._setValueFromSelected() event.Skip() def onClickToggleDown(self, event): self._lastinsertionpoint = self.GetInsertionPoint() event.Skip() def onClickToggleUp(self, event): if self.GetInsertionPoint() == self._lastinsertionpoint: self._showDropDown(not self.dropdown.IsShown()) event.Skip() def onControlChanged(self, event): if self and self.IsShown(): self._showDropDown(False) if isinstance(event, wx.FocusEvent): # KILL_FOCUS => that means the user is happy with the current value self._send_change_event() event.Skip() def SetChoices(self, choices): """ Sets the choices available in the popup wx.ListBox. The items will be sorted case insensitively. 
""" self._choices = choices flags = wx.LC_REPORT | wx.LC_SINGLE_SEL | \ wx.LC_SORT_ASCENDING | wx.LC_NO_HEADER self.dropdownlistbox.SetWindowStyleFlag(flags) if not isinstance(choices, list): self._choices = list(choices) self._choices.sort(cmp=locale.strcoll) self._updateDataList(self._choices) self.dropdownlistbox.InsertColumn(0, "") for num, colVal in enumerate(self._choices): index = self.dropdownlistbox.InsertImageStringItem(sys.maxint, colVal, -1) self.dropdownlistbox.SetStringItem(index, 0, colVal) self.dropdownlistbox.SetItemData(index, num) self._setListSize() # there is only one choice for both search and fetch if setting a # single column: self._col_search = 0 self._col_fetch = -1 def GetChoices(self): return self._choices def Setselect_callback(self, cb=None): self._select_callback = cb def Setentry_callback(self, cb=None): self._entry_callback = cb def Setmatch_function(self, mf=None): self._match_function = mf #-- Internal methods def _setValueFromSelected(self): """ Sets the wx.TextCtrl value from the selected wx.ListCtrl item. Will do nothing if no item is selected in the wx.ListCtrl. """ sel = self.dropdownlistbox.GetFirstSelected() if sel > -1: if self._col_fetch != -1: col = self._col_fetch else: col = self._col_search itemtext = self.dropdownlistbox.GetItem(sel, col).GetText() if self._select_callback: dd = self.dropdownlistbox values = [dd.GetItem(sel, x).GetText() for x in xrange(dd.GetColumnCount())] self._select_callback(values) self.SetValue(itemtext) self.SetToolTip(wx.ToolTip(itemtext)) self.SetInsertionPointEnd() self.SetSelection(-1, -1) self._showDropDown(False) self._send_change_event() def _showDropDown(self, show=True): """ Either display the drop down list (show = True) or hide it (show = False). """ if show: size = self.dropdown.GetSize() width, height = self . GetSizeTuple() x, y = self.ClientToScreenXY(0, height) if size.GetWidth() != width: size.SetWidth(width) self.dropdown.SetSize(size) self.dropdownlistbox.SetSize(self.dropdown.GetClientSize()) if y + size.GetHeight() < self._screenheight: self.dropdown.SetPosition(wx.Point(x, y)) else: self.dropdown.SetPosition( wx.Point(x, y - height - size.GetHeight())) self.dropdown.Show(show) def _listItemVisible(self): """ Moves the selected item to the top of the list ensuring it is always visible. """ toSel = self.dropdownlistbox.GetFirstSelected() if toSel == -1: return self.dropdownlistbox.EnsureVisible(toSel) def _updateDataList(self, choices): #delete, if need, all the previous data if self.dropdownlistbox.GetColumnCount() != 0: self.dropdownlistbox.DeleteAllColumns() self.dropdownlistbox.DeleteAllItems() #and update the dict if choices: for numVal, data in enumerate(choices): self.itemDataMap[numVal] = data else: numVal = 0 self.SetColumnCount(numVal) def _setListSize(self): choices = self._choices longest = 0 for choice in choices: longest = max(len(choice), longest) longest += 3 itemcount = min(len(choices), 7) + 2 charheight = self.dropdownlistbox.GetCharHeight() charwidth = self.dropdownlistbox.GetCharWidth() self.popupsize = wx.Size(charwidth * longest, charheight * itemcount) self.dropdownlistbox.SetSize(self.popupsize) self.dropdown.SetClientSize(self.popupsize) class _NumberValidator(wx.PyValidator): """ Base class used for number validation Note:: In wxPython 3.0 Phoenix, wx.PyValidator will be replaced with wx. Validator. 
When you try and replace wx.PyValidator with wx.Validator in wxPython 3.0 Classic, however, validators will not be assigned correctly to TextCtrls (most notably the one in the Slider class) No clear reason was found for this and no attempt to change the super class should be made as long as Odemis uses the Classic version of wxPython. """ def __init__(self, min_val=None, max_val=None, choices=None, unit=None): """ Constructor """ super(_NumberValidator, self).__init__() self.Bind(wx.EVT_CHAR, self.on_char) # this is a kludge because default value in XRC is 0: if min_val == 0 and max_val == 0: min_val = None max_val = None # Minimum and maximum allowed values self.min_val = min_val self.max_val = max_val self.choices = choices self.unit = unit if None not in (min_val, max_val) and min_val > max_val: raise ValueError("Min value is bigger than max value: %r > %r" % (min_val, max_val)) self._validate_choices() # Build a regular expression pattern against which we can match the data that is being # entered reg_data = { 'negative_sign': '', 'unit': u"[ ]*[GMkmµunp]?(%s)?" % unit if unit else '' } if ( (min_val is None or min_val < 0) or (max_val is not None and max_val < 0) or (choices and min(choices) < 0) ): reg_data['negative_sign'] = '-' # Update the regular expression with the variables we've discovered self.entry_regex = self.entry_regex.format(**reg_data) # Compile the regex pattern what will be used for validation self.entry_pattern = re.compile(self.entry_regex) def set_value_range(self, min_val, max_val): # TODO: check values and recompute .legal as in init self.min_val = min_val self.max_val = max_val def GetRange(self): return self.min_val, self.max_val def _validate_choices(self): """ Validate all the choice values, if choice values are defined """ if self.choices: if not all([self._is_valid_value(c) for c in self.choices]): raise ValueError("Illegal value (%s) found in choices" % c) def _is_valid_value(self, val): """ Validate the given value Args: val (str): Returns: (boolean): True if the given string is valid """ # Don't fail on empty string if val is False or val is None: return False try: num = self._cast(val) except ValueError: return False if self.choices and num not in self.choices: return False if self.min_val and num < self.min_val: return False if self.max_val and num > self.max_val: return False return True def _get_str_value(self): """ Return the string value of the wx.Window to which this validator belongs """ # Special trick in, the very likely, case we are validating a NumberTextCtrl, which has it's # default 'GetValue' method replaced with one that returns number instances fld = self.GetWindow() if hasattr(fld, "get_value_str"): val = fld.get_value_str() else: val = fld.GetValue() return val def Clone(self): raise NotImplementedError def on_char(self, event): """ This method prevents the entry of illegal characters """ key = event.GetKeyCode() # Allow control keys to propagate if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255: event.Skip() return field_val = unicode(self._get_str_value()) start, end = self.GetWindow().GetSelection() field_val = field_val[:start] + chr(key) + field_val[end:] if not field_val or self.entry_pattern.match(field_val): # logging.debug("Field value %s accepted using %s", "field_val", self.entry_regex) event.Skip() else: logging.debug("Field value %s NOT accepted using %s", field_val, self.entry_regex) def Validate(self, win=None): """ This method is called when the 'Validate()' method is called on the parent of the TextCtrl to 
which this validator belongs. It can also be called as a standalone validation method. returns (boolean) """ is_valid = self._is_valid_value(self._get_str_value()) # logging.debug("Value '%s' is %s valid", self._get_str_value(), "" if is_valid else "not") return is_valid def get_validated_number(self, str_val): """ Return a validated number represented by the string value provided If choices is set, it will pick the closest matching value available. If min_val or max_val are set, it will always return a value within bounds. Args: str_val (string): a string representing a number Returns: (None or number of the right type): the most meaningful value that would fit the validator for the given string or None if the string is empty. """ if not str_val: return None # Aggressively try to cast the string to a legal value by removing characters while len(str_val): try: num = self._cast(str_val) break except ValueError: pass str_val = str_val[:-1] if not str_val: return None # Find the closest value in choices if self.choices: num = min(self.choices, key=lambda x: abs(x - num)) # bound the value by min/max msg = "Truncating out of range [{}, {}] value {}" if self.min_val is not None and num < self.min_val: logging.debug(msg.format(self.min_val, self.max_val, num)) num = self.min_val if self.max_val is not None and num > self.max_val: logging.debug(msg.format(self.min_val, self.max_val, num)) num = self.max_val return num def _cast(self, str_val): """ Cast the value string to the desired type Args: str_val (str): Value to cast Returns: number: Scaled and correctly typed number value """ raise NotImplementedError def _step_from_range(min_val, max_val): """ Dynamically create step size based on range """ try: step = (max_val - min_val) / 255 # To keep the inc/dec values 'clean', set the step # value to the nearest power of 10 step = 10 ** round(math.log10(step)) return step except ValueError: msg = "Error calculating step size for range [%s..%s]" % (min_val, max_val) logging.exception(msg) class _NumberTextCtrl(wx.TextCtrl): """ A base text control specifically tailored to contain numerical data Use .GetValue() and .SetValue()/.ChangeValue() to get/set the raw value (number). SetValue and ChangeValue are identical but the first one generates an event as if the user had typed something in. To get the string that is displayed by the control, use .get_value_str() and .SetValueStr(). Generates a wxEVT_COMMAND_ENTER whenever a new number is set by the user. This happens typically when loosing the focus or when pressing the [Enter] key. """ def __init__(self, *args, **kwargs): """ Args: validator (Validator): Validator that checks the value entered by the user key_step (number or None): by how much the value should be changed on key up/down accuracy (None or int): how many significant digits to keep when cleanly displayed. If None, it is never truncated. """ # Make sure that a validator is provided if "validator" not in kwargs: raise ValueError("Validator required!") # The step size for when the up and down keys are pressed self.key_step = kwargs.pop('key_step', None) self.accuracy = kwargs.pop('accuracy', None) # For the wx.EVT_TEXT_ENTER event to work, the TE_PROCESS_ENTER style needs to be set, but # setting it in XRC throws an error. 
A possible workaround is to include the style by hand kwargs['style'] = kwargs.get('style', 0) | wx.TE_PROCESS_ENTER | wx.BORDER_NONE if len(args) > 2: val = args[2] args = args[:2] else: val = kwargs.pop('value', None) # The self._number_value = val wx.TextCtrl.__init__(self, *args, **kwargs) self.SetBackgroundColour(self.Parent.BackgroundColour) self.SetForegroundColour(FG_COLOUR_EDIT) # Set the value so it will be validated to be a valid number if val is not None: self.SetValue(self._number_value) if self.key_step is not None: self.Bind(wx.EVT_CHAR, self.on_char) self.Bind(wx.EVT_KILL_FOCUS, self.on_kill_focus) self.Bind(wx.EVT_SET_FOCUS, self.on_focus) self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter) def _display_pretty(self): if self._number_value is None: str_val = u"" else: str_val = units.readable_str(self._number_value, sig=self.accuracy) wx.TextCtrl.ChangeValue(self, str_val) def Disable(self): self.Enable(False) def Enable(self, enable=True): # TODO: Find a better way to deal with this hack that was put in place because under # MS Windows the background colour cannot (at all?) be set when a control is disabled if os.name == 'nt': self.SetEditable(enable) if enable: self.SetForegroundColour(FG_COLOUR_EDIT) else: self.SetForegroundColour(FG_COLOUR_DIS) else: super(_NumberTextCtrl, self).Enable(enable) def SetValue(self, val): """ Set the numerical value of the text field Args: val (numerical type): The value to set the field to """ self.ChangeValue(val) def GetValue(self): """ Return the numerical value of the text field or None if no (valid) value is present Warning: we return the last validated value, not the current value in the text field """ return self._number_value def ChangeValue(self, val): """ Set the value of the text field No checks are done on the value to be correct. If this is needed, use the validator. Args: val (numerical type): The value to set the field to """ self._number_value = val # logging.debug("Setting value to '%s' for %s", val, self.__class__.__name__) self._display_pretty() def get_value_str(self): """ Return the value of the control as a string """ return wx.TextCtrl.GetValue(self) def set_value_str(self, val): wx.TextCtrl.SetValue(self, val) def change_value_str(self, val): """ Set the value of the field, without generating a change event """ wx.TextCtrl.ChangeValue(self, val) def SetValueRange(self, minv, maxv): """ Same as SetRange of a slider """ self.Validator.set_value_range(minv, maxv) def GetValueRange(self): return self.GetValidator().GetRange() def _set_number_value(self, str_number): """ Parse the given number string and set the internal number value This method is used when the enter key is pressed, or when the text field loses focus, i.e. situations where we always need to leave a valid and well formatted value. """ prev_num = self._number_value if str_number is None or str_number == "": self._number_value = None else: # set new value even if not validated, so that we reach the boundaries self._number_value = self.GetValidator().get_validated_number(str_number) # TODO: turn the text red temporarily if not valid? 
# if not validated: # logging.debug("Converted '%s' into '%s'", str_number, self._number_value) if prev_num != self._number_value: self._send_change_event() # Event handlers def _send_change_event(self): """ Create and send a change event (wxEVT_COMMAND_ENTER) """ changeEvent = wx.CommandEvent(wx.wxEVT_COMMAND_ENTER, self.Id) wx.PostEvent(self, changeEvent) def on_char(self, evt): """ This event handler increases or decreases the integer value when the up/down cursor keys are pressed. The event is ignored otherwise. """ key = evt.GetKeyCode() prev_num = self._number_value num = self._number_value if key == wx.WXK_UP and self.key_step and self.IsEditable(): num = (num or 0) + self.key_step elif key == wx.WXK_DOWN and self.key_step and self.IsEditable(): num = (num or 0) - self.key_step else: # Skip the event, so it can be processed in the regular way # (As in validate typed numbers etc.) evt.Skip() return val = u"%r" % num # GetNumber needs a string self._number_value = self.GetValidator().get_validated_number(val) if prev_num != self._number_value: self._display_pretty() # Update the GUI immediately self._send_change_event() def on_focus(self, evt): """ Select the number part (minus any unit indication) of the data in the text field """ number_length = len(self.get_value_str().rstrip(string.ascii_letters + u" µ")) wx.CallAfter(self.SetSelection, 0, number_length) evt.Skip() def on_kill_focus(self, evt): """ Display the current number value as a formatted string when the focus is lost """ wx.CallAfter(self.SetSelection, 0, 0) str_val = wx.TextCtrl.GetValue(self) self._set_number_value(str_val) self._display_pretty() evt.Skip() def on_text_enter(self, evt): """ Process [enter] key presses """ logging.debug("New text entered in %s", self.__class__.__name__) # almost the same as on_kill_focus, but still display raw wx.CallAfter(self.SetSelection, 0, 0) str_val = wx.TextCtrl.GetValue(self) self._set_number_value(str_val) self._display_pretty() # END Event handlers class UnitNumberCtrl(_NumberTextCtrl): def __init__(self, *args, **kwargs): """ unit (None or string): if None then behave like NumberTextCtrl """ self.unit = kwargs.pop('unit', None) _NumberTextCtrl.__init__(self, *args, **kwargs) def _display_pretty(self): if self._number_value is None: str_val = u"" elif self._number_value == 0 and self.key_step and self.unit not in units.IGNORE_UNITS: # Special case with 0: readable_str return just "0 unit", without # prefix. This is technically correct, but quite inconvenient and # a little strange when the typical value has a prefix (eg, nm, kV). # => use prefix of key_step (as it's a "small value") _, prefix = units.get_si_scale(self.key_step) str_val = "0 %s%s" % (prefix, self.unit) else: str_val = units.readable_str(self._number_value, self.unit, self.accuracy) # Get the length of the number (string length, minus the unit length) number_length = len(str_val.rstrip(string.ascii_letters + u" µ")) wx.TextCtrl.ChangeValue(self, str_val) # Select the number value wx.CallAfter(self.SetSelection, number_length, number_length) ######################################### # Integer controls ######################################### class IntegerValidator(_NumberValidator): """ This validator can be used to make sure only valid characters are entered into a control (digits and a minus symbol). It can also validate if the value that is present is a valid integer. 
""" def __init__(self, min_val=None, max_val=None, choices=None, unit=None): # The regular expression to check the validity of what is being typed, is a bit different # from a regular expression that would validate an entire string, because we need to check # validity as the user types self.entry_regex = u"[+{negative_sign}]?[\d]*{unit}$" _NumberValidator.__init__(self, min_val, max_val, choices, unit) def Clone(self): """ Required method """ return IntegerValidator(self.min_val, self.max_val, self.choices, self.unit) def _cast(self, str_val): """ Cast the string value to an integer and return it Args: str_val (str): A string representing a number value Returns: (int) Raises: ValueError: When the string cannot be parsed correctly """ if self.unit and str_val.endswith(self.unit): # Help it to find the right unit (important for complicated ones like 'px') str_val, si_prefix, unit = decompose_si_prefix(str_val, self.unit) else: str_val, si_prefix, unit = decompose_si_prefix(str_val) return int(si_scale_val(float(str_val), si_prefix)) class IntegerTextCtrl(_NumberTextCtrl): """ This class describes a text field that may only hold integer data. The 'min_val' and 'max_val' keyword arguments may be used to set limits on the value contained within the control. When the 'key_inc' argument is set, the value can be altered by the up and down cursor keys. The 'choices' keyword argument can be used to pass an iterable containing valid values If the object is created with an invalid integer value a ValueError exception will be raised. """ # TODO: should use the same parameter as NumberSlider: val_range instead # of min_val/max_val # TODO: refactor to have IntegerTextCtrl a UnitIntegerCtrl with unit=None? def __init__(self, *args, **kwargs): min_val = kwargs.pop('min_val', None) max_val = kwargs.pop('max_val', None) choices = kwargs.pop('choices', None) kwargs['validator'] = IntegerValidator(min_val, max_val, choices) kwargs['key_step'] = kwargs.get('key_step', 1) _NumberTextCtrl.__init__(self, *args, **kwargs) def SetValue(self, val): _NumberTextCtrl.SetValue(self, int(val)) class UnitIntegerCtrl(UnitNumberCtrl): """ This class represents a text control which is capable of formatting it's content according to the unit it set to: '<int value> <unit str>' The value defaults to 0 if none is provided. The 'unit' argument is mandatory. When the value is set through the API, the units are shown. When the control gets the focus, the value is shown without the units When focus is lost, the units will be shown again. 
""" def __init__(self, *args, **kwargs): min_val = kwargs.pop('min_val', None) max_val = kwargs.pop('max_val', None) choices = kwargs.pop('choices', None) unit = kwargs.get('unit', None) kwargs['validator'] = IntegerValidator(min_val, max_val, choices, unit) if 'key_step' not in kwargs and (min_val != max_val): kwargs['key_step'] = max(int(round(_step_from_range(min_val, max_val))), 1) UnitNumberCtrl.__init__(self, *args, **kwargs) def SetValue(self, val): UnitNumberCtrl.SetValue(self, int(val)) ######################################### # Float controls ######################################### class FloatValidator(_NumberValidator): def __init__(self, min_val=None, max_val=None, choices=None, unit=None): # The regular expression to check the validity of what is being typed, is a bit different # from a regular expression that would validate an entire string, because we need to check # validity as the user types self.entry_regex = u"[+{negative_sign}]?[\d]*[.]?[\d]*[eE]?[+-]?[\d]*{unit}$" _NumberValidator.__init__(self, min_val, max_val, choices, unit) def Clone(self): """ Required method """ return FloatValidator(self.min_val, self.max_val, self.choices, self.unit) def _cast(self, str_val): """ Cast the string value to a float and return it Args: str_val (str): A string representing a number value Returns: (float) Raises: ValueError: When the string cannot be parsed correctly """ if self.unit and str_val.endswith(self.unit): # Help it to find the right unit (important for complicated ones like 'px') str_val, si_prefix, unit = decompose_si_prefix(str_val, self.unit) else: str_val, si_prefix, unit = decompose_si_prefix(str_val) return si_scale_val(float(str_val), si_prefix) class FloatTextCtrl(_NumberTextCtrl): def __init__(self, *args, **kwargs): min_val = kwargs.pop('min_val', None) max_val = kwargs.pop('max_val', None) choices = kwargs.pop('choices', None) kwargs['validator'] = FloatValidator(min_val, max_val, choices) kwargs['key_step'] = kwargs.get('key_step', 0.1) _NumberTextCtrl.__init__(self, *args, **kwargs) class UnitFloatCtrl(UnitNumberCtrl): def __init__(self, *args, **kwargs): min_val = kwargs.pop('min_val', None) max_val = kwargs.pop('max_val', None) choices = kwargs.pop('choices', None) unit = kwargs.get('unit', None) kwargs['validator'] = FloatValidator(min_val, max_val, choices, unit) if 'key_step' not in kwargs and (min_val != max_val): kwargs['key_step'] = _step_from_range(min_val, max_val) kwargs['accuracy'] = kwargs.get('accuracy', None) UnitNumberCtrl.__init__(self, *args, **kwargs)
The Bionic arm makes bowls easy, with left- and right-hand versions and three different sizes. Because it is spring loaded, you don’t need to hold the bowl in place, and delivery is as simple as the push of a button. Extremely lightweight at only 620 grams! Australian made and fully approved by Bowls Australia.
from __future__ import unicode_literals import hashlib import hmac import logging from django.contrib.sites.models import Site from django.http.request import HttpRequest from django.utils.six.moves.urllib.parse import urlencode from django.utils.six.moves.urllib.request import Request, urlopen from django.template import Context, Template from django.template.base import Lexer, Parser from djblets.siteconfig.models import SiteConfiguration from djblets.webapi.encoders import (JSONEncoderAdapter, ResourceAPIEncoder, XMLEncoderAdapter) from reviewboard import get_package_version from reviewboard.notifications.models import WebHookTarget from reviewboard.reviews.models import Review, ReviewRequest from reviewboard.reviews.signals import (review_request_closed, review_request_published, review_request_reopened, review_published, reply_published) from reviewboard.webapi.resources import resources class FakeHTTPRequest(HttpRequest): """A fake HttpRequest implementation. The WebAPI serialization methods use HttpRequest.build_absolute_uri to generate all the links, but none of the various signals that generate webhook events have the request plumbed through. Since we don't actually need a valid request, this impersonates it enough to get valid results from build_absolute_uri. """ _is_secure = None _host = None def __init__(self, user, local_site_name=None): """Initialize a FakeHTTPRequest. Args: user (django.contrib.auth.models.User): The user who initiated the request. local_site_name (unicode, optional): The local site name (if the request was carried out against a local site). """ super(FakeHTTPRequest, self).__init__() self.user = user self._local_site_name = local_site_name if self._is_secure is None: siteconfig = SiteConfiguration.objects.get_current() self._is_secure = siteconfig.get('site_domain_method') == 'https' self._host = Site.objects.get_current().domain def is_secure(self): return self._is_secure def get_host(self): return self._host class CustomPayloadParser(Parser): """A custom template parser that blocks certain tags. This extends Django's Parser class for template parsing, and removes some built-in tags, in order to prevent mailicious use. """ BLACKLISTED_TAGS = ('block', 'debug', 'extends', 'include', 'load', 'ssi') def __init__(self, *args, **kwargs): super(CustomPayloadParser, self).__init__(*args, **kwargs) # Remove some built-in tags that we don't want to expose. # There are no built-in filters we have to worry about. for tag_name in self.BLACKLISTED_TAGS: try: del self.tags[tag_name] except KeyError: pass def render_custom_content(body, context_data={}): """Renders custom content for the payload using Django templating. This will take the custom payload content template provided by the user and render it using a stripped down version of Django's templating system. In order to keep the payload safe, we use a limited Context along with a custom Parser that blocks certain template tags. This gives us tags like {% for %} and {% if %}, but blacklists tags like {% load %} and {% include %}. 
""" lexer = Lexer(body, origin=None) parser = CustomPayloadParser(lexer.tokenize()) template = Template('') template.nodelist = parser.parse() return template.render(Context(context_data)) def dispatch_webhook_event(request, webhook_targets, event, payload): """Dispatch the given event and payload to the given WebHook targets.""" encoder = ResourceAPIEncoder() bodies = {} for webhook_target in webhook_targets: if webhook_target.use_custom_content: try: body = render_custom_content(webhook_target.custom_content, payload) body = body.encode('utf-8') except Exception as e: logging.exception('Could not render WebHook payload: %s', e) continue else: encoding = webhook_target.encoding if encoding not in bodies: try: if encoding == webhook_target.ENCODING_JSON: adapter = JSONEncoderAdapter(encoder) body = adapter.encode(payload, request=request) body = body.encode('utf-8') elif encoding == webhook_target.ENCODING_XML: adapter = XMLEncoderAdapter(encoder) body = adapter.encode(payload, request=request) elif encoding == webhook_target.ENCODING_FORM_DATA: adapter = JSONEncoderAdapter(encoder) body = urlencode({ 'payload': adapter.encode(payload, request=request), }) body = body.encode('utf-8') else: logging.error('Unexpected WebHookTarget encoding "%s" ' 'for ID %s', encoding, webhook_target.pk) continue except Exception as e: logging.exception('Could not encode WebHook payload: %s', e) continue bodies[encoding] = body else: body = bodies[encoding] headers = { b'X-ReviewBoard-Event': event.encode('utf-8'), b'Content-Type': webhook_target.encoding.encode('utf-8'), b'Content-Length': len(body), b'User-Agent': ('ReviewBoard-WebHook/%s' % get_package_version()) .encode('utf-8'), } if webhook_target.secret: signer = hmac.new(webhook_target.secret.encode('utf-8'), body, hashlib.sha1) headers[b'X-Hub-Signature'] = \ ('sha1=%s' % signer.hexdigest()).encode('utf-8') logging.info('Dispatching webhook for event %s to %s', event, webhook_target.url) try: url = webhook_target.url.encode('utf-8') urlopen(Request(url, body, headers)) except Exception as e: logging.exception('Could not dispatch WebHook to %s: %s', webhook_target.url, e) def _serialize_review(review, request): return { 'review_request': resources.review_request.serialize_object( review.review_request, request=request), 'review': resources.review.serialize_object( review, request=request), 'diff_comments': [ resources.filediff_comment.serialize_object( comment, request=request) for comment in review.comments.all() ], 'file_attachment_comments': [ resources.file_attachment_comment.serialize_object( comment, request=request) for comment in review.file_attachment_comments.all() ], 'screenshot_comments': [ resources.screenshot_comment.serialize_object( comment, request=request) for comment in review.screenshot_comments.all() ], 'general_comments': [ resources.review_general_comment.serialize_object( comment, request=request) for comment in review.general_comments.all() ], } def _serialize_reply(reply, request): return { 'review_request': resources.review_request.serialize_object( reply.review_request, request=request), 'reply': resources.review_reply.serialize_object( reply, request=request), 'diff_comments': [ resources.review_reply_diff_comment.serialize_object( comment, request=request) for comment in reply.comments.all() ], 'file_attachment_comments': [ resources.review_reply_file_attachment_comment.serialize_object( comment, request=request) for comment in reply.file_attachment_comments.all() ], 'screenshot_comments': [ 
            resources.review_reply_screenshot_comment.serialize_object(
                comment, request=request)
            for comment in reply.screenshot_comments.all()
        ],
        'general_comments': [
            resources.review_reply_general_comment.serialize_object(
                comment, request=request)
            for comment in reply.general_comments.all()
        ],
    }


def review_request_closed_cb(user, review_request, type, **kwargs):
    event = 'review_request_closed'
    webhook_targets = WebHookTarget.objects.for_event(
        event, review_request.local_site_id, review_request.repository_id)

    if review_request.local_site_id:
        local_site_name = review_request.local_site.name
    else:
        local_site_name = None

    if webhook_targets:
        if type == review_request.SUBMITTED:
            close_type = 'submitted'
        elif type == review_request.DISCARDED:
            close_type = 'discarded'
        else:
            logging.error('Unexpected close type %s for review request %s '
                          'when dispatching webhook.',
                          type, review_request.pk)
            return

        if not user:
            user = review_request.submitter

        request = FakeHTTPRequest(user, local_site_name=local_site_name)
        payload = {
            'event': event,
            'closed_by': resources.user.serialize_object(
                user, request=request),
            'close_type': close_type,
            'review_request': resources.review_request.serialize_object(
                review_request, request=request),
        }

        dispatch_webhook_event(request, webhook_targets, event, payload)


def review_request_published_cb(user, review_request, changedesc, **kwargs):
    event = 'review_request_published'
    webhook_targets = WebHookTarget.objects.for_event(
        event, review_request.local_site_id, review_request.repository_id)

    if review_request.local_site_id:
        local_site_name = review_request.local_site.name
    else:
        local_site_name = None

    if webhook_targets:
        request = FakeHTTPRequest(user, local_site_name=local_site_name)
        payload = {
            'event': event,
            'is_new': changedesc is None,
            'review_request': resources.review_request.serialize_object(
                review_request, request=request),
        }

        if changedesc:
            payload['change'] = resources.change.serialize_object(
                changedesc, request=request)

        dispatch_webhook_event(request, webhook_targets, event, payload)


def review_request_reopened_cb(user, review_request, **kwargs):
    event = 'review_request_reopened'
    webhook_targets = WebHookTarget.objects.for_event(
        event, review_request.local_site_id, review_request.repository_id)

    if review_request.local_site_id:
        local_site_name = review_request.local_site.name
    else:
        local_site_name = None

    if webhook_targets:
        if not user:
            user = review_request.submitter

        request = FakeHTTPRequest(user, local_site_name=local_site_name)
        payload = {
            'event': event,
            'reopened_by': resources.user.serialize_object(
                user, request=request),
            'review_request': resources.review_request.serialize_object(
                review_request, request=request),
        }

        dispatch_webhook_event(request, webhook_targets, event, payload)


def review_published_cb(user, review, **kwargs):
    event = 'review_published'
    review_request = review.review_request
    webhook_targets = WebHookTarget.objects.for_event(
        event, review_request.local_site_id, review_request.repository_id)

    if review_request.local_site_id:
        local_site_name = review_request.local_site.name
    else:
        local_site_name = None

    if webhook_targets:
        request = FakeHTTPRequest(user, local_site_name=local_site_name)
        payload = _serialize_review(review, request)
        payload['event'] = event

        dispatch_webhook_event(request, webhook_targets, event, payload)


def reply_published_cb(user, reply, **kwargs):
    event = 'reply_published'
    review_request = reply.review_request
    webhook_targets = WebHookTarget.objects.for_event(
        event, review_request.local_site_id, review_request.repository_id)

    if review_request.local_site_id:
        local_site_name = review_request.local_site.name
    else:
        local_site_name = None

    if webhook_targets:
        request = FakeHTTPRequest(user, local_site_name=local_site_name)
        payload = _serialize_reply(reply, request)
        payload['event'] = event

        dispatch_webhook_event(request, webhook_targets, event, payload)


def connect_signals():
    review_request_closed.connect(review_request_closed_cb,
                                  sender=ReviewRequest)
    review_request_published.connect(review_request_published_cb,
                                     sender=ReviewRequest)
    review_request_reopened.connect(review_request_reopened_cb,
                                    sender=ReviewRequest)
    review_published.connect(review_published_cb, sender=Review)
    reply_published.connect(reply_published_cb, sender=Review)
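Because dispatch_webhook_event() signs each request body with HMAC-SHA1 and transmits the digest in the X-Hub-Signature header, a receiving endpoint can verify authenticity as sketched below. This helper is not part of the module; only the signature scheme is taken from the code above.

import hashlib
import hmac


def is_valid_signature(secret, body, signature_header):
    # secret and body are bytes; signature_header looks like 'sha1=<hexdigest>'
    expected = 'sha1=%s' % hmac.new(secret, body, hashlib.sha1).hexdigest()
    return hmac.compare_digest(expected, signature_header)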
To help people and webmasters who want to open their own faucet, we have made our latest version of MasterFaucet available for download, with the new multicurrency VirtualCoin API integration. 3. Upload all the script files to the server. – Multi-currency! Now you can start your own LTC faucet! – Multiple captcha providers! You can choose solvemedia or google!
import os import shutil import sys import time import pytest import ray from ray.test_utils import check_call_ray def unix_socket_create_path(name): unix = sys.platform != "win32" return os.path.join(ray.utils.get_user_temp_dir(), name) if unix else None def unix_socket_verify(unix_socket): if sys.platform != "win32": assert os.path.exists(unix_socket), "Socket not found: " + unix_socket def unix_socket_delete(unix_socket): unix = sys.platform != "win32" return os.remove(unix_socket) if unix else None def test_tempdir(shutdown_only): shutil.rmtree(ray.utils.get_ray_temp_dir(), ignore_errors=True) ray.init( _temp_dir=os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir")) assert os.path.exists( os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir")), "Specified temp dir not found." assert not os.path.exists( ray.utils.get_ray_temp_dir()), ("Default temp dir should not exist.") shutil.rmtree( os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir"), ignore_errors=True) def test_tempdir_commandline(): shutil.rmtree(ray.utils.get_ray_temp_dir(), ignore_errors=True) check_call_ray([ "start", "--head", "--temp-dir=" + os.path.join( ray.utils.get_user_temp_dir(), "i_am_a_temp_dir2") ]) assert os.path.exists( os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir2")), "Specified temp dir not found." assert not os.path.exists( ray.utils.get_ray_temp_dir()), "Default temp dir should not exist." check_call_ray(["stop"]) shutil.rmtree( os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir2"), ignore_errors=True) def test_tempdir_long_path(): if sys.platform != "win32": # Test AF_UNIX limits for sockaddr_un->sun_path on POSIX OSes maxlen = 104 if sys.platform.startswith("darwin") else 108 temp_dir = os.path.join(ray.utils.get_user_temp_dir(), "z" * maxlen) with pytest.raises(OSError): ray.init(_temp_dir=temp_dir) # path should be too long def test_raylet_tempfiles(shutdown_only): expected_socket_files = ({"plasma_store", "raylet"} if sys.platform != "win32" else set()) ray.init(num_cpus=0) node = ray.worker._global_node top_levels = set(os.listdir(node.get_session_dir_path())) assert top_levels.issuperset({"sockets", "logs"}) log_files = set(os.listdir(node.get_logs_dir_path())) log_files_expected = { "log_monitor.out", "log_monitor.err", "plasma_store.out", "plasma_store.err", "monitor.out", "monitor.err", "redis-shard_0.out", "redis-shard_0.err", "redis.out", "redis.err", "raylet.out", "raylet.err", "gcs_server.out", "gcs_server.err" } for expected in log_files_expected: assert expected in log_files assert log_files_expected.issubset(log_files) assert log_files.issuperset(log_files_expected) socket_files = set(os.listdir(node.get_sockets_dir_path())) assert socket_files == expected_socket_files ray.shutdown() ray.init(num_cpus=2) node = ray.worker._global_node top_levels = set(os.listdir(node.get_session_dir_path())) assert top_levels.issuperset({"sockets", "logs"}) time.sleep(3) # wait workers to start log_files = set(os.listdir(node.get_logs_dir_path())) assert log_files.issuperset(log_files_expected) # Check numbers of worker log file. assert sum( 1 for filename in log_files if filename.startswith("worker")) == 4 socket_files = set(os.listdir(node.get_sockets_dir_path())) assert socket_files == expected_socket_files def test_tempdir_privilege(shutdown_only): os.chmod(ray.utils.get_ray_temp_dir(), 0o000) ray.init(num_cpus=1) session_dir = ray.worker._global_node.get_session_dir_path() assert os.path.exists(session_dir), "Specified socket path not found." 
def test_session_dir_uniqueness():
    session_dirs = set()
    for i in range(2):
        ray.init(num_cpus=1)
        # get_session_dir_path() must be called; collecting the bound method
        # object would make the set grow regardless of the actual paths.
        session_dirs.add(ray.worker._global_node.get_session_dir_path())
        ray.shutdown()
    assert len(session_dirs) == 2


if __name__ == "__main__":
    # Make subprocess happy in bazel.
    os.environ["LC_ALL"] = "en_US.UTF-8"
    os.environ["LANG"] = "en_US.UTF-8"
    sys.exit(pytest.main(["-v", __file__]))
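As a quick illustration of the directory layout these tests assert on, this standalone sketch (not part of the test file) prints the per-session paths; the exact session directory name is an assumption about Ray's naming:

import ray

ray.init(num_cpus=0)
node = ray.worker._global_node
print(node.get_session_dir_path())   # e.g. <temp_dir>/session_<timestamp>_<pid>
print(node.get_logs_dir_path())      # raylet.out, gcs_server.err, ...
print(node.get_sockets_dir_path())   # plasma_store / raylet sockets on POSIX
ray.shutdown()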
SOCSD Districtwide Celebrations! Please join us on Thursday, June 7, 2018 at 6:30 PM in the TZHS Cafeteria as we celebrate the tenures and retirements of SOCSD staff, honor the PTA Founders Day and Volunteers of the Year recipients, and hold the 2018-2019 PTA officer installations. All PTA volunteers are welcome and encouraged to attend! The SOMS School Store will hold its Father’s Day Sale on Friday, June 8th during all lunch periods. SOMS and DUTCHMEN Spirit Wear 20% Off SALE!
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
#  Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
#  OR CONDITIONS OF ANY KIND, either express or implied. No content created from
#  ScanCode should be considered or used as legal advice. Consult an Attorney
#  for any legal advice.
#  ScanCode is a free software code scanning tool from nexB Inc. and others.
#  Visit https://github.com/nexB/scancode-toolkit/ for support and download.

from __future__ import absolute_import, print_function

import os

from commoncode.testcase import FileBasedTesting
from textcode import pdf


class TestPdf(FileBasedTesting):
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_get_text_lines(self):
        test_file = self.get_test_loc('pdf/pdf.pdf')
        result = pdf.get_text_lines(test_file)
        expected = u'''pdf
"""
Extracts text from a pdf file.
"""
import contextlib
from StringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter


def get_text(location):
    rs_mgr = PDFResourceManager()
    extracted_text = StringIO()
    with contextlib.closing(TextConverter(rs_mgr, extracted_text)) as extractor:
        with open(location, \'rb\') as pdf_file:
            interpreter = PDFPageInterpreter(rs_mgr, extractor)
            pages = PDFPage.get_pages(pdf_file, check_extractable=True)
            for page in pages:
                interpreter.process_page(page)
    return extracted_text
Page 1
\x0c'''.splitlines(True)
        assert expected == result

    def test_pdfminer_cant_parse_faulty_broadcom_doc(self):
        # test for https://github.com/euske/pdfminer/issues/118
        test_file = self.get_test_loc('pdf/pdfminer_bug_118/faulty.pdf')
        from pdfminer.pdfparser import PDFParser
        from pdfminer.pdfdocument import PDFDocument
        from pdfminer.pdfdocument import PDFEncryptionError
        with open(test_file, 'rb') as inputfile:
            parser = PDFParser(inputfile)
            try:
                PDFDocument(parser)
            except PDFEncryptionError:
                # Parsing should not raise here; this except clause becomes
                # dead code once the upstream pdfminer bug is fixed.
                pass

    def test_get_text_lines_skip_parse_faulty_broadcom_doc(self):
        test_file = self.get_test_loc('pdf/pdfminer_bug_118/faulty.pdf')
        # assertRaises keeps the intent explicit; a bare except around
        # self.fail() would also swallow the AssertionError it raises,
        # letting the test pass unconditionally.
        with self.assertRaises(Exception):
            pdf.get_text_lines(test_file)
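For reference, the extractor under test can also be driven outside the test harness; a minimal sketch follows (the input path is hypothetical):

from textcode import pdf

# get_text_lines() returns the extracted text as a list of lines, as the
# expected value in test_get_text_lines shows.
for line in pdf.get_text_lines('some/local/document.pdf'):
    print(line.rstrip())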
July 28 – August 3 is Hepatitis Awareness Week. Hepatitis is an infection of the liver, a vital organ that turns nutrients into energy, filters the blood, and fights illness. When the liver is inflamed or damaged, its function can be impaired. Hepatitis can result in both short-term and long-term liver disease and in cancer. Hepatitis A can occur when an unvaccinated person eats contaminated food or drinks contaminated water. Hepatitis B, C, and D can spread through contact with the blood and body fluids of an infected person. Hepatitis E is spread through contaminated water or undercooked pork, deer, or shellfish. Hepatitis A, B, and C are the most common types in the United States. · Individuals who have used injectable drugs, even once in their lifetime. · During certain food-based outbreaks of hepatitis.
from pyglet.window import mouse

import event


def DragHandler(rule, buttons=mouse.LEFT):
    class _DragHandler(object):
        original_position = None
        mouse_buttons = buttons

        @event.select(rule)
        def on_drag(self, widget, x, y, dx, dy, buttons, modifiers):
            if not buttons & self.mouse_buttons:
                return event.EVENT_UNHANDLED
            if self.original_position is None:
                self.original_position = (widget.x, widget.y, widget.z)
                widget.z += 1
            widget.x += dx
            widget.y += dy
            return event.EVENT_HANDLED

        @event.select(rule)
        def on_drag_complete(self, widget, x, y, buttons, modifiers, ok):
            if self.original_position is None:
                return
            if ok:
                # Drop accepted: keep the new x/y, restore only the z order.
                widget.z = self.original_position[2]
            else:
                # Drop rejected: snap the widget back to where it started.
                widget.x, widget.y, widget.z = self.original_position
            self.original_position = None
            return event.EVENT_HANDLED

    return _DragHandler()
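A hedged usage sketch for the factory above; the CSS-like rule string is an assumption about what event.select() accepts, inferred only from how DragHandler is parameterised here:

# Drag any widget matched by the rule, with either mouse button.
drag_handler = DragHandler('.draggable', buttons=mouse.LEFT | mouse.RIGHT)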
Tablet has just been updated to version 1.17.08.17. NEW: Updated the included Java runtime to 1.8.0_144. CHG: Renamed Show Cigar-I to Show Cigar in the overlays band of the ribbon. CHG: Tablet now doesn’t attempt to render Cigar overlays when the width of a base is less than 1. BUG: Cigar feature creation code can now handle reads where the read sequence is set to “*”.
#-*-coding:utf-8-*-
from django.db import models
from django.contrib.auth.models import User
from filebrowser.fields import FileBrowseField
import datetime

from posts.models import Post

TYPE_OF_PROJECT = (
    ('Cases', 'Case'),
    ('Projects', 'Project'),
    ('Workshops', 'Workshops'),
    ('Installation', 'Installation'),
)

PROCESS = (
    ('Initiated', 'Initiated'),
    ('Announced', 'Announced'),
    ('Program', 'Program'),
    ('Completed', 'Completed'),
    ('Documented', 'Documented'),
    ('Reported', 'Reported'),
    ('Post_process', 'Post_process'),
)

# Maps each PROCESS stage to its completion percentage.
PROCESS_PERCENT = {
    'Initiated': 10,
    'Announced': 20,
    'Program': 40,
    'Completed': 50,
    'Documented': 70,
    'Reported': 95,
    'Post_process': 100,
}


class Project(models.Model):
    title = models.CharField(max_length=255, help_text="Title of the project. Can be anything up to 255 characters.")
    slug = models.SlugField()
    announce = models.CharField(max_length=255, help_text="Can be anything up to 255 characters.")
    # 'summery' is a misspelling of 'summary'; the name is kept to avoid a
    # database schema migration.
    summery = models.TextField()
    image = FileBrowseField("Image", max_length=200, directory="images/", extensions=[".jpg"], blank=True, null=True)
    author = models.ForeignKey(User, help_text="Who is posting.")
    datetime_created = models.DateTimeField(auto_now_add=True)
    datetime_modified = models.DateTimeField(auto_now=True)
    # Pass the callable, not its result: datetime.datetime.now() would be
    # evaluated once at import time and reused for every new row.
    publish_at = models.DateTimeField(default=datetime.datetime.now, help_text="Date and time the post should become visible")
    realtime_started = models.DateTimeField(blank=True, null=True)
    realtime_ended = models.DateTimeField(blank=True, null=True)
    active = models.BooleanField(default=False, help_text="Controls whether or not this item is visible on the site.")
    posts = models.ManyToManyField(Post, help_text="Which posts belong to this project?", related_name="posts")
    type = models.CharField(max_length=40, choices=TYPE_OF_PROJECT)
    process = models.CharField(max_length=12, choices=PROCESS)
    credits = models.TextField(blank=True, null=True)

    class Meta:
        ordering = ['-publish_at',]
        verbose_name_plural = 'projects'

    def __unicode__(self):
        return u'%s' % self.title

    def process_procent(self):
        # Dict lookup keyed on the exact values stored by the PROCESS
        # choices; unknown values fall back to 0.
        return PROCESS_PERCENT.get(self.process, 0)
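A short sketch of how the model above might be queried; the field names come from the model itself, while the app label in the import is assumed:

import datetime

from projects.models import Project  # app label assumed

live_projects = Project.objects.filter(
    active=True,
    publish_at__lte=datetime.datetime.now(),
)
for project in live_projects:
    print(u'%s %s %d%%' % (project.title, project.process,
                           project.process_procent()))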
Former world champion Tom Daley secured a confidence-boosting personal best as he claimed silver behind China's Qiu Bo at the diving World Series in Mexico. Olympic bronze medallist Daley had looked solid throughout qualifying in Monterrey and scored 577.20 points to finish just behind the three-time world champion and just ahead of Russia's Victor Minibaev. Minibaev's third place was enough to win him the overall World Series title ahead of Qiu, who missed the rounds in London and Moscow, with Daley in third place. At the British Championships in Sheffield, Matthew Dixon, 14, and 13-year-old Victoria Vincent won the 10m platform titles, while James Denny and Alicia Blagg claimed wins in the 3m springboard. Rebecca Gallantree and Hannah Starling won the gold medal in the women's 3m synchro, while Freddie Woodward and Nick Robinson-Baker won the equivalent men's event.
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0 """ Base RPC client class Authors: * Brian Granger * Alexander Glyzov * Axel Voitier """ #----------------------------------------------------------------------------- # Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov, # Axel Voitier # # Distributed under the terms of the BSD License. The full license is in # the file LICENSE distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from sys import exc_info from random import randint from logging import getLogger import zmq from zmq.utils import jsonapi from .base import RPCBase from .errors import RemoteRPCError, RPCError from .utils import RemoteMethod #----------------------------------------------------------------------------- # RPC Client base #----------------------------------------------------------------------------- class RPCClientBase(RPCBase): """An RPC Client (base class)""" logger = getLogger('netcall.client') def _create_socket(self): super(RPCClientBase, self)._create_socket() self.socket = self.context.socket(zmq.DEALER) self.socket.setsockopt(zmq.IDENTITY, self.identity) def _build_request(self, method, args, kwargs, ignore=False, req_id=None): req_id = req_id or b'%x' % randint(0, 0xFFFFFFFF) method = bytes(method) msg_list = [b'|', req_id, method] data_list = self._serializer.serialize_args_kwargs(args, kwargs) msg_list.extend(data_list) msg_list.append(bytes(int(ignore))) return req_id, msg_list def _send_request(self, request): self.logger.debug('sending %r', request) self.socket.send_multipart(request) def _parse_reply(self, msg_list): """ Parse a reply from service (should not raise an exception) The reply is received as a multipart message: [b'|', req_id, type, payload ...] 
Returns either None or a dict { 'type' : <message_type:bytes> # ACK | OK | YIELD | FAIL 'req_id' : <id:bytes>, # unique message id 'srv_id' : <service_id:bytes> | None # only for ACK messages 'result' : <object> } """ logger = self.logger if len(msg_list) < 4 or msg_list[0] != b'|': logger.error('bad reply %r', msg_list) return None msg_type = msg_list[2] data = msg_list[3:] result = None srv_id = None if msg_type == b'ACK': srv_id = data[0] elif msg_type in (b'OK', b'YIELD'): try: result = self._serializer.deserialize_result(data) except Exception, e: msg_type = b'FAIL' result = e elif msg_type == b'FAIL': try: error = jsonapi.loads(msg_list[3]) if error['ename'] == 'StopIteration': result = StopIteration() elif error['ename'] == 'GeneratorExit': result = GeneratorExit() else: result = RemoteRPCError(error['ename'], error['evalue'], error['traceback']) except Exception, e: logger.error('unexpected error while decoding FAIL', exc_info=True) result = RPCError('unexpected error while decoding FAIL: %s' % e) else: result = RPCError('bad message type: %r' % msg_type) return dict( type = msg_type, req_id = msg_list[1], srv_id = srv_id, result = result, ) def _generator(self, req_id, get_val_exc): """ Mirrors a service generator on a client side """ #logger = self.logger def _send_cmd(cmd, args): _, msg_list = self._build_request( cmd, args, None, ignore=False, req_id=req_id ) self._send_request(msg_list) _send_cmd('_SEND', None) while True: val, exc = get_val_exc() if exc is not None: raise exc try: res = yield val except GeneratorExit: _send_cmd('_CLOSE', None) except: etype, evalue, _ = exc_info() _send_cmd('_THROW', [etype.__name__, evalue]) else: _send_cmd('_SEND', res) def __getattr__(self, name): return RemoteMethod(self, name) def call(self, proc_name, args=[], kwargs={}, result='sync', timeout=None): """ Call the remote method with *args and **kwargs (may raise an exception) Parameters ---------- proc_name : <bytes> name of the remote procedure to call args : <tuple> positional arguments of the remote procedure kwargs : <dict> keyword arguments of the remote procedure result : 'sync' | 'async' | 'ignore' timeout : <float> | None Number of seconds to wait for a reply. RPCTimeoutError is raised in case of timeout. Set to None, 0 or a negative number to disable. Returns ------- <result:object> if result is 'sync' <Future> if result is 'async' None if result is 'ignore' If remote call fails: - raises <RemoteRPCError> if result is 'sync' - sets <RemoteRPCError> into the <Future> if result is 'async' """ assert result in ('sync', 'async', 'ignore'), \ 'expected any of "sync", "async", "ignore" -- got %r' % result if not (timeout is None or isinstance(timeout, (int, float))): raise TypeError("timeout param: <float> or None expected, got %r" % timeout) if not self._ready: raise RuntimeError('bind or connect must be called first') ignore = result == 'ignore' req_id, msg_list = self._build_request(proc_name, args, kwargs, ignore) self._send_request(msg_list) if ignore: return None future = self._tools.Future() self._futures[req_id] = future if result == 'sync': # block waiting for a reply passed by _reader return future.result(timeout=timeout) else: # async return future
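To show how call() is meant to be used, here is a hedged sketch. The concrete subclass name is hypothetical (RPCClientBase is abstract); connect() and the call() semantics follow the base class above:

# ThreadingRPCClient stands in for whatever concrete client the library
# layers on top of RPCClientBase.
client = ThreadingRPCClient()
client.connect('tcp://127.0.0.1:5555')

# Synchronous call: blocks for up to 5 seconds, raises RemoteRPCError on
# remote failure.
print(client.call(b'echo', args=['hello'], result='sync', timeout=5.0))

# Attribute access returns a RemoteMethod proxy (see __getattr__ above).
echo = client.echo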
GRIM is an absolutely amazing, almost schizophrenic and obscure one-man power-electronics and noise project by Jun Konagaya. Jun's sound changes its mood and atmosphere in seconds, from extremely harsh and hard power-electronic noise walls to ingenious folk songs and tunes like those you know from Charles Manson. In the very early 80s Jun formed White Hospital together with Tomosada Kuwabara. They released one album, "Holocaust", in 1984 and split. At that time Jun had already released the 'Vital' tape with his project GRIM. § "Vital 1983 - 86" § "Amaterasu" & "Folk Music"
#!/usr/bin/env python # # urllib.urlencode({'abc':'d f', 'def': '-!2'}) # 'abc=d+f&def=-%212' # urllib.quote_plus() # # r = Request(url='http://www.mysite.com') # r.add_header('User-Agent', 'awesome fetcher') # r.add_data(urllib.urlencode({'foo': 'bar'}) # response = urlopen(r) # # datetime.datetime(2000,1,1) # # socket.gethostbyname(platform.node()) # socket.gethostbyaddr("69.59.196.211") # # (h, a, n )= socket.gethostbyaddr( socket.gethostbyname(platform.node()) ) # h = host, a = short alias list, n= ip address list # import os import platform import pwd import random import socket import sys import urllib import urllib2 import datetime ''' curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query? autopilot=updateservicelist& status=running& name=Job+scheduler& grp=TestPilot& type=tpmon& pid=12345& userid=sm& doaction=& host=gridui10.usatlas.bnl.gov& tstart=2012-08-14+10%3A17%3A14.900791& tstop=2000-01-01+00%3A00%3A00& message=& lastmod=2012-08-14+10%3A17%3A14.900791& config=pilotScheduler.py+--queue%3DANALY_NET2-pbs+--pandasite%3DANALY_NET2+--pilot%3DatlasOfficial2& description=TestPilot+service' curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query? autopilot=updatepilot& status=active& queueid=ANALY_NET2& tsubmit=2012-08-14+10%3A21%3A20.295097& workernode=unassigned& tpid=tp_gridui10_88777_9999999-102119_13& url=http%3A%2F%2Fgridui10.usatlas.bnl.gov%3A25880%2Fschedlogs%2Ftp_gridui10_88888_9999999%2Ftp_gridui10_28847_20120814-102119_13& nickname=ANALY_NET2-pbs& tcheck=2012-08-14+10%3A21%3A20.295375& system=osg&jobid=3333333.0& tenter=2012-08-14+10%3A21%3A19.521314& host=gridui10.usatlas.bnl.gov& state=submitted& submithost=gridui10& user=sm& schedd_name=gridui10.usatlas.bnl.gov& type=atlasOfficial2& tstate=2012-08-14+10%3A21%3A20.295097& errinfo=+' works: curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query? autopilot=updatepilot& status=active& queueid=BNL_CLOUD& tsubmit=2012-08-15+22%3A58%3A57.528556& workernode=unassigned& tpid=9999999.3& url=http%3A%2F%2Fgridui08.usatlas.bnl.gov%3A25880%2Fschedlogs%2Ftp_gridui12_23036_20120815%2Ftp_gridui12_23036_20120815-225856_624& nickname=BNL_CLOUD& tcheck=2012-08-15+22%3A58%3A57.528842& system=osg&jobid=9999999.0& tenter=2012-08-15+22%3A58%3A56.771376& host=gridui08.usatlas.bnl.gov& state=submitted& submithost=gridui08& user=jhover& schedd_name=gridui08.usatlas.bnl.gov& type=atlasOfficial2& tstate=2012-08-15+22%3A58%3A57.528556& errinfo=+' NOT working: http://panda.cern.ch:25980/server/pandamon/query? 
autopilot=updatepilot& status=active& queueid=BNL_CLOUD& tsubmit=2012-08-16+18%3A16%3A20.803098& workernode=unassigned& tpid=95219.1& url=http%3A%2F%2Fgridtest03.racf.bnl.gov%3A25880%2F2012-08-16%2FBNL_CLOUD& type=atlasOfficial2& tcheck=2012-08-16+18%3A16%3A20.803163& system=osg& jobid=14147.1& tenter=2012-08-16+18%3A16%3A20.803170& state=submitted& submithost=gridui08& user=jhover& host=gridui08.usatlas.bnl.gov& schedd_name=gridui08.usatlas.bnl.gov& nickname=BNL_CLOUD& tstate=2012-08-16+18%3A16%3A20.803172& errinfo= Job status sequence: [root@gridui12 scheduler]# cat service_gridui12.usatlas.bnl.gov_sm_21388 service_gridui12.usatlas.bnl.gov_sm_707 service_gridui12.usatlas.bnl.gov_sm_1300 | grep tp_gridui12_23036_20120815-225856_624 | grep messageDB Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&queueid=BU_ATLAS_Tier2o&tsubmit=2012-08-15+22%3A58%3A57.528556&workernode=unassigned&tpid=tp_gridui12_23036_20120815-225856_624&url=http%3A%2F%2Fgridui12.usatlas.bnl.gov%3A25880%2Fschedlogs%2Ftp_gridui12_23036_20120815%2Ftp_gridui12_23036_20120815-225856_624&nickname=BU_ATLAS_Tier2o-pbs&tcheck=2012-08-15+22%3A58%3A57.528842&system=osg&jobid=39949228.0&tenter=2012-08-15+22%3A58%3A56.771376&host=gridui12.usatlas.bnl.gov&state=submitted&submithost=gridui12&user=sm&schedd_name=gridui12.usatlas.bnl.gov&type=atlasOfficial2&tstate=2012-08-15+22%3A58%3A57.528556&errinfo=+' Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&tschedule=2012-08-15+23%3A06%3A18.985095&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-15+23%3A06%3A18.985130&jobid=39949228.0&state=scheduled&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-15+23%3A06%3A18.985095' Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&tschedule=2012-08-15+23%3A09%3A15.139811&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-15+23%3A09%3A15.139847&jobid=39949228.0&state=scheduled&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-15+23%3A09%3A15.139811' Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A42%3A22.543749&jobid=39949228.0&state=running&tstart=2012-08-16+05%3A42%3A22.543696&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A42%3A22.543696' Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=finished&PandaID=1577156287&workernode=atlas-cm2.bu.edu&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A43%3A55.515658&host=atlas-cm2.bu.edu&jobid=39949228.0&tdone=2012-08-16+05%3A43%3A55.515617&state=done&errcode=0&message=straggling_pilot_not_on_queue_but_in_DB&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A43%3A55.515617&errinfo=Job+successfully+completed' Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 
'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=finished&PandaID=1577156287&workernode=atlas-cm2.bu.edu&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A45%3A30.689477&host=atlas-cm2.bu.edu&jobid=39949228.0&tdone=2012-08-16+05%3A45%3A30.689436&state=done&errcode=0&message=straggling_pilot_not_on_queue_but_in_DB&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A45%3A30.689436&errinfo=Job+successfully+completed'

Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=finished&PandaID=1577156287&workernode=atlas-cm2.bu.edu&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A47%3A07.572424&host=atlas-cm2.bu.edu&jobid=39949228.0&tdone=2012-08-16+05%3A47%3A07.572383&state=done&errcode=0&message=straggling_pilot_not_on_queue_but_in_DB&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A47%3A07.572383&errinfo=Job+successfully+completed'

'''

SERVER = 'panda.cern.ch'
PORT = '25980'
SVCPATH = '/server/pandamon/query?'


def runtest1():
    print("Running service update...")
    (h, a, n) = socket.gethostbyaddr(socket.gethostbyname(platform.node()))
    # h = host, a = short alias list, n = ip address list
    tnow = datetime.datetime.utcnow()
    am = {
        'status': 'running',
        'name': 'Job scheduler',
        'grp': 'TestPilot',
        'type': 'tpmon',
        'pid': os.getpid(),
        'userid': pwd.getpwuid(os.getuid()).pw_name,
        'doaction': '',
        'host': h,
        'tstart': datetime.datetime.utcnow(),
        'lastmod': datetime.datetime.utcnow(),
        'message': '',
        'config': 'BNL-CLOUD-condor',
        # config=pilotScheduler.py+--queue%3DANALY_NET2-pbs+--pandasite%3DANALY_NET2+--pilot%3DatlasOfficial2&
        'description': 'TestPilot service',
        'cyclesec': '360'
    }
    sendQuery(am)


def runtest2():
    print("Running job update test...")
    (host, alias, n) = socket.gethostbyaddr(socket.gethostbyname(platform.node()))
    # host = host, alias = short alias list, n = ip address list
    jobid = "%d.1" % (random.random() * 100000)
    am = {
        'status': 'active',       # active or finished
        'state': 'submitted',     # or scheduled, running; equivalent to globus PENDING, ACTIVE
        'queueid': 'BNL_CLOUD',
        'tsubmit': datetime.datetime.utcnow(),
        'workernode': 'unassigned',
        'host': 'unassigned',
        'tpid': jobid,
        'nickname': 'BNL_CLOUD',  # actually panda queuename, i.e. with -condor, etc.
        'url': 'http://gridtest03.racf.bnl.gov:25880/2012-08-16/BNL_CLOUD',
        'user': pwd.getpwuid(os.getuid()).pw_name,
        'tcheck': datetime.datetime.utcnow(),
        'system': 'osg',
        'jobid': jobid,
        'submithost': alias[0],
        'tenter': datetime.datetime.utcnow(),
        'schedd_name': host,
        'type': 'AutoPyFactory',
        'tstate': datetime.datetime.utcnow(),
        'errinfo': ' ',           ## MUST HAVE a space, or it won't work!!!
    }
    sendQuery(am, 'updatepilot')


def sendQuery(attributemap, querytype='updateservicelist'):
    '''
    querytype: updateservicelist | updatepilot | currentlyqueued
    '''
    q = ''
    for k in attributemap.keys():
        q += "&%s=%s" % (k, urllib.quote_plus(str(attributemap[k])))
    qurl = 'http://%s:%s%s%s%s' % (SERVER, PORT, SVCPATH, 'autopilot=%s' % querytype, q)
    print("%s" % qurl)
    r = urllib2.Request(url=qurl)
    # r.add_header('User-Agent', 'awesome fetcher')
    # r.add_data(urllib.urlencode({'foo': 'bar'}))
    response = urllib2.urlopen(r)
    print(response.read())


if __name__ == '__main__':
    # runtest1()
    # runtest2()
    usage = '''test-pandamon.py <jobid> <state>
    jobid, e.g. 9999999.2
    state    submitted | scheduled | done
'''
    print("sys.argv = %s" % sys.argv)
    if len(sys.argv) < 3:
        print(usage)
        sys.exit(1)
It is imperative for new entrants in the marketing world to build consistent and representative profiles. The online portal must portray the administrator in a favorable light, and one must be astute enough to sense what information would best represent the spirit of the business. For instance, an online promotional page of a marketing specialist that confuses branding with design is doomed to sink. One fictitious or exaggerated post by the page manager is sure to invite trouble from all stakeholders. It is best to abstain from putting any false information on the online platform, no matter how authentic all your previous posts have been. To prevent any damage to your online presence, follow the mantra: ‘No post is better than a false post’. This is the gospel for all those aspiring to rule the online roost. A shoddy design for your online face spells disaster like nothing else. Work to ensure symmetry, color, balance, and other visual qualities on the web portal or the Facebook page; visitors may form a positive or a lousy impression of you based on your website’s first look. For a glimpse of pleasant versus poor design, visit the CCD page. Innovation can be your savior if you are vying with other players in your league for greater online traction. Anything commonplace should have no place with you; one can take the venture to a rewarding level only by practicing innovation and variety in the action plan.
# coding: utf-8 from __future__ import (absolute_import, division, print_function, unicode_literals) import json import logging import boto3 import os import sys import time # Path to modules needed to package local lambda function for upload currentdir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(currentdir, "./vendored")) # Modules downloaded into the vendored directory # Logging for Serverless log = logging.getLogger() log.setLevel(logging.DEBUG) # Initializing AWS services dynamodb = boto3.resource('dynamodb') sts = boto3.client('sts') sns = boto3.client('sns') def handler(event, context): log.debug("Received event {}".format(json.dumps(event))) accountInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_ACCOUNTINFO']) taskStatus = dynamodb.Table(os.environ['TAILOR_TABLENAME_TASKSTATUS']) cbInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_CBINFO']) dispatchRequestArn = os.environ['TAILOR_SNSARN_DISPATCH_REQUEST'] incomingMessage = json.loads(event['Records'][0]['Sns']['Message']) try: if incomingMessage['info'] == "LinkedAccountCreationStarted": getAccountInfo = accountInfo.get_item( Key={ 'accountEmailAddress': incomingMessage['email'] } ) requestId = getAccountInfo['Item']['requestId'] # Update task start status updateStatus = taskStatus.put_item( Item={ "requestId": requestId, "eventTimestamp": str(time.time()), "period": "start", "taskName": "CLA_CREATION", "function": "talr-director", "message": incomingMessage } ) return except KeyError: pass # Look up email address and other account fields in accountInfo table accountEmailAddress = incomingMessage['linkedAccountEmail'] getAccountInfo = accountInfo.get_item( Key={ 'accountEmailAddress': accountEmailAddress } ) requestId = getAccountInfo['Item']['requestId'] accountTagShortProjectName = getAccountInfo['Item']['accountTagShortProjectName'] accountTagEnvironment = getAccountInfo['Item']['accountTagEnvironment'] accountCbAlias = getAccountInfo['Item']['accountCbAlias'] # Look up account division getCbInfo = cbInfo.get_item( Key={ 'accountCbAlias': accountCbAlias } ) accountDivision = getCbInfo['Item']['accountDivision'].lower() accountCompanyCode = getCbInfo['Item']['accountCompanyCode'] accountCbId = getCbInfo['Item']['accountCbId'] if "linkedAccountId" in incomingMessage and getAccountInfo['Item']['accountEmailAddress'] == accountEmailAddress: # Update task end status updateStatus = taskStatus.put_item( Item={ "requestId": requestId, "eventTimestamp": str(time.time()), "period": "end", "taskName": "CLA_CREATION", "function": "talr-director", "message": incomingMessage } ) laAccountId = incomingMessage['linkedAccountId'] print("New linked account: " + laAccountId) updateAccountInfo = accountInfo.update_item( Key={ 'accountEmailAddress': accountEmailAddress }, UpdateExpression='SET #accountId = :val1', ExpressionAttributeNames={'#accountId': "accountId"}, ExpressionAttributeValues={':val1': incomingMessage['linkedAccountId']} ) else: # Update task failure status updateStatus = taskStatus.put_item( Item={ "requestId": requestId, "eventTimestamp": str(time.time()), "period": "failed", "taskName": "CLA_CREATION", "function": "talr-director", "message": incomingMessage } ) return {"code": "601", "requestId": requestId, "message": "ERROR: Linked account failed to create"} # Start linked account validation updateStatus = taskStatus.put_item( Item={ "requestId": requestId, "eventTimestamp": str(time.time()), "period": "start", "taskName": "CLA_VALIDATION", "function": "talr-director", "message": "Linked 
account: " + laAccountId } ) # Payer account credentials payerAssumeRole = sts.assume_role( RoleArn="arn:aws:iam::" + accountCbId + ":role/tailor", RoleSessionName="talrDirectorPayerAssumeRole" ) payerCredentials = payerAssumeRole['Credentials'] payer_aws_access_key_id = payerCredentials['AccessKeyId'] payer_aws_secret_access_key = payerCredentials['SecretAccessKey'] payer_aws_session_token = payerCredentials['SessionToken'] # Linked account credentials laSts = boto3.client( 'sts', aws_access_key_id=payer_aws_access_key_id, aws_secret_access_key=payer_aws_secret_access_key, aws_session_token=payer_aws_session_token, ) laAssumeRole = laSts.assume_role( RoleArn="arn:aws:iam::" + laAccountId + ":role/PayerAccountAccessRole", RoleSessionName="talrDirectorLaAssumeRole" ) laCredentials = laAssumeRole['Credentials'] la_aws_access_key_id = laCredentials['AccessKeyId'] la_aws_secret_access_key = laCredentials['SecretAccessKey'] la_aws_session_token = laCredentials['SessionToken'] # List roles in linked account to validate access laIam = boto3.client( 'iam', aws_access_key_id=la_aws_access_key_id, aws_secret_access_key=la_aws_secret_access_key, aws_session_token=la_aws_session_token, ) laListRoles = laIam.list_roles() print(laListRoles) # Create IAM Account Alias in Linked Account accountIamAlias = accountCompanyCode + "-" + accountDivision.lower() + "-" + \ accountTagShortProjectName + "-" + accountTagEnvironment laCreateAccountIamAlias = laIam.create_account_alias( AccountAlias=accountIamAlias ) # Add account IAM alias to accountInfo table updateAccountInfo = accountInfo.update_item( Key={ 'accountEmailAddress': accountEmailAddress }, UpdateExpression='SET #accountIamAlias = :val1', ExpressionAttributeNames={'#accountIamAlias': "accountIamAlias"}, ExpressionAttributeValues={':val1': accountIamAlias} ) # Update task end status updateStatus = taskStatus.put_item( Item={ "requestId": requestId, "eventTimestamp": str(time.time()), "period": "end", "taskName": "CLA_VALIDATION", "function": "talr-director", "message": "Linked account: " + laAccountId } ) publishToTalrDispatchRequest = sns.publish( TopicArn=dispatchRequestArn, Message='{ "default" : { "requestId": "' + requestId + '", "accountEmailAddress": "' + accountEmailAddress + '" }, "lambda" : { "requestId": "' + requestId + '", "accountEmailAddress": "' + accountEmailAddress + '" }}' )
Your management prerogative can determine the success of your business. Even if the decisions involved are not big ones, you will be surprised at how even the smallest purchase orders can spell the difference between a massive success and a stale neighborhood diner. Below, Titan Slicing Systems shares three tips on how to make your business flourish with simple but smart business decisions. Before you approve that supply order, think of the alternatives that offer long-term benefits and advantages. In every food business taste is important, but consistency and speed also weigh heavily. One smart purchase is a commercial food slicer: it will not only improve the consistency of the food you dish out, it will also cut the preparation time of the food you serve, which makes for great customer service. Even the smallest update in technology can produce massive increases in your company’s efficiency and, ultimately, its profits. Upgrade your counters to computerised touch-screen systems; invest in kitchen helpers such as commercial food slicers, top-grade coffeemakers and speed mixers. You will increase the speed of work, the consistency of output and the overall productivity. What key factors contribute to the productivity of your personnel? Determine what you can do to trigger these factors and help maintain this level of productivity. Pay attention to your personnel: how do they work, and are there significant changes in their workflow during the peak hours of lunch or dinner service? Remember that customer service can make or break your business, so pay attention to these key factors. Write down these three simple tips to remind yourself of the importance of small decisions. Be mindful of your strategies, policies and decisions, and watch your business work its way to the top.
#p-Center Facility Location Problem
#This script creates a linear programming file to be read into an optimizer.

'''
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
'''

# Developed by:  James D. Gaboardi, MSGIS
#                03/2015
#                © James Gaboardi

#   **Attention** **Adjust the following**
# 66 --> 'c##' needs to be changed depending on data and constraint number based on .lp file
# 66 --> '= ##\n' needs to be changed for the number of facilities to be sited
# 71 --> 'c##' needs to be changed depending on data and constraint number based on .lp file
# 83 --> 'c##' needs to be changed depending on data and constraint number based on .lp file

# Terminology & General Background for Facility Location and Summation Notation:
# * The objective of the p-center Facility Location Problem is to minimize the maximum cost
#   of travel between service facilities and clients on a network.
# * [i] - a specific origin
# * [j] - a specific destination
# * [n] - the set of origins
# * [m] - the set of destinations
# * [Cij] - travel costs between nodes
# * [W] - the maximum travel cost between a client and its assigned service facility
# * [x#_#] - the decision variable in # row, # column position in the matrix
# * [y#] - service facility in the # row
# * [p] - the number of facilities to be sited

# 1. IMPORTS
# Other imports may be necessary for matrix creation and manipulation
import numpy as np

# 2. DEFINED FUNCTIONS
# NOTE: these functions read rows, cols and Cij from the globals declared in section 3.

# Assignment Constraints
# Each client must be served by exactly one facility,
# so each column in the matrix must sum to 1.
def get_assignment_constraints(rows):
    counter = 0
    outtext = ''
    for i in range(1, cols+1):
        counter = counter + 1
        temp = ' c' + str(counter) + ': '
        for j in range(1, rows+1):
            temp += 'x' + str(j) + '_' + str(i) + ' + '
        outtext += temp[:-2] + '= 1\n'
    return outtext

# Facility Constraint
# *** replace '#' in '= #\n' below with p, the number of facilities to be sited
# *** (e.g. '= 1\n' sites exactly 1 facility)
def get_p_facilities(rows):
    outtext = ''
    for i in range(1, rows+1):
        temp = ''
        temp += 'y' + str(i)
        outtext += temp + ' + '
    outtext = ' c##: ' + outtext[:-2] + '= #\n'
    return outtext

# Opening Constraints
# A client can only be assigned to a facility that has been opened.
def get_opening_constraints_p_center(Cij):
    counter = 151
    outtext = ''
    for i in range(1, rows+1):
        for j in range(1, cols+1):
            counter = counter + 1
            outtext += ' c' + str(counter) + ': - x' + str(i) + '_' + str(j) + ' + ' + 'y' + str(i) + ' >= 0\n'
    return outtext

# Maximum Cost Constraints
# W must be at least the travel cost between each client and its assigned facility,
# so minimizing W minimizes the maximum assigned travel cost.
# This code chunk works by summing the columns, not the rows.
def get_max_cost(rows):
    counter = 1501
    outtext = ''
    for j in range(cols):
        counter = counter + 1
        temp = ' c' + str(counter) + ': '
        for i in range(rows):
            temp += str(Cij[i,j]) + ' x' + str(i+1) + '_' + str(j+1) + ' + '
        outtext += temp[:-2] + '- W <= 0\n'
    return outtext

# Declaration of Bounds
def get_bounds_allocation(Cij):
    outtext = ''
    for i in range(rows):
        temp = ''
        for j in range(cols):
            temp += ' 0 <= x' + str(i+1) + '_' + str(j+1) + ' <= 1\n'
        outtext += temp
    return outtext

def get_bounds_facility(Cij):
    outtext = ''
    for i in range(rows):
        outtext += ' 0 <= y' + str(i+1) + ' <= 1\n'
    return outtext

# Declaration of Decision Variables (form can be: Binary, Integer, etc.)
# In this case decision variables are binary.
# *** 0 for no sited facility, 1 for a sited facility
def get_decision_variables_p_center(Cij):
    outtext = ' '
    for i in range(1, rows+1):
        temp = ''
        for j in range(1, cols+1):
            temp += 'x' + str(i) + '_' + str(j) + ' '
        outtext += temp
    return outtext

def get_facility_decision_variables_p_center(rows):
    outtext = ''
    for i in range(1, rows+1):
        outtext += 'y' + str(i) + ' '
    return outtext

# 3. DATA READS & VARIABLE DECLARATION
'''
########## Cost Matrix
########## Cij -->  [ 0, 13,  8, 15,
##########           13,  0, 12, 11,
##########            8, 12,  0, 10,
##########           15, 11, 10,  0]
########## Read Cij in as a vector text file.
'''
Cij = np.fromfile('path/Cij.txt', dtype=float, sep='\n')
Cij = Cij.reshape(#, #)  # replace '#, #' with the matrix dimensions, e.g. (4, 4)
rows, cols = Cij.shape

# 4. START TEXT FOR .lp FILE
# Declaration of Objective Function
# Lines starting with '\' are comments in the CPLEX LP file format.
text = "\\ p-Center Facility Location Problem\n"
text += 'Minimize\n'
text += ' obj: W\n'
# Declaration of Constraints
text += 'Subject To\n'
text += get_assignment_constraints(rows)
text += get_p_facilities(rows)
text += get_opening_constraints_p_center(Cij)
text += get_max_cost(rows)
# Declaration of Bounds
text += 'Bounds\n'
text += get_bounds_allocation(Cij)
text += get_bounds_facility(Cij)
# Declaration of Decision Variables form: Binaries
text += 'Binaries\n'
text += get_decision_variables_p_center(Cij)
text += get_facility_decision_variables_p_center(rows)
text += '\n'
text += 'End\n'
text += "\\ © James Gaboardi, 2015"

# 5. CREATE & WRITE .lp FILE TO DISK
# Fill in the path name -- the file name must not have spaces.
outfile = open('path/name.lp', 'w')
outfile.write(text)
outfile.close()
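# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original script): filling in the
# placeholders above for the 4x4 example matrix shown in the comment block.
# The file name 'Cij.txt' and the dimensions (4, 4) are illustrative
# assumptions; adjust them to your own data.
import numpy as np

example = [0, 13, 8, 15,
           13, 0, 12, 11,
           8, 12, 0, 10,
           15, 11, 10, 0]
np.savetxt('Cij.txt', example, fmt='%d', newline='\n')  # one value per line

Cij = np.fromfile('Cij.txt', dtype=float, sep='\n')
Cij = Cij.reshape(4, 4)   # 4 origins x 4 destinations
rows, cols = Cij.shape    # with p = 1, get_p_facilities would end '= 1\n'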
The death of six US soldiers in Afghanistan on December 21 at the hands of a Taliban suicide bomber brings to 21 the number of US combat deaths there in 2015. Once again we must confront the question of national purpose in waging war without debate or declaration. Like all other battlefield deaths in the Middle East, the Obama administration rationalizes these latest as being part of "training, advising, and assisting," not combat. But those are merely code words for direct interventions that Congress has not authorized since 2002, in clear violation of the restrictions the War Powers Resolution of 1973 places on presidential power.

There will be plenty more casualties in the Middle East for years to come, and not just because of the seemingly permanent US military presence in Iraq and Afghanistan. Consider two recent news items. First, according to a plan not yet formally approved, the Pentagon wants to create a worldwide string of "hubs" as staging areas for Special Operations forces to strike quickly against terrorists. Second, most members of Congress are unwilling to introduce and debate a bill authorizing the Obama administration's use of force in the Middle East and beyond. Thus, there is no end in sight to the US at war, both because the Pentagon has found the perfect enemy and because no one in Congress is willing to stand up to it.

The Pentagon's plan is to have a forward presence that, in the words of Defense Secretary Ashton Carter, "will enable unilateral crisis response, counterterror operations, or strikes on high-value targets." Not long ago the Pentagon's mantra was "places, not bases," so as to avoid all the political problems, as well as the monetary costs, associated with a permanent military presence on foreign soil. Now "places" evidently have been modified to "hubs" and "spokes," Pentagon-speak for small-scale leased bases of the sort already in place all over Africa. Northern Iraq and southern Europe are being considered as additional hub sites.

Not everyone is reportedly on board with the Pentagon's plan. The State Department correctly sees it as a power grab that may actually harm US foreign policy. The plan works at cross-purposes with diplomacy, substituting the deployment and use of force for potential opportunities to engage governments and rival groups. More US military facilities, no matter their size, invite criticism in the host countries, may become targets of terror groups, and feed the hostile propaganda of militants. In our terrorism era, however, State has no chance to win this battle.

The Authorization for the Use of Military Force (AUMF) proposed by Representative Adam Schiff (D-CA) would have limited military action against the Islamic State (ISIS or IS) to three years, prohibited the use of US ground troops, and immediately terminated both the 2001 and 2002 congressional authorizations tied to the 9/11 attacks and the Iraq War, removing two pillars of presidential authority claimed by President Obama as sufficient to go after IS. Neither Schiff's resolution nor a similar one proposed in June by Senators Tim Kaine (D-VA) and Jeff Flake (R-AZ) in the Senate Foreign Relations Committee ever got to the floor of Congress. Now, however, there is a twist on the earlier story: the executive branch wants a new authorization, and it is Congress that is balking. Democrats fear being seen as weak on terrorism if they try to constrain the president while IS is around, and Republicans fear giving him control over what they regard as a weak-kneed strategy.
The president is thus left free to do as he pleases in Syria, Iraq, Afghanistan, and Pakistan, totally ignoring the War Powers Resolution and, as a lame duck, ignoring Congress too.

Recall what the WPR requires: the president must report to Congress whenever US forces are introduced (1) into hostilities or situations where hostilities appear imminent; (2) into the territory, airspace, or waters of a foreign nation, while equipped for combat; or (3) in numbers which substantially enlarge United States Armed Forces equipped for combat already located in a foreign nation . . .

There is little doubt that President Obama violated the WPR by introducing and reintroducing US forces into "hostilities," not only in Afghanistan and Iraq but also in Syria, Libya, Yemen, and Pakistan. In Iraq and Afghanistan, nearly 2,000 US service personnel have died since Obama took office in January 2009.

Unfortunately, the academic notion of shared powers is something of a myth. Only in rare instances will Congress attempt to tie the president's hands in a war situation: for instance, when the Boland Amendment prohibited intelligence agencies from supporting the overthrow of the Nicaraguan government in the 1980s, and when Congress placed a six-month limit on President Reagan's troop commitment in Lebanon in 1983. But those attempts mattered little. The Boland Amendment failed to prevent President Reagan's National Security Council from secretly funneling money to the Nicaraguan contras. And Reagan pulled US troops from Lebanon after the disastrous attack on their barracks in Beirut. In general, Congress simply defers to the president. Even the president's most hostile critics will bend to his leadership when national security is believed (or said) to be at stake.

Republicans are more likely than Democrats to support presidential war making. Today, some of them - for example, Senator Marco Rubio (R-FL) - are saying the president should have the authority to use "all means necessary" to defeat IS. In other words, they want to go beyond what Obama is asking under the authorization resolution. That's the same mistake Congress made in 1964, in the Tonkin Gulf Resolution, when it gave Lyndon Johnson a virtual blank check in Southeast Asia ("to take all necessary steps . . . to promote international peace and security").

The WPR, in fact, has never been effective. No president has ever regarded it as a legitimate exercise of congressional authority in war. No president has been forced to abide by its key provision: obtain congressional approval of a troop deployment within 60 days or withdraw the troops. Only a few presidents (including Obama) have even acknowledged the resolution when planning military action abroad. All presidents have insisted that as commander-in-chief they have all the constitutional authority they need to make war. Thus, when Congress votes to authorize military action abroad, what it is really doing is legitimizing what the president has already decided to do - and would do even in the absence of congressional authorization. So the only difference between then and now is that Congress won't bother to vote, or even debate, presidential war powers.

Senator Kaine is apparently going to try again with his resolution. But even if his or Representative Schiff's proposal were to pass, the president would not be prohibited from many forms of military intervention, all of which he is employing today: using "advisers," CIA operatives, and special forces; transferring arms to friendly forces; conducting drone strikes; directing air strikes by non-US air forces; training other militaries; and supporting third countries or groups whose ground forces substitute for US forces that Congress would prohibit. Congress will thus be bypassed once again.
And only a few members of Congress will likely speak out against the potential human and monetary costs associated with the Pentagon’s latest basing plan. Indeed, most members of Congress, not to mention most of the presidential candidates, will push for increased use of force abroad. Endless war, both undeclared and undebated, will thus remain a central feature of the next presidency.
import libqtile.config
from libqtile.bar import Bar
from libqtile.widget.check_updates import CheckUpdates, Popen  # noqa: F401


def no_op(*args, **kwargs):
    pass


wrong_distro = "Barch"
good_distro = "Arch"
cmd_0_line = "export toto"  # quick "monkeypatch" simulating 0 lines of output, i.e. 0 updates
cmd_1_line = "echo toto"  # quick "monkeypatch" simulating 1 line of output, i.e. 1 update
cmd_error = "false"
nus = "No Update Available"


def test_unknown_distro():
    """ test an unknown distribution """
    cu = CheckUpdates(distro=wrong_distro)
    text = cu.poll()
    assert text == "N/A"


def test_update_available(fake_qtile, fake_window):
    """ test output with update (check number of updates and color) """
    cu2 = CheckUpdates(distro=good_distro,
                       custom_command=cmd_1_line,
                       colour_have_updates="#123456"
                       )
    fakebar = Bar([cu2], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    cu2._configure(fake_qtile, fakebar)
    text = cu2.poll()
    assert text == "Updates: 1"
    assert cu2.layout.colour == cu2.colour_have_updates


def test_no_update_available_without_no_update_string(fake_qtile, fake_window):
    """ test output with no update (without dedicated string nor color) """
    cu3 = CheckUpdates(distro=good_distro, custom_command=cmd_0_line)
    fakebar = Bar([cu3], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    cu3._configure(fake_qtile, fakebar)
    text = cu3.poll()
    assert text == ""


def test_no_update_available_with_no_update_string_and_color_no_updates(
    fake_qtile, fake_window
):
    """ test output with no update (with dedicated string and color) """
    cu4 = CheckUpdates(distro=good_distro,
                       custom_command=cmd_0_line,
                       no_update_string=nus,
                       colour_no_updates="#654321"
                       )
    fakebar = Bar([cu4], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    cu4._configure(fake_qtile, fakebar)
    text = cu4.poll()
    assert text == nus
    assert cu4.layout.colour == cu4.colour_no_updates


def test_update_available_with_restart_indicator(monkeypatch, fake_qtile, fake_window):
    """ test output with restart indicator when a restart is needed """
    cu5 = CheckUpdates(distro=good_distro,
                       custom_command=cmd_1_line,
                       restart_indicator="*",
                       )
    monkeypatch.setattr("os.path.exists", lambda x: True)
    fakebar = Bar([cu5], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    cu5._configure(fake_qtile, fakebar)
    text = cu5.poll()
    assert text == "Updates: 1*"


def test_update_available_with_execute(manager_nospawn, minimal_conf_noscreen, monkeypatch):
    """ test polling after executing command """

    # Use monkeypatching to patch both Popen (for the execute command) and call_process.
    # This class returns None when first polled (to simulate that the task is still running)
    # and then 0 on the second call.
    class MockPopen:
        def __init__(self, *args, **kwargs):
            self.call_count = 0

        def poll(self):
            if self.call_count == 0:
                self.call_count += 1
                return None
            return 0

    # Bit of an ugly hack to replicate the above functionality but for a method.
    class MockSpawn:
        call_count = 0

        @classmethod
        def call_process(cls, *args, **kwargs):
            if cls.call_count == 0:
                cls.call_count += 1
                return "Updates"
            return ""

    cu6 = CheckUpdates(distro=good_distro,
                       custom_command="dummy",
                       execute="dummy",
                       no_update_string=nus,
                       )

    # Patch the necessary objects
    monkeypatch.setattr(cu6, "call_process", MockSpawn.call_process)
    monkeypatch.setattr("libqtile.widget.check_updates.Popen", MockPopen)

    config = minimal_conf_noscreen
    config.screens = [
        libqtile.config.Screen(
            top=libqtile.bar.Bar([cu6], 10)
        )
    ]

    manager_nospawn.start(config)
    topbar = manager_nospawn.c.bar["top"]

    assert topbar.info()["widgets"][0]["text"] == "Updates: 1"

    # Clicking the widget triggers the execute command
    topbar.fake_button_press(0, "top", 0, 0, button=1)

    # The second time we poll the widget, the update process is complete
    # and there are no more updates
    _, result = manager_nospawn.c.widget["checkupdates"].eval("self.poll()")
    assert result == nus


def test_update_process_error(fake_qtile, fake_window):
    """ test output where the update check gives an error """
    cu7 = CheckUpdates(distro=good_distro,
                       custom_command=cmd_error,
                       no_update_string="ERROR",
                       )
    fakebar = Bar([cu7], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    cu7._configure(fake_qtile, fakebar)
    text = cu7.poll()
    assert text == "ERROR"


def test_line_truncations(fake_qtile, monkeypatch, fake_window):
    """ test that the update count is reduced for distros that print extra lines """

    # Mock output to return 5 lines of text
    def mock_process(*args, **kwargs):
        return "1\n2\n3\n4\n5\n"

    # Fedora is set up to remove 1 from the line count
    cu8 = CheckUpdates(distro="Fedora")
    monkeypatch.setattr(cu8, "call_process", mock_process)
    fakebar = Bar([cu8], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    cu8._configure(fake_qtile, fakebar)
    text = cu8.poll()

    # Should have 4 updates
    assert text == "Updates: 4"
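# ---------------------------------------------------------------------------
# A minimal configuration sketch (not part of the test suite above) showing
# how the widget under test is typically wired into a user's qtile config.
# The distro and no_update_string values are illustrative assumptions.
from libqtile import bar, widget
from libqtile.config import Screen

screens = [
    Screen(
        top=bar.Bar(
            [widget.CheckUpdates(distro="Arch", no_update_string="Up to date")],
            24,  # bar height in pixels
        )
    )
]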
If civilized people are expected to have read all the important works of literature, and thousands more books are published every year, what are we supposed to do in those awkward social situations in which we're forced to talk about books we haven't read? In this delightfully witty, provocative book, a huge hit in France that has drawn attention from critics around the world, literature professor and psychoanalyst Bayard argues that it's actually more important to know a book's role in our collective library than its details. Using examples from such writers as Graham Greene, Oscar Wilde, Montaigne, and Umberto Eco, and even the movie Groundhog Day, he describes the many varieties of "non-reading" and the horribly sticky social situations that might confront us, and then offers his advice on what to do. Practical, funny, and thought-provoking, How to Talk About Books You Haven't Read is, in the end, a love letter to books, offering a whole new perspective on how we read and absorb them. It's the book that readers everywhere will be talking about - and, despite themselves, reading - this holiday season.
from django.conf.urls import patterns, url from django.contrib import messages from django.core.urlresolvers import reverse from django import forms from django.http import HttpResponseRedirect, HttpResponse from django.template import RequestContext, Template from django.template.response import TemplateResponse from django.views.decorators.cache import never_cache from django.contrib.messages.views import SuccessMessageMixin from django.views.generic.edit import FormView TEMPLATE = """{% if messages %} <ul class="messages"> {% for message in messages %} <li{% if message.tags %} class="{{ message.tags }}"{% endif %}> {{ message }} </li> {% endfor %} </ul> {% endif %} """ @never_cache def add(request, message_type): # don't default to False here, because we want to test that it defaults # to False if unspecified fail_silently = request.POST.get('fail_silently', None) for msg in request.POST.getlist('messages'): if fail_silently is not None: getattr(messages, message_type)(request, msg, fail_silently=fail_silently) else: getattr(messages, message_type)(request, msg) show_url = reverse('django.contrib.messages.tests.urls.show') return HttpResponseRedirect(show_url) @never_cache def add_template_response(request, message_type): for msg in request.POST.getlist('messages'): getattr(messages, message_type)(request, msg) show_url = reverse('django.contrib.messages.tests.urls.show_template_response') return HttpResponseRedirect(show_url) @never_cache def show(request): t = Template(TEMPLATE) return HttpResponse(t.render(RequestContext(request))) @never_cache def show_template_response(request): return TemplateResponse(request, Template(TEMPLATE)) class ContactForm(forms.Form): name = forms.CharField(required=True) slug = forms.SlugField(required=True) class ContactFormViewWithMsg(SuccessMessageMixin, FormView): form_class = ContactForm success_url = show success_message = "%(name)s was created successfully" urlpatterns = patterns('', ('^add/(debug|info|success|warning|error)/$', add), url('^add/msg/$', ContactFormViewWithMsg.as_view(), name='add_success_msg'), ('^show/$', show), ('^template_response/add/(debug|info|success|warning|error)/$', add_template_response), ('^template_response/show/$', show_template_response), )
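# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the module above): exercising the
# add/show cycle with Django's test client. It assumes this URLconf is the
# project's ROOT_URLCONF and that the messages framework (middleware and
# context processor) is enabled in settings.
from django.test import Client

client = Client()
# The add view stores the messages and redirects to the show view.
client.post('/add/info/', {'messages': ['Hello', 'World']})
# The show view renders the stored messages using TEMPLATE.
response = client.get('/show/')
assert b'Hello' in response.content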
Sergio Canales has reached an agreement with the club about his paycheck and will join Valencia CF on a two-year loan worth 2 million euros, or 1 million euros per season, with a double purchase option. After the two years on loan, Valencia CF can choose to buy the player for 12 million euros, and Real Madrid can then buy him back for a fee of 18 million euros. The deal stipulates that Sergio Canales can't play against Real Madrid, and reportedly this is why Villarreal broke off their negotiations for Canales. Sergio Canales came to Real Madrid last season from Racing Santander for 6 million euros, but has hardly played at all at Real Madrid under Mourinho, and the coach and player have looked to part ways, with Valencia making a smart move in taking him on loan. This makes our attacking midfield even more potent and dangerous and increases the competition so much more. Valencia CF is still in the transfer market for a central defender, and there are a few players we are in talks with at this moment, with the Mangala transfer having come to a sudden stop.
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RBiostrings(RPackage): """Memory efficient string containers, string matching algorithms, and other utilities, for fast manipulation of large biological sequences or sets of sequences.""" homepage = "https://bioconductor.org/packages/Biostrings/" git = "https://git.bioconductor.org/packages/Biostrings.git" version('2.44.2', commit='e4a2b320fb21c5cab3ece7b3c6fecaedfb1e5200') depends_on('r-biocgenerics', type=('build', 'run')) depends_on('r-s4vectors', type=('build', 'run')) depends_on('r-iranges', type=('build', 'run')) depends_on('r-xvector', type=('build', 'run')) depends_on('[email protected]:3.4.9', when='@2.44.2')
Pets can be your best friends, but they can also be your worst enemy if you suffer from allergies or asthma. Many households have pets such as dogs, cats, birds and hamsters. Dander, fur, hair and feathers from pets can trigger allergies. Plus, many pets shed hair, and that can make a mess of the cleanliness of our houses!

Pet allergens are smaller than pollen or mold spores and are more difficult to remove effectively. Nevertheless, there are many ways to reduce pet allergens: it is wise to keep your pet outside as much as possible, and to wash your hands and face after playing with your pets. But one great answer is to place an air purifier for pets, pet hair, pet smells and pet allergies in your own home. A HEPA purifier can help reduce pet allergies. A good air purifier with HEPA filters can capture airborne particles like pet dander and dried saliva, so you no longer have to worry about allergy problems. Perhaps you'd like to find a suitable pet dander air purifier below.

Airborne contaminants generated by your pets are handled well by the Hamilton Beach TrueAir Compact Pet Air Purifier, despite its compact dimensions of 10.8 x 8.1 x 16.4 inches and weight of just 6.3 lbs. The HEPA filter included with this pet air purifier uses technology that means the consumer won't ever face the extra cost of buying a replacement. This air purifier is ideal for practically any room your pet spends time in, and with 3 speeds it will take care of the heaviest dander and scents. Suitable for use in rooms of up to 160 square feet, it uses a triple filtration system to remove both large and microscopic airborne particles. Overall, the Hamilton Beach TrueAir Compact Pet Air Purifier does its job well, and it is also among the most affordable models on the market for people who suffer from pet allergies.

The GermGuardian AC5250PT 3-in-1 Digital Air Cleaning System with PetPure Filter Treatment, UVC and Scent Reduction is among the few air purifiers on the market featuring a UVC light, which helps to kill airborne bacteria, mould spores, and viruses. It has also been recognized by the American Academy of Pediatric Allergy and Immunology Doctors as having the potential to lessen exposure to indoor asthma triggers. This air purifier offers a 3-step cleaning system: filtration through a PetPure filter, a HEPA filter, and a UVC light. The True HEPA filter captures up to 99.97% of allergens in a room, including those connected with pollen, pet dander, and dust mites. The GermGuardian AC5250PT suits a medium to large room: it can clean the air of a 193 square foot room in a single hour.

The Winix WAC9500 Ultimate Air Purifier is a top-end air purifier with superior performance. Those who have tried the WAC9500 pet dander air purifier generally find it superior to other models. It removes pet dander from circulation, which can alleviate the symptoms of allergies in sensitive people. The HEPA filter removes dust, smoke particles and other microscopic pollutants. The unit has sensors that detect rising levels of pollutants and vapors and adjust the filtration settings accordingly. In addition, it has a remote control, so you can operate it from the comfort of a sofa or bed.
The Winix WAC9500 Ultimate Air Purifier with PlasmaWave Technology is certainly one of the most effective pet dander air purifiers out there and is an important step up from a number of other air cleaners.

If you or somebody in your family suffers from pet dander allergies, the most effective air purifier you can get to deal with the issue is a true HEPA unit. A true HEPA air purifier is certified to remove 99.97% of microscopic allergens as small as 0.3 microns in size. This characteristic makes it the best air purifier for pet dander.
# -*- coding: utf-8 -*-
import family

__version__ = '$Id$'

# An unofficial Gentoo wiki project.
# Ask for permission at http://gentoo-wiki.com/Help:Bots before running a bot.
# Be very careful, and set a long throttle: "until we see it is good: one edit
# every minute and one page fetch every 30 seconds, maybe a *bit* faster later".

class Family(family.Family):
    def __init__(self):
        family.Family.__init__(self)
        self.name = 'gentoo'
        self.languages_by_size = [
            'en', 'ru', 'de', 'fr', 'tr', 'es', 'scratch', 'cs', 'nl', 'fi',
        ]
        for l in self.languages_by_size:
            self.langs[l] = '%s.gentoo-wiki.com' % l

        # TODO: sort
        # he: also uses the default 'Media'
        self.namespaces[4] = {
            '_default': u'Gentoo Linux Wiki',
        }
        self.namespaces[5] = {
            '_default': u'Gentoo Linux Wiki talk',
            'cs': u'Gentoo Linux Wiki diskuse',
            'de': u'Gentoo Linux Wiki Diskussion',
            'es': u'Gentoo Linux Wiki Discusión',
            'fi': u'Keskustelu Gentoo Linux Wikistä',
            'fr': u'Discussion Gentoo Linux Wiki',
            'nl': u'Overleg Gentoo Linux Wiki',
            'ru': u'Обсуждение Gentoo Linux Wiki',
            'tr': u'Gentoo Linux Wiki tartışma',
        }
        self.namespaces[90] = {
            '_default': u'Thread',
        }
        self.namespaces[91] = {
            '_default': u'Thread talk',
        }
        self.namespaces[92] = {
            '_default': u'Summary',
        }
        self.namespaces[93] = {
            '_default': u'Summary talk',
        }
        self.namespaces[100] = {
            '_default': u'Index',
            'tr': u'Icerik',
        }
        self.namespaces[101] = {
            '_default': u'Index Talk',
            'tr': u'Icerik Talk',
        }
        self.namespaces[102] = {
            '_default': u'Ebuild',
        }
        self.namespaces[103] = {
            '_default': u'Ebuild Talk',
        }
        self.namespaces[104] = {
            '_default': u'News',
            'tr': u'Haberler',
        }
        self.namespaces[105] = {
            '_default': u'News Talk',
            'tr': u'Haberler Talk',
        }
        self.namespaces[106] = {
            '_default': u'Man',
        }
        self.namespaces[107] = {
            '_default': u'Man Talk',
        }
        self.namespaces[110] = {
            '_default': u'Ucpt',
        }
        self.namespaces[111] = {
            '_default': u'Ucpt talk',
        }

        self.known_families.pop('gentoo-wiki')

    def version(self, code):
        return "1.16alpha"
I work at an inner-city school, and tension around there can be high, so the teachers all play pranks on each other. All the time. When I found this site, I knew I had something no one else had ever thought of. Once you get the covers on a book, there's just no way it could be fake. They are so real. I got the Recycle Diarrhea one for an overly obsessed eco-friendly teacher, put it in her bookshelf and waited. When the principal found it? Best. Prank. Ever.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  smoke_zephyr/utilities.py
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following disclaimer
#    in the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of the project nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import collections
import functools
import inspect
import ipaddress
import itertools
import logging
import os
import random
import re
import shutil
import string
import subprocess
import sys
import time
import unittest
import urllib.parse
import urllib.request
import weakref

EMAIL_REGEX = re.compile(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,6}$', flags=re.IGNORECASE)

class AttributeDict(dict):
	"""
	This class allows dictionary keys to be accessed as attributes. For
	example: ``ad = AttributeDict(test=1); ad['test'] == ad.test``
	"""
	__getattr__ = dict.__getitem__
	__setattr__ = dict.__setitem__

class BruteforceGenerator(object):
	"""
	This class allows iterating sequences for bruteforcing.
	"""
	# requirements = itertools
	def __init__(self, startlen, endlen=None, charset=None):
		"""
		:param int startlen: The minimum sequence size to generate.
		:param int endlen: The maximum sequence size to generate.
		:param charset: The characters to include in the resulting sequences.
		"""
		self.startlen = startlen
		if endlen is None:
			self.endlen = startlen
		else:
			self.endlen = endlen
		if charset is None:
			charset = list(map(chr, range(0, 256)))
		elif isinstance(charset, str):
			charset = list(charset)
		elif isinstance(charset, bytes):
			charset = list(map(chr, charset))
		charset.sort()
		self.charset = tuple(charset)
		self.length = self.startlen
		self._product = itertools.product(self.charset, repeat=self.length)
		self._next = self.__next__

	def __iter__(self):
		return self

	def __next__(self):
		return self.next()

	def next(self):
		try:
			value = next(self._product)
		except StopIteration:
			if self.length == self.endlen:
				raise StopIteration
			self.length += 1
			self._product = itertools.product(self.charset, repeat=self.length)
			value = next(self._product)
		return ''.join(value)

_ArgSpec = collections.namedtuple('_ArgSpec', ('args', 'varargs', 'keywords', 'defaults'))

class Cache(object):
	"""
	This class provides a simple-to-use cache object which can be applied
	as a decorator.
""" def __init__(self, timeout): """ :param timeout: The amount of time in seconds that a cached result will be considered valid for. :type timeout: int, str """ if isinstance(timeout, str): timeout = parse_timespan(timeout) self.cache_timeout = timeout self._target_function = None self._target_function_arg_spec = None self.__cache = {} self.__obj = None def __get__(self, instance, _): self.__obj = instance return self def __call__(self, *args, **kwargs): if not getattr(self, '_target_function', False): target_function = args[0] if not inspect.isfunction(target_function) and not inspect.ismethod(target_function): raise RuntimeError('the cached object must be a function or method') arg_spec = inspect.getfullargspec(target_function) # pylint: disable=W1505 arg_spec = _ArgSpec(args=arg_spec.args, varargs=arg_spec.varargs, keywords=arg_spec.kwonlyargs, defaults=arg_spec.defaults) if arg_spec.varargs or arg_spec.keywords: raise RuntimeError('the cached function can not use dynamic args or kwargs') self._target_function = target_function self._target_function_arg_spec = arg_spec return functools.wraps(target_function)(self) self.cache_clean() if self.__obj is not None: args = (self.__obj,) + args self.__obj = None is_method = True else: is_method = False args = self._flatten_args(args, kwargs) if is_method: inst = args.popleft() args = tuple(args) ref = weakref.ref(inst, functools.partial(self._ref_callback, args)) cache_args = (ref,) + args args = (inst,) + args else: cache_args = tuple(args) args = tuple(args) result, expiration = self.__cache.get(cache_args, (None, 0)) if expiration > time.time(): return result result = self._target_function(*args) self.__cache[cache_args] = (result, time.time() + self.cache_timeout) return result def __repr__(self): return "<cached function {0} at 0x{1:x}>".format(self._target_function.__name__, id(self._target_function)) def _flatten_args(self, args, kwargs): flattened_args = collections.deque(args) arg_spec = self._target_function_arg_spec arg_spec_defaults = (arg_spec.defaults or []) default_args = tuple(arg_spec.args[:-len(arg_spec_defaults)]) default_kwargs = dict(zip(arg_spec.args[-len(arg_spec_defaults):], arg_spec_defaults)) for arg_id in range(len(args), len(arg_spec.args)): arg_name = arg_spec.args[arg_id] if arg_name in default_args: if not arg_name in kwargs: raise TypeError("{0}() missing required argument '{1}'".format(self._target_function.__name__, arg_name)) flattened_args.append(kwargs.pop(arg_name)) else: flattened_args.append(kwargs.pop(arg_name, default_kwargs[arg_name])) if kwargs: unexpected_kwargs = tuple("'{0}'".format(a) for a in kwargs.keys()) raise TypeError("{0}() got an unexpected keyword argument{1} {2}".format(self._target_function.__name__, ('' if len(unexpected_kwargs) == 1 else 's'), ', '.join(unexpected_kwargs))) return flattened_args def _ref_callback(self, args, ref): args = (ref,) + args self.__cache.pop(args, None) def cache_clean(self): """ Remove expired items from the cache. """ now = time.time() keys_for_removal = collections.deque() for key, (_, expiration) in self.__cache.items(): if expiration < now: keys_for_removal.append(key) for key in keys_for_removal: del self.__cache[key] def cache_clear(self): """ Remove all items from the cache. """ self.__cache = {} class FileWalker(object): """ This class is used to easily iterate over files and subdirectories of a specified parent directory. 
""" def __init__(self, filespath, absolute_path=False, skip_files=False, skip_dirs=False, filter_func=None, follow_links=False, max_depth=None): """ .. versionchanged:: 1.4.0 Added the *follow_links* and *max_depth* parameters. :param str filespath: A path to either a file or a directory. If a file is passed then that will be the only file returned during the iteration. If a directory is passed, all files and subdirectories will be recursively returned during the iteration. :param bool absolute_path: Whether or not the absolute path or a relative path should be returned. :param bool skip_files: Whether or not to skip files. :param bool skip_dirs: Whether or not to skip directories. :param function filter_func: If defined, the filter_func function will be called for each path (with the path as the one and only argument) and if the function returns false the path will be skipped. :param bool follow_links: Whether or not to follow directories pointed to by symlinks. :param max_depth: A maximum depth to recurse into. """ if not (os.path.isfile(filespath) or os.path.isdir(filespath)): raise Exception(filespath + ' is neither a file or directory') if absolute_path: self.filespath = os.path.abspath(filespath) else: self.filespath = os.path.relpath(filespath) self.skip_files = skip_files self.skip_dirs = skip_dirs self.filter_func = filter_func self.follow_links = follow_links self.max_depth = float('inf') if max_depth is None else max_depth if os.path.isdir(self.filespath): self._walk = None self._next = self._next_dir elif os.path.isfile(self.filespath): self._next = self._next_file def __iter__(self): return self._next() def _skip(self, cur_file): if self.skip_files and os.path.isfile(cur_file): return True if self.skip_dirs and os.path.isdir(cur_file): return True if self.filter_func is not None: if not self.filter_func(cur_file): return True return False def _next_dir(self): for root, dirs, files in os.walk(self.filespath, followlinks=self.follow_links): if root == self.filespath: depth = 0 else: depth = os.path.relpath(root, start=self.filespath).count(os.path.sep) + 1 if depth >= self.max_depth: continue for entry in itertools.chain(dirs, files): current_path = os.path.join(root, entry) if not self._skip(current_path): yield current_path if self.max_depth >= 0 and not self._skip(self.filespath): yield self.filespath def _next_file(self): if self.max_depth >= 0 and not self._skip(self.filespath): yield self.filespath class SectionConfigParser(object): """ Proxy access to a section of a ConfigParser object. """ __version__ = '0.2' def __init__(self, section_name, config_parser): """ :param str section_name: Name of the section to proxy access for. :param config_parser: ConfigParser object to proxy access for. :type config_parse: :py:class:`ConfigParser.ConfigParser` """ self.section_name = section_name self.config_parser = config_parser def _get_raw(self, option, opt_type, default=None): get_func = getattr(self.config_parser, 'get' + opt_type) if default is None: return get_func(self.section_name, option) elif self.config_parser.has_option(self.section_name, option): return get_func(self.section_name, option) else: return default def get(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. 
""" return self._get_raw(option, '', default) def getint(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. :rtype: int """ return self._get_raw(option, 'int', default) def getfloat(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. :rtype: float """ return self._get_raw(option, 'float', default) def getboolean(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. :rtype: bool """ return self._get_raw(option, 'boolean', default) def has_option(self, option): """ Check that *option* exists in the configuration file. :param str option: The name of the option to check. :rtype: bool """ return self.config_parser.has_option(self.section_name, option) def options(self): """ Get a list of all options that are present in the section of the configuration. :return: A list of all set options. :rtype: list """ return self.config_parser.options(self.section_name) def items(self): """ Return all options and their values in the form of a list of tuples. :return: A list of all values and options. :rtype: list """ return self.config_parser.items(self.section_name) def set(self, option, value): """ Set an option to an arbitrary value. :param str option: The name of the option to set. :param value: The value to set the option to. """ self.config_parser.set(self.section_name, option, value) class TestCase(unittest.TestCase): """ This class provides additional functionality over the built in :py:class:`unittest.TestCase` object, including better compatibility for methods across Python 2.x and Python 3.x. """ def __init__(self, *args, **kwargs): super(TestCase, self).__init__(*args, **kwargs) if not hasattr(self, 'assertRegex') and hasattr(self, 'assertRegexpMatches'): self.assertRegex = self.assertRegexpMatches if not hasattr(self, 'assertNotRegex') and hasattr(self, 'assertNotRegexpMatches'): self.assertNotRegex = self.assertNotRegexpMatches if not hasattr(self, 'assertRaisesRegex') and hasattr(self, 'assertRaisesRegexp'): self.assertRaisesRegex = self.assertRaisesRegexp def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'): """ Configure the default stream handler for logging messages to the console, remove other logging handlers, and enable capturing warnings. .. versionadded:: 1.3.0 :param str logger: The logger to add the stream handler for. :param level: The level to set the logger to, will default to WARNING if no level is specified. :type level: None, int, str :param formatter: The format to use for logging messages to the console. :type formatter: str, :py:class:`logging.Formatter` :return: The new configured stream handler. 
:rtype: :py:class:`logging.StreamHandler` """ level = level or logging.WARNING if isinstance(level, str): level = getattr(logging, level, None) if level is None: raise ValueError('invalid log level: ' + level) root_logger = logging.getLogger('') for handler in root_logger.handlers: root_logger.removeHandler(handler) logging.getLogger(logger).setLevel(logging.DEBUG) console_log_handler = logging.StreamHandler() console_log_handler.setLevel(level) if isinstance(formatter, str): formatter = logging.Formatter(formatter) elif not isinstance(formatter, logging.Formatter): raise TypeError('formatter must be an instance of logging.Formatter') console_log_handler.setFormatter(formatter) logging.getLogger(logger).addHandler(console_log_handler) logging.captureWarnings(True) return console_log_handler def download(url, filename=None): """ Download a file from a url and save it to disk. :param str url: The URL to fetch the file from. :param str filename: The destination file to write the data to. """ # requirements os, shutil, urllib.parse, urllib.request if not filename: url_parts = urllib.parse.urlparse(url) filename = os.path.basename(url_parts.path) url_h = urllib.request.urlopen(url) with open(filename, 'wb') as file_h: shutil.copyfileobj(url_h, file_h) url_h.close() return def escape_single_quote(unescaped): """ Escape a string containing single quotes and backslashes with backslashes. This is useful when a string is evaluated in some way. :param str unescaped: The string to escape. :return: The escaped string. :rtype: str """ # requirements = re return re.sub(r'(\'|\\)', r'\\\1', unescaped) def format_bytes_size(val): """ Take a number of bytes and convert it to a human readable number. :param int val: The number of bytes to format. :return: The size in a human readable format. :rtype: str """ if not val: return '0 bytes' for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']: if val < 1024.0: return "{0:.2f} {1}".format(val, sz_name) val /= 1024.0 raise OverflowError() def grep(expression, file, flags=0, invert=False): """ Search a file and return a list of all lines that match a regular expression. :param str expression: The regex to search for. :param file: The file to search in. :type file: str, file :param int flags: The regex flags to use when searching. :param bool invert: Select non matching lines instead. :return: All the matching lines. :rtype: list """ # requirements = re if isinstance(file, str): file = open(file) lines = [] for line in file: if bool(re.search(expression, line, flags=flags)) ^ invert: lines.append(line) return lines def is_valid_email_address(email_address): """ Check that the string specified appears to be a valid email address. :param str email_address: The email address to validate. :return: Whether the email address appears to be valid or not. :rtype: bool """ # requirements = re return EMAIL_REGEX.match(email_address) != None def get_ip_list(ip_network, mask=None): """ Quickly convert an IPv4 or IPv6 network (CIDR or Subnet) to a list of individual IPs in their string representation. 
:param str ip_network: :param int mask: :return: list """ if mask and '/' not in ip_network: net = ipaddress.ip_network("{0}/{1}".format(ip_network, mask)) elif '/' not in ip_network: return [str(ipaddress.ip_address(ip_network))] else: net = ipaddress.ip_network(ip_network) hosts = net.hosts() if net.netmask == ipaddress.IPv4Address('255.255.255.255') and sys.version_info > (3, 9): # see: https://github.com/zeroSteiner/smoke-zephyr/issues/8 hosts = [] return [host.__str__() for host in hosts] def sort_ipv4_list(ip_list, unique=True): """ Sorts a provided list of IPv4 addresses. Optionally can remove duplicate values Supports IPv4 addresses with ports included (ex: [10.11.12.13:80, 10.11.12.13:8080]) :param ip_list: (list) iterable of IPv4 Addresses :param unique: (bool) removes duplicate values if true :return: sorted list of IP addresses """ if unique: ip_list = list(set(ip_list)) ipv4_list = sorted([i.rstrip(':') for i in ip_list], key=lambda ip: ( int(ip.split(".")[0]), int(ip.split(".")[1]), int(ip.split(".")[2]), int(ip.split(".")[3].split(':')[0]), int(ip.split(":")[1]) if ":" in ip else 0 )) return ipv4_list def open_uri(uri): """ Open a URI in a platform intelligent way. On Windows this will use 'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open depending on which is available. If no suitable application can be found to open the URI, a RuntimeError will be raised. .. versionadded:: 1.3.0 :param str uri: The URI to open. """ close_fds = True startupinfo = None proc_args = [] if sys.platform.startswith('win'): proc_args.append(which('cmd.exe')) proc_args.append('/c') proc_args.append('start') uri = uri.replace('&', '^&') close_fds = False startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = subprocess.SW_HIDE elif which('gvfs-open'): proc_args.append(which('gvfs-open')) elif which('xdg-open'): proc_args.append(which('xdg-open')) else: raise RuntimeError('could not find suitable application to open uri') proc_args.append(uri) proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo) return proc_h.wait() == 0 def parse_case_camel_to_snake(camel): """ Convert a string from CamelCase to snake_case. :param str camel: The CamelCase string to convert. :return: The snake_case version of string. :rtype: str """ # requirements = re return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', camel).lower() def parse_case_snake_to_camel(snake, upper_first=True): """ Convert a string from snake_case to CamelCase. :param str snake: The snake_case string to convert. :param bool upper_first: Whether or not to capitalize the first character of the string. :return: The CamelCase version of string. :rtype: str """ snake = snake.split('_') first_part = snake[0] if upper_first: first_part = first_part.title() return first_part + ''.join(word.title() for word in snake[1:]) def parse_server(server, default_port): """ Convert a server string to a tuple suitable for passing to connect, for example converting 'www.google.com:443' to ('www.google.com', 443). :param str server: The server string to convert. :param int default_port: The port to use in case one is not specified in the server string. :return: The parsed server information. 
:rtype: tuple """ server = server.rsplit(':', 1) host = server[0] if host.startswith('[') and host.endswith(']'): host = host[1:-1] if len(server) == 1: return (host, default_port) port = server[1] if not port: port = default_port else: port = int(port) return (host, port) def parse_timespan(timedef): """ Convert a string timespan definition to seconds, for example converting '1m30s' to 90. If *timedef* is already an int, the value will be returned unmodified. :param timedef: The timespan definition to convert to seconds. :type timedef: int, str :return: The converted value in seconds. :rtype: int """ if isinstance(timedef, int): return timedef converter_order = ('w', 'd', 'h', 'm', 's') converters = { 'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1 } timedef = timedef.lower() if timedef.isdigit(): return int(timedef) elif len(timedef) == 0: return 0 seconds = -1 for spec in converter_order: timedef = timedef.split(spec) if len(timedef) == 1: timedef = timedef[0] continue elif len(timedef) > 2 or not timedef[0].isdigit(): seconds = -1 break adjustment = converters[spec] seconds = max(seconds, 0) seconds += (int(timedef[0]) * adjustment) timedef = timedef[1] if not len(timedef): break if seconds < 0: raise ValueError('invalid time format') return seconds def parse_to_slug(words, maxlen=24): """ Parse a string into a slug format suitable for use in URLs and other character restricted applications. Only utf-8 strings are supported at this time. :param str words: The words to parse. :param int maxlen: The maximum length of the slug. :return: The parsed words as a slug. :rtype: str """ slug = '' maxlen = min(maxlen, len(words)) for c in words: if len(slug) == maxlen: break c = ord(c) if c == 0x27: continue elif c >= 0x30 and c <= 0x39: slug += chr(c) elif c >= 0x41 and c <= 0x5a: slug += chr(c + 0x20) elif c >= 0x61 and c <= 0x7a: slug += chr(c) elif len(slug) and slug[-1] != '-': slug += '-' if len(slug) and slug[-1] == '-': slug = slug[:-1] return slug def random_string_alphanumeric(size): """ Generate a random string of *size* length consisting of mixed case letters and numbers. This function is not meant for cryptographic purposes. :param int size: The length of the string to return. :return: A string consisting of random characters. :rtype: str """ # requirements = random, string return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size)) def random_string_lower_numeric(size): """ Generate a random string of *size* length consisting of lowercase letters and numbers. This function is not meant for cryptographic purposes. :param int size: The length of the string to return. :return: A string consisting of random characters. :rtype: str """ # requirements = random, string return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size)) def selection_collision(selections, poolsize): """ Calculate the probability that two random values selected from an arbitrary sized pool of unique values will be equal. This is commonly known as the "Birthday Problem". :param int selections: The number of random selections. :param int poolsize: The number of unique random values in the pool to choose from. :rtype: float :return: The chance that a collision will occur as a percentage. 
""" # requirments = sys probability = 100.0 poolsize = float(poolsize) for i in range(selections): probability = probability * (poolsize - i) / poolsize probability = (100.0 - probability) return probability def unescape_single_quote(escaped): """ Unescape a string which uses backslashes to escape single quotes. :param str escaped: The string to unescape. :return: The unescaped string. :rtype: str """ escaped = escaped.replace('\\\\', '\\') escaped = escaped.replace('\\\'', '\'') return escaped def unique(seq, key=None): """ Create a unique list or tuple from a provided list or tuple and preserve the order. :param seq: The list or tuple to preserve unique items from. :type seq: list, tuple :param key: If key is provided it will be called during the comparison process. :type key: function, None """ if key is None: key = lambda x: x preserved_type = type(seq) if preserved_type not in (list, tuple): raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__)) seen = [] result = [] for item in seq: marker = key(item) if marker in seen: continue seen.append(marker) result.append(item) return preserved_type(result) def weighted_choice(choices, weight): """ Make a random selection from the specified choices. Apply the *weight* function to each to return a positive integer representing shares of selection pool the choice should received. The *weight* function is passed a single argument of the choice from the *choices* iterable. :param choices: The choices to select from. :type choices: list, tuple :param weight: The function used for gather weight information for choices. :type weight: function :return: A randomly selected choice from the provided *choices*. """ # requirements = random weights = [] # get weight values for each of the choices for choice in choices: choice_weight = weight(choice) if not (isinstance(choice_weight, int) and choice_weight > 0): raise TypeError('weight results must be positive integers') weights.append(choice_weight) # make a selection within the acceptable range selection = random.randint(0, sum(weights) - 1) # find and return the corresponding choice for idx, choice in enumerate(choices): if selection < sum(weights[:idx + 1]): return choice raise RuntimeError('no selection could be made') def which(program): """ Locate an executable binary's full path by its name. :param str program: The executables name. :return: The full path to the executable. :rtype: str """ # requirements = os is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK)) for path in os.environ['PATH'].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file if is_exe(program): return os.path.abspath(program) return None def xfrange(start, stop=None, step=1): """ Iterate through an arithmetic progression. :param start: Starting number. :type start: float, int, long :param stop: Stopping number. :type stop: float, int, long :param step: Stepping size. :type step: float, int, long """ if stop is None: stop = start start = 0.0 start = float(start) while start < stop: yield start start += step
Are you running out of fuel? If you do not have enough fuel left to finish your journey, this guide of Gas stations in Thedford will help you find the nearest gas station to where you are now. In a place like Thedford, where everyone travels by car, it is good to check the price of fuel daily, so as not to pay more inadvertently. For this, we provide the contact details and location of the Gas stations in Thedford, so you can always make your queries.

If you are the owner of one or more Gas stations in Thedford, here you can give more visibility to your company. Send us the data of your business. We will review it and, if everything is correct, we will register it.

It is very easy to check whether a gas station you know is already in this guide of Gas stations in Thedford: perform a search in the search engine located in the upper right section of this screen, put in the name of the gas station and press 'enter'.

17080 I-20, Lindale, TX 75771, United States. Hide-A-Way Lake, Smith County, Texas, USA.
302 W Bermuda St, Quitman, TX 75783, United States. Quitman, Wood County, Texas, USA.
622 W Garland St, Grand Saline, TX 75140, United States. Grand Saline, Van Zandt County, Texas, USA.
502 Houston St, Wills Point, TX 75169, United States. Wills Point, Van Zandt County, Texas, USA.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Defines abstractions around compiler artifacts produced in compiling micro TVM binaries."""

import hashlib
import io
import os
import json
import shutil
import tarfile


class ArtifactFileNotFoundError(Exception):
    """Raised when an artifact file cannot be found on disk."""


class ArtifactBadSymlinkError(Exception):
    """Raised when an artifact symlink points outside the base directory."""


class ArtifactBadArchiveError(Exception):
    """Raised when an artifact archive is malformed."""


class ImmobileArtifactError(Exception):
    """Raised when an artifact is declared immobile and thus cannot be archived."""


class ArchiveModifiedError(Exception):
    """Raised when the underlying files in a metadata-only archive were modified after archiving."""


def sha256_hexdigest(path):
    with open(path, "rb") as path_fd:
        h = hashlib.sha256()
        chunk = path_fd.read(1 * 1024 * 1024)
        while chunk:
            h.update(chunk)
            chunk = path_fd.read(1 * 1024 * 1024)

    return h.hexdigest()


def _validate_metadata_only(metadata):
    """Validate that the files in a metadata-only archive have not changed."""
    problems = []
    for files in metadata["labelled_files"].values():
        for f in files:
            disk_path = os.path.join(metadata["base_dir"], f)
            try:
                sha = sha256_hexdigest(disk_path)
            except FileNotFoundError:
                problems.append(f"{f}: original file not found")
                continue

            expected_sha = metadata["file_digests"][f]
            if sha != expected_sha:
                problems.append(f"{f}: sha256 mismatch: expected {expected_sha}, got {sha}")

    if problems:
        raise ArchiveModifiedError(
            "Files in metadata-only archive have been modified:\n"
            + "\n".join([f" * {p}" for p in problems])
        )


class Artifact:
    """Describes a compiler artifact and defines common logic to archive it for transport."""

    # A version number written to the archive.
    ENCODING_VERSION = 2

    # A unique string identifying the type of artifact in an archive. Subclasses must redefine this
    # variable.
    ARTIFACT_TYPE = None

    @classmethod
    def unarchive(cls, archive_path, base_dir):
        """Unarchive an artifact into base_dir.

        Parameters
        ----------
        archive_path : str
            Path to the archive file.
        base_dir : str
            Path to a non-existent, empty directory under which the artifact will live. If working
            with a metadata-only archive, this directory will just hold the metadata.json.

        Returns
        -------
        Artifact :
            The unarchived artifact.
""" if os.path.exists(base_dir): raise ValueError(f"base_dir exists: {base_dir}") base_dir_parent, base_dir_name = os.path.split(base_dir) temp_dir = os.path.join(base_dir_parent, f"__tvm__{base_dir_name}") os.mkdir(temp_dir) try: with tarfile.open(archive_path) as tar_f: tar_f.extractall(temp_dir) temp_dir_contents = os.listdir(temp_dir) if len(temp_dir_contents) != 1: raise ArtifactBadArchiveError( "Expected exactly 1 subdirectory at root of archive, got " f"{temp_dir_contents!r}" ) metadata_path = os.path.join(temp_dir, temp_dir_contents[0], "metadata.json") if not metadata_path: raise ArtifactBadArchiveError("No metadata.json found in archive") with open(metadata_path) as metadata_f: metadata = json.load(metadata_f) version = metadata.get("version") if version != cls.ENCODING_VERSION: raise ArtifactBadArchiveError( f"archive version: expect {cls.EXPECTED_VERSION}, found {version}" ) metadata_only = metadata.get("metadata_only") if metadata_only: _validate_metadata_only(metadata) os.rename(os.path.join(temp_dir, temp_dir_contents[0]), base_dir) artifact_cls = cls for sub_cls in cls.__subclasses__(): if sub_cls.ARTIFACT_TYPE is not None and sub_cls.ARTIFACT_TYPE == metadata.get( "artifact_type" ): artifact_cls = sub_cls break return artifact_cls.from_unarchived( base_dir if not metadata_only else metadata["base_dir"], metadata["labelled_files"], metadata["metadata"], immobile=metadata.get("immobile"), ) finally: shutil.rmtree(temp_dir) @classmethod def from_unarchived(cls, base_dir, labelled_files, metadata, immobile): return cls(base_dir, labelled_files, metadata, immobile) def __init__(self, base_dir, labelled_files, metadata, immobile=False): """Create a new artifact. Parameters ---------- base_dir : str The path to a directory on disk which contains all the files in this artifact. labelled_files : Dict[str, str] A dict mapping a file label to the relative paths of the files that carry that label. metadata : Dict A dict containing artitrary JSON-serializable key-value data describing the artifact. immobile : bool True when this artifact can't be used after being moved out of its current location on disk. This can happen when artifacts contain absolute paths or when it's not feasible to include enough files in the artifact to reliably re-run commands in arbitrary locations. Setting this flag will cause archive() to raise ImmboileArtifactError. 
""" self.base_dir = os.path.realpath(base_dir) self.labelled_files = labelled_files self.metadata = metadata self.immobile = immobile for label, files in labelled_files.items(): for f in files: f_path = os.path.join(self.base_dir, f) if not os.path.lexists(f_path): raise ArtifactFileNotFoundError(f"{f} (label {label}): not found at {f_path}") if os.path.islink(f_path): link_path = os.path.readlink(f_path) if os.path.isabs(link_path): link_fullpath = link_path else: link_fullpath = os.path.join(os.path.dirname(f_path), link_path) link_fullpath = os.path.realpath(link_fullpath) if not link_fullpath.startswith(self.base_dir): raise ArtifactBadSymlinkError( f"{f} (label {label}): symlink points outside artifact tree" ) def abspath(self, rel_path): """Return absolute path to the member with the given relative path.""" return os.path.join(self.base_dir, rel_path) def label(self, label): """Return a list of relative paths to files with the given label.""" return self.labelled_files[label] def label_abspath(self, label): return [self.abspath(p) for p in self.labelled_files[label]] def archive(self, archive_path, metadata_only=False): """Create a relocatable tar archive of the artifacts. Parameters ---------- archive_path : str Path to the tar file to create. Or, path to a directory, under which a tar file will be created named {base_dir}.tar. metadata_only : bool If true, don't archive artifacts; instead, just archive metadata plus original base_path. A metadata-only archive can be unarchived and used like a regular archive provided none of the files have changed in their original locations on-disk. Returns ------- str : The value of archive_path, after potentially making the computation describe above. Raises ------ ImmboileArtifactError : When immobile=True was passed to the constructor. """ if self.immobile and not metadata_only: raise ImmobileArtifactError("This artifact can't be moved") if os.path.isdir(archive_path): archive_path = os.path.join(archive_path, f"{os.path.basename(self.base_dir)}.tar") archive_name = os.path.splitext(os.path.basename(archive_path))[0] with tarfile.open(archive_path, "w") as tar_f: def _add_file(name, data, f_type): tar_info = tarfile.TarInfo(name=name) tar_info.type = f_type data_bytes = bytes(data, "utf-8") tar_info.size = len(data) tar_f.addfile(tar_info, io.BytesIO(data_bytes)) metadata = { "version": self.ENCODING_VERSION, "labelled_files": self.labelled_files, "metadata": self.metadata, "metadata_only": False, } if metadata_only: metadata["metadata_only"] = True metadata["base_dir"] = self.base_dir metadata["immobile"] = self.immobile metadata["file_digests"] = {} for files in self.labelled_files.values(): for f in files: metadata["file_digests"][f] = sha256_hexdigest(self.abspath(f)) _add_file( f"{archive_name}/metadata.json", json.dumps(metadata, indent=2, sort_keys=True), tarfile.REGTYPE, ) for dir_path, _, files in os.walk(self.base_dir): for f in files: file_path = os.path.join(dir_path, f) archive_file_path = os.path.join( archive_name, os.path.relpath(file_path, self.base_dir) ) if not os.path.islink(file_path): tar_f.add(file_path, archive_file_path, recursive=False) continue link_path = os.readlink(file_path) if not os.path.isabs(link_path): tar_f.add(file_path, archive_file_path, recursive=False) continue relpath = os.path.relpath(link_path, os.path.dirname(file_path)) _add_file(archive_file_path, relpath, tarfile.LNKTYPE) return archive_path
This book reports on the first substantial UK study of parenting, disability and mental health. It examines the views of parents and children in 75 families. Covering a broad spectrum of issues facing disabled parents and their families, it provides a comprehensive review of relevant policy issues.
# -*- encoding: utf-8 -*- import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() request.addfinalizer(server_teardown) return server def test_Group_append_01(server): group_a = servertools.Group() group_a.allocate(target_node=server) group_b = servertools.Group() group_b.allocate(target_node=server) synthdef = synthdefs.test assert not synthdef.is_allocated synth_a = servertools.Synth(synthdef) assert not synthdef.is_allocated assert not synth_a.is_allocated group_a.append(synth_a) assert synthdef.is_allocated assert synth_a.is_allocated assert synth_a.parent is group_a assert synth_a in group_a assert synth_a not in group_b server_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( server_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test ''', ), server_state group_b.append(synth_a) assert synthdef.is_allocated assert synth_a.is_allocated assert synth_a.parent is group_b assert synth_a in group_b assert synth_a not in group_a server_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( server_state, ''' NODE TREE 0 group 1 group 1001 group 1002 test 1000 group ''', ), server_state synth_b = servertools.Synth(synthdef) assert not synth_b.is_allocated assert synth_b.parent is None group_b.append(synth_b) assert synth_b.is_allocated assert synth_b.parent is group_b server_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( server_state, ''' NODE TREE 0 group 1 group 1001 group 1002 test 1003 test 1000 group ''', ), server_state
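Outside the test harness, the allocation pattern exercised above reduces to a few calls. A sketch using the same supriya API as the test; it assumes a local scsynth is available with default server settings:

from supriya import synthdefs
from supriya.tools import servertools

server = servertools.Server().boot()
group = servertools.Group()
group.allocate(target_node=server)

# Appending a synth to an allocated group allocates the synth on the server
# (lazily allocating its SynthDef first) and reparents it under the group.
synth = servertools.Synth(synthdefs.test)
group.append(synth)
assert synth.is_allocated and synth.parent is group

server.quit()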
All the way from Vietnam, we bring you a flavoursome spice – star anise – to enhance the taste of your recipes with its sweet, heady aroma. Also known as badiam, our organic star anise is a classic ingredient in fine confectionery and the best tea houses. This incredible spice is valued for its various medicinal properties, including digestive and expectorant qualities.
"""Test cases for Zinnia's templatetags""" from datetime import date from django.test import TestCase from django.utils import timezone from django.template import Context from django.template import Template from django.template import TemplateSyntaxError from django.db.models.signals import post_save from django.core.paginator import Paginator from django.core.urlresolvers import reverse from django.contrib.sites.models import Site from django.test.utils import override_settings from django.contrib.auth.tests.utils import skipIfCustomUser import django_comments as comments from django_comments.models import CommentFlag from tagging.models import Tag from zinnia.models.entry import Entry from zinnia.models.author import Author from zinnia.models.category import Category from zinnia.managers import DRAFT from zinnia.managers import PUBLISHED from zinnia.flags import PINGBACK, TRACKBACK from zinnia.tests.utils import datetime from zinnia.tests.utils import urlEqual from zinnia.signals import disconnect_entry_signals from zinnia.signals import disconnect_discussion_signals from zinnia.signals import flush_similar_cache_handler from zinnia.templatetags.zinnia import widont from zinnia.templatetags.zinnia import week_number from zinnia.templatetags.zinnia import get_authors from zinnia.templatetags.zinnia import get_gravatar from zinnia.templatetags.zinnia import get_tag_cloud from zinnia.templatetags.zinnia import get_categories from zinnia.templatetags.zinnia import get_categories_tree from zinnia.templatetags.zinnia import zinnia_pagination from zinnia.templatetags.zinnia import zinnia_statistics from zinnia.templatetags.zinnia import get_draft_entries from zinnia.templatetags.zinnia import get_recent_entries from zinnia.templatetags.zinnia import get_random_entries from zinnia.templatetags.zinnia import zinnia_breadcrumbs from zinnia.templatetags.zinnia import get_popular_entries from zinnia.templatetags.zinnia import get_similar_entries from zinnia.templatetags.zinnia import get_recent_comments from zinnia.templatetags.zinnia import get_recent_linkbacks from zinnia.templatetags.zinnia import get_featured_entries from zinnia.templatetags.zinnia import get_calendar_entries from zinnia.templatetags.zinnia import get_archives_entries from zinnia.templatetags.zinnia import get_archives_entries_tree from zinnia.templatetags.zinnia import user_admin_urlname from zinnia.templatetags.zinnia import comment_admin_urlname class TemplateTagsTestCase(TestCase): """Test cases for Template tags""" def setUp(self): disconnect_entry_signals() disconnect_discussion_signals() params = {'title': 'My entry', 'content': 'My content', 'tags': 'zinnia, test', 'publication_date': datetime(2010, 1, 1, 12), 'slug': 'my-entry'} self.entry = Entry.objects.create(**params) self.site = Site.objects.get_current() def publish_entry(self): self.entry.status = PUBLISHED self.entry.featured = True self.entry.sites.add(self.site) self.entry.save() def make_local(self, date_time): """ Convert aware datetime to local datetime. 
""" if timezone.is_aware(date_time): return timezone.localtime(date_time) return date_time def test_get_categories(self): source_context = Context() with self.assertNumQueries(0): context = get_categories(source_context) self.assertEqual(len(context['categories']), 0) self.assertEqual(context['template'], 'zinnia/tags/categories.html') self.assertEqual(context['context_category'], None) category = Category.objects.create(title='Category 1', slug='category-1') self.entry.categories.add(category) self.publish_entry() source_context = Context({'category': category}) with self.assertNumQueries(0): context = get_categories(source_context, 'custom_template.html') self.assertEqual(len(context['categories']), 1) self.assertEqual(context['categories'][0].count_entries_published, 1) self.assertEqual(context['template'], 'custom_template.html') self.assertEqual(context['context_category'], category) def test_get_categories_tree(self): source_context = Context() with self.assertNumQueries(0): context = get_categories_tree(source_context) self.assertEqual(len(context['categories']), 0) self.assertEqual(context['template'], 'zinnia/tags/categories_tree.html') self.assertEqual(context['context_category'], None) category = Category.objects.create(title='Category 1', slug='category-1') source_context = Context({'category': category}) with self.assertNumQueries(0): context = get_categories_tree( source_context, 'custom_template.html') self.assertEqual(len(context['categories']), 1) self.assertEqual(context['template'], 'custom_template.html') self.assertEqual(context['context_category'], category) @skipIfCustomUser def test_get_authors(self): source_context = Context() with self.assertNumQueries(0): context = get_authors(source_context) self.assertEqual(len(context['authors']), 0) self.assertEqual(context['template'], 'zinnia/tags/authors.html') self.assertEqual(context['context_author'], None) author = Author.objects.create_user(username='webmaster', email='[email protected]') self.entry.authors.add(author) self.publish_entry() source_context = Context({'author': author}) with self.assertNumQueries(0): context = get_authors(source_context, 'custom_template.html') self.assertEqual(len(context['authors']), 1) self.assertEqual(context['authors'][0].count_entries_published, 1) self.assertEqual(context['template'], 'custom_template.html') self.assertEqual(context['context_author'], author) def test_get_recent_entries(self): with self.assertNumQueries(0): context = get_recent_entries() self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_recent.html') self.publish_entry() with self.assertNumQueries(0): context = get_recent_entries(3, 'custom_template.html') self.assertEqual(len(context['entries']), 1) self.assertEqual(context['template'], 'custom_template.html') with self.assertNumQueries(0): context = get_recent_entries(0) self.assertEqual(len(context['entries']), 0) def test_get_featured_entries(self): with self.assertNumQueries(0): context = get_featured_entries() self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_featured.html') self.publish_entry() with self.assertNumQueries(0): context = get_featured_entries(3, 'custom_template.html') self.assertEqual(len(context['entries']), 1) self.assertEqual(context['template'], 'custom_template.html') with self.assertNumQueries(0): context = get_featured_entries(0) self.assertEqual(len(context['entries']), 0) def test_draft_entries(self): with 
self.assertNumQueries(0): context = get_draft_entries() self.assertEqual(len(context['entries']), 1) self.assertEqual(context['template'], 'zinnia/tags/entries_draft.html') self.publish_entry() with self.assertNumQueries(0): context = get_draft_entries(3, 'custom_template.html') self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'custom_template.html') with self.assertNumQueries(0): context = get_draft_entries(0) self.assertEqual(len(context['entries']), 0) def test_get_random_entries(self): with self.assertNumQueries(0): context = get_random_entries() self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_random.html') self.publish_entry() with self.assertNumQueries(0): context = get_random_entries(3, 'custom_template.html') self.assertEqual(len(context['entries']), 1) self.assertEqual(context['template'], 'custom_template.html') with self.assertNumQueries(0): context = get_random_entries(0) self.assertEqual(len(context['entries']), 0) def test_get_popular_entries(self): with self.assertNumQueries(0): context = get_popular_entries() self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_popular.html') self.publish_entry() with self.assertNumQueries(0): context = get_popular_entries(3, 'custom_template.html') self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'custom_template.html') params = {'title': 'My second entry', 'content': 'My second content', 'tags': 'zinnia, test', 'status': PUBLISHED, 'comment_count': 2, 'slug': 'my-second-entry'} second_entry = Entry.objects.create(**params) second_entry.sites.add(self.site) self.entry.comment_count = 1 self.entry.save() with self.assertNumQueries(0): context = get_popular_entries(3) self.assertEqual(list(context['entries']), [second_entry, self.entry]) self.entry.comment_count = 2 self.entry.save() with self.assertNumQueries(0): context = get_popular_entries(3) self.assertEqual(list(context['entries']), [second_entry, self.entry]) self.entry.comment_count = 3 self.entry.save() with self.assertNumQueries(0): context = get_popular_entries(3) self.assertEqual(list(context['entries']), [self.entry, second_entry]) self.entry.status = DRAFT self.entry.save() with self.assertNumQueries(0): context = get_popular_entries(3) self.assertEqual(list(context['entries']), [second_entry]) def test_get_similar_entries(self): post_save.connect( flush_similar_cache_handler, sender=Entry, dispatch_uid='flush_cache') self.publish_entry() source_context = Context({'object': self.entry}) with self.assertNumQueries(0): context = get_similar_entries(source_context) self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_similar.html') source_context = Context({'entry': self.entry}) with self.assertNumQueries(1): context = get_similar_entries(source_context) self.assertEqual(len(context['entries']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_similar.html') params = {'title': 'My second entry', 'content': 'This is the second content of my testing', 'excerpt': 'Similarity testing', 'status': PUBLISHED, 'slug': 'my-second-entry'} second_entry = Entry.objects.create(**params) second_entry.sites.add(self.site) params = {'title': 'My third entry', 'content': 'This is the third content for testing', 'excerpt': 'Similarity testing', 'status': PUBLISHED, 'slug': 'my-third-entry'} third_entry = Entry.objects.create(**params) 
third_entry.sites.add(self.site) with self.assertNumQueries(2): context = get_similar_entries(source_context, 3, 'custom_template.html') self.assertEqual(len(context['entries']), 2) self.assertEqual(context['entries'][0].pk, second_entry.pk) self.assertEqual(context['template'], 'custom_template.html') with self.assertNumQueries(0): context = get_similar_entries(source_context, 3) second_site = Site.objects.create(domain='second', name='second') second_entry.sites.add(second_site) with override_settings(SITE_ID=second_site.pk): with self.assertNumQueries(2): context = get_similar_entries(source_context, 3) self.assertEqual(len(context['entries']), 0) source_context = Context({'entry': second_entry}) with self.assertNumQueries(1): context = get_similar_entries(source_context) self.assertEqual(len(context['entries']), 2) post_save.disconnect( sender=Entry, dispatch_uid='flush_cache') def test_get_archives_entries(self): with self.assertNumQueries(0): context = get_archives_entries() self.assertEqual(len(context['archives']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_archives.html') self.publish_entry() params = {'title': 'My second entry', 'content': 'My second content', 'tags': 'zinnia, test', 'status': PUBLISHED, 'publication_date': datetime(2009, 1, 1), 'slug': 'my-second-entry'} second_entry = Entry.objects.create(**params) second_entry.sites.add(self.site) with self.assertNumQueries(0): context = get_archives_entries('custom_template.html') self.assertEqual(len(context['archives']), 2) self.assertEqual( context['archives'][0], self.make_local(self.entry.publication_date).replace( day=1, hour=0)) self.assertEqual( context['archives'][1], self.make_local(second_entry.publication_date).replace( day=1, hour=0)) self.assertEqual(context['template'], 'custom_template.html') def test_get_archives_tree(self): with self.assertNumQueries(0): context = get_archives_entries_tree() self.assertEqual(len(context['archives']), 0) self.assertEqual(context['template'], 'zinnia/tags/entries_archives_tree.html') self.publish_entry() params = {'title': 'My second entry', 'content': 'My second content', 'tags': 'zinnia, test', 'status': PUBLISHED, 'publication_date': datetime(2009, 1, 10), 'slug': 'my-second-entry'} second_entry = Entry.objects.create(**params) second_entry.sites.add(self.site) with self.assertNumQueries(0): context = get_archives_entries_tree('custom_template.html') self.assertEqual(len(context['archives']), 2) self.assertEqual( context['archives'][0], self.make_local( second_entry.publication_date).replace(hour=0)) self.assertEqual( context['archives'][1], self.make_local( self.entry.publication_date).replace(hour=0)) self.assertEqual(context['template'], 'custom_template.html') def test_get_calendar_entries_no_params(self): source_context = Context() with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual(context['next_month'], None) self.assertEqual(context['template'], 'zinnia/tags/entries_calendar.html') self.publish_entry() with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual( context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) def test_get_calendar_entries_incomplete_year_month(self): self.publish_entry() source_context = Context() with self.assertNumQueries(2): context = get_calendar_entries(source_context, year=2009) self.assertEqual( 
context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) with self.assertNumQueries(2): context = get_calendar_entries(source_context, month=1) self.assertEqual( context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) def test_get_calendar_entries_full_params(self): self.publish_entry() source_context = Context() with self.assertNumQueries(2): context = get_calendar_entries(source_context, 2009, 1, template='custom_template.html') self.assertEqual(context['previous_month'], None) self.assertEqual( context['next_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['template'], 'custom_template.html') def test_get_calendar_entries_no_prev_next(self): self.publish_entry() source_context = Context() with self.assertNumQueries(2): context = get_calendar_entries(source_context, 2010, 1) self.assertEqual(context['previous_month'], None) self.assertEqual(context['next_month'], None) def test_get_calendar_entries_month_context(self): self.publish_entry() source_context = Context({'month': date(2009, 1, 1)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual( context['next_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) source_context = Context({'month': date(2010, 6, 1)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual( context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) source_context = Context({'month': date(2010, 1, 1)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual(context['next_month'], None) def test_get_calendar_entries_week_context(self): self.publish_entry() source_context = Context({'week': date(2009, 1, 5)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual( context['next_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) source_context = Context({'week': date(2010, 5, 31)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual( context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) source_context = Context({'week': date(2010, 1, 4)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual(context['next_month'], None) def test_get_calendar_entries_day_context(self): self.publish_entry() source_context = Context({'day': date(2009, 1, 15)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual( context['next_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) source_context = Context({'day': date(2010, 6, 15)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual( context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) source_context = Context({'day': 
date(2010, 1, 15)}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual(context['next_month'], None) def test_get_calendar_entries_object_context(self): self.publish_entry() source_context = Context({'object': object()}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual( context['previous_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) self.assertEqual(context['next_month'], None) params = {'title': 'My second entry', 'content': 'My second content', 'tags': 'zinnia, test', 'status': PUBLISHED, 'publication_date': datetime(2008, 1, 15), 'slug': 'my-second-entry'} second_entry = Entry.objects.create(**params) second_entry.sites.add(self.site) source_context = Context({'object': self.entry}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual( context['previous_month'], self.make_local(second_entry.publication_date).date().replace( day=1)) self.assertEqual(context['next_month'], None) source_context = Context({'object': second_entry}) with self.assertNumQueries(2): context = get_calendar_entries(source_context) self.assertEqual(context['previous_month'], None) self.assertEqual( context['next_month'], self.make_local(self.entry.publication_date).date().replace(day=1)) @skipIfCustomUser def test_get_recent_comments(self): with self.assertNumQueries(1): context = get_recent_comments() self.assertEqual(len(context['comments']), 0) self.assertEqual(context['template'], 'zinnia/tags/comments_recent.html') comment_1 = comments.get_model().objects.create( comment='My Comment 1', site=self.site, content_object=self.entry, submit_date=timezone.now()) with self.assertNumQueries(1): context = get_recent_comments(3, 'custom_template.html') self.assertEqual(len(context['comments']), 0) self.assertEqual(context['template'], 'custom_template.html') self.publish_entry() with self.assertNumQueries(3): context = get_recent_comments() self.assertEqual(len(context['comments']), 1) self.assertEqual(context['comments'][0].content_object, self.entry) author = Author.objects.create_user(username='webmaster', email='[email protected]') comment_2 = comments.get_model().objects.create( comment='My Comment 2', site=self.site, content_object=self.entry, submit_date=timezone.now()) comment_2.flags.create(user=author, flag=CommentFlag.MODERATOR_APPROVAL) with self.assertNumQueries(3): context = get_recent_comments() self.assertEqual(list(context['comments']), [comment_2, comment_1]) self.assertEqual(context['comments'][0].content_object, self.entry) self.assertEqual(context['comments'][1].content_object, self.entry) @skipIfCustomUser def test_get_recent_linkbacks(self): user = Author.objects.create_user(username='webmaster', email='[email protected]') with self.assertNumQueries(1): context = get_recent_linkbacks() self.assertEqual(len(context['linkbacks']), 0) self.assertEqual(context['template'], 'zinnia/tags/linkbacks_recent.html') linkback_1 = comments.get_model().objects.create( comment='My Linkback 1', site=self.site, content_object=self.entry, submit_date=timezone.now()) linkback_1.flags.create(user=user, flag=PINGBACK) with self.assertNumQueries(1): context = get_recent_linkbacks(3, 'custom_template.html') self.assertEqual(len(context['linkbacks']), 0) self.assertEqual(context['template'], 'custom_template.html') self.publish_entry() with self.assertNumQueries(3): context = get_recent_linkbacks() 
self.assertEqual(len(context['linkbacks']), 1) self.assertEqual(context['linkbacks'][0].content_object, self.entry) linkback_2 = comments.get_model().objects.create( comment='My Linkback 2', site=self.site, content_object=self.entry, submit_date=timezone.now()) linkback_2.flags.create(user=user, flag=TRACKBACK) with self.assertNumQueries(3): context = get_recent_linkbacks() self.assertEqual(list(context['linkbacks']), [linkback_2, linkback_1]) self.assertEqual(context['linkbacks'][0].content_object, self.entry) self.assertEqual(context['linkbacks'][1].content_object, self.entry) def test_zinnia_pagination(self): class FakeRequest(object): def __init__(self, get_dict): self.GET = get_dict source_context = Context({'request': FakeRequest( {'page': '1', 'key': 'val'})}) paginator = Paginator(range(200), 10) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(1), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(context['page'].number, 1) self.assertEqual(list(context['begin']), [1, 2, 3]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), [18, 19, 20]) self.assertEqual(context['GET_string'], '&key=val') self.assertEqual(context['template'], 'zinnia/tags/pagination.html') source_context = Context({'request': FakeRequest({})}) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(2), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(context['page'].number, 2) self.assertEqual(list(context['begin']), [1, 2, 3, 4]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), [18, 19, 20]) self.assertEqual(context['GET_string'], '') with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(3), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), [18, 19, 20]) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(6), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5, 6, 7, 8]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), [18, 19, 20]) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(11), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(list(context['begin']), [1, 2, 3]) self.assertEqual(list(context['middle']), [9, 10, 11, 12, 13]) self.assertEqual(list(context['end']), [18, 19, 20]) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(15), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(list(context['begin']), [1, 2, 3]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), [13, 14, 15, 16, 17, 18, 19, 20]) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(18), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(list(context['begin']), [1, 2, 3]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), [16, 17, 18, 19, 20]) with self.assertNumQueries(0): context = zinnia_pagination( source_context, paginator.page(19), begin_pages=3, end_pages=3, before_pages=2, after_pages=2) self.assertEqual(list(context['begin']), [1, 2, 3]) 
        self.assertEqual(list(context['middle']), [])
        self.assertEqual(list(context['end']), [17, 18, 19, 20])

        with self.assertNumQueries(0):
            context = zinnia_pagination(
                source_context, paginator.page(20),
                begin_pages=3, end_pages=3,
                before_pages=2, after_pages=2)
        self.assertEqual(list(context['begin']), [1, 2, 3])
        self.assertEqual(list(context['middle']), [])
        self.assertEqual(list(context['end']), [18, 19, 20])

        with self.assertNumQueries(0):
            context = zinnia_pagination(
                source_context, paginator.page(10),
                begin_pages=1, end_pages=3,
                before_pages=4, after_pages=3,
                template='custom_template.html')
        self.assertEqual(list(context['begin']), [1])
        self.assertEqual(list(context['middle']), [6, 7, 8, 9, 10, 11, 12, 13])
        self.assertEqual(list(context['end']), [18, 19, 20])
        self.assertEqual(context['template'], 'custom_template.html')

        paginator = Paginator(range(50), 10)
        with self.assertNumQueries(0):
            context = zinnia_pagination(
                source_context, paginator.page(1),
                begin_pages=3, end_pages=3,
                before_pages=2, after_pages=2)
        self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5])
        self.assertEqual(list(context['middle']), [])
        self.assertEqual(list(context['end']), [])

        paginator = Paginator(range(60), 10)
        with self.assertNumQueries(0):
            context = zinnia_pagination(
                source_context, paginator.page(1),
                begin_pages=3, end_pages=3,
                before_pages=2, after_pages=2)
        self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5, 6])
        self.assertEqual(list(context['middle']), [])
        self.assertEqual(list(context['end']), [])

        paginator = Paginator(range(70), 10)
        with self.assertNumQueries(0):
            context = zinnia_pagination(
                source_context, paginator.page(1),
                begin_pages=3, end_pages=3,
                before_pages=2, after_pages=2)
        self.assertEqual(list(context['begin']), [1, 2, 3])
        self.assertEqual(list(context['middle']), [])
        self.assertEqual(list(context['end']), [5, 6, 7])

    def test_zinnia_pagination_on_my_website(self):
        """
        Reproduce the issue encountered on my website,
        versus the expected result.
""" class FakeRequest(object): def __init__(self, get_dict={}): self.GET = get_dict source_context = Context({'request': FakeRequest()}) paginator = Paginator(range(40), 10) with self.assertNumQueries(0): for i in range(1, 5): context = zinnia_pagination( source_context, paginator.page(i), begin_pages=1, end_pages=1, before_pages=2, after_pages=2) self.assertEqual(context['page'].number, i) self.assertEqual(list(context['begin']), [1, 2, 3, 4]) self.assertEqual(list(context['middle']), []) self.assertEqual(list(context['end']), []) @skipIfCustomUser def test_zinnia_breadcrumbs(self): class FakeRequest(object): def __init__(self, path): self.path = path class FakePage(object): def __init__(self, number): self.number = number def check_only_last_have_no_url(crumb_list): size = len(crumb_list) - 1 for i, crumb in enumerate(crumb_list): if i != size: self.assertNotEqual(crumb.url, None) else: self.assertEqual(crumb.url, None) source_context = Context({'request': FakeRequest('/')}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 1) self.assertEqual(context['breadcrumbs'][0].name, 'Blog') self.assertEqual(context['breadcrumbs'][0].url, reverse('zinnia:entry_archive_index')) self.assertEqual(context['template'], 'zinnia/tags/breadcrumbs.html') with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context, 'Weblog', 'custom_template.html') self.assertEqual(len(context['breadcrumbs']), 1) self.assertEqual(context['breadcrumbs'][0].name, 'Weblog') self.assertEqual(context['template'], 'custom_template.html') source_context = Context( {'request': FakeRequest(self.entry.get_absolute_url()), 'object': self.entry}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 5) check_only_last_have_no_url(context['breadcrumbs']) cat_1 = Category.objects.create(title='Category 1', slug='category-1') source_context = Context( {'request': FakeRequest(cat_1.get_absolute_url()), 'object': cat_1}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 3) check_only_last_have_no_url(context['breadcrumbs']) cat_2 = Category.objects.create(title='Category 2', slug='category-2', parent=cat_1) source_context = Context( {'request': FakeRequest(cat_2.get_absolute_url()), 'object': cat_2}) with self.assertNumQueries(1): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 4) check_only_last_have_no_url(context['breadcrumbs']) tag = Tag.objects.get(name='test') source_context = Context( {'request': FakeRequest(reverse('zinnia:tag_detail', args=['test'])), 'object': tag}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 3) check_only_last_have_no_url(context['breadcrumbs']) author = Author.objects.create_user(username='webmaster', email='[email protected]') source_context = Context( {'request': FakeRequest(author.get_absolute_url()), 'object': author}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 3) check_only_last_have_no_url(context['breadcrumbs']) source_context = Context( {'request': FakeRequest(reverse( 'zinnia:entry_archive_year', args=[2011]))}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 2) 
check_only_last_have_no_url(context['breadcrumbs']) source_context = Context({'request': FakeRequest(reverse( 'zinnia:entry_archive_month', args=[2011, '03']))}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 3) check_only_last_have_no_url(context['breadcrumbs']) source_context = Context({'request': FakeRequest(reverse( 'zinnia:entry_archive_week', args=[2011, 15]))}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 3) check_only_last_have_no_url(context['breadcrumbs']) source_context = Context({'request': FakeRequest(reverse( 'zinnia:entry_archive_day', args=[2011, '03', 15]))}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 4) check_only_last_have_no_url(context['breadcrumbs']) source_context = Context({'request': FakeRequest('%s?page=2' % reverse( 'zinnia:entry_archive_day', args=[2011, '03', 15])), 'page_obj': FakePage(2)}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 5) check_only_last_have_no_url(context['breadcrumbs']) source_context = Context({'request': FakeRequest(reverse( 'zinnia:entry_archive_day_paginated', args=[2011, '03', 15, 2])), 'page_obj': FakePage(2)}) with self.assertNumQueries(0): context = zinnia_breadcrumbs(source_context) self.assertEqual(len(context['breadcrumbs']), 5) check_only_last_have_no_url(context['breadcrumbs']) # More tests can be done here, for testing path and objects in context def test_get_gravatar(self): self.assertTrue(urlEqual( get_gravatar('[email protected]'), 'http://www.gravatar.com/avatar/86d4fd4a22de452' 'a9228298731a0b592?s=80&amp;r=g')) self.assertTrue(urlEqual( get_gravatar(' [email protected] ', 15, 'x', '404'), 'http://www.gravatar.com/avatar/86d4fd4a22de452' 'a9228298731a0b592?s=15&amp;r=x&amp;d=404')) self.assertTrue(urlEqual( get_gravatar(' [email protected] ', 15, 'x', '404', 'https'), 'https://secure.gravatar.com/avatar/86d4fd4a22de452' 'a9228298731a0b592?s=15&amp;r=x&amp;d=404')) def test_get_tags(self): Tag.objects.create(name='tag') t = Template(""" {% load zinnia %} {% get_tags as entry_tags %} {{ entry_tags|join:", " }} """) with self.assertNumQueries(1): html = t.render(Context()) self.assertEqual(html.strip(), '') self.publish_entry() html = t.render(Context()) self.assertEqual(html.strip(), 'test, zinnia') template_error_as = """ {% load zinnia %} {% get_tags a_s entry_tags %}""" self.assertRaises(TemplateSyntaxError, Template, template_error_as) template_error_args = """ {% load zinnia %} {% get_tags as entry tags %}""" self.assertRaises(TemplateSyntaxError, Template, template_error_args) def test_get_tag_cloud(self): source_context = Context() with self.assertNumQueries(1): context = get_tag_cloud(source_context) self.assertEqual(len(context['tags']), 0) self.assertEqual(context['template'], 'zinnia/tags/tag_cloud.html') self.assertEqual(context['context_tag'], None) self.publish_entry() tag = Tag.objects.get(name='test') source_context = Context({'tag': tag}) with self.assertNumQueries(1): context = get_tag_cloud(source_context, 6, 1, 'custom_template.html') self.assertEqual(len(context['tags']), 2) self.assertEqual(context['template'], 'custom_template.html') self.assertEqual(context['context_tag'], tag) def test_widont(self): self.assertEqual( widont('Word'), 'Word') self.assertEqual( widont('A complete string'), 'A 
complete&nbsp;string')
        self.assertEqual(
            widont('A complete\tstring'),
            'A complete&nbsp;string')
        self.assertEqual(
            widont('A  complete  string'),
            'A  complete&nbsp;string')
        self.assertEqual(
            widont('A complete string with trailing spaces '),
            'A complete string with trailing&nbsp;spaces ')
        self.assertEqual(
            widont('A complete string with <markup>', autoescape=False),
            'A complete string with&nbsp;<markup>')
        self.assertEqual(
            widont('A complete string with <markup>', autoescape=True),
            'A complete string with&nbsp;&lt;markup&gt;')

    def test_widont_pre_punctuation(self):
        """
        In some languages like French, applying the widont filter
        before a punctuation sign preceded by a space
        leads to ugly visual results instead of better visual results.
        """
        self.assertEqual(
            widont('Releases : django-blog-zinnia'),
            'Releases&nbsp;:&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases ; django-blog-zinnia'),
            'Releases&nbsp;;&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases ! django-blog-zinnia'),
            'Releases&nbsp;!&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases ? django-blog-zinnia'),
            'Releases&nbsp;?&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases - django-blog-zinnia'),
            'Releases&nbsp;-&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases + django-blog-zinnia'),
            'Releases&nbsp;+&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases * django-blog-zinnia'),
            'Releases&nbsp;*&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases / django-blog-zinnia'),
            'Releases&nbsp;/&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases % django-blog-zinnia'),
            'Releases&nbsp;%&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases = django-blog-zinnia'),
            'Releases&nbsp;=&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases : django-blog-zinnia '),
            'Releases&nbsp;:&nbsp;django-blog-zinnia ')
        self.assertEqual(
            widont('Releases :: django-blog-zinnia'),
            'Releases&nbsp;::&nbsp;django-blog-zinnia')
        self.assertEqual(
            widont('Releases :z django-blog-zinnia'),
            'Releases :z&nbsp;django-blog-zinnia')

    def test_widont_post_punctuation(self):
        """
        Sometimes applying the widont filter on just a punctuation sign
        leads to ugly visual results, instead of better visual results.
        """
        self.assertEqual(
            widont('Move !'),
            'Move&nbsp;!')
        self.assertEqual(
            widont('Move it ! '),
            'Move&nbsp;it&nbsp;! 
') self.assertEqual( widont('Move it ?'), 'Move&nbsp;it&nbsp;?') self.assertEqual( widont('I like to move : it !'), 'I like to move&nbsp;:&nbsp;it&nbsp;!') self.assertEqual( widont('I like to : move it !'), 'I like to : move&nbsp;it&nbsp;!') def test_week_number(self): self.assertEqual(week_number(datetime(2013, 1, 1)), '0') self.assertEqual(week_number(datetime(2013, 12, 21)), '50') def test_comment_admin_urlname(self): comment_admin_url = comment_admin_urlname('action') self.assertTrue(comment_admin_url.startswith('admin:')) self.assertTrue(comment_admin_url.endswith('_action')) @skipIfCustomUser def test_user_admin_urlname(self): user_admin_url = user_admin_urlname('action') self.assertEqual(user_admin_url, 'admin:auth_user_action') @skipIfCustomUser def test_zinnia_statistics(self): with self.assertNumQueries(8): context = zinnia_statistics() self.assertEqual(context['template'], 'zinnia/tags/statistics.html') self.assertEqual(context['entries'], 0) self.assertEqual(context['categories'], 0) self.assertEqual(context['tags'], 0) self.assertEqual(context['authors'], 0) self.assertEqual(context['comments'], 0) self.assertEqual(context['pingbacks'], 0) self.assertEqual(context['trackbacks'], 0) self.assertEqual(context['rejects'], 0) self.assertEqual(context['words_per_entry'], 0) self.assertEqual(context['words_per_comment'], 0) self.assertEqual(context['entries_per_month'], 0) self.assertEqual(context['comments_per_entry'], 0) self.assertEqual(context['linkbacks_per_entry'], 0) Category.objects.create(title='Category 1', slug='category-1') author = Author.objects.create_user(username='webmaster', email='[email protected]') comments.get_model().objects.create( comment='My Comment 1', site=self.site, content_object=self.entry, submit_date=timezone.now()) self.entry.authors.add(author) self.publish_entry() with self.assertNumQueries(13): context = zinnia_statistics('custom_template.html') self.assertEqual(context['template'], 'custom_template.html') self.assertEqual(context['entries'], 1) self.assertEqual(context['categories'], 1) self.assertEqual(context['tags'], 2) self.assertEqual(context['authors'], 1) self.assertEqual(context['comments'], 1) self.assertEqual(context['pingbacks'], 0) self.assertEqual(context['trackbacks'], 0) self.assertEqual(context['rejects'], 0) self.assertEqual(context['words_per_entry'], 2) self.assertEqual(context['words_per_comment'], 3) self.assertEqual(context['entries_per_month'], 1) self.assertEqual(context['comments_per_entry'], 1) self.assertEqual(context['linkbacks_per_entry'], 0) class TemplateTagsTimezoneTestCase(TestCase): def create_published_entry_at(self, publication_date): params = {'title': 'My entry', 'content': 'My content', 'slug': 'my-entry', 'status': PUBLISHED, 'publication_date': publication_date} entry = Entry.objects.create(**params) entry.sites.add(Site.objects.get_current()) return entry @override_settings(USE_TZ=False) def test_calendar_entries_no_timezone(self): template = Template('{% load zinnia %}' '{% get_calendar_entries 2014 1 %}') self.create_published_entry_at(datetime(2014, 1, 1, 12, 0)) self.create_published_entry_at(datetime(2014, 1, 1, 23, 0)) self.create_published_entry_at(datetime(2012, 12, 31, 23, 0)) self.create_published_entry_at(datetime(2014, 1, 31, 23, 0)) output = template.render(Context()) self.assertTrue('/2014/01/01/' in output) self.assertTrue('/2014/01/02/' not in output) self.assertTrue('/2012/12/' in output) self.assertTrue('/2014/02/' not in output) @override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris') 
def test_calendar_entries_with_timezone(self): template = Template('{% load zinnia %}' '{% get_calendar_entries 2014 1 %}') self.create_published_entry_at(datetime(2014, 1, 1, 12, 0)) self.create_published_entry_at(datetime(2014, 1, 1, 23, 0)) self.create_published_entry_at(datetime(2012, 12, 31, 23, 0)) self.create_published_entry_at(datetime(2014, 1, 31, 23, 0)) output = template.render(Context()) self.assertTrue('/2014/01/01/' in output) self.assertTrue('/2014/01/02/' in output) self.assertTrue('/2013/01/' in output) self.assertTrue('/2014/02/' in output) @override_settings(USE_TZ=False) def test_archives_entries_no_timezone(self): template = Template('{% load zinnia %}' '{% get_archives_entries %}') self.create_published_entry_at(datetime(2014, 1, 1, 12, 0)) self.create_published_entry_at(datetime(2014, 1, 31, 23, 0)) output = template.render(Context()) self.assertTrue('/2014/01/' in output) self.assertTrue('/2014/02/' not in output) @override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris') def test_archives_entries_with_timezone(self): template = Template('{% load zinnia %}' '{% get_archives_entries %}') self.create_published_entry_at(datetime(2014, 1, 1, 12, 0)) self.create_published_entry_at(datetime(2014, 1, 31, 23, 0)) output = template.render(Context()) self.assertTrue('/2014/01/' in output) self.assertTrue('/2014/02/' in output) @override_settings(USE_TZ=False) def test_archives_entries_tree_no_timezone(self): template = Template('{% load zinnia %}' '{% get_archives_entries_tree %}') self.create_published_entry_at(datetime(2014, 1, 1, 12, 0)) self.create_published_entry_at(datetime(2014, 1, 31, 23, 0)) output = template.render(Context()) self.assertTrue('/2014/01/01/' in output) self.assertTrue('/2014/02/01/' not in output) @override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris') def test_archives_entries_tree_with_timezone(self): template = Template('{% load zinnia %}' '{% get_archives_entries_tree %}') self.create_published_entry_at(datetime(2014, 1, 1, 12, 0)) self.create_published_entry_at(datetime(2014, 1, 31, 23, 0)) output = template.render(Context()) self.assertTrue('/2014/01/01/' in output) self.assertTrue('/2014/02/01/' in output)
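The timezone tests above render the tags through Django's template engine rather than calling the Python functions directly. For reference, the same pattern works for any of the tested inclusion tags; a minimal sketch, assuming a configured Django project with zinnia installed, mirroring the Template/Context usage in the tests:

from django.template import Context, Template

# Render one of the tested inclusion tags outside the test suite.
template = Template(
    '{% load zinnia %}'
    '{% get_calendar_entries 2014 1 %}'
)
print(template.render(Context()))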
ELMHURST, Ill. – The University of Denver has accepted an invitation to become The Summit League's newest member institution, the university and league announced today. Denver will join The Summit League during the 2013-14 academic year, with membership taking effect on July 1, 2013.
The Pioneers, who compete in men's and women's basketball, men's and women's golf, men's and women's soccer, men's and women's swimming and diving, men's and women's tennis, and women's volleyball, will be immediately eligible for all Summit League Championships during the 2013-14 season.
Founded in 1864, the University of Denver has an enrollment of 11,797, including 5,453 undergraduates and 6,344 graduate students. Academically, the Wall Street Journal's 2007 rankings of top business schools ranked DU's Daniels College of Business seventh in the nation for producing graduates with high ethical standards. The Daniels College of Business is in the top 15 percent of undergraduate programs and the top five percent of graduate programs among the approximately 550 business schools accredited by the Association to Advance Collegiate Schools of Business (AACSB). U.S. News & World Report's annual 2012 college rankings for undergraduate education place Denver 82nd among national doctoral universities.
In athletics, Denver has won four of the last five NCAA Division I-AAA Director's Cups and finished as high as No. 47 in the Director's Cup standings among all Division I institutions. Within the past 10 years, the Pioneers have captured five NCAA team championships and 18 individual NCAA championships. Denver also combined for 32 conference tournament championships, 20 regular-season championships and 153 individual championships during that span. The Pioneers have produced 30 CoSIDA Academic All-Americans and captured 11 Sun Belt Conference Graduation Rate Awards over the past 12 seasons.
Famous alumni of the University of Denver include: former Secretary of State Condoleezza Rice; Pete Coors, CEO of Coors Brewing Co.; Brad Anderson, Vice Chairman and CEO of Best Buy Co., Inc.; Andy Taylor, Chairman and CEO of Enterprise Rent-A-Car; and Olympic silver and bronze figure skating medalist Michelle Kwan. Notables in professional sports from DU include: current San Antonio Spurs head coach Gregg Popovich; Jerome Biffle, 1952 Olympic long jump gold medalist; former NHL players Kevin Dineen, Keith Magnuson and Bill Masterton; former NHL executive and 1980 "Miracle On Ice" assistant coach Craig Patrick; and current NHL standouts Matt Carle (Philadelphia Flyers), Tyler Bozak (Toronto Maple Leafs) and Paul Stastny (Colorado Avalanche).
Denver's metro area ranks as the 21st largest in the United States with a population of 2.87 million. The city of Denver had a population of 619,968 in 2011 and features the 10th largest downtown area. Denver has professional teams in each of the five major sports leagues, and its airport is the largest by area in the United States and ranked as the 11th-busiest by passenger traffic in the world in 2011.
Entering its fourth decade of Division I athletics, The Summit League offers 19 championship sports and has a combined enrollment of over 144,000 at nine universities, four of which are located in top 58 U.S. metro populations (Detroit, Indianapolis, Kansas City and Omaha).
Member institutions include: Fort Wayne (Indiana University-Purdue University-Fort Wayne), IUPUI (Indiana University-Purdue University-Indianapolis), Kansas City (University of Missouri-Kansas City), North Dakota State University, Oakland University, Omaha (University of Nebraska Omaha), the University of South Dakota, South Dakota State University and Western Illinois University. Previously known as the Mid-Continent Conference, The Summit League rebranded in 2007 and has since produced 54 All-Americans, 37 Academic All-America selections and seven NCAA Championships. For more information about the league and its history, visit www.thesummitleague.org to “See The Summit” or follow on Twitter @thesummitleague. Founded in 1864, only a few years after the city itself, the University of Denver is one of the country’s premier private universities. The University’s 125-acre campus, a few miles south of downtown Denver, is home to more than 11,000 students hailing from all regions of the U.S. and 93 other countries.
#!/usr/bin/python
from scapy.all import *
import subprocess  # used by get_flag_part(); missing from the original imports
import random

conf.verb = 0

base_URL = "http://10.13.37.23:"


def knock(ports):
    # Send a bare SYN to each port in sequence to trigger the port-knock daemon.
    print "[*] Knocking on ports " + str(ports)
    for dport in range(0, len(ports)):
        ip = IP(dst="10.13.37.23")
        SYN = ip/TCP(dport=ports[dport], flags="S", window=14600,
                     options=[('MSS', 1460)])
        send(SYN)


def get_flag_part(port, part):
    # Fetch one flag fragment from the port opened by the knock sequence.
    command = ["curl", "-s", base_URL + str(port) + "/" + part + "_part_of_flag"]
    p = subprocess.Popen(command, stdout=subprocess.PIPE)
    result = p.communicate()[0]
    return result.strip()


flag = ''

ports = [9264, 11780, 2059, 8334]
port = 24931
knock(ports)
flag_part = get_flag_part(port, "first")
flag = ''.join([flag, flag_part])
print flag_part

ports = [42304, 53768, 3297]
port = 19760
knock(ports)
flag_part = get_flag_part(port, "second")
flag = ''.join([flag, flag_part])
print flag_part

ports = [23106, 4250, 62532, 11655, 33844]
port = 3695
knock(ports)
flag_part = get_flag_part(port, "third")
flag = ''.join([flag, flag_part])
print flag_part

ports = [49377, 48116, 54900, 8149]
port = 31054
knock(ports)
flag_part = get_flag_part(port, "fourth")
flag = ''.join([flag, flag_part])
print flag_part

ports = [16340, 59991, 37429, 60012, 15397, 21864, 12923]
port = 8799
knock(ports)
flag_part = get_flag_part(port, "last")
flag = ''.join([flag, flag_part])
print flag_part

print "Flag: %s" % flag
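The original row imported requests without ever using it (the fetch shells out to curl), so the cleaned version above drops that import. For completeness, a sketch of the same fetch done with requests directly; the host, port, and path layout are taken from the script, but this variant is untested against the original challenge server:

import requests

def get_flag_part(port, part):
    # Same URL layout the curl invocation builds above.
    url = "http://10.13.37.23:" + str(port) + "/" + part + "_part_of_flag"
    return requests.get(url, timeout=5).text.strip()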
Ron Nunnery moved that the June 24, 2013 Minutes be accepted. Ron Hartoin seconded the motion. Motion carried 100%. There was a discussion about the repainting of double yellow lines. There was an extensive discussion about the new requirement for reflective street and stop signs. Rick Schultz is attending a training meeting on this topic Thursday and will report back to Council. Sgt. Daugherty asked if there is a noise ordinance. Yes, North Bend does have a noise ordinance, but some complaints about the golf course need to be directed to Miami Township, as many areas of the course and the clubhouse are in the Township. There were 51 calls in June, including 2 DUIs and 3 criminal investigations. There has been an increase in criminal activity in North Bend and in Miami Township. An arrest has been made, and the suspect was connected to at least four (4) burglaries. Fran Romweber said the two (2) complaints about the golf course noise may not be enough to warrant enforcement. She stated that living on a golf course means there will be early morning mowing. The Mayor noted that just because you have a North Bend address does not mean you actually live in North Bend; many residents with a North Bend address live in the Township. The training exercise at the new school is August 20 from 10:00a.m. – 2:00p.m. for the police and fire departments, including the bomb squad. Mayor Sammons asked residents to be aware of increased crime and to keep their lights on at night. Ron Nunnery introduced Eric Winhusen, Superintendent of the Cleves Water Works. Eric explained the EPA Consumer Confidence Report that many Cleves Water Works customers recently received. Eric said the EPA sets the standards and that CWW exceeds those standards in all categories. Bill Welch asked for an explanation of some of the terms in the report. Eric stated CWW customer costs are in the bottom 5% in Ohio. Ron Hartoin asked Eric about the fire hydrant outages. Eric said they are working as fast as possible given the staff they have at this time. There was a discussion about the current state of the sewers in our area. Eric stated the sewers are the responsibility of MSD and we can expect sewer rates to increase. A resident asked about the level of the aquifer. Eric said the level has not dropped six (6) inches in the last eleven (11) years; we are drawing 1 million gallons per day, and the aquifer could support 8 million gallons per day. Chief Ober reported that for the month of June there were 10 EMS runs and 2 fire details. The fire inspections are ongoing; two (2) hydrants are on the repair list. North Bend is averaging one drug overdose per month. Dave Moorman asked about the fire hydrants that are out of service. One hydrant is an old outage and one is a new outage. The District has many old hydrants. Chief Ober said we have plenty of back-up hydrants, as hydrants are a few hundred feet apart and the fire trucks carry 1,000 feet of hose. The new school dedication is August 18, from 2:00p.m. - 4:00p.m. The Taylor High School surplus auction is scheduled for August 24 at 10:00a.m. Three Rivers Middle School is under contract for sale to Crossroads Church. Ron Hartoin asked Council for their thoughts on upgrading the storage building. Rick Schultz said upgrades could be completed for $15,000 - $18,000. Tim Boll, Bill Welch, Ron Hartoin and Dave Moorman agreed on the need for upgrading, with a possible 2-3 year phase-in. A proposal will be made at the August meeting. Tim Boll requested the building be fenced and lockable.
There will be a Citizen’s Crime Commission Meeting (Neighborhood Watch) on August 22 at 7:00p.m. at the Village Council Chambers. Dave Moorman stated that some properties are still not in compliance and notices will be sent out next week. Regarding the property maintenance code, several complaints have been made about grass and weeds on residential and non-residential property. The ordinance committee is forming a proposal for Council to address these issues. The Beautification Committee meeting was held Saturday, July 6. The We Thrive meeting is scheduled for Tuesday, August 6 at 7:00p.m. to discuss the playground grant. "We Thrive" is a possible grant for upgrading/replacing the Village park next to Council Hall. Sidewalk repairs will be completed as the Washington Sewer Project begins. The US 50 guardrail has been repaired. Mayor Sammons will provide a sample Resolution to the Law and Ordinance Committee for consideration of an Ordinance establishing a policy for excessive emergency runs. Bill Welch reviewed a letter from St. Joseph Church to its members about the possible purchase of Taylor High School. They have formed a committee to look into the purchase. Bill Welch will contact St. Joseph’s committee and share North Bend’s vision for the Taylor High School property. Two bids have been received for the repairs on St. Anne’s. There is additional damage, so Rick will ask for rebids on that issue. Bill Welch asked for a temporary repair, and Rick Schultz will take care of that. There are two houses under construction on St. Anne’s. There was a review of the current fire levy. This levy will replace the 1 mill levy (Life Squad) and our $15,000 annual payment to Miami Township for fire protection. Dave Moorman said we are under a 10-year contract through 2018. Dave feels the growth in North Bend should cover the increased cost of services. There was a discussion about how levies work. Levies do not produce additional money even though property values may have increased during the term of the levy. Dave Moorman reiterated that a vote to increase property taxes from 1 mill to 2.29 mills represents an increase of over 100% for all property tax payers. Miami Township residents pay 3.29 mills for Fire/EMS. Chief Ober discussed the history of Miami Township’s Fire/EMS service and how they have responded to the changes in the Village. They’ve provided an additional life squad and additional EMTs. He also mentioned the increased number of homes in the Village. Ron Hartoin made a motion to adopt Resolution 2013-18, Declaring it Necessary to Put a Replacement Levy on the November 5, 2013 Ballot as Allowed by Article XII, Section 2 of the Ohio Constitution, in Excess of the Limitation for Life Squad and Emergency Medical Services. Fran Romweber seconded the motion. Motion passed 5-1; Dave Moorman opposed. Discussion: Dave Moorman asked if Addyston and Cleves are asking for levies. They are; Cleves is next week and Addyston is next year. Ron Nunnery made a motion to adjourn the meeting. Ron Hartoin seconded the motion. Motion carried 100%. The meeting was adjourned at 9:04p.m.
import numpy as np
from Gilles import *
import matplotlib.pyplot as plt
from DeviationAnalysis import *
from mpl_toolkits.mplot3d import Axes3D

# Initial conditions
user_input = ['A', 100, 'B', 0]

# Constants (this is not necessary, they could be filled up already in the reaction tuple)
k = (10, 10)

# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)
reactions = (
    (1, 'A'), (1, 'B'), k[0],
    (1, 'B'), (1, 'A'), k[1],
)

# dt is used for the deterministic calculation
dt = 0.0001
t = np.arange(0, 0.6, dt)

(solution, (tgill, valsgill, all_mus, all_taus), rows, mode) = ReAct(user_input, reactions, t, rounds=300)

fig = plt.figure()
Gillesplot(solution, t, tgill, valsgill, rows, mode)

j = 0
f, axarr = plt.subplots(1, 10)
for i in np.arange(0, 0.3, 0.03):
    A, X, Y = EquationMaker(reactions, tgill, all_mus, all_taus, i, i + 0.02)
    Y, X = np.meshgrid(Y, X)
    # ax = fig.gca(projection='3d')
    # ax.plot_surface(X, Y, A, rstride=1, cstride=1, cmap='hot', linewidth=0, antialiased=False)
    axarr[j].imshow(A[:5, :], cmap='hot')
    j += 1

plt.draw()
plt.show()
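Gilles and DeviationAnalysis above are project-local modules, so ReAct, Gillesplot, and EquationMaker are not generally available. For readers without them, here is a minimal self-contained direct-method Gillespie (SSA) sketch of the same A <-> B system, reusing the rate constants and initial counts from the script; it is an illustrative stand-in, not the project's implementation:

import random

def gillespie_ab(nA=100, nB=0, k1=10.0, k2=10.0, t_end=0.6):
    """Direct-method SSA for A <-> B; returns (times, A counts, B counts)."""
    t, times, As, Bs = 0.0, [0.0], [nA], [nB]
    while t < t_end:
        a1, a2 = k1 * nA, k2 * nB      # propensities of A->B and B->A
        a0 = a1 + a2
        if a0 == 0:
            break
        t += random.expovariate(a0)    # exponential waiting time to next event
        if random.random() * a0 < a1:  # pick which reaction fires
            nA, nB = nA - 1, nB + 1
        else:
            nA, nB = nA + 1, nB - 1
        times.append(t); As.append(nA); Bs.append(nB)
    return times, As, Bs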
We offer a range of products including Floor Coatings, Wall Coatings, Waterproofing, and Flame and Fire Retardant Coatings. The thickness of the floor may vary from 1mm to 3mm depending on the level of impact resistance required by the customer. The Variant System has a compressive strength of 98 N/mm² (14,200 psi) and is extremely easy to clean even with heavy traffic. The slip resistance can be adjusted based on the customer’s needs and specified regulations. The Variant System is the best solution for parking garages, automotive service centers, paper processing, stadiums, conference centers, the pharmaceutical industry, laboratories, hospitals, power plants, schools and hangars. We can provide Variant in any RAL colour. The Flake System is 2 mm to 3 mm thick and contains decorative flakes within its chemically bonded coating system. The unparalleled long-lasting system is aesthetically versatile and primarily designed for heavy foot traffic and moderate industrial environments. By using our unique Granito blends of colour flakes, we are able to provide granite or marble style floors in a fraction of the time normally associated with traditional systems. The Flake System is the best solution for supermarkets, shopping centers, retail, hospitals, stadiums, schools, mass-transit and bathrooms. This system is a colour-quartz filled, trowel-applied coating with extraordinary performance that is ideal for wet or heavily abraded areas where hygiene and cleanliness are required. Not only does this innovative monolithic system have excellent slip resistance values, longevity and cleanability, but it also provides minimal facility downtime due to its 2-hour curing process. With its unique ability to chemically bond to itself, the optional coved skirting is completely seamless with the floor. This is the best solution for interior and exterior wet environments such as food/beverage processing, commercial kitchens, automotive, transportation platforms and industrial areas with heavy traffic and high compressive strength needs. The Decor System has very high compressive strength, very high bending and tensile strength and a very high resistance to ageing. Due to the chemical properties of the resins, each layer (primer, body coat and seal coat) bonds together chemically rather than mechanically. This leaves no chance of delamination between layers, or from the concrete. - High compressive strength of 98 N/mm². - 1-2 hours full cure. Can be laid at weekends or overnight, minimising disruption to operations. - Suitable for heavy industrial use. Full manufacturer's 10-year performance warranty and lifetime guarantee on bond to concrete. In summary, this is the strongest, longest-lasting ESD flooring system on the market today. The unique ability to bond to concrete means that we can use our resin to fill expansion joints. Expansion joints that have filler installed will be truly waterproof. Contractors who are engaged to build waterproof raised decks use joint fillers. Fragmentation is the number one cause of injury in an explosion. Blast Mitigation protective coatings can be applied to buildings and have been designed to mitigate the effects of an explosion by containing the fragmented pieces.
from . import Message


class Submessage(object):
    """Wraps a follow-up message to be dispatched to another object as part
    of handling a parent message."""

    need_lock_object = True

    def __init__(self, obj, message_id, sender, params=None,
                 need_lock_object=True, raw_params=None):
        self.obj = obj
        self.sender = sender
        self.message_id = message_id
        self.params = params
        self.raw_params = raw_params
        self.need_lock_object = need_lock_object
        super(Submessage, self).__init__()

    def as_message(self, parent):
        # Build a Message linked to the parent so grouping and parent ids
        # are preserved across the dispatch chain.
        return Message(
            self.sender, self.message_id,
            raw_params=self.raw_params,
            clean_params=self.params,
            parent_message_id=parent.unique_id,
            message_group=parent.message_group,
        )

    def dispatch(self, parent_obj, parent_message):
        from yawf.dispatch import dispatch_message
        message = self.as_message(parent_message)
        return dispatch_message(
            self.obj,
            message=message,
            defer_side_effect=True,
            need_lock_object=self.need_lock_object)


class RecursiveSubmessage(Submessage):
    """Submessage dispatched back to the same object that is handling the
    parent message, so no extra lock is taken."""

    def __init__(self, message_id, sender, params=None, raw_params=None):
        super(RecursiveSubmessage, self).__init__(
            obj=None, sender=sender, message_id=message_id,
            params=params, raw_params=raw_params)

    def dispatch(self, parent_obj, parent_message):
        from yawf.dispatch import dispatch_message
        message = self.as_message(parent_message)
        return dispatch_message(
            parent_obj,
            message=message,
            defer_side_effect=True,
            need_lock_object=False)
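A small hedged usage sketch for the classes above. The parent message normally comes from yawf's dispatch machinery; here a hypothetical stand-in object supplies the only two attributes that as_message() actually reads:

class FakeParent(object):
    # Only the attributes as_message() touches; everything else is omitted.
    unique_id = 42
    message_group = 'demo-group'

sub = Submessage(obj=None, message_id='notify', sender='system',
                 params={'reason': 'approved'})
msg = sub.as_message(FakeParent())  # a Message tied to the parent's id/group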
A teenage patient has become the first person ever to receive treatment at the first NHS Proton Beam Therapy centre in the United Kingdom. The groundbreaking treatment spared him the grueling after-effects of chemotherapy. Mason is currently preparing for his GCSEs. When he grows up, Mason wants to become a cancer doctor. He said, “I am really grateful to all my doctors who are involved in taking care of me. Therefore, I would love to do what they are doing one day in my life.” Mason lives with his mother, stepdad, and siblings. He will have 28 PBT (Proton Beam Therapy) sessions over the course of the next 6 weeks. PBT targets tumors with a high level of precision. Unlike conventional radiotherapy, which can damage the surrounding tissues as well, PBT only targets the specific tumor cells. Moreover, patients undergoing PBT treatment are known to have fewer side effects in comparison to other treatments.
"""This is the topword receiver for the topword model.""" from enum import Enum from lexos.receivers.base_receiver import BaseReceiver class TopwordAnalysisType(Enum): """This is the class that assigns the options to constants.""" ALL_TO_PARA = "Each Document to the Corpus" CLASS_TO_PARA = "Each Document to Other Classes" CLASS_TO_CLASS = "Each Class to Other Classes" class TopwordReceiver(BaseReceiver): """This is the class that receives the options from front end.""" def __init__(self): """Get the topword analysis type from front end using this receiver.""" super().__init__() def options_from_front_end(self) -> TopwordAnalysisType: """Get the topword option from front end. :return: a TopwordAnalysisType object that holds the analysis option. """ if self._front_end_data["comparison_method"] == \ "Each Document to the Corpus": return TopwordAnalysisType.ALL_TO_PARA elif self._front_end_data["comparison_method"] == \ "Each Document to Other Classes": return TopwordAnalysisType.CLASS_TO_PARA elif self._front_end_data["comparison_method"] == \ "Each Class to Other Classes": return TopwordAnalysisType.CLASS_TO_CLASS else: raise ValueError("Invalid topword analysis option from front end.")
Was the first dinner I ever served. I was 6 and it was my parents' anniversary. With obvious limitations of cash and ability, I maintained a determination to create a lit celebration. This desire, to turn nothing into something and deliver despite obstacles, was a learned mentality. I attribute this outlook to 2 early constants in life: the kitchen that raised me and the music played through my headphones. Born in South Korea, I came to America as a baby and was adopted by a Sicilian/Irish family. From a very young age, it was food that helped me connect with my culture and hip hop that made me feel fine about being different. Food and hip hop would remain my OG loves for life. Burning mixtapes for friends, serving up hot plates in dorms, house party playlists, DatPiff and Hip Hop Early. Much to my delight, I have been able to continue working with these OG loves all these years. From event planning on yachts to vendor management for the premier LES street food conglomerate. That scrappy hustle mentality has resulted in securing sponsorship deals with brands like Nike and Honest Tea and placing culture projects with press outlets like MTV, Huffington Post Music, and Mashable. Today, it's a real opportunity to build this dream and share it with you. A dream that can connect people and help us see things differently - stories about tradition, culture, memory, experience, feeling, and identity. Nothing is more dope than having a unique passion that you can share with others. If you'd like to pitch collaborations, creative projects, or new ideas, link up with me (fill out the contact form!).
# -*- coding: utf-8 -*- """A text file based task plugin implementation.""" import logging from pomito.plugins import task from pomito.task import Task from io import open __all__ = ['TextTask'] logger = logging.getLogger('pomito.plugins.task.text') class TextTask(task.TaskPlugin): """Implements a plugin to read/write Tasks from a text file. See doc/sample_tasks.txt for details of task file. """ def __init__(self, pomodoro_service): self._pomodoro_service = pomodoro_service self.tasks = [] def initialize(self): # Read plugin configuration try: file_path = self._pomodoro_service.get_config("task.text", "file") with open(file_path, 'r') as f: for t in f.readlines(): if not t.startswith("--"): task_tuple = self.parse_task(t) self.tasks.append(Task(*task_tuple)) except Exception as e: logger.debug(("Error initializing plugin: {0}".format(e))) return def get_tasks(self): return self.tasks def parse_task(self, task): return TextTask._parse_task(task) @staticmethod def _parse_task(task): import re # Sample task format: I:<id> | E:<estimate> | A:<actual> | T:<tags> | D:<desc> # Only <desc> can contain spaces. <tags> can be comma separated p = re.compile("[IEATD]:([\w,\s]*)\|?") task_tuple = tuple(map(lambda x: x.groups()[-1].strip('\n '), p.finditer(task))) return task_tuple
If you want to start an internet business on a “Shoe String Budget” you NEED to have a strong passion for the business/industry you are getting into. If you have the passion, you will acquire knowledge, and eventually wisdom, becoming a leading voice in your industry. As an expert you rely on yourself to build and market, so your expenses are less. Figure it out from there: Dan Fernandez, WebBizIdeas CEO, is famous for saying “We’ll figure it out.” I am a big planner, but sometimes you need to run before you can crawl, and you need to fail before you succeed. Building your idea this way will cost you less than $300. During the process you WILL find the perfect business/revenue model (don’t rely just on advertising or selling a product… be creative) for your “horror idea.”
#!/usr/bin/env python3

import os
import argparse

import psr_common

thispath = os.path.dirname(os.path.realpath(__file__))
modpath = os.path.join(os.path.dirname(thispath), "pulsar", "modulebase")

parser = argparse.ArgumentParser()
parser.add_argument("--author", required=True, help="Author of the file")
parser.add_argument("--desc", required=True, help="Short description of the base module")
parser.add_argument("name", help="Name of the module class")
args = parser.parse_args()

hfilename = args.name + ".hpp"
cfilename = args.name + ".cpp"

hfilepath = os.path.join(modpath, hfilename)
cfilepath = os.path.join(modpath, cfilename)

htemplatepath = os.path.join(modpath, "NewModule.hpp.template")
ctemplatepath = os.path.join(modpath, "NewModule.cpp.template")

hguard = psr_common.GenIncludeGuard(hfilename)

print("Creating {}".format(hfilepath))
with open(hfilepath, 'w') as dest:
    for l in open(htemplatepath, 'r').readlines():
        l = l.replace("AUTHOR", args.author)
        l = l.replace("MODULEDESC", args.desc)
        l = l.replace("CLASSNAME", args.name)
        l = l.replace("HEADERGUARD", hguard)
        dest.write(l)

# The snippet ended after writing the header even though cfilepath and
# ctemplatepath are prepared above; this step mirrors the header template
# processing (minus the include guard) to complete the source file.
print("Creating {}".format(cfilepath))
with open(cfilepath, 'w') as dest:
    for l in open(ctemplatepath, 'r').readlines():
        l = l.replace("AUTHOR", args.author)
        l = l.replace("MODULEDESC", args.desc)
        l = l.replace("CLASSNAME", args.name)
        dest.write(l)
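The placeholder substitution is plain string replacement, so it can be exercised standalone; the template text below is illustrative, not the actual contents of NewModule.hpp.template:

template = "/*! \\brief MODULEDESC (by AUTHOR) */\nclass CLASSNAME;\n"
subs = {"AUTHOR": "A. Developer",
        "MODULEDESC": "Example base module",
        "CLASSNAME": "ExampleModule"}
for placeholder, value in subs.items():
    template = template.replace(placeholder, value)
print(template)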
American Eagle Exteriors is exactly what you are searching for if you need a top rated siding and gutter company in Franklin Lakes NJ. American Eagle has the experienced and skilled contractors capable of perfectly installing, repairing or replacing any kind of siding or gutter system, to keep your home safe and dry this winter. If you are searching for a leading siding and gutter company in Franklin Lakes NJ, make certain to call American Eagle Exteriors before you contact anyone else. When your siding has become old and worn down, and you’re in need of exterior replacement in Franklin Lakes NJ, our skilled siding experts have you covered. Worn down and old siding means that your house isn’t being sufficiently protected, and it will also negatively impact your home’s visual appeal. We can install any kind of siding, and replace the siding which you had on your home originally. Sometimes just a portion of siding will have to be replaced. In this kind of instance, our contractors have the skill needed to make certain that the damaged siding is replaced in such a way that it’s going to blend seamlessly with the siding currently on your house. From fiber siding to vinyl siding in Franklin Lakes NJ, we carry and install all types of siding, so no matter what kind of siding you have on your home, we’ll be sure to have the materials to replace or repair it perfectly. It’s vital to have a siding contractor in Franklin Lakes NJ you can trust to perform expert siding installation, particularly during the winter months. Moisture is capable of getting into walls and causing damage to your home’s structure when you don’t have proper siding on the home. That is the reason our contractors are specialists at installing the highest quality siding, including vinyl and fiber siding in Franklin Lakes NJ. For when you’re looking for the very best in terms of protection, we also carry top quality James Hardie siding. American Eagle Exteriors is also a leading choice if you need a gutter company in Franklin Lakes NJ, together with our services for siding. American Eagle Exteriors offers thorough gutter service, guaranteed to make sure your gutters remain fully functioning, leak-free, and free from debris that is capable of interfering with proper gutter function. Gutter guards, downspouts and leaders are all included in our maintenance services and are things we possess a great deal of experience in. If your gutters are not properly maintained, it can lead to serious problems, particularly in the winter. Ensuring your gutters are repaired right away is important if they have been damaged by improper maintenance. We’re the company to contact if you’re in need of gutter cleaning, gutter installation or gutter repair in Franklin Lakes NJ. In addition to being a top choice for a gutter contractor in Franklin Lakes NJ for general maintenance and installation, American Eagle Exteriors is standing by when you need effective and thorough gutter repairs, no matter how extensive. Gutters that are working properly will ensure that your home is safeguarded from the elements. American Eagle Exteriors has for years been the first choice of homeowners looking to restore maximum functionality of their gutters to protect their home. Say goodbye to leaky gutters for good with top quality seamless gutter installation in Franklin Lakes NJ. American Eagle Exteriors is your all-in-one gutter and siding company, and the only company you’ll have to contact to keep your home dry this winter.
You can rely on us for all your exterior siding and gutter repair needs in Franklin Lakes NJ and the rest of the local area.
# This file is part of DQXServer - (C) Copyright 2014, Paul Vauterin, Ben Jeffery, Alistair Miles <[email protected]>
# This program is free software licensed under the GNU Affero General Public License.
# You can find a copy of this license in LICENSE in the top directory of the source code or at <http://opensource.org/licenses/AGPL-3.0>

from urlparse import parse_qs
import importlib
import simplejson
import os
import traceback

import DQXUtils
import DQXDbTools
import responders
from responders import uploadfile

# import customresponders
# Try to import all custom modules
customRespondersPath = os.path.join(os.path.dirname(__file__), 'customresponders')
for dirname in os.listdir(customRespondersPath):
    tryModulePath = os.path.join(customRespondersPath, dirname)
    if os.path.isdir(tryModulePath):
        importlib.import_module('customresponders.' + dirname)


def application(environ, start_response):
    request_data = dict((k, v[0]) for k, v in parse_qs(environ['QUERY_STRING']).items())

    if 'datatype' not in request_data:
        DQXUtils.LogServer('--> request does not contain datatype')
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        yield 'Not found: request does not contain datatype'
        return

    request_type = request_data['datatype']

    tm = DQXUtils.Timer()

    if request_type == 'custom':
        request_custommodule = request_data['respmodule']
        request_customid = request_data['respid']
        responder = importlib.import_module('customresponders.' + request_custommodule + '.' + request_customid)
    else:
        try:
            # Fetch the handler by request type, using some introspection magic in responders/__init__.py
            responder = getattr(responders, request_type)
        except AttributeError:
            raise Exception("Unknown request {0}".format(request_type))

    request_data['environ'] = environ

    response = request_data
    try:
        try:
            response = responder.response(request_data)
            status = '200 OK'
        except DQXDbTools.CredentialException as e:
            print('CREDENTIAL EXCEPTION: ' + str(e))
            response['Error'] = 'Credential problem: ' + str(e)
            # Really should be 403 - but I think the JS will break as it expects 200
            # status = '403 Forbidden'
            status = '200 OK'
        except DQXDbTools.Timeout as e:
            status = '504 Gateway Timeout'

        # Check for a custom response (eg in downloadtable)
        if 'handler' in dir(responder):
            for item in responder.handler(start_response, response):
                yield item
        else:
            # Default is to respond with JSON
            del response['environ']
            response = simplejson.dumps(response, use_decimal=True)
            response_headers = [('Content-type', 'application/json'),
                                ('Access-Control-Allow-Origin', '*'),
                                ('Content-Length', str(len(response)))]
            start_response(status, response_headers)
            yield response
    except Exception as e:
        start_response('500 Server Error', [])
        traceback.print_exc()
        yield str(e)

    DQXUtils.LogServer('Responded to {0} in wall={1}s cpu={2}s'.format(request_type, tm.Elapsed(), tm.ElapsedCPU()))
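A minimal local harness for the WSGI entry point above, written for Python 2 to match the source. The module name dqxserver is hypothetical, and the datatype value must be one of the responder names actually defined in responders/__init__.py:

from wsgiref.simple_server import make_server

from dqxserver import application  # hypothetical module name for the file above

# Every request needs a 'datatype' query parameter, e.g.
#   http://127.0.0.1:8080/?datatype=<some responder name>
httpd = make_server('127.0.0.1', 8080, application)
httpd.serve_forever()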
Wallpaper stores in Houston wallpaper HD background is just one of the many collections of pictures or photos on this website. Wallpaper stores in Houston wallpaper HD background is posted in the category Wallpaper Stores Near Me on the joswall.com website. This post of "wallpaper stores in houston wallpaper hd background" was published on 01-08-2018 by dave and has been viewed 661,127 times. We hope you can find what you need here. We always make an effort to show pictures in HD resolution, or at least with the best image quality available. Wallpaper stores in Houston wallpaper HD background can be a helpful inspiration for those who seek an image according to specific categories; you can find it on this site. Finally, we hope all the pictures displayed on this site will inspire you. Thank you for visiting.
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.

from hamcrest.core.base_matcher import BaseMatcher


class HasAttrs (BaseMatcher):

    def __init__ (self, description_text, **kwargs):
        if description_text is None:
            description_text = "attrs"

        self.desc = description_text
        self.kwargs = kwargs

    def _matches (self, item):
        self.problem = None

        try:
            for key, value in self.kwargs.items():
                if not hasattr(item, key) \
                        or getattr(item, key) != value:
                    self.problem = key
                    return False

            return True

        except:
            return False

    def describe_to (self, description):
        if self.problem is None:
            attrs = ", ".join("s.{}={}".format(k, repr(v))
                              for (k, v) in self.kwargs.items())
        else:
            attrs = "including s.{}={}".format(
                self.problem, repr(self.kwargs[self.problem]))

        description.append_text("a structure with {} {}".format(
            self.desc, attrs))

    def describe_mismatch (self, item, description):
        if self.problem is None:
            super().describe_mismatch(item, description)
        elif hasattr(item, self.problem):
            description.append_text("had s.{}={}".format(
                self.problem, repr(getattr(item, self.problem))))
        else:
            description.append_text("didn't have s." + self.problem)
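A hedged usage sketch with PyHamcrest's assert_that; the Point structure is illustrative, and "coordinates" is just the description_text echoed in mismatch messages:

from collections import namedtuple

from hamcrest import assert_that

Point = namedtuple('Point', 'x y')

# Passes: the object has both attributes with the expected values.
assert_that(Point(1, 2), HasAttrs("coordinates", x=1, y=2))

# Would fail and report "had s.y=3" via describe_mismatch:
# assert_that(Point(1, 3), HasAttrs("coordinates", x=1, y=2))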
Visually appealing design and intuitive, comprehensive features and functions were the two major aspects of the University News app that needed to be handled perceptively. A socializing and networking app for students of colleges and universities requires a platform that is engaging and includes up-to-date elements that can keep students busy and entertained. We made the University News app interactive and constructive, tailored to the needs and requirements of college and university students. The cutting-edge and intuitive features of the UN social networking app are designed to enable the efficient exchange of information and help students never miss an update or deadline. Download Yasmo; the digital ice-breaker that allows you to discover & meet people of interest at conferences, events & any social gathering you create in real-time! Get more than 2300 cocktail recipes. Every cocktail has a detailed description and an ingredients list. There are no ads. TekRevol developed the first iteration of a mobile networking application for iOS, focusing on simplicity and a user-friendly interface. They'll continue to help iterate and develop the app for Android. The TekRevol team made it possible for the app to be deployed on the App Store quickly. While communication and transparency could be improved for longer-term projects, they’re a detailed and reliable development team. I'm CEO at a customer-focused networking application called Social ID, or SID, which improves networking at events and in daily business life, as well as reduces the waste associated with paper-based business card printing. For what projects/services did your company hire TekRevol? I hired TekRevol to develop the first iteration of the app in a time and cost-efficient manner. We needed to create a user-friendly, simple, and speedy interface for the app that made it easy to exchange business contacts. TekRevol approached me via a post I put on Facebook. They quickly got in touch, and we started the project. After signing an NDA, we got straight into the design phase. It was a rapid development process with quick deployment. This app is still only on iOS, but soon we'll expand to Android. TekRevol assigned a project manager, who is the liaison to the rest of their team. Overall, I'm satisfied with the results. We've successfully accomplished the major milestone of deployment, which allowed me to test the idea in the market quickly. Their workflow throughout the project was effective. However, I always lacked direct contact with the developers. The project manager has been great, especially in the early phases, but over time their responsiveness has decreased. This is partially due to the time difference, but it is an area in need of improvement. Their design was incredible, and the app is very stable. The developers followed the wireframes we put forward at the start exactly. They could improve communication and transparency. Overall score: Apart from minor pitfalls, TekRevol was great. Overall the scheduling was good, only minor delays. Their initial estimation was great. Some issues arose later on, but nothing that can't be overcome. We had some communication issues. Outsourcing is hard, so the time difference may significantly affect their efficiency. "It makes me feel good to work with a team that cares as much as I do." TekRevol created iOS and Android mobile applications for a beach rental service.
The team also designed a logo, web pages, and a teaser video. They are currently adding further features to the app. First impressions of the app have been positive. TekRevol strives to be proud of their work through persistence and tenacity. They are communicative and passionate. Customers can expect a hardworking and skilled team that delivers high-quality work at an affordable price. I’m the owner and founder of Beach Bandits, an app where users can rent chairs, towels, speakers, and other beach-related items. It’s an on-demand delivery and pickup service right to your spot on the beach. What challenge were you trying to address with TekRevol? I hired TekRevol to build the app. TekRevol is building two applications, for Android and iOS, for my business. The first app is for consumers and works like any shopping cart application. Users can create personal profiles, select their items, then add them to the cart before checking out. There’s a timer that displays how long customers have their orders before the product needs to be returned. The app also has GPS tracking features where users can see where their item is and when it will be delivered. Their team also added promotional codes to the app. The other application is for management purposes. I can accept orders and see the whole process. Throughout the project, their team provides screenshots and wireframes. I test the application through emailed links. TekRevol also created a new logo and web pages for pre- and post-launch. They also filmed a promotional video for the application. My account manager was my main point of contact. I also worked with someone on the development side. How did you come to work with TekRevol? I had no idea where to start in finding an app development company. I posted the question on Facebook, which prompted recommendations and areas where to begin. It came down to three companies, but I saw that TekRevol would provide me with the most value when it came down to the quoting process. We began working with TekRevol in September 2018, and the work is ongoing. TekRevol delivered a plethora of work at a reasonable price. Their team is still working on the app, providing new features I requested. They will also provide maintenance support once it is completed. I’ve shown a few people the work throughout the development process. Everyone is impressed with the designs. How did TekRevol perform from a project management standpoint? Working with my account manager is terrific. He’s thorough, making sure I’m satisfied before moving on to the next phase. We have conference calls with the developer. After the calls, the developer puts everything into action once he understands my goals. We frequently text and email. They’ve become part of my team. TekRevol uses Asana, where I can see the progress of each task and all of their files. All the work is updated through there. Their team is on track with the timeline. I appreciate the constant contact and dedication to being on the same page. TekRevol is equally as excited about the project as I am. Their team is extremely invested, offering feedback and ideas to improve my approach. This app is outside their usual realm of work. They want to showcase the app for other potential customers. It makes me feel good to work with a team that cares as much as I do. Working with TekRevol has been going well. Sometimes the first drafts had grammar or content mistakes, which they edited and got back to me within a day. They’re a design team, so grammar isn’t their strongest suit.
It’s on me to go over the screens and make sure the content is correct. Clients should have a vision for their project. TekRevol can bring that vision to life. Their team offers suggestions to make their customers’ ideas function better than anticipated. "They bring an uncommon level of experience and expertise." TekRevol did mobile app development based on wireframes. Services included strategic planning, finishing wireframes, and testing. The solution has launched. The team called upon its experience to oversee the successful development of a new product through MVP and launch. Customers can expect an efficient and transparent process from a partner that can handle technical issues. I’m a founder of uNews, a university news app. We needed an outside vendor to complete wireframing and do mobile app development. We wanted an efficient partner who would deliver quality work on time. TekRevol’s experience assured us that we were making the best decision in choosing them. We liked their leadership team and loved their presentation. Also, they took the time to understand how we could reach our goal. They worked with us to understand our vision and develop strategies. Then they finished the wireframes and worked in production sprints through the delivery of a minimum viable product (MVP). They helped with testing through launch. We were in direct contact with their leadership team. Their assistance was invaluable in seeing our app through from planning to launch. From the beginning, TekRevol's leadership team made their process clear. We worked together successfully and they handled issues well. They bring an uncommon level of experience and expertise. I can’t find anything that they'd need to improve on. Overall score: The process was smooth and successful. We'd refer them to everyone possible. TekRevol migrated a complex talent staffing site into WordPress before implementing video upload functions. They’re now working on adding a commenting feature to encourage user interactions. The migration has allowed internal stakeholders to easily change content on the backend while maintaining a simplified end-user interface. Although TekRevol could provide more technical consultation, they’ve adapted well to a changing scope and tackle all ideas head-on. I’m the founder of a talent staffing site called Talpool.com. I had originally purchased Talpool.com from a previous group. However, they’d built the site in such a complicated way that it was too technically challenging to make even the simplest changes. I tasked TekRevol with migrating the site to a WordPress platform, which would make it easier for me to change administrative content and add features. Initially, they migrated the existing site and content into WordPress, mimicking all of the previous features. It’s a talent staffing site where users can create profiles, display their portfolios, and upload information about themselves. From there, TekRevol’s team added new features that were impossible to implement in the original build, including video uploading and admin capabilities. Currently, we’re in the process of adding a comment feature, which will encourage more dynamic interactions between users. I first spoke with their president before discussing the project with a technical specialist. They translated my goals into business and technical requirements and then passed them to the developers.
I originally searched for local vendors who would be able to dissect my convoluted site into a streamlined WordPress build, but I couldn’t find anyone willing to take on the project. After searching online, I found TekRevol, based in California, and gave them a call. We went from there. I’ve spent about $25,000 CAD (approximately $18,700 USD) so far. Their work began in March 2018 and is ongoing. The actual migration only took around 4–5 months, but I often changed the scope to implement more features, which extended development time. As with the original build, users have commended the simple format and enjoy how their photos are displayed. We use Asana to coordinate the project, which allows me to see all tasks, individual timelines, and progress statuses. If I have a request or question, I write a message through Asana, and the appropriate teammate responds. I’ve often revamped ideas and changed the scope throughout the project, which would frustrate most people. However, I appreciate that TekRevol has remained receptive to my requests; they’re happy to take on any challenge. I’ve probably spent more man-hours with them than they’ve charged. Although they’re great at executing my ideas on new features, I would prefer it if they’d offer more suggestions and alternative solutions for technical challenges. Overall score: I have no complaints whatsoever. The scope has changed a lot, but they’ve done well. Working from initial ideas, TekRevol manages frontend design and backend development of a mobile app for both Android and iOS. User feedback received so far has been positive. Building on strong personal relationships, TekRevol’s collaborative development process combines client ideas with TekRevol expertise. I’m the COO of a startup that connects veterans and resources through a digital platform utilizing AI technology. We seek to reduce the suicide rate in the veteran population and ensure healthy transitions from military service. We needed to develop the core software to pursue our mission of supporting veterans. TekRevol is developing our app for both Android and iOS. We worked through a series of planning stages. We had an idea of what it would look like, and I'd drawn out each screen. They developed the frontend designs from those drawings and started developing the backend for the app. We have an account manager who is our main point of communication, as well as a technical project manager. There’s a team of programmers on each of the Android and iOS sides, as well as a marketing person. We speak to the CEO around once a month. We put out a tweet and they responded on Twitter. We started working together in December 2018 and it’s ongoing. We've had positive feedback so far on the look and feel of the app. It’s been described as simple and clean, which is the best feedback we could receive and what we wanted. We use Trello and Slack. We’ll often use a whiteboard to draw things out together. We're very collaborative and communicate extensively. We consider them part of our team and I believe they feel the same. We come up with ideas and they let us know what’s possible or whether there’s a more efficient way to do it. The relationship I have with their CEO is the biggest thing that stands out. It’s not just about business, it’s also personal. They've done a really good job so far. Be ready, because they're going to move quickly. Overall score: There's always room for improvement, but they've done a great job so far. We're the ones that hold things up.
"The TekRevol team goes above and beyond what they’re asked for." TekRevol created a game design document for an app and are currently helping build the final product. The completed design document was positively received by partners and has earned the attention of notable programmers. As a team, TekRevol has excelled in providing useful insight and quality work. Their willingness to continuously support the project surpasses expectations. I’m the founder of the ZombieTag game app. We hired TekRevol to create our game design document (GDD). In addition to this, they’re also redesigning our website. At the beginning, TekRevol helped us lay out our ideas for the app. Their team created the GDD to include our pricing, technology, and design requirement based on the story I gave them. Once the GDD was complete, they began working on our website. Their developers are currently working with our partners on the game’s artwork and design. We primarily work with Chris (Managing Director, TekRevol), and he communicates with the other engineers on his team. I searched for vendors online and chose them because they were professional in our first meeting. So far, we’ve spent around $7,000. We began working together in April 2018, and the TekRevol team is currently building the game for us. The GDD has captured the interest of prominent programmers in our industry, and we believe the market value will be strong. Our partners are all pleased with the product so far and anticipate success. They use Asana to manage the project and share important information with us. Chris helps to communicate with the engineers using industry terms they’ll understand. We keep in contact using email, phone calls, Skype, etc. The TekRevol team goes above and beyond what they’re asked for. They are quick to respond and have been a friendly, professional team to work with. Their help and expertise fulfill our needs, and they’re doing what we hired them to do. "They do quality work within a tight budget." TekRevol built a website from scratch and designed a new company logo, following guidelines and requirements. They worked with the internal team and provided WordPress training. TekRevol is responsive and communicative, tracking progress and providing updates on any changes. They could be better at ensuring that all requirements are fulfilled, but they do good work and meet expectations. I'm the founder of Dream Energy Solutions, an energy consulting venture with an emphasis on energy procurement for B2B customers. We're a new business, and we needed a logo design and website. We wanted quality work under a tight budget and a user-friendly backend platform. I reviewed the work of around 60+ companies and individual freelancers before finally picking TekRevol. They were based in the U.S., willing to work with our tight budget, and had a portfolio of past work that made me confident in their ability to produce quality work. TekRevol helped with our logo and website design and dev. They built the website from scratch and helped with the backend training in WordPress. They also provide continuous support throughout the process until the website goes live. I contributed to some of the guidelines via suggestions and feedback on the design and UX/UI, but TekRevol did the bulk of the work; they just made sure I approved everything before moving on to the next stage. My main point of contact was the project manager, who had his team working on the design and dev. 
We haven't launched yet, but the interactive process and website details are great so far; I hope they stay that way once we go live. This is the first place our consumers would interact with our company, so it's a big step for our business. The project manager was responsive to my requests, and we used Asana for project management and project deadline adjustments. We also used Skype for video conferencing when necessary, and we had no issues with a language barrier or time difference. They do quality work within a tight budget. They should double-check client suggestions, feedback, and requirements before moving on to the next stage. Clients shouldn't have to continuously ensure that their requests are being met. TekRevol built a series of custom web integrations as part of a white-label project for a small design and development firm. Features included user and admin portals, payment systems, and UI/UX design. The team at TekRevol was instrumental in successfully delivering many complex custom project demands. Their project manager coordinated the project well and communicated effectively throughout the process. The whole team was professional and accommodating. I’m the owner of Raging Rocket Web Design, LLC. We’re a web design and development company focused on small businesses. We hired them to help us develop a complex customized ordering system for a client’s business. Our goal was to create a new system from the ground up that would cater to all of our client’s needs. These included features like custom user and administration portals, as well as a new payment system with an obscure provider. We ultimately chose them not just because they offered reasonable prices, but because their portfolio of previous work seemed to meet our needs. They did a number of development tasks for us, which included building custom invoicing, payment, and ordering systems for our client’s business. They also built three customized web portals, set up user and administrator roles, and performed UI/UX design on an integrated emailing system. I worked with two members of their team, although the project manager was my main point of contact. Their developers were instrumental in completing this project for our client. The final results met or exceeded my expectations, and I look forward to working with TekRevol again when we continue the project. We relayed information to our project manager, who handled all of the coordination with the developers. They kept track of everything via a project management program, although I didn’t find it very intuitive and would have preferred just to use email. I found their willingness to accommodate requests very refreshing. It’s common for contractors to concentrate on income, but they focused on client satisfaction, which earned our company’s loyalty. Communication and professionalism are their biggest strengths. I don’t think so, since our experience was a positive one. Perhaps they could’ve collaborated more effectively with us when planning the total project scope. A step-by-step approach might have been more optimal for us with this project. With that said, we were still able to accomplish everything outlined in our project requirements.
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
OpenSwitch Test for custom login banners
"""

from pytest import mark

TOPOLOGY = """
# +-------+
# |       |
# | ops1  |  for netop login
# |       |
# +-------+
# +-------+
# |       |
# | ops2  |  for user root
# |       |
# +-------+

# Since each topology gets only one bash shell and that same shell session is
# returned by get_shell(), we use 2 containers so that the current user is
# unambiguous. There is no link between the containers.

# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
[type=openswitch name="OpenSwitch 1"] ops2

# Links
"""

# terminal commands
config_command = "configure terminal"
exit_command = "exit"
pre_cmd = "banner"
post_cmd = "banner exec"
disable_command = "no banner"
show_banner_command = "show banner"
ssh_command = "ssh -o StrictHostKeyChecking=no netop@localhost"
cat_issue = "cat /etc/issue.net"
cat_motd = "cat /etc/motd"

# sample custom login banner
line1 = "The use of COBOL cripples the mind;"
line2 = "its teaching should, therefore,"
line3 = "be regarded as a criminal offense"
line4 = "Edsger Dijkstra"
terminator = "\%"

# another sample custom login banner
line1b = "Software is like entropy:"
line2b = "It is difficult to grasp, weighs nothing,"
line3b = "and obeys the Second Law of Thermodynamics;"
line4b = "i.e., it always increases"
line5b = "Norman Augustine"

# default banners
pre_default = "Welcome to OpenSwitch"
post_default = "Please be responsible"

# banner update responses
success = "Banner updated successfully!"
invalid_user = "Only network operators may change login banners."

# terminal prompts
vty_prompt = ".*\#"
bash_prompt = vty_prompt  # for readability
banner_readline = ">>"
vty_config = ".*\(config\)#"
conn_closed = "Connection to localhost closed"

# passwords
netop_pw = "netop"


@mark.platform_incompatible(['ostl'])
def test_custom_pre_login_valid_user(topology):
    """
    Update the banner as a user in the netop group. The result should be
    reflected by the show banner command (OVSDB), and in the contents of the
    file /etc/issue.net, which are displayed before the password prompt in an
    SSH session.

    Begin an interactive bash shell as root
    1. su to 'netop' inheriting environment
    Using vtysh shell from the switch:
    1. set banner to an empty string
    2. set banner to a known value, checking for success indicator
    SSH to switch
    1. make sure that the known value is displayed before the password prompt
    """
    ops1 = topology.get('ops1')
    assert ops1 is not None

    print("Get bash shell")
    shell = ops1.get_shell('bash')

    print("Switch to user 'netop' with default shell (vtysh)")
    shell.send_command("su - netop", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Set banner to empty string")
    shell.send_command(disable_command, [success, vty_config])

    print("Set banner to a known value")
    shell.send_command(" ".join([pre_cmd, terminator]),
                       [banner_readline], timeout=1)
    shell.send_command(line1, [banner_readline])
    shell.send_command(line2, [banner_readline])
    shell.send_command(line3, [banner_readline])
    shell.send_command(line4, [banner_readline])
    shell.send_command(terminator, [success])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Return to bash prompt")
    shell.send_command(exit_command, [bash_prompt])

    print("SSH to localhost as netop")
    shell.send_command(ssh_command, [line1])
    shell.send_command(netop_pw, [bash_prompt])

    print("Return to bash shell")
    shell.send_command(exit_command, [conn_closed])

    print("Banner set successfully")
    print("Test custom_pre_login_valid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_custom_post_login_valid_user(topology):
    """
    Update the banner as a user in the netop group. The result should be
    reflected by the show banner command (OVSDB), and in the contents of the
    file /etc/motd.

    Begin an interactive bash shell as root
    1. su to 'netop' inheriting environment
    Using vtysh shell from the switch:
    1. set banner to an empty string
    2. set banner to known value, checking for success indicator
    SSH to switch
    1. make sure that the known value is displayed after the password is
       provided
    """
    ops1 = topology.get('ops1')
    assert ops1 is not None

    print("Get bash shell")
    shell = ops1.get_shell('bash')

    print("Switch to user 'netop' with default shell (vtysh)")
    shell.send_command("su - netop", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Set banner to empty string")
    shell.send_command(" ".join([disable_command, "exec"]),
                       [success, vty_config])

    print("Set banner to a known value")
    shell.send_command(" ".join([post_cmd, terminator]),
                       [banner_readline], timeout=1)
    shell.send_command(line1b, [banner_readline])
    shell.send_command(line2b, [banner_readline])
    shell.send_command(line3b, [banner_readline])
    shell.send_command(line4b, [banner_readline])
    shell.send_command(line5b, [banner_readline])
    shell.send_command(terminator, [success])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Return to bash prompt")
    shell.send_command(exit_command, [bash_prompt])

    print("SSH to localhost as netop")
    shell.send_command(ssh_command, ["password"])
    shell.send_command(netop_pw, [line3b])

    print("Return to bash shell")
    shell.send_command(exit_command, [conn_closed])

    print("Banner set successfully")
    print("Test custom_post_login_valid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_custom_pre_login_invalid_user(topology):
    """
    Update the banner as a user that is not in the netop group. The requested
    update should be refused.

    Begin an interactive bash shell as root
    1. run vtysh
    Using vtysh shell from the switch:
    1. issue command to change banner
    2. check for failure message
    """
    ops2 = topology.get('ops2')
    assert ops2 is not None

    print("Get bash shell")
    shell = ops2.get_shell('bash')

    print("Run vtysh as root")
    shell.send_command("vtysh", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Attempt to set banner to custom value")
    shell.send_command(" ".join([pre_cmd, terminator]),
                       [banner_readline], timeout=1)
    shell.send_command("hello", [banner_readline])
    shell.send_command(terminator, [invalid_user])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Exit to bash shell")
    shell.send_command(exit_command, [bash_prompt])

    print("Banner unchanged")
    print("Test custom_pre_login_invalid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_custom_post_login_invalid_user(topology):
    """
    Update the banner as a user that is not in the netop group. The requested
    update should be refused.

    Begin an interactive bash shell as root
    Using vtysh shell from the switch:
    1. issue command to change banner
    2. check for failure message
    """
    ops2 = topology.get('ops2')
    assert ops2 is not None

    print("Get bash shell")
    shell = ops2.get_shell('bash')

    print("Run vtysh as root")
    shell.send_command("vtysh", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Attempt to set banner to custom value")
    shell.send_command(" ".join([post_cmd, terminator]),
                       [banner_readline], timeout=1)
    shell.send_command("hello", [banner_readline])
    shell.send_command(terminator, [invalid_user])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Exit to bash shell")
    shell.send_command(exit_command, [bash_prompt])

    print("Banner unchanged")
    print("Test custom_post_login_invalid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_default_pre_login_valid_user(topology):
    """
    Restore default pre-login banner as a user in the netop group. The result
    should be reflected by the show banner command (OVSDB), and in the
    contents of the file /etc/issue.net.

    Begin an interactive bash shell as root
    1. su to 'netop' inheriting environment
    Using vtysh shell from the switch:
    1. restore default banner
    SSH to switch
    1. make sure that the default value is displayed before the password
       prompt
    """
    ops1 = topology.get('ops1')
    assert ops1 is not None

    print("Get bash shell")
    shell = ops1.get_shell('bash')

    print("Switch to user 'netop' with default shell (vtysh)")
    shell.send_command("su - netop", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Set banner to empty string")
    shell.send_command(disable_command, [success, vty_config])

    print("Set banner to default value")
    shell.send_command(" ".join([pre_cmd, "default"]), [success])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Return to bash prompt")
    shell.send_command(exit_command, [bash_prompt])

    print("SSH to localhost as netop")
    shell.send_command(ssh_command, [pre_default])
    shell.send_command(netop_pw, [bash_prompt])

    print("Exit to bash shell")
    shell.send_command(exit_command, [conn_closed])

    print("Banner set successfully")
    print("Test default_pre_login_valid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_default_post_login_valid_user(topology):
    """
    Restore default post-login banner as a user in the netop group. The
    result should be reflected by the show banner command (OVSDB), and in the
    contents of the file /etc/motd.

    Begin an interactive bash shell as root
    1. su to 'netop' inheriting environment
    Using vtysh shell from the switch:
    1. restore default banner
    SSH to switch
    1. make sure that the default value is displayed after the password is
       provided
    """
    ops1 = topology.get('ops1')
    assert ops1 is not None

    print("Get bash shell")
    shell = ops1.get_shell('bash')

    print("Switch to user 'netop' with default shell (vtysh)")
    shell.send_command("su - netop", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Set banner to empty string")
    shell.send_command(" ".join([disable_command, "exec"]),
                       [success, vty_config])

    print("Set banner to default value")
    shell.send_command(" ".join([post_cmd, "default"]), [success])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Return to bash prompt")
    shell.send_command(exit_command, [bash_prompt])

    print("SSH to localhost as netop")
    shell.send_command(ssh_command, ["password"])
    shell.send_command(netop_pw, [post_default])

    print("Exit to bash shell")
    shell.send_command(exit_command, [conn_closed])

    print("Banner set successfully")
    print("Test default_post_login_valid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_default_pre_login_invalid_user(topology):
    """
    Restore default pre-login banner as a user not in the netop group. The
    attempt should be rejected.

    Begin an interactive bash shell as root
    Using vtysh shell from the switch:
    1. issue restore default banner command, check for failure message
    """
    ops2 = topology.get('ops2')
    assert ops2 is not None

    print("Get bash shell")
    shell = ops2.get_shell('bash')

    print("Enter vtysh as root")
    shell.send_command("vtysh", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Attempt to set banner to default value")
    shell.send_command(" ".join([pre_cmd, "default"]), [invalid_user])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Return to bash prompt")
    shell.send_command(exit_command, [bash_prompt])

    print("Banner unchanged")
    print("Test default_pre_login_invalid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_default_post_login_invalid_user(topology):
    """
    Restore default post-login banner as a user not in the netop group. The
    attempt should be rejected.

    Begin an interactive bash shell as root
    Using vtysh shell from the switch:
    1. issue restore default banner command, check for failure message
    """
    # Note: the original body reused pre_cmd and the pre-login test name here;
    # both are corrected below to exercise the post-login ("banner exec") path.
    ops2 = topology.get('ops2')
    assert ops2 is not None

    print("Get bash shell")
    shell = ops2.get_shell('bash')

    print("Enter vtysh as root")
    shell.send_command("vtysh", vty_prompt)

    print("Enter configuration context")
    shell.send_command(config_command, [vty_config])

    print("Attempt to set banner to default value")
    shell.send_command(" ".join([post_cmd, "default"]), [invalid_user])

    print("Exit configuration context")
    shell.send_command(exit_command, [vty_prompt])

    print("Return to bash prompt")
    shell.send_command(exit_command, [bash_prompt])

    print("Banner unchanged")
    print("Test default_post_login_invalid_user PASSED")


@mark.platform_incompatible(['ostl'])
def test_disable_pre_login_valid_user(topology):
    """
    Disable the pre-login banner. If the file /etc/issue.net contains only a
    single new line, then OVSDB and the SSH banner have been changed
    appropriately.
Begin an interactive bash shell as root 1. su to user netop, inheriting environment Using vtysh shell from the switch: 1. restore the default banner 2. disable the banner, check for success 3. exit vtysh Using bash, once again 1. confirm that the length of /etc/issue.net is one byte """ ops1 = topology.get('ops1') assert ops1 is not None print("Get bash shell") shell = ops1.get_shell('bash') print("Switch to user netop, inheriting shell") shell.send_command("su - netop", [vty_prompt]) print("Enter configuration context") shell.send_command(config_command, [vty_config]) print("Set banner to default value") shell.send_command(" ".join([pre_cmd, "default"]), [success, vty_config], timeout=1) print("Disable banner, checking for success") shell.send_command(" ".join(["no", pre_cmd]), [success]) print("Exit to bash") shell.send_command(exit_command, [vty_prompt]) shell.send_command(exit_command, [bash_prompt]) shell.send_command('du -b /etc/issue.net', ['1', '/etc/issue.net']) print("Banner disabled succesfully") print("Test disable_pre_login_invalid_user PASSED") @mark.platform_incompatible(['ostl']) def test_disable_post_login_valid_user(topology): """ Disable the post-login banner. If the file /etc/motd contains only a single new line, then OVSDB and the SSH banner have been changed appropriately. Begin an interactive bash shell as root 1. su to user netop, inheriting environment Using vtysh shell from the switch: 1. restore the default banner 2. disable the banner, check for success 3. exit vtysh Using bash, once again 1. confirm the length of /etc/motd is 1 byte """ ops1 = topology.get('ops1') assert ops1 is not None print("Get bash shell") shell = ops1.get_shell('bash') print("Switch to user netop, inheriting shell") shell.send_command("su - netop", [vty_prompt]) print("Enter configuration context") shell.send_command(config_command, [vty_config]) print("Set banner to default value") shell.send_command(" ".join([post_cmd, "default"]), [success, vty_config], timeout=1) print("Disable banner, checking for success") shell.send_command(" ".join(["no", post_cmd]), [success]) print("Exit to bash") shell.send_command(exit_command, [vty_prompt]) shell.send_command(exit_command, [bash_prompt]) issue_output = ops1(cat_motd, shell='bash') issue_output = ops1(cat_motd, shell='bash') shell.send_command('du -b /etc/motd', ['1', '/etc/motd']) print("Banner disabled succesfully") print("Test disable_post_login_invalid_user PASSED") @mark.platform_incompatible(['ostl']) def test_disable_pre_login_invalid_user(topology): """ Attempt to disable the login banner. The attempt should be refused. Begin an interactive bash shell as root 1. enter vtysh interactive shell Using vtysh shell from the switch: 1. enter configuration context 2. attempt to change banner, checking for failure """ ops1 = topology.get('ops1') assert ops1 is not None print("Get bash shell") shell = ops1.get_shell('bash') print("Enter vtysh shell") shell.send_command("vtysh", [vty_prompt]) print("Enter configuration context") shell.send_command(config_command, [vty_config]) print("Disable banner, checking for failure") shell.send_command(" ".join(["no", pre_cmd]), [invalid_user]) print("Exit to bash") shell.send_command(exit_command, [vty_prompt]) shell.send_command(exit_command, [bash_prompt]) @mark.platform_incompatible(['ostl']) def test_disable_post_login_invalid_user(topology): """ Attempt to disable the login banner. The attempt should be refused. Begin an interactive bash shell as root 1. 
enter vtysh interactive shell Using vtysh shell from the switch: 1. enter configuration context 2. attempt to change banner, checking for failure """ ops1 = topology.get('ops1') assert ops1 is not None print("Get bash shell") shell = ops1.get_shell('bash') print("Enter vtysh shell") shell.send_command("vtysh", [vty_prompt]) print("Enter configuration context") shell.send_command(config_command, [vty_config]) print("Disable banner, checking for failure") shell.send_command(" ".join(["no", post_cmd]), [invalid_user]) print("Exit to bash") shell.send_command(exit_command, [vty_prompt]) shell.send_command(exit_command, [bash_prompt]) @mark.platform_incompatible(['ostl']) def test_display_pre_login(topology): """ Attempt to display the login banner. It should match the expected value. Begin an interactive bash shell as root 1. su to user netop, inheriting environment Using vtysh shell from the switch: 1. enter configuration context 2. restore default banner 3. exit configuration context 4. issue command to show banner, confirm it matches default """ ops1 = topology.get('ops1') assert ops1 is not None print("Get bash shell") shell = ops1.get_shell('bash') print("su to user netop") shell.send_command("su - netop", vty_prompt) print("Enter configuration context") shell.send_command(config_command, [vty_config]) print("Set banner to default value") shell.send_command(" ".join([pre_cmd, "default"]), [success, ""]) print("Exit configuration context") shell.send_command(exit_command, [vty_prompt]) print("Display the configured banner") shell.send_command(" ".join(["show", pre_cmd]), [pre_default]) print("Exit to bash shell") shell.send_command(exit_command, [bash_prompt]) print("Banner displayed succesfully") print("Test display_pre_login PASSED") @mark.platform_incompatible(['ostl']) def test_display_post_login(topology): """ Attempt to display the login banner. It should match the expected value. Begin an interactive bash shell as root 1. su to user netop, inheriting environment Using vtysh shell from the switch: 1. enter configuration context 2. restore default banner 3. exit configuration context 4. issue command to show banner, confirm it matches default """ ops1 = topology.get('ops1') assert ops1 is not None ops1 = topology.get('ops1') assert ops1 is not None print("Get bash shell") shell = ops1.get_shell('bash') print("su to user netop") shell.send_command("su - netop", vty_prompt) print("Enter configuration context") shell.send_command(config_command, [vty_config]) print("Set banner to default value") shell.send_command(" ".join([post_cmd, "default"]), [success, ""]) print("Exit configuration context") shell.send_command(exit_command, [vty_prompt]) print("Display the configured banner") shell.send_command(" ".join(["show", post_cmd]), [post_default]) print("Exit to bash shell") shell.send_command(exit_command, [bash_prompt]) print("Banner displayed succesfully") print("Test display_post_login PASSED")
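The tests above all repeat the same su/configure/exit choreography. If this suite grows, that shell dance could be factored into a helper along these lines; this is a hypothetical sketch using the module's own constants, not part of the original suite:

def run_banner_commands(shell, commands_and_expectations):
    """Enter the vtysh config context as netop, run each
    (command, expectations) pair, then drop back to the bash prompt."""
    shell.send_command("su - netop", [vty_prompt])
    shell.send_command(config_command, [vty_config])
    for command, expectations in commands_and_expectations:
        shell.send_command(command, expectations)
    shell.send_command(exit_command, [vty_prompt])   # leave config context
    shell.send_command(exit_command, [bash_prompt])  # return to bash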
As leaked data from extramarital affair web site Ashley Madison continues to bubble up like toxic waste, the company is firing off copyright takedown notices to stop file sharing and social networking sites like Twitter from propagating customer and internal information. The “desperate move” is “the last refuge of people who are embarrassed about something on the Internet,” says copyright lawyer Ren Bucholz of Lenczner Slaght Royce Smith Griffin LLP. “They’re trying to use copyright to try and shut down web sites and stop the flow of information they don’t want to be available, but which really, on its face, is not copyrighted.” In fact there is no copyright in “basic facts” such as company and personal information, says Noel Courage, a partner with Bereskin & Parr LLP. There can be, however, some copyright in how the information is “organized and laid out” by Ashley Madison. The use of takedown notices has not historically had much success and comes with some perils as well, especially when applied in the United States. Issuing takedown notices to reporters and news organizations is “overzealous,” says May Cheng, partner with Fasken Martineau DuMoulin LLP. “There is an exception to copyright infringement for ‘news reporting’ under the fair dealing exceptions in Canada, for example,” she says. Bucholz recalls a 2004 American case he was involved in, in which internal e-mails from the archive of an electronic voting machine company were leaked during an election year and posted online. The e-mails expressed concern about security issues with the voting machines. The company, Diebold, used the U.S. Digital Millennium Copyright Act to try to get the material taken offline. “There are provisions in that legislation that allow a party to challenge the notice and recover punitive costs if there has been an abuse of the takedown procedures,” he says. The case — OPG v. Diebold — was a similar situation where there was a security breach and the party whose material was put online was casting about to find any legal tool it could to try to stop the information from getting out, without much attention paid to whether the material was copyrighted. A U.S. district judge in California ruled the plaintiffs’ publishing of the e-mails was clearly fair use “because there was no commercial harm and no diminishment of value of the works” in republishing them. Diebold was found to have misrepresented its copyright control, putting it in violation of the DMCA and leaving it liable for court costs and damages. “It’s very difficult to imagine how a collection of people’s addresses and names could reach that level of originality to even attract copyright protection in the first place,” says Bucholz. The problem is that even under the U.S. copyright regime, considered to be the most robust, Ashley Madison would have to establish a good faith belief that the things it is complaining about are actually copyrighted. “The notices must state that the complaint is being made under penalty of perjury and I would be very nervous if I were a lawyer for a company that was making that kind of statement under these circumstances,” adds Bucholz. There are images in that database that may be copyrighted once they are formatted and watermarked by the Ashley Madison system. Section 342.1 of the Criminal Code says that anyone who uses, possesses, traffics in or permits the use of a password for accessing a computer (or its components, including data) is guilty of an indictable offence.
That would make pretty much anyone in Canada who has seeded or accessed the data and shared it guilty, and even accessing the torrent involves seeding data back to other peers, whether or not you have completed the download. The downloads also include source code and intellectual property that is proprietary. Millions of the people in the database may have been entered maliciously, exposing them to permanent long-term harm and defamation. And once the data is hosted by any web site, hasn't that site taken on the responsibility and liability for its accuracy, and for the retention requirements of PII?
# -*- coding: utf-8 -*-

# Copyright(C) 2010 Nicolas Duhamel
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

import urllib

from weboob.tools.browser import BaseBrowser
from weboob.tools.browser.decorators import id2url

from .pages import InitPage, CanalplusVideo, VideoPage

import lxml.etree


class XMLParser:
    def parse(self, data, encoding=None):
        if encoding is None:
            parser = None
        else:
            parser = lxml.etree.XMLParser(encoding=encoding, strip_cdata=False)
        return lxml.etree.XML(data.get_data(), parser)


__all__ = ['CanalplusBrowser']


class CanalplusBrowser(BaseBrowser):
    DOMAIN = u'service.canal-plus.com'
    ENCODING = 'utf-8'
    PAGES = {
        r"http://service.canal-plus.com/video/rest/initPlayer/cplus/": InitPage,
        r"http://service.canal-plus.com/video/rest/search/cplus/.*": VideoPage,
        r"http://service.canal-plus.com/video/rest/getVideosLiees/cplus/(?P<id>.+)": VideoPage,
    }

    # We need lxml.etree.XMLParser to read CDATA
    PARSER = XMLParser()

    FORMATS = {
        'sd': 'BAS_DEBIT',
        'hd': 'HD',
    }

    def __init__(self, quality, *args, **kwargs):
        BaseBrowser.__init__(self, parser=self.PARSER, *args, **kwargs)
        if quality in self.FORMATS:
            self.quality = self.FORMATS[quality]
        else:
            self.quality = 'HD'

    def home(self):
        self.location("http://service.canal-plus.com/video/rest/initPlayer/cplus/")

    def iter_search_results(self, pattern):
        self.location("http://service.canal-plus.com/video/rest/search/cplus/"
                      + urllib.quote_plus(pattern))
        return self.page.iter_results()

    @id2url(CanalplusVideo.id2url)
    def get_video(self, url, video=None):
        self.location(url)
        return self.page.get_video(video, self.quality)
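A minimal usage sketch, assuming the rest of the weboob application machinery is importable; the quality string, the search pattern, and the printed attribute are illustrative rather than taken from this module:

if __name__ == '__main__':
    browser = CanalplusBrowser('sd')  # request standard-definition streams
    for video in browser.iter_search_results(u'zapping'):
        print video.id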
Below we have a list of questions frequently asked by our customers. Can I meet with you to see the chair covers and sashes? Absolutely, we typically meet with customers at their event locations. This way they will know exactly how the chair covers will look on their big day. We can also let you know when we will be at local venues so you can see our work. Weekends tend to work best for us, but we will work hard to come up with a time convenient for you as well. We have interacted with many banquet managers throughout the area, so if you do not have time to meet with us, please just ask about us! My venue requires that all vendors are fully insured. Are you insured? We are fully insured up to $1,000,000, in accordance with most venues' requirements. To place an order, please first check with us for availability. Once availability is confirmed, print out our order form (located on our "order placement" tab). An approximate 50% deposit is due at the time of order placement. As soon as we receive your order form and payment we will mail you a copy of the order form and a receipt for your records. I am not sure how many chair covers I will need. What do I write on the order form? We ask that when you fill out the order form you give us a range of chair covers you may need. The lower number should be your best estimate and the higher number should be the maximum number of guests you are inviting. We need this higher number to guarantee availability for you. It is perfectly fine if you base your approximate 50% deposit on your best estimate. What if the number of chair covers I need changes between the time I order and the event date? We will get your final number from you approximately one week before your event. At this time, we will base your remaining payment on the number of chair covers you need. Therefore, you are only paying for what you are actually using. Do I have to coordinate setup time between your company and my venue? Absolutely not! We will contact your venue 1-2 weeks before your event and determine what setup and tear-down times work best. We will do all we can to set up/tear down the chair covers within the time constraints given to us by venues. The venue must allow us a reasonable period of time to perform setup (at least one hour per 100 chair covers, in advance of the wedding). Will the chair covers be cleaned and pressed before my event? Our chair covers will be immaculately clean and wrinkle-free! We clean and press every chair cover and sash before each event. The chair covers are kept on hangers after laundering and are transported on hangers to your venue location. Should I order more chair covers than the number of guests I am inviting? It is always recommended to order a few more chair covers than guests you are inviting. We will then set up extra chairs for the venue just in case unexpected guests show up (this happens more than you would think!). Also, please look at your venue's seating chart before ordering. Often venues will put extra chairs at tables. If you know you are having extra chairs set up at your venue (i.e., to keep an even number per table), please factor this in when giving us your final count. We always recommend booking as soon as possible. We start to book up for holiday weekends 6 months or more in advance. Also, most summer brides tend to order approximately 5-6 months before their event, so please keep this in mind. However, sometimes we do not fully book up for certain dates (even in the summer), so please check with us for availability.
We have booked weddings just days before the actual event, so please just ask! Do you ever book up? Yes, due to the time constraints placed on us by venues, we can only book a set number of events in one day. We value our customers and would never jeopardize our work by over-extending ourselves. We know that you are trusting us to decorate your reception flawlessly and we will not disappoint! Our prices include setup, teardown and delivery (to Rochester and Batavia). The only additional charge will be sales tax, which is 8%. We may charge additional travel charges to areas outside of our delivery area. Please do not hesitate to contact us with any additional questions. We would love to hear from you!
# Copyright Cartopy Contributors # # This file is part of Cartopy and is released under the LGPL license. # See COPYING and COPYING.LESSER in the root of the repository for full # licensing details. import numpy as np from numpy.testing import assert_almost_equal import cartopy.crs as ccrs from .helpers import check_proj_params class TestStereographic: def test_default(self): stereo = ccrs.Stereographic() other_args = {'ellps=WGS84', 'lat_0=0.0', 'lon_0=0.0', 'x_0=0.0', 'y_0=0.0'} check_proj_params('stere', stereo, other_args) assert_almost_equal(np.array(stereo.x_limits), [-5e7, 5e7], decimal=4) assert_almost_equal(np.array(stereo.y_limits), [-5e7, 5e7], decimal=4) def test_eccentric_globe(self): globe = ccrs.Globe(semimajor_axis=1000, semiminor_axis=500, ellipse=None) stereo = ccrs.Stereographic(globe=globe) other_args = {'a=1000', 'b=500', 'lat_0=0.0', 'lon_0=0.0', 'x_0=0.0', 'y_0=0.0'} check_proj_params('stere', stereo, other_args) # The limits in this test are sensible values, but are by no means # a "correct" answer - they mean that plotting the crs results in a # reasonable map. assert_almost_equal(np.array(stereo.x_limits), [-7839.27971444, 7839.27971444], decimal=4) assert_almost_equal(np.array(stereo.y_limits), [-3932.82587779, 3932.82587779], decimal=4) def test_true_scale(self): # The "true_scale_latitude" parameter only makes sense for # polar stereographic projections (#339 and #455). # For now only the proj string creation is tested # See test_scale_factor for test on projection. globe = ccrs.Globe(ellipse='sphere') stereo = ccrs.NorthPolarStereo(true_scale_latitude=30, globe=globe) other_args = {'ellps=sphere', 'lat_0=90', 'lon_0=0.0', 'lat_ts=30', 'x_0=0.0', 'y_0=0.0'} check_proj_params('stere', stereo, other_args) def test_scale_factor(self): # See #455 # Use spherical Earth in North Polar Stereographic to check # equivalence between true_scale and scale_factor. # In these conditions a scale factor of 0.75 corresponds exactly to # a standard parallel of 30N. globe = ccrs.Globe(ellipse='sphere') stereo = ccrs.Stereographic(central_latitude=90., scale_factor=0.75, globe=globe) other_args = {'ellps=sphere', 'lat_0=90.0', 'lon_0=0.0', 'k_0=0.75', 'x_0=0.0', 'y_0=0.0'} check_proj_params('stere', stereo, other_args) # Now test projections lon, lat = 10, 10 projected_scale_factor = stereo.transform_point(lon, lat, ccrs.Geodetic()) # should be equivalent to North Polar Stereo with # true_scale_latitude = 30 nstereo = ccrs.NorthPolarStereo(globe=globe, true_scale_latitude=30) projected_true_scale = nstereo.transform_point(lon, lat, ccrs.Geodetic()) assert projected_true_scale == projected_scale_factor def test_eastings(self): stereo = ccrs.Stereographic() stereo_offset = ccrs.Stereographic(false_easting=1234, false_northing=-4321) other_args = {'ellps=WGS84', 'lat_0=0.0', 'lon_0=0.0', 'x_0=1234', 'y_0=-4321'} check_proj_params('stere', stereo_offset, other_args) assert (tuple(np.array(stereo.x_limits) + 1234) == stereo_offset.x_limits)
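For reference, the equivalence exercised in test_scale_factor above follows from the spherical polar stereographic relation k0 = (1 + sin(lat_ts)) / 2, so a true-scale latitude of 30 degrees gives k0 = (1 + 0.5) / 2 = 0.75. A quick sanity check of that arithmetic:

import numpy as np

# spherical polar stereographic: scale factor implied by a true-scale latitude
k0 = (1 + np.sin(np.radians(30))) / 2
assert np.isclose(k0, 0.75)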
Happy Halloween everybody. What are you doing for Halloween? Are we supposed to be happy on Halloween? Now I’m really scared 🙂 Go play with some candy. 4- Fawned over the small kids dressed as cute animals that came to the door. 5- Ate some candy, but not a lot. Mini chocolate bars were another story entirely…. 6- Talked my way out of a speeding ticket.
#!/usr/bin/env python3
# noinspection PyPackageRequirements
from datetime import datetime

import pan.xapi
import tablib
import xmltodict
import yaml

HEADERS_DEFAULT_MAP = {'rule-type': 'universal', 'negate-source': 'no',
                       'negate-destination': 'no'}
HEADERS_REMOVE = ['option', 'profile-setting', 'disabled', 'log-end',
                  'log-start', 'category']
HEADERS_ORDER = ['@name', 'action', 'tag', 'rule-type', 'from', 'source',
                 'negate-source', 'source-user', 'hip-profiles', 'to',
                 'destination', 'negate-destination', 'application',
                 'service', 'profile-setting', 'description']

__author__ = 'Jay Shepherd'


class Config:
    def __init__(self, filename):
        with open(filename, 'r') as stream:
            # safe_load avoids executing arbitrary YAML tags
            config = yaml.safe_load(stream)
        self.top_domain = config['top_domain']
        self.firewall_api_key = config['firewall_api_key']
        self.firewall_hostnames = config['firewall_hostnames']


def retrieve_firewall_configuration(hostname, api_key, config='running'):
    """
    This takes the FQDN of the firewall and retrieves the requested config.
    Defaults to running.
    :param hostname: Hostname (FQDN) of firewall to retrieve configuration from
    :param api_key: API key to access firewall configuration
    :param config: Which config to retrieve, defaults to running.
    :return: Dictionary containing firewall configuration
    """
    firewall = pan.xapi.PanXapi(hostname=hostname, api_key=api_key)
    command = "show config {}".format(config)
    firewall.op(cmd=command, cmd_xml=True)
    return xmltodict.parse(firewall.xml_result())


def combine_the_rulebase(pushed_config, running_config):
    pre_rulebase = safeget(pushed_config, 'policy', 'panorama', 'pre-rulebase',
                           'security', 'rules', 'entry')
    device_rulebase = safeget(running_config, 'config', 'devices', 'entry',
                              'vsys', 'entry', 'rulebase', 'entry')
    post_rulebase = safeget(pushed_config, 'policy', 'panorama', 'post-rulebase',
                            'security', 'rules', 'entry')
    default_rulebase = safeget(pushed_config, 'policy', 'panorama', 'post-rulebase',
                               'default-security-rules', 'rules', 'entry')

    # Combine the pre, on-device, and post rule sets into a single ordered view
    combined_rulebase = pre_rulebase + device_rulebase + post_rulebase + default_rulebase
    return combined_rulebase


def safeget(dct, *keys):
    """
    Takes a dictionary and key path. Checks if key exists and returns value of key
    :param dct: Dictionary to iterate over
    :param keys: Keys to iterate over
    :return: Returns value of key as list if it exists, else returns empty list
    """
    dct_as_list = []
    for key in keys:
        try:
            dct = dct[key]
        except (KeyError, TypeError):
            return list()
    if isinstance(dct, list):
        return dct
    else:
        dct_as_list.append(dct)
        return dct_as_list


def get_headers(data_dict, preferred_header_order=None, headers_to_remove=None):
    """
    Takes a nested dictionary and returns headers as a unique list. For PanOS
    the top level of each dictionary database is an entry "ID" field, which
    then contains additional attributes/keys with values.
    :param data_dict: Iterable of rule dictionaries parsed from the firewall
        configuration
    :param preferred_header_order: List of headers. If one or more headers in
        this list are found in the provided dictionary, they will be returned
        in the same order they occur in this list. Headers found in the dict
        but not in this list will be sorted and appended to the end of the
        list.
    :param headers_to_remove: Collection of headers which will not appear in
        the returned list.
    :return: list of found headers, in an order approximately following the
        preferred order
    """
    if preferred_header_order is None:
        preferred_header_order = []
    if headers_to_remove is None:
        headers_to_remove = []

    scraped_headers = set()
    for item in data_dict:
        for header in item:
            scraped_headers.add(header)

    ordered_headers = []
    scraped_headers = scraped_headers.difference(set(headers_to_remove))
    for header in preferred_header_order:
        if header in scraped_headers:
            ordered_headers.append(header)
            scraped_headers.remove(header)
    ordered_headers += sorted(list(scraped_headers))
    return ordered_headers


def check_default(object_to_check, default_key, default_map=None):
    """
    Takes an object, a header key, and a default_map table. If the object is
    an empty string and there is a default mapping for the key, returns the
    default.
    :param object_to_check: Python object to check against the table
    :param default_key: Header name used to look up a default value
    :param default_map: Dictionary mapping header names to default values
    :return: object_to_check, or its mapped default if it was empty
    """
    if object_to_check == '' and default_key in default_map:
        return default_map[default_key]
    return object_to_check


def write_to_excel(rule_list, filename, preferred_header_order=None, headers_to_remove=None, default_map=None):
    # Initialize Tablib Data
    dataset = tablib.Dataset()

    # Define headers we would like to include
    rule_headers = get_headers(rule_list, preferred_header_order, headers_to_remove)
    dataset.headers = ["Order"] + rule_headers

    # Add rules to dataset
    index_num = 0
    for rule in rule_list:
        index_num += 1
        formatted_rule = [index_num]
        for header in rule_headers:
            cell = rule.get(header, '')
            if isinstance(cell, dict):
                cell = cell.get('member', cell)
            if isinstance(cell, list):
                # Flatten multi-member fields into a comma-separated cell
                formatted_rule.append(', '.join(cell))
            else:
                safe_cell = check_default(str(cell), header, default_map)
                formatted_rule.append(safe_cell)
        dataset.append(formatted_rule)

    # Use tablib to write rules
    with open(filename, mode='wb') as file:
        file.write(dataset.xlsx)


def do_the_things(firewall, api_key, top_domain=''):
    """
    This is the primary meat of the script. It takes a firewall and API key
    and writes out excel sheets with the rulebase.
    :param firewall: Firewall to query
    :param api_key: API key to query
    :return: None
    """
    # "Zhu Li, do the thing!"
    # Retrieve both possible configurations from firewall
    running_config = retrieve_firewall_configuration(firewall,
                                                     api_key,
                                                     config='running')
    pushed_config = retrieve_firewall_configuration(firewall,
                                                    api_key,
                                                    config='pushed-shared-policy')

    # Store objects from config in separate dictionaries.
    # Use helper functions to achieve.
    # Safety First
    # (address and address_groups are currently unused in the output)
    address = safeget(pushed_config, 'policy', 'panorama', 'address', 'entry')
    address_groups = safeget(pushed_config, 'policy', 'panorama', 'address-group', 'entry')
    combined_rulebase = combine_the_rulebase(pushed_config, running_config)

    # Define headers we care about being ordered in the order they should be.
    rulebase_headers_order = HEADERS_ORDER

    # I'm removing excel columns that I don't want in output based upon stupid stuff.
    # Perhaps I don't care.
    # Perhaps the fields just don't work correctly because PaloAlto output refuses any consistency.
    # Yeah I'm going to go with the latter option.
    rulebase_headers_remove = HEADERS_REMOVE

    # Remember that consistency thing...
    # ... yeah this is to populate the excel fields with known default mappings.
    # This is for fields I do need to be in output.
    rulebase_default_map = HEADERS_DEFAULT_MAP

    # Finally let's write the damn thing
    # str.strip() removes a set of characters rather than a suffix, so trim
    # the top-level domain from the hostname explicitly.
    write_to_excel(
        combined_rulebase,
        get_filename(firewall.replace(top_domain, '').rstrip('.')),
        rulebase_headers_order,
        rulebase_headers_remove,
        rulebase_default_map
    )

    # I should print something to let user know it worked.
    # Dharma says feedback is important for good coding.
    print('{} processed. Please check directory for output files.'.format(firewall))


def get_filename(firewall):
    """
    Generate an excel spreadsheet filename from a firewall name and the
    current time.
    :param firewall: firewall name
    :return: A filename in the format YYYY-MM-DD-{firewall}-combined-rules.xlsx
    """
    current_time = datetime.now()
    return (
        "{year}-"
        "{month}-"
        "{day}-"
        "{firewall}-combined-rules"
        ".xlsx"
    ).format(
        firewall=firewall,
        year=pad_to_two_digits(current_time.year),
        month=pad_to_two_digits(current_time.month),
        day=pad_to_two_digits(current_time.day),
    )


def pad_to_two_digits(n):
    """
    Add leading zeros to format a number as at least two digits
    :param n: any number
    :return: The number as a string with at least two digits
    """
    return str(n).zfill(2)


def main():
    script_config = Config('config.yml')
    for firewall in script_config.firewall_hostnames:
        do_the_things(firewall, script_config.firewall_api_key,
                      script_config.top_domain)


if __name__ == '__main__':
    main()
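For reference, the Config class at the top of this script expects a config.yml with three keys; a minimal sketch, where the domain, API key, and hostnames are placeholders:

# config.yml
top_domain: example.com
firewall_api_key: 0123456789abcdef
firewall_hostnames:
  - fw1.example.com
  - fw2.example.com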
God is the Creator of the heavens and the earth, the Alpha and the Omega, the all-loving, forgiving Father and the Great I Am. With such a big God, how can we understand His many facets? One way is through scripture, where God is described in more than 200 unique ways. Praying the Names of God takes these names and makes them personal through heartfelt prayers accompanied by the scriptures where these names of God are found. Inspired by the beloved classic, The Wonderful Names of Our Wonderful Lord, this brand-new book will lead you to a deeper faith in the one true Lord.
# -*- coding=utf-8 -*-
import win32serviceutil
import win32service
import win32event

import os, sys
import time
import wmi, zlib, json


def log(log_string):
    f = open("c:\\log.txt", 'a+')
    f.write(str(log_string) + "\n\n")
    f.close()


def get_sys_info():
    syinfo = {}
    encrypt_str = ""
    c = wmi.WMI()

    cpu_tmp = []
    for cpu in c.Win32_Processor():
        # CPU serial number
        cpu_item = {}
        encrypt_str = encrypt_str + cpu.ProcessorId.strip()
        cpu_item['ProcessorId'] = cpu.ProcessorId.strip()
        cpu_item['Name'] = cpu.Name.strip()
        cpu_tmp.append(cpu_item)
    syinfo['cpu'] = cpu_tmp

    dis_tmp = []
    for physical_disk in c.Win32_DiskDrive():
        # disk serial number and size in GB
        dis_itm = {}
        dis_itm['Caption'] = physical_disk.Caption.strip()
        dis_itm['SerialNumber'] = physical_disk.SerialNumber.strip()
        dis_itm['Size'] = long(physical_disk.Size) / 1000 / 1000 / 1000
        dis_tmp.append(dis_itm)
        encrypt_str = encrypt_str + physical_disk.SerialNumber.strip()
    syinfo['disk'] = dis_tmp

    tmp = {}
    for board_id in c.Win32_BaseBoard():
        # motherboard serial number
        tmp['SerialNumber'] = board_id.SerialNumber.strip()
        tmp['Manufacturer'] = board_id.Manufacturer.strip()
        encrypt_str = encrypt_str + board_id.SerialNumber.strip()
    syinfo['board'] = tmp

    tmp = {}
    for bios_id in c.Win32_BIOS():
        # BIOS serial number
        tmp['SerialNumber'] = bios_id.SerialNumber.strip()
        encrypt_str = encrypt_str + bios_id.SerialNumber.strip()
    syinfo['bios'] = tmp

    # fingerprint: checksum over the concatenated hardware serial numbers
    syinfo['encrypt_str'] = zlib.adler32(encrypt_str)
    return syinfo


class test1(win32serviceutil.ServiceFramework):
    _svc_name_ = "test_python"
    _svc_display_name_ = "test_python"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        # Log system info once per second until the stop event is signalled,
        # so the service responds promptly to SvcStop.
        while True:
            syinfo = get_sys_info()
            log(json.dumps(syinfo))
            rc = win32event.WaitForSingleObject(self.hWaitStop, 1000)
            if rc == win32event.WAIT_OBJECT_0:
                break


if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(test1)
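win32serviceutil.HandleCommandLine wires the script to the standard pywin32 service verbs, so from an elevated prompt it can be managed like any other Windows service (the file name here is a placeholder):

python sysinfo_service.py install
python sysinfo_service.py start
python sysinfo_service.py stop
python sysinfo_service.py remove
python sysinfo_service.py debug   # run in the console for troubleshooting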
Three simple ways to utilise triggers to increase emotional aliveness, also known as your natural magnetism, zest for life or the genuine expression of your inner vim. Turn lead to gold! Enjoy! #7MinAudioSeries = all recordings are 7mins or less. A bit like a good mug of the finest hot chocolate, sweet enough, rich enough, and it just hits the spot. Let me know if you agree?
#!/usr/bin/python import sys; import os; import glob; import string; import imp; import shutil; import re; ############################################################### ### set up if len(sys.argv) != 3: print "usage:" print "%s <config file> <output dir>" % sys.argv[0]; sys.exit(1); OutPath = sys.argv[2]; ConfigFile = OutPath + "/" + sys.argv[1]; if os.path.exists(OutPath) == True: shutil.rmtree(OutPath); os.mkdir(OutPath); shutil.copy(sys.argv[1], ConfigFile); imp.load_source('Conf', ConfigFile); import Conf; ImgDir = OutPath + "/" + os.path.basename(Conf.OutImgDir); shutil.copytree(Conf.OutImgDir, ImgDir); ############################################################### ### read img names Pattern = Conf.OutImgDir + "/*.jpg"; print Pattern; ImgList = glob.glob(Pattern); Imgs = {}; for i in ImgList: j = os.path.basename(i).split("_"); if Imgs.has_key(j[0]) == False: Imgs[j[0]] = []; Imgs[j[0]].append(os.path.basename(i)); f = open(OutPath + "/db.html", 'w+'); f.write("<html>\n"); for i, j in Imgs.iteritems(): f.write("Person: " + i + "<br>\n"); for k in j: img = "<img src=\"./SavedImages/%s\">" % k; f.write(img); f.write("<br><br>"); f.write("</html>\n"); f.close(); ############################################################### ### write self test data in html def PlotPNG(InFile, OutFile, Src, Dst, Tmp): TmpFile = open(Tmp, 'w+'); TmpFile.truncate(); TmpFile.seek(0, 0); TmpFile.write("set term png medium\n"); TmpFile.write("set output \'%s/%s\'\n" % (os.path.abspath(Dst), OutFile)); TmpFile.write("set xr[0:6]\n"); TmpFile.write("load \'%s\'\n" % (InFile)); TmpFile.flush(); Cwd = os.getcwd(); os.chdir(Src); os.system("gnuplot %s/%s" % (Cwd, TmpFile.name)); os.chdir(Cwd); TmpFile.close(); def WriteHtml(Id, Protected): PerHtml = open(OutPath + "/Person_" + Id + ".html", 'w+'); PerHtml.write("Self Test for Person: %s<br>" % Id); PerHtml.write("<br><br>") PerHtml.write("<a href=\"./results.html\">HOME</a>"); PerHtml.write("<br><br>") PerHtml.write("<br><br>") Tmp = OutPath + "/tmp.p"; for i in Protected: fDir = Conf.EigenMethod_DataDir + "/SelfTest_" + Id + "/Face_" + i[1]; if os.path.exists(fDir): PerHtml.write("<a href=\"./Face_%s_%s.html\"> Face %s </a><br>" % (Id, i[1], i[1])); FaceHtml = open("%s/Face_%s_%s.html" % (OutPath, Id, i[1]), 'w+'); FaceHtml.write("<a href=\"./Person_%s.html\"> Person %s </a><br>" % (Id, Id) ); FaceHtml.write("<a href=\"./SavedImages/%s\"> Tested Image </a><br>" % (i[0])); FaceHtml.write("<br><br>"); FaceHtml.write("<br><br>"); avgImg = "avg_%s_%s.png" % (Id, i[1]); PlotPNG("person_avg.p", avgImg, fDir, OutPath, Tmp); FaceHtml.write("Average of persons\n"); FaceHtml.write("<br>"); FaceHtml.write("<img src=\"./%s\"></img>" % avgImg); FaceHtml.write("<br>"); avgWithInput = glob.glob(Conf.EigenMethod_DataDir + "/SelfTest_%s/Face_%s/avg_with_input*" % (Id, i[1])); if len(avgWithInput) > 0: avgWithInputImg = "avg_input_%s_%s.png" % (Id, i[1]); #print os.path.basename(avgWithInput[0]); PlotPNG(os.path.basename(avgWithInput[0]), avgWithInputImg, fDir, OutPath, Tmp); FaceHtml.write("Average with Input Image\n"); FaceHtml.write("<br>"); FaceHtml.write("<img src=\"./%s\"></img>" % avgWithInputImg); FaceHtml.write("<br>"); FaceHtml.close(); PerHtml.close(); ############################################################### ### read self test info Pattern = Conf.EigenMethod_DataDir + "/SelfTest_*"; PersonDir = glob.glob(Pattern); ResultsHtml = open(OutPath + "/results.html", 'w+'); ResultsHtml.write('#'*30 + "<br>"); ResultsHtml.write("Results<br>"); ResultsHtml.write('#'*30 
+ "<br>"); ResultsHtml.write("<br><br>"); ResultsHtml.write("<a href=\"./db.html\">Database</a>"); ResultsHtml.write("<br><br>"); for i in PersonDir: BaseDir = os.path.basename(i); Id = BaseDir.split("_")[1]; f = open(i + "/SelfTest." + Id + ".log"); Protected = []; for line in f: j = re.search("Protected", line); if j != None: k = string.split(string.rstrip(line, '\n'), ":"); k = map(string.rstrip, k); k = map(string.lstrip, k); Protected.append(k[1:]); f.close(); WriteHtml(Id, Protected) ResultsHtml.write("<a href=\"./Person_%s.html\">Person %s</a>" % (Id, Id)); ResultsHtml.write("<br>"); ResultsHtml.close();
Gewurztraminer 2007, from Alsace. It’s nice and spicy, cinnamon-scented. I suggest you do the same…this is going to take some time.
#!/usr/bin/python
# Modified from averagesecurityguy
# Props to him
# https://github.com/averagesecurityguy/
#
# Command-line parser taken from below:
# by Konrads Smelkovs (https://github.com/truekonrads)
#
# merger.py
# based off: http://cmikavac.net/2011/07/09/merging-multiple-nessus-scans-python-script/
# by: mastahyeti
#
# Everything glued together by _sen

import requests
import json
import time
import argparse
import os
import sys
import getpass
import xml.etree.ElementTree as etree

# Hard-coded variables
requests.packages.urllib3.disable_warnings()
verify = False
token = ''

parser = argparse.ArgumentParser(description='Download Nessus results in bulk / Merge Nessus files')
parser.add_argument('--url', '-u', type=str, default='localhost',
                    help="URL of Nessus instance. This or --merge must be specified")
parser.add_argument('--format', '-F', type=str, default="html",
                    choices=['nessus', 'html'],
                    help='Format of Nessus output, defaults to html')
parser.add_argument('-o', '--output', type=str, default=os.getcwd(),
                    help='Output directory')
parser.add_argument('-m', '--merge', action='store_true',
                    help='Merge all .nessus files in output directory')
parser.add_argument('-e', '--export', action='store_true',
                    help='Export files')
parser.add_argument('--folder', '-f', type=str, default=0,
                    help='Scan folder from which to download')
args = parser.parse_args()


def smart_str(x):
    if isinstance(x, unicode):
        return unicode(x).encode("utf-8")
    elif isinstance(x, int) or isinstance(x, float):
        return str(x)
    return x


def build_url(resource):
    nessus_url = "https://" + args.url + ":8834"
    return '{0}{1}'.format(nessus_url, resource)


def connect(method, resource, data=None):
    """
    Send a request

    Send a request to Nessus based on the specified data. If the session
    token is available add it to the request. Specify the content type as
    JSON and convert the data to JSON format.
    """
    headers = {'X-Cookie': 'token={0}'.format(token),
               'content-type': 'application/json'}

    data = json.dumps(data)

    if method == 'POST':
        r = requests.post(build_url(resource), data=data, headers=headers, verify=verify)
    elif method == 'PUT':
        r = requests.put(build_url(resource), data=data, headers=headers, verify=verify)
    elif method == 'DELETE':
        r = requests.delete(build_url(resource), data=data, headers=headers, verify=verify)
    else:
        r = requests.get(build_url(resource), params=data, headers=headers, verify=verify)

    # Exit if there is an error.
    if r.status_code != 200:
        e = r.json()
        print e['error']
        sys.exit()

    # When downloading a scan we need the raw contents not the JSON data.
    if 'download' in resource:
        return r.content
    else:
        return r.json()


def login(usr, pwd):
    """
    Login to nessus.
    """
    login = {'username': usr, 'password': pwd}
    data = connect('POST', '/session', data=login)
    return data['token']


def logout():
    """
    Logout of nessus.
    """
    connect('DELETE', '/session')


def get_format():
    # TODO: Add support for more formats if needed
    return args.format


def get_scans():
    """
    Get Scans from JSON data
    """
    scans_to_export = {}
    data = connect('GET', '/scans')
    all_scans = data['scans']

    # Create dictionary mapping scan_id:scan_name (in this case scan_name = host ip)
    folder = args.folder
    for scans in all_scans:
        if scans['folder_id'] == int(folder):
            scans_to_export[scans['id']] = smart_str(scans['name'])
    return scans_to_export


def export_status(sid, fid):
    """
    Check export status

    Check to see if the export is ready for download.
    """
    data = connect('GET', '/scans/{0}/export/{1}/status'.format(sid, fid))
    return data['status'] == 'ready'


def export(scans):
    """
    Make an export request

    Request an export of the scan results for the specified scan and
    historical run. In this case the format is hard coded as nessus but the
    format can be any one of nessus, html, pdf, csv, or db. Once the request
    is made, we have to wait for the export to be ready.
    """
    # get format for export and handle POST params
    export_format = get_format()
    params = {'format': export_format, 'chapters': 'vuln_by_host'}
    fids = {}

    # Create dictionary mapping scan_id:file_id (File ID is used to download the file)
    for scan_id in scans.keys():
        # Attempt to Export scans
        print "Exporting {0}".format(scans[scan_id])
        data = connect('POST', '/scans/{0}/export'.format(scan_id), data=params)
        fids[scan_id] = data['file']
        while export_status(scan_id, fids[scan_id]) is False:
            time.sleep(5)

        # Attempt to Download scans
        print "Downloading {0}".format(scans[scan_id])
        data = connect('GET', '/scans/{0}/export/{1}/download'.format(scan_id, fids[scan_id]))
        scan_name = '{0}.{1}'.format(scans[scan_id], params['format'])
        scan_name_duplicate = 0
        while True:
            if scan_name in os.listdir(args.output):
                print "Duplicate Scan Name!"
                scan_name_duplicate += 1
                scan_name = '{0}_{1}.{2}'.format(scans[scan_id],
                                                 str(scan_name_duplicate),
                                                 params['format'])
            else:
                break
        print 'Saving scan results to {0}.'.format(scan_name)
        with open(os.path.join(args.output, scan_name), 'w') as f:
            f.write(data)
    print "All Downloads complete! hax0r"


def merge():
    first = 1
    for fileName in os.listdir(args.output):
        fileName = os.path.join(args.output, fileName)
        if ".nessus" in fileName:
            print ":: Parsing", fileName
            if first:
                mainTree = etree.parse(fileName)
                report = mainTree.find('Report')
                report.attrib['name'] = 'Merged Report'
                first = 0
            else:
                tree = etree.parse(fileName)
                for host in tree.findall('.//ReportHost'):
                    existing_host = report.find(".//ReportHost[@name='" + host.attrib['name'] + "']")
                    # An Element with no children is falsy, so compare against
                    # None explicitly when checking whether the host exists.
                    if existing_host is None:
                        print "adding host: " + host.attrib['name']
                        report.append(host)
                    else:
                        for item in host.findall('ReportItem'):
                            if existing_host.find("ReportItem[@port='" + item.attrib['port'] + "'][@pluginID='" + item.attrib['pluginID'] + "']") is None:
                                print "adding finding: " + item.attrib['port'] + ":" + item.attrib['pluginID']
                                existing_host.append(item)
            print ":: => done."
    with open(os.path.join(args.output, "nessus_merged.nessus"), 'w') as merged_file:
        mainTree.write(merged_file, encoding="utf-8", xml_declaration=True)
    print "All .nessus files merged to 'nessus_merged.nessus' file in current dir"


if __name__ == '__main__':
    # Download Files
    if args.export or args.merge:
        if args.export:
            # Login
            username = raw_input("Username: ")
            password = getpass.getpass("Password: ")
            print 'Logging in....'
            token = login(username, password)
            print "Getting scan list...."
            scans = get_scans()
            print 'Downloading and Exporting Scans...'
            export(scans)

        # Merge files
        if args.merge:
            merge()
    else:
        print parser.format_usage()  # removes newline + None when print_usage() is used
Over the last 25 years, Steve's worked on everything from launching the iPhone and Amazon in the UK to projects for the Greek government, NASA and the UN, and he now heads up the Creative Lab at Google in London. Steve Vranakis shares his creative ideas and experimental work running Google's Creative Lab, from how they used machine learning in music to create thousands of never-before-heard sounds, to using artificial intelligence to find great works of art. They developed an invisible interface using conductive thread woven into fabrics to design fashion with function, and allowed riders to see through the eyes of a self-driving car to help them gain a better understanding of how it works. Steve's ideas centre on how creativity combined with technology can make for truly innovative interactive experiences.
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: skip-file """Layers for defining NCSNv3. """ from . import layers from . import up_or_down_sampling import flax.nn as nn import jax import jax.numpy as jnp import numpy as np conv1x1 = layers.ddpm_conv1x1 conv3x3 = layers.ddpm_conv3x3 NIN = layers.NIN default_init = layers.default_init class GaussianFourierProjection(nn.Module): """Gaussian Fourier embeddings for noise levels.""" def apply(self, x, embedding_size=256, scale=1.0): W = self.param('W', (embedding_size,), jax.nn.initializers.normal(stddev=scale)) W = jax.lax.stop_gradient(W) x_proj = x[:, None] * W[None, :] * 2 * jnp.pi return jnp.concatenate([jnp.sin(x_proj), jnp.cos(x_proj)], axis=-1) class Combine(nn.Module): """Combine information from skip connections.""" def apply(self, x, y, method='cat'): h = conv1x1(x, y.shape[-1]) if method == 'cat': return jnp.concatenate([h, y], axis=-1) elif method == 'sum': return h + y else: raise ValueError(f'Method {method} not recognized.') class AttnBlockv3(nn.Module): """Channel-wise self-attention block. Modified from DDPM.""" def apply(self, x, normalize, skip_rescale=False, init_scale=0.): B, H, W, C = x.shape h = normalize(x, num_groups=min(x.shape[-1] // 4, 32)) q = NIN(h, C) k = NIN(h, C) v = NIN(h, C) w = jnp.einsum('bhwc,bHWc->bhwHW', q, k) * (int(C) ** (-0.5)) w = jnp.reshape(w, (B, H, W, H * W)) w = jax.nn.softmax(w, axis=-1) w = jnp.reshape(w, (B, H, W, H, W)) h = jnp.einsum('bhwHW,bHWc->bhwc', w, v) h = NIN(h, C, init_scale=init_scale) if not skip_rescale: return x + h else: return (x + h) / np.sqrt(2.) 
class Upsample(nn.Module): def apply(self, x, out_ch=None, with_conv=False, fir=False, fir_kernel=[1, 3, 3, 1]): B, H, W, C = x.shape out_ch = out_ch if out_ch else C if not fir: h = jax.image.resize(x, (x.shape[0], H * 2, W * 2, C), 'nearest') if with_conv: h = conv3x3(h, out_ch) else: if not with_conv: h = up_or_down_sampling.upsample_2d(x, fir_kernel, factor=2) else: h = up_or_down_sampling.Conv2d( x, out_ch, kernel=3, up=True, resample_kernel=fir_kernel, bias=True, kernel_init=default_init()) assert h.shape == (B, 2 * H, 2 * W, out_ch) return h class Downsample(nn.Module): def apply(self, x, out_ch=None, with_conv=False, fir=False, fir_kernel=[1, 3, 3, 1]): B, H, W, C = x.shape out_ch = out_ch if out_ch else C if not fir: if with_conv: x = conv3x3(x, out_ch, stride=2) else: x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2), padding='SAME') else: if not with_conv: x = up_or_down_sampling.downsample_2d(x, fir_kernel, factor=2) else: x = up_or_down_sampling.Conv2d( x, out_ch, kernel=3, down=True, resample_kernel=fir_kernel, bias=True, kernel_init=default_init()) assert x.shape == (B, H // 2, W // 2, out_ch) return x class ResnetBlockDDPMv3(nn.Module): """ResBlock adapted from DDPM.""" def apply(self, x, act, normalize, temb=None, out_ch=None, conv_shortcut=False, dropout=0.1, train=True, skip_rescale=False, init_scale=0.): B, H, W, C = x.shape out_ch = out_ch if out_ch else C h = act(normalize(x, num_groups=min(x.shape[-1] // 4, 32))) h = conv3x3(h, out_ch) # Add bias to each feature map conditioned on the time embedding if temb is not None: h += nn.Dense( act(temb), out_ch, kernel_init=default_init())[:, None, None, :] h = act(normalize(h, num_groups=min(h.shape[-1] // 4, 32))) h = nn.dropout(h, dropout, deterministic=not train) h = conv3x3(h, out_ch, init_scale=init_scale) if C != out_ch: if conv_shortcut: x = conv3x3(x, out_ch) else: x = NIN(x, out_ch) if not skip_rescale: return x + h else: return (x + h) / np.sqrt(2.) class ResnetBlockBigGANv3(nn.Module): """ResBlock adapted from BigGAN.""" def apply(self, x, act, normalize, up=False, down=False, temb=None, out_ch=None, dropout=0.1, fir=False, fir_kernel=[1, 3, 3, 1], train=True, skip_rescale=True, init_scale=0.): B, H, W, C = x.shape out_ch = out_ch if out_ch else C h = act(normalize(x, num_groups=min(x.shape[-1] // 4, 32))) if up: if fir: h = up_or_down_sampling.upsample_2d(h, fir_kernel, factor=2) x = up_or_down_sampling.upsample_2d(x, fir_kernel, factor=2) else: h = up_or_down_sampling.naive_upsample_2d(h, factor=2) x = up_or_down_sampling.naive_upsample_2d(x, factor=2) elif down: if fir: h = up_or_down_sampling.downsample_2d(h, fir_kernel, factor=2) x = up_or_down_sampling.downsample_2d(x, fir_kernel, factor=2) else: h = up_or_down_sampling.naive_downsample_2d(h, factor=2) x = up_or_down_sampling.naive_downsample_2d(x, factor=2) h = conv3x3(h, out_ch) # Add bias to each feature map conditioned on the time embedding if temb is not None: h += nn.Dense( act(temb), out_ch, kernel_init=default_init())[:, None, None, :] h = act(normalize(h, num_groups=min(h.shape[-1] // 4, 32))) h = nn.dropout(h, dropout, deterministic=not train) h = conv3x3(h, out_ch, init_scale=init_scale) if C != out_ch or up or down: x = conv1x1(x, out_ch) if not skip_rescale: return x + h else: return (x + h) / np.sqrt(2.)
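A note on the skip_rescale branches above: for independent, zero-mean activations of equal variance, Var(x + h) = Var(x) + Var(h) = 2 Var(x), so dividing the residual sum by sqrt(2) keeps the output variance equal to the input variance instead of letting it grow with network depth; the plain x + h branch leaves that growth uncorrected.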
Wonderful take on Cheetara by Sydney-based artist Chris Wahl. Chris is a commercial artist and illustrator who has worked across a number of diverse formats. From Mad Magazine to Vallejo-style fantasy, to dark, brooding horror styles, he transcends into all corners of the imagination. Today we’re releasing three free ThunderCats-themed wallpapers. These images were used by Bandai on their website to market the 2011 Classic and New Series toys. Now they can be used to adorn your desktop background! They come in the two most popular screen resolutions. Enjoy! Well, the votes are in, and our Facebook Page competition for user-submitted Lion-O art pieces has determined the winner. This wonderful piece by resident Staff Artist Wilycub earned the majority of votes and is a tantalizing celebration of Lion-O crossing over into 18 other favourite cartoon properties. ThunderCats flashback – 1986 Burger King Kids Meal Pack commercial. Who remembers these? Go to the Facebook page to upload and like your favourites! This early draft model cel of Lynx-O reveals a possible direction that the animators and character designers were taking with his final look. This was more in line with Lion-O’s lotsa-legs look. Have you ever wondered if Chris Pratt (Guardians of the Galaxy) would be a good He-Man… well, the jury is still out, but he gave it a great try on Saturday Night Live last night. Watch this chuckle-worthy skit from last night’s premiere episode hosted by Chris. Today is the last day that US broadcast TV will air Saturday Morning Cartoon programming. And so ends an era.
# -*- coding: utf-8 -*-

import networkx as nx

'''
###reduce_t### --> route calculator
singleton pattern
1) mpls path calculation
2) end to end route calculation
'''


class RouteCalculator(object):
    # singleton
    _instance = None

    def __init__(self):
        super(RouteCalculator, self).__init__()
        # {
        # (dpid,dpid):[[dpid,dpid,dpid],[dpid,dpid,dpid,dpid],...],
        # (dpid,dpid):[[dpid,dpid,dpid],[dpid,dpid,dpid,dpid],...],
        # ...}
        self.path_table = dict()
        self.pre_path_table = dict()
        self.route_table = dict()
        self.pre_route_table = dict()

    @staticmethod
    def get_instance():
        if not RouteCalculator._instance:
            RouteCalculator._instance = RouteCalculator()
        return RouteCalculator._instance

    def get_path_table(self, matrix, dpids_to_access_port):
        if matrix:
            dpids = matrix.keys()
            g = nx.DiGraph()
            g.add_nodes_from(dpids)
            for i in dpids:
                for j in dpids:
                    if matrix[i][j] == 1:
                        g.add_edge(i, j, weight=1)
            edge_dpids = []
            for each_dpid in dpids_to_access_port:
                if len(dpids_to_access_port[each_dpid]) != 0:  # only edge switches have access ports
                    edge_dpids.append(each_dpid)
            return self.__graph_to_path(g, edge_dpids)

    def __graph_to_path(self, g, edge_dpids):
        # {(i,j):[i,k,l,j],(i,j):[],...}
        path_table = dict()
        for i in edge_dpids:
            for j in edge_dpids:
                if i != j:
                    path = []
                    try:
                        temp = nx.shortest_path(g, i, j)
                        # Only keep paths long enough to have at least
                        # three intermediate switches.
                        if len(temp) > 4:
                            path = temp
                    except nx.exception.NetworkXNoPath:
                        pass
                    path_table[(i, j)] = path
        return path_table

    def get_route_table(self, matrix, dpids_to_access_port):
        if matrix:
            dpids = matrix.keys()
            g = nx.DiGraph()
            g.add_nodes_from(dpids)
            for i in dpids:
                for j in dpids:
                    if matrix[i][j] == 1:
                        g.add_edge(i, j, weight=1)
            edge_dpids = []
            for each_dpid in dpids_to_access_port:
                if len(dpids_to_access_port[each_dpid]) != 0:
                    edge_dpids.append(each_dpid)
            return self.__graph_to_route(g, edge_dpids)

    def __graph_to_route(self, g, edge_dpids):
        route_table = dict()
        for i in edge_dpids:
            for j in edge_dpids:
                if i != j:
                    route = []
                    try:
                        route = nx.shortest_path(g, i, j)
                    except nx.exception.NetworkXNoPath:
                        pass
                    route_table[(i, j)] = route
        return route_table

    def get_path(self, src_dpid, dst_dpid):
        path = None
        if src_dpid != dst_dpid:
            path = self.path_table[(src_dpid, dst_dpid)]
        return path

    def get_route(self, src_dpid, dst_dpid):
        route = None
        if src_dpid != dst_dpid:
            route = self.route_table[(src_dpid, dst_dpid)]
        return route

    # ---------------------Print_to_debug------------------------
    def show_path_table(self):
        print "---------------------path_table---------------------"
        for pair in self.path_table.keys():
            print "pair:", pair
            for each in self.path_table[pair]:
                print each,
            print ""
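For context, a minimal usage sketch of the calculator above. The topology and dpid values are illustrative: a full-mesh triangle of three switches, each with one access port:

calc = RouteCalculator.get_instance()
matrix = {1: {1: 0, 2: 1, 3: 1},
          2: {1: 1, 2: 0, 3: 1},
          3: {1: 1, 2: 1, 3: 0}}
dpids_to_access_port = {1: [1], 2: [1], 3: [1]}
calc.route_table = calc.get_route_table(matrix, dpids_to_access_port)
assert calc.get_route(1, 2) == [1, 2]  # directly connected, one hop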
This hand-made round wooden magnet is made in the USA. An elk with Longs Peak in the background is pictured on the front of the magnet, with the words Rocky Mountain National Park surrounding the picture. Measures 2-1/4″ x 2-1/4″. The magnet is made from sustainably harvested red alder trees. Alder is found in the western United States and is sought after for musical instruments, carving, millwork and furniture. Native Americans used alder to make bowls, tool handles, and other small items.
""" Module provides function argument type and return type validators for @accept(a,b,c) and @returns(x,y,z) function decorators. This simplifies the formalisation all function validation, particularly useful for API interfaces definition. Usage Example: @accepts(int, int) @returns(int) def add(a, b) return a + b add(1,"3") Original Author: Jackson Cooper Article: Validate Python Function Parameter & Return Types with Decorators Published at: http://pythoncentral.io/validate-python-function-parameters-and-return-types-with-decorators/ Published: Tuesday 20th August 2013 Last Updated: Friday 23rd August 2013 """ import functools def accepts(*accepted_arg_types): ''' A decorator to validate the parameter types of a given function. It is passed a tuple of types. eg. (<type 'tuple'>, <type 'int'>) Note: It doesn't do a deep check, for example checking through a tuple of types. The argument passed must only be types. ''' def accept_decorator(validate_function): # Check if the number of arguments to the validator # function is the same as the arguments provided # to the actual function to validate. We don't need # to check if the function to validate has the right # amount of arguments, as Python will do this # automatically (also with a TypeError). @functools.wraps(validate_function) def decorator_wrapper(*function_args, **function_args_dict): if len(accepted_arg_types) is not len(accepted_arg_types): raise InvalidArgumentNumberError(validate_function.__name__) # We're using enumerate to get the index, so we can pass the # argument number with the incorrect type to ArgumentValidationError. for arg_num, (actual_arg, accepted_arg_type) in enumerate(zip(function_args, accepted_arg_types)): if not type(actual_arg) is accepted_arg_type: ord_num = ordinal(arg_num + 1) raise ArgumentValidationError(ord_num, validate_function.__name__, accepted_arg_type) return validate_function(*function_args) return decorator_wrapper return accept_decorator def returns(*accepted_return_type_tuple): ''' Validates the return type. Since there's only ever one return type, this makes life simpler. Along with the accepts() decorator, this also only does a check for the top argument. For example you couldn't check (<type 'tuple'>, <type 'int'>, <type 'str'>). In that case you could only check if it was a tuple. ''' def return_decorator(validate_function): # No return type has been specified. if len(accepted_return_type_tuple) == 0: raise TypeError('You must specify a return type.') @functools.wraps(validate_function) def decorator_wrapper(*function_args): # More than one return type has been specified. if len(accepted_return_type_tuple) > 1: raise TypeError('You must specify one return type.') # Since the decorator receives a tuple of arguments # and the is only ever one object returned, we'll just # grab the first parameter. accepted_return_type = accepted_return_type_tuple[0] # We'll execute the function, and # take a look at the return type. return_value = validate_function(*function_args) return_value_type = type(return_value) if return_value_type is not accepted_return_type: raise InvalidReturnType(return_value_type, validate_function.__name__) return return_value return decorator_wrapper return return_decorator class ArgumentValidationError(ValueError): ''' Raised when the type of an argument to a function is not what it should be. 
''' def __init__(self, arg_num, func_name, accepted_arg_type): self.error = 'The {0} argument of {1}() is not a {2}'.format(arg_num, func_name, accepted_arg_type) def __str__(self): return self.error class InvalidArgumentNumberError(ValueError): ''' Raised when the number of arguments supplied to a function is incorrect. Note that this check is only performed from the number of arguments specified in the validate_accept() decorator. If the validate_accept() call is incorrect, it is possible to have a valid function where this will report a false validation. ''' def __init__(self, func_name): self.error = 'Invalid number of arguments for {0}()'.format(func_name) def __str__(self): return self.error class InvalidReturnType(ValueError): ''' As the name implies, the return value is the wrong type. ''' def __init__(self, return_type, func_name): self.error = 'Invalid return type {0} for {1}()'.format(return_type, func_name) def __str__(self): return self.error
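A minimal usage sketch of the decorators above; the second call trips the argument type check:

@accepts(int, int)
@returns(int)
def add(a, b):
    return a + b

print(add(1, 2))  # -> 3

try:
    add(1, "3")
except ArgumentValidationError as error:
    # e.g. "The 2nd argument of add() is not a <type 'int'>" on Python 2
    print(error)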
Float through the season in the Cloud Top by Thing Thing. Designed to take you from day to night, it pairs a round neckline with back-lacing detail and elasticised cuffs. Style it with jeans and sneakers for a casual fall look. Our model is wearing a size AU 8 top. She usually takes a standard AU 8/Small, is 5’9in (175cm) tall, and has an 83cm bust, 80cm hips, and a 60cm waist.
""" This module implements methods for generating random connections between nodes in a graph. Method generate() will create all the necessary connections for the graph: dataset <-> dataset collection system <-> system collection dataset collection <-> collection system collection <-> collection dataset read <-> system input dataset write <-> system output """ from itertools import islice import random class ConnectionGenerator: """ A class to generate random connections between node ids, based on distribution maps. ... Attributes: dataset_count: Integer of how many datasets are in a graph. dataset_count_map: Dictionary int:int that maps number of datasets in collection to count of its collections. system_count: Integer of how many systems are in a graph. system_count_map: Dictionary int:int that maps number of systems in collection to count of system collections. dataset_read_count: Integer of how many dataset reads are in a graph. dataset_write_count: Integer of how many dataset writes are in a graph. system_input_count: Integer of how many system inputs are in a graph. system_output_count: Integer of how many system outputs are in a graph. dataset_read_count_map: Dictionary int:int that maps number of system inputs of dataset read to count of dataset reads. system_input_count_map: Dictionary int:int that maps number of dataset reads by system input to count of system inputs. dataset_write_count_map: Dictionary int:int that maps number of system outputs of dataset write to count of dataset writes. system_output_count_map: Dictionary int:int that maps number of dataset writes by system output to count of system outputs. dataset_collections_conn_collection: Dictionary int:[int] that maps collection id to dataset collection ids. system_collections_conn_collection: Dictionary int:[int] that maps collection id to system collection ids. datasets_conn_collection: Dictionary int:[int] that maps dataset collection id to dataset ids. systems_conn_collection: Dictionary int:[int] that maps system collection id to system ids. dataset_read_conn_systems: Dictionary int:[int] that maps dataset read id to system ids this dataset inputs to. dataset_write_conn_systems: Dictionary int:[int] that maps dataset write id to system ids this dataset outputs from. Methods: get_one_to_many_connections() Creates connections between an element and a group. Each element belongs to one group exactly. get_many_to_many_connections() Creates connections between two groups with many to many relationship. _dataset_to_dataset_collection() Generates dataset - dataset collection connections. _system_to_system_collection() Generates system - system collection connections. _dataset_read_to_system_input() Generates connections between dataset reads and system inputs. _dataset_write_to_system_output() Generates connections between dataset write and system outputs. generate() Generates all the needed connections for data dependency mapping graph. """ def __init__(self, dataset_params, system_params, dataset_to_system_params, collection_params): """ Args: dataset_params: DatasetParams object. system_params: SystemParams object. dataset_to_system_params: DatasetToSystemParams object. collection_params: CollectionParams object. 
""" self.dataset_count = dataset_params.dataset_count self.dataset_count_map = collection_params.dataset_count_map self.dataset_collection_count = collection_params.dataset_collection_count self.dataset_collection_count_map = collection_params.dataset_collection_count_map self.system_count = system_params.system_count self.system_count_map = collection_params.system_count_map self.system_collection_count = collection_params.system_collection_count self.system_collection_count_map = collection_params.system_collection_count_map self.dataset_read_count = dataset_to_system_params.dataset_read_count self.dataset_write_count = dataset_to_system_params.dataset_write_count self.system_input_count = dataset_to_system_params.system_input_count self.system_output_count = dataset_to_system_params.system_output_count self.dataset_read_count_map = dataset_to_system_params.dataset_read_count_map self.system_input_count_map = dataset_to_system_params.system_input_count_map self.dataset_write_count_map = dataset_to_system_params.dataset_write_count_map self.system_output_count_map = dataset_to_system_params.system_output_count_map self.dataset_collections_conn_collection = {} self.system_collections_conn_collection = {} self.datasets_conn_collection = {} self.systems_conn_collection = {} self.dataset_read_conn_systems = {} self.dataset_write_conn_systems = {} @staticmethod def get_one_to_many_connections(element_count, element_count_map): """Generate group id for each element, based on number of element in group distribution. Args: element_count: Total number of elements. element_count_map: Dictionary int:int that maps element count in a group to number of groups with that count. Returns: Dictionary int:[int] that maps group id to a list of element ids. """ # Create element ids. element_values = list(range(1, element_count + 1)) # Get number of elements for each group id from their count. elements_per_group = [i for i in element_count_map for _ in range(element_count_map[i])] # Randomise element ids and group ids. random.shuffle(element_values) random.shuffle(elements_per_group) # Split element ids into chunks to get connections for each group. group_to_elements = {} last_index = 0 for i in range(len(elements_per_group)): group_to_elements[i + 1] = element_values[last_index:last_index + elements_per_group[i]] last_index += elements_per_group[i] # In case we don't have a full config - assign rest of elements to a last group. if last_index != element_count - 1: group_to_elements[len(elements_per_group)] += element_values[last_index:] return group_to_elements @staticmethod def get_many_to_many_connections(element_1_count, element_2_count, element_1_count_map, element_2_count_map): """Generates random connections between elements of type 1 and type 2 that have many-to-many relationship. Generation is based on element count maps. The output distribution is expected to be exact for most counts, except for large element group outliers. Args: element_1_count: Total number of elements of type 1. element_2_count: Total number of elements of type 2. element_1_count_map: Dictionary int:int that maps element 1 count in element 2 group to number of elements 2. element_2_count_map: Dictionary int:int that maps element 2 count in element 1 group to number of elements 1. Returns: Dictionary that maps group 1 id to a list of group 2 ids. """ # Count zeros for each group. 
        element_1_zeros = element_1_count_map.get(0, 0)
        element_2_zeros = element_2_count_map.get(0, 0)

        # Create element ids.
        element_1_values = list(range(1, element_1_count - element_1_zeros + 1))
        element_2_values = list(range(1, element_2_count - element_2_zeros + 1))

        # Get the number of elements in each group and drop groups with 0 elements.
        elements_per_group_1 = [i for i in element_1_count_map for _ in range(element_1_count_map[i]) if i != 0]
        elements_per_group_2 = [i for i in element_2_count_map for _ in range(element_2_count_map[i]) if i != 0]

        element_1_group_counter = {i + 1: elements_per_group_1[i] for i in range(len(elements_per_group_1))}
        element_2_group_counter = {i + 1: elements_per_group_2[i] for i in range(len(elements_per_group_2))}

        # Create connection dictionary.
        element_1_conn_element_2 = {i: set() for i in element_1_values}

        # Loop until either side runs out of elements.
        while element_1_values and element_2_values:
            # Generate a random connection.
            element_1_gen = random.choice(element_1_values)
            element_2_gen = random.choice(element_2_values)

            # Check that the connection doesn't already exist.
            if element_2_gen not in element_1_conn_element_2[element_1_gen]:
                # Add to existing connections and reduce count.
                element_1_conn_element_2[element_1_gen].add(element_2_gen)
                element_1_group_counter[element_1_gen] -= 1
                element_2_group_counter[element_2_gen] -= 1

                # Once an id has all the connections it needs, remove it from the options.
                if element_1_group_counter[element_1_gen] == 0:
                    element_1_values.remove(element_1_gen)
                if element_2_group_counter[element_2_gen] == 0:
                    element_2_values.remove(element_2_gen)

            # Check if all leftover elements are already connected to this group.
            elif set(element_2_values).issubset(element_1_conn_element_2[element_1_gen]):
                element_1_values.remove(element_1_gen)

        return element_1_conn_element_2

    def _system_collection_to_collection(self):
        """Generates collection - system collection one to many connections."""
        self.system_collections_conn_collection = self.get_one_to_many_connections(self.system_collection_count,
                                                                                   self.system_collection_count_map)

    def _dataset_collection_to_collection(self):
        """Generates collection - dataset collection one to many connections."""
        self.dataset_collections_conn_collection = self.get_one_to_many_connections(self.dataset_collection_count,
                                                                                    self.dataset_collection_count_map)

    def _dataset_to_dataset_collection(self):
        """Generates dataset collection - dataset one to many connections."""
        self.datasets_conn_collection = self.get_one_to_many_connections(self.dataset_count,
                                                                         self.dataset_count_map)

    def _system_to_system_collection(self):
        """Generates system collection - system one to many connections."""
        self.systems_conn_collection = self.get_one_to_many_connections(self.system_count,
                                                                        self.system_count_map)

    def _dataset_read_to_system_input(self):
        """Generates dataset reads and system inputs many to many connections."""
        self.dataset_read_conn_systems = self.get_many_to_many_connections(self.dataset_read_count,
                                                                           self.system_input_count,
                                                                           self.dataset_read_count_map,
                                                                           self.system_input_count_map)

    def _dataset_write_to_system_output(self):
        """Generates dataset writes and system outputs many to many connections."""
        self.dataset_write_conn_systems = self.get_many_to_many_connections(self.dataset_write_count,
                                                                            self.system_output_count,
                                                                            self.dataset_write_count_map,
                                                                            self.system_output_count_map)

    def generate(self):
        """Generate all connections for a graph."""
        self._dataset_collection_to_collection()
self._system_collection_to_collection() self._dataset_to_dataset_collection() self._system_to_system_collection() self._dataset_read_to_system_input() self._dataset_write_to_system_output()
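A small usage sketch of the one-to-many helper above. The counts are illustrative, and the assignment itself is randomised:

# Distribute 5 datasets across 2 collections: one collection of 2 and one of 3.
counts = {2: 1, 3: 1}  # {datasets-per-collection: number-of-such-collections}
groups = ConnectionGenerator.get_one_to_many_connections(5, counts)
# e.g. {1: [4, 2], 2: [5, 1, 3]}
assert sorted(e for ids in groups.values() for e in ids) == [1, 2, 3, 4, 5]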
A troubled history of armed conflict between the Philippines government and various militant groups has uprooted millions of people from their homes during the past four decades. While a climate of peace has prevailed during the past five years, huge inequities remain between the wealthy and the extremely poor. Communities in the Philippines are highly vulnerable to recurring natural disasters such as typhoons, tsunamis, droughts and floods. In November 2013, Typhoon Haiyan, one of the strongest storms ever to make landfall, struck the Philippines, leaving hundreds of thousands of families homeless and without food or safe drinking water. A year later, Typhoon Hagupit threatened the same areas of the Philippines, and 700,000 people were evacuated from their homes. The situation has improved significantly for affected communities, but there is still a long way to go to help families rebuild their lives post-Haiyan for the long term. It is estimated that full recovery could take up to a decade. We have worked in the Philippines since 2000, helping families affected by natural disasters or displaced by conflict rebuild their lives. When Typhoon Haiyan struck, for example, our teams responded immediately. During the first weeks, we sent 12 aircraft with 250 tons of critical emergency supplies to more than half a million people. Treating and preventing malnutrition: we are screening and treating children under five for malnutrition, supporting pregnant and breastfeeding mothers, and strengthening the capacity of government agencies to fight malnutrition. Recovering livelihoods: we are supporting local markets, facilitating cash transfers, and providing vouchers for the most vulnerable families so they can purchase materials for rebuilding their homes and livelihoods. Providing access to safe water and sanitation: we are installing water points, repairing sanitation networks, and training communities to maintain them. Providing disaster risk management: our interventions always include a disaster-prevention component so that families are in a better position to face future disasters.
#! /usr/bin/env python ''' patch_edit.py: PatchEdit major mode Copyright (c) 2010 Bill Gribble <[email protected]> ''' from ..input_mode import InputMode from .autoplace import AutoplaceMode from .selection import SingleSelectionEditMode, MultiSelectionEditMode from ..text_element import TextElement from ..processor_element import ProcessorElement from ..connection_element import ConnectionElement from ..message_element import MessageElement from ..enum_element import EnumElement from ..plot_element import PlotElement from ..slidemeter_element import FaderElement, BarMeterElement, DialElement from ..via_element import SendViaElement, ReceiveViaElement from ..via_element import SendSignalViaElement, ReceiveSignalViaElement from ..button_element import BangButtonElement, ToggleButtonElement, ToggleIndicatorElement class PatchEditMode (InputMode): def __init__(self, window): self.manager = window.input_mgr self.window = window self.autoplace_mode = None self.autoplace_x = None self.autoplace_y = None self.selection_edit_mode = None InputMode.__init__(self, "Edit patch", "Edit") self.bind('ESC', self.window.control_major_mode, "Exit edit mode") self.bind("p", lambda: self.add_element(ProcessorElement), "Add processor box") self.bind("m", lambda: self.add_element(MessageElement), "Add message box") self.bind("n", lambda: self.add_element(EnumElement), "Add number box") self.bind("t", lambda: self.add_element(TextElement), "Add text comment") self.bind("u", lambda: self.add_element(ToggleButtonElement), "Add toggle button") self.bind("g", lambda: self.add_element(BangButtonElement), "Add bang button") self.bind("i", lambda: self.add_element(ToggleIndicatorElement), "Add on/off indicator") self.bind("s", lambda: self.add_element(FaderElement), "Add slider") self.bind("b", lambda: self.add_element(BarMeterElement), "Add bar meter") self.bind("d", lambda: self.add_element(DialElement), "Add dial control") self.bind("x", lambda: self.add_element(PlotElement), "Add X/Y plot") self.bind("v", lambda: self.add_element(SendViaElement), "Add send message via") self.bind("V", lambda: self.add_element(ReceiveViaElement), "Add receive message via") self.bind("A-v", lambda: self.add_element(SendSignalViaElement), "Add send signal via") self.bind("A-V", lambda: self.add_element(ReceiveSignalViaElement), "Add receive signal via") self.bind("C-x", self.cut, "Cut selection to clipboard") self.bind("C-c", self.copy, "Copy selection to clipboard") self.bind("C-v", self.paste, "Paste clipboard to selection") self.bind("C-d", self.duplicate, "Duplicate selection") self.bind("C-n", self.window.layer_new, "Create new layer") self.bind("C-N", self.window.layer_new_scope, "Create new layer in a new scope") self.bind("C-U", self.window.layer_move_up, "Move current layer up") self.bind("C-D", self.window.layer_move_down, "Move current layer down") self.bind("TAB", self.select_next, "Select next element") self.bind("S-TAB", self.select_prev, "Select previous element") self.bind("C-TAB", self.select_mru, "Select most-recent element") self.bind("C-a", self.select_all, "Select all (in this layer)") self.bind("a", self.auto_place_below, "Auto-place below") self.bind("A", self.auto_place_above, "Auto-place above") self.window.add_callback("select", self.selection_changed_cb) self.window.add_callback("unselect", self.selection_changed_cb) def selection_changed_cb(self, obj): if not self.enabled: return False if self.window.selected: self.update_selection_mode() else: self.disable_selection_mode() def add_element(self, factory): 
self.window.unselect_all() if self.autoplace_mode is None: self.window.add_element(factory) else: dx = factory.style_defaults.get('autoplace-dx', 0) dy = factory.style_defaults.get('autoplace-dy', 0) self.window.add_element(factory, self.autoplace_x + dx, self.autoplace_y + dy) self.manager.disable_minor_mode(self.autoplace_mode) self.autoplace_mode = None self.update_selection_mode() return True def auto_place_below(self): self.autoplace_mode = AutoplaceMode(self.window, callback=self.set_autoplace, initially_below=True) self.manager.enable_minor_mode(self.autoplace_mode) return True def auto_place_above(self): self.autoplace_mode = AutoplaceMode(self.window, callback=self.set_autoplace, initially_below=False) self.manager.enable_minor_mode(self.autoplace_mode) return True def set_autoplace(self, x, y): self.autoplace_x = x self.autoplace_y = y if x is None and y is None: self.manager.disable_minor_mode(self.autoplace_mode) self.autoplace_mode = None return True def select_all(self): self.window.select_all() self.update_selection_mode() def select_next(self): self.window.select_next() self.update_selection_mode() return True def select_prev(self): self.window.select_prev() self.update_selection_mode() return True def select_mru(self): self.window.select_mru() self.update_selection_mode() return True def update_selection_mode(self): if len(self.window.selected) > 1: if isinstance(self.selection_edit_mode, SingleSelectionEditMode): self.manager.disable_minor_mode(self.selection_edit_mode) self.selection_edit_mode = None if not self.selection_edit_mode: self.selection_edit_mode = MultiSelectionEditMode(self.window) self.manager.enable_minor_mode(self.selection_edit_mode) elif len(self.window.selected) == 1: if isinstance(self.selection_edit_mode, MultiSelectionEditMode): self.manager.disable_minor_mode(self.selection_edit_mode) self.selection_edit_mode = None if not self.selection_edit_mode: self.selection_edit_mode = SingleSelectionEditMode(self.window) self.manager.enable_minor_mode(self.selection_edit_mode) return True def disable_selection_mode(self): if self.selection_edit_mode is not None: self.manager.disable_minor_mode(self.selection_edit_mode) self.selection_edit_mode = None return True def enable(self): self.enabled = True self.manager.global_mode.allow_selection_drag = True self.update_selection_mode() def disable(self): self.enabled = False if self.autoplace_mode: self.manager.disable_minor_mode(self.autoplace_mode) self.autoplace_mode = None self.disable_selection_mode() def cut(self): return self.window.clipboard_cut((self.manager.pointer_x, self.manager.pointer_y)) def copy(self): return self.window.clipboard_copy((self.manager.pointer_x, self.manager.pointer_y)) def paste(self): return self.window.clipboard_paste() def duplicate(self): self.window.clipboard_copy((self.manager.pointer_x, self.manager.pointer_y)) return self.window.clipboard_paste()
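The binding pattern above generalises to other modes. Purely as an illustrative sketch (InputMode's constructor arguments and the bind() signature are assumed from the calls in PatchEditMode, not from mfp documentation):

class ZoomInMode(InputMode):
    def __init__(self, window):
        self.window = window
        InputMode.__init__(self, "Zoom", "Zoom")
        # bind(key, callback, help-text), as used throughout PatchEditMode
        self.bind("+", self.zoom_in, "Zoom in on the patch")

    def zoom_in(self):
        # A real implementation would scale the window's viewport here.
        return True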
Burntisland Links: The Links was granted by Royal Charter in 1541 to the Burgh of Burntisland by King James V of Scotland. Facilities include pitch and putt in the summer and a play area all year round. The Burntisland Highland Games are held on the third Monday in July and are Scotland’s second-oldest games, dating back to 1652. A fairground is located on the links between May and August every year.
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. # Python import logging from urlparse import urljoin # Django from django.conf import settings from django.db import models from django.utils.text import Truncator from django.utils.translation import ugettext_lazy as _ from django.core.exceptions import ValidationError # AWX from awx.api.versioning import reverse from awx.main.models.base import * # noqa from awx.main.models.events import AdHocCommandEvent from awx.main.models.unified_jobs import * # noqa from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate logger = logging.getLogger('awx.main.models.ad_hoc_commands') __all__ = ['AdHocCommand'] class AdHocCommand(UnifiedJob, JobNotificationMixin): class Meta(object): app_label = 'main' diff_mode = models.BooleanField( default=False, ) job_type = models.CharField( max_length=64, choices=AD_HOC_JOB_TYPE_CHOICES, default='run', ) inventory = models.ForeignKey( 'Inventory', related_name='ad_hoc_commands', null=True, on_delete=models.SET_NULL, ) limit = models.CharField( max_length=1024, blank=True, default='', ) credential = models.ForeignKey( 'Credential', related_name='ad_hoc_commands', null=True, default=None, on_delete=models.SET_NULL, ) module_name = models.CharField( max_length=1024, default='', blank=True, ) module_args = models.TextField( blank=True, default='', ) forks = models.PositiveIntegerField( blank=True, default=0, ) verbosity = models.PositiveIntegerField( choices=VERBOSITY_CHOICES, blank=True, default=0, ) become_enabled = models.BooleanField( default=False, ) hosts = models.ManyToManyField( 'Host', related_name='ad_hoc_commands', editable=False, through='AdHocCommandEvent', ) extra_vars = prevent_search(models.TextField( blank=True, default='', )) extra_vars_dict = VarsDictProperty('extra_vars', True) def clean_inventory(self): inv = self.inventory if not inv: raise ValidationError(_('No valid inventory.')) return inv def clean_credential(self): cred = self.credential if cred and cred.kind != 'ssh': raise ValidationError( _('You must provide a machine / SSH credential.'), ) return cred def clean_limit(self): # FIXME: Future feature - check if no hosts would match and reject the # command, instead of having to run it to find out. 
return self.limit def clean_module_name(self): if type(self.module_name) not in (str, unicode): raise ValidationError(_("Invalid type for ad hoc command")) module_name = self.module_name.strip() or 'command' if module_name not in settings.AD_HOC_COMMANDS: raise ValidationError(_('Unsupported module for ad hoc commands.')) return module_name def clean_module_args(self): if type(self.module_args) not in (str, unicode): raise ValidationError(_("Invalid type for ad hoc command")) module_args = self.module_args if self.module_name in ('command', 'shell') and not module_args: raise ValidationError(_('No argument passed to %s module.') % self.module_name) return module_args @property def event_class(self): return AdHocCommandEvent @property def passwords_needed_to_start(self): '''Return list of password field names needed to start the job.''' if self.credential: return self.credential.passwords_needed else: return [] def _get_parent_field_name(self): return '' @classmethod def _get_task_class(cls): from awx.main.tasks import RunAdHocCommand return RunAdHocCommand @classmethod def supports_isolation(cls): return True def get_absolute_url(self, request=None): return reverse('api:ad_hoc_command_detail', kwargs={'pk': self.pk}, request=request) def get_ui_url(self): return urljoin(settings.TOWER_URL_BASE, "/#/jobs/command/{}".format(self.pk)) @property def notification_templates(self): all_orgs = set() for h in self.hosts.all(): all_orgs.add(h.inventory.organization) active_templates = dict(error=set(), success=set(), any=set()) base_notification_templates = NotificationTemplate.objects for org in all_orgs: for templ in base_notification_templates.filter(organization_notification_templates_for_errors=org): active_templates['error'].add(templ) for templ in base_notification_templates.filter(organization_notification_templates_for_success=org): active_templates['success'].add(templ) for templ in base_notification_templates.filter(organization_notification_templates_for_any=org): active_templates['any'].add(templ) active_templates['error'] = list(active_templates['error']) active_templates['any'] = list(active_templates['any']) active_templates['success'] = list(active_templates['success']) return active_templates def get_passwords_needed_to_start(self): return self.passwords_needed_to_start @property def task_impact(self): # NOTE: We sorta have to assume the host count matches and that forks default to 5 from awx.main.models.inventory import Host count_hosts = Host.objects.filter( enabled=True, inventory__ad_hoc_commands__pk=self.pk).count() return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1 def copy(self): data = {} for field in ('job_type', 'inventory_id', 'limit', 'credential_id', 'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars', 'become_enabled', 'diff_mode'): data[field] = getattr(self, field) return AdHocCommand.objects.create(**data) def save(self, *args, **kwargs): update_fields = kwargs.get('update_fields', []) if not self.name: self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512) if 'name' not in update_fields: update_fields.append('name') super(AdHocCommand, self).save(*args, **kwargs) @property def preferred_instance_groups(self): if self.inventory is not None and self.inventory.organization is not None: organization_groups = [x for x in self.inventory.organization.instance_groups.all()] else: organization_groups = [] if self.inventory is not None: inventory_groups = [x for x in self.inventory.instance_groups.all()] 
else: inventory_groups = [] selected_groups = inventory_groups + organization_groups if not selected_groups: return self.global_instance_groups return selected_groups ''' JobNotificationMixin ''' def get_notification_templates(self): return self.notification_templates def get_notification_friendly_name(self): return "AdHoc Command"
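As a rough sketch of how the model above is exercised. This is illustrative only: it assumes a configured AWX environment with existing Inventory and ssh-kind Credential rows, and that the base model wires the clean_* hooks above into full_clean():

from awx.main.models.ad_hoc_commands import AdHocCommand

cmd = AdHocCommand(
    job_type='run',
    inventory=inventory,      # an existing awx.main.models.Inventory (placeholder)
    credential=credential,    # an existing Credential of kind 'ssh' (placeholder)
    module_name='shell',
    module_args='uptime',
)
cmd.full_clean()  # runs the clean_inventory/clean_credential/clean_module_* validators above
cmd.save()        # save() derives the name -> u'shell: uptime' when unset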
The transmitter is designed to convert an RTD Pt100 sensor signal. Various measurement ranges are configurable. The standardized output signal used for process measurement is a 4 to 20 mA signal. This enables fast, easy and cost-saving temperature measurement as well as reliable and precise measured values for a wide range of non-Ex industrial applications.
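For reference, a 4 to 20 mA output maps the configured measurement span linearly onto the 16 mA live range. A minimal sketch; the 0 to 150 °C range here is only an example of a configurable span:

def temp_to_current_ma(t_c, t_min=0.0, t_max=150.0):
    """Map a temperature within the configured span to a 4-20 mA signal."""
    return 4.0 + 16.0 * (t_c - t_min) / (t_max - t_min)

assert temp_to_current_ma(0.0) == 4.0     # bottom of span -> 4 mA
assert temp_to_current_ma(75.0) == 12.0   # mid-span -> 12 mA
assert temp_to_current_ma(150.0) == 20.0  # top of span -> 20 mA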