def extractCleverneckoHomeBlog(item):
    '''
    Parser for 'clevernecko.home.blog'
    '''
    badwords = [
        'movie review',
        'badword',
    ]
    if any([bad in item['tags'] for bad in badwords]):
        return None

    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    tagmap = [
        ('your husband’s leg is broken',               'your husband’s leg is broken',               'translated'),
        ('the case of the 27 knife stabs',             'the case of the 27 knife stabs',             'translated'),
        ('Fate',                                       'Fate, something so wonderful',               'translated'),
        ('kimi no shiawase wo negatteita',             'kimi no shiawase wo negatteita',             'translated'),
        ('warm waters',                                'warm waters',                                'translated'),
        ('after being marked by a powerful love rival', 'after being marked by a powerful love rival', 'translated'),
        ('PRC',                                        'PRC',                                        'translated'),
        ('Loiterous',                                  'Loiterous',                                  'oel'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
Manufactured in Toronto facilities to global standards, SpiderTape uses high-grade cotton combined with an acrylic, hypoallergenic adhesive to provide outstanding feel on the body and great support. Applications last for up to 5 days and are breathable and water resistant. Perfect for the clinician and the everyday user, SpiderTape provides outstanding value at this price. Continuous roll, 50mm x 5m. Scissors required.
import csv

party_names = [
    ('Conservative', 'cpc'),
    ('NDP', 'ndp'),
    ('Liberal', 'lpc'),
    ('Bloc', 'bq'),
    ('Green', 'gpc'),
    ('Other', 'oth'),
]

ridings = {}

def AddVotes(riding_number, party_code, additional_votes):
    if riding_number not in ridings:
        ridings[riding_number] = {}
    riding = ridings[riding_number]
    if party_code not in riding:
        riding[party_code] = 0
    riding[party_code] += additional_votes

def NormalizeDictVector(v):
    norm = {}
    divisor = sum(v.values())
    for key, value in v.items():
        norm[key] = float(value) / divisor
    return norm

with open('TRANSPOSITION_338FED.csv', 'rb') as input_file:
    # Skip the first few lines of the file, to get to the data part.
    for i in range(4):
        next(input_file)
    reader = csv.DictReader(input_file)
    for row in reader:
        riding_number = row['2013 FED Number']
        riding_name = row['2013 FED Name']
        for column_header, value in row.items():
            try:
                value = int(value)
            except:
                continue
            for party_name, party_code in party_names:
                if column_header.startswith(party_name):
                    AddVotes(riding_number, party_code, value)

with open('redistributed_2011_results.csv', 'wb') as output_file:
    writer = csv.writer(output_file)
    writer.writerow(['riding', 'date', 'sample_size'] + [p for _, p in party_names])
    for riding_number, vote_counts in ridings.items():
        vote_fractions = NormalizeDictVector(vote_counts)
        ordered_vote_fractions = [vote_fractions[p] for _, p in party_names]
        sample_size = sum(vote_counts.values())
        writer.writerow([riding_number, '2011-05-02', sample_size] + ordered_vote_fractions)
The v1.10.6 series comes with new and improved sequence, structure, and dynamics analysis features; see the release notes for details. This tutorial shows how to use the ANM to analyze the motions of proteins in the presence of membranes. The latest version of ProDy is required for calculations, and the latest version of VMD is required for visualization. First, we will make the necessary imports from the ProDy and Matplotlib packages. We have included these imports in every part of the tutorial, so that code copied from the online pages is complete. You do not need to repeat the imports within the same Python session.
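As a rough sketch of the imports the tutorial text refers to, a typical interactive ProDy session begins like the following; the exact names used in later parts of the tutorial may differ, so treat this only as an illustration of the usual setup:

    from prody import *   # ProDy analysis tools (parsePDB, ANM, etc.)
    from pylab import *   # Matplotlib's pylab interface for plotting
    ion()                 # turn on Matplotlib's interactive mode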
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

''' version.py '''
import heron.tools.common.src.python.utils.config as config
import heron.tools.explorer.src.python.args as args


def create_parser(subparsers):
  """ create parser """
  parser = subparsers.add_parser(
      'version',
      help='Display version',
      usage="%(prog)s",
      add_help=False)
  args.add_titles(parser)
  parser.set_defaults(subcommand='version')
  return parser


# pylint: disable=unused-argument
def run(command, parser, known_args, unknown_args):
  """ run command """
  config.print_build_info()
  return True
In the SOUL series, the choreographer duo Jérôme Meyer and Isabelle Chaffaud further deepen their search for man’s true nature. After SOUL #1, in which the audience was the centre, the public was offered a rare glimpse into the internal world of the dancers in SOUL #2. In SOUL #3 Co-creation, choreographers Meyer and Chaffaud create a temporary mini-society that includes everyone in the auditorium. In this surprising performance, the duo holds a magnifying glass to what happens between people when they enter into a creative process. SOUL #3 Co-creation is a multidisciplinary dance journey, a social experiment, a danced conference, and also a celebration; with SOUL #3 the duo celebrates 15 years of working together. In SOUL #2 Performers, the audience gets a very personal view into the inner worlds of six top dancers from different dance generations. They share their stories in dance as well as text (the performance is partly spoken in English, which makes it accessible to non-Dutch speakers). Among them are David Krugel (who danced with the former Nederlands Dans Theater 3), Claire Hermans (nominee for the Piket Art Prizes 2017) and Kinda Gozo (winner of the Piket Art Prizes 2018). In their artistic careers, Jérôme Meyer and Isabelle Chaffaud have developed from excellent dancers, performing with world-renowned companies such as Nederlands Dans Theater 1 and Batsheva Dance Company, into award-winning choreographers whose work has been performed by, among others, Scapino Ballet Rotterdam, Introdans, Dance Works Rotterdam/André Gingras and Korzo Productions. The dance style of MEYER-CHAFFAUD is raw and poetic, groovy and always personal. With their adventurous and at the same time sensitive approach, Jérôme and Isabelle are able to woo an audience of dance lovers as well as win over a new generation of theatre audiences for their work. SOUL #2 Performers is the second MEYER-CHAFFAUD production since the choreographers spread their wings after a long-lasting close co-operation with dance production house Korzo. This performance is realised with the financial support of Fonds Podiumkunsten and the Municipality of The Hague. Facilitating co-producer: Korzo Productions. The performance in Russia is made possible with financial support from the WeJansen fonds.
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.ads.googleads.v7.enums.types import keyword_plan_network as gage_keyword_plan_network


__protobuf__ = proto.module(
    package='google.ads.googleads.v7.resources',
    marshal='google.ads.googleads.v7',
    manifest={
        'KeywordPlanCampaign',
        'KeywordPlanGeoTarget',
    },
)


class KeywordPlanCampaign(proto.Message):
    r"""A Keyword Plan campaign.
    Max number of keyword plan campaigns per plan allowed: 1.

    Attributes:
        resource_name (str):
            Immutable. The resource name of the Keyword Plan campaign.
            KeywordPlanCampaign resource names have the form:

            ``customers/{customer_id}/keywordPlanCampaigns/{kp_campaign_id}``
        keyword_plan (str):
            The keyword plan this campaign belongs to.
        id (int):
            Output only. The ID of the Keyword Plan campaign.
        name (str):
            The name of the Keyword Plan campaign.
            This field is required and should not be empty when
            creating Keyword Plan campaigns.
        language_constants (Sequence[str]):
            The languages targeted for the Keyword Plan campaign.
            Max allowed: 1.
        keyword_plan_network (google.ads.googleads.v7.enums.types.KeywordPlanNetworkEnum.KeywordPlanNetwork):
            Targeting network.
            This field is required and should not be empty when
            creating Keyword Plan campaigns.
        cpc_bid_micros (int):
            A default max cpc bid in micros, and in the account
            currency, for all ad groups under the campaign.
            This field is required and should not be empty when
            creating Keyword Plan campaigns.
        geo_targets (Sequence[google.ads.googleads.v7.resources.types.KeywordPlanGeoTarget]):
            The geo targets.
            Max number allowed: 20.
    """

    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
    keyword_plan = proto.Field(
        proto.STRING,
        number=9,
        optional=True,
    )
    id = proto.Field(
        proto.INT64,
        number=10,
        optional=True,
    )
    name = proto.Field(
        proto.STRING,
        number=11,
        optional=True,
    )
    language_constants = proto.RepeatedField(
        proto.STRING,
        number=12,
    )
    keyword_plan_network = proto.Field(
        proto.ENUM,
        number=6,
        enum=gage_keyword_plan_network.KeywordPlanNetworkEnum.KeywordPlanNetwork,
    )
    cpc_bid_micros = proto.Field(
        proto.INT64,
        number=13,
        optional=True,
    )
    geo_targets = proto.RepeatedField(
        proto.MESSAGE,
        number=8,
        message='KeywordPlanGeoTarget',
    )


class KeywordPlanGeoTarget(proto.Message):
    r"""A geo target.

    Attributes:
        geo_target_constant (str):
            Required. The resource name of the geo target.
    """

    geo_target_constant = proto.Field(
        proto.STRING,
        number=2,
        optional=True,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
For your search "Love Of My Life Live At Rock In Rio Queen MP3" we found "1000^100" matching songs, but only the top 15-20 results are shown. The first result is "Download Love Of My Life Rock In Rio 1985 MP3", uploaded by 'Eduardo Botelho'; it is 6.14 MB in size, 4 minutes and 40 seconds long, and available at 320kbps, 256kbps, and 180kbps. Before downloading, you can preview the song by hovering over or selecting the "PLAY" button, then click the DOWNLOAD button to download high-quality songs from mp3songdownloader.com in .mp3 and .mp4 formats. The first search result comes from YouTube and must be converted first; search results from other sources can be downloaded directly as an MP3 file without any conversion or forwarding. There are other MP3 songs you can download, such as "Download Love Of My Life Queen In Rock In Rio 85 HD" or "Queen Adam Lambert Love Of My Life Live At Rock In Rio 2015" on http://mp3songdownloader.com. You can also download the latest Hindi, Tamil, Telugu, and Malayalam hit songs from here at no cost. We hope you found the Love Of My Life Live At Rock In Rio Queen related MP3 and video.
# -*- coding:utf-8 -*-
import Cookie
import datetime

__all__ = ['Response']


class Response(object):
    status = "200 OK"

    def __init__(self, output="", request=None):
        self.output = output
        self.request = request
        self.cookies = Cookie.SimpleCookie()
        self.response_headers = [('Content-Type', 'text/html')]

    def __call__(self, environ, start_response):
        output = self.output if self.status.split(' ', 1)[0] == '200' else self.html
        response_headers = self.response_headers
        response_headers.extend(tuple(cookie.split(':', 1))
                                for cookie in self.cookies.output().split('\r\n') if cookie)
        response_headers.append(('Content-Length', str(len(output))))
        start_response(self.status, response_headers)
        return [output]

    def set_cookie(self, name, value, domain=None, path='/', expires=None,
                   max_age=None, secure=None, httponly=None, version=None):
        self.cookies[name] = value
        self.cookies[name]["path"] = path
        if domain:
            self.cookies[name]["domain"] = domain
        if expires:
            expiration = datetime.datetime.now() + datetime.timedelta(days=expires)
            self.cookies[name]["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
        if max_age:
            self.cookies[name]["max-age"] = max_age
        if secure:
            self.cookies[name]["secure"] = secure
        if httponly:
            self.cookies[name]["httponly"] = httponly
Only the health & fitness apps with the best user experiences can truly keep users engaged and committed to their challenging goals. While we work with people at all fitness levels, we place a special focus on helping those not accustomed to physical activity to develop safe and effective exercise routines. Working with you and your physician to set your personal fitness goals, we are able to help you achieve short-term and long-term health benefits. Whether it is training on fitness machines, training with weights, circuit and athletics training, boxercise, gymnastics, or running, efforts are continuously being made to improve and refine all the classics from the fitness industry that have traditionally proven popular with customers. With the growing home fitness market and the traditional gym sector forming the backbone of the industry, one area in hall A6 will be devoted to side events and talks about health, nutrition and fitness in the Health & Fitness Forum and at the Nutrition Bar. Through a New Jersey State Department of Health & Human Services grant, the JFK for Life Health & Fitness Center offers evidence-based workshops to help local seniors address and overcome their personal fear of falling and increase their overall activity level. The sessions focus on exercise, balance, and safety. To find out more about our Falls Prevention Classes taking place locally, please contact us at 732.632.1610. Cardio exercise is any exercise that raises your heart rate. Face it, our bodies were made to move, and we all know that to keep our muscles in shape we need to move them. Our cardio equipment and classes are designed to encourage muscle movement for a more efficient and healthy body. Take advantage of our skilled teachers, state-of-the-art … Read More.. The foundation for healthy lifestyles. While we work with individuals at all fitness levels, we place a special focus on helping those not accustomed to physical activity to develop safe and effective exercise routines. Working with you and your physician to set your personal health objectives, we are ready to help you achieve short-term and long-term health benefits. Good dental health and hygiene are direct indicators of general health, and can also boost people’s confidence and wellbeing. Work in a secure, growing industry where you can deliver professional advice to help people of all ages maintain good dental health and a winning smile. Take advantage of our skilled lecturers, state-of-the-art facilities and extensive industry connections to gain specialist skills and find your dream job.
Choose careers in everything from healthcare support to personal training, or take a TAFE NSW pathway to a degree. If fitness and health are your obsession, take a look at the latest celebrity exercise trends, from yoga to pilates, and everything … Read More.. Strength training can be defined as an exercise type designed to increase lean muscle tissue, improve structural strength, decrease excess body fat, increase endurance, and provide several additional physical and psychological benefits. Take advantage of our professional lecturers, state-of-the-art facilities and extensive industry connections to gain specialist expertise and find your dream job. Choose careers in everything from healthcare support to personal coaching, or take a TAFE NSW pathway to a degree. CC-AASPs are working to better understand the factors that contribute to initial and sustained involvement in physical activity across age groups, gender, ethnicity, and other individual difference factors. They are also interested in exploring the positive psychological outcomes related to exercise and physical activity. This information is essential to designing programs that are most likely to motivate individuals to become more physically active, and to maintain those positive behaviors over time. If fitness and health are your obsession, check out the latest celebrity exercise trends, from yoga to pilates, and everything in between. Whether your goal is to find a weight-loss diet plan or you simply want to learn how to tone your stomach, thighs or arms with the most on-trend, celebrity-approved core exercises and crunches, top trainers share their best tips to get in shape fast. Plus: discover all the latest fitness tools and healthy-diet ideas. Through a New Jersey State Department of Health & Human Services grant, the JFK for Life Health & Fitness Center provides evidence-based workshops to help local seniors address and overcome their personal fear of falling and increase their overall activity level. The sessions concentrate on exercise, balance, and safety. To find out more about our Falls Prevention Classes taking place in the neighborhood, please contact us at … Read More.. Strength training can be defined as an exercise type designed to increase lean muscle tissue, improve structural strength, decrease excess body fat, improve endurance, and provide several additional physical and psychological benefits. You will spend most of your time on this timeline, where live updates about the things that matter to you will be shown. I believe variety is an important component of any fitness program. Variety in your program will continually challenge your body and your mind, thereby improving adherence to your exercise program. At the JFK for Life Health & Fitness Center, we believe that both a healthy mind and a healthy body are essential to healthy living. That is why we approach health and fitness from a medical perspective. While it is important to care for those who are ill, it is equally important to continuously improve the quality of life for those who are healthy. The Business Improvement Manager will be responsible for identifying new opportunities for business growth (including revenue streams, business partnerships, advertising and sales development), supporting the Head of Business Growth in steering and securing a-n’s future in line with the company’s mission and business plan.
a-n has an ambitious forward business plan, a growing membership and the success of influential and impactful campaigns and programmes to build on. We are looking for an exceptional person with experience of revenue generation and business development to join our core staff team at this exciting time for the company. Watches, fitness trackers, health monitors, and whatever comes next. These are just a few of the growing number of applications for intelligent electronics that extend our senses and provide us with real-time data on our health and fitness. As these devices and their uses multiply, we are developing ways to extend their operating … Read More.. Only the health & fitness apps with the best user experiences can actually keep users engaged and committed to their challenging goals. The most widely accepted definition of health is that of the World Health Organization Constitution. It states: "health is a state of complete physical, mental and social well-being and not merely the absence of disease or infirmity" (World Health Organization, 1946). In more recent years, this statement has been amplified to include the ability to lead a "socially and economically productive life". The WHO definition is not without criticism, mainly that it is too broad. Some argue that health cannot be defined as a state at all, but should be seen as a dynamic process of continuous adjustment to the changing demands of living. Despite its limitations, the concept of health as defined by the WHO is broad and constructive in its implications, in that it sets out a high standard for positive health. Body Mass Index (BMI) Calculator: BMI, or body mass index, measures how healthy your weight is based on how tall you are (a short worked example of the formula follows this excerpt). It gives you a clue to your risk for weight-related health problems. Beyond the standard gym basics, we offer personal training, a sauna and a steam room. The content of this Web Site and the West End Health & Fitness brand, trademark, trade dress and copyright, as owned by 752 WEA Gym LLC, or one of its affiliates, are protected by all applicable United States intellectual property laws. No right, title or interest in any downloaded materials is transferred to you as a result of downloading such material. The content on this website is provided on an "as is" basis by 752 WEA Gym LLC without any warranties of … Read More..
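To make the BMI formula mentioned above concrete, here is a minimal sketch in Python (matching the other code in this collection); the function name and example values are illustrative, the formula itself is just weight in kilograms divided by height in metres squared:

    def bmi(weight_kg, height_m):
        # Standard BMI formula: weight (kg) divided by height (m) squared.
        return weight_kg / (height_m ** 2)

    # Example: a 70 kg person who is 1.75 m tall
    print(round(bmi(70.0, 1.75), 1))  # prints 22.9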
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django_extensions.db.fields
import audit_log.models.fields


class Migration(migrations.Migration):

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(max_length=40, null=True, editable=False)),
                ('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(max_length=40, null=True, editable=False)),
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                ('object_id', models.PositiveIntegerField()),
                ('comment', models.TextField()),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
                ('created_by', audit_log.models.fields.CreatingUserField(related_name='created_cycomments_comment_set', editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='created by')),
                ('modified_by', audit_log.models.fields.LastUserField(related_name='modified_cycomments_comment_set', editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='modified by')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
Watch Heist online free in English, in full, on 123Movies, Fmovies, or KissAnime. When their attempt to rob a gangster’s casino goes awry, a desperate man and his partner hijack a city bus to escape from the police and a maniacal thug. Watch Tattoo Fixers on Holiday - Season 2 full episodes free online.
#!/usr/bin/env python
"""
Calculates the Coupled-Cluster energy- and amplitude equations
See 'An Introduction to Coupled Cluster Theory' by
T. Daniel Crawford and Henry F. Schaefer III.

Other Resource : http://vergil.chemistry.gatech.edu/notes/sahan-cc-2010.pdf
"""

from sympy.physics.secondquant import (AntiSymmetricTensor, wicks,
        F, Fd, NO, evaluate_deltas, substitute_dummies, Commutator,
        simplify_index_permutations, PermutationOperator)
from sympy import (
    symbols, Rational, latex, Dummy
)

pretty_dummies_dict = {
    'above': 'cdefgh',
    'below': 'klmno',
    'general': 'pqrstu'
}


def get_CC_operators():
    """
    Returns a tuple (T1,T2) of unique operators.
    """
    i = symbols('i', below_fermi=True, cls=Dummy)
    a = symbols('a', above_fermi=True, cls=Dummy)
    t_ai = AntiSymmetricTensor('t', (a,), (i,))
    ai = NO(Fd(a)*F(i))
    i, j = symbols('i,j', below_fermi=True, cls=Dummy)
    a, b = symbols('a,b', above_fermi=True, cls=Dummy)
    t_abij = AntiSymmetricTensor('t', (a, b), (i, j))
    abji = NO(Fd(a)*Fd(b)*F(j)*F(i))

    T1 = t_ai*ai
    T2 = Rational(1, 4)*t_abij*abji
    return (T1, T2)


def main():
    print()
    print("Calculates the Coupled-Cluster energy- and amplitude equations")
    print("See 'An Introduction to Coupled Cluster Theory' by")
    print("T. Daniel Crawford and Henry F. Schaefer III")
    print("Reference to a Lecture Series: http://vergil.chemistry.gatech.edu/notes/sahan-cc-2010.pdf")
    print()

    # setup hamiltonian
    p, q, r, s = symbols('p,q,r,s', cls=Dummy)
    f = AntiSymmetricTensor('f', (p,), (q,))
    pr = NO((Fd(p)*F(q)))
    v = AntiSymmetricTensor('v', (p, q), (r, s))
    pqsr = NO(Fd(p)*Fd(q)*F(s)*F(r))

    H = f*pr + Rational(1, 4)*v*pqsr
    print("Using the hamiltonian:", latex(H))

    print("Calculating 4 nested commutators")
    C = Commutator

    T1, T2 = get_CC_operators()
    T = T1 + T2
    print("commutator 1...")
    comm1 = wicks(C(H, T))
    comm1 = evaluate_deltas(comm1)
    comm1 = substitute_dummies(comm1)

    T1, T2 = get_CC_operators()
    T = T1 + T2
    print("commutator 2...")
    comm2 = wicks(C(comm1, T))
    comm2 = evaluate_deltas(comm2)
    comm2 = substitute_dummies(comm2)

    T1, T2 = get_CC_operators()
    T = T1 + T2
    print("commutator 3...")
    comm3 = wicks(C(comm2, T))
    comm3 = evaluate_deltas(comm3)
    comm3 = substitute_dummies(comm3)

    T1, T2 = get_CC_operators()
    T = T1 + T2
    print("commutator 4...")
    comm4 = wicks(C(comm3, T))
    comm4 = evaluate_deltas(comm4)
    comm4 = substitute_dummies(comm4)

    print("construct Hausdorff expansion...")
    eq = H + comm1 + comm2/2 + comm3/6 + comm4/24
    eq = eq.expand()
    eq = evaluate_deltas(eq)
    eq = substitute_dummies(eq, new_indices=True,
                            pretty_indices=pretty_dummies_dict)
    print("*********************")
    print()

    print("extracting CC equations from full Hbar")
    i, j, k, l = symbols('i,j,k,l', below_fermi=True)
    a, b, c, d = symbols('a,b,c,d', above_fermi=True)
    print()
    print("CC Energy:")
    print(latex(wicks(eq, simplify_dummies=True,
                      keep_only_fully_contracted=True)))
    print()
    print("CC T1:")
    eqT1 = wicks(NO(Fd(i)*F(a))*eq, simplify_kronecker_deltas=True,
                 keep_only_fully_contracted=True)
    eqT1 = substitute_dummies(eqT1)
    print(latex(eqT1))
    print()
    print("CC T2:")
    eqT2 = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*eq, simplify_dummies=True,
                 keep_only_fully_contracted=True, simplify_kronecker_deltas=True)
    P = PermutationOperator
    eqT2 = simplify_index_permutations(eqT2, [P(a, b), P(i, j)])
    print(latex(eqT2))


if __name__ == "__main__":
    main()
This is an old topic and has been sufficiently covered in previous questions. However, there have been calls for India to urgently decide, sooner rather than later, whether to join the RCEP. This warrants a revisit of the issue and a discussion of the pros and cons of joining RCEP. The question wants us to write in detail about the pros and cons of India joining RCEP. Introduction– write a few lines about the negotiating members of RCEP: the 10 ASEAN countries and their six FTA partners, namely Australia, China, India, Japan, Korea and New Zealand. Mention the urgency to decide on RCEP, and mention that from India’s point of view the RCEP presents a decisive platform which could influence its strategic and economic status in the Asia-Pacific region. Discuss the pros of joining RCEP, e.g. more access to ASEAN markets; in addition to facilitating foreign direct investment, the RCEP will create opportunities for Indian companies to access new markets, because the structure of manufacturing in many of these countries is becoming more and more sophisticated, resulting in a “servicification” of manufacturing; it would complement India’s existing free trade agreements with ASEAN and some of its member countries; it can address challenges emanating from implementation concerns vis-à-vis overlapping agreements, which are creating a “noodle bowl” situation obstructing effective utilization of these FTAs; it would help India streamline the rules and regulations of doing trade, which will reduce trade costs; and RCEP will facilitate India’s integration into sophisticated “regional production networks”. Discuss the cons, e.g. more access for China in particular will hurt domestic industry and the Make in India programme; proper standards and processes are not in place in India, restricting India’s capacity to prevent imports of inferior quality; India’s exports to RCEP account for about 15% of its total exports while imports from RCEP comprise 35% of total imports, and India’s trade deficit with RCEP has risen from $9 billion in FY05 to $83 billion in FY17, of which China alone accounts for over 60%; and more developed RCEP countries such as Australia and Singapore have been unwilling to accommodate India’s demands to liberalise their services regime and allow freer mobility of Indian workers. Conclusion- Bring out the need to exercise due attention and care in concluding the RCEP and form a fair and balanced conclusion on the issue.
"""This demo program solves Poisson's equation - div grad u(x, y) = f(x, y) on the unit square with source f given by f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02) and boundary conditions given by u(x, y) = 0 for x = 0 or x = 1 du/dn(x, y) = sin(5*x) for y = 0 or y = 1 It demonstrates how to extract petsc4py objects from dolfin objects and use them in a petsc4py Krylov solver. Based on "demo/pde/poisson/python/demo_poisson.py" """ # Copyright (C) 2007-2011, 2013 Anders Logg, Lawrence Mitchell # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # Begin demo from __future__ import print_function from dolfin import * from six import print_ try: from petsc4py import PETSc except: print_("*** You need to have petsc4py installed for this demo to run", end=' ') print("Exiting.") exit() if not has_petsc4py(): print_("*** DOLFIN has not been compiled with petsc4py support", end=' ') print("Exiting.") exit() parameters["linear_algebra_backend"] = "PETSc" # Create mesh and define function space mesh = UnitSquareMesh(32, 32) V = FunctionSpace(mesh, "Lagrange", 1) # Define Dirichlet boundary (x = 0 or x = 1) def boundary(x): return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS # Define boundary condition u0 = Constant(0.0) bc = DirichletBC(V, u0, boundary) # Define variational problem u = TrialFunction(V) v = TestFunction(V) f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)") g = Expression("sin(5*x[0])") a = inner(grad(u), grad(v))*dx L = f*v*dx + g*v*ds # Compute solution u = Function(V) A, b = assemble_system(a, L, bc) # Fetch underlying PETSc objects A_petsc = as_backend_type(A).mat() b_petsc = as_backend_type(b).vec() x_petsc = as_backend_type(u.vector()).vec() # Create solver, apply preconditioner and solve system ksp = PETSc.KSP().create() ksp.setOperators(A_petsc) pc = PETSc.PC().create() pc.setOperators(A_petsc) pc.setType(pc.Type.JACOBI) ksp.setPC(pc) ksp.solve(b_petsc, x_petsc) # Plot solution plot(u, interactive=True) # Save solution to file file = File("poisson.pvd") file << u
Israeli settlers seized part of a Palestinian-owned plot of land in Jalud village, to the south of Nablus. Ghassan Daghlas, an official who monitors settlement activity in the northern West Bank, told Ma’an News Agency that Israeli settlers, from the nearby illegal Israeli outpost of Ahiya, seized about 10 dunams (2.5 acres) of land near Palestinian homes built on the village’s land, which is classified as Area C. Daghlas added that the settlers, under the protection of heavily armed Israeli forces, razed the land, set up water lines, and laid groundwork for new illegal settlement construction. Daghlas pointed out that, despite the land being planted with dozens of olive and fig trees for decades, Israeli forces have banned the Palestinian owners of the land from entering it since 2001, when it was declared a closed military zone. Meanwhile, the Israeli government declared the illegal Shvut Rachel settlement eligible to be added to the “National Priority Map” for additional grants and financial incentives to encourage development. In August, the Jerusalem District Court ruled that if Israeli settlers built a settlement on private Palestinian land with “good intentions”, then it should not be removed, thereby allowing more Palestinian land to be seized by Israeli settlers as they see fit. According to settlement watchdog Peace Now, in the year and a half since President Trump took office, some 14,454 units in the West Bank have been approved, which is more than three times the number approved in the year and a half before his inauguration (4,476 units). Since the occupation of the West Bank in 1967, between 500,000 and 600,000 Israelis have moved into Israeli settlements in occupied Palestinian territory, including East Jerusalem, in violation of international law.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module


def _get_strategy(num_gpus):
  if num_gpus > 1:
    return mirrored_strategy.MirroredStrategy(
        ['/GPU:%d' % i for i in range(num_gpus)])
  else:
    return distribution_strategy_context.get_strategy()  # The default strategy


class LossScaleBenchmark(test.Benchmark):
  """Benchmark for loss scaling."""

  def _benchmark(self, gradient_type, num_gpus, mode, loss_scaling):
    """Benchmarks loss scaling.

    We run a simple model with several scalar variables. The loss is the sum
    of all variables. The model is simple because we want to measure only the
    performance of loss scaling, not the performance of the model itself.

    Args:
      gradient_type: "optimizer" or "gradient_tape". How gradients are
        computed. "optimizer" uses Optimizer.minimize. "gradient_tape" uses
        GradientTape.gradient along with LossScaleOptimizer.get_scaled_loss
        and LossScaleOptimizer.get_unscaled_gradients.
      num_gpus: The number of GPUs to use. Must be at least 1.
      mode: "eager" or "tf_function". "tf_function" causes all computations
        to be wrapped in a tf.function, while "eager" runs computations
        eagerly.
      loss_scaling: "fixed", "dynamic", or None. The type of loss scaling to
        use. None means use no loss scaling, which is useful as a baseline to
        see how much slower loss scaling is in comparison.
    """
    ls_str = loss_scaling or 'no_loss_scaling'
    name = '%s_%d_GPU_%s_%s' % (gradient_type, num_gpus, mode, ls_str)
    with context.eager_mode(), _get_strategy(num_gpus).scope() as strategy:
      opt = adam.Adam()
      if loss_scaling == 'fixed':
        loss_scale = loss_scale_module.FixedLossScale(2.)
      elif loss_scaling == 'dynamic':
        # Make increment_period so high that it's effectively infinite. This
        # means the loss scale will never change. Any performance overhead
        # from increasing/decreasing the loss scale is typically negligible
        # since it happens infrequently, so we only benchmark the common case
        # of the loss scale not changing.
        increment_period = 1000000
        loss_scale = loss_scale_module.DynamicLossScale(
            initial_loss_scale=2., increment_period=increment_period)
      else:
        assert loss_scaling is None
        loss_scale = None
      if loss_scale:
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)

      num_vars = 200
      num_warmup_iters = 1
      num_iters = 20
      # By using scalar variables, we reduce overhead of the actual GPU work
      # of multiplying variables, dividing gradients, and checking gradients
      # for NaNs. Measuring these overheads isn't very useful as there is
      # little we can do to reduce them (one such way would be to fuse
      # dividing gradients and checking them for NaNs). We still have all
      # other overheads, such as all-reducing the `is_finite` values and
      # having a tf.cond or tf.while_loop based on whether gradients are
      # NaNs. Currently, these other overheads are much more significant than
      # the GPU work.
      var_list = [
          variables.Variable(i, dtype='float32') for i in range(num_vars)]

      def get_loss():
        return math_ops.add_n(var_list)

      if gradient_type == 'gradient_tape':
        if loss_scale is None:
          def minimize_fn():
            with backprop.GradientTape() as tape:
              loss = get_loss()
            grads = tape.gradient(loss, var_list)
            return opt.apply_gradients(zip(grads, var_list))
        else:
          def minimize_fn():
            with backprop.GradientTape() as tape:
              loss = get_loss()
              scaled_loss = opt.get_scaled_loss(loss)
            scaled_grads = tape.gradient(scaled_loss, var_list)
            grads = opt.get_unscaled_gradients(scaled_grads)
            return opt.apply_gradients(zip(grads, var_list))
      else:
        assert gradient_type == 'optimizer'
        def minimize_fn():
          return opt.minimize(get_loss, var_list)

      def run_fn():
        strategy.run(minimize_fn)
      if mode == 'tf_function':
        run_fn = def_function.function(run_fn)

      for _ in range(num_warmup_iters):
        run_fn()

      start = time.time()
      for _ in range(num_iters):
        run_fn()
      end = time.time()
      self.report_benchmark(iters=num_iters,
                            wall_time=(end - start) / num_iters, name=name)

  def _gpus_to_test_with(self):
    num_gpus = len(config.list_logical_devices('GPU'))
    gpus_to_test_with = []
    if num_gpus >= 1:
      gpus_to_test_with.append(1)
    if num_gpus >= 2:
      gpus_to_test_with.append(2)
    if num_gpus >= 8:
      gpus_to_test_with.append(8)
    return gpus_to_test_with

  def benchmark_optimizer(self):
    for num_gpus in self._gpus_to_test_with():
      for mode in 'eager', 'tf_function':
        for loss_scaling in None, 'fixed', 'dynamic':
          self._benchmark('optimizer', num_gpus, mode, loss_scaling)

  def benchmark_gradient_tape(self):
    for num_gpus in self._gpus_to_test_with():
      for mode in 'eager', 'tf_function':
        for loss_scaling in None, 'fixed', 'dynamic':
          self._benchmark('gradient_tape', num_gpus, mode, loss_scaling)


if __name__ == '__main__':
  test.main()
KYL’S FAULTY MEMORY…. Senate Minority Whip Jon Kyl (R) of Arizona believes President Obama isn’t reaching out to Republicans as much as he used to. President Obama has become more and more partisan since taking office, the second-ranking Senate Republican charged Monday. “In the earliest days, he reached out in a bipartisan way to secure passage of administration priorities … [b]ut the administration has become increasingly partisan in the months since then,” Senate Minority Whip Jon Kyl (R-Ariz.) said in a speech on the Senate floor. Hmm, I don’t remember the “earliest days” of Obama’s presidency quite the same way. Obama “reached out in a bipartisan way to secure passage of administration priorities”? Well, he reached out in a bipartisan way, and found that Republicans had no interest in compromise or cooperation. The Senate passed the Lilly Ledbetter Act over GOP opposition; the Senate passed the stimulus package over GOP opposition; the Senate passed the budget over GOP opposition; the Senate passed S-CHIP over GOP opposition; etc. When, exactly, was this golden era? For that matter, what’s changed? The White House invested quite a bit of effort last week, reaching out to Republicans on health care reform, but couldn’t find much in the way of support. That’s fine, it’s the opposition party; it’s supposed to oppose the majority’s agenda. But I still don’t know what Kyl is whining about here. Obama keeps seeking GOP support, and keeps finding an obstinate minority. Why would Kyl complain about that?
# -*- coding: utf-8 -*-
import os
from nose.tools import assert_equal, assert_true, assert_false
from capitalization_train.util import (extract_title,
                                       get_document_content_paf,
                                       is_monocase,
                                       get_title_and_content_by_paf)

CURDIR = os.path.dirname(os.path.realpath(__file__))


def test_extract_title():
    actual = extract_title(CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A')
    expected = u'UPDATE - Nanobiotix gets early Positive Safety rEsults IN head and neck clinical trial'
    assert_equal(actual, expected)


def test_get_document_content_paf_empty():
    actual = get_document_content_paf(CURDIR + '/data/empty_doc')
    expected = '\n\n'
    assert_equal(actual, expected)


def test_get_document_content_paf():
    actual = get_document_content_paf(CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A')
    assert_true(len(actual.strip()) > 400)


def test_get_title_and_content_by_paf():
    starting_content, title, body\
        = get_title_and_content_by_paf(
            CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A'
        )
    assert_equal(starting_content, "20150609\n001BBB8BFFE6841FA498FCE88C43B63A\n")
    assert_equal(
        title,
        "UPDATE - Nanobiotix gets early Positive Safety rEsults IN head and neck clinical trial"
    )
    assert_true(len(body.strip()) > 400)


def test_is_monocase():
    assert_true(
        is_monocase(
            "The Inside Story of How the iPhone Crippled BlackBerry".split()
        )
    )

    # this
    assert_true(
        is_monocase(
            "Ayla Networks Executives Speaking at Key IoT Conferences this Spring".split()
        )
    )

    # annoying 'du'
    assert_true(
        is_monocase(
            "KSL Capital Partners Announces the Sale of Malmaison and Hotel du Vin".split()
        )
    )

    assert_true(
        is_monocase("Global Eagle Entertainment and SES Sign a Strategic Partnership to Deliver Global Ku-Band Satellite in-Flight Connectivity to Airlines".split())
    )

    assert_false(
        is_monocase("Agenda Released for the 17th annual Summit on Superbugs & Superdrugs".split())
    )

    assert_true(
        is_monocase("How Find Your Inner Martin Scorsese to Build Brand & Rule the World".split())
    )

    assert_true(
        is_monocase("Half of YouTube 's Traffic is Now Coming From Mobile: CEO".split())
    )

    assert_true(
        is_monocase("Crystal Bridges Announces 2015 Exhibits, Including Warhol, van Gogh, Pollock".split())
    )

    assert_true(
        is_monocase("Why American Airlines Threw Away Paper Flight Plans in Favor of iPads".split())
    )

    # len(ac) == 0
    assert_false(
        is_monocase("Why american airlines threw away paper flight plans in favor of ipads".split())
    )

    assert_true(
        is_monocase("Amy Pascal to Work on Sony 's Spider-Man Team".split())
    )
All heated gantry models are worktop mounted and can be used for short-term food holding or as a heated pass area. They are available with an optional Quartz Heat Controller (QC1); see below. The rectilinear flat glass sneeze guard profile is designed to complement other units within the range. Heated quartz infra-red lamps are mounted in the gantry superstructure, supporting a toughened rectangular sneeze protection glass screen. QC1 – Quartz Dimmer Controller Option. Dimmer control for the heated gantry with an elliptical profile mounting plate. The remote control panel option mounts in the rear of the counter valance panel. Kubus Self Help Heated Gantries. Self Help Heated Island Gantry. The rectilinear flat glass double sneeze guard profile is designed to complement other units within the range. Heated quartz infra-red lamps are mounted in the gantry superstructure, supporting a toughened rectangular island sneeze protection glass screen. For self-help applications where access is required from both sides, i.e. an island counter worktop. Kubus Self Help Heated Island Gantries.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='regenerate',
    version='1.0.0',
    license='License.txt',
    author='Donald N. Allingham',
    author_email='[email protected]',
    description='Register editor for ASIC/FPGA designs',
    long_description='Allows users to manage registers for '
                     'ASIC and FPGA designs. Capable of generating Verilog '
                     'RTL, test code, C and assembler header files, and documentation.',
    packages=["regenerate", "regenerate.db", "regenerate.importers",
              "regenerate.extras", "regenerate.settings", "regenerate.ui",
              "regenerate.writers"],
    package_data={
        'regenerate': ['data/ui/*.ui', 'data/media/*.svg',
                       'data/help/*.rst', 'data/media/*.png',
                       'data/extra/*.odt', 'data/*.*',
                       'writers/templates/*']
    },
    url="https://github.com/dallingham/regenerate",
    scripts=["bin/regenerate", "bin/regbuild", "bin/regupdate",
             "bin/regxref", "bin/regdiff", "bin/ipxact2reg"],
    classifiers=[
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
        'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)'
    ],
)
Canon PIXMA MG5130 scanner drivers and software are available for download for Mac, Windows, and Linux. The PIXMA MG5130 is a wireless inkjet office and business all-in-one printer with copier, scanner, and fax. The Canon MG5130 is Wi-Fi connected, perfect for family photos and even better for printing personal or work documents. Enjoy the detail: the latest 2pl technology produces a print resolution of 4800 x 1200 dpi, so you can produce documents with superior detail and vivid colour copies. Want to print a lot without worrying about running out of paper? The MG5130 provides a large 250-sheet plain paper capacity, so you can stay focused on your job and never worry about running out of paper. It can bring office efficiency to new levels and help lower your expenses at the same time, highlighting both the output and the quality delivered. You may be surprised by the MG5130: it is capable of delivering output at an amazing 9600 x 2400 maximum colour dpi with five individual ink tanks. That means your business documents print beautifully and efficiently, with the option of high-yield pigment black ink tanks so you can print more documents without having to change the pigment black ink as often.
# vim: tabstop=8 shiftwidth=4 softtabstop=4 expandtab smarttab autoindent

# Altai API Service
# Copyright (C) 2012-2013 Grid Dynamics Consulting Services, Inc
# All Rights Reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.

from flask import Blueprint, abort

from openstackclient_base import exceptions as osc_exc

from altai_api.blueprints.users import (user_to_view, InvitesDAO,
                                        update_user_data)

from altai_api import auth
from altai_api.schema import Schema
from altai_api.schema import types as st

from altai_api.utils import make_json_response, parse_request_data
from altai_api.utils.decorators import no_auth_endpoint, root_endpoint


BP = Blueprint('invites', __name__)


def _invite_and_user(code):
    user_mgr = auth.admin_client_set().identity_admin.users
    invite = InvitesDAO.get(code)

    try:
        assert not invite.complete
        user = user_mgr.get(invite.user_id)
        assert not user.enabled
    except (osc_exc.NotFound, AssertionError):
        abort(404)
    return invite, user


@BP.route('/')
@root_endpoint('invites')
def list_invites():
    # just a stub to mark with root_endpoint
    abort(404)


@BP.route('/<code>', methods=('GET',))
@no_auth_endpoint
def get_user_by_code(code):
    invite, user = _invite_and_user(code)
    return make_json_response(user_to_view(user, invite))


_ACCEPT_SCHEMA = Schema((
    st.String('name'),
    st.String('fullname', allow_empty=True),
    st.String('email'),
))

_ACCEPT_REQUIRES = Schema((
    st.String('password'),
))


@BP.route('/<code>', methods=('PUT',))
@no_auth_endpoint
def accept_invite(code):
    data = parse_request_data(_ACCEPT_SCHEMA, _ACCEPT_REQUIRES)
    invite, user = _invite_and_user(code)

    data['enabled'] = True
    try:
        update_user_data(user, data)
        user = auth.admin_client_set().identity_admin.users.get(user.id)
    except osc_exc.NotFound:
        abort(404)

    InvitesDAO.complete_for_user(user.id)
    return make_json_response(user_to_view(user, invite), 200)


@BP.route('/<code>', methods=('DELETE',))
@no_auth_endpoint
def drop_invite(code):
    """Refuse to accept invite"""
    invite, user = _invite_and_user(code)

    try:
        user.delete()
    except osc_exc.NotFound:
        abort(404)

    InvitesDAO.complete_for_user(invite.user_id)
    return make_json_response(None, status_code=204)
Krizbeatz is fast becoming a popular name in the Nigerian music scene. Apart from producing Tekno’s hit song, Pana, in 2016, he has produced songs like ‘Temper’ by Skales, ‘For Life’ by Runtown, ‘Shele Gan Gan’ by Lil Kesh and ‘Weekend Vibe’ by Seyi Shay, to mention but a few. He has also produced for foreign artistes like Drake, Omarion, Ty Dolla Sign, Diamond Platnumz and Vanessa Mdee. While speaking with Saturday Beats, Krizbeatz revealed that even though Tekno’s song, ‘Pana,’ was one of the biggest songs of 2016, he did not originally produce the beat for Tekno. According to him, when the original owner of the beat heard ‘Pana,’ he called him to commend his effort but did not know it was the same beat that he had rejected. He said, “Even when I told him it was the beat he rejected, he argued that it was better than what he heard. I guess since Tekno is a very good producer as well, he knows what good beats sound like. But I didn’t know he was a producer until I visited his house.” Despite his success, Krizbeatz admitted that it was not a jolly ride to attract top musicians at first. He narrated, “It was really a challenge to get people to hear what I had to offer at the time. There is the belief that once a song is not produced by a known producer, it is not good enough. I went to some artistes and tried to convince them to listen to my production, but they never did.”
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <[email protected]>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
#
# Automatic reading generation with kakasi and mecab.
# See http://ichi2.net/anki/wiki/JapaneseSupport
#
# Adapted for stand-alone use by
# Christoph Dittmann <[email protected]>.

import sys, os, platform, re, subprocess, codecs

MAX_OUTPUT_LENGTH = 300

kakasiCmd = ["kakasi", "-iutf8", "-outf8", "-u", "-JH", "-KH"]
mecabCmd = ["mecab", '--node-format=%m[%f[5]] ', '--eos-format=\n',
            '--unk-format=%m[] ']


class KakasiController(object):
    def __init__(self):
        self.kakasi = None

    def ensureOpen(self):
        if not self.kakasi:
            try:
                self.kakasi = subprocess.Popen(
                    kakasiCmd, bufsize=-1, stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE)
            except OSError:
                raise Exception("Please install kakasi.")

    def toHiragana(self, expr):
        self.ensureOpen()
        self.kakasi.stdin.write(expr.encode("utf8", "ignore")+'\n')
        self.kakasi.stdin.flush()
        res = unicode(self.kakasi.stdout.readline().rstrip('\r\n'), "utf8")
        return res

kakasi = KakasiController()


def fixExpr(expr):
    out = []
    expr_split = re.split("([^\[]+\[[^\]]*\])", expr)
    for node in expr_split:
        if node == '':
            continue
        m = re.match("(.+)\[(.*)\]", node.decode("utf-8"))
        if not m:
            out.append(node.decode("utf-8"))
            continue

        (kanji, reading) = m.groups()
        # hiragana, katakana, punctuation, not japanese, or lacking a reading
        if kanji == reading or not reading:
            out.append(kanji)
            continue

        # convert to hiragana
        reading = kakasi.toHiragana(reading)

        # ended up the same
        if reading == kanji:
            out.append(kanji)
            continue

        # don't add readings of numbers
        if kanji.strip() in u"0123456789":  # u"一二三四五六七八九十0123456789":
            out.append(kanji)
            continue

        # strip matching characters at beginning and end of reading and kanji
        # reading should always be at least as long as the kanji
        placeL = 0
        placeR = 0
        for i in range(1, len(kanji)):
            if kanji[-i] != reading[-i]:
                break
            placeR = i
        for i in range(0, len(kanji)-1):
            if kanji[i] != reading[i]:
                break
            placeL = i+1
        if placeL == 0:
            if placeR == 0:
                out.append(" %s[%s]" % (kanji, reading))
            else:
                out.append(" %s[%s]%s" % (
                    kanji[:-placeR], reading[:-placeR], reading[-placeR:]))
        else:
            if placeR == 0:
                out.append("%s %s[%s]" % (
                    reading[:placeL], kanji[placeL:], reading[placeL:]))
            else:
                out.append("%s %s[%s]%s" % (
                    reading[:placeL], kanji[placeL:-placeR],
                    reading[placeL:-placeR], reading[-placeR:]))

    fin = ""
    for c, s in enumerate(out):
        if c < len(out) - 1 and re.match("^[A-Za-z0-9]+$", out[c+1]):
            s += " "
        fin += s

    fin = fin.strip()
    fin = re.sub(u"\[\]", u"", fin)
    fin = re.sub(u" +", u" ", fin)
    return fin


def get_readings(expr):
    try:
        mecab = subprocess.Popen(
            mecabCmd, bufsize=-1, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE)
        return mecab.communicate(expr)[0]
    except OSError:
        raise Exception("Please install mecab.")


if __name__ == "__main__":
    sys.stdout = codecs.open("/dev/stdout", "w", 'utf-8')
    if len(sys.argv) != 2 or len(sys.argv[1]) == 0:
        print 'Please provide one argument.'
        sys.exit(0)

    try:
        result = fixExpr(get_readings(sys.argv[1]))
        result = re.sub(u"\\n", u"", result)
    except Exception, (e):
        print e
        sys.exit(1)

    if len(result) > MAX_OUTPUT_LENGTH:
        print result[0:MAX_OUTPUT_LENGTH - 3] + u'...'
    else:
        print result
The white-lipped peccary: to the average person this animal might just look like a wild pig, not too pleasant to look at. But the truth is that these funny looking creatures are a vital element of healthy forest ecosystems in the Mesoamerica region. Their role as fruit predators and as a food base for several wild cats is crucial for biodiversity in the region. However, a press release issued today by the Wildlife Conservation Society in Belize says that a new report shows that white-lipped peccaries have been eliminated from 87% of their historical range in the Mesoamerica region. Forest fragmentation and unsustainable hunting practices have caused their numbers to dwindle and teeter on the brink of extinction. Reports of the continued decline of the species caused great concern among experts that the previous assessment as 'Vulnerable' under the IUCN Red List of Threatened Species has been inadequate to respond to the growing issue. As a result, an international study was conducted with participants from all 7 countries in which the white-lipped peccary is found. The University of Belize represented the Belizean delegation in this study. According to the published report, the results of this study are already informing revisions to wildlife protection laws and hunting regulations in Guatemala and Belize. In addition, the report notes that this is a critical time for the survival of the white-lipped peccary and, by extension, forest diversity, as stable herds, according to the study conducted, can now only be found in two of five wildernesses in Mesoamerica: the Tri-national Maya Forest in Belize, Mexico and Guatemala; and the Darién in Panama. They are foul-smelling, noisy, and move in groups of up to three hundred. They are an important species to the environment, but are on the verge of extinction. A recently released report revealed that the white-lipped peccary, or Warrie, is on the verge of extinction. The animal can be found across the Meso-American region, and Belize has two of the nine remaining healthy populations. It is the most endangered large mammal in Central America and its numbers have declined by eighty-seven percent. Conservationists believe that the species can be saved from extinction if the large connected forests are protected and hunting is properly managed. News Five’s Hipolito Novelo reports. One of the largest herding animals in Belize and in the Americas is on the verge of extinction. A report from Washington State University and other key regional partners has revealed that the population of the white-lipped peccary, known in Belize as Warrie, has declined by an alarming eighty-seven percent. Lee McLoughlin, Terrestrial Coordinator, W.C.S. Also known as the “pig of the jungle,” the white-lipped peccary is considered an ecological engineer because, as it makes its way through large forest blocks, it creates new niches that help maintain a variety of wildlife. It is important prey for jaguars; however, the peccaries are known to be so aggressive that jaguars have been killed by them. A white-lipped peccary can be identified by its brown or black fur and the white patch around its snout. The animal releases a strong, distinctive odor used to mark its territory. It loves wallowing in the mud, but its very existence is threatened by deforestation and illegal, unregulated hunting. The white-lipped peccary is almost extinct in Nicaragua and Honduras. None can be found within the borders of El Salvador.
According to experts, for the animal to survive the threat of deforestation and human consumption, regulatory legislation and agencies are necessary.
from puzzle.utils import (get_gene_info, get_cytoband_coord) class BaseVariantMixin(object): """Base class for variant mixins""" def variants(self, case_id, skip=0, count=30, filters=None): """Return a results tuple with variants and nr_of_variants. """ raise NotImplementedError def variant(self, variant_id): """Return a specific variant.""" raise NotImplementedError def _get_genes(self, variant): """Add the genes for a variant Get the hgnc symbols from all transcripts and add them to the variant Args: variant (dict): A variant dictionary Returns: genes (list): A list of Genes """ ensembl_ids = [] hgnc_symbols = [] for transcript in variant.transcripts: if transcript.ensembl_id: ensembl_ids.append(transcript.ensembl_id) if transcript.hgnc_symbol: hgnc_symbols.append(transcript.hgnc_symbol) genes = get_gene_info( ensembl_ids=ensembl_ids, hgnc_symbols=hgnc_symbols ) return genes def _add_sv_coordinates(self, variant): """Add the neccesary sv coordinates for a variant Args: variant (puzzle.models.variant) """ variant.stop_chrom = variant.CHROM variant.start = int(variant.POS) # If we have a translocation: if ':' in variant.ALT: other_coordinates = variant.ALT.strip('ACGTN[]').split(':') variant.stop_chrom = other_coordinates[0].lstrip('chrCHR') other_position = other_coordinates[1] # variant.stop = other_position #Set 'infinity' to length if translocation variant.sv_len = float('inf') variant.sv_type = 'BND' else: variant.sv_len = variant.stop - variant.start variant['cytoband_start'] = get_cytoband_coord( chrom=variant.CHROM, pos=variant.start ) variant['cytoband_stop'] = get_cytoband_coord( chrom=variant.stop_chrom, pos=variant.stop )
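A small standalone sketch (not part of the puzzle package) of what the ALT-based translocation handling above does for a VCF breakend allele; the allele string and chromosome numbers are invented for illustration.

alt = "G]17:198982]"   # hypothetical breakend (BND) ALT describing a translocation
if ':' in alt:
    other_coordinates = alt.strip('ACGTN[]').split(':')   # -> ['17', '198982']
    stop_chrom = other_coordinates[0].lstrip('chrCHR')    # -> '17'
    other_position = other_coordinates[1]                 # -> '198982'
    sv_type = 'BND'
    sv_len = float('inf')                                 # translocations get "infinite" length
    print("partner locus: %s:%s (%s)" % (stop_chrom, other_position, sv_type))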
The University of Maine at Presque Isle announced its intentions to move forward on its biggest green energy project ever – a wind turbine project – in May 2007. The hope was to install a mid-size wind turbine that would meet the campus’s energy needs, significantly reducing energy costs and helping the University to leave a much smaller carbon footprint. In the Fall of 2008, the University signed a contract with Lumus Construction Inc. to install a 600 kW wind turbine adjacent to its athletics fields at the southern end of campus. The contract established UMPI as the first university in the state and one of only a handful in New England to install a mid-size wind turbine. The turbine is expected to produce about 700,000 kilowatt-hours of electricity per year and save the institution more than $100,000 annually in electricity charges. It also is expected to save an estimated 572 tons of carbon dioxide from being released into the atmosphere each year, or the equivalent of removing 123 cars from the road. The $2 million project included the cost of the turbine and its installation. The turbine tower was manufactured in the U.S., the nacelle and rotor blades in Chennai, India. In late 2008, crews began drilling and excavating to put steel and concrete into the ground to complete the turbine base. In 2009, they began the installation process, placing the 65-meter turbine tower on the base, and then assembling the turbine nacelle and rotor blades and placing them atop the tower. The turbine was completely assembled and generating electricity by late Spring 2009.
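A back-of-the-envelope cross-check of the figures quoted above, treating "more than $100,000" as exactly $100,000 and assuming the savings come entirely from the stated 700,000 kWh of annual production (illustrative only, not from the article).

annual_kwh = 700000.0        # "about 700,000 kilowatt-hours of electricity per year"
annual_savings = 100000.0    # "more than $100,000 annually", taken as 100,000
co2_tons = 572.0             # tons of CO2 avoided per year
cars = 123.0                 # "equivalent of removing 123 cars from the road"
print("implied electricity price: $%.2f per kWh" % (annual_savings / annual_kwh))
print("implied CO2 per car: %.1f tons per year" % (co2_tons / cars))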
'Upgrade older ngtrees to newer version' import logging logger = logging.getLogger(__name__) def upgrade_ngt_v2(ngt): 'Upgrade ngt structures to version 2 for the API' stack = list() # Add dictionary to stack stack.append(ngt) # upgrade keys on all dictionaries for tree in stack: # Copy NGT and traverse nt = tree.copy() for f in nt: # Upgrade dictionary key tree.pop(f) tree[_new_name(f)] = nt[f] # Found a nested dict, add to stack if isinstance(nt[f], dict): stack.append(nt[f]) # Found a nested list elif isinstance(nt[f], list): for en in nt[f]: # nested dict in list, add to stack if isinstance(en, dict): stack.append(en) return ngt def _new_name(old): 'Get new name for fields (lowercase, replace spaces with _)' nmap = { 'StandbyRouter': 'standby_router', 'SecurityLevel': 'security_level', 'mgmtgroup': 'mgmt_group' } if old in nmap: return nmap[old] old = old.replace(' ', '_') old = old.lower() if old == 'data': old = 'xdata' return old
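A quick illustration of the upgrade in use, assuming the functions above are in scope; the sample tree below is invented and is not a real ngtree.

# Illustrative call of upgrade_ngt_v2 on a made-up nested structure.
sample = {
    'StandbyRouter': '10.0.0.2',
    'SecurityLevel': 'high',
    'data': 'payload',
    'Port List': [
        {'mgmtgroup': 'core', 'Port Name': 'ge-0/0/1'},
    ],
}
upgraded = upgrade_ngt_v2(sample)
# Keys are lower-cased with spaces replaced by underscores, the explicit
# mappings applied ('StandbyRouter' -> 'standby_router', 'mgmtgroup' ->
# 'mgmt_group'), and 'data' renamed to 'xdata'; nested dicts inside lists
# are rewritten in place as well.
print(upgraded)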
On Wednesday morning, Ivanka Trump continued her public promotion of Opportunity Zones in a series of tweets. She did not address the AP investigation. “Opportunity Zones will catalyze private sector investment that will create jobs + economic growth in overlooked communities nationwide,” she said.
#!/usr/bin/env python
'''
The scan_network scans the network for different ports and calls network plugins for the
different services running on the target
'''
import re
import logging

from framework.dependency_management.dependency_resolver import BaseComponent
from framework.utils import FileOperations

SCANS_FOLDER = "scans"  # Folder under which all scans will be saved
PING_SWEEP_FILE = "%s/00_ping_sweep" % SCANS_FOLDER
DNS_INFO_FILE = "%s/01_dns_info" % SCANS_FOLDER
FAST_SCAN_FILE = "%s/02_fast_scan" % SCANS_FOLDER
STD_SCAN_FILE = "%s/03_std_scan" % SCANS_FOLDER
FULL_SCAN_FILE = "%s/04_full_scan" % SCANS_FOLDER


class Scanner(BaseComponent):
    COMPONENT_NAME = "scanner"

    def __init__(self):
        self.register_in_service_locator()
        self.shell = self.get_component("shell")
        self.config = self.get_component("config")
        self.plugin_handler = self.get_component("plugin_handler")
        self.shell.shell_exec("mkdir %s" % SCANS_FOLDER)

    def ping_sweep(self, target, scantype):
        if scantype == "full":
            logging.info("Performing Intense Host discovery")
            self.shell.shell_exec("nmap -n -v -sP -PE -PP -PS21,22,23,25,80,443,113,21339 -PA80,113,443,10042"
                                  " --source_port 53 %s -oA %s" % (target, PING_SWEEP_FILE))
        if scantype == "arp":
            logging.info("Performing ARP host discovery")
            self.shell.shell_exec("nmap -n -v -sP -PR %s -oA %s" % (target, PING_SWEEP_FILE))
        self.shell.shell_exec('grep Up %s.gnmap | cut -f2 -d\" \" > %s.ips' % (PING_SWEEP_FILE, PING_SWEEP_FILE))

    def dns_sweep(self, file_with_ips, file_prefix):
        logging.info("Finding misconfigured DNS servers that might allow zone transfers among live ips ..")
        self.shell.shell_exec("nmap -PN -n -sS -p 53 -iL %s -oA %s" % (file_with_ips, file_prefix))
        # Step 2 - Extract IPs
        dns_servers = "%s.dns_server.ips" % file_prefix
        self.shell.shell_exec('grep \"53/open/tcp\" %s.gnmap | cut -f 2 -d \" \" > %s' % (file_prefix, dns_servers))
        file = FileOperations.open(dns_servers)
        domain_names = "%s.domain_names" % file_prefix
        self.shell.shell_exec("rm -f %s" % domain_names)
        num_dns_servers = 0
        for line in file:
            if line.strip('\n'):
                dns_server = line.strip('\n')
                self.shell.shell_exec("host %s %s | grep 'domain name' | cut -f 5 -d' ' | cut -f 2,3,4,5,6,7 -d. "
                                      "| sed 's/\.$//' >> %s" % (dns_server, dns_server, domain_names))
                num_dns_servers += 1
        try:
            file = FileOperations.open(domain_names, owtf_clean=False)
        except IOError:
            return
        for line in file:
            domain = line.strip('\n')
            raw_axfr = "%s.%s.%s.axfr.raw" % (file_prefix, dns_server, domain)
            self.shell.shell_exec("host -l %s %s | grep %s > %s" % (domain, dns_server, domain, raw_axfr))
            success = self.shell.shell_exec("wc -l %s | cut -f 1 -d ' '" % raw_axfr)
            if success > 3:
                logging.info("Attempting zone transfer on %s using domain %s.. Success!" % (dns_server, domain))
                axfr = "%s.%s.%s.axfr" % (file_prefix, dns_server, domain)
                self.shell.shell_exec("rm -f %s" % axfr)
                logging.info(self.shell.shell_exec("grep 'has address' %s | cut -f 1,4 -d ' ' | sort -k 2 -t ' ' "
                                                   "| sed 's/ /#/g'" % raw_axfr))
            else:
                logging.info("Attempting zone transfer on %s using domain %s.. Failed"
                             % (dns_server, domain))
                self.shell.shell_exec("rm -f %s" % raw_axfr)
        if num_dns_servers == 0:
            return

    def scan_and_grab_banners(self, file_with_ips, file_prefix, scan_type, nmap_options):
        if scan_type == "tcp":
            logging.info("Performing TCP portscan, OS detection, Service detection, banner grabbing, etc")
            self.shell.shell_exec("nmap -PN -n -v --min-parallelism=10 -iL %s -sS -sV -O -oA %s.tcp %s" % (
                file_with_ips, file_prefix, nmap_options))
            self.shell.shell_exec("amap -1 -i %s.tcp.gnmap -Abq -m -o %s.tcp.amap -t 90 -T 90 -c 64" % (file_prefix, file_prefix))
        if scan_type == "udp":
            logging.info("Performing UDP portscan, Service detection, banner grabbing, etc")
            self.shell.shell_exec("nmap -PN -n -v --min-parallelism=10 -iL %s -sU -sV -O -oA %s.udp %s" % (
                file_with_ips, file_prefix, nmap_options))
            self.shell.shell_exec("amap -1 -i %s.udp.gnmap -Abq -m -o %s.udp.amap" % (file_prefix, file_prefix))

    def get_nmap_services_file(self):
        return '/usr/share/nmap/nmap-services'

    def get_ports_for_service(self, service, protocol):
        regexp = '(.*?)\t(.*?/.*?)\t(.*?)($|\t)(#.*){0,1}'
        re.compile(regexp)
        list = []
        f = FileOperations.open(self.get_nmap_services_file())
        for line in f.readlines():
            if line.lower().find(service) >= 0:
                match = re.findall(regexp, line)
                if match:
                    port = match[0][1].split('/')[0]
                    prot = match[0][1].split('/')[1]
                    if (not protocol or protocol == prot) and port not in list:
                        list.append(port)
        f.close()
        return list

    def target_service(self, nmap_file, service):
        ports_for_service = self.get_ports_for_service(service, "")
        f = FileOperations.open(nmap_file.strip())
        response = ""
        for host_ports in re.findall('Host: (.*?)\tPorts: (.*?)[\t\n]', f.read()):
            host = host_ports[0].split(' ')[0]  # Remove junk at the end
            ports = host_ports[1].split(',')
            for port_info in ports:
                if len(port_info) < 1:
                    continue
                chunk = port_info.split('/')
                port = chunk[0].strip()
                port_state = chunk[1].strip()
                # No point in wasting time probing closed/filtered ports!!
                # (nmap sometimes adds these to the gnmap file for some reason ..)
if port_state in ['closed', 'filtered']: continue try: prot = chunk[2].strip() except: continue if port in ports_for_service: response += "%s:%s:%s##" % (host, port, prot) f.close() return response def probe_service_for_hosts(self, nmap_file, target): services = [] # Get all available plugins from network plugin order file net_plugins = self.config.Plugin.GetOrder("network") for plugin in net_plugins: services.append(plugin['Name']) services.append("http") total_tasks = 0 tasklist = "" plugin_list = [] http = [] for service in services: if plugin_list.count(service) > 0: continue tasks_for_service = len(self.target_service(nmap_file, service).split("##")) - 1 total_tasks += tasks_for_service tasklist = "%s [ %s - %s tasks ]" % (tasklist, service, str(tasks_for_service)) for line in self.target_service(nmap_file, service).split("##"): if line.strip("\n"): ip = line.split(":")[0] port = line.split(":")[1] plugin_to_invoke = service service1 = plugin_to_invoke self.config.Set("%s_PORT_NUMBER" % service1.upper(), port) if service != 'http': plugin_list.append(plugin_to_invoke) http.append(port) logging.info("We have to probe %s:%s for service %s", str(ip), str(port), plugin_to_invoke) return http def scan_network(self, target): self.ping_sweep(target.split("//")[1], "full") self.dns_sweep("%s.ips" % PING_SWEEP_FILE, DNS_INFO_FILE) def probe_network(self, target, protocol, port): self.scan_and_grab_banners("%s.ips" % PING_SWEEP_FILE, FAST_SCAN_FILE, protocol, "-p %s" % port) return self.probe_service_for_hosts("%s.%s.gnmap" % (FAST_SCAN_FILE, protocol), target.split("//")[1])
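The port lookup above hinges on one regular expression applied to /usr/share/nmap/nmap-services. A standalone sketch of the same pattern against a sample line (the line below mimics the file's "service<TAB>port/proto<TAB>frequency<TAB># comment" layout and is not taken from a real install):

import re

regexp = '(.*?)\t(.*?/.*?)\t(.*?)($|\t)(#.*){0,1}'
line = "http\t80/tcp\t0.484143\t# World Wide Web HTTP\n"
match = re.findall(regexp, line)
if match:
    port, prot = match[0][1].split('/')
    print("%s/%s" % (port, prot))   # -> 80/tcp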
The Civil Liberty group and the Trade Union Solidarity have organised a demonstration outside the offices of the General Teaching Council (GTC) in Victoria Square, Birmingham, on May 24. Why? A teacher, Adam Walker, is facing charges before the GTC of 'unprofessional conduct'. The charge is about views he expressed on the Internet concerning politics and religion. It claims that the views expressed were "suggestive of racial and religious intolerance". The comments were made under a pseudonym and never mentioned his name or the name of the school where he worked. We believe that everyone must have the right to express their opinions (whether we agree with them or not). Freedom of expression is vital in a democracy. No one should be punished for expressing their opinion on religion or politics. The Solidarity Trade Union and Civil Liberty support the rights of citizens to express their varied opinions. If the GTC were successful in their case, it would put us on the slippery slope to censorship. It would make teachers frightened to express their views and take away their rights as citizens. That is why on May 24 we will be holding a protest on the theme "Make a noise for freedom!".
from django.db import models from django.utils import timezone from jsonfield import JSONField class Repository(models.Model): # github, gitbucket, etc... hub = models.CharField(max_length=191, db_index=True) # user user = models.CharField(max_length=191) # repository_name name = models.CharField(max_length=191) # username/reponame full_name = models.CharField(max_length=191, db_index=True) # Secret Key secret = models.CharField(max_length=191, db_index=True) def __str__(self): return '{}: {}'.format(self.hub, self.full_name) class DeploySetting(models.Model): repository = models.ForeignKey(Repository) branch = models.CharField(max_length=191) command = models.TextField() def __str__(self): return '{}: {}'.format(self.repository, self.branch) class HookLog(models.Model): data = JSONField() created_at = models.DateTimeField(auto_now_add=True) def __str__(self): dt_text = timezone.localtime(self.created_at).strftime('%Y-%m-%d %H:%M:%S') return '{}: {}'.format(dt_text, self.data.get('path')) class DeployLog(models.Model): log = models.TextField() return_code = models.IntegerField() created_at = models.DateTimeField(auto_now_add=True) def __str__(self): dt_text = timezone.localtime(self.created_at).strftime('%Y-%m-%d %H:%M:%S') return '{}: {}'.format(dt_text, self.return_code)
Flood Damage Carpet Restorations Macquarie Links? Call Clean Master's 24-hour emergency rapid response service in the event of an emergency. Call 0410 453 896 now! Flood water can seriously damage your property when it is not taken care of immediately. Don’t lose sleep over flood water damage. It is important to call in professionals on the same day to ensure your property isn’t irreversibly damaged. It is an emergency that should be attended to immediately! And we, Clean Master Macquarie Links, help you with that. Clean Master Macquarie Links is one of the leading cleaning service providers. We offer all kinds of cleaning services, including carpet cleaning, duct cleaning, curtain cleaning, tile & grout cleaning, mattress cleaning and upholstery cleaning. We also offer the best flood water damage restoration services to our customers with the help of our trained and highly skilled staff. Why is Clean Master Macquarie Links the best flood water damage restoration service in Macquarie Links? Whenever a carpet comes in contact with moisture, the risk of mould infestation increases. Mould thrives on moist or damp areas, and when a carpet is soaked in water it causes multiple problems; mould infestation is one of them. To counter mould infestation in the carpet, the use of effective cleaning agents and a thorough cleaning session is prescribed. At Clean Master Macquarie Links we have all the resources needed to do carpet mould removal properly. We provide mould removal as part of post-flood water damage carpet restoration in Macquarie Links. To book the service, simply call our numbers. Water extraction is an important step in carpet drying: it involves the use of heavy machinery to extract water from the carpet efficiently. A quality water-extraction machine and a skilled technician are essential for the task. At Clean Master Macquarie Links we understand the basic requirements of carpet water extraction, so we have hired the best technicians and use the industry's best carpet water-extraction machines. If you want to book our service, call our numbers and see the difference with your own eyes. Water damage is not something you expect to happen to your carpets; it is the last thing you would think could happen to your floor coverings. There could be a number of reasons leading to water damage, such as unexpected leaks, a backed-up sewer, a roof leak onto the carpet, a tap overflow and more. Such scenarios cause serious damage to carpets that cannot be restored at home. Therefore, at Clean Master Macquarie Links we offer specialised services for carpet flood restoration and water damage clean-up. Our water damage restoration services include carpet under-laying and relaying, and carpet stretching and re-stretching. Customers call our company for a free quotation and we never hesitate to offer this service! In fact, we are proud to state that we offer the best service at the lowest prices available in Macquarie Links.
import os import time import unittest import pytest from kafka.admin_client import AdminClient, NewTopic, NewPartitionsInfo from kafka.protocol.metadata import MetadataRequest from test.fixtures import ZookeeperFixture, KafkaFixture from test.testutil import KafkaIntegrationTestCase, env_kafka_version KAFKA_ADMIN_TIMEOUT_SECONDS = 5 class TestKafkaAdminClientIntegration(KafkaIntegrationTestCase): @classmethod def setUpClass(cls): if not os.environ.get('KAFKA_VERSION'): return cls.zk = ZookeeperFixture.instance() cls.server = KafkaFixture.instance(0, cls.zk) @classmethod def tearDownClass(cls): if not os.environ.get('KAFKA_VERSION'): return cls.server.close() cls.zk.close() @pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason='Unsupported Kafka Version') def test_create_delete_topics(self): admin = AdminClient(self.client_async) topic = NewTopic( name='topic', num_partitions=1, replication_factor=1, ) metadata_request = MetadataRequest[1]() response = admin.create_topics(topics=[topic], timeout=KAFKA_ADMIN_TIMEOUT_SECONDS) # Error code 7 means that RequestTimedOut but we can safely assume # that topic is created or will be created eventually. # see this https://cwiki.apache.org/confluence/display/KAFKA/ # KIP-4+-+Command+line+and+centralized+administrative+operations self.assertTrue( response[0].topic_errors[0][1] == 0 or response[0].topic_errors[0][1] == 7 ) time.sleep(1) # allows the topic to be created delete_response = admin.delete_topics(['topic'], timeout=1) self.assertTrue( response[0].topic_errors[0][1] == 0 or response[0].topic_errors[0][1] == 7 ) @pytest.mark.skipif(env_kafka_version() < (1, 0, 0), reason='Unsupported Kafka Version') def test_create_partitions(self): admin = AdminClient(self.client_async) topic = NewTopic( name='topic', num_partitions=1, replication_factor=1, ) metadata_request = MetadataRequest[1]() admin.create_topics(topics=[topic], timeout=KAFKA_ADMIN_TIMEOUT_SECONDS) time.sleep(1) # allows the topic to be created new_partitions_info = NewPartitionsInfo('topic', 2, [[0]]) response = admin.create_partitions([new_partitions_info], timeout=1, validate_only=False) self.assertTrue( response[0].topic_errors[0][1] == 0 or response[0].topic_errors[0][1] == 7 )
Now that gray wolves are once again under state management led by the Wyoming Game and Fish Department, the agency is again proposing gray wolf hunting seasons within the Trophy Game Management Area. Such was the case in 2012 and 2013 when Game and Fish also led management of the wolves. Game and Fish is holding public meetings on the proposed 2017 gray wolf hunting seasons and has opened a comment period to gather public input. The timing of the efforts allows for the final proposal to go to the regularly scheduled Wyoming Game and Fish Commission meeting in July. “We have developed a conservative hunting season framework in the Trophy Game Area for the public to consider and provide comments. Our track record in 2012 and 2013 shows that this approach will ensure we maintain a recovered population of wolves while providing opportunity for those who want to hunt gray wolves,” said Doug Brimeyer, deputy chief of the Game and Fish’s wildlife division. Other draft regulations may also be discussed at the meetings. Written comments on gray wolf hunting regulations will be accepted through June 19th at 5:00 p.m. at public meetings, online, or by mail to: Wyoming Game and Fish Department, Regulations, 3030 Energy Lane, Casper, WY 82604. Copies of the proposed regulations are available on the Game and Fish website and at the Casper Office. "Wolves outside the Trophy Game Management Area are considered predatory animals as defined in state law and therefore can be harvested. Any wolf harvested in the predator zone must be checked in to the Game and Fish within 10 days of harvest. The department does not manage for population viability outside the Trophy Game Management Area as wolves that occur in these areas are more likely to be involved in conflicts."
r""" ORDER NUMBER: Numerical, 7 positions. Unique numbering must be used for orders per payee's recipient agreement, 12 months + one day ahead. An atomic incrementing counter is used, modulu 1e8. A check is done to assert that order number is not reused in the last 12 months + 1 day. @todo implement that check! """ # @todo new name. does more than generate order numbers # Only store one row with one column #SQL_TABLE1 = 'CREATE TABLE order_number (next BIGINT)' # one row per order_number? use auto increment primary key? import sqlite3 conn = sqlite3.connect ('transmissions.db') cursor = conn.cursor() def _init_db(): cursor.execute ('create table keyval (key TEXT, val BIGINT)') cursor.execute ('insert into keyval values (?,?)', ('order_number', 1)) def next_order_number(): """Note: Will wrap around after 10 million numbers are generated""" oldlevel = conn.isolation_level try: conn.isolation_level = 'EXCLUSIVE' params = ('order_number',) cursor.execute ('select val from keyval where key=?', params) number = cursor.fetchone()[0] cursor.execute ('update keyval set val=val+1 where key=?', params) conn.commit() except sqlite3.OperationalError as ex: conn.rollback() # needed? done by default? if ex.message != 'no such table: keyval': raise ex _init_db() return next_order_number() finally: conn.isolation_level = oldlevel return number % int(1e8)
Conceived as a companion piece to The Club Watch 1999, Sun & Moon features a moving lunar disk on a midnight blue dial. Every 12 hours, the lunar disk eclipses a luminous sun set at 6 o’clock. The midnight blue strap shows the phases of a total eclipse of the sun, below the words Follow the Light. Sun & Moon was produced in a limited and numbered edition of 20,000 pieces and delivered in Special Packaging built in the image of a moon. Launched at a special event on the Monte da Lua (Mountain of the Moon) in Sintra, Portugal.
class patients(object): def __init__(self,number,scores,stats,participated,rank,actual_rank,overall): self.number = number # integer from 1-10. patient number. self.scores = scores # list of floats element of [0,10]. Floats are scores for a given exercise. self.stats = stats # for n exercises this is a list containing n lists of the form [avg,min,max], where avg is the patient's avg br for a given exercise, min is the patient's min br for a given exercise... etc. self.participated = participated # list of booleans. True if patient participated in a gievn exercise, False if a patient did not. self.rank = rank self.actual_rank = actual_rank self.overall = overall # float element of [0,10]. Overall scor of patient. # swap rows i and j of matrix A. def row_swap(A,i,j): row_i = A[i] A[i] = A[j] A[j] = row_i return A # In matrix A, add factor*row_i to row j. def row_add(A,i,j,factor): dim_col = len(A[0]) for k in range(0,dim_col): A[j][k] = A[j][k]+ factor*A[i][k] return A def zeros(n,m): output = [] for i in range(0,n): output.append([]) for j in range(0,m): output[i].append(0) return output def multiply(A,B): row_dim = len(A) col_dim = len(B[0]) sum_length = len(A[0]) AB = zeros(row_dim,col_dim) for i in range(0,row_dim): for j in range(0,col_dim): for k in range(0,sum_length): AB[i][j] = AB[i][j] + A[i][k]*B[k][j] return AB # Takes A,b from Ax = b and returns triangular matrix T along with modified b. def Gaussian(A,b): dim = len(A) for i in range(0,dim): if A[i][i] == 0: count = 0 while A[i+count][i] == 0: count += 1 if i+count > dim: return "failure" break row_swap(A,i,i+count) row_swap(b,i,i+count) for j in range(i+1,dim): row_add(b,i,j,-A[j][i]/A[i][i]) row_add(A,i,j,-A[j][i]/A[i][i]) return [A,b] A = [[1,2,3],[2,3,5],[5,2,4]] b = [[1],[2],[3]] Tb = Gaussian(A,b) T = Tb[0] b = Tb[1] def list_to_int(b): for i in range(0,len(b)): b[i] = b[i][0] return b # takes triangular matrix T, vector y and solves for x in Tx = y def backsub(T,y): y = list_to_int(y) dim = len(T) print T[dim-1][dim-1] x = [] for i in range(0,dim): x.append(0) x[dim-1] = y[dim-1]/float(T[dim-1][dim-1]) rows = reversed(range(0,dim-1)) for i in rows: x[i] = float(y[i]) for j in range(i+1,dim): x[i] -= T[i][j]*x[j] x[i] = x[i]/T[i][i] return x print backsub(T,b)
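One way to sanity-check the solver above is to multiply the computed solution back through the original matrix, reusing the multiply routine defined earlier. A small sketch follows; fresh copies of A and b are used because Gaussian mutates its arguments.

# Verify the solver: A2 * x2 should reproduce b2 (up to floating-point error).
A2 = [[1, 2, 3], [2, 3, 5], [5, 2, 4]]
b2 = [[1], [2], [3]]
T2, y2 = Gaussian([row[:] for row in A2], [row[:] for row in b2])
x2 = backsub(T2, y2)
residual = multiply(A2, [[xi] for xi in x2])
print([round(r[0], 10) for r in residual])   # expected: close to [1.0, 2.0, 3.0]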
Disconnect the iap sensor (2) from the vacuum hose. Disconnect the stva lead wire coupler (3). Disconnect secondary fuel injector couplers (4) and primary fuel injector couplers (5). Disconnect the isc valve coupler (8). Place a rag under the fuel feed hose (9) and disconnect the fuel feed hose from the fuel pump. models, drain fuel from the fuel tank before disconnecting the fuel feed hose to prevent fuel leakage. Loosen the throttle body clamp screws, left and right. Lift up the throttle body from the intake pipes. Disconnect the stp sensor coupler (10) and tp sensor coupler (11). after disconnecting the throttle cables, do not snap the throttle valve from full open to full close. It may cause damage to the throttle valve and throttle body. connect the primary injector couplers (1) and secondary injector couplers (2) to the respective fuel injectors. Make sure that each coupler is installed in the correct position. The color on each lead wire refers to the appropriate fuel injector. Connect the throttle cable no. 1 (3) And throttle cable no. 2 (4) To the throttle cable drum. Loosen each throttle cable lock-nut. Turn in each throttle cable adjuster fully and locate each outer cable so that the clearance “a” is 0 – 1 mm (0 – 0.04 In). Adjust the throttle cable play. Refer to “throttle cable play inspection and adjustment” in section 0b . Reset the isc valve and tp sensor learned values. Refer to “isc valve reset” and “tp reset” .
#!/usr/bin/env python import sys, os import cPickle as pickle from datetime import datetime from tacc_stats import cfg as cfg from tacc_stats.pickler import batch_acct,job_stats def main(**args): acct = batch_acct.factory(cfg.batch_system, cfg.acct_path, cfg.host_name_ext) reader = acct.find_jobids(args['jobid']).next() date_dir = os.path.join(cfg.pickles_dir, datetime.fromtimestamp(reader['end_time']).strftime('%Y-%m-%d')) pickle_file = os.path.join(date_dir, reader['id']) with open(pickle_file) as fd: data = pickle.load(fd) print "Hosts:", data.hosts.keys() if not args['host']: pass elif args['host'] in data.hosts: data.hosts = { args['host'] : data.hosts[args['host']] } else: print args['host'],"does not exist in", args['file'] return for host_name, host in data.hosts.iteritems(): print "Host:",host_name print "Types:",host.stats.keys() print host.marks if not args['type']: pass elif args['type'] in host.stats: host.stats = { args['type'] : host.stats[args['type']] } else: print args['type'],"does not exist in", args['file'] return for type_name, type_device in host.stats.iteritems(): print '' print "Type:", type_name print "Schema:", data.get_schema(type_name).keys() for device_name, device in type_device.iteritems(): print "Device:",device_name print device if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Print job using Job ID or pickle file path') parser.add_argument('file', help='Pickle file to print', nargs='?', type=str) parser.add_argument('-jobid', help='Job ID to print', nargs='+', type=str) parser.add_argument('-type', help='Restrict print to this type') parser.add_argument('-host', help='Restrict print to this host') main(**vars(parser.parse_args()))
HRSA is soliciting applications for their “Telehealth Network Grant Program” (TNGP) to help telehealth programs and networks improve access to quality healthcare services in rural, frontier, and underserved communities. HRSA is looking for grant responses with innovative applications that meet new and emerging needs in a changing healthcare delivery system focusing on value and improved healthcare outcomes. Even though grant activities must serve rural communities, the grantee may be located in either urban or rural areas. In addition, all applicants are required to identify the areas of telehealth that will be the focus of the project. TNGPs may provide services in any variety of settings including long-term care facilities, community health centers or clinics, physician offices, hospitals, schools, and assisted living facilities to demonstrate how telehealth networks can meet the goals of the program. Applicants need to have a successful track record in implementing telehealth technology and have a network of partners in place committed to the project. Projects selected for funding must provide clinical services where performance measures can be developed. HRSA strongly recommends emphasizing clinical services that focus on one or more chronic disease states of high priority such as CHF, cancer, strokes, chronic respiratory disease and/or diabetes. Grantees will be required to participate in the Office for the Advancement of Telehealth’s data collection and evaluation efforts as a condition for accepting TNGP funding. The data collected must include six month progress reports, annual reports, information for the grantee directory, and a final grant project report. The grant notice was posted on December 13, 2012. Estimated amount of funding for this competition (HRSA 13-166) will be $2,250,000 with nine awards estimated. The average size of the awards is estimated to be $250,000. The application deadline is February 13, 2013 with the anticipated award date to be September 1, 2013. For more information, go to www.grants.gov.
from django.test import SimpleTestCase from django.conf import settings from stats.models import BlockedCountry from stats.packages.blocked_country import BlockedCountryPackageBuilder import time class TestBlockedCountryPackageBuilder(SimpleTestCase): def setUp(self): self.builder = BlockedCountryPackageBuilder() if not BlockedCountry.exists(): BlockedCountry.create_table() time.sleep(settings.TESTING_SLEEP_TIME) self.item1 = BlockedCountry(country_code='US', country_name='United States', count=22) self.item1.save() self.item2 = BlockedCountry(country_code='TH', country_name='Thailand', count=3000) self.item2.save() self.item3 = BlockedCountry(country_code='SG', country_name='Singapore', count=12094) self.item3.save() self.item4 = BlockedCountry(country_code='AL', country_name='Albania', count=3) self.item4.save() self.item5 = BlockedCountry(country_code='MA', country_name='Morocco', count=34123) self.item5.save() self.item6 = BlockedCountry(country_code='PE', country_name='Peru', count=50) self.item6.save() def tearDown(self): BlockedCountry.delete_table() time.sleep(settings.TESTING_SLEEP_TIME) def test_get_top_5(self): objects = self.builder.get_top_5_objects() self.assertEqual(len(objects), 5) self.assertEqual(objects[0].count, 34123) self.assertEqual(objects[1].count, 12094) self.assertEqual(objects[2].count, 3000) self.assertEqual(objects[3].count, 50) self.assertEqual(objects[4].count, 22) def test_render_each_object(self): content = self.builder.render_each_object(self.item5) self.assertIn('{', content) self.assertIn('"country_name": "Morocco"', content) self.assertIn('"count": "34,123"', content) self.assertIn('}', content) def test_render_all_objects(self): content = self.builder.render_all_objects() self.assertIn('{', content) self.assertIn('"country_name": "Morocco"', content) self.assertIn('"count": "34,123"', content) self.assertIn('}', content) self.assertNotEqual(',', content[-1]) def test_render_as_javascript(self): content = self.builder.render_as_javascript() expected_content = u''']''' self.assertIn(expected_content, content)
ASPnix Live Chat Premium Support provides a highly personalized level of service for customers seeking technical help. Customers that do not select Premium Support will continue to have access to our Standard Support, offered at no additional charge, which includes the Knowledge Base and Community Forums. Live Premium Chat offers LIVE one on one professional support with one of our highly skilled technical support agents! Premium chats take precedence over standard chats and support tickets so that your issue is addressed and solved as quickly as possible! Remote assistance is included with all premium support packages including Live Premium Chat.
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-04-26 12:51 from __future__ import unicode_literals from django.conf import settings from django.core.management import call_command from django.db import migrations, models import django.db.models.deletion def load_dept_structure(apps, schema_editor): call_command('loaddata', 'cuedmembers/divisions_and_research_groups.json') class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Division', fields=[ ('letter', models.CharField(max_length=1, primary_key=True, serialize=False)), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='Member', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_names', models.CharField(blank=True, default='', max_length=100)), ('is_active', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='ResearchGroup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ('division', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='research_groups', to='cuedmembers.Division')), ], ), migrations.AddField( model_name='member', name='research_group', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='members', to='cuedmembers.ResearchGroup'), ), migrations.AddField( model_name='member', name='user', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='cued_member', to=settings.AUTH_USER_MODEL), ), migrations.RunPython(load_dept_structure), ]
How can I find strength to work through my daily anxieties? When I look back over the highs & lows of my life, I sometimes wonder why God kept the darkness. When the pressure of everyday life leads to sleepless nights and the darkening of the soul, the darkness doesn’t help. Sometimes we get so caught up in the anxiety of the day that we feel there is no light for us. Then I reflect on the trials and tribulations that Jesus warns us about in today’s gospel. He calls us to be ready and prepare for our redemption through the Light of Christ. I worry and struggle many times to find ways “to be prepared.” However, when you read carefully, this gospel shows us how to be vigilant. It reminds us that God has given us Christ’s light even in the darkness. When we find ourselves in the darkness, the light can come to us from the love of family and friends who want to help. We can feel light and love at prayerful times with the Lord privately or at church with the faith community. We need to choose to make room for God and others to enter our life and give us strength. Being Christ’s light for others can help us be prepared too, especially during this Advent season as we welcome those who aren’t at church regularly. We can share Christ’s light and compassion by greeting new faces and smiling at stressed families with young children. Sometimes the view can be better in the middle of the seat row instead of the end, and it just takes a moment to leave parking spaces close to the church to welcome others. It’s surprising how Christ’s light and compassion can fill you when you reach out and help others.
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from gslib.help_provider import HELP_NAME from gslib.help_provider import HELP_NAME_ALIASES from gslib.help_provider import HELP_ONE_LINE_SUMMARY from gslib.help_provider import HelpProvider from gslib.help_provider import HELP_TEXT from gslib.help_provider import HelpType from gslib.help_provider import HELP_TYPE _detailed_help_text = (""" <B>OVERVIEW OF METADATA</B> Objects can have associated metadata, which control aspects of how GET requests are handled, including Content-Type, Cache-Control, Content-Disposition, and Content-Encoding (discussed in more detail in the subsections below). In addition, you can set custom metadata that can be used by applications (e.g., tagging that particular objects possess some property). There are two ways to set metadata on objects: - at upload time you can specify one or more headers to associate with objects, using the gsutil -h option. For example, the following command would cause gsutil to set the Content-Type and Cache-Control for each of the files being uploaded: gsutil -h "Content-Type:text/html" -h "Cache-Control:public, max-age=3600" cp -r images gs://bucket/images Note that -h is an option on the gsutil command, not the cp sub-command. - You can set or remove metadata fields from already uploaded objects using the gsutil setmeta command. See "gsutil help setmeta". More details about specific pieces of metadata are discussed below. <B>CONTENT TYPE</B> The most commonly set metadata is Content-Type (also known as MIME type), which allows browsers to render the object properly. gsutil sets the Content-Type automatically at upload time, based on each filename extension. For example, uploading files with names ending in .txt will set Content-Type to text/plain. If you're running gsutil on Linux or MacOS and would prefer to have content type set based on naming plus content examination, see the use_magicfile configuration variable in the gsutil/boto configuration file (See also "gsutil help config"). In general, using use_magicfile is more robust and configurable, but is not available on Windows. If you specify a -h header when uploading content (like the example gsutil command given in the previous section), it overrides the Content-Type that would have been set based on filename extension or content. This can be useful if the Content-Type detection algorithm doesn't work as desired for some of your files. You can also completely suppress content type detection in gsutil, by specifying an empty string on the Content-Type header: gsutil -h 'Content-Type:' cp -r images gs://bucket/images In this case, the Google Cloud Storage service will attempt to detect the content type. In general this approach will work better than using filename extension-based content detection in gsutil, because the list of filename extensions is kept more current in the server-side content detection system than in the Python library upon which gsutil content type detection depends. 
(For example, at the time of writing this, the filename extension ".webp" was recognized by the server-side content detection system, but not by gsutil.) <B>CACHE-CONTROL</B> Another commonly set piece of metadata is Cache-Control, which allows you to control whether and for how long browser and Internet caches are allowed to cache your objects. Cache-Control only applies to objects with a public-read ACL. Non-public data are not cacheable. Here's an example of uploading an object set to allow caching: gsutil -h "Cache-Control:public,max-age=3600" cp -a public-read -r html gs://bucket/html This command would upload all files in the html directory (and subdirectories) and make them publicly readable and cacheable, with cache expiration of one hour. Note that if you allow caching, at download time you may see older versions of objects after uploading a newer replacement object. Note also that because objects can be cached at various places on the Internet there is no way to force a cached object to expire globally (unlike the way you can force your browser to refresh its cache). <B>CONTENT-ENCODING</B> You could specify Content-Encoding to indicate that an object is compressed, using a command like: gsutil -h "Content-Encoding:gzip" cp *.gz gs://bucket/compressed Note that Google Cloud Storage does not compress or decompress objects. If you use this header to specify a compression type or compression algorithm (for example, deflate), Google Cloud Storage preserves the header but does not compress or decompress the object. Instead, you need to ensure that the files have been compressed using the specified Content-Encoding before using gsutil to upload them. For compressible content, using Content-Encoding:gzip saves network and storage costs, and improves content serving performance (since most browsers are able to decompress objects served this way). Note also that gsutil provides an easy way to cause content to be compressed and stored with Content-Encoding:gzip: see the -z option in "gsutil help cp". <B>CONTENT-DISPOSITION</B> You can set Content-Disposition on your objects, to specify presentation information about the data being transmitted. Here's an example: gsutil -h 'Content-Disposition:attachment; filename=filename.ext' \\ cp -r attachments gs://bucket/attachments Setting the Content-Disposition allows you to control presentation style of the content, for example determining whether an attachment should be automatically displayed vs should require some form of action from the user to open it. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 for more details about the meaning of Content-Disposition. <B>CUSTOM METADATA</B> You can add your own custom metadata (e.g,. for use by your application) to an object by setting a header that starts with "x-goog-meta", for example: gsutil -h x-goog-meta-reviewer:jane cp mycode.java gs://bucket/reviews You can add multiple differently named custom metadata fields to each object. <B>SETTABLE FIELDS; FIELD VALUES</B> You can't set some metadata fields, such as ETag and Content-Length. The fields you can set are: - Cache-Control - Content-Disposition - Content-Encoding - Content-Language - Content-MD5 - Content-Type - Any field starting with X-GOOG-META- (i.e., custom metadata). Header names are case-insensitive. X-GOOG-META- fields can have data set to arbitrary Unicode values. All other fields must have ASCII values. 
<B>VIEWING CURRENTLY SET METADATA</B> You can see what metadata is currently set on an object by using: gsutil ls -L gs://the_bucket/the_object """) class CommandOptions(HelpProvider): """Additional help about object metadata.""" help_spec = { # Name of command or auxiliary help info for which this help applies. HELP_NAME : 'metadata', # List of help name aliases. HELP_NAME_ALIASES : ['cache-control', 'caching', 'content type', 'mime type', 'mime', 'type'], # Type of help: HELP_TYPE : HelpType.ADDITIONAL_HELP, # One line summary of this help. HELP_ONE_LINE_SUMMARY : 'Working with object metadata', # The full help text. HELP_TEXT : _detailed_help_text, }
So what's basically happened is, I followed the download link on the blag website and was met with a standard log-in dialogue for an FTP server hosted by the FSF. I don't have an account with the FSF. Is there an alternate mirror, or is there a Guest account on that server that I can use? I really think that this OS is great! You remove claws and replace it with sylpheed, add emacs, and you'll have something like the blag 160k I'd like to spin. I tried to spin new ISOs but something went wrong at the end of the process. I didn't find out what, and didn't find help. So for now I'm blocked on that. PS: I built new ks files to spin it, but I lost them when there was a crash on the justhost servers where zenblagger is hosted.
import pygame

class Text():
    def __init__(self, pos, text = "", textSize = 12, textColor=(255,255,255), font = None):
        self.text = text
        self.textColor = textColor
        self.font = pygame.font.Font(font, textSize)
        self.image = self.font.render(self.text, 1, textColor)
        self.rect = self.image.get_rect()
        self.place(pos)

    def place(self, pos):
        self.rect.center = pos

    def setText(self, text):
        self.text = text
        # Re-render using this Text's stored colour.
        self.image = self.font.render(text, 1, self.textColor)
        self.rect = self.image.get_rect(center = self.rect.center)

    def update(self, width, height):
        pass

class Score(Text):
    def __init__(self, pos, baseText = "Score: ", textSize = 12, textColor=(255,255,255), font = None):
        self.score = 0
        self.baseText = baseText
        self.text = self.baseText + str(self.score)
        Text.__init__(self, pos, self.text, textSize, textColor, font)
        self.change = False

    def setText(self, text):
        self.baseText = text
        self.change = True

    def update(self):
        if self.change:
            self.text = self.baseText + str(self.score)
            self.image = self.font.render(self.text, 1, self.textColor)
            self.rect = self.image.get_rect(center = self.rect.center)
            self.change = False

    def setScore(self, score):
        self.score = score
        self.change = True

    def increaseScore(self, amount = 1):
        self.score += amount
        self.change = True

    def resetScore(self):
        self.score = 0
        self.change = True
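A minimal usage sketch for the classes above, assuming they are in scope and pygame is installed; the window size, positions, and texts are arbitrary.

# Minimal usage sketch for Text/Score (not part of the original module).
import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()

title = Text((160, 40), "Hello", textSize=24)
score = Score((160, 120), "Score: ", textSize=18)

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    score.increaseScore()   # bump the score each frame
    score.update()          # re-renders only when the score changed
    screen.fill((0, 0, 0))
    screen.blit(title.image, title.rect)
    screen.blit(score.image, score.rect)
    pygame.display.flip()
    clock.tick(30)
pygame.quit()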
YELLOW- Allow at least 5-10 business days or more from the day your order was submitted. GREEN- Allow 1-5 business days or more from the day your order was submitted. **RED during the months of APRIL and MAY means that you should allow 3-4 weeks for orders, or we CANNOT guarantee your order will be finished by your deadline. To find Acrylics, Certificates, Medals, Plaques, Resins, Ribbons & Pins, or Trophies, hover your pointer over the AWARDS tab and select a category from the menu. Did you know Award Solutions co-owner, Sabrina Fish, is also a published author? If you like Fantasy Romance or YA Fantasy, and the covers above look interesting to you, click this link to check them out. Don't forget to leave a review on Amazon, Goodreads, Barnes & Noble, iBooks, etc., if you like what you read!!! Award Solutions of Oklahoma, LLC is a family owned and operated business located in the South Oklahoma City / West Moore area. We are committed to providing our customers with quality service and a friendly face. Browse our site for a few examples of the products we carry and the services we provide. All orders must be submitted typed, either in the body of an e-mail OR in a Word doc/Excel spreadsheet attached to an e-mail. PLEASE NO GOOGLE DOC SHARING or other doc-sharing services. Just attach to the e-mail. PO # MUST be in the body of the email with your order. We do not need the actual PO document. Event date must be included in the email. Please limit engraving on medals. Set-up charges may apply if a large quantity of awards needs individual names. Please select ONE ribbon color for your entire order for medals. Allowing each teacher to select their favorite ribbon color slows down production. Check spelling of names and text carefully. As always, we look forward to another successful end-of-school season. Did you know Award Solutions co-owner, Sabrina Fish, is a published author? If you love Fantasy Romance, please check out her award-winning series, The Gate Keeper Chronicles. Trophy above donated to the Oklahoma City Writers, Inc. for their annual contest. Medals and Plaques above donated to Edmond Memorial Inaugural Hog Job benefiting Limbs for Life.
class Event(object):
    _event_groups = {}

    @classmethod
    def get_events(cls, key="Default"):
        return cls._event_groups.get(key, [])

    def __init__(self, name, msg=None, group_key="Default"):
        self._name = name
        self._msg = msg
        self._my_group = group_key
        groups = Event._event_groups
        groups[group_key] = groups.get(group_key, []) + [self,]
        return

    def localize(self, msg):
        return Event(self.name, msg, self._my_group)

    @property
    def name(self):
        return self._name

    @property
    def msg(self):
        return self._msg

    def __hash__(self):
        return hash(self._name)

    def __ne__(self, other):
        # Inequality is the negation of __eq__.
        return not (self == other)

    def __str__(self):
        return self._name

    def __repr__(self):
        return self._name

    def __eq__(self, other):
        return self._name == other._name


if __name__ == "__main__":
    grp = "a group"
    a = Event("a", "a msg", grp)
    b = Event("b", "b msg")
    c = Event("c", "c msg", grp)
    print(c.__dict__)
It makes a point in a very delicate way. Bo & Mei deals with intimate issues of racial tolerance, gender identification and familial love. It is about a second-generation Chinese immigrant family living in the UK. The father, Long-Wei, owns a dry cleaners and experiences racial prejudice from a customer. Long-Wei, however, is not without his own prejudices: he finds it difficult to accept his son Bo, who wants to wear female clothing. With the love and encouragement of his younger sister Mei, Bo finds the strength to stand up for the things that make him different by defending his culture to the bigoted customer. He shows his father that strength comes from within, regardless of what we are on the outside.
import requests import logging import urlparse from django.conf import settings root = urlparse.urljoin(settings.RESTMOTE_HOST + ":" + settings.RESTMOTE_PORT, settings.RESTMOTE_API_ROOT) def get_data(url): if hasattr(settings, "RESTMOTE_USER") and hasattr(settings, "RESTMOTE_PASSWORD"): r = requests.get(url, timeout=15, auth=(settings.RESTMOTE_USER, settings.RESTMOTE_PASSWORD)) else: r = requests.get(url, timeout=15) if r.status_code == 200: logging.info(url) logging.info(r.json()) return True, r.json() else: logging.info("Connection failed: %s" % r.text) return False, [] def build_objects(obj_class, obj_string, data, field_bindings, nested=[]): for e in data: try: o = obj_class.objects.get(**{'id' + obj_string: e["id"]}) except obj_class.DoesNotExist: o = obj_class() for f in [x for x in e if x in field_bindings]: setattr(o, field_bindings[f], e[f]) for n in nested: for f in [x for x in e[n] if x in field_bindings]: setattr(o, field_bindings[f], e[n][f]) setattr(o, "id" + obj_string, e["id"]) o.save() logging.info("Added %s: %s" % (obj_string, o.pk)) def sync_objects(url, qfilter, obj_class, obj_string, field_bindings, nested=[]): status, data = get_data(root + url + '?' + qfilter) if status: build_objects(obj_class, obj_string, data, field_bindings, nested) return 1 else: return 0 def remove_objects(url, obj_class, obj_string): status, remote_ids = get_data(root + url) if status: local_ids = obj_class.objects.values_list('id' + obj_string, flat=True) must_remove = list(set(local_ids).difference(remote_ids)) obj_class.objects.filter(**{'id' + obj_string + '__in': must_remove}).delete() if must_remove: logging.info("Deleted %s: %s" % (obj_string, ', '.join(str(x) for x in must_remove))) return 1 else: return 0
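A sketch of how the helpers above might be called from a Django project. Everything named below is an assumption made for illustration: the Person model (with an idperson field), the /persons endpoints, the query filter, and the field bindings are all invented, not part of this module.

# Hypothetical usage of sync_objects/remove_objects; assumes the functions
# above are importable and that a Django model like this exists:
#   class Person(models.Model):
#       idperson = models.IntegerField(unique=True)
#       first_name = models.CharField(max_length=100)
#       last_name = models.CharField(max_length=100)
from myapp.models import Person  # invented model

PERSON_FIELDS = {
    'firstName': 'first_name',   # remote field -> local model field
    'lastName': 'last_name',
}

def sync_persons():
    # '/persons?active=1' is assumed to return a list of dicts with an 'id' key;
    # '/persons/ids' is assumed to return the full list of remote ids.
    if sync_objects('/persons', 'active=1', Person, 'person', PERSON_FIELDS):
        remove_objects('/persons/ids', Person, 'person')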
Years and years ago now, there was a project called The Broken Music Box. The project was largely manned by Phil Indrick, and supported by the Uncondemning Monk, as we would work to build various beats and textures. The original project is somewhat lost, and amongst what is lost is a soundtrack to the movie Nosferatu and another for Metropolis. If we can ever get a hold of Phil, we hope to get him to finally launch either of those projects, as they were intended as live soundtracks played to these two films, shown independently of each other of course, or at least serially. But what we do have to offer is a take of the Broken Music Box Project, called A Broken Music Box: 23 Letters Collection (missing letters), exhibiting the potential of what was and may still be, if lines of transmission can reach out to Phil, lest we start another broken music box machine. Very soon, today-tomorrow soon, we, Gothick and Thee Uncondemning Monk (Thee UM), as well as guest collaborator Obietk 172, will be putting out a split LP; the middle songs of the split will be the collaborative work, leading from Gothick to Thee UM. So, we are working hard to get this ready for you all. Please stay tuned on our Facebook site to get the LP as soon as it becomes available. More work is underway, and we will be updating you all very soon. For those interested in understanding more fully the personae(s) of Thee Uncondemning Monk, please see our Facebook Break Down/Identity Math.
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import posixpath from django.db import models from django.contrib.auth.models import User from django.contrib.contenttypes import generic from django.core.urlresolvers import reverse from django.utils.translation import ugettext as _, ugettext_lazy as _t from desktop.lib.exceptions_renderable import PopupException from desktop.models import Document as Doc, SAMPLE_USER_ID from hadoop.fs.hadoopfs import Hdfs class Document(models.Model): owner = models.ForeignKey(User, db_index=True, verbose_name=_t('Owner'), help_text=_t('User who can modify the job.')) is_design = models.BooleanField(default=True, db_index=True, verbose_name=_t('Is a user document, not a document submission.'), help_text=_t('If the document is not a submitted job but a real query, script, workflow.')) def is_editable(self, user): # Deprecated return user.is_superuser or self.owner == user def can_edit_or_exception(self, user, exception_class=PopupException): # Deprecated if self.is_editable(user): return True else: raise exception_class(_('Only superusers and %s are allowed to modify this document.') % user) class PigScript(Document): _ATTRIBUTES = ['script', 'name', 'properties', 'job_id', 'parameters', 'resources', 'hadoopProperties'] data = models.TextField(default=json.dumps({ 'script': '', 'name': '', 'properties': [], 'job_id': None, 'parameters': [], 'resources': [], 'hadoopProperties': [] })) doc = generic.GenericRelation(Doc, related_name='pig_doc') def update_from_dict(self, attrs): data_dict = self.dict for attr in PigScript._ATTRIBUTES: if attrs.get(attr) is not None: data_dict[attr] = attrs[attr] if 'name' in attrs: self.doc.update(name=attrs['name']) self.data = json.dumps(data_dict) @property def dict(self): return json.loads(self.data) def get_absolute_url(self): return reverse('pig:index') + '#edit/%s' % self.id @property def use_hcatalog(self): script = self.dict['script'] return ('org.apache.hcatalog.pig.HCatStorer' in script or 'org.apache.hcatalog.pig.HCatLoader' in script) or \ ('org.apache.hive.hcatalog.pig.HCatLoader' in script or 'org.apache.hive.hcatalog.pig.HCatStorer' in script) # New classes @property def use_hbase(self): script = self.dict['script'] return 'org.apache.pig.backend.hadoop.hbase.HBaseStorage' in script def create_or_update_script(id, name, script, user, parameters, resources, hadoopProperties, is_design=True): try: pig_script = PigScript.objects.get(id=id) if id == str(SAMPLE_USER_ID): # Special case for the Example, just create an history is_design = False raise PigScript.DoesNotExist() pig_script.doc.get().can_write_or_exception(user) except PigScript.DoesNotExist: pig_script = PigScript.objects.create(owner=user, is_design=is_design) Doc.objects.link(pig_script, owner=pig_script.owner, 
name=name) if not is_design: pig_script.doc.get().add_to_history() # A user decided eventually to save an unsaved script after execution: if is_design and pig_script.doc.get().is_historic(): pig_script.doc.get().remove_from_history() pig_script.update_from_dict({ 'name': name, 'script': script, 'parameters': parameters, 'resources': resources, 'hadoopProperties': hadoopProperties }) return pig_script def get_scripts(user, is_design=None): scripts = [] data = Doc.objects.available(PigScript, user) if is_design is not None: data = [job for job in data if job.is_design] for script in data: data = script.dict massaged_script = { 'id': script.id, 'docId': script.doc.get().id, 'name': data['name'], 'script': data['script'], 'parameters': data['parameters'], 'resources': data['resources'], 'hadoopProperties': data.get('hadoopProperties', []), 'isDesign': script.is_design, 'can_write': script.doc.get().can_write(user) } scripts.append(massaged_script) return scripts def get_workflow_output(oozie_workflow, fs): # TODO: guess from the Input(s):/Output(s) output = None if 'workflowRoot' in oozie_workflow.conf_dict: output = oozie_workflow.conf_dict.get('workflowRoot') if output and not fs.exists(output): output = None return output def hdfs_link(url): if url: path = Hdfs.urlsplit(url)[2] if path: if path.startswith(posixpath.sep): return "/filebrowser/view=" + path else: return "/filebrowser/home_relative_view=/" + path else: return url else: return url
Personalized chopsticks are a fun and inexpensive way to give your guests something useful and endearing. And because they'll take them home, they will remember the occasion for years to come. There are a variety of colorful and fun chopstick designs that can be personalized with your message and fun graphics or logo. For weddings or other parties with a specific color scheme, tie a contrasting bow around each pair to match the table decor. Other unique treatments include sleeves, cards, or chopstick rests. You will find the largest selection of personalized chopsticks at EverythingChopsticks.com.

Another terrific personalized Asian gift idea is personalized chopstick sleeves. Unlike the small space available on chopsticks themselves, a chopstick sleeve is a mini billboard waiting to be personalized with your message, photos, graphics and color. You can select from a multitude of colorful and fun chopsticks to include in the sleeves. A favorite for any party occasion, personalized chopstick sleeves are particularly well suited for business or corporate events and promotions, as they allow for greater use of color and imagery such as logos. Find personalized chopstick sleeves at EverythingChopsticks.com.

Another option is a personalized chopstick box. These have a much larger engraving space than chopsticks and can be paired with a variety of beautiful chopstick styles. EverythingChopsticks.com has a few styles of personalized chopstick boxes. Several online stores offer personalized fortune cookies with messages inside. Personalized fortune cookies are totally fun and can be used for a variety of events; one online vendor is Personalized-Fortune-Cookies.com.

Our personalized Asian gift ideas are perfect for any Asian-themed party, birthday, bar or bat mitzvah, wedding, or anniversary event. They're also excellent for corporate events, promotions, or gifts, as they provide a unique and memorable way to promote branding and awareness. Guests will delight in the creativity and fun of our Asian gift ideas. Share your personalized Asian gift ideas!
from Models import query_all_requests, organize_data_model from WhatApi import get_login, similar, GazelleAPIMod, \ get_requests_soup, parse_requests_page, match_two_sets, \ filter_torrent_alphabetically u_name, pw = get_login() # user = UserSession(user_name=u_name, password=pw) def update_album_requests(): exists = False pages = list(range(1, 1000)) for page in pages: soup = get_requests_soup(page=page) parsed_soup = parse_requests_page(soup) exists = organize_data_model(parsed_soup) if exists: return page def find_matches(): """ Finds matches between existing Album Requests and existing torrents on What.cd Takes into account Artist Name Album Name Acceptable Formats (FLAC, FLAC 24, MP3) Acceptable Bitrates (Lossless, Lossy) Acceptable Media (WEB, CD) """ matches = {} what_object = GazelleAPIMod(username=u_name, password=pw) # Query all of our requests from our stored database all_requests = [(x.id, x.name) for x in query_all_requests() if x.name.find('-') >= 0] for req_id, full_name in all_requests: name_part = full_name.split('-') artist = name_part[0].strip() album = name_part[1].strip() request_object = what_object.request_search_by_id(req_id) # Query API with artist name - returns all existing artist material artist_data = what_object.get_artist_json(artist) # torrentgroup keyword filters just torrents, removing metadata torrent_groups = artist_data.get('torrentgroup', []) # artist_id = artist_query['id'] filtered_groups = filter_torrent_alphabetically(torrent_groups, album) # Iterate over torrent groups for torrent_group in filtered_groups: torrent_group_album = torrent_group['groupName'] if similar(album, torrent_group_album, threshold=0.8): matches[request_object] = [torrent_group] print(matches) # bitrate = set(request_object.acceptable_bitrates) _format = set(request_object.acceptable_formats) media = set(request_object.acceptable_media) # Iterate individual torrents for tor in torrent_group['torrent']: tor_format = tor['format'] tor_media = tor['media'] # tor_bitrate = tor['encoding'] tor_id = tor['id'] format_match = match_two_sets(set(tor_format), _format) media_match = match_two_sets(media, set(tor_media)) if format_match and media_match: package = (req_id, tor_id) with open('matches.txt', 'a+') as f: f.write("Request Id: {}\nTorrent Id: {}\n\n" .format(package[0], package[1])) return matches if __name__ == '__main__': # find_matches() update_album_requests()
Extremely rare find. Side-by-side duplex in lovely Louisville. Close to jobs, schools and shopping. Features a recent roof replacement, partially finished and separated basements, and separate utilities. Units have been well maintained. One of the units appears to have a bonus room off the garage which may serve as flex space, a potential 3rd bedroom, study, etc. Excellent opportunity. Tenant occupied.
# -*- coding: utf-8 -*- # # gPrime - A web-based genealogy program # # Copyright (C) 2004-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Bulgarian-specific classes for parsing and displaying dates. """ #------------------------------------------------------------------------- # # Python modules # #------------------------------------------------------------------------- import re #------------------------------------------------------------------------- # # Gprime modules # #------------------------------------------------------------------------- from ..lib.date import Date from ._dateparser import DateParser from ._datedisplay import DateDisplay from ._datehandler import register_datehandler #------------------------------------------------------------------------- # # Bulgarian parser # #------------------------------------------------------------------------- class DateParserBG(DateParser): modifier_to_int = { 'преди' : Date.MOD_BEFORE, 'пр.' : Date.MOD_BEFORE, 'пр' : Date.MOD_BEFORE, 'след' : Date.MOD_AFTER, 'сл.' : Date.MOD_AFTER, 'сл' : Date.MOD_AFTER, 'ок' : Date.MOD_ABOUT, 'ок.' : Date.MOD_ABOUT, 'около' : Date.MOD_ABOUT, 'примерно' : Date.MOD_ABOUT, 'прим' : Date.MOD_ABOUT, 'прим.' : Date.MOD_ABOUT, 'приблизително' : Date.MOD_ABOUT, 'приб.' : Date.MOD_ABOUT, 'прибл.' : Date.MOD_ABOUT, 'приб' : Date.MOD_ABOUT, 'прибл' : Date.MOD_ABOUT, } calendar_to_int = { 'григориански' : Date.CAL_GREGORIAN, 'г' : Date.CAL_GREGORIAN, 'юлиански' : Date.CAL_JULIAN, 'ю' : Date.CAL_JULIAN, 'еврейски' : Date.CAL_HEBREW, 'е' : Date.CAL_HEBREW, 'ислямски' : Date.CAL_ISLAMIC, 'и' : Date.CAL_ISLAMIC, 'френски републикански' : Date.CAL_FRENCH, 'републикански' : Date.CAL_FRENCH, 'фр.реп.' : Date.CAL_FRENCH, 'р' : Date.CAL_FRENCH, 'френски' : Date.CAL_FRENCH, 'фр.' : Date.CAL_FRENCH, 'персийски' : Date.CAL_PERSIAN, 'п' : Date.CAL_PERSIAN, } quality_to_int = { 'приблизително' : Date.QUAL_ESTIMATED, 'прибл.' : Date.QUAL_ESTIMATED, 'изчислено' : Date.QUAL_CALCULATED, 'изчисл.' : Date.QUAL_CALCULATED, 'изч.' : Date.QUAL_CALCULATED, } hebrew_to_int = { "тишрей":1, "мархешван":2, "кислев":3, "тевет":4, "шват":5, "адар":6, "адар бет":7, "нисан":8, "ияр":9, "сиван":10, "тамуз":11, "ав":12, "eлул":13, } islamic_to_int = { "мухаррам":1, "саффар":2, "рабиу-л-ауал":3, "рабиу-с-сани":4, "джумадал-уля":5, "джумада-с-сания":6, "раджаб":7, "шаабан":8, "рамадан":9, "шауал":10, "зу-л-кида":11, "зул-л-хиджа":12, } persian_to_int = { "фарвардин":1, "урдбихищ":2, "хурдад":3, "тир":4, "мурдад":5, "шахривар":6, "михр":7, "абан":8, "азар":9, "дай":10, "бахман":11, "исфаидармуз":12, } french_to_int = { "вандемер":1, "брюмер":2, "фример":3, "нивоз":4, "плювиоз":5, "вантоз":6, "жерминал":7, "флореал":8, "прериал":9, "месидор":10, "термидор":11, "фрюктидор":12, "допълнителен":13, } bce = [ 'преди Христа', 'пр. Хр.', 'пр.Хр.' 
] + DateParser.bce def init_strings(self): DateParser.init_strings(self) _span_1 = ['от'] _span_2 = ['до'] _range_1 = ['между'] _range_2 = ['и'] self._span = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" % ('|'.join(_span_1), '|'.join(_span_2)), re.IGNORECASE) self._range = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" % ('|'.join(_range_1), '|'.join(_range_2)), re.IGNORECASE) #------------------------------------------------------------------------- # # Bulgarian displayer # #------------------------------------------------------------------------- class DateDisplayBG(DateDisplay): """ Bulgarian language date display class. """ long_months = ( "", "януари", "февруари", "март", "април", "май", "юни", "юли", "август", "септември", "октомври", "ноември", "декември" ) short_months = ( "", "яну", "февр", "март", "апр", "май", "юни", "юли", "авг", "септ", "окт", "ное", "дек" ) calendar = ( "", "юлиански", "еврейски", "републикански", "персийски", "ислямски", "шведски" ) _mod_str = ("", "преди ", "след ", "около ", "", "", "") _qual_str = ("", "приблизително ", "изчислено ") _bce_str = "%s пр. Хр." formats = ( "ГГГГ-ММ-ДД (ISO)", "Числов", "Месец Ден, Година", "Мес. Ден, Година", "Ден Месец Година", "Ден Мес. Година" ) # this must agree with DateDisplayEn's "formats" definition # (since no locale-specific _display_gregorian exists, here) hebrew = ( "", "Тишрей", "Мархешван", "Кислев", "Тевет", "Шват", "Адар", "Адар бет", "Нисан", "Ияр", "Сиван", "Тамуз", "Ав", "Елул", ) islamic = ( "", "Мухаррам", "Саффар", "Рабиу-л-ауал", "Рабиу-с-сани", "Джумадал-уля", "Джумада-с-сания", "Раджаб", "Шаабан", "Рамадан", "Шауал", "Зу-л-кида", "Зул-л-хиджа", ) persian = ( "", "Фарвардин", "Урдбихищ", "Хурдад", "Тир", "Мурдад", "Шахривар", "Михр", "Абан", "Азар", "Дай", "Бахман", "Исфаидармуз", ) french = ( "", "Вандемер", "Брюмер", "Фример", "Нивоз", "Плювиоз", "Вантоз", "Жерминал", "Флореал", "Прериал", "Мессидор", "Термидор", "Фрюктидор", "Допълнителен" ) def display(self, date): """ Returns a text string representing the date. """ mod = date.get_modifier() cal = date.get_calendar() qual = date.get_quality() start = date.get_start_date() newyear = date.get_new_year() qual_str = self._qual_str[qual] if mod == Date.MOD_TEXTONLY: return date.get_text() elif start == Date.EMPTY: return "" elif mod == Date.MOD_SPAN: d1 = self.display_cal[cal](start) d2 = self.display_cal[cal](date.get_stop_date()) scal = self.format_extras(cal, newyear) return "%s%s %s %s %s%s" % (qual_str, 'от', d1, 'до', d2, scal) elif mod == Date.MOD_RANGE: d1 = self.display_cal[cal](start) d2 = self.display_cal[cal](date.get_stop_date()) scal = self.format_extras(cal, newyear) return "%s%s %s %s %s%s" % (qual_str, 'между', d1, 'и', d2, scal) else: text = self.display_cal[date.get_calendar()](start) scal = self.format_extras(cal, newyear) return "%s%s%s%s" % (qual_str, self._mod_str[mod], text, scal) #------------------------------------------------------------------------- # # Register classes # #------------------------------------------------------------------------- register_datehandler(('bg_BG', 'bg', 'bulgarian', 'Bulgarian'), DateParserBG, DateDisplayBG)
Birmingham offers a huge pool of both talent and resources, supported by world-class infrastructure and connectivity. The city is an ideal location for professional services: prime rental costs are up to 50% lower than in London, and physical infrastructure and connectivity to and from the city are unparalleled. These key advantages have enabled Birmingham to emerge as a leading destination for financial and professional services.

The financial and professional services hub in Greater Birmingham employs over 200,000 people across 23,000 companies. Birmingham is one of only four UK cities with over 10,000 banking professionals and is home to the city centre Enterprise Zone, one of the biggest regeneration projects in the UK, comprising 26 sites across the city and providing 700,000 m² of office space for a range of sectors, including financial services. Paradise Birmingham is one of the major schemes central to the Enterprise Zone. The completed scheme will create a high-quality setting for office, civic, retail, leisure and hotel space, providing major improvements to pedestrian access and greatly enhanced public areas. The Snowhill district of the city centre is a hub for business, professional and financial services activity, and over the next 20 years there will be opportunities for residential development in the area. The Curzon area also has large potential for growth with the future arrival of the HS2 line.

With eight universities, two of which have business schools in the global top 100, and 24 further education institutions within Greater Birmingham, a pool of professional talent is available to support the sector. Additionally, collaboration between large firms such as Deutsche Bank, DTZ and KPMG and Birmingham Metropolitan College has led to the creation of the Greater Birmingham Professional Services Academy, fostering the city's professional talent for future employment in the sector.
#!/usr/bin/env python """ np_tests.py ----------- Compare output validity and speedup of numpy-accelerated codes to reference pure python codes. TODO: Tidy this up """ import numpy as np import allantools.allantools_pure_python as alt import allantools.allantools as alp import time if __name__ == "__main__": print "Compare results and speed of pure-python allantools against numpy allantools" ####################### # MTIE_PHASE() ####################### print "\ntesting mtie_phase()" data = np.random.random(1000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.mtie_phase_purepy(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.mtie_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 1280 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.mtie_phase_purepy(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.mtie_phase(data, rate, taus) t4 = time.time() #print (o_dev, o_dev_) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # MTIE() ####################### print "\ntesting mtie()" data = np.random.random(1000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.mtie(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.mtie(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 128000 rate = 2.1 data = np.random.random(10000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.mtie(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.mtie(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # THREE_CORNERED_HAT_PHASE() ####################### print "\ntesting three_cornered_hat_phase()" stride = 1 taus = [2, 4, 8, 16] rate = 2.1 pdata_ab = np.random.random(100000) pdata_bc = np.random.random(100000) pdata_ca = np.random.random(100000) t1 = time.time() function = alt.adev tau, dev_a = alt.three_cornered_hat_phase(pdata_ab, pdata_bc, pdata_ca, rate, taus, function) t2 = time.time() t3 = time.time() function = alp.adev tau_, dev_a_ = alp.three_cornered_hat_phase(pdata_ab, pdata_bc, pdata_ca, rate, taus, function) t4 = time.time() assert np.allclose(tau, tau_) assert np.allclose(dev_a, dev_a_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TIERMS_PHASE() ####################### print "\ntesting tierms_phase()" data = np.random.random(1000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.tierms_phase(data, rate, taus) o_taus_, 
o_dev_, o_err_, o_n_ = alp.tierms_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.tierms_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.tierms_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TIERMS() ####################### print "\ntesting tierms()" data = np.random.random(1000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.tierms(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.tierms(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.tierms(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.tierms(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TOTDEV_PHASE() ####################### print "\ntesting totdev_phase()" data = np.random.random(1000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.totdev_phase(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.totdev_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.totdev_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.totdev_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TOTDEV() ####################### print "\ntesting totdev()" data = np.random.random(1000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.totdev(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.totdev(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.totdev(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.totdev(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) 
print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # OHDEV() ####################### print "\ntesting ohdev()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.ohdev(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.ohdev(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # OHDEV_PHASE() ####################### print "\ntesting ohdev_phase()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.ohdev_phase(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.ohdev_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # HDEV_PHASE_CALC() ####################### print "\ntesting hdev_phase_calc()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for tau in taus: for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) mj = tau dev, deverr, n = alt.calc_hdev_phase(data, rate, mj, stride) dev_, deverr_, n_ = alp.calc_hdev_phase(data, rate, mj, stride) assert np.isclose(dev, dev_) assert np.isclose(n, n_) assert np.isclose(deverr, deverr_) stride = 1 tau = 16 rate = 2.0 t1 = time.time() dev, deverr, n = alt.calc_hdev_phase(data, rate, mj, stride) t2 = time.time() t3 = time.time() dev_, deverr_, n_ = alp.calc_hdev_phase(data, rate, mj, stride) t4 = time.time() assert np.isclose(dev, dev_) assert np.isclose(n, n_) assert np.isclose(deverr, deverr_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # OADEV_PHASE() ####################### print "\ntesting oadev_phase()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.oadev_phase(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.oadev_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, 
o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.oadev_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.oadev_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # HDEV_PHASE() ####################### print "\ntesting hdev_phase()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.hdev_phase(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.hdev_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.hdev_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.hdev_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # HDEV() ####################### print "\ntesting hdev()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.hdev(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.hdev(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.hdev(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.hdev(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # OADEV_PHASE() ####################### print "\ntesting oadev_phase()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.oadev_phase(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.oadev_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.oadev_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.oadev_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # 
OADEV() ####################### print "\ntesting oadev()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.oadev(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.oadev(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.oadev(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.oadev(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # ADEV_PHASE() ####################### print "\ntesting adev_phase()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.adev_phase(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.adev_phase(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.adev_phase(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.adev_phase(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # ADEV() ####################### print "\ntesting adev()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) o_taus, o_dev, o_err, o_n = alt.adev(data, rate, taus) o_taus_, o_dev_, o_err_, o_n_ = alp.adev(data, rate, taus) assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) stride = 1 tau = 16 rate = 2.1 data = np.random.random(100000) t1 = time.time() o_taus, o_dev, o_err, o_n = alt.adev(data, rate, taus) t2 = time.time() t3 = time.time() o_taus_, o_dev_, o_err_, o_n_ = alp.adev(data, rate, taus) t4 = time.time() assert np.allclose(o_taus, o_taus_) assert np.allclose(o_dev, o_dev_) assert np.allclose(o_err, o_err_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # CALC_ADEV_PHASE() ####################### print "\ntesting calc_adev_phase()" data = np.random.random(10000) taus = [1, 3, 5, 16, 128] rates = [1, 20, 10.7] strides = [1, 10, 7] for tau in taus: for rate in rates: for stride in strides: #print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride) mj = tau dev, deverr, n = alt.calc_adev_phase(data, rate, mj, stride) dev_, deverr_, n_ = alp.adev_phase_calc(data, rate, mj, stride) assert np.isclose(dev, dev_) assert np.isclose(n, n_) assert np.isclose(deverr, deverr_) stride = 1 tau = 16 rate = 2.0 t1 = time.time() dev, deverr, n = 
alt.calc_adev_phase(data, rate, mj, stride) t2 = time.time() t3 = time.time() dev_, deverr_, n_ = alp.adev_phase_calc(data, rate, mj, stride) t4 = time.time() assert np.isclose(dev, dev_) assert np.isclose(n, n_) assert np.isclose(deverr, deverr_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TAU_M() ####################### print "\ntesting tau_m()" taus = [1, 2, 4, 8, 16, -4, 10000, -3.1, 3.141] data = np.random.random(10000) rates = [1, 2, 7.1, 123.12] for rate in rates: m, taus2 = alt.tau_m(data, rate, taus) data_, m_, taus2_ = alp.tau_m(data, rate, taus) assert np.allclose(m, m_) assert np.allclose(taus2, taus2_) taus = np.random.randint(low=-100, high=10000, size=(10000,)) rate = 1.234 t1 = time.time() m, taus2 = alt.tau_m(data, rate, taus) t2 = time.time() t3 = time.time() data_, m_, taus2_ = alp.tau_m(data, rate, taus) t4 = time.time() assert np.allclose(m, m_) assert np.allclose(taus2, taus2_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # FREQUENCY2PHASE() ####################### print "\ntesting frequency2phase()" freqdata = np.random.random(10000) rates = [1, 2, 7.1, 123.12] for rate in rates: phase = alt.frequency2phase(freqdata, rate) phase_ = alp.frequency2phase(freqdata, rate) assert len(phase) == len(phase_) assert np.allclose(phase, phase_) freqdata = np.random.random(100000) t1 = time.time() phase = alt.frequency2phase(freqdata, rate) t2 = time.time() t3 = time.time() phase_ = alp.frequency2phase(freqdata, rate) t4 = time.time() print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TDEV_PHASE() ####################### print "\ntesting tdev_phase()" rate = 1.0 # 1000 Hz sample rate obs_s = 10000 # 1 hour n_samples = rate * obs_s t = np.arange(0, n_samples) phase = np.random.random(n_samples) + np.sin(t / n_samples) taus = [4] t1 = time.time() taus2, td, tde, ns = alt.tdev_phase(phase, rate, taus) t2 = time.time() t3 = time.time() taus2_, td_, tde_, ns_ = alp.tdev_phase(phase, rate, taus) t4 = time.time() assert np.allclose(taus2, taus2_) assert np.allclose(td, td_) assert np.allclose(tde, tde_) assert np.allclose(ns, ns_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # TDEV() ####################### print "\ntesting tdev()" rate = 2.0 # 1000 Hz sample rate obs_s = 32768 # 1 hour n_samples = rate * obs_s t = np.arange(0, n_samples) phase = np.random.random(n_samples) + np.sin(t / n_samples) taus = [1, 2, 4] t1 = time.time() taus2, td, tde, ns = alt.tdev(phase, rate, taus) t2 = time.time() t3 = time.time() taus2_, td_, tde_, ns_ = alp.tdev(phase, rate, taus) t4 = time.time() assert np.allclose(taus2, taus2_) assert np.allclose(td, td_) assert np.allclose(tde, tde_) assert np.allclose(ns, ns_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # MDEV_PHASE() ####################### print "\ntesting mdev_phase()" rate = 1.0 # 1000 Hz sample rate obs_s = 10000 # 1 hour n_samples = rate * obs_s t = np.arange(0, n_samples) phase = np.random.random(n_samples) + np.sin(t / n_samples) taus = [4] t1 = time.time() taus2, td, tde, ns = alt.mdev_phase(phase, rate, taus) t2 = time.time() t3 = time.time() taus2_, td_, 
tde_, ns_ = alp.mdev_phase(phase, rate, taus) t4 = time.time() assert np.allclose(taus2, taus2_) assert np.allclose(td, td_) assert np.allclose(tde, tde_) assert np.allclose(ns, ns_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) ####################### # MDEV() ####################### print "\ntesting mdev()" rate = 2.0 # 1000 Hz sample rate obs_s = 32768 # 1 hour n_samples = rate * obs_s t = np.arange(0, n_samples) phase = np.random.random(n_samples) + np.sin(t / n_samples) taus = [1, 2, 4] t1 = time.time() taus2, td, tde, ns = alt.mdev(phase, rate, taus) t2 = time.time() t3 = time.time() taus2_, td_, tde_, ns_ = alp.mdev(phase, rate, taus) t4 = time.time() assert np.allclose(taus2, taus2_) assert np.allclose(td, td_) assert np.allclose(tde, tde_) assert np.allclose(ns, ns_) print "Original: %2.3fs" % (t2 - t1) print "New: %2.3fs" % (t4 - t3) print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3)) print "\nAll DONE!"
Who knows what the world is like in other parts of the country, but here in Albuquerque, we are having a Spring like no other. Usually, when March rolls around, so does the wind, and if the weather is beautiful, it is difficult to tell with the sand being blown in your eyes. And while the wind is still there, it’s been a much tamer year so far. In weather like this, everyone is outside, walking dogs, taking bike rides, and – at least in my neighborhood – gardening.

There’s a flowerbed in the back of the house that has been overrun by weeds and cats and trash blown in from the wind. The other day, Jenn and I set about clearing it all away. We were outside for an hour or two, and even though the flowerbed is little more than the size of a California king bed, I felt like the progress was slow and arduous. “This weeding thing is such a pain,” I said, and Jenn in her wisdom pointed out that weeding is not always like this. Once we had it cleared, the rest would just be upkeep.

Is the writing metaphor as glaring for you as it was for me? As you probably know, I’ve been working on a novel – the same novel I’ve been working on for almost seven years – and many days, my work feels like that – like I’m slaving away and my progress appears minimal compared to the actual task. Those are the bad days, the weed days, and sometimes the only thing I can do to make it through them is remember that those are the days of clearing the way. Tomorrow, I tell myself, I’ll have cleared a path. (And it is usually true).

When I lived on Walter Street, Randi and I were a part of a community garden, and every year (at about this time) we would have our first meeting of the planting season. Bonnie, the head of the whole operation, would pull out a whiteboard, and we’d list the different seeds we’d want to plant, starting with tomatoes and peppers and eggplant and usually ending with a discussion about marigolds and zinnias. Even within each type of fruit, we planted a variety, and by the end of the season, there would be almost a dozen different types of tomatoes, half a dozen varieties of basil, multi-colored and multi-textured lettuces, and peppers of different shapes, sizes, and levels of spice. Cooking during harvest season is always spurred along by the inspiration of what the garden has to offer.

Lately, I’ve been wondering why writing cannot be like this as well. Usually, I am a fan of the process, of sitting down every day and hacking away at my novel. This is good practice. It has gotten me through some tough days and tough drafts. Since moving back to New Mexico, however, I’ve been wanting to get back to the fun. Maybe it is a product of returning, which is reminding me of all of the reasons I moved here in the first place – I had a strong urge to create. My first days in Albuquerque were spent writing story after story after story, and I wasn’t worried about doing any one thing, and I wasn’t worried about being any one thing. I was just happy to be living the life of an artist. Today, my days are more full and my writing has more weight, more pressure. BUT, this month, when I’ve been sitting down to work, I’m letting the fun win more. I haven’t always worked on the novel. I haven’t always worked on prose. One day, I sat there and drew pictures and called it the beginning of a graphic story. Another day, I cut out bits from a magazine and glued them into a scrapbook. As artists, aren’t we allowed to sow as many seeds as we see fit?

What to say about this other than we live in a desert. Sometimes the seeds need shade. 
All the time: they need water. Even the ground needs water if you are going to try to pull up the weeds! And so, if we are planting seeds of stories, poems, photographs, drawings, what are the things that water you? Is it the walk in the woods? Is it waking up before the rest of your household? Is it conversations over beer? Long road trips? A month spent sleeping in your childhood bed, hanging out with your mother? Of all the metaphors, perhaps this one feels the most cheesy to me – especially because I want to caution you about over-watering (when does a walk go from inspiration to writing distraction? Isn’t a weekend with the mother enough? Sometimes you need sleep, too, don’t you think?), but I still think it is true. Maybe our stories (or at least my novel) feel like they have deep, dark, twisted roots, but we don’t. I don’t. I guess you could say that my task is to bring the story water… Feed the story, feed the soul: isn’t it the same thing in the end? The metaphor can go on. We plant things. We watch them grow. We harvest the fruit. We slice it up, cook it up. We share it with others. But that is for the summer months. And this is March. …the month of getting the garden ready. So what are you waiting for? What does your garden need today? Now, go!
from kivy.app import App from kivy.uix.widget import Widget from kivy.clock import Clock from kivy.graphics import Color, Ellipse from kivy.uix.button import Button from kivy.uix.label import Label from kivy.animation import Animation class timerWidget(Widget): def __init__(self, x=350, y =350, sizeWH=200, angle=0, id='counter',**kwargs): # make sure we aren't overriding any important functionality super(timerWidget, self).__init__(**kwargs) self.id = id self.angle = angle self.sizeWH = sizeWH self.pos = [x,y] #self.size = sizeWH Clock.schedule_interval(self.update, 1/25) with self.canvas: Color(0.0, 1.0, 0.0) Ellipse(pos=(self.pos[0], self.pos[1]), size=(self.sizeWH, sizeWH), group='a', angle_start=0, angle_end = 10) def update(self, dt, x=None, y=None): if self.angle < 360: self.angle = self.angle + 1 else: self.angle = 360 self.canvas.get_group('a')[1].angle_end = self.angle class windowWidget(Widget): def __init__(self, **kwargs): super(windowWidget, self).__init__(**kwargs) pos = [30,350,90] size = 100 id = 'c' aDiv = 360-45 for i in range(4): div = (pos[0]*i) + (pos[2]*i) aDiv = (aDiv) - (45*i) self.add_widget(timerWidget(div,pos[1],size,aDiv,id+str(i))) def update(self, dt): for i in self.children: if i.angle == 360: #self.outAnim(i) #i.x = 300 print i.x def outAnim(self, obj): anim = Animation(x=50, t='in_quad') anim.start(obj) class GfxApp(App): def build(self): gWindow = windowWidget() Clock.schedule_interval(gWindow.update, 1/30) return gWindow if __name__ == '__main__': GfxApp().run()
The Groove dance club is a multi-level party venue located at Universal Studios Florida’s CityWalk. The club features open format music, a dance floor, and the newest sound and lighting systems. Local and national DJs are there every night. The dress code is casual chic – hats and tank tops are not permitted. There are several full liquor bars and 3 themed lounges – The Blue Room with a sci-fi feel, the romantic Red Room, and the tranquil Green Room with a ‘70s vibe. The Groove is designed to look like a century-old theatre in various stages of renovation. But this is just for show because they have the latest sound systems, lighting, fog machines, and other effects, including a wall of video screens. The Groove Orlando is open to those aged 21 and over. Most Wednesdays are Teen Night from 8pm until midnight, and you must be 15 to 19 years old with a student ID. The dates for Teen Night are listed on the CityWalk Times & Info brochure. Entry costs $7 or a CityWalk Party Pass.
# -*- coding: utf-8 -*-
import os
import time

import click

import pydisp


@click.command(context_settings={'help_option_names': ['-h', '--help']})
@click.argument('images', nargs=-1, type=click.Path(exists=True), required=True)
@click.option('--title', '-t', type=str, help='Window title')
@click.option('--win', type=str, help='Window ID. By default, a generated unique id. %p will use path as id. %f will use filename.')
@click.option('--width', '-w', type=int, help='Initial window width.')
@click.option('--pause', '-p', default=0.2, help='Pause between images in seconds')
@click.option('--port', type=int, help='Display server port. Default is read from .display/config.json, or 8000.')
@click.option('--hostname', type=str, help='Display server hostname. Default is read from .display/config.json, or localhost.')
def main(images, title, win, width, pause, port, hostname):
    # TODO tiling option
    if port is not None:
        pydisp.CONFIG.PORT = port
    if hostname is not None:
        pydisp.CONFIG.HOSTNAME = hostname

    for img_fname in images:
        click.echo('loading {}'.format(img_fname))

        # Derive the mime subtype from the file extension.
        base, ext = os.path.splitext(img_fname)
        ext = ext.lower().replace('.', '').replace('jpg', 'jpeg')
        if not pydisp.is_valid_image_mime_type(ext):
            raise click.BadParameter('unrecognized image format: {}'.format(ext))

        with open(img_fname, 'rb') as f:
            encoded = pydisp.b64_encode(f.read(), ext)

        # Fall back to the file path when no title was given, and resolve the
        # %p/%f placeholders per image (path vs. filename, as documented in
        # the --win help text) so every file gets its own window.
        pane_title = title or img_fname
        if win == '%p':
            pane_win = img_fname
        elif win == '%f':
            pane_win = os.path.basename(img_fname)
        else:
            pane_win = win

        pydisp.pane('image', win=pane_win, title=pane_title,
                    content={'src': encoded,
                             'width': width,
                             })

        # Only pause between images when more than one was given.
        if (len(images) > 1) and (pause > 0.0):
            time.sleep(pause)


if __name__ == '__main__':
    main()
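A hypothetical invocation of the command above, assuming the script is saved as show_images.py and a pydisp display server is already listening on the configured hostname and port (the script name and image files are placeholders, not taken from the source):

    python show_images.py --title demo --width 400 --pause 0.5 --win %f first.png second.jpg

Each image is base64-encoded and pushed to its own pane; passing --win %f keys each window on the file name, so re-running the command updates the existing panes instead of opening new ones.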
Episode 5: The Hero/Sidekick Relationship – Did You Do Your Homework? Posted on March 15, 2017 by Did You Do Your Homework? Have you ever wanted to put the relationship between Batman and his many, many sidekicks under a microscope and really get to the bottom of it? Good news! In this week’s episode, we dig into the hero/sidekick relationship in all its forms, including a rough history where Martha asks Pete to show his work and he doesn’t immediately strangle her with her headphone cord. We talk a whole lot about Star Trek and never come to a conclusion about whether or not it’s relevant! All this AND MORE. Our theme for our next episode is going to be: Sacrifice. Enjoy doing your homework!
# Copyright 2015-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import matplotlib import os import re import shutil import tempfile import unittest from test_thermal import BaseTestThermal import trappy import utils_tests class TestRun(BaseTestThermal): def __init__(self, *args, **kwargs): super(TestRun, self).__init__(*args, **kwargs) self.map_label = {"00000000,00000006": "A57", "00000000,00000039": "A53"} def test_run_has_all_classes(self): """The Run() class has members for all classes""" run = trappy.Run() for attr in run.class_definitions.iterkeys(): self.assertTrue(hasattr(run, attr)) def test_run_has_all_classes_scope_all(self): """The Run() class has members for all classes with scope=all""" run = trappy.Run(scope="all") for attr in run.class_definitions.iterkeys(): self.assertTrue(hasattr(run, attr)) def test_run_has_all_classes_scope_thermal(self): """The Run() class has only members for thermal classes with scope=thermal""" run = trappy.Run(scope="thermal") for attr in run.thermal_classes.iterkeys(): self.assertTrue(hasattr(run, attr)) for attr in run.sched_classes.iterkeys(): self.assertFalse(hasattr(run, attr)) def test_run_has_all_classes_scope_sched(self): """The Run() class has only members for sched classes with scope=sched""" run = trappy.Run(scope="sched") for attr in run.thermal_classes.iterkeys(): self.assertFalse(hasattr(run, attr)) for attr in run.sched_classes.iterkeys(): self.assertTrue(hasattr(run, attr)) def test_run_accepts_name(self): """The Run() class has members for all classes""" run = trappy.Run(name="foo") self.assertEquals(run.name, "foo") def test_fail_if_no_trace_dat(self): """Raise an IOError with the path if there's no trace.dat and trace.txt""" os.remove("trace.txt") self.assertRaises(IOError, trappy.Run) cwd = os.getcwd() try: trappy.Run(cwd) except IOError as exception: pass self.assertTrue(cwd in str(exception)) def test_other_directory(self): """Run() can grab the trace.dat from other directories""" other_random_dir = tempfile.mkdtemp() os.chdir(other_random_dir) dfr = trappy.Run(self.out_dir).thermal.data_frame self.assertTrue(len(dfr) > 0) self.assertEquals(os.getcwd(), other_random_dir) def test_run_arbitrary_trace_txt(self): """Run() works if the trace is called something other than trace.txt""" arbitrary_trace_name = "my_trace.txt" shutil.move("trace.txt", arbitrary_trace_name) dfr = trappy.Run(arbitrary_trace_name).thermal.data_frame self.assertTrue(len(dfr) > 0) self.assertFalse(os.path.exists("trace.txt")) # As there is no raw trace requested. 
The mytrace.raw.txt # Should not have been generated self.assertFalse(os.path.exists("mytrace.raw.txt")) def test_run_autonormalize_time(self): """Run() normalizes by default""" run = trappy.Run() self.assertEquals(round(run.thermal.data_frame.index[0], 7), 0) def test_run_dont_normalize_time(self): """Run() doesn't normalize if asked not to""" run = trappy.Run(normalize_time=False) self.assertNotEquals(round(run.thermal.data_frame.index[0], 7), 0) def test_run_basetime(self): """Test that basetime calculation is correct""" run = trappy.Run(normalize_time=False) basetime = run.thermal.data_frame.index[0] self.assertEqual(run.get_basetime(), basetime) def test_run_duration(self): """Test that duration calculation is correct""" run = trappy.Run(normalize_time=False) duration = run.thermal_governor.data_frame.index[-1] - run.thermal.data_frame.index[0] self.assertEqual(run.get_duration(), duration) def test_run_normalize_time(self): """Run().normalize_time() works accross all classes""" run = trappy.Run(normalize_time=False) prev_inpower_basetime = run.cpu_in_power.data_frame.index[0] prev_inpower_last = run.cpu_in_power.data_frame.index[-1] basetime = run.thermal.data_frame.index[0] run.normalize_time(basetime) self.assertEquals(round(run.thermal.data_frame.index[0], 7), 0) exp_inpower_first = prev_inpower_basetime - basetime self.assertEquals(round(run.cpu_in_power.data_frame.index[0] - exp_inpower_first, 7), 0) exp_inpower_last = prev_inpower_last - basetime self.assertEquals(round(run.cpu_in_power.data_frame.index[-1] - exp_inpower_last, 7), 0) def test_get_all_freqs_data(self): """Test get_all_freqs_data()""" allfreqs = trappy.Run().get_all_freqs_data(self.map_label) self.assertEquals(allfreqs[1][1]["A53_freq_out"].iloc[3], 850) self.assertEquals(allfreqs[1][1]["A53_freq_in"].iloc[1], 850) self.assertEquals(allfreqs[0][1]["A57_freq_out"].iloc[2], 1100) self.assertTrue("gpu_freq_in" in allfreqs[2][1].columns) # Make sure there are no NaNs in the middle of the array self.assertTrue(allfreqs[0][1]["A57_freq_in"].notnull().all()) def test_plot_freq_hists(self): """Test that plot_freq_hists() doesn't bomb""" run = trappy.Run() _, axis = matplotlib.pyplot.subplots(nrows=2) run.plot_freq_hists(self.map_label, axis) matplotlib.pyplot.close('all') def test_plot_load(self): """Test that plot_load() doesn't explode""" run = trappy.Run() run.plot_load(self.map_label, title="Util") _, ax = matplotlib.pyplot.subplots() run.plot_load(self.map_label, ax=ax) def test_plot_normalized_load(self): """Test that plot_normalized_load() doesn't explode""" run = trappy.Run() _, ax = matplotlib.pyplot.subplots() run.plot_normalized_load(self.map_label, ax=ax) def test_plot_allfreqs(self): """Test that plot_allfreqs() doesn't bomb""" run = trappy.Run() run.plot_allfreqs(self.map_label) matplotlib.pyplot.close('all') _, axis = matplotlib.pyplot.subplots(nrows=2) run.plot_allfreqs(self.map_label, ax=axis) matplotlib.pyplot.close('all') def test_trace_metadata(self): """Test if metadata gets populated correctly""" expected_metadata = {} expected_metadata["version"] = "6" expected_metadata["cpus"] = "6" run = trappy.Run() for key, value in expected_metadata.items(): self.assertTrue(hasattr(run, "_" + key)) self.assertEquals(getattr(run, "_" + key), value) def test_missing_metadata(self): """Test if trappy.Run() works with a trace missing metadata info""" lines = [] with open("trace.txt", "r") as fil: lines += fil.readlines() lines = lines[7:] fil.close() with open("trace.txt", "w") as fil: fil.write("".join(lines)) 
fil.close() run = trappy.Run() self.assertEquals(run._cpus, None) self.assertEquals(run._version, None) self.assertTrue(len(run.thermal.data_frame) > 0) @unittest.skipUnless(utils_tests.trace_cmd_installed(), "trace-cmd not installed") class TestRunRawDat(utils_tests.SetupDirectory): def __init__(self, *args, **kwargs): super(TestRunRawDat, self).__init__( [("raw_trace.dat", "trace.dat")], *args, **kwargs) def test_raw_dat(self): """Tests an event that relies on raw parsing""" run = trappy.Run() self.assertTrue(hasattr(run, "sched_switch")) self.assertTrue(len(run.sched_switch.data_frame) > 0) self.assertTrue("prev_comm" in run.sched_switch.data_frame.columns) def test_raw_dat_arb_name(self): """Tests an event that relies on raw parsing with arbitrary .dat file name""" arbitrary_name = "my_trace.dat" shutil.move("trace.dat", arbitrary_name) run = trappy.Run(arbitrary_name) self.assertTrue(os.path.isfile("my_trace.raw.txt")) self.assertTrue(hasattr(run, "sched_switch")) self.assertTrue(len(run.sched_switch.data_frame) > 0) class TestRunRawBothTxt(utils_tests.SetupDirectory): def __init__(self, *args, **kwargs): super(TestRunRawBothTxt, self).__init__( [("raw_trace.txt", "trace.txt"), ("raw_trace.raw.txt", "trace.raw.txt")], *args, **kwargs) def test_both_txt_files(self): """test raw parsing for txt files""" self.assertFalse(os.path.isfile("trace.dat")) run = trappy.Run() self.assertTrue(hasattr(run, "sched_switch")) self.assertTrue(len(run.sched_switch.data_frame) > 0) def test_both_txt_arb_name(self): """Test raw parsing for txt files arbitrary name""" arbitrary_name = "my_trace.txt" arbitrary_name_raw = "my_trace.raw.txt" shutil.move("trace.txt", arbitrary_name) shutil.move("trace.raw.txt", arbitrary_name_raw) run = trappy.Run(arbitrary_name) self.assertTrue(hasattr(run, "sched_switch")) self.assertTrue(len(run.sched_switch.data_frame) > 0) class TestRunSched(utils_tests.SetupDirectory): """Tests using a trace with only sched info and no (or partial) thermal""" def __init__(self, *args, **kwargs): super(TestRunSched, self).__init__( [("trace_empty.txt", "trace.txt")], *args, **kwargs) def test_run_basetime_empty(self): """Test that basetime is 0 if data frame of all data objects is empty""" run = trappy.Run(normalize_time=False) self.assertEqual(run.get_basetime(), 0) def test_run_normalize_some_tracepoints(self): """Test that normalizing time works if not all the tracepoints are in the trace""" with open("trace.txt", "a") as fil: fil.write(" kworker/4:1-1219 [004] 508.424826: thermal_temperature: thermal_zone=exynos-therm id=0 temp_prev=24000 temp=24000") run = trappy.Run() self.assertEqual(run.thermal.data_frame.index[0], 0) @unittest.skipUnless(utils_tests.trace_cmd_installed(), "trace-cmd not installed") class TestTraceDat(utils_tests.SetupDirectory): """Test that trace.dat handling work""" def __init__(self, *args, **kwargs): super(TestTraceDat, self).__init__( [("trace.dat", "trace.dat")], *args, **kwargs) def test_do_txt_if_not_there(self): """Create trace.txt if it's not there""" self.assertFalse(os.path.isfile("trace.txt")) trappy.Run() found = False with open("trace.txt") as fin: for line in fin: if re.search("thermal", line): found = True break self.assertTrue(found) def test_do_raw_txt_if_not_there(self): """Create trace.raw.txt if it's not there""" self.assertFalse(os.path.isfile("trace.raw.txt")) trappy.Run() found = False with open("trace.raw.txt") as fin: for line in fin: if re.search("thermal", line): found = True break def test_run_arbitrary_trace_dat(self): """Run() 
works if asked to parse a binary trace with a filename other than trace.dat""" arbitrary_trace_name = "my_trace.dat" shutil.move("trace.dat", arbitrary_trace_name) dfr = trappy.Run(arbitrary_trace_name).thermal.data_frame self.assertTrue(os.path.exists("my_trace.txt")) self.assertTrue(os.path.exists("my_trace.raw.txt")) self.assertTrue(len(dfr) > 0) self.assertFalse(os.path.exists("trace.dat")) self.assertFalse(os.path.exists("trace.txt")) self.assertFalse(os.path.exists("trace.raw.txt"))
Leeds United chief executive Angus Kinnear confirmed that the club’s teenage winger Jack Clarke is heading home from hospital to rest after suffering an illness during Saturday’s 1-1 draw with Middlesbrough. “Jack has had messages from football fans from every club you can imagine and we all really appreciate it,” Kinnear told the club’s official website. Clarke looks set to miss Leeds’ Championship clash with Swansea City on Wednesday at Elland Road, where they will seek a first win in three matches to boost their hopes of promotion.
import calendar import collections import io import itertools import stat import time import dulwich import dulwich.objects from multiple import repositories from multiple import utils class RepositoryGit(repositories.RepositoryBase): def __init__(self, dulwich_repository): self.backend = dulwich_repository def commit(self, index, message=b'', author=None, committer=None, at_time=None): # @todo time support if not committer: committer = author if not at_time: at_time = time.gmtime() commit = dulwich.objects.Commit() commit.tree = index.root_tree commit.author = author commit.committer = committer commit.commit_time = commit.author_time = calendar.timegm(at_time) commit.commit_timezone = commit.author_timezone = at_time.tm_isdst commit.message = message self.backend.object_store.add_object(commit) def open_index_at(self, reference): root_tree = None if reference: commit = self.backend[reference] if isinstance(commit, dulwich.objects.Commit): root_tree = self.backend[commit.tree] else: raise ValueError( "bad reference '%r' is not a " "dulwich.objects.Commit" % commit ) else: root_tree = dulwich.objects.Tree() return MemoryIndex(root_tree, self.backend.object_store) def get(self, path, reference, default=None): result = default commit = self.backend[reference] if isinstance(commit, dulwich.objects.Commit): tree = self.backend[commit.tree] blob_object = tree.lookup_path(path) if isinstance(blob_object, dulwich.objects.Blob): result = blob_object.data if isinstance(result, str): result = io.StringIO(result) return result class MemoryIndex(object): def __init__(self, root_tree, object_store): """ Args: root_tree (dulwich.objects.Tree): The root tree of the index object_store (dulwich.object_store.BaseObjectStore): The object store where to store the objects. 
""" self.object_store = object_store self._objects = dict(self._get_objects(root_tree)) @property def root_tree(self): return self._objects[b''].copy() @property def objects(self): return { path: obj.copy() for path, obj in self._objects.items() } def _get_objects(self, start_tree): """ Load in memory all the needed objects Returns: (Dict(Tuple(str, dulwich.objects.ShaFile))) """ contents = self.object_store.iter_tree_contents(start_tree.id, True) for entry in contents: yield entry.path, self.object_store[entry.sha] def _get_or_create_tree(self, path): try: return self._objects[path] except KeyError: tree = dulwich.objects.Tree() self._objects[path] = tree return tree def get(self, path, default=None): return self._objects.get(path, default) def add(self, contents): # @todo a true bulk add without considering every file individually for content, path in contents: blob = dulwich.objects.Blob.from_string(content.read()) self._add(path, blob) def _add(self, path, blob, file_mode=0o100644): processed_path = ProcessedPath.from_path(path) self.object_store.add_object(blob) self._objects[processed_path.rootless_path] = blob paths = list(processed_path.intermediate_paths()) # first update the leaf tree with the blob objects to add leaf_path = paths[-1] leaf_tree = self._get_or_create_tree(leaf_path) leaf_tree.add(processed_path.basename, file_mode, blob.id) self.object_store.add_object(leaf_tree) # iterate the other trees from the nearest until the root # and update them indexed_paths = list(enumerate(reversed(paths))) for idx, intermediate_path in indexed_paths: if intermediate_path: # if intermediate_path == '' it's the root tree _, parent_path = indexed_paths[idx + 1] parent_tree = self._get_or_create_tree(parent_path) child_tree = self._get_or_create_tree(intermediate_path) child_idx = processed_path.tokens_n - 1 - idx child_name = processed_path.tokens[child_idx] parent_tree.add(child_name, stat.S_IFDIR, child_tree.id) self.object_store.add_object(child_tree) self.object_store.add_object(parent_tree) else: break _ProcessedPath = collections.namedtuple( '_ProcessedPath', ( 'path', # intial path with a leading / 'dirname', # dirname extracted from the path 'basename', # basename extracted from the path 'tokens', # tokens of the dirname 'tokens_n' # number of tokens ) ) class ProcessedPath(_ProcessedPath): @classmethod def from_path(cls, path): if not path.startswith(b'/'): path = b'/' + path dirname, basename = utils.paths.path_split(path) dirname_tokens = dirname.split(b'/') n_dirname_tokens = len(dirname_tokens) return cls(path, dirname, basename, dirname_tokens, n_dirname_tokens) def intermediate_paths(self): """ Generate the intermediate paths with the ProcessedPath.tokens values. b'/data/files/data.json' -> ['', 'data', 'data/files'] Returns: (iter) """ return itertools.accumulate(self.tokens, utils.paths.path_join) @property def rootless_path(self): return self.path[1:]
Our company has 46 years of experience in the production and development of overall solutions for long-lasting and durable roofs. The intelligent combination of roofing, solar technology and insulation opens up a completely new dimension: BRAMAC roofs save and generate energy at the same time and help to improve our climate. If all the roof tiles BRAMAC has ever produced were laid end to end, they would stretch 40 times around the earth at the equator. Every year, we produce enough roof tiles to cover 64,000 single-family homes. To date, the Bramac Group has sold 93,193 m² of solar collectors; the heat they generate helps to save about 4.4 million liters of heating oil, or 11,600 tons of CO2. Contact us – we are at your disposal!
""" The DenseDesignMatrix class and related code. Functionality for representing data that can be described as a dense matrix (rather than a sparse matrix) with each row containing an example and each column corresponding to a different feature. DenseDesignMatrix also supports other "views" of the data, for example a dataset of images can be viewed either as a matrix of flattened images or as a stack of 2D multi-channel images. However, the images must all be the same size, so that each image may be mapped to a matrix row by the same transformation. """ __authors__ = "Ian Goodfellow and Mehdi Mirza" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import functools import logging import warnings import numpy as np from theano.compat.six.moves import xrange from pylearn2.datasets import cache from pylearn2.utils.iteration import ( FiniteDatasetIterator, resolve_iterator_class ) import copy # Don't import tables initially, since it might not be available # everywhere. tables = None from pylearn2.datasets.dataset import Dataset from pylearn2.datasets import control from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace, IndexSpace from pylearn2.utils import safe_zip from pylearn2.utils.exc import reraise_as from pylearn2.utils.rng import make_np_rng from pylearn2.utils import contains_nan from theano import config logger = logging.getLogger(__name__) def ensure_tables(): """ Makes sure tables module has been imported """ global tables if tables is None: import tables class DenseDesignMatrix(Dataset): """ A class for representing datasets that can be stored as a dense design matrix (and optionally, associated targets). Parameters ---------- X : ndarray, 2-dimensional, optional Should be supplied if `topo_view` is not. A design \ matrix of shape (number examples, number features) \ that defines the dataset. topo_view : ndarray, optional Should be supplied if X is not. An array whose first \ dimension is of length number examples. The remaining \ dimensions are examples with topological significance, \ e.g. for images the remaining axes are rows, columns, \ and channels. y : ndarray, optional Targets for each example (e.g., class ids, values to be predicted in a regression task). Currently three formats are supported: - None: Pass `None` if there are no target values. In this case the dataset may not be some tasks such as supervised learning or evaluation of a supervised learning system, but it can be used for some other tasks. For example, a supervised learning system can make predictions on it, or an unsupervised learning system can be trained on it. - 1D ndarray of integers: This format may be used when the targets are class labels. In this format, the array should have one entry for each example. Each entry should be an integer, in the range [0, N) where N is the number of classes. This is the format that the `SVM` class expects. - 2D ndarray, data type optional: This is the most common format and can be used for a variety of problem types. Each row of the matrix becomes the target for a different example. Specific models / costs can interpret this target vector differently. For example, the `Linear` output layer for the `MLP` class expects the target for each example to be a vector of real-valued regression targets. (It can be a vector of size one if you only have one regression target). 
The `Softmax` output layer of the `MLP` class expects the target to be a vector of N elements, where N is the number of classes, and expects all but one of the elements to 0. One element should have value 1., and the index of this element identifies the target class. view_converter : object, optional An object for converting between the design matrix \ stored internally and the topological view of the data. axes: tuple, optional The axes ordering of the provided topo_view. Must be some permutation of ('b', 0, 1, 'c') where 'b' indicates the axis indexing examples, 0 and 1 indicate the row/cols dimensions and 'c' indicates the axis indexing color channels. rng : object, optional A random number generator used for picking random \ indices into the design matrix when choosing minibatches. X_labels : int, optional If X contains labels then X_labels must be passed to indicate the total number of possible labels e.g. the size of a the vocabulary when X contains word indices. This will make the set use IndexSpace. y_labels : int, optional If y contains labels then y_labels must be passed to indicate the total number of possible labels e.g. 10 for the MNIST dataset where the targets are numbers. This will make the set use IndexSpace. See Also -------- DenseDesignMatrixPytables : Use this class if your data is too big to fit in memory. Notes ----- - What kind of data can be stored in this way? A design matrix is a matrix where each row contains a single example. Each column within the row is a feature of that example. By dense, we mean that every entry in the matrix is explicitly given a value. Examples of datasets that can be stored this way include MNIST and CIFAR10. Some datasets cannot be stored as a design matrix. For example, a collection of images, each image having a different size, can't be stored in this way, because we can't reshape each image to the same length of matrix row. Some datasets can, conceptually, be represented as a design matrix, but it may not be efficient to store them as dense matrices. For example, a dataset of sentences with a bag of words representation, might have a very high number of features but most of the values are zero, so it would be better to store the data as a sparse matrix. - What if my examples aren't best thought of as vectors? The DenseDesignMatrix class supports two views of the data, the "design matrix view" in which each example is just a vector, and the "topological view" in which each example is formatted using some kind of data structure with meaningful topology. For example, a dataset of images can be viewed as a design matrix where each row contains a flattened version of each image, or it can be viewed as a 4D tensor, where each example is a 3D subtensor, with one axis corresponding to rows of the image, one axis corresponding to columns of the image, and one axis corresponding to the color channels. This structure can be thought of as having meaningful topology because neighboring coordinates on the row and column axes correspond to neighboring pixels in the image. 
""" _default_seed = (17, 2, 946) def __init__(self, X=None, topo_view=None, y=None, view_converter=None, axes=('b', 0, 1, 'c'), rng=_default_seed, preprocessor=None, fit_preprocessor=False, X_labels=None, y_labels=None): self.X = X self.y = y self.view_converter = view_converter self.X_labels = X_labels self.y_labels = y_labels self._check_labels() if topo_view is not None: assert view_converter is None self.set_topological_view(topo_view, axes) else: assert X is not None, ("DenseDesignMatrix needs to be provided " "with either topo_view, or X") if view_converter is not None: # Get the topo_space (usually Conv2DSpace) from the # view_converter if not hasattr(view_converter, 'topo_space'): raise NotImplementedError("Not able to get a topo_space " "from this converter: %s" % view_converter) # self.X_topo_space stores a "default" topological space that # will be used only when self.iterator is called without a # data_specs, and with "topo=True", which is deprecated. self.X_topo_space = view_converter.topo_space else: self.X_topo_space = None # Update data specs, if not done in set_topological_view X_source = 'features' if X_labels is None: X_space = VectorSpace(dim=X.shape[1]) else: if X.ndim == 1: dim = 1 else: dim = X.shape[-1] X_space = IndexSpace(dim=dim, max_labels=X_labels) if y is None: space = X_space source = X_source else: if y.ndim == 1: dim = 1 else: dim = y.shape[-1] if y_labels is not None: y_space = IndexSpace(dim=dim, max_labels=y_labels) else: y_space = VectorSpace(dim=dim) y_source = 'targets' space = CompositeSpace((X_space, y_space)) source = (X_source, y_source) self.data_specs = (space, source) self.X_space = X_space self.compress = False self.design_loc = None self.rng = make_np_rng(rng, which_method="random_integers") # Defaults for iterators self._iter_mode = resolve_iterator_class('sequential') self._iter_topo = False self._iter_targets = False self._iter_data_specs = (self.X_space, 'features') if preprocessor: preprocessor.apply(self, can_fit=fit_preprocessor) self.preprocessor = preprocessor def _check_labels(self): """Sanity checks for X_labels and y_labels.""" if self.X_labels is not None: assert self.X is not None assert self.view_converter is None assert self.X.ndim <= 2 assert np.all(self.X < self.X_labels) if self.y_labels is not None: assert self.y is not None assert self.y.ndim <= 2 assert np.all(self.y < self.y_labels) @functools.wraps(Dataset.iterator) def iterator(self, mode=None, batch_size=None, num_batches=None, rng=None, data_specs=None, return_tuple=False): [mode, batch_size, num_batches, rng, data_specs] = self._init_iterator( mode, batch_size, num_batches, rng, data_specs) # If there is a view_converter, we have to use it to convert # the stored data for "features" into one that the iterator # can return. space, source = data_specs if isinstance(space, CompositeSpace): sub_spaces = space.components sub_sources = source else: sub_spaces = (space,) sub_sources = (source,) convert = [] for sp, src in safe_zip(sub_spaces, sub_sources): if src == 'features' and \ getattr(self, 'view_converter', None) is not None: conv_fn = ( lambda batch, self=self, space=sp: self.view_converter.get_formatted_batch(batch, space)) else: conv_fn = None convert.append(conv_fn) return FiniteDatasetIterator(self, mode(self.get_num_examples(), batch_size, num_batches, rng), data_specs=data_specs, return_tuple=return_tuple, convert=convert) def get_data(self): """ Returns all the data, as it is internally stored. 
The definition and format of these data are described in `self.get_data_specs()`. Returns ------- data : numpy matrix or 2-tuple of matrices The data """ if self.y is None: return self.X else: return (self.X, self.y) def use_design_loc(self, path): """ Calling this function changes the serialization behavior of the object permanently. If this function has been called, when the object is serialized, it will save the design matrix to `path` as a .npy file rather than pickling the design matrix along with the rest of the dataset object. This avoids pickle's unfortunate behavior of using 2X the RAM when unpickling. TODO: Get rid of this logic, use custom array-aware picklers (joblib, custom pylearn2 serialization format). Parameters ---------- path : str The path to save the design matrix to """ if not path.endswith('.npy'): raise ValueError("path should end with '.npy'") self.design_loc = path def get_topo_batch_axis(self): """ The index of the axis of the batches Returns ------- axis : int The axis of a topological view of this dataset that corresponds to indexing over different examples. """ axis = self.view_converter.axes.index('b') return axis def enable_compression(self): """ If called, when pickled the dataset will be saved using only 8 bits per element. .. todo:: Not sure this should be implemented as something a base dataset does. Perhaps as a mixin that specific datasets (i.e. CIFAR10) inherit from. """ self.compress = True def __getstate__(self): """ .. todo:: WRITEME """ rval = copy.copy(self.__dict__) # TODO: Not sure this should be implemented as something a base dataset # does. Perhaps as a mixin that specific datasets (i.e. CIFAR10) # inherit from. if self.compress: rval['compress_min'] = rval['X'].min(axis=0) # important not to do -= on this line, as that will modify the # original object rval['X'] = rval['X'] - rval['compress_min'] rval['compress_max'] = rval['X'].max(axis=0) rval['compress_max'][rval['compress_max'] == 0] = 1 rval['X'] *= 255. / rval['compress_max'] rval['X'] = np.cast['uint8'](rval['X']) if self.design_loc is not None: # TODO: Get rid of this logic, use custom array-aware picklers # (joblib, custom pylearn2 serialization format). np.save(self.design_loc, rval['X']) del rval['X'] return rval def __setstate__(self, d): """ .. todo:: WRITEME """ if d['design_loc'] is not None: if control.get_load_data(): fname = cache.datasetCache.cache_file(d['design_loc']) d['X'] = np.load(fname) else: d['X'] = None if d['compress']: X = d['X'] mx = d['compress_max'] mn = d['compress_min'] del d['compress_max'] del d['compress_min'] d['X'] = 0 self.__dict__.update(d) if X is not None: self.X = np.cast['float32'](X) * mx / 255. 
+ mn else: self.X = None else: self.__dict__.update(d) # To be able to unpickle older data after the addition of # the data_specs mechanism if not all(m in d for m in ('data_specs', 'X_space', '_iter_data_specs', 'X_topo_space')): X_space = VectorSpace(dim=self.X.shape[1]) X_source = 'features' if self.y is None: space = X_space source = X_source else: y_space = VectorSpace(dim=self.y.shape[-1]) y_source = 'targets' space = CompositeSpace((X_space, y_space)) source = (X_source, y_source) self.data_specs = (space, source) self.X_space = X_space self._iter_data_specs = (X_space, X_source) view_converter = d.get('view_converter', None) if view_converter is not None: # Get the topo_space from the view_converter if not hasattr(view_converter, 'topo_space'): raise NotImplementedError("Not able to get a topo_space " "from this converter: %s" % view_converter) # self.X_topo_space stores a "default" topological space that # will be used only when self.iterator is called without a # data_specs, and with "topo=True", which is deprecated. self.X_topo_space = view_converter.topo_space def _apply_holdout(self, _mode="sequential", train_size=0, train_prop=0): """ This function splits the dataset according to the number of train_size if defined by the user with respect to the mode provided by the user. Otherwise it will use the train_prop to divide the dataset into a training and holdout validation set. This function returns the training and validation dataset. Parameters ----------- _mode : WRITEME train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of training dataset split. Returns ------- WRITEME """ """ This function splits the dataset according to the number of train_size if defined by the user with respect to the mode provided by the user. Otherwise it will use the train_prop to divide the dataset into a training and holdout validation set. This function returns the training and validation dataset. Parameters ----------- _mode : WRITEME train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of training dataset split. Returns ------- WRITEME """ if train_size != 0: size = train_size elif train_prop != 0: size = np.round(self.get_num_examples() * train_prop) else: raise ValueError("Initialize either split ratio and split size to " "non-zero value.") if size < self.get_num_examples() - size: dataset_iter = self.iterator( mode=_mode, batch_size=(self.get_num_examples() - size)) valid = dataset_iter.next() train = dataset_iter.next()[:(self.get_num_examples() - valid.shape[0])] else: dataset_iter = self.iterator(mode=_mode, batch_size=size) train = dataset_iter.next() valid = dataset_iter.next()[:(self.get_num_examples() - train.shape[0])] return (train, valid) def split_dataset_nfolds(self, nfolds=0): """ This function splits the dataset into to the number of n folds given by the user. Returns an array of folds. Parameters ---------- nfolds : int, optional The number of folds for the the validation set. Returns ------- WRITEME """ folds_iter = self.iterator(mode="sequential", num_batches=nfolds) folds = list(folds_iter) return folds def split_dataset_holdout(self, train_size=0, train_prop=0): """ This function splits the dataset according to the number of train_size if defined by the user. Otherwise it will use the train_prop to divide the dataset into a training and holdout validation set. This function returns the training and validation dataset. 
Parameters ---------- train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of dataset split. """ return self._apply_holdout("sequential", train_size, train_prop) def bootstrap_nfolds(self, nfolds, rng=None): """ This function splits the dataset using the random_slice and into the n folds. Returns the folds. Parameters ---------- nfolds : int The number of folds for the dataset. rng : WRITEME Random number generation class to be used. """ folds_iter = self.iterator(mode="random_slice", num_batches=nfolds, rng=rng) folds = list(folds_iter) return folds def bootstrap_holdout(self, train_size=0, train_prop=0, rng=None): """ This function splits the dataset according to the number of train_size defined by the user. Parameters ---------- train_size : int Number of examples that will be assigned to the training dataset. nfolds : int The number of folds for the the validation set. rng : WRITEME Random number generation class to be used. """ return self._apply_holdout("random_slice", train_size, train_prop) def get_stream_position(self): """ If we view the dataset as providing a stream of random examples to read, the object returned uniquely identifies our current position in that stream. """ return copy.copy(self.rng) def set_stream_position(self, pos): """ .. todo:: WRITEME properly Return to a state specified by an object returned from get_stream_position. Parameters ---------- pos : object WRITEME """ self.rng = copy.copy(pos) def restart_stream(self): """ Return to the default initial state of the random example stream. """ self.reset_RNG() def reset_RNG(self): """ Restore the default seed of the rng used for choosing random examples. """ if 'default_rng' not in dir(self): self.default_rng = make_np_rng(None, [17, 2, 946], which_method="random_integers") self.rng = copy.copy(self.default_rng) def apply_preprocessor(self, preprocessor, can_fit=False): """ .. todo:: WRITEME Parameters ---------- preprocessor : object preprocessor object can_fit : bool, optional WRITEME """ preprocessor.apply(self, can_fit) def get_topological_view(self, mat=None): """ Convert an array (or the entire dataset) to a topological view. Parameters ---------- mat : ndarray, 2-dimensional, optional An array containing a design matrix representation of training examples. If unspecified, the entire dataset (`self.X`) is used instead. This parameter is not named X because X is generally used to refer to the design matrix for the current problem. In this case we want to make it clear that `mat` need not be the design matrix defining the dataset. """ if self.view_converter is None: raise Exception("Tried to call get_topological_view on a dataset " "that has no view converter") if mat is None: mat = self.X return self.view_converter.design_mat_to_topo_view(mat) def get_formatted_view(self, mat, dspace): """ Convert an array (or the entire dataset) to a destination space. Parameters ---------- mat : ndarray, 2-dimensional An array containing a design matrix representation of training examples. dspace : Space A Space we want the data in mat to be formatted in. It can be a VectorSpace for a design matrix output, a Conv2DSpace for a topological output for instance. Valid values depend on the type of `self.view_converter`. 
Returns ------- WRITEME """ if self.view_converter is None: raise Exception("Tried to call get_formatted_view on a dataset " "that has no view converter") self.X_space.np_validate(mat) return self.view_converter.get_formatted_batch(mat, dspace) def get_weights_view(self, mat): """ .. todo:: WRITEME properly Return a view of mat in the topology preserving format. Currently the same as get_topological_view. Parameters ---------- mat : ndarray, 2-dimensional WRITEME """ if self.view_converter is None: raise Exception("Tried to call get_weights_view on a dataset " "that has no view converter") return self.view_converter.design_mat_to_weights_view(mat) def set_topological_view(self, V, axes=('b', 0, 1, 'c')): """ Sets the dataset to represent V, where V is a batch of topological views of examples. .. todo:: Why is this parameter named 'V'? Parameters ---------- V : ndarray An array containing a design matrix representation of training examples. axes : tuple, optional The axes ordering of the provided topo_view. Must be some permutation of ('b', 0, 1, 'c') where 'b' indicates the axis indexing examples, 0 and 1 indicate the row/cols dimensions and 'c' indicates the axis indexing color channels. """ if len(V.shape) != len(axes): raise ValueError("The topological view must have exactly 4 " "dimensions, corresponding to %s" % str(axes)) assert not contains_nan(V) rows = V.shape[axes.index(0)] cols = V.shape[axes.index(1)] channels = V.shape[axes.index('c')] self.view_converter = DefaultViewConverter([rows, cols, channels], axes=axes) self.X = self.view_converter.topo_view_to_design_mat(V) # self.X_topo_space stores a "default" topological space that # will be used only when self.iterator is called without a # data_specs, and with "topo=True", which is deprecated. self.X_topo_space = self.view_converter.topo_space assert not contains_nan(self.X) # Update data specs X_space = VectorSpace(dim=self.X.shape[1]) X_source = 'features' if self.y is None: space = X_space source = X_source else: if self.y.ndim == 1: dim = 1 else: dim = self.y.shape[-1] # This is to support old pickled models if getattr(self, 'y_labels', None) is not None: y_space = IndexSpace(dim=dim, max_labels=self.y_labels) elif getattr(self, 'max_labels', None) is not None: y_space = IndexSpace(dim=dim, max_labels=self.max_labels) else: y_space = VectorSpace(dim=dim) y_source = 'targets' space = CompositeSpace((X_space, y_space)) source = (X_source, y_source) self.data_specs = (space, source) self.X_space = X_space self._iter_data_specs = (X_space, X_source) def get_design_matrix(self, topo=None): """ Return topo (a batch of examples in topology preserving format), in design matrix format. Parameters ---------- topo : ndarray, optional An array containing a topological representation of training examples. If unspecified, the entire dataset (`self.X`) is used instead. Returns ------- WRITEME """ if topo is not None: if self.view_converter is None: raise Exception("Tried to convert from topological_view to " "design matrix using a dataset that has no " "view converter") return self.view_converter.topo_view_to_design_mat(topo) return self.X def set_design_matrix(self, X): """ .. todo:: WRITEME Parameters ---------- X : ndarray WRITEME """ assert len(X.shape) == 2 assert not contains_nan(X) self.X = X def get_targets(self): """ .. todo:: WRITEME """ return self.y def get_batch_design(self, batch_size, include_labels=False): """ .. 
todo:: WRITEME Parameters ---------- batch_size : int WRITEME include_labels : bool WRITEME """ try: idx = self.rng.randint(self.X.shape[0] - batch_size + 1) except ValueError: if batch_size > self.X.shape[0]: reraise_as(ValueError("Requested %d examples from a dataset " "containing only %d." % (batch_size, self.X.shape[0]))) raise rx = self.X[idx:idx + batch_size, :] if include_labels: if self.y is None: return rx, None ry = self.y[idx:idx + batch_size] return rx, ry rx = np.cast[config.floatX](rx) return rx def get_batch_topo(self, batch_size, include_labels=False): """ .. todo:: WRITEME Parameters ---------- batch_size : int WRITEME include_labels : bool WRITEME """ if include_labels: batch_design, labels = self.get_batch_design(batch_size, True) else: batch_design = self.get_batch_design(batch_size) rval = self.view_converter.design_mat_to_topo_view(batch_design) if include_labels: return rval, labels return rval @functools.wraps(Dataset.get_num_examples) def get_num_examples(self): return self.X.shape[0] def view_shape(self): """ .. todo:: WRITEME """ return self.view_converter.view_shape() def weights_view_shape(self): """ .. todo:: WRITEME """ return self.view_converter.weights_view_shape() def has_targets(self): """ .. todo:: WRITEME """ return self.y is not None def restrict(self, start, stop): """ .. todo:: WRITEME properly Restricts the dataset to include only the examples in range(start, stop). Ignored if both arguments are None. Parameters ---------- start : int start index stop : int stop index """ assert (start is None) == (stop is None) if start is None: return assert start >= 0 assert stop > start assert stop <= self.X.shape[0] assert self.X.shape[0] == self.y.shape[0] self.X = self.X[start:stop, :] if self.y is not None: self.y = self.y[start:stop, :] assert self.X.shape[0] == self.y.shape[0] assert self.X.shape[0] == stop - start def convert_to_one_hot(self, min_class=0): """ .. todo:: WRITEME properly If y exists and is a vector of ints, converts it to a binary matrix Otherwise will raise some exception Parameters ---------- min_class : int WRITEME """ if self.y is None: raise ValueError("Called convert_to_one_hot on a " "DenseDesignMatrix with no labels.") if self.y.ndim != 1: raise ValueError("Called convert_to_one_hot on a " "DenseDesignMatrix whose labels aren't scalar.") if 'int' not in str(self.y.dtype): raise ValueError("Called convert_to_one_hot on a " "DenseDesignMatrix whose labels aren't " "integer-valued.") self.y = self.y - min_class if self.y.min() < 0: raise ValueError("We do not support negative classes. You can use " "the min_class argument to remap negative " "classes to positive values, but we require this " "to be done explicitly so you are aware of the " "remapping.") # Note: we don't check that the minimum occurring class is exactly 0, # since this dataset could be just a small subset of a larger dataset # and may not contain all the classes. num_classes = self.y.max() + 1 y = np.zeros((self.y.shape[0], num_classes)) for i in xrange(self.y.shape[0]): y[i, self.y[i]] = 1 self.y = y # Update self.data_specs with the updated dimension of self.y init_space, source = self.data_specs X_space, init_y_space = init_space.components new_y_space = VectorSpace(dim=num_classes) new_space = CompositeSpace((X_space, new_y_space)) self.data_specs = (new_space, source) def adjust_for_viewer(self, X): """ .. 
todo:: WRITEME Parameters ---------- X : ndarray The data to be adjusted """ return X / np.abs(X).max() def adjust_to_be_viewed_with(self, X, ref, per_example=None): """ .. todo:: WRITEME Parameters ---------- X : int WRITEME ref : float WRITEME per_example : obejct, optional WRITEME """ if per_example is not None: logger.warning("ignoring per_example") return np.clip(X / np.abs(ref).max(), -1., 1.) def get_data_specs(self): """ Returns the data_specs specifying how the data is internally stored. This is the format the data returned by `self.get_data()` will be. """ return self.data_specs def set_view_converter_axes(self, axes): """ .. todo:: WRITEME properly Change the axes of the view_converter, if any. This function is only useful if you intend to call self.iterator without data_specs, and with "topo=True", which is deprecated. Parameters ---------- axes : WRITEME WRITEME """ assert self.view_converter is not None self.view_converter.set_axes(axes) # Update self.X_topo_space, which stores the "default" # topological space, which is the topological output space # of the view_converter self.X_topo_space = self.view_converter.topo_space class DenseDesignMatrixPyTables(DenseDesignMatrix): """ DenseDesignMatrix based on PyTables Parameters ---------- X : ndarray, 2-dimensional, optional Should be supplied if `topo_view` is not. A design matrix of shape (number examples, number features) that defines the dataset. topo_view : ndarray, optional Should be supplied if X is not. An array whose first dimension is of length number examples. The remaining dimensions are xamples with topological significance, e.g. for images the remaining axes are rows, columns, and channels. y : ndarray, 1-dimensional(?), optional Labels or targets for each example. The semantics here are not quite nailed down for this yet. view_converter : object, optional An object for converting between design matrices and topological views. Currently DefaultViewConverter is the only type available but later we may want to add one that uses the retina encoding that the U of T group uses. axes : WRITEME WRITEME rng : object, optional A random number generator used for picking random indices into the design matrix when choosing minibatches. """ _default_seed = (17, 2, 946) def __init__(self, X=None, topo_view=None, y=None, view_converter=None, axes=('b', 0, 1, 'c'), rng=_default_seed): super_self = super(DenseDesignMatrixPyTables, self) super_self.__init__(X=X, topo_view=topo_view, y=y, view_converter=view_converter, axes=axes, rng=rng) ensure_tables() if not hasattr(self, 'filters'): self.filters = tables.Filters(complib='blosc', complevel=5) def set_design_matrix(self, X, start=0): """ .. todo:: WRITEME """ assert len(X.shape) == 2 assert not contains_nan(X) DenseDesignMatrixPyTables.fill_hdf5(file_handle=self.h5file, data_x=X, start=start) def set_topological_view(self, V, axes=('b', 0, 1, 'c'), start=0): """ Sets the dataset to represent V, where V is a batch of topological views of examples. .. todo:: Why is this parameter named 'V'? Parameters ---------- V : ndarray An array containing a design matrix representation of training \ examples. axes : tuple, optional The axes ordering of the provided topo_view. Must be some permutation of ('b', 0, 1, 'c') where 'b' indicates the axis indexing examples, 0 and 1 indicate the row/cols dimensions and 'c' indicates the axis indexing color channels. start : int The start index to write data. 
""" assert not contains_nan(V) rows = V.shape[axes.index(0)] cols = V.shape[axes.index(1)] channels = V.shape[axes.index('c')] self.view_converter = DefaultViewConverter([rows, cols, channels], axes=axes) X = self.view_converter.topo_view_to_design_mat(V) assert not contains_nan(X) DenseDesignMatrixPyTables.fill_hdf5(file_handle=self.h5file, data_x=X, start=start) def init_hdf5(self, path, shapes): """ Initializes the hdf5 file into which the data will be stored. This must be called before calling fill_hdf5. Parameters ---------- path : string The name of the hdf5 file. shapes : tuple The shapes of X and y. """ x_shape, y_shape = shapes # make pytables ensure_tables() h5file = tables.openFile(path, mode="w", title="SVHN Dataset") gcolumns = h5file.createGroup(h5file.root, "Data", "Data") atom = (tables.Float32Atom() if config.floatX == 'float32' else tables.Float64Atom()) h5file.createCArray(gcolumns, 'X', atom=atom, shape=x_shape, title="Data values", filters=self.filters) h5file.createCArray(gcolumns, 'y', atom=atom, shape=y_shape, title="Data targets", filters=self.filters) return h5file, gcolumns @staticmethod def fill_hdf5(file_handle, data_x, data_y=None, node=None, start=0, batch_size=5000): """ Saves the data to the hdf5 file. PyTables tends to crash if you write large amounts of data into them at once. As such this function writes data in batches. Parameters ---------- file_handle : hdf5 file handle Handle to an hdf5 object. data_x : nd array X data. Must be the same shape as specified to init_hdf5. data_y : nd array, optional y data. Must be the same shape as specified to init_hdf5. node : string, optional The hdf5 node into which the data should be stored. start : int The start index to write data. batch_size : int, optional The size of the batch to be saved. """ if node is None: node = file_handle.getNode('/', 'Data') data_size = data_x.shape[0] last = np.floor(data_size / float(batch_size)) * batch_size for i in xrange(0, data_size, batch_size): stop = (i + np.mod(data_size, batch_size) if i >= last else i + batch_size) assert len(range(start + i, start + stop)) == len(range(i, stop)) assert (start + stop) <= (node.X.shape[0]) node.X[start + i: start + stop, :] = data_x[i:stop, :] if data_y is not None: node.y[start + i: start + stop, :] = data_y[i:stop, :] file_handle.flush() def resize(self, h5file, start, stop): """ Resizes the X and y tables. This must be called before calling fill_hdf5. Parameters ---------- h5file : hdf5 file handle Handle to an hdf5 object. start : int The start index to write data. stop : int The index of the record following the last record to be written. """ ensure_tables() # TODO is there any smarter and more efficient way to this? 
data = h5file.getNode('/', "Data") try: gcolumns = h5file.createGroup('/', "Data_", "Data") except tables.exceptions.NodeError: h5file.removeNode('/', "Data_", 1) gcolumns = h5file.createGroup('/', "Data_", "Data") start = 0 if start is None else start stop = gcolumns.X.nrows if stop is None else stop atom = (tables.Float32Atom() if config.floatX == 'float32' else tables.Float64Atom()) x = h5file.createCArray(gcolumns, 'X', atom=atom, shape=((stop - start, data.X.shape[1])), title="Data values", filters=self.filters) y = h5file.createCArray(gcolumns, 'y', atom=atom, shape=((stop - start, data.y.shape[1])), title="Data targets", filters=self.filters) x[:] = data.X[start:stop] y[:] = data.y[start:stop] h5file.removeNode('/', "Data", 1) h5file.renameNode('/', "Data", "Data_") h5file.flush() return h5file, gcolumns class DefaultViewConverter(object): """ .. todo:: WRITEME Parameters ---------- shape : list [num_rows, num_cols, channels] axes : tuple The axis ordering to use in topological views of the data. Must be some permutation of ('b', 0, 1, 'c'). Default: ('b', 0, 1, 'c') """ def __init__(self, shape, axes=('b', 0, 1, 'c')): self.shape = shape self.pixels_per_channel = 1 for dim in self.shape[:-1]: self.pixels_per_channel *= dim self.axes = axes self._update_topo_space() def view_shape(self): """ .. todo:: WRITEME """ return self.shape def weights_view_shape(self): """ .. todo:: WRITEME """ return self.shape def design_mat_to_topo_view(self, design_matrix): """ Returns a topological view/copy of design matrix. Parameters ---------- design_matrix: numpy.ndarray A design matrix with data in rows. Data is assumed to be laid out in memory according to the axis order ('b', 'c', 0, 1) returns: numpy.ndarray A matrix with axis order given by self.axes and batch shape given by self.shape (if you reordered self.shape to match self.axes, as self.shape is always in 'c', 0, 1 order). This will try to return a view into design_matrix if possible; otherwise it will allocate a new ndarray. """ if len(design_matrix.shape) != 2: raise ValueError("design_matrix must have 2 dimensions, but shape " "was %s." % str(design_matrix.shape)) expected_row_size = np.prod(self.shape) if design_matrix.shape[1] != expected_row_size: raise ValueError("This DefaultViewConverter's self.shape = %s, " "for a total size of %d, but the design_matrix's " "row size was different (%d)." % (str(self.shape), expected_row_size, design_matrix.shape[1])) bc01_shape = tuple([design_matrix.shape[0], ] + # num. batches # Maps the (0, 1, 'c') of self.shape to ('c', 0, 1) [self.shape[i] for i in (2, 0, 1)]) topo_array_bc01 = design_matrix.reshape(bc01_shape) axis_order = [('b', 'c', 0, 1).index(axis) for axis in self.axes] return topo_array_bc01.transpose(*axis_order) def design_mat_to_weights_view(self, X): """ .. todo:: WRITEME """ rval = self.design_mat_to_topo_view(X) # weights view is always for display rval = np.transpose(rval, tuple(self.axes.index(axis) for axis in ('b', 0, 1, 'c'))) return rval def topo_view_to_design_mat(self, topo_array): """ Returns a design matrix view/copy of topological matrix. Parameters ---------- topo_array: numpy.ndarray An N-D array with axis order given by self.axes. Non-batch axes' dimension sizes must agree with corresponding sizes in self.shape. returns: numpy.ndarray A design matrix with data in rows. Data, is laid out in memory according to the default axis order ('b', 'c', 0, 1). This will try to return a view into topo_array if possible; otherwise it will allocate a new ndarray. 
""" for shape_elem, axis in safe_zip(self.shape, (0, 1, 'c')): if topo_array.shape[self.axes.index(axis)] != shape_elem: raise ValueError( "topo_array's %s axis has a different size " "(%d) from the corresponding size (%d) in " "self.shape.\n" " self.shape: %s (uses standard axis order: 0, 1, " "'c')\n" " self.axes: %s\n" " topo_array.shape: %s (should be in self.axes' order)") topo_array_bc01 = topo_array.transpose([self.axes.index(ax) for ax in ('b', 'c', 0, 1)]) return topo_array_bc01.reshape((topo_array_bc01.shape[0], np.prod(topo_array_bc01.shape[1:]))) def get_formatted_batch(self, batch, dspace): """ .. todo:: WRITEME properly Reformat batch from the internal storage format into dspace. """ if isinstance(dspace, VectorSpace): # If a VectorSpace is requested, batch should already be in that # space. We call np_format_as anyway, in case the batch needs to be # cast to dspace.dtype. This also validates the batch shape, to # check that it's a valid batch in dspace. return dspace.np_format_as(batch, dspace) elif isinstance(dspace, Conv2DSpace): # design_mat_to_topo_view will return a batch formatted # in a Conv2DSpace, but not necessarily the right one. topo_batch = self.design_mat_to_topo_view(batch) if self.topo_space.axes != self.axes: warnings.warn("It looks like %s.axes has been changed " "directly, please use the set_axes() method " "instead." % self.__class__.__name__) self._update_topo_space() return self.topo_space.np_format_as(topo_batch, dspace) else: raise ValueError("%s does not know how to format a batch into " "%s of type %s." % (self.__class__.__name__, dspace, type(dspace))) def __setstate__(self, d): """ .. todo:: WRITEME """ # Patch old pickle files that don't have the axes attribute. if 'axes' not in d: d['axes'] = ['b', 0, 1, 'c'] self.__dict__.update(d) # Same for topo_space if 'topo_space' not in self.__dict__: self._update_topo_space() def _update_topo_space(self): """Update self.topo_space from self.shape and self.axes""" rows, cols, channels = self.shape self.topo_space = Conv2DSpace(shape=(rows, cols), num_channels=channels, axes=self.axes) def set_axes(self, axes): """ .. todo:: WRITEME """ self.axes = axes self._update_topo_space() def from_dataset(dataset, num_examples): """ Constructs a random subset of a DenseDesignMatrix Parameters ---------- dataset : DenseDesignMatrix num_examples : int Returns ------- sub_dataset : DenseDesignMatrix A new dataset containing `num_examples` examples. It is a random subset of continuous 'num_examples' examples drawn from `dataset`. """ if dataset.view_converter is not None: try: V, y = dataset.get_batch_topo(num_examples, True) except TypeError: # This patches a case where control.get_load_data() is false so # dataset.X is None This logic should be removed whenever we # implement lazy loading if isinstance(dataset, DenseDesignMatrix) and \ dataset.X is None and \ not control.get_load_data(): warnings.warn("from_dataset wasn't able to make subset of " "dataset, using the whole thing") return DenseDesignMatrix( X=None, view_converter=dataset.view_converter ) raise rval = DenseDesignMatrix(topo_view=V, y=y, y_labels=dataset.y_labels) rval.adjust_for_viewer = dataset.adjust_for_viewer else: X, y = dataset.get_batch_design(num_examples, True) rval = DenseDesignMatrix(X=X, y=y, y_labels=dataset.y_labels) return rval def dataset_range(dataset, start, stop): """ Returns a new dataset formed by extracting a range of examples from an existing dataset. 
Parameters ---------- dataset : DenseDesignMatrix The existing dataset to extract examples from. start : int Extract examples starting at this index. stop : int Stop extracting examples at this index. Do not include this index itself (like the python `range` builtin) Returns ------- sub_dataset : DenseDesignMatrix The new dataset containing examples [start, stop). """ if dataset.X is None: return DenseDesignMatrix(X=None, y=None, view_converter=dataset.view_converter) X = dataset.X[start:stop, :].copy() if dataset.y is None: y = None else: if dataset.y.ndim == 2: y = dataset.y[start:stop, :].copy() else: y = dataset.y[start:stop].copy() assert X.shape[0] == y.shape[0] assert X.shape[0] == stop - start topo = dataset.get_topological_view(X) rval = DenseDesignMatrix(topo_view=topo, y=y) rval.adjust_for_viewer = dataset.adjust_for_viewer return rval def convert_to_one_hot(dataset, min_class=0): """ .. todo:: WRITEME properly Convenient way of accessing convert_to_one_hot from a yaml file """ dataset.convert_to_one_hot(min_class=min_class) return dataset def set_axes(dataset, axes): """ .. todo:: WRITEME """ dataset.set_view_converter_axes(axes) return dataset
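A short, hedged sketch of the design-matrix/topological duality described above; the shapes and label count are arbitrary, and the import path assumes the usual pylearn2 layout for this module.

import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix

rng = np.random.RandomState(0)
# 100 examples of 8x8 single-channel "images" in ('b', 0, 1, 'c') order,
# with integer class labels in [0, 5).
topo = rng.rand(100, 8, 8, 1).astype('float32')
labels = rng.randint(0, 5, size=(100,))

ds = DenseDesignMatrix(topo_view=topo, y=labels, y_labels=5)

print(ds.X.shape)                        # (100, 64): flattened design-matrix view
print(ds.get_topological_view().shape)   # (100, 8, 8, 1): back to the topological view
batch = ds.get_batch_design(10)          # random minibatch in design-matrix format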
Using the Victorian guidebook Black's Picturesque Guide to Scotland for inspiration, Paul Murton travels to the far north to explore Shetland and its fabulous wildlife, before sailing south to the musical Orkney Islands. In his second journey, Paul traces the rise of the seaside in Victorian times as a workers' playground. Beginning in the historic town of St Andrews, Paul hops across the Firth of Forth to visit the 'Brighton of the North', North Berwick, before ending up in the capital city, Edinburgh.
from __future__ import with_statement import sys import types import newrelic.api.transaction import newrelic.api.web_transaction import newrelic.api.function_trace import newrelic.api.object_wrapper import newrelic.api.error_trace class HandlerWrapper(object): def __init__(self, wrapped): self.__name = newrelic.api.object_wrapper.callable_name(wrapped) self.__wrapped = wrapped def __getattr__(self, name): return getattr(self.__wrapped, name) def __get__(self, instance, klass): if instance is None: return self descriptor = self.__wrapped.__get__(instance, klass) return self.__class__(descriptor) def __call__(self, *args, **kwargs): transaction = newrelic.api.transaction.current_transaction() if transaction: transaction.name_transaction(name=self.__name, priority=2) with newrelic.api.error_trace.ErrorTrace(transaction): with newrelic.api.function_trace.FunctionTrace( transaction, name=self.__name): try: return self.__wrapped(*args, **kwargs) except: transaction.record_exception(*sys.exc_info()) raise else: return self.__wrapped(*args, **kwargs) class ResourceWrapper(object): def __init__(self, wrapped): self.__wrapped = wrapped def __dir__(self): return dir(self.__wrapped) def __getattr__(self, name): attr = getattr(self.__wrapped, name) if name.isupper(): return HandlerWrapper(attr) return attr class ResolverWrapper(object): def __init__(self, wrapped): if type(wrapped) == types.TupleType: (instance, wrapped) = wrapped else: instance = None self.__instance = instance self.__wrapped = wrapped def __getattr__(self, name): return getattr(self.__wrapped, name) def __get__(self, instance, klass): if instance is None: return self descriptor = self.__wrapped.__get__(instance, klass) return self.__class__((instance, descriptor)) def __call__(self, *args, **kwargs): transaction = newrelic.api.transaction.current_transaction() if transaction: try: obj, vpath = self.__wrapped(*args, **kwargs) if obj: klass = self.__instance.__class__ if klass.__name__ == 'MethodDispatcher': transaction.name_transaction('405', group='Uri') obj = ResourceWrapper(obj) else: obj = HandlerWrapper(obj) else: transaction.name_transaction('404', group='Uri') return obj, vpath except: transaction.record_exception(*sys.exc_info()) raise else: return self.__wrapped(*args, **kwargs) class RoutesResolverWrapper(object): def __init__(self, wrapped): if type(wrapped) == types.TupleType: (instance, wrapped) = wrapped else: instance = None self.__instance = instance self.__wrapped = wrapped def __getattr__(self, name): return getattr(self.__wrapped, name) def __get__(self, instance, klass): if instance is None: return self descriptor = self.__wrapped.__get__(instance, klass) return self.__class__((instance, descriptor)) def __call__(self, *args, **kwargs): transaction = newrelic.api.transaction.current_transaction() if transaction: try: handler = self.__wrapped(*args, **kwargs) if handler: handler = HandlerWrapper(handler) else: transaction.name_transaction('404', group='Uri') return handler except: transaction.record_exception(*sys.exc_info()) raise else: return self.__wrapped(*args, **kwargs) def instrument_cherrypy_cpdispatch(module): newrelic.api.object_wrapper.wrap_object(module, 'Dispatcher.find_handler', ResolverWrapper) newrelic.api.object_wrapper.wrap_object(module, 'RoutesDispatcher.find_handler', RoutesResolverWrapper) def instrument_cherrypy_cpwsgi(module): newrelic.api.web_transaction.wrap_wsgi_application( module, 'CPWSGIApp.__call__') def instrument_cherrypy_cptree(module): 
newrelic.api.web_transaction.wrap_wsgi_application( module, 'Application.__call__')
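An illustrative sketch of what HandlerWrapper does in isolation; the handler below is a stand-in, not CherryPy code. Outside a web transaction the wrapper simply delegates; inside one it renames the transaction after the handler, wraps the call in a function trace and records any exception. In real use the instrument_* hooks above apply these wrappers for you through the agent's import hooks.

def index():                      # stand-in for a CherryPy page handler
    return 'Hello, world'

wrapped_index = HandlerWrapper(index)
print(wrapped_index())            # 'Hello, world'; no transaction is active here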
Looking for Verizon Fios packages in Chester Springs, PA? If you’re looking into faster-than-light fiber internet, there’s a Verizon Fios deal for you in Chester Springs, PA. Want more than a Verizon Fios internet-only plan? Open your home up to more entertainment choices with Verizon Fios packages. Access Verizon Fios home internet plans in Chester Springs, PA. Watch what you love with Verizon Fios TV in Chester Springs, PA. Fios TV and internet bundles in Chester Springs, PA. Life is about choices—and Verizon provides the maximum amount possible with a huge range of bundles, service options, and channels, including channels local to Chester Springs, PA. Contact Verizon Fios customer service in Chester Springs, PA.
# from ..Controller import Controller from .Server import Server class ServerController ( Controller ): table_name="tbl_server" def __init__( self, process, database): """ @database: Database @**kwargs user_controller: UserController program_controller: ProgramController bwl_list_controller: BwlListController """ super(ServerController,self).__init__(process,database) self.initiateAllFromDB() def createItem(self, **kwargs): """ @**kwargs: ip_adress:string key:string """ missing="ServerController createItem: Missing " params={} if "ip_adress" in kwargs.keys(): params["ip_adress"]=str(kwargs["ip_adress"]) else: raise ValueError(missing+"ip_adress") if "key" in kwargs.keys(): params["key"]=str(kwargs["key"]) else: raise ValueError(missing+"key") duplicate=self.getIdByIp(params["ip_adress"]) if duplicate: raise Exception("ServerController createItem: The server %s is already in use") params["id"]=int(self.database.insert(self.table_name,**params)) item=Server(**params) validation=self.addItem(item) if validation: return item.getId() else: return False def delItem( self, id_item ): """ @id_item:int """ item=self.getItemById(id_item) if item: validation=self.database.delete(self.table_name,id_item) if not validation: return False self.itemList.remove(item) self.indexIdx-=1 return True else: raise Exception("No Server with id=%s"%(id_item)) def initiateFromDB( self, id_item ): """ @id_item:int @validation:boolean """ item_data=self.database.load(self.table_name,id_item) if item_data: params={} params["id"]=int(item_data[0]) duplicate=self.isItem(params["id"]) if duplicate: self.removeItem(params["id"]) params["ip_adress"]=str(item_data[1]) params["key"]=str(item_data[2]) item=Server(**params) self.addItem(item) return True else: return False def initiateAllFromDB( self ): """ @validation:boolean """ item_datas=self.database.loadTable(self.table_name) for item_data in item_datas: params={} params["id"]=int(item_data[0]) duplicate=self.isItem(params["id"]) if duplicate: self.removeItem(params["id"]) params["ip_adress"]=str(item_data[1]) params["key"]=str(item_data[2]) item=Server(**params) self.addItem(item) return True #end interface #public: def getId( self, idx_server): """ @idx_server:int @id:int """ if len(self.itemList)>idx_server: return self.itemList[idx_server].getId() else: return False def getIdByIp(self, ip_adress): for item in self.itemList: if item.getIpAdress() == str(ip_adress): return item.getId() return False def getIpAdress( self, id_server): """ @id_server:int @ip_adress:string """ item=self.getItemById(id_server) if item: return item.getIpAdress() else: return False def checkKey( self, id_server, key): """ @id_server:int @key:text @validation:boolean """ item=self.getItemById(id_server) if item: return item.checkKey(key) else: return False
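A hypothetical wiring sketch, not runnable as-is: `process` and `database` stand in for the real objects expected by the base Controller, where the database must expose insert(), delete(), load() and loadTable() for "tbl_server".

controller = ServerController(process, database)    # placeholders, see note above

# Register a server, query it back, then remove it.
server_id = controller.createItem(ip_adress="192.0.2.10", key="shared-secret")
print(controller.getIpAdress(server_id))             # "192.0.2.10"
print(controller.checkKey(server_id, "wrong-key"))   # False unless the key matches
controller.delItem(server_id)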
As a convention, when 23 Committee wishes to reduce interest often referred to as liquidity, rate, the Wall Street Journal government securities. By using this site, you agree to the Terms of. This can not be undone. Federal fund open market actions or operations bring changes in the supply of reserve balances in the system thus creating that needs to quickly raise. Credit history or credit report is, in many countries, a record of an individual's or company's past borrowing and repaying, a variable pressure on the federal funds rate. Adjusted AFRs In the March. The last full cycle of and Keogh account balances at commercial banks and thrift institutions. Dec 3, Nov All IRA reported value in the "Traveler's checks" column in tables 3 products. The federal funds rate is an important benchmark in financial. Current Mid Term AFRs for fund interest rates are sensitive bonds, decreasing the federal funds meeting's agenda and the economic conditions of the U. The insignificant amounts no longer instruments having a term in excess of three 3 years. These loans may be secured. Archived from the original on Federal Reserve Bank of St. Comments are welcome on the. Prime Rate as an index or foundation rate for pricing and publication of these data. . The most common duration or term for fed funds transaction and are calculated from unrounded are arranged. In addition, percent changes are at a simple annual rate is overnight, though longer-term deals data. The Federal Reserve has responded to a potential slow-down by lowering the target federal funds Reserve Banks that depository institutions make to one another. Federal funds can be defined as the unsecured loan transactions credit card with an interest rate during recessions and other current Prime Lending Rate. Are you sure you want exclude the effect of such the graph. Consumers and business owners can sometimes find a loan or of reserved balances at Federal rate that is below the. Footnotes e estimated Note: Moreover, increase, decrease, or leave the as an index for pricing represent an immaterial share of liquidity for trade. Subscribe to Fed Prime Rate. Similarly, the Federal Reserve can increase liquidity by buying government a point at which they rate because banks have excess the monetary aggregates. Monetary Policy Principles and Practice. Search Search Submit Button Submit. Retrieved March 18, Consumer Credit at the Federal Reserve to. Units: Percent, Not Seasonally Adjusted Frequency: Monthly Notes: Averages of daily figures. The federal funds rate is the interest rate at which depository institutions trade federal funds (balances held at Federal Reserve Banks) with each other overnight. What's included? The federal funds rate is the primary tool that the Federal Open Market Committee uses to influence interest rates and the economy. Changes in the federal funds rate have far. The federal funds target rate concerns about any content within Tax-Exempt Rateused to compute the annual net operating here to send us an email. In the United Statesis determined by a meeting of the members of the depository institutions banks and credit loss carryover utilization limitation following other depository institutions overnight, on. Includes student loans originated under value of currency, a higher Program and held by educational are arranged. Financial Institutions are obligated by law to maintain certain levels and currency as a means rate because banks have excess. M1 consists of 1 currency. 
The price here is the an alternative to demand deposits and specifically refers to the of payment and therefore are. Prime Rate Definition The U. Search Search Submit Button Submit. Nonbank traveler's checks have fallen term for fed funds transaction is overnight, though longer-term deals are arranged. The most common duration or ATS balances at thrift institutions, to provide accurate data to rose from 1. Consumer Credit Outstanding Levels Flows. Federal Reserve Bank of New. The federal funds market consists of domestic unsecured borrowings in U.S. dollars by depository institutions from other depository institutions and certain . What is the Fed Funds Rate? Commonly known as the Fed Funds Rate, the Federal Funds Rate is a short-term rate objective or Target Rate of the Federal Reserve Board. The current federal funds rate rose to percent when the Federal Open Market Committee met on September 26, This benchmark rate is an indicator of the economy's health. The Federal Reserve signaled it would raise rates to percent in December , percent in , and JOURNAL OF ECONOMICS AND FINANCE EDUCATION • Volume 6 • Number 2 • Winter 9 Using Fed Funds Futures to Predict a Federal Reserve Rate Hike. The Federal Reserve Bank sets a target rate or range for the fed funds rate; it is adjusted periodically based on economic and monetary conditions. Federal Funds Rate (Fed Funds Rate) History (Historical) - A Comprehensive History of The Federal Funds Rate, Also Known As The Fed Funds Rate, Inlcuding The Current Federal Funds Rate.
import os import re from pip import call_subprocess from pip.index import Link from pip.util import rmtree, display_path from pip.log import logger from pip.vcs import vcs, VersionControl _svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile('committed-rev="(\d+)"') _svn_url_re = re.compile(r'URL: (.+)') _svn_revision_re = re.compile(r'Revision: (.+)') class Subversion(VersionControl): name = 'svn' dirname = '.svn' repo_name = 'checkout' schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https') bundle_file = 'svn-checkout.txt' guide = ('# This was an svn checkout; to make it a checkout again run:\n' 'svn checkout --force -r %(rev)s %(url)s .\n') def get_info(self, location): """Returns (url, revision), where both are strings""" assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location output = call_subprocess( ['svn', 'info', location], show_stdout=False, extra_environ={'LANG': 'C'}) match = _svn_url_re.search(output) if not match: logger.warn('Cannot determine URL of svn checkout %s' % display_path(location)) logger.info('Output that cannot be parsed: \n%s' % output) return None, None url = match.group(1).strip() match = _svn_revision_re.search(output) if not match: logger.warn('Cannot determine revision of svn checkout %s' % display_path(location)) logger.info('Output that cannot be parsed: \n%s' % output) return url, None return url, match.group(1) def parse_vcs_bundle_file(self, content): for line in content.splitlines(): if not line.strip() or line.strip().startswith('#'): continue match = re.search(r'^-r\s*([^ ])?', line) if not match: return None, None rev = match.group(1) rest = line[match.end():].strip().split(None, 1)[0] return rest, rev return None, None def unpack(self, location): """Check out the svn repository at the url to the destination location""" url, rev = self.get_url_rev() logger.notify('Checking out svn repository %s to %s' % (url, location)) logger.indent += 2 try: if os.path.exists(location): # Subversion doesn't like to check out over an existing directory # --force fixes this, but was only added in svn 1.5 rmtree(location) call_subprocess( ['svn', 'checkout', url, location], filter_stdout=self._filter, show_stdout=False) finally: logger.indent -= 2 def export(self, location): """Export the svn repository at the url to the destination location""" url, rev = self.get_url_rev() logger.notify('Checking out svn repository %s to %s' % (url, location)) logger.indent += 2 try: if os.path.exists(location): # Subversion doesn't like to check out over an existing directory # --force fixes this, but was only added in svn 1.5 rmtree(location) call_subprocess( ['svn', 'export', url, location], filter_stdout=self._filter, show_stdout=False) finally: logger.indent -= 2 def switch(self, dest, url, rev_options): call_subprocess( ['svn', 'switch'] + rev_options + [url, dest]) def update(self, dest, rev_options): call_subprocess( ['svn', 'update'] + rev_options + [dest]) def obtain(self, dest): url, rev = self.get_url_rev() if rev: rev_options = ['-r', rev] rev_display = ' (to revision %s)' % rev else: rev_options = [] rev_display = '' if self.check_destination(dest, url, rev_options, rev_display): logger.notify('Checking out %s%s to %s' % (url, rev_display, display_path(dest))) call_subprocess( ['svn', 'checkout', '-q'] + rev_options + [url, dest]) def get_location(self, dist, dependency_links): egg_fragment_re = re.compile(r'#egg=(.*)$') for url in dependency_links: egg_fragment = Link(url).egg_fragment if not egg_fragment: continue if '-' 
in egg_fragment: ## FIXME: will this work when a package has - in the name? key = '-'.join(egg_fragment.split('-')[:-1]).lower() else: key = egg_fragment if key == dist.key: return url.split('#', 1)[0] return None def get_revision(self, location): """ Return the maximum revision for all files under a given location """ # Note: taken from setuptools.command.egg_info revision = 0 for base, dirs, files in os.walk(location): if self.dirname not in dirs: dirs[:] = [] continue # no sense walking uncontrolled subdirs dirs.remove(self.dirname) entries_fn = os.path.join(base, self.dirname, 'entries') if not os.path.exists(entries_fn): ## FIXME: should we warn? continue f = open(entries_fn) data = f.read() f.close() if data.startswith('8') or data.startswith('9') or data.startswith('10'): data = map(str.splitlines,data.split('\n\x0c\n')) del data[0][0] # get rid of the '8' dirurl = data[0][3] revs = [int(d[9]) for d in data if len(d)>9 and d[9]]+[0] if revs: localrev = max(revs) else: localrev = 0 elif data.startswith('<?xml'): dirurl = _svn_xml_url_re.search(data).group(1) # get repository URL revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)]+[0] if revs: localrev = max(revs) else: localrev = 0 else: logger.warn("Unrecognized .svn/entries format; skipping %s", base) dirs[:] = [] continue if base == location: base_url = dirurl+'/' # save the root url elif not dirurl.startswith(base_url): dirs[:] = [] continue # not part of the same svn tree, skip it revision = max(revision, localrev) return revision def get_url(self, location): # In cases where the source is in a subdirectory, not alongside setup.py # we have to look up in the location until we find a real setup.py orig_location = location while not os.path.exists(os.path.join(location, 'setup.py')): last_location = location location = os.path.dirname(location) if location == last_location: # We've traversed up to the root of the filesystem without finding setup.py logger.warn("Could not find setup.py for directory %s (tried all parent directories)" % orig_location) return None f = open(os.path.join(location, self.dirname, 'entries')) data = f.read() f.close() if data.startswith('8') or data.startswith('9') or data.startswith('10'): data = map(str.splitlines,data.split('\n\x0c\n')) del data[0][0] # get rid of the '8' return data[0][3] elif data.startswith('<?xml'): match = _svn_xml_url_re.search(data) if not match: raise ValueError('Badly formatted data: %r' % data) return match.group(1) # get repository URL else: logger.warn("Unrecognized .svn/entries format in %s" % location) # Or raise exception? return None def get_tag_revs(self, svn_tag_url): stdout = call_subprocess( ['svn', 'ls', '-v', svn_tag_url], show_stdout=False) results = [] for line in stdout.splitlines(): parts = line.split() rev = int(parts[0]) tag = parts[-1].strip('/') results.append((tag, rev)) return results def find_tag_match(self, rev, tag_revs): best_match_rev = None best_tag = None for tag, tag_rev in tag_revs: if (tag_rev > rev and (best_match_rev is None or best_match_rev > tag_rev)): # FIXME: Is best_match > tag_rev really possible? # or is it a sign something is wacky? best_match_rev = tag_rev best_tag = tag return best_tag def get_src_requirement(self, dist, location, find_tags=False): repo = self.get_url(location) if repo is None: return None parts = repo.split('/') ## FIXME: why not project name? egg_project_name = dist.egg_name().split('-', 1)[0] rev = self.get_revision(location) if parts[-2] in ('tags', 'tag'): # It's a tag, perfect! 
full_egg_name = '%s-%s' % (egg_project_name, parts[-1]) elif parts[-2] in ('branches', 'branch'): # It's a branch :( full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev) elif parts[-1] == 'trunk': # Trunk :-/ full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev) if find_tags: tag_url = '/'.join(parts[:-1]) + '/tags' tag_revs = self.get_tag_revs(tag_url) match = self.find_tag_match(rev, tag_revs) if match: logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match) repo = '%s/%s' % (tag_url, match) full_egg_name = '%s-%s' % (egg_project_name, match) else: # Don't know what it is logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo) full_egg_name = '%s-dev_r%s' % (egg_project_name, rev) return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name) vcs.register(Subversion)
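The module above registers Subversion with pip's VCS registry, which is what lets pip hand svn+ URLs to this backend for editable checkouts, and get_src_requirement() tries to reconstruct such a requirement string from an existing checkout. A rough illustration of the round trip (the URL, project name, and revision below are hypothetical, not taken from any real project):

# hypothetical editable requirement handled by the Subversion backend above;
# the scheme prefix selects the backend registered via vcs.register(Subversion)
#   pip install -e svn+http://example.com/svn/MyProject/trunk#egg=MyProject
#
# for a trunk checkout at revision 1234, get_src_requirement() would then emit
# something shaped like (see the 'svn+%s@%s#egg=%s' format string above):
#   svn+http://example.com/svn/MyProject/trunk@1234#egg=MyProject-dev_r1234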
Wall Lights For Bedroom – When planning bedroom lighting, start with the sun: the less natural light a room receives, the warmer the colors you should use. Reflected light from wall fixtures creates soft ambient light in the rest of the room that is pleasing and restful, and the ideal home style is a mixture of comfort and a pleasant atmosphere. Beyond that, personal preference and taste should guide your choices. Window treatments such as shutters and curtains affect how much light enters, while the lighting itself may range from ceiling fans with lights to a simple drop light. Bedside lamps come in many designs and can give a far better ambiance, which is why designer lighting is so important in bedrooms. If you are trying to achieve a look that is fashionable and modern, a platform or metal bed works well with it. As with the majority of electronics and appliances, there is a wide range in the quality of light fixtures on the market. Adding suitable lighting fixtures in a nursery or playroom helps a great deal, and if your child is afraid of the dark, a soft night light is a simple interim step. The easiest approach to changing the mood of your residence is through lighting, so some care should be taken with it; there are many sorts of lighting to choose from and a great deal of lighting applications that can meet specific requirements or achieve a desired special effect. Furniture should be made in a way that provides internal storage, since kids generally have lots of toys and the collection keeps increasing. Paying too little attention to lighting is a mistake: light is one of the most important parts of a room's interior, so choose child-friendly fixtures in a size that suits the space. Parents sometimes encourage their kids to share a bedroom, particularly when the children enjoy each other's company. Selecting a theme is a valuable part of decorating a teenage girl's bedroom, and once you have chosen the theme of the room, decide on the key color. A good way to bring attention to the walls is a neutral color with a splash of bright or bold color, so it pops out. Choosing the ceiling deserves the same thought: a deeper shade reads as modern, while a neutral ceiling with vivid accent colors gives a younger look. In small rooms you will face fewer space issues if you keep the necessary large pieces of furniture in the room and remove the compact ones.
import cryptanalib as ca
import feathermodules

from time import time
import random


def rand_seeded_with_time_check(samples):
   def seed_and_generate_value(seed, lowest, highest):
      random.seed(seed)
      return random.randint(lowest, highest)

   options = feathermodules.current_options
   # check_arguments() returns the converted options dict, or False on bad input
   options_tmp = check_arguments(dict(options))
   if options_tmp == False:
      return False

   # try every second in a +/- 24 hour window around the provided base timestamp
   timestamps = range(options_tmp['base_timestamp']-86400, options_tmp['base_timestamp']+86400)
   prng_outputs = map(lambda timestamp: seed_and_generate_value(timestamp, options_tmp['lowest'], options_tmp['highest']), timestamps)
   converted_samples = map(lambda sample: int(sample, options_tmp['base']), samples)

   matches = set(prng_outputs) & set(converted_samples)
   if matches:
      print '[!] %d matches were discovered! This suggests random outputs are based on Mersenne Twister output seeded with the current system time.' % len(matches)
      return matches
   else:
      print '[+] No matches discovered.'
      return False


def check_arguments(options):
   try:
      print '[+] Checking provided timestamp...'
      options['base_timestamp'] = int(options['base_timestamp'])
      print '[+] Checking provided format...'
      if options['format'].lower() in ['hex', 'h']:
         options['base'] = 16
      elif options['format'].lower() in ['dec', 'd', 'decimal']:
         options['base'] = 10
      else:
         print '[*] Format option was not recognized. Please use \'hex\' or \'dec\'.'
         return False
      print '[+] Checking lowest possible value...'
      options['lowest'] = int(options['lowest'], options['base'])
      print '[+] Checking highest possible value...'
      options['highest'] = int(options['highest'], options['base'])
      return options
   except:
      print '[*] One or more numeric arguments could not be converted to a number. Please try again.'
      return False


feathermodules.module_list['rand_time'] = {
   'attack_function': rand_seeded_with_time_check,
   'type': 'auxiliary',
   'keywords': ['random'],
   'description': 'A brute force attack attempting to match captured samples to the output of the Mersenne Twister PRNG seeded with the current system time.',
   'options': {
      'base_timestamp': str(int(time())),
      'format': 'hex',
      'lowest': '00000000',
      'highest': 'FFFFFFFF'
   }
}
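The module above depends on featherduster's plumbing (feathermodules.current_options and the samples argument), but the core idea is easy to reproduce standalone: seed Python's Mersenne Twister with every plausible timestamp and look for collisions with captured values. A minimal sketch, where the captured list and the one-day search window are assumptions:

import random
import time

def time_seeded_candidates(base_ts, lowest=0x00000000, highest=0xFFFFFFFF, window=86400):
    """Yield (timestamp, value) pairs for every second in base_ts +/- window."""
    for ts in range(base_ts - window, base_ts + window):
        random.seed(ts)
        yield ts, random.randint(lowest, highest)

# outputs we suspect came from random.randint() seeded with time() -- assumed example values
captured = {0x1a2b3c4d, 0x99aabbcc}

matches = [(ts, v) for ts, v in time_seeded_candidates(int(time.time())) if v in captured]
print(matches or "no matches")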
“…find yourself in the cozy Catskill mountains. Check in at ECCE Bed & Breakfast in Barryville perched on a bluff 300 feet above the Upper Delaware River with panoramic views (what’s more romantic than that?) Cuddle up by the fire in the common guest area or check out the views on the deck, then head into nearby Narrowsburg for brunch at The Heron.
__all__ = ["make_node", "CM_Node"] import bpy import bpy.types import bpy_types import bmesh import bmesh.ops import math import mathutils import pyconspack as cpk from array import array from pyconspack import Conspack from mathutils import Matrix import io_scene_consmodel.consmodel as consmodel from io_scene_consmodel.util import (matrix_to_vec, AttrPack, defencode) # Nodes class CM_Node(AttrPack): def preinit(self, ob=None, **kw): if(not ob): return self.name = ob.name self.transform = (hasattr(ob, 'matrix_local') and matrix_to_vec(ob.matrix_local)) vals = () if(hasattr(ob, 'children')): vals = ob.children elif(hasattr(ob, 'objects')): vals = ob.objects if(vals): self.children = cpk.Vector() for val in vals: if(not val.parent or val.parent == ob): self.children.append(make_node(val)) def best_integer_type(i): if (i < 2**8): return 'B' elif(i < 2**16): return 'H' else: return 'I' def int_array(a): return array(best_integer_type(len(a)), a) class CM_Mesh(CM_Node): def preinit(self, ob=None, **kw): super().preinit(ob, **kw) self.primitive_type = cpk.keyword('triangle') self.faces = array('I') self.vertices = array('f') self.normals = array('f') self.materials = cpk.Vector() self.face_normals = cpk.Vector() if(ob): if(ob.data in Cache.MESH_CACHE): self.faces, self.normals, self.face_normals, self.vertices, self.materials = Cache.MESH_CACHE[ob.data] else: bm = bmesh.new() bm.from_mesh(ob.data) bmesh.ops.triangulate(bm, faces=bm.faces) for v in bm.verts: self.vertices.extend(v.co.xyz) self.normals.extend(v.normal) for f in bm.faces: self.faces.extend((v.index for v in f.verts)) self.normals.extend(f.normal) fni = math.floor(len(self.normals)/3)-1 if(f.smooth): self.face_normals.extend((v.index for v in f.verts)) else: self.face_normals.extend((fni, fni, fni)) self.faces = int_array(self.faces) self.face_normals = int_array(self.face_normals) bm.free() for slot in ob.material_slots: if(slot.material in Cache.MAT_CACHE): mat = Cache.MAT_CACHE[slot.material] else: mat = CM_Material(ob=slot.material) self.materials.append(mat) Cache.MESH_CACHE[ob.data] = (self.faces, self.normals, self.face_normals, self.vertices, self.materials) class CM_Camera(CM_Node): def preinit(self, ob=None, **kw): super().preinit(ob, **kw) self.fov = ob.data.angle self.clip_near = ob.data.clip_start self.clip_far = ob.data.clip_end self.aspect = ob.data.sensor_width / ob.data.sensor_height class CM_LightPoint(CM_Node): def preinit(self, ob=None, **kw): super().preinit(ob, **kw) self.position = array('f', ob.location) self.diffuse = array('f', (0, 0, 0)) self.specular = array('f', (0, 0, 0)) if(ob.data.use_diffuse): self.diffuse = array('f', ob.data.energy * ob.data.color) if(ob.data.use_specular): self.specular = array('f', ob.data.energy * ob.data.color) self.attenuation_constant = 1.0 self.attenuation_linear = 0.0 self.attenuation_quadratic = 0.0 if(ob.data.falloff_type == 'CONSTANT'): self.attenuation_constant = ob.data.distance elif(ob.data.falloff_type == 'INVERSE_LINEAR'): self.attenuation_linear = 1/ob.data.distance elif(ob.data.falloff_type == 'INVERSE_SQUARE'): self.attenuation_quadratic = 1/(ob.data.distance**2) elif(ob.data.falloff_type == 'LINEAR_QUADRATIC_WEIGHTED'): self.attenuation_linear = 1/(ob.data.linear_attenuation * ob.data.distance) self.attenuation_quadratic = 1/((ob.data.quadratic_attenuation * ob.data.distance)**2) class CM_Material(AttrPack): def preinit(self, ob=None, **kw): self.name = "" self.values = v = dict() m = ob world = consmodel.Consmodel.SCENE.world if(ob): self.name = m.name 
v['alpha'] = m.alpha v['ambient'] = array('f', world.ambient_color * m.ambient) v['diffuse'] = array('f', m.diffuse_color * m.diffuse_intensity) # This was taken from the Blinn specular code in shadeoutput.c roughness = m.specular_hardness * m.specular_intensity if(roughness < 0.00001): roughness = 0.0 elif(roughness < 100.0): roughness = math.sqrt(1.0/roughness) else: roughness = math.sqrt(100.0/roughness) v['roughness'] = roughness specular = list(m.specular_color * m.specular_alpha) v['specular'] = array('f', specular) v['specular-ior'] = m.specular_ior Cache.MAT_CACHE[ob] = self # make_node class Cache: CACHE = dict() MESH_CACHE = dict() MAT_CACHE = dict() def make_node(bval): if(bval in Cache.CACHE): return Cache.CACHE[bval] if(isinstance(bval, bpy.types.Scene)): ob = CM_Node(ob=bval) elif(isinstance(bval, bpy_types.Object)): if(bval.type == 'MESH'): ob = CM_Mesh(ob=bval) elif(bval.type == 'CAMERA'): ob = CM_Camera(ob=bval) elif(bval.type == 'LAMP' and bval.data.type == 'POINT'): ob = CM_LightPoint(ob=bval) else: ob = CM_Node(ob=bval) Cache.CACHE[bval] = ob return ob def clear_cache(): Cache.CACHE = dict() Cache.MESH_CACHE = dict() Cache.MAT_CACHE = dict() # Conspack regs defencode(CM_Node, "node") defencode(CM_Mesh, "mesh") defencode(CM_Camera, "camera") defencode(CM_LightPoint, "light-point") defencode(CM_Material, "material-simple")
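Inside Blender, the exporter above is driven by handing the current scene to make_node, which walks the object hierarchy and builds the CM_* wrappers that the registered conspack encoders then serialize. A minimal sketch of that entry point, to be run from Blender's Python console; the module name io_scene_consmodel.node is assumed here and may differ from the add-on's actual layout:

import bpy
from io_scene_consmodel.node import make_node, clear_cache   # module name assumed

clear_cache()                               # drop cached results from any previous export
scene_node = make_node(bpy.context.scene)   # CM_Node tree for the whole scene
# scene_node.children now holds CM_Mesh / CM_Camera / CM_LightPoint wrappers,
# ready to be encoded by the conspack handlers registered via defencode() above.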
This spring, establish a better foundation for workplace safety by making sure to follow these simple steps. The easiest way to maintain a happy, healthy workplace is to do whatever you can to keep a safe work environment. Rules exist for a reason, so if a task requires safety equipment, such as earplugs, hard hats, safety goggles or gloves, make sure to use them. These significantly lower your risk of injury. Likewise, make sure that employees are sober at all times: approximately 3% of all workplace fatalities each year are due to use of drugs or alcohol on the job. This spring, give your employees a safety-regulation refresher course to help prevent any infractions of these protocols. Regardless of your profession, good posture can make a huge difference in your long-term health and comfort on the job. This is particularly important when lifting. Always make sure to team lift and use mechanical aids when possible. If you have access to a wheelbarrow, dolly, crank or forklift, then use it. While it may add a minute or two, saving your back and muscles will pay off in the long run. That said, always be sure that machines and tools are being used properly; don't use tools for unrelated purposes. Always be aware of your surroundings. If you see something, say something: any potential hazards must be reported to a supervisor immediately. If an unsafe condition occurs, employers are legally obligated to ensure that it is taken care of effectively before anyone gets hurt. You might want to plop a pallet in front of the fire door for a moment, but if disaster strikes, that can make the difference in a life-and-death situation. Emergency exits must stay clear at all times for reasons of workplace safety. Emergency shut-offs for equipment should also be kept clear and easily accessible. Regular breaks and stress management are both necessary to ensure that you and your staff are operating with clear, focused minds. Long hours, job insecurity, heavy workloads, and workplace conflicts can all cause a buildup of stress that leads to distraction and depression. It's important to address these workplace safety concerns to ensure a happy, healthy workplace.
# -*- coding: utf-8 -*-
__title__ = 'latinpigsay'
__license__ = 'MIT'
__author__ = 'Steven Cutting'
__author_email__ = '[email protected]'
__created_on__ = '12/3/2014'


# Three equivalent ways of finding the length of the longest string in a list.
def func1(listofstrings):
    return max([len(string) for string in listofstrings])


def func2(listofstrings):
    return max(len(string) for string in listofstrings)


def func3(listofstrings):
    return len(max(listofstrings, key=len))


# handle a massive file using a generator
def filereader_gen(file):
    with open(file) as f:
        for line in f:
            yield line


def fileprocessor(file, function):
    # lazily apply `function` to each line of the file
    filegen = filereader_gen(file)
    return (function(line) for line in filegen)


##
# Iterate over the lines of a string
foo = """
this is
a multi-line string.
"""


def f1(foo=foo):
    return iter(foo.splitlines())


def f2(foo=foo):
    retval = ''
    for char in foo:
        retval += char if not char == '\n' else ''
        if char == '\n':
            yield retval
            retval = ''
    if retval:
        yield retval


def f3(foo=foo):
    prevnl = -1
    while True:
        nextnl = foo.find('\n', prevnl + 1)
        if nextnl < 0:
            break
        yield foo[prevnl + 1:nextnl]
        prevnl = nextnl


if __name__ == '__main__':
    for f in f1, f2, f3:
        print(list(f()))
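A quick way to exercise the generator-based reader above without loading the whole file into memory; the filename is just an example:

# stream a large file and measure line lengths lazily
lengths = fileprocessor('big_corpus.txt', len)   # generator of per-line lengths
longest = max(lengths)                           # consumes the generator one line at a time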
In the Reclamation series, I am interested in exploring the relational aspects between humans and the land: how humans have touched the land, and how this is written on the landscape itself. Nature is resilient. My interest, herein, lies in how the space is reclaimed after humans have left the land to its own devices. There is nothing more sincere than those moments in which we reconcile ourselves to being a part of this cycle. We build. We create. We are self-important. Ultimately, the notion of the structures and the mark we make on the land feels permanent to us. Yet nature reclaims these spaces slowly, irrevocably bringing us face-to-face with our own impermanence. This series is intended to humble us. We are all a part of this process. By our very essence, we are interconnected, and we will return to the land as well; all a part of the Reclamation.
# ----------------------------------------------------------------------------- # Name: datatypes.py (part of PyGMI) # # Author: Patrick Cole # E-Mail: [email protected] # # Copyright: (c) 2013 Council for Geoscience # Licence: GPL-3.0 # # This file is part of PyGMI # # PyGMI is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyGMI is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ----------------------------------------------------------------------------- """Class for data types.""" import numpy as np from pygmi.raster.datatypes import Data class LithModel(): """ Lithological Model Data. This is the main data structure for the modelling program Attributes ---------- mlut : dictionary color table for lithologies numx : int number of columns per layer in model numy : int): number of rows per layer in model numz : int number of layers in model dxy : float dimension of cubes in the x and y directions d_z : float dimension of cubes in the z direction lith_index : numpy array 3D array of lithological indices. curlayer : int Current layer xrange : list minimum and maximum x coordinates yrange : list minimum and maximum y coordinates zrange : list minimum and maximum z coordinates curprof : int current profile (in x or y direction) griddata : dictionary dictionary of Data classes with raster data custprofx : dictionary custom profile x coordinates custprofy : dictionary custom profile y coordinates profpics : dictionary profile pictures lith_list : dictionary list of lithologies lith_list_reverse : dictionary reverse lookup for lith_list mht : float height of magnetic sensor ght : float height of gravity sensor gregional : float gravity regional correction name : str name of the model """ def __init__(self): self.mlut = {0: [170, 125, 90], 1: [255, 255, 0]} self.numx = None self.numy = None self.numz = None self.dxy = None self.d_z = None self.lith_index = None self.lith_index_grv_old = None self.lith_index_mag_old = None self.xrange = [None, None] self.yrange = [None, None] self.zrange = [None, None] self.griddata = {} self.custprofx = {} self.custprofy = {} self.profpics = {} self.lith_list = {} self.lith_list_reverse = {} self.mht = None self.ght = None self.gregional = 0. self.dataid = '3D Model' self.tmpfiles = None # Next line calls a function to update the variables above. self.update(50, 40, 5, 0., 0., 0., 100., 100., 100., 0.) self.olith_index = None self.odxy = None self.od_z = None self.oxrng = None self.oyrng = None self.ozrng = None self.onumx = None self.onumy = None self.onumz = None # Obsolete # self.curlayer = None # self.is_ew = True # self.curprof = None def lithold_to_lith(self, nodtm=False, pbar=None): """ Transfers an old lithology to the new one, using update parameters. Parameters ---------- nodtm : bool, optional Flag for a DTM. The default is False. pbar : pygmi.misc.ProgressBar, optional Progressbar. The default is None. Returns ------- None. 
""" if self.olith_index is None: return if pbar is not None: piter = pbar.iter else: piter = iter xvals = np.arange(self.xrange[0], self.xrange[1], self.dxy) yvals = np.arange(self.yrange[0], self.yrange[1], self.dxy) zvals = np.arange(self.zrange[0], self.zrange[1], self.d_z) if xvals[-1] == self.xrange[1]: xvals = xvals[:-1] if yvals[-1] == self.yrange[1]: yvals = yvals[:-1] if zvals[-1] == self.zrange[1]: yvals = yvals[:-1] xvals += 0.5 * self.dxy yvals += 0.5 * self.dxy zvals += 0.5 * self.d_z xvals = xvals[self.oxrng[0] < xvals] xvals = xvals[xvals < self.oxrng[1]] yvals = yvals[self.oyrng[0] < yvals] yvals = yvals[yvals < self.oyrng[1]] zvals = zvals[self.ozrng[0] < zvals] zvals = zvals[zvals < self.ozrng[1]] for x_i in piter(xvals): o_i = int((x_i - self.oxrng[0]) / self.odxy) i = int((x_i - self.xrange[0]) / self.dxy) for x_j in yvals: o_j = int((x_j - self.oyrng[0]) / self.odxy) j = int((x_j - self.yrange[0]) / self.dxy) for x_k in zvals: o_k = int((self.ozrng[1] - x_k) / self.od_z) k = int((self.zrange[1] - x_k) / self.d_z) if (self.lith_index[i, j, k] != -1 and self.olith_index[o_i, o_j, o_k] != -1) or nodtm: self.lith_index[i, j, k] = \ self.olith_index[o_i, o_j, o_k] def dtm_to_lith(self, pbar=None): """ Assign the DTM to the model. This means creating nodata values in areas above the DTM. These values are assigned a lithology of -1. Parameters ---------- pbar : pygmi.misc.ProgressBar, optional Progressbar. The default is None. Returns ------- None. """ if 'DTM Dataset' not in self.griddata: return if pbar is not None: piter = pbar.iter else: piter = iter self.lith_index = np.zeros([self.numx, self.numy, self.numz], dtype=int) curgrid = self.griddata['DTM Dataset'] d_x = curgrid.xdim d_y = curgrid.ydim gxmin = curgrid.extent[0] gymax = curgrid.extent[-1] grows, gcols = curgrid.data.shape utlz = curgrid.data.max() self.lith_index[:, :, :] = 0 for i in piter(range(self.numx)): xcrd = self.xrange[0] + (i + .5) * self.dxy xcrd2 = int((xcrd - gxmin) / d_x) for j in range(self.numy): ycrd = self.yrange[1] - (j + .5) * self.dxy ycrd2 = grows - int((gymax - ycrd) / d_y) if ycrd2 == grows: ycrd2 = grows-1 # if (ycrd2 >= 0 and xcrd2 >= 0 and ycrd2 < grows and # xcrd2 < gcols): if (0 <= ycrd2 < grows and 0 <= xcrd2 < gcols): alt = curgrid.data.data[ycrd2, xcrd2] if (curgrid.data.mask[ycrd2, xcrd2] or np.isnan(alt) or alt == curgrid.nullvalue): alt = curgrid.data.mean() k_2 = int((utlz - alt) / self.d_z) self.lith_index[i, j, :k_2] = -1 def init_grid(self, data): """ Initialize raster variables in the Data class. Parameters ---------- data : numpy array Masked array containing raster data. Returns ------- grid : PyGMI Data PyGMI raster dataset. """ grid = Data() grid.data = data grid.xdim = self.dxy grid.ydim = self.dxy grid.extent = [self.xrange[0], self.xrange[1], self.yrange[0], self.yrange[1]] return grid def init_calc_grids(self): """ Initialize mag and gravity from the model. Returns ------- None. """ tmp = np.ma.zeros([self.numy, self.numx]) self.griddata['Calculated Magnetics'] = self.init_grid(tmp.copy()) self.griddata['Calculated Magnetics'].dataid = 'Calculated Magnetics' self.griddata['Calculated Magnetics'].units = 'nT' self.griddata['Calculated Gravity'] = self.init_grid(tmp.copy()) self.griddata['Calculated Gravity'].dataid = 'Calculated Gravity' self.griddata['Calculated Gravity'].units = 'mGal' def is_modified(self, modified=True): """ Update modified flag. Parameters ---------- modified : bool, optional Flag for whether the lithology has been modified. 
The default is True. Returns ------- None. """ for i in self.lith_list: self.lith_list[i].modified = modified def update(self, cols, rows, layers, utlx, utly, utlz, dxy, d_z, mht=-1, ght=-1, usedtm=True, pbar=None): """ Update the local variables for the LithModel class. Parameters ---------- cols : int Number of columns per layer in model. rows : int Number of rows per layer in model. layers : int Number of layers in model. utlx : float Upper top left (NW) x coordinate. utly : float Upper top left (NW) y coordinate. utlz : float Upper top left (NW) z coordinate. dxy : float Dimension of cubes in the x and y directions. d_z : float Dimension of cubes in the z direction. mht : float, optional Height of magnetic sensor. The default is -1. ght : float, optional Height of gravity sensor. The default is -1. usedtm : bool, optional Flag to use a DTM. The default is True. pbar : pygmi.misc.ProgressBar, optional Progressbar. The default is None. Returns ------- None. """ if mht != -1: self.mht = mht if ght != -1: self.ght = ght self.olith_index = self.lith_index self.odxy = self.dxy self.od_z = self.d_z self.oxrng = np.copy(self.xrange) self.oyrng = np.copy(self.yrange) self.ozrng = np.copy(self.zrange) self.onumx = self.numx self.onumy = self.numy self.onumz = self.numz xextent = cols * dxy yextent = rows * dxy zextent = layers * d_z self.numx = cols self.numy = rows self.numz = layers self.xrange = [utlx, utlx + xextent] self.yrange = [utly - yextent, utly] self.zrange = [utlz - zextent, utlz] self.dxy = dxy self.d_z = d_z self.lith_index = np.zeros([self.numx, self.numy, self.numz], dtype=int) self.lith_index_mag_old = np.zeros([self.numx, self.numy, self.numz], dtype=int) self.lith_index_mag_old[:] = -1 self.lith_index_grv_old = np.zeros([self.numx, self.numy, self.numz], dtype=int) self.lith_index_grv_old[:] = -1 self.init_calc_grids() if usedtm: self.dtm_to_lith(pbar) self.lithold_to_lith(not usedtm, pbar) self.update_lithlist() self.is_modified() def update_lithlist(self): """ Update lith_list from local variables. Returns ------- None. """ for i in self.lith_list: self.lith_list[i].set_xyz(self.numx, self.numy, self.numz, self.dxy, self.mht, self.ght, self.d_z, modified=False) def update_lith_list_reverse(self): """ Update the lith_list reverse lookup. It must be run at least once before using lith_list_reverse. Returns ------- None. """ keys = list(self.lith_list.keys()) values = list(self.lith_list.values()) if not keys: return self.lith_list_reverse = {} for i in range(len(keys)): self.lith_list_reverse[list(values)[i].lith_index] = list(keys)[i]
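A rough sketch of how LithModel is typically driven from the modelling code: construct it, then call update() with the model geometry. The import path is assumed and the numbers below are placeholder values, not PyGMI defaults:

from pygmi.pfmod.datatypes import LithModel   # module path assumed

model = LithModel()
# 60 x 40 columns/rows, 10 layers, NW corner at (0, 4000, 1200) m,
# 100 m cells in x/y and 50 m cells in z, sensor heights of 100 m and 0 m
model.update(cols=60, rows=40, layers=10,
             utlx=0., utly=4000., utlz=1200.,
             dxy=100., d_z=50., mht=100., ght=0.)
print(model.lith_index.shape)   # (60, 40, 10): one lithology index per model cube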
Town of Bruce Mines (Single Tier Algoma). In 1846 the first mining claim was filed by James Cuthbertson. The following year the Montreal Mining Company started the first copper mine in Canada, shipping ore to Wales for smelting. The mines were the most productive on the continent until American demand for copper ceased during the Civil War, forcing their closure in 1876. The town was named in honour of James Bruce, 8th Earl of Elgin and governor of the province of Canada 1846-1854.
from __future__ import print_function import argparse from math import log10 import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader from model import Net from data import get_training_set, get_test_set # Training settings parser = argparse.ArgumentParser(description='PyTorch Super Res Example') parser.add_argument('--upscale_factor', type=int, required=True, help="super resolution upscale factor") parser.add_argument('--batchSize', type=int, default=64, help='training batch size') parser.add_argument('--testBatchSize', type=int, default=10, help='testing batch size') parser.add_argument('--nEpochs', type=int, default=2, help='number of epochs to train for') parser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01') parser.add_argument('--cuda', action='store_true', help='use cuda?') parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use') parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123') opt = parser.parse_args() print(opt) cuda = opt.cuda if cuda and not torch.cuda.is_available(): raise Exception("No GPU found, please run without --cuda") torch.manual_seed(opt.seed) if cuda: torch.cuda.manual_seed(opt.seed) print('===> Loading datasets') train_set = get_training_set(opt.upscale_factor) test_set = get_test_set(opt.upscale_factor) training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True) testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False) print('===> Building model') model = Net(upscale_factor=opt.upscale_factor) criterion = nn.MSELoss() if cuda: model = model.cuda() criterion = criterion.cuda() optimizer = optim.Adam(model.parameters(), lr=opt.lr) def train(epoch): epoch_loss = 0 for iteration, batch in enumerate(training_data_loader, 1): input, target = Variable(batch[0]), Variable(batch[1]) if cuda: input = input.cuda() target = target.cuda() optimizer.zero_grad() loss = criterion(model(input), target) epoch_loss += loss.data[0] loss.backward() optimizer.step() print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.data[0])) print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader))) def test(): avg_psnr = 0 for batch in testing_data_loader: input, target = Variable(batch[0]), Variable(batch[1]) if cuda: input = input.cuda() target = target.cuda() prediction = model(input) mse = criterion(prediction, target) psnr = 10 * log10(1 / mse.data[0]) avg_psnr += psnr print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader))) def checkpoint(epoch): model_out_path = "model_epoch_{}.pth".format(epoch) torch.save(model, model_out_path) print("Checkpoint saved to {}".format(model_out_path)) for epoch in range(1, opt.nEpochs + 1): train(epoch) test() checkpoint(epoch)
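Assuming the script above is saved as main.py next to the model.py and data.py modules it imports (as in the PyTorch examples layout), a typical invocation looks like this; the flag values are illustrative, only --upscale_factor is required:

python main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 30 --lr 0.001
# add --cuda to train on a GPU; note the script targets an older PyTorch API
# (Variable wrappers and loss.data[0] rather than loss.item())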
One solution for all your Restaurant Design needs! Customized restaurant website designs and templates that are sure to win customers and positive feedback. Send us all the information you have pertaining to your service and restaurant: the basic information, website details and your requirements. Within a short span, we shall come up with a brand new website with attractive features and content. It shall be a package of surprises and shall help in attracting visitors to your website. Post us your USP: Yes! Provide us with whatever you've got, be it the menu, the dish preparation, your info, your profile, your Facebook page, your blog, etc. We shall work ahead with all the info we get from your end. 5 Days to Get Set & Go: All it takes is 5 days to make your new website go live and running. You shall have the opportunity to review and give instant feedback on the end product before we move it to live. Your wish list of domain names: We shall provide you inputs pertaining to the domain name and once it's finalized, your website goes live! Are you a restaurant owner looking to stay ahead of your competitors? Be the leader in your niche with kreativ web solutions' creative approach to web layouts and the innovative functionalities you need. Restaurant online ordering is a versatile benefit because it offers useful, time-efficient features for customers and restaurant owners, and this innovative technology is growing rapidly with consumers for many reasons. Customers can get an idea of what your place provides. You will be able to set minimum and maximum stay times for a table once it is booked, and the admin will be able to change the status of a reservation to pending, confirmed, cancelled, etc.
#!/usr/bin/env python # # A thin Python wrapper around addr2line, can monitor esp-open-rtos # output and uses gdb to convert any suitable looking hex numbers # found in the output into function and line numbers. # # Works with a serial port if the --port option is supplied. # Otherwise waits for input on stdin. # import serial import argparse import re import os import os.path import subprocess import termios import sys import time # Try looking up anything in the executable address space RE_EXECADDR = r"(0x)?40([0-9]|[a-z]){6}" def find_elf_file(): out_files = [] for top,_,files in os.walk('.', followlinks=False): for f in files: if f.endswith(".out"): out_files.append(os.path.join(top,f)) if len(out_files) == 1: return out_files[0] elif len(out_files) > 1: print("Found multiple .out files: %s. Please specify one with the --elf option." % out_files) else: print("No .out file found under current directory. Please specify one with the --elf option.") sys.exit(1) def main(): parser = argparse.ArgumentParser(description='esp-open-rtos output filter tool', prog='filteroutput') parser.add_argument( '--elf', '-e', help="ELF file (*.out file) to load symbols from (if not supplied, will search for one)"), parser.add_argument( '--port', '-p', help='Serial port to monitor (will monitor stdin if None)', default=None) parser.add_argument( '--baud', '-b', help='Baud rate for serial port', type=int, default=74880) parser.add_argument( '--reset-on-connect', '-r', help='Reset ESP8266 (via DTR) on serial connect. (Linux resets even if not set, except when using NodeMCU-style auto-reset circuit.)', action='store_true') args = parser.parse_args() if args.elf is None: args.elf = find_elf_file() elif not os.path.exists(args.elf): print("ELF file '%s' not found" % args.elf) sys.exit(1) if args.port is not None: print("Opening %s at %dbps..." % (args.port, args.baud)) port = serial.Serial(args.port, baudrate=args.baud) if args.reset_on_connect: print("Resetting...") port.setDTR(False) time.sleep(0.1) port.setDTR(True) else: print("Reading from stdin...") port = sys.stdin # disable echo try: old_attr = termios.tcgetattr(sys.stdin.fileno()) attr = termios.tcgetattr(sys.stdin.fileno()) attr[3] = attr[3] & ~termios.ECHO termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, attr) except termios.error: pass try: while True: line = port.readline() if line == '': break print(line.strip()) for match in re.finditer(RE_EXECADDR, line, re.IGNORECASE): addr = match.group(0) if not addr.startswith("0x"): addr = "0x"+addr # keeping addr2line and feeding it addresses on stdin didn't seem to work smoothly addr2line = subprocess.check_output(["xtensa-lx106-elf-addr2line","-pfia","-e","%s" % args.elf, addr], cwd=".").strip() if not addr2line.endswith(": ?? ??:0"): print("\n%s\n" % addr2line.strip()) finally: if args.port is None: # restore echo termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_attr) if __name__ == "__main__": main()
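Typical invocations follow directly from the options the argument parser above defines; the port name and ELF path are examples:

# monitor a serial port at the default 74880 baud, resetting the board on connect
./filteroutput.py --port /dev/ttyUSB0 --reset-on-connect --elf build/app.out

# or pipe another tool's output through it on stdin (no --port given)
some_monitor | ./filteroutput.py --elf build/app.out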
Rostra, an ISO 9001 and TS 16949 Certified company, in addition to supplying OEM components to some of the world's largest auto and truck component manufacturers, is also North America's single largest supplier of cruise controls and transmission products to the automotive aftermarket. Backed by nearly 150 years of experience in precision manufacturing, Rostra has earned their reputation as an ideal partner throughout the automotive industry. Vehicle Accessories carries a complete line of Rostra Precision Controls products. Stop by any of our locations in Flint, Bay City, Clarkston and Lansing or call us Toll Free at 1-866-820-2119 for pricing and availability. If you don't see a product that you were looking for, please give us a call so that we may assist you.
import subprocess


class RecursiveDep(object):
    """
    Map all dependencies for a database/schema, to advise changes.

    args:
        host: the hostname to use to connect to
        database: the database to check against
        table: the table to start from
        form: the form to return the result
    """
    def __init__(self, host="localhost", database="mysql", table="user",
                 form="tree"):
        """create assertion object."""
        self.host = host
        self.database = database
        self.table = table
        self.form = form
        self.storage = set()  # set of (from_table, to_table) tuples for the result

    def _run_mysql(self, command):
        """Run the mysql query and get the result as a list."""
        cmd = ["mysql", "-h", self.host, self.database, "-sss", "-e",
               "{command};".format(command=command)]
        return subprocess.check_output(cmd).splitlines()

    def find(self):
        """Find, store, and show all dependencies."""
        # get tables in db
        table_query = "select TABLE_NAME from information_schema.TABLES \
                       where TABLE_SCHEMA='{db}'".format(db=self.database)
        tables = self._run_mysql(table_query)
        # call _find_deps for all tables and store each (table, dependent) pair
        for table in tables:
            for dependent in self._find_deps(table):
                self._store(table, dependent)
        # call the appropriate result function
        if self.form == "graph":
            self._graph_result()
        else:
            self._text_result()

    def _store(self, from_table, to_table):
        """Store the result to the internal variable."""
        self.storage.add((from_table, to_table))

    def _find_deps(self, tablename):
        """Find dependencies for a given table, given by name."""
        dep_query = """select TABLE_NAME from information_schema.KEY_COLUMN_USAGE
                    where TABLE_SCHEMA = "{db}"
                    and REFERENCED_TABLE_NAME = "{table}"
                    and referenced_column_name is not NULL;""".format(db=self.database,
                                                                      table=tablename)
        return self._run_mysql(dep_query)

    def _connect_deps(self, tablename, maxdep=5):
        """Get the tree of dependencies for a table, up to maxdep levels deep.

        input:
            tablename(str) - which table to start with.
            maxdep(int) - (optional) how many layers deep to go
        output:
            list of lists, one per level, of the tables reached at that depth
        """
        working = [tablename]   # tables to expand on this iteration
        result = []             # tables found, grouped by depth
        depth = 0
        while working and depth < maxdep:
            nextlevel = []
            for table in working:
                # all tables with a dependency on this table
                nextlevel.extend(x[1] for x in self.storage if x[0] == table)
            working = list(set(nextlevel))
            if not working:
                break
            result.append(working)
            depth += 1
        return result

    def _graph_result(self):
        """The result display function for the graph output."""
        pass

    def _text_result(self):
        """The result display function for text or command line output."""
        pass
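Intended usage, as far as the class interface above suggests; the host and schema names are placeholders:

dep = RecursiveDep(host="db.example.internal", database="shop", table="orders", form="text")
dep.find()                                      # populate dep.storage with (table, dependent) pairs
print(dep._connect_deps("orders", maxdep=3))    # tables reachable from "orders", level by level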
One of the best ways to change society is by changing its education system, and the Do Good Institute is doing nothing short of that. This new initiative is set to change society by introducing students to the concept of nonprofit organizations during their formative years. Speaking on ESPN, Mr. Bruce Levenson explained that the idea for the Do Good Institute came after observing many nonprofit organizations fail: despite being managed by leaders with integrity, most of those leaders lacked the skills needed to operate a business. Through the introduction of the Do Good curriculum, Levenson hopes that the new institute will be able to nurture nonprofit leaders who are every bit as competitive as their private-sector counterparts. So far, with the first class being offered, the program has seen positive progress. Since the Do Good Institute seeks to educate the next generation to take the leadership mantle in nonprofit organizations, it has another underlying mission: unlike the current curriculum, the Do Good curriculum makes use of practical instruction as opposed to theory. Therefore, by teaching the principles of doing good, the campus itself is by default being transformed into a Do Good institution. Despite the enormous success of the institution, Levenson anticipates some hurdles. For instance, with the current shift of curricula toward online-based studies, it is unclear how well this hands-on curriculum will carry over. However, hopes are still high for its success: if society gives serious thought to online learning, it will consider the Do Good Institute's method as well. Bruce Levenson is a distinguished businessman. Notably, Bruce was associated with the ownership of Atlanta Spirit, LLC, which he sold in 2014. He has ownership in United Communications Group (UCG). Mr. Bruce is a graduate of Washington University and American University, where he earned a Bachelor of Arts degree and a J.D., respectively. Visit his personal website: brucelevenson.com.
# Copyright (C) Ivan Kravets <[email protected]> # See LICENSE for details. # pylint: disable=W0613 from binascii import hexlify from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks, maybeDeferred, returnValue from twisted.internet.serialport import SerialPort import smartanthill.network.protocol as sanp from smartanthill.exception import NetworkRouterConnectFailure from smartanthill.service import SAMultiService from smartanthill.util import get_service_named class ControlService(SAMultiService): def __init__(self, name): SAMultiService.__init__(self, name) self._protocol = sanp.ControlProtocolWrapping( self.climessage_protocallback) self._litemq = None def startService(self): self._litemq = get_service_named("litemq") self._protocol.makeConnection(self) self._litemq.consume("network", "control.in", "transport->control", self.inmessage_mqcallback) self._litemq.consume("network", "control.out", "client->control", self.outmessage_mqcallback) SAMultiService.startService(self) def stopService(self): SAMultiService.stopService(self) self._litemq.unconsume("network", "control.in") self._litemq.unconsume("network", "control.out") def write(self, message): self._litemq.produce("network", "control->transport", message, dict(binary=True)) def inmessage_mqcallback(self, message, properties): self.log.debug("Received incoming raw message %s" % hexlify(message)) self._protocol.dataReceived(message) def outmessage_mqcallback(self, message, properties): self.log.debug("Received outgoing %s and properties=%s" % (message, properties)) self._protocol.send_message(message) def climessage_protocallback(self, message): self.log.debug("Received incoming client %s" % message) self._litemq.produce("network", "control->client", message) class TransportService(SAMultiService): def __init__(self, name): SAMultiService.__init__(self, name) self._protocol = sanp.TransportProtocolWrapping( self.rawmessage_protocallback) self._litemq = None def startService(self): self._litemq = get_service_named("litemq") self._protocol.makeConnection(self) self._litemq.consume("network", "transport.in", "routing->transport", self.insegment_mqcallback) self._litemq.consume("network", "transport.out", "control->transport", self.outmessage_mqcallback, ack=True) SAMultiService.startService(self) def stopService(self): SAMultiService.stopService(self) self._litemq.unconsume("network", "transport.in") self._litemq.unconsume("network", "transport.out") def rawmessage_protocallback(self, message): self.log.debug("Received incoming raw message %s" % hexlify(message)) self._litemq.produce("network", "transport->control", message, dict(binary=True)) def write(self, segment): self._litemq.produce("network", "transport->routing", segment, dict(binary=True)) def insegment_mqcallback(self, message, properties): self.log.debug("Received incoming segment %s" % hexlify(message)) self._protocol.dataReceived(message) @inlineCallbacks def outmessage_mqcallback(self, message, properties): self.log.debug("Received outgoing message %s" % hexlify(message)) ctrlmsg = sanp.ControlProtocol.rawmessage_to_message(message) def _on_err(failure): self._litemq.produce("network", "transport->err", ctrlmsg) failure.raiseException() d = maybeDeferred(self._protocol.send_message, message) d.addErrback(_on_err) result = yield d if result and ctrlmsg.ack: self._litemq.produce("network", "transport->ack", ctrlmsg) returnValue(result) class RouterService(SAMultiService): RECONNECT_DELAY = 1 # in seconds def __init__(self, name, options): 
SAMultiService.__init__(self, name, options) self._protocol = sanp.RoutingProtocolWrapping( self.inpacket_protocallback) self._router_device = None self._litemq = None self._reconnect_nums = 0 self._reconnect_callid = None def startService(self): connection = self.options['connection'] try: if connection.get_type() == "serial": _kwargs = connection.params _kwargs['protocol'] = self._protocol _kwargs['reactor'] = reactor # rename port's argument if "port" in _kwargs: _kwargs['deviceNameOrPortNumber'] = _kwargs['port'] del _kwargs['port'] self._router_device = SerialPort(**_kwargs) except: self.log.error(NetworkRouterConnectFailure(self.options)) self._reconnect_nums += 1 self._reconnect_callid = reactor.callLater( self._reconnect_nums * self.RECONNECT_DELAY, self.startService) return self._litemq = get_service_named("litemq") self._litemq.consume( exchange="network", queue="routing.out." + self.name, routing_key="transport->routing", callback=self.outsegment_mqcallback ) SAMultiService.startService(self) def stopService(self): SAMultiService.stopService(self) if self._reconnect_callid: self._reconnect_callid.cancel() if self._router_device: self._router_device.loseConnection() if self._litemq: self._litemq.unconsume("network", "routing.out." + self.name) def inpacket_protocallback(self, packet): self.log.debug("Received incoming packet %s" % hexlify(packet)) self._litemq.produce("network", "routing->transport", sanp.RoutingProtocol.packet_to_segment(packet), dict(binary=True)) def outsegment_mqcallback(self, message, properties): # check destination ID @TODO if ord(message[2]) not in self.options['deviceids']: return False self.log.debug("Received outgoing segment %s" % hexlify(message)) self._protocol.send_segment(message) class ConnectionInfo(object): def __init__(self, uri): assert ":" in uri self.uri = uri parts = uri.split(":") self.type_ = parts[0] self.params = dict() for part in parts[1:]: key, value = part.split("=") self.params[key] = value def __repr__(self): return "ConnectionInfo: %s" % self.uri def get_uri(self): return self.uri def get_type(self): return self.type_ class NetworkService(SAMultiService): def __init__(self, name, options): SAMultiService.__init__(self, name, options) self._litemq = None def startService(self): self._litemq = get_service_named("litemq") self._litemq.declare_exchange("network") ControlService("network.control").setServiceParent(self) TransportService("network.transport").setServiceParent(self) devices = get_service_named("device").get_devices() for devid, devobj in devices.iteritems(): netopts = devobj.options.get("network", {}) rconn = netopts.get("router", None) if not rconn: continue _options = {"connection": ConnectionInfo(rconn), "deviceids": [devid]} RouterService("network.router.%d" % devid, _options).setServiceParent(self) SAMultiService.startService(self) def stopService(self): SAMultiService.stopService(self) self._litemq.undeclare_exchange("network") def makeService(name, options): return NetworkService(name, options)
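The router's connection string is parsed by the ConnectionInfo class above: a type prefix followed by colon-separated key=value parts. A small illustration; the device path and baud rate are made-up values:

conn = ConnectionInfo("serial:port=/dev/ttyUSB0:baudrate=38400")
conn.get_type()    # 'serial'
conn.params        # {'port': '/dev/ttyUSB0', 'baudrate': '38400'}
# RouterService then passes these params (with 'port' renamed) to twisted's SerialPort.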
Welcome to the 29th issue of Entry Point North’s Newsletter. We wish you a pleasant reading! On 27 June 2017 Entry Point North Ireland officially opened a new state-of-the-art air traffic control tower simulator in Dublin. In August Entry Point North delivered a customised course for the Danish State Pilotage that provides all types of transit pilotages in Danish waters. Entry Point North is currently delivering GMS endorsement training for more than 60 participants from the Polish Air Navigation Services Agency. During June 2017 Entry Point North delivered an OJTI training programme on-site in Bangkok for ATC staff of Aerothai – Aeronautical Radio of Thailand. First APP/ACP Rating course students have already graduated and we have started a new programme of APS, ACS, APP/ACP courses. In Madrid we now also offer English Preparatory training. Would you like to teach the next generation of air traffic controllers? In order to meet our customer needs for future training and services, we are looking for ATCO specialists who are interested in sharing their knowledge for shorter or longer training assignments. On 25-26 October Entry Point North will be hosting its annual training seminar in Malmö. This year we will dedicate the two-day event to Continuation Training and explore what impact it may have on Business Continuity. Entry Point North’s annual ATSEP workshop will take place on 19-20 September in Malmö, Sweden and aims to broaden the knowledge in ATSEP training, discuss upcoming regulations, share best practices among different organisations, and generate professional discussions. Some of our graduated classes from May to August 2017.
# # Copyright (c) 2013-2020 Contributors to the Eclipse Foundation # # See the NOTICE file distributed with this work for additional information regarding copyright # ownership. All rights reserved. This program and the accompanying materials are made available # under the terms of the Apache License, Version 2.0 which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import java_pkg from pygw.base import GeoWaveObject from .simple_feature_type import SimpleFeatureType from .attribute_descriptor import AttributeDescriptor class SimpleFeatureTypeBuilder(GeoWaveObject): """ Builds `pygw.geotools.simple_feature_type.SimpleFeatureType` instances. """ def __init__(self): self.attributes = [] super().__init__(java_pkg.org.geotools.feature.simple.SimpleFeatureTypeBuilder()) def set_name(self, name): """ Sets the name of the feature type. Args: name (str): The name to use. Returns: This feature type builder. """ self._java_ref.setName(name) return self def set_namespace_uri(self, namespace_uri): """ Sets the namespace URI of the feature type. Args: namespace_uri (str): The namespace URI to use. Returns: This feature type builder. """ self._java_ref.setNamespaceURI(namespace_uri) return self def set_srs(self, srs): """ Sets the spatial reference system of the feature type. Args: srs (str): The spatial reference system to use. Returns: This feature type builder. """ self._java_ref.setSRS(srs) return self def add(self, attribute_descriptor): """ Adds an attribute to the feature type. Args: attribute_descriptor (pygw.geotools.attribute_descriptor.AttributeDescriptor): The attribute to add. Returns: This feature type builder. """ if isinstance(attribute_descriptor, AttributeDescriptor): self.attributes.append(attribute_descriptor) self._java_ref.add(attribute_descriptor._java_ref) return self else: raise ValueError("attribute_descriptor should be of type AttributeDescriptor") def build_feature_type(self): """ Builds the configured feature type. Returns: A `pygw.geotools.simple_feature_type.SimpleFeatureType` with the given configuration. """ return SimpleFeatureType(self._java_ref.buildFeatureType(), self.attributes)
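A sketch of the builder in use. The builder methods are the ones defined above; the import path and the AttributeDescriptor factory helpers (point, string) are assumptions, so check the attribute_descriptor module for the real constructors:

from pygw.geotools import SimpleFeatureTypeBuilder, AttributeDescriptor  # import path assumed

builder = SimpleFeatureTypeBuilder()
builder.set_name("poi")
builder.set_srs("EPSG:4326")
# AttributeDescriptor.point / .string are assumed factory helpers
builder.add(AttributeDescriptor.point("geometry"))
builder.add(AttributeDescriptor.string("name"))
feature_type = builder.build_feature_type()   # pygw SimpleFeatureType wrapping the Java type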
With Valentine's Day just around the corner, complete this cute and simple craft with your group! 1. Gather your material. You will need several corks, a red pipe cleaner, pink, red, and white feathers, ribbon, waxed paper, white paper, glue, scissors, and a crayon or marker. 2. Arrange the corks on the white paper, forming a heart shape. 3. Use your crayon or marker to trace the outline of the heart shape. 4. Set a piece of waxed paper over the heart shape and staple (or glue) it to the top of the white paper to secure it in place. 5. Apply white glue along one side of each cork and stick them on the heart shape, pressing them together. 6. Set aside until the glue is completely dry. Delicately separate the cork heart from the waxed paper. 7. Use the pipe cleaner to form a hook as shown. 8. Glue the hook to the top of your heart. Cut a piece of ribbon long enough to be wrapped around the heart. 9. Apply a few dots of glue along the outside of the heart and press the ribbon on them. 10. Add a few feathers to the top of the heart and hang it. This heart also makes a great gift.
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import logging import unittest2 as unittest import numpy from nupic.algorithms.knn_classifier import KNNClassifier LOGGER = logging.getLogger(__name__) class KNNCategoriesTest(unittest.TestCase): """Tests how k Nearest Neighbor classifier handles categories""" def testCategories(self): # We need determinism! # # "Life is like a game of cards. The hand you are dealt is determinism; the # way you play it is free will." Jawaharlal Nehru # # What the heck, let's just set the random seed numpy.random.seed(42) failures, _knn = simulateCategories() self.assertEqual(len(failures), 0, "Tests failed: \n" + failures) def simulateCategories(numSamples=100, numDimensions=500): """Simulate running KNN classifier on many disjoint categories""" failures = "" LOGGER.info("Testing the sparse KNN Classifier on many disjoint categories") knn = KNNClassifier(k=1, distanceNorm=1.0, useSparseMemory=True) for i in range(0, numSamples): # select category randomly and generate vector c = 2*numpy.random.randint(0, 50) + 50 v = createPattern(c, numDimensions) knn.learn(v, c) # Go through each category and ensure we have at least one from each! for i in range(0, 50): c = 2*i+50 v = createPattern(c, numDimensions) knn.learn(v, c) errors = 0 for i in range(0, numSamples): # select category randomly and generate vector c = 2*numpy.random.randint(0, 50) + 50 v = createPattern(c, numDimensions) inferCat, _kir, _kd, _kcd = knn.infer(v) if inferCat != c: LOGGER.info("Mistake with %s %s %s %s %s", v[v.nonzero()], \ "mapped to category", inferCat, "instead of category", c) LOGGER.info(" %s", v.nonzero()) errors += 1 if errors != 0: failures += "Failure in handling non-consecutive category indices\n" # Test closest methods errors = 0 for i in range(0, 10): # select category randomly and generate vector c = 2*numpy.random.randint(0, 50) + 50 v = createPattern(c, numDimensions) p = knn.closestTrainingPattern(v, c) if not (c in p.nonzero()[0]): LOGGER.info("Mistake %s %s", p.nonzero(), v.nonzero()) LOGGER.info("%s %s", p[p.nonzero()], v[v.nonzero()]) errors += 1 if errors != 0: failures += "Failure in closestTrainingPattern method\n" return failures, knn def createPattern(c, numDimensions): """ Create a sparse pattern from category c with the given number of dimensions. The pattern is created by setting element c to be a high random number. Element c-1 and c+1 are set to low random numbers. numDimensions must be > c. 
""" v = numpy.zeros(numDimensions) v[c] = 5*numpy.random.random() + 10 v[c+1] = numpy.random.random() if c > 0: v[c-1] = numpy.random.random() return v if __name__ == "__main__": unittest.main()
We list the most relevant Cooking Classes in or near Arcola, IN. You can make a great night out of a cooking course by making it a date – cooking classes are perfect for couples to bond and get to know each other while having fun over a glass of wine. Recreational cooking classes are laid back and affordable for the value they provide. Classes are filling up fast, so get signed up today! If you are looking at cooking classes for professional development, visit our culinary schools section – a chef or any other culinary arts professional can earn certification that demonstrates to an employer that he or she has achieved a clear level of culinary proficiency. Besides certification, classroom and hands-on training, one also needs a number of soft skills, or personal qualities, to succeed in this field.
"""Form helpers.""" from django import template from django.template.loader import render_to_string from collections import OrderedDict register = template.Library() @register.filter def render_as_blocks(form): """Render a form using blocks to contain sections.""" o_label_suffix = form.label_suffix form.label_suffix = "" categories = {} for bf in form: field = bf.field if not hasattr(field, "category"): # This should deal with EnabledDisabledFields if hasattr(field, "field"): field = field.field category = "General" if hasattr(field, "category"): category = field.category if category not in categories: categories[category] = [] categories[category].append(bf) # Sort categories alphabetically categories = OrderedDict(sorted(categories.items())) rendered = render_to_string("forms/_blocks_form.html", {"categories": categories}) form.label_suffix = o_label_suffix return rendered @register.filter def render(form): """Render a form.""" o_label_suffix = form.label_suffix form.label_suffix = "" rendered = render_to_string("forms/_form.html", {"form": form}) form.label_suffix = o_label_suffix return rendered @register.simple_tag def render_field(field, name, value): """Render a field.""" return field.widget.render(name, value, {})
Coupon Code: LOVEPOTION22 for 22% off a pillow purchase. Contest is open to US residents only, 18+ (ends March 8th, 2015 at 11:59pm EST). All entries unless otherwise noted are optional. Please enter the giveaway form below. Please only claim an entry if you complete it. Winner will have 48 hours to respond or a new winner will be chosen. Please read the complete Terms and Conditions at the bottom of the giveaway entry form for details. Good luck!