#! /usr/bin/env python # vim: set fileencoding=utf-8 ''' Testing for USD, EUR, and GBP OIS qb. Note that these curves are the same as the NY DVC curves (in terms of instruments). Using the 30 June 2016 data below, we achieved <1bp difference for all points for the EUR and GBP OIS curves to the Numerix curve. There was a difference of 1-2 bps for the USD OIS curve, as that uses Average Index swaps, which have not been implemented. The difference is attributable to that adjustment in instruments. ''' import datetime import copy import qbootstrapper as qb curve_effective = datetime.datetime(2016, 6, 30) effective = datetime.datetime(2016, 7, 5) # EUR OIS curve (6/30/2016 data, 6/30/2016 effective date) eonia = qb.Curve(curve_effective) eonia_conventions = {'fixed_length': 12, 'float_length': 12, 'fixed_basis': 'Act360', 'float_basis': 'Act360', 'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following' } eonia_cash = qb.LIBORInstrument(curve_effective, -0.00293, 5, eonia, length_type='days', payment_adjustment='following') eonia_short_instruments = [(datetime.datetime(2016, 8, 5), -0.00339), (datetime.datetime(2016, 9, 5), -0.00347), (datetime.datetime(2016, 10, 5), -0.00357), (datetime.datetime(2016, 11, 5), -0.00367), (datetime.datetime(2016, 12, 5), -0.00376), (datetime.datetime(2017, 1, 5), -0.00385), (datetime.datetime(2017, 2, 5), -0.00394), (datetime.datetime(2017, 3, 5), -0.00400), (datetime.datetime(2017, 4, 5), -0.00406), (datetime.datetime(2017, 5, 5), -0.00412), (datetime.datetime(2017, 6, 5), -0.00418)] eonia_instruments = [(datetime.datetime(2017, 7, 5), -0.00423), (datetime.datetime(2018, 1, 5), -0.00449), (datetime.datetime(2018, 7, 5), -0.00468), (datetime.datetime(2019, 7, 5), -0.00480), (datetime.datetime(2020, 7, 5), -0.00441), (datetime.datetime(2021, 7, 5), -0.00364), (datetime.datetime(2022, 7, 5), -0.00295), (datetime.datetime(2023, 7, 5), -0.00164), (datetime.datetime(2024, 7, 5), -0.00055), (datetime.datetime(2025, 7, 5), 0.00055), (datetime.datetime(2026, 7, 5), 0.00155), (datetime.datetime(2027, 7, 5), 0.00248), (datetime.datetime(2028, 7, 5), 0.00325), (datetime.datetime(2031, 7, 5), 0.00505), (datetime.datetime(2036, 7, 5), 0.00651), (datetime.datetime(2041, 7, 5), 0.00696), (datetime.datetime(2046, 7, 5), 0.00707), (datetime.datetime(2051, 7, 5), 0.00718), (datetime.datetime(2056, 7, 5), 0.00724), (datetime.datetime(2066, 7, 5), 0.00685)] eonia.add_instrument(eonia_cash) for idx, (maturity, rate) in enumerate(eonia_short_instruments): inst = qb.OISSwapInstrument(effective, maturity, rate, eonia, fixed_basis='Act360', fixed_length=idx + 1, float_length=idx + 1) eonia.add_instrument(inst) for (maturity, rate) in eonia_instruments: inst = qb.OISSwapInstrument(effective, maturity, rate, eonia, **eonia_conventions) eonia.add_instrument(inst) # USD OIS curve (6/30/2016 data, 6/30/2016 effective date) # Note that these are synthetics, the actual swap rates for 6y+ maturities # are average OIS + basis v LIBOR fedfunds = qb.Curve(curve_effective) fedfunds_short_conventions = {'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following'} fedfunds_conventions = {'fixed_length': 6, 'float_length': 3, 'fixed_basis': 'Act360', 'float_basis': 'Act360', 'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 
'float_payment_adjustment': 'following'} fedfunds_cash = qb.LIBORInstrument(curve_effective, 0.003, 4, fedfunds, length_type='days', payment_adjustment='following') fedfunds_swap_onew = qb.OISSwapInstrument(effective, datetime.datetime(2016, 7, 12), 0.00387, fedfunds, fixed_length=1, float_length=1, fixed_period_length='weeks', float_period_length='weeks', **fedfunds_short_conventions) fedfunds_swap_twow = qb.OISSwapInstrument(effective, datetime.datetime(2016, 7, 19), 0.00387, fedfunds, fixed_length=2, float_length=2, fixed_period_length='weeks', float_period_length='weeks', **fedfunds_short_conventions) fedfunds_swap_threew = qb.OISSwapInstrument(effective, datetime.datetime(2016, 7, 26), 0.00387, fedfunds, fixed_length=3, float_length=3, fixed_period_length='weeks', float_period_length='weeks', **fedfunds_short_conventions) fedfunds_short_instruments = [(datetime.datetime(2016, 8, 5), 0.00378, 1), (datetime.datetime(2016, 9, 5), 0.00375, 2), (datetime.datetime(2016, 10, 5), 0.00371, 3), (datetime.datetime(2016, 11, 5), 0.00369, 4), (datetime.datetime(2016, 12, 5), 0.00366, 5), (datetime.datetime(2017, 1, 5), 0.00365, 6), (datetime.datetime(2017, 4, 5), 0.00371, 9)] fedfunds_instruments = [(datetime.datetime(2017, 7, 5), 0.003780), (datetime.datetime(2018, 1, 5), 0.003950), (datetime.datetime(2018, 7, 5), 0.004220), (datetime.datetime(2019, 7, 5), 0.004850), (datetime.datetime(2020, 7, 5), 0.005600), (datetime.datetime(2021, 7, 5), 0.006450), (datetime.datetime(2022, 7, 5), 0.007350), (datetime.datetime(2023, 7, 5), 0.008155), (datetime.datetime(2026, 7, 5), 0.010262), (datetime.datetime(2028, 7, 5), 0.011370), (datetime.datetime(2031, 7, 5), 0.012585), (datetime.datetime(2036, 7, 5), 0.013827), (datetime.datetime(2041, 7, 5), 0.014470), (datetime.datetime(2046, 7, 5), 0.014847), (datetime.datetime(2056, 7, 5), 0.015047), (datetime.datetime(2066, 7, 5), 0.014897)] fedfunds.add_instrument(fedfunds_cash) fedfunds.add_instrument(fedfunds_swap_onew) fedfunds.add_instrument(fedfunds_swap_twow) fedfunds.add_instrument(fedfunds_swap_threew) for (maturity, rate, months) in fedfunds_short_instruments: inst = qb.OISSwapInstrument(effective, maturity, rate, fedfunds, fixed_length=months, float_length=months, **fedfunds_short_conventions) fedfunds.add_instrument(inst) for (maturity, rate) in fedfunds_instruments: inst = qb.OISSwapInstrument(effective, maturity, rate, fedfunds, **fedfunds_conventions) fedfunds.add_instrument(inst) # EUR EURIBOR 6M curve (6/30/2016 data, 6/30/2016 effective date) euribor = qb.LIBORCurve(curve_effective, discount_curve=eonia) effective = datetime.datetime(2016, 7, 4) euribor_short_conventions = {'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following', 'fixed_basis': '30E360'} euribor_conventions = {'fixed_length': 12, 'float_length': 6, 'fixed_basis': '30E360', 'float_basis': 'Act360', 'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following', 'rate_period': 6, 'rate_period_length': 'months'} euribor_cash_instruments = [(1, 'weeks', -0.00371), (2, 'weeks', -0.00370), (1, 'months', -0.00364), (2, 'months', -0.00321), (3, 'months', -0.00286), (6, 'months', -0.00179)] euribor_fra_instruments = [(datetime.datetime(2017, 1, 4), datetime.datetime(2017, 7, 4), -0.00210), (datetime.datetime(2017, 7, 4), datetime.datetime(2018, 1, 4), -0.00222)] euribor_swap_instruments = 
[(datetime.datetime(2018, 7, 4), -0.002075), (datetime.datetime(2019, 7, 4), -0.001979), (datetime.datetime(2020, 7, 4), -0.001421), (datetime.datetime(2021, 7, 4), -0.000539), (datetime.datetime(2022, 7, 4), 0.000166), (datetime.datetime(2023, 7, 4), 0.001454), (datetime.datetime(2024, 7, 4), 0.002476), (datetime.datetime(2025, 7, 4), 0.003498), (datetime.datetime(2026, 7, 4), 0.004424), (datetime.datetime(2027, 7, 4), 0.005268), (datetime.datetime(2028, 7, 4), 0.005954), (datetime.datetime(2031, 7, 4), 0.007514), (datetime.datetime(2036, 7, 4), 0.008604), (datetime.datetime(2041, 7, 4), 0.008824), (datetime.datetime(2046, 7, 4), 0.008754), (datetime.datetime(2051, 7, 4), 0.008694), (datetime.datetime(2056, 7, 4), 0.008582), (datetime.datetime(2061, 7, 4), 0.008281), (datetime.datetime(2066, 7, 4), 0.008054)] for (length, length_type, rate) in euribor_cash_instruments: inst = qb.LIBORInstrument(effective, rate, length, euribor, length_type=length_type, payment_adjustment='following') euribor.add_instrument(inst) for (start_date, end_date, rate) in euribor_fra_instruments: inst = qb.FRAInstrumentByDates(start_date, end_date, rate, euribor) euribor.add_instrument(inst) for (maturity, rate) in euribor_swap_instruments: inst = qb.LIBORSwapInstrument(effective, maturity, rate, euribor, **euribor_conventions) euribor.add_instrument(inst) # USD LIBOR 3M curve (6/30/2016 data) usdlibor = qb.LIBORCurve(curve_effective, discount_curve=fedfunds) effective = datetime.datetime(2016, 7, 5) usdlibor_conventions = {'fixed_length': 6, 'float_length': 3, 'fixed_basis': '30360', 'float_basis': 'Act360', 'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following', 'rate_period': 3, 'rate_period_length': 'months'} usdlibor_cash_instruments = [(1, 'weeks', 0.004402), (1, 'months', 0.004651), (2, 'months', 0.005490), (3, 'months', 0.006541)] usdlibor_futures_instruments = [(datetime.datetime(2016, 9, 21), datetime.datetime(2016, 12, 21), 99.35562), (datetime.datetime(2016, 12, 21), datetime.datetime(2017, 3, 21), 99.32671), (datetime.datetime(2017, 3, 15), datetime.datetime(2017, 6, 15), 99.30839), (datetime.datetime(2017, 6, 21), datetime.datetime(2017, 9, 21), 99.27554), (datetime.datetime(2017, 9, 20), datetime.datetime(2017, 12, 20), 99.23812), (datetime.datetime(2017, 12, 20), datetime.datetime(2018, 3, 20), 99.18614), (datetime.datetime(2018, 3, 21), datetime.datetime(2018, 6, 21), 99.14960), (datetime.datetime(2018, 6, 20), datetime.datetime(2018, 9, 20), 99.10847), (datetime.datetime(2018, 9, 19), datetime.datetime(2018, 12, 19), 99.06277), (datetime.datetime(2018, 12, 19), datetime.datetime(2019, 3, 19), 99.00748), (datetime.datetime(2019, 3, 20), datetime.datetime(2019, 6, 20), 98.96757), (datetime.datetime(2019, 6, 19), datetime.datetime(2019, 9, 19), 98.92307)] usdlibor_swap_instruments = [(datetime.datetime(2020, 7, 5), 0.00898), (datetime.datetime(2021, 7, 5), 0.00985), (datetime.datetime(2022, 7, 5), 0.01075), (datetime.datetime(2023, 7, 5), 0.01158), (datetime.datetime(2024, 7, 5), 0.01241), (datetime.datetime(2025, 7, 5), 0.01311), (datetime.datetime(2026, 7, 5), 0.01375), (datetime.datetime(2027, 7, 5), 0.01435), (datetime.datetime(2028, 7, 5), 0.01487), (datetime.datetime(2031, 7, 5), 0.01611), (datetime.datetime(2036, 7, 5), 0.01739), (datetime.datetime(2041, 7, 5), 0.01807), (datetime.datetime(2046, 7, 5), 0.01846), (datetime.datetime(2056, 7, 5), 0.01866), (datetime.datetime(2066, 7, 5), 
0.01851)] for (length, length_type, rate) in usdlibor_cash_instruments: inst = qb.LIBORInstrument(effective, rate, length, usdlibor, length_type=length_type, payment_adjustment='following') usdlibor.add_instrument(inst) for (start_date, end_date, price) in usdlibor_futures_instruments: inst = qb.FuturesInstrumentByDates(start_date, end_date, price, usdlibor) usdlibor.add_instrument(inst) for (maturity, rate) in usdlibor_swap_instruments: inst = qb.LIBORSwapInstrument(effective, maturity, rate, usdlibor, **usdlibor_conventions) usdlibor.add_instrument(inst) # GBP OIS curve (6/30/2016 data, 6/30/2016 effective date) sonia = qb.Curve(curve_effective) sonia_short_conventions = {'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following', 'rate_basis': 'Act365', 'fixed_basis': 'Act365', 'float_basis': 'Act365' } sonia_conventions = {'fixed_length': 12, 'float_length': 12, 'fixed_basis': 'Act360', 'float_basis': 'Act360', 'fixed_period_adjustment': 'following', 'float_period_adjustment': 'following', 'fixed_payment_adjustment': 'following', 'float_payment_adjustment': 'following', 'rate_basis': 'Act365', 'fixed_basis': 'Act365', 'float_basis': 'Act365' } sonia_cash = qb.LIBORInstrument(curve_effective, 0.004416, 1, sonia, length_type='days', payment_adjustment='following') sonia_swap_onew = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 7, 7), 0.00443, sonia, fixed_length=1, float_length=1, fixed_period_length='weeks', float_period_length='weeks', **sonia_short_conventions) sonia_swap_twow = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 7, 14), 0.00448, sonia, fixed_length=2, float_length=2, fixed_period_length='weeks', float_period_length='weeks', **sonia_short_conventions) sonia_swap_threew = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 7, 21), 0.004042, sonia, fixed_length=3, float_length=3, fixed_period_length='weeks', float_period_length='weeks', **sonia_short_conventions) sonia_swap_onem = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 7, 29), 0.0038, sonia, fixed_length=1, float_length=1, **sonia_short_conventions) sonia_swap_twom = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 8, 31), 0.003017, sonia, fixed_length=2, float_length=2, **sonia_short_conventions) sonia_swap_threem = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 9, 30), 0.002653, sonia, fixed_length=3, float_length=3, **sonia_short_conventions) sonia_swap_fourm = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 10, 31), 0.002425, sonia, fixed_length=4, float_length=4, **sonia_short_conventions) sonia_swap_fivem = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 11, 30), 0.002213, sonia, fixed_length=5, float_length=5, **sonia_short_conventions) sonia_swap_sixm = qb.OISSwapInstrument(curve_effective, datetime.datetime(2016, 12, 30), 0.002053, sonia, fixed_length=6, float_length=6, **sonia_short_conventions) sonia_swap_sevenm = qb.OISSwapInstrument(curve_effective, datetime.datetime(2017, 1, 31), 0.001925, sonia, fixed_length=7, float_length=7, **sonia_short_conventions) sonia_swap_eightm = qb.OISSwapInstrument(curve_effective, datetime.datetime(2017, 2, 28), 0.001812, sonia, fixed_length=8, float_length=8, **sonia_short_conventions) sonia_swap_ninem = qb.OISSwapInstrument(curve_effective, datetime.datetime(2017, 3, 31), 0.001716, sonia, fixed_length=9, float_length=9, **sonia_short_conventions) 
sonia_swap_tenm = qb.OISSwapInstrument(curve_effective, datetime.datetime(2017, 4, 28), 0.00164, sonia, fixed_length=10, float_length=10, **sonia_short_conventions) sonia_swap_elevenm = qb.OISSwapInstrument(curve_effective, datetime.datetime(2017, 5, 31), 0.001564, sonia, fixed_length=11, float_length=11, **sonia_short_conventions) sonia_short_swaps = [sonia_cash, sonia_swap_onew, sonia_swap_twow, sonia_swap_threew, sonia_swap_onem, sonia_swap_twom, sonia_swap_threem, sonia_swap_fourm, sonia_swap_fivem, sonia_swap_sixm, sonia_swap_sevenm, sonia_swap_eightm, sonia_swap_ninem, sonia_swap_tenm, sonia_swap_elevenm] sonia_swap_data = [(datetime.datetime(2017, 6, 30), 0.001499), (datetime.datetime(2017, 12, 29), 0.001223), (datetime.datetime(2018, 6, 30), 0.001076), (datetime.datetime(2019, 6, 30), 0.001106), (datetime.datetime(2020, 6, 30), 0.001444), (datetime.datetime(2021, 6, 30), 0.002058), (datetime.datetime(2022, 6, 30), 0.00284), (datetime.datetime(2023, 6, 30), 0.003749), (datetime.datetime(2024, 6, 30), 0.004668), (datetime.datetime(2025, 6, 30), 0.005532), (datetime.datetime(2026, 6, 30), 0.006322), (datetime.datetime(2027, 6, 30), 0.007016), (datetime.datetime(2028, 6, 30), 0.007609), (datetime.datetime(2031, 6, 30), 0.008891), (datetime.datetime(2036, 6, 30), 0.009792), (datetime.datetime(2041, 6, 30), 0.009916), (datetime.datetime(2046, 6, 30), 0.009869), (datetime.datetime(2056, 6, 30), 0.009242), (datetime.datetime(2066, 6, 30), 0.009003)] for inst in sonia_short_swaps: sonia.add_instrument(inst) for maturity, rate in sonia_swap_data: sonia.add_instrument(qb.OISSwapInstrument(curve_effective, maturity, rate, sonia, **sonia_conventions)) fedfunds_short = qb.Curve(curve_effective) fedfunds_short_short_instruments = [ fedfunds_cash, fedfunds_swap_onew, fedfunds_swap_twow, fedfunds_swap_threew] for inst in fedfunds_short_short_instruments: new_inst = copy.deepcopy(inst) new_inst.curve = fedfunds_short fedfunds_short.add_instrument(new_inst) for (maturity, rate, months) in fedfunds_short_instruments: inst = qb.OISSwapInstrument(effective, maturity, rate, fedfunds_short, fixed_length=months, float_length=months, **fedfunds_short_conventions) fedfunds_short.add_instrument(inst) for (maturity, rate) in fedfunds_instruments[:6]: inst = qb.OISSwapInstrument(effective, maturity, rate, fedfunds_short, **fedfunds_conventions) fedfunds_short.add_instrument(inst) usdlibor_short = qb.LIBORCurve(curve_effective, discount_curve=fedfunds_short) for (length, length_type, rate) in usdlibor_cash_instruments: inst = qb.LIBORInstrument(effective, rate, length, usdlibor_short, length_type=length_type, payment_adjustment='following') usdlibor_short.add_instrument(inst) for (start_date, end_date, price) in usdlibor_futures_instruments: inst = qb.FuturesInstrumentByDates(start_date, end_date, price, usdlibor_short) usdlibor_short.add_instrument(inst) for (maturity, rate) in usdlibor_swap_instruments[:2]: inst = qb.LIBORSwapInstrument(effective, maturity, rate, usdlibor_short, **usdlibor_conventions) usdlibor_short.add_instrument(inst) fedfunds_libor_libor_swaps = usdlibor_swap_instruments[2:4] fedfunds_libor_libor_swaps.extend([usdlibor_swap_instruments[6]]) fedfunds_libor_libor_swaps.extend(usdlibor_swap_instruments[8:]) fedfunds_libor = qb.SimultaneousStrippedCurve(curve_effective, fedfunds_short, usdlibor_short) fedfunds_libor_swap_data = [(datetime.datetime(2022, 7, 5), 0.003400), (datetime.datetime(2023, 7, 5), 0.003425), (datetime.datetime(2026, 7, 5), 0.003488), (datetime.datetime(2028, 7, 5), 
0.003500), (datetime.datetime(2031, 7, 5), 0.003525), (datetime.datetime(2036, 7, 5), 0.003563), (datetime.datetime(2041, 7, 5), 0.003600), (datetime.datetime(2046, 7, 5), 0.003613), (datetime.datetime(2056, 7, 5), 0.003613), (datetime.datetime(2066, 7, 5), 0.003613)] for idx, (maturity, rate) in enumerate(fedfunds_libor_swap_data): ois_inst = qb.AverageIndexBasisSwapInstrument(effective, maturity, fedfunds_libor, leg_one_spread=rate) libor_inst = qb.LIBORSwapInstrument(effective, fedfunds_libor_libor_swaps[idx][0], fedfunds_libor_libor_swaps[idx][1], usdlibor, **usdlibor_conventions) instrument_pair = qb.SimultaneousInstrument(ois_inst, libor_inst, fedfunds_libor) fedfunds_libor.add_instrument(instrument_pair) # eonia.build() # eonia.view() # eonia.zeros() # fedfunds.build() # fedfunds.view() # fedfunds.zeros() # sonia.build() # sonia.view() # sonia.zeros() # euribor.build() # euribor.view() # euribor.zeros() # usdlibor.build() # usdlibor.view() # usdlibor.zeros() # fedfunds_libor.build() # fedfunds_libor.discount_curve.view() # fedfunds_libor.discount_curve.zeros() # fedfunds_libor.projection_curve.view() # fedfunds_libor.projection_curve.zeros()
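The script above only assembles the curves; the build and inspection calls at the end are left commented out. Below is a minimal usage sketch, assuming build(), view() and zeros() behave the way those commented-out calls suggest. It is an illustration, not part of the original test script.

# Hedged usage sketch: bootstrap one curve and print its results.
# Assumption: build()/view()/zeros() work as the commented-out calls above imply.
if __name__ == '__main__':
    eonia.build()    # bootstrap the EONIA discount curve
    eonia.view()     # print discount factors
    eonia.zeros()    # print zero (spot) rates

    # The simultaneously stripped FedFunds/LIBOR pair is built the same way and
    # then inspected through its discount and projection sub-curves.
    fedfunds_libor.build()
    fedfunds_libor.discount_curve.view()
    fedfunds_libor.projection_curve.zeros()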
Purple Flag is a national accreditation scheme for entertainment and hospitality zones at night. It is linked to the standards that people expect from their city centres at night. Purple Flag's methodology is based on five years of research, a dozen pilot initiatives, and national and international good practice.

The Association of Town and City Management has awarded Derry Purple Flag status for the eighth year in a row. The city has been awarded the prestigious accolade, which recognises town and city centres that meet standards in their evening and night-time economy offerings. The Mayor, Councillor Maolíosa McHugh, was pictured at the announcement alongside City Centre Initiative Chief Executive Jim Roddy, who co-ordinates the Purple Flag assessment, together with representatives including City Centre PSNI, Paul McCartney, First Housing Support, Jarleath Clarke, Estate Services, Rachael Eastwood, Grand Central and The Taphouse.

The Core Agenda is the bedrock of Purple Flag. It captures what people want from their town centres at night, whatever the size of the town. There are criteria, indicators and good practice examples to explain each theme, and entrants and assessors use the Core Agenda in coming to a conclusion about the performance of each centre. Purple Flag is flexible and dynamic: it will evolve as society changes, as experience grows and as town centres become increasingly vibrant and appealing at night.

City Centre Initiative is very proud to accept the Purple Flag award on behalf of all our partners who maintain and provide services within the city centre. Purple Flag is a city centre award similar to the Blue Flag for beaches, backed by government, police and business. It recognises the services and facilities that are provided in the city centre under the five themes of Wellbeing, Movement, Appeal, Place and Policy. This award covers City Centre Initiative's core area, which stretches from the Waterside Theatre right to the Buncrana Road. The arduous and detailed application process culminated in two different assessments under the headings of the themes mentioned above. We first obtained the award in November 2011, and it has been renewed annually since. The city will go through ongoing assessments to ensure that the high standards expected by ATCM (the Association of Town and City Management) are maintained.
import logging
import os

from galaxy.model.orm import and_
from galaxy.util.odict import odict
from tool_shed.util import hg_util
import tool_shed.util.shed_util_common as suc

log = logging.getLogger( __name__ )


def can_browse_repository_reviews( app, user, repository ):
    """
    Determine if there are any reviews of the received repository for which the
    current user has permission to browse any component reviews.
    """
    if user:
        for review in repository.reviews:
            for component_review in review.component_reviews:
                if app.security_agent.user_can_browse_component_review( app, repository, component_review, user ):
                    return True
    return False


def changeset_revision_reviewed_by_user( user, repository, changeset_revision ):
    """Determine if the current changeset revision has been reviewed by the current user."""
    for review in repository.reviews:
        if review.changeset_revision == changeset_revision and review.user == user:
            return True
    return False


def get_component( app, id ):
    """Get a component from the database."""
    sa_session = app.model.context.current
    return sa_session.query( app.model.Component ).get( app.security.decode_id( id ) )


def get_component_review( app, id ):
    """Get a component_review from the database."""
    sa_session = app.model.context.current
    return sa_session.query( app.model.ComponentReview ).get( app.security.decode_id( id ) )


def get_component_by_name( app, name ):
    """Get a component from the database via a name."""
    sa_session = app.model.context.current
    return sa_session.query( app.model.Component ) \
                     .filter( app.model.Component.table.c.name == name ) \
                     .first()


def get_component_review_by_repository_review_id_component_id( app, repository_review_id, component_id ):
    """Get a component_review from the database via repository_review_id and component_id."""
    sa_session = app.model.context.current
    return sa_session.query( app.model.ComponentReview ) \
                     .filter( and_( app.model.ComponentReview.table.c.repository_review_id == app.security.decode_id( repository_review_id ),
                                    app.model.ComponentReview.table.c.component_id == app.security.decode_id( component_id ) ) ) \
                     .first()


def get_components( app ):
    sa_session = app.model.context.current
    return sa_session.query( app.model.Component ) \
                     .order_by( app.model.Component.name ) \
                     .all()


def get_previous_repository_reviews( app, repository, changeset_revision ):
    """
    Return an ordered dictionary of repository reviews up to and including the
    received changeset revision.
    """
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
    previous_reviews_dict = odict()
    for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):
        previous_changeset_revision = str( repo.changectx( changeset ) )
        if previous_changeset_revision in reviewed_revision_hashes:
            previous_rev, previous_changeset_revision_label = \
                hg_util.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )
            revision_reviews = get_reviews_by_repository_id_changeset_revision( app,
                                                                                app.security.encode_id( repository.id ),
                                                                                previous_changeset_revision )
            previous_reviews_dict[ previous_changeset_revision ] = \
                dict( changeset_revision_label=previous_changeset_revision_label,
                      reviews=revision_reviews )
    return previous_reviews_dict


def get_review( app, id ):
    """Get a repository_review from the database via id."""
    sa_session = app.model.context.current
    return sa_session.query( app.model.RepositoryReview ).get( app.security.decode_id( id ) )


def get_review_by_repository_id_changeset_revision_user_id( app, repository_id, changeset_revision, user_id ):
    """
    Get a repository_review from the database via repository id, changeset_revision
    and user_id.
    """
    sa_session = app.model.context.current
    return sa_session.query( app.model.RepositoryReview ) \
                     .filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),
                                    app.model.RepositoryReview.changeset_revision == changeset_revision,
                                    app.model.RepositoryReview.user_id == app.security.decode_id( user_id ) ) ) \
                     .first()


def get_reviews_by_repository_id_changeset_revision( app, repository_id, changeset_revision ):
    """Get all repository_reviews from the database via repository id and changeset_revision."""
    sa_session = app.model.context.current
    return sa_session.query( app.model.RepositoryReview ) \
                     .filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),
                                    app.model.RepositoryReview.changeset_revision == changeset_revision ) ) \
                     .all()


def has_previous_repository_reviews( app, repository, changeset_revision ):
    """
    Determine if a repository has a changeset revision review prior to the
    received changeset revision.
    """
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
    for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):
        previous_changeset_revision = str( repo.changectx( changeset ) )
        if previous_changeset_revision in reviewed_revision_hashes:
            return True
    return False
This is a Fight Club custom size. For your convenience, all our shoes are sorted by both period style and size. For the best fit, please measure your foot using our shoe size guides here. It is important to measure yourself correctly. If you need more details or help, then please ask.
# -*- coding: UTF-8 -*- """ .. inheritance-diagram:: pyopus.optimizer.grnm :parts: 1 **Unconstrained grid-restrained Nelder-Mead simplex optimizer (PyOPUS subsystem name: GRNMOPT)** A provably convergent version of the Nelder-Mead simplex algorithm. The algorithm performs unconstrained optimization. Convergence is achieved by restraining the simplex points to a gradually refined grid and by keeping the simplex internal angles away from 0. The algorithm was published in .. [grnm] Bűrmen Á., Puhan J., Tuma T.: Grid Restrained Nelder-Mead Algorithm. Computational Optimization and Applications, vol. 34, pp. 359-375, 2006. There is an error in Algorithm 2, step 5. The correct step 5 is: If $f^{pe}<\min(f^{pe}, f^1, f^2, ..., f^{n+1})$ replace $x^i$ with $x^{pe}$ where $x^{i}$ denotes the point for which $f(x^i)$ is the lowest of all points. """ from ..misc.debug import DbgMsgOut, DbgMsg from base import Optimizer from nm import NelderMead from numpy import abs, argsort, where, round, sign, diag, sqrt, log, array, zeros, dot, ones from numpy.linalg import qr, det import matplotlib.pyplot as pl __all__ = [ 'GRNelderMead' ] class GRNelderMead(NelderMead): """ Unconstrained grid-restrained Nelder-Mead optimizer class Default values of the expansion (1.2) and shrink (0.25) coefficients are different from the original Nelder-Mead values. Different are also the values of relative tolerance (1e-16), and absolute function (1e-16) and side length size (1e-9) tolerance. *lam* and *Lam* are the lower and upper bound on the simplex side length with respect to the grid. The shape (side length determinant) is bounded with respect to the grid density by *psi*. The grid density has a continuity bound due to the finite precision of floating point numbers. Therefore the grid begins to behave as continuous when its density falls below the relative(*tau_r*) and absolute (*tau_a*) bound with respect to the grid origin. If *originalGrid* is ``True`` the initial grid has the same density in all directions (as in the paper). If ``False`` the initial grid density adapts to the bounding box shape. If *gridRestrainInitial* is ``True`` the points of the initial simplex are restrained to the grid. See the :class:`~pyopus.optimizer.nm.NelderMead` class for more information. """ def __init__(self, function, debug=0, fstop=None, maxiter=None, reflect=1.0, expand=1.2, outerContract=0.5, innerContract=-0.5, shrink=0.25, reltol=1e-15, ftol=1e-15, xtol=1e-9, simplex=None, lam=2.0, Lam=2.0**52, psi=1e-6, tau_r=2.0**(-52), tau_a=1e-100, originalGrid=False, gridRestrainInitial=False): NelderMead.__init__(self, function, debug, fstop, maxiter, reflect, expand, outerContract, innerContract, shrink, reltol, ftol, xtol, simplex) # Simplex self.simplex=None # Grid origin and scaling self.z=None self.Delta=None # Side length bounds wrt. grid self.lam=lam self.Lam=Lam # Simplex shape lower bound self.psi=1e-6 # Grid continuity bound (relative and absolute) self.tau_r=tau_r self.tau_a=tau_a # Create initial grid with the procedure described in the paper self.originalGrid=originalGrid # Grid restrain initial simplex self.gridRestrainInitial=gridRestrainInitial def check(self): """ Checks the optimization algorithm's settings and raises an exception if something is wrong. 
""" NelderMead.check(self) if self.lam<=0: raise Exception, DbgMsg("GRNMOPT", "lambda should be positive.") if self.Lam<=0: raise Exception, DbgMsg("GRNMOPT", "Lambda should be positive.") if self.lam>self.Lam: raise Exception, DbgMsg("GRNMOPT", "Lambda should be greater or equal lambda.") if self.psi<0: raise Exception, DbgMsg("GRNMOPT", "psi should be greater or equal zero.") if self.tau_r<0 or self.tau_a<0: raise Exception, DbgMsg("GRNMOPT", "Relative and absolute grid continuity bounds should be positive.") def buildGrid(self, density=10.0): """ Generates the intial grid density for the algorithm. The grid is determined relative to the bounding box of initial simplex sides. *density* specifies the number of points in every grid direction that covers the corresponding side of the bounding box. If any side of the bounding box has zero length, the mean of all side lengths divided by *density* is used as grid density in the corresponding direction. Returns the 1-dimensional array of length *ndim* holding the grid densities. """ if self.debug: DbgMsgOut("GRNMOPT", "Building initial grid for initial simplex.") # Side vectors (to the first point) v=self.simplex[1:,:]-self.simplex[0,:] if not self.originalGrid: # Maximal absolute components (bounding box sides) vmax=abs(v).max(0) # Maximal bounding box side vmax_max=vmax.max() # If any component maximum is 0, set it to vmax value vmax=where(vmax==0.0, vmax_max, vmax) # Bounding box dimensions divided by density return vmax/density else: # Shortest side length lmin=sqrt((v*v).sum(1).min()) # Shortest side length divided by density, uniform across all dimensions return ones(self.ndim)*lmin/density def gridRestrain(self, x): """ Returns the point on the grid that is closest to *x*. """ xgr=round((x-self.z)/self.delta)*self.delta+self.z return xgr def sortedSideVectors(self): """ Returns a tuple (*vsorted*, *lsorted*) where *vsorted* is an array holding the simplex side vectors sorted by their length with longest side first. The first index of the 2-dimensional array is the side vector index while the second one is the component index. *lsorted* is a 1-dimensional array of corresponding simplex side lengths. """ # Side vectors v=self.simplex[1:,:]-self.simplex[0,:] # Get length l2=(v*v).sum(1) # Order by length (longest first) i=argsort(l2, 0, 'mergesort') # shortest first i=i[-1::-1] # longest first vsorted=v[i,:] lsorted=sqrt(l2[i]) return (vsorted, lsorted) def reshape(self, v=None, Q=None, R=None): """ Reshapes simpex side vectors given by rows of *v* into orthogonal sides with their bounding box bounded in length by *lam* and *Lam* with respect to the grid density. If *v* is ``None`` it assumes that it is a product of matrices *Q* and *R*. Returns a tuple (*vnew*, *l*) where *vnew* holds the reshaped simplex sides and *l* is the 1-dimensional array of reshaped side lengths. """ # Rows are side vectors # QR decomposition of a matrix with side vectors as columns if v is not None: (Q, R)=qr(v.T) # Get scaling factors and their signs Rdiag=R.diagonal() Rsign=sign(Rdiag) Rsign=where(Rsign!=0, Rsign, 1.0) # Get side lengths l=abs(Rdiag) # Calculate side length bounds norm_delta=sqrt((self.delta**2).sum()) lower=self.lam*sqrt(self.ndim)*norm_delta/2 upper=self.Lam*sqrt(self.ndim)*norm_delta/2 # Bound side length l=where(l<=upper, l, upper) l=where(l>=lower, l, lower) # Scale vectors # Vectors are in columns of Q. Therefore transpose Q. 
vnew=dot(diag(l*Rsign), Q.T) return (vnew, l) def reset(self, x0): """ Puts the optimizer in its initial state and sets the initial point to be the 1-dimensional array or list *x0*. The length of the array becomes the dimension of the optimization problem (:attr:`ndim` member). The initial simplex is built around *x0* by calling the :meth:`buildSimplex` method with default values for the *rel* and *abs* arguments. If *x0* is a 2-dimensional array or list of size (*ndim*+1) times *ndim* it specifies the initial simplex. A corresponding grid is created by calling the :meth:`buildGrid` method. The initial value of the natural logarithm of the simplex side vectors determinant is calculated and stored. """ # Debug message if self.debug: DbgMsgOut("GRNMOPT", "Resetting.") # Make it an array x0=array(x0) # Is x0 a point or a simplex? if x0.ndim==1: # Point # Set x now NelderMead.reset(self, x0) if self.debug: DbgMsgOut("GRNMOPT", "Generating initial simplex from initial point.") sim=self.buildSimplex(x0) self._setSimplex(sim) self.delta=self.buildGrid() self.z=x0 else: # Simplex or error (handled in _setSimplex()) self._setSimplex(x0) self.delta=self.buildGrid() self.z=x0[0,:] if self.debug: DbgMsgOut("GRNMOPT", "Using specified initial simplex.") # Set x to first point in simplex after it was checked in _setSimplex() Optimizer.reset(self, x0[0,:]) # Reset point moves counter self.simplexmoves=zeros(self.ndim+1) # Make x tolerance an array self.xtol=array(self.xtol) def run(self): """ Runs the optimization algorithm. """ # Debug message if self.debug: DbgMsgOut("GRNMOPT", "Starting a run at i="+str(self.niter)) # Checks self.check() # Reset stop flag self.stop=False # Grid-restrain initial simplex if self.gridRestrainInitial: for i in range(0, self.ndim+1): self.simplex[i,:]=self.gridRestrain(self.simplex[i,:]) # Evaluate if needed if self.simplexf is None: self.simplexf=zeros(self.npts) for i in range(0, self.ndim+1): self.simplexf[i]=self.fun(self.simplex[i,:]) if self.debug: DbgMsgOut("GRNMOPT", "Initial simplex point i="+str(self.niter)+": f="+str(self.simplexf[i])) # Loop while not self.stop: # Order simplex (best point first) self.orderSimplex() # Centroid xc=self.simplex[:-1,:].sum(0)/self.ndim # Worst point xw=self.simplex[-1,:] fw=self.simplexf[-1] # Second worst point xsw=self.simplex[-2,:] fsw=self.simplexf[-2] # Best point xb=self.simplex[0,:] fb=self.simplexf[0] # No shrink shrink=False # Reflect xr=self.gridRestrain(xc+(xc-xw)*self.reflect) fr=self.fun(xr) if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": reflect : f="+str(fr)) if fr<fb: # Try expansion xe=self.gridRestrain(xc+(xc-xw)*self.expand) fe=self.fun(xe) if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": expand : f="+str(fe)) if fe<fr: # Accept expansion self.simplex[-1,:]=xe self.simplexf[-1]=fe self.simplexmoves[-1]+=1 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted expansion") else: # Accept reflection self.simplex[-1,:]=xr self.simplexf[-1]=fr self.simplexmoves[-1]+=1 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted reflection after expansion") elif fb<=fr and fr<fsw: # Accept reflection self.simplex[-1,:]=xr self.simplexf[-1]=fr self.simplexmoves[-1]+=1 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted reflection") elif fsw<=fr and fr<fw: # Try outer contraction xo=self.gridRestrain(xc+(xc-xw)*self.outerContract) fo=self.fun(xo) if self.debug: DbgMsgOut("GRNMOPT", "Iteration 
i="+str(self.niter)+": outer con : f="+str(fo)) if fo<fsw: # Accept self.simplex[-1,:]=xo self.simplexf[-1]=fo self.simplexmoves[-1]+=1 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted outer contraction") else: # Shrink shrink=True elif fw<=fr: # Try inner contraction xi=self.gridRestrain(xc+(xc-xw)*self.innerContract) fi=self.fun(xi) if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": inner con : f="+str(fi)) if fi<fsw: # Accept self.simplex[-1,:]=xi self.simplexf[-1]=fi self.simplexmoves[-1]+=1 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted inner contraction") else: # Shrink shrink=True # self._checkSimplex() # self._plotSimplex() # Reshape, pseudo-expand, and shrink loop if shrink: # Normal NM steps failed # No reshape happened yet reshaped=False # Create origin vector and function value x0=zeros(self.ndim) f0=0.0 # Check simplex shape # Simplex is already sorted (v, l)=self.sortedSideVectors() # Rows of v are side vectors, need to QR decompose a matrix # with columns holding side vectors (Q, R)=qr(v.T) # Diagonal of R Rdiag=R.diagonal() # Grid density norm norm_delta=sqrt((self.delta**2).sum()) if abs(Rdiag).min()<self.psi*sqrt(self.ndim)*norm_delta/2: # Shape not good, reshape (v, l)=self.reshape(Q=Q, R=R) reshaped=True # Origin for building the new simplex x0[:]=self.simplex[0,:] f0=self.simplexf[0] # Build new simplex for i in range(self.ndim): self.simplex[i+1,:]=self.gridRestrain(v[i,:]+x0) f=self.fun(self.simplex[i+1,:]) self.simplexf[i+1]=f if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": reshape : f="+str(f)) self.simplexmoves[:]=0 # Do not order simplex here, even if reshape results in a point that improves over x0. # The algorithm in the paper orders the simplex here. This is not in the sense of the # Price-Coope-Byatt paper, which introduced pseudo-expand. Therefore do not sort. # Centroid of the n worst points (or if a reshape took place - n new points) xcw=self.simplex[1:,:].sum(0)/self.ndim # Pseudo-expand point xpe=self.gridRestrain(xb+(self.expand/self.reflect-1.0)*(xb-xcw)) fpe=self.fun(xpe) if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": pseudo exp: f="+str(fpe)) # Check if there is any improvement if fpe<fb: # Pseudo-expand point is better than old best point self.simplex[0,:]=xpe self.simplexf[0]=fpe self.simplexmoves[0]+=1 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted pseudo exp") elif self.simplexf.min()<fb: # One of the points obtained by reshape is better than old best point if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted reshape") else: # No improvement, enter shrink loop # Even though we had a reshape the reshape did not improve the best point, # and neither did pseudo-expand. This means that the best point before # reshape is still the best point. 
if not reshaped: # No reshape yet, reshape now (v, l)=self.reshape(Q=Q, R=R) reshaped=True # Origin for building the new simplex x0[:]=self.simplex[0,:] f0=self.simplexf[0] self.simplexmoves[:]=0 if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": reshape") # This is the first shrink step shrink_step=0 else: # This is the second shrink step # (first one happened at reshape and failed to produce improvement) shrink_step=1 # Shrink loop while self.simplexf.min()>=f0: # Reverse side vectors if this is not the first shrink step if shrink_step>0: v=-v # If not first even shrink step, shrink vectors and check grid if shrink_step>=2 and shrink_step % 2 == 0: # Shrink vectors v=v*self.shrink l=l*self.shrink if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": shrink vectors") # Find shortest side vector i=argsort(l, 0, 'mergesort') lmin=l[i[0]] vmin=v[i[0],:] # Do we need a new grid? if lmin < self.lam*sqrt(self.ndim)*sqrt((self.delta**2).sum())/2: # New grid origin self.z=x0 # New (refined) grid density vmin_norm=sqrt((vmin**2).sum())/sqrt(self.ndim) abs_vmin=abs(vmin) deltaprime=1.0/(250*self.lam*self.ndim)*where(abs_vmin>vmin_norm, abs_vmin, vmin_norm) # Enforce continuity bound on density contbound_r=abs(self.z)*self.tau_r contbound=where(contbound_r>self.tau_a, contbound_r, self.tau_a) deltanew=where(deltaprime>contbound, deltaprime, contbound) # Update grid density self.delta=where(deltanew<self.delta, deltanew, self.delta) if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": refine grid") # Evaluate points self.simplex[1:,:]=x0+v for i in range(self.ndim): self.simplex[i+1,:]=self.gridRestrain(x0+v[i,:]) f=self.fun(self.simplex[i+1,:]) self.simplexf[i+1]=f if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+(": shrink %1d: f=" % (shrink_step % 2))+str(f)) # self._checkSimplex() # self._plotSimplex() # if f0!=self.simplexf[0] or (x0!=self.simplex[0,:]).any(): # raise Exception, "x0, f0 not matching." # Stopping condition if (self.checkFtol() and self.checkXtol()) or self.stop: break # Increase shrink step counter shrink_step+=1 # Check stopping condition if self.checkFtol() and self.checkXtol(): if self.debug: DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": simplex x and f tolerance reached, stopping.") break # Debug message if self.debug: DbgMsgOut("GRNMOPT", "Finished.") # # Internal functions for debugging purposes # def _checkSimplex(self): """ Check if the approximate cost function values corresponding to simplex points are correct. """ for i in range(0, self.ndim+1): ff=self.simplexf[i] f=self.fun(self.simplex[i,:], False) if ff!=f and self.debug: DbgMsgOut("GRNMOPT", "Simplex consistency broken for member #"+str(i)) raise Exception, "" def _checkLogDet(self): """ Check if the natural logarithm of the simplex side vectors is correct. """ (v,l)=self.sortedSideVectors() vdet=abs(det(v)) DbgMsgOut("GRNMOPT", " logDet="+str(exp(self.logDet))+" vdet="+str(vdet)) if (1.0-exp(self.logDet)/vdet)>1e-3: raise Exception, DbgMsG("GRNMOPT", "Simplex determinat consistency broken. Relative error: %e" % (1.0-exp(self.logDet)/vdet)) def _plotSimplex(self): """ Plot the projection of simplex side vectors to the first two dimensions. 
""" p1=self.simplex[0,:2] p2=self.simplex[1,:2] p3=self.simplex[2,:2] pl.clf() pl.hold(True) pl.plot([p1[0]], [p1[1]], 'ro') pl.plot([p2[0]], [p2[1]], 'go') pl.plot([p3[0]], [p3[1]], 'bo') pl.plot([p1[0], p2[0]], [p1[1], p2[1]], 'b') pl.plot([p1[0], p3[0]], [p1[1], p3[1]], 'b') pl.axis('equal') pl.hold(False) pl.show()
Today I am going to explain the differences among a meme, a rage face, and a rage comic. Why? Because there are so many people who still can't tell these terms apart. A meme is not just a comic; the confusion starts with fan pages that call themselves "meme comic" pages. A meme can take almost any form, but the comics within meme culture are usually called rage comics, while in general a meme is an image macro: a picture given a caption, often taken from a famous character's line in a film, so that viewers read the message in the tone of the original character even though the sentences are different. Can you see it? That is where the joke is.

And it is not only movies; many other things can turn a picture into an image macro within the scope of the Internet meme. For example, "BRACE YOURSELF" is taken from Ned Stark in Game of Thrones, from the scene where he beheads the deserter who saw the White Walkers; in that episode Lord Ned keeps saying "BRACE YOURSELF" or "WINTER IS COMING." There are so many internet memes, and plenty of videos and GIFs can be memes too, but I think it is more reasonable not to call all of them "the meme," because that only creates confusion about what a meme actually is.

Rage faces are part of meme culture, but they are a special category of their own, so we had better simply call them rage faces. Let's look at the contrast clearly: a comic that uses rage faces is called a rage comic, and the captioned picture is what is properly called an image macro, even though it is most often just called a meme. So even if a meme comes in the form of a comic, let's call those rage comics, call the characters in them rage faces, and call the captioned pictures image macros, or, as most people know them, memes.
############################################################################ ## ## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary ## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary ## ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License along ## with this program; if not, write to the Free Software Foundation, Inc., ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ## ############################################################################ import socket from zorpctl.SZIGMessages import ( MessageAuthorizeAccept, MessageAuthorizeReject, MessageGetChild, MessageGetDeadLockCheck, MessageGetLogLevel, MessageGetLogSpec, MessageGetSibling, MessageGetValue, MessageReload, MessageReloadResult, MessageSetDeadLockCheck, MessageSetLogLevel, MessageSetLogSpec, MessageStopSession ) from zorpctl.ZorpctlConf import ZorpctlConfig class Response(object): def __init__(self, succeeded, value = None): self.is_succeeded = succeeded self.value = value class ResponseDeadlockCheck(Response): def isSet(self): return self.value == "1" class Handler(object): """ Class created for handling messages sent by Szig to Zorp and receiving answer from Zorp """ _success_prefix = "OK " _fail_prefix = "FAIL " def __init__(self, server_address): self.max_command_length = 4096 self.response_length = 4096 self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.server_address = server_address try: self.socket.connect(self.server_address) except IOError as e: e.message = "Socket not found, %s" % server_address raise e def talk(self, message): """ Sends an instance of Message and returns the response as Response class """ self.send(message) return self.recv() def send(self, message): """ Sending a message to Zorp. Messages can be derived from abstract Message class. """ self._write_request(str(message)) def recv(self): """ Returns an instance of Response class. """ resp = self._read_response() return Response(self._isSucceeded(resp), self._cutPrefix(resp)) def _write_request(self, request): """ Writing a command message to a Unix Domain Socket to communicate with Zorp. Raises SZIGError if not all the data has been sent. SZIGError value is a tuple of sent/all """ request_length = len(request) if request_length > self.max_command_length: raise SZIGError("Given request is longer than %s" % self.max_command_length) sent_data_length = self.socket.send(request) if sent_data_length < request_length: msg = "There was an error while sending the request (%s/%s)!" % (sent_data_length, request_length) raise SZIGError(msg, (sent_data_length, request_length)) def _read_response(self, resp_len = None): """ Reading from a Unix Domain Socket to communicate with Zorp. 
""" if not resp_len: resp_len = self.response_length if resp_len < 1: raise SZIGError("Response length should be greater than 0") response = self.socket.recv(resp_len) if not response: raise SZIGError("There was an error while receiving the answer!") return response[:-1] if response[-1:] == '\n' else response def _isSucceeded(self, response): """ Method for checking if Zorp understood the given request by inspecting the response. """ return response[:len(self._success_prefix)] == self._success_prefix def _cutPrefix(self, string): """ Cuts the defined prefix from a string. """ if string[:len(self._success_prefix)] == self._success_prefix: string = string[len(self._success_prefix):] else: if string[:len(self._fail_prefix)] == self._fail_prefix: string = string[len(self._fail_prefix):] return string class SZIG(object): def __init__(self, process_name, handler=None): ZORPCTLCONF = ZorpctlConfig.Instance() self.pidfile_dir = ZORPCTLCONF['ZORP_PIDFILEDIR'] if not handler: handler = Handler self.handler = handler(self.pidfile_dir + '/zorpctl.' + process_name) def get_value(self, key): response = self.handler.talk(MessageGetValue(key)) return None if response.value == "None" else response.value def get_sibling(self, node): response = self.handler.talk(MessageGetSibling(node)) return None if response.value == "None" else response.value def get_child(self, node): response = self.handler.talk(MessageGetChild(node)) return None if response.value == "None" else response.value @property def loglevel(self): self.handler.send(MessageGetLogLevel()) return int(self.handler.recv().value) @loglevel.setter def loglevel(self, value): self.handler.send(MessageSetLogLevel(value)) if not self.handler.recv().is_succeeded: raise SZIGError("Log level has not been set.") @property def logspec(self): self.handler.send(MessageGetLogSpec()) return self.handler.recv().value @logspec.setter def logspec(self, value): """ Setting LOGSPEC expecting a log specification string as value """ self.handler.send(MessageSetLogSpec(value)) if not self.handler.recv().is_succeeded: raise SZIGError("Log specification has not been set.") @property def deadlockcheck(self): self.handler.send(MessageGetDeadLockCheck()) dlc = self.handler.recv() dlc.__class__ = ResponseDeadlockCheck return dlc.isSet() @deadlockcheck.setter def deadlockcheck(self, value): """ Sets Deadlock Check, expects a boolean as value. """ self.handler.talk(MessageSetDeadLockCheck(value)) def reload(self): self.handler.talk(MessageReload()) def reload_result(self): result = self.handler.talk(MessageReloadResult()) return result.is_succeeded def stop_session(self, instance): response = self.handler.talk(MessageStopSession(instance)) if not response.is_succeeded: raise SZIGError("Session stop failed! Response was: %s" % response.value) def authorize_accept(self, session_id, description): response = self.handler.talk(MessageAuthorizeAccept(session_id, description)) if not response.is_succeeded: raise SZIGError(response.value) return response.value def authorize_reject(self, session_id, description): response = self.handler.talk(MessageAuthorizeReject(session_id, description)) if not response.is_succeeded: raise SZIGError(response.value) return response.value def stop_session(self, session_id): response = self.handler.talk(MessageStopSession(session_id)) if not response.is_succeeded: raise SZIGError(response.value) return response.value class SZIGError(Exception): """ Exception Class created for Szig specific errors. 
""" def __init__(self, msg, value = None): self.msg = msg self.value = value def __str__(self): return self.msg + repr(self.value)
This is a color that has a warm "wow" factor. A beautiful background color. Well-paired with Sun Baked.
import os, sys sys.path.append(os.getcwd()) import time import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import sklearn.datasets import tensorflow as tf import tflib as lib import tflib.ops.linear import tflib.ops.conv2d import tflib.ops.batchnorm import tflib.ops.deconv2d import tflib.save_images import tflib.midi import tflib.plot MODE = 'wgan-gp' # dcgan, wgan, or wgan-gp DIM = 30 # Model dimensionality BATCH_SIZE = 10 # Batch size CRITIC_ITERS = 5 # For WGAN and WGAN-GP, number of critic iters per gen iter LAMBDA = 10 # Gradient penalty lambda hyperparameter ITERS = 200000 # How many generator iterations to train for OUTPUT_DIM = 100*88 # Number of pixels in MNIST (28*28) lib.print_model_settings(locals().copy()) def LeakyReLU(x, alpha=0.2): return tf.maximum(alpha*x, x) def ReLULayer(name, n_in, n_out, inputs): output = lib.ops.linear.Linear( name+'.Linear', n_in, n_out, inputs, initialization='he' ) return tf.nn.relu(output) def LeakyReLULayer(name, n_in, n_out, inputs): output = lib.ops.linear.Linear( name+'.Linear', n_in, n_out, inputs, initialization='he' ) return LeakyReLU(output) def Generator(n_samples, noise=None): if noise is None: noise = tf.random_normal([n_samples, 128]) output = lib.ops.linear.Linear('Generator.Input', 128, 13*11*4*DIM, noise) if MODE == 'wgan': output = lib.ops.batchnorm.Batchnorm('Generator.BN1', [0], output) output = tf.nn.relu(output) output = tf.reshape(output, [-1, 4*DIM, 13, 11]) output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output) if MODE == 'wgan': output = lib.ops.batchnorm.Batchnorm('Generator.BN2', [0,2,3], output) output = tf.nn.relu(output) output = output[:,:,:25,:] output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output) if MODE == 'wgan': output = lib.ops.batchnorm.Batchnorm('Generator.BN3', [0,2,3], output) output = tf.nn.relu(output) output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 1, 5, output) output = tf.nn.sigmoid(output) return tf.reshape(output, [-1, OUTPUT_DIM]) def Discriminator(inputs): output = tf.reshape(inputs, [-1, 1, 100, 88]) output = lib.ops.conv2d.Conv2D('Discriminator.1',1,DIM,5,output,stride=2) output = LeakyReLU(output) output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2) if MODE == 'wgan': output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output) output = LeakyReLU(output) output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2) if MODE == 'wgan': output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output) output = LeakyReLU(output) output = tf.reshape(output, [-1, 13*11*4*DIM]) output = lib.ops.linear.Linear('Discriminator.Output', 13*11*4*DIM, 1, output) return tf.reshape(output, [-1]) real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM]) fake_data = Generator(BATCH_SIZE) disc_real = Discriminator(real_data) disc_fake = Discriminator(fake_data) gen_params = lib.params_with_name('Generator') disc_params = lib.params_with_name('Discriminator') if MODE == 'wgan': gen_cost = -tf.reduce_mean(disc_fake) disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) gen_train_op = tf.train.RMSPropOptimizer( learning_rate=5e-5 ).minimize(gen_cost, var_list=gen_params) disc_train_op = tf.train.RMSPropOptimizer( learning_rate=5e-5 ).minimize(disc_cost, var_list=disc_params) clip_ops = [] for var in lib.params_with_name('Discriminator'): clip_bounds = [-.01, .01] clip_ops.append( tf.assign( var, tf.clip_by_value(var, clip_bounds[0], 
clip_bounds[1]) ) ) clip_disc_weights = tf.group(*clip_ops) elif MODE == 'wgan-gp': gen_cost = -tf.reduce_mean(disc_fake) disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) alpha = tf.random_uniform( shape=[BATCH_SIZE,1], minval=0., maxval=1. ) differences = fake_data - real_data interpolates = real_data + (alpha*differences) gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0] slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1])) gradient_penalty = tf.reduce_mean((slopes-1.)**2) disc_cost += LAMBDA*gradient_penalty gen_train_op = tf.train.AdamOptimizer( learning_rate=1e-4, beta1=0.5, beta2=0.9 ).minimize(gen_cost, var_list=gen_params) disc_train_op = tf.train.AdamOptimizer( learning_rate=1e-4, beta1=0.5, beta2=0.9 ).minimize(disc_cost, var_list=disc_params) clip_disc_weights = None elif MODE == 'dcgan': gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( disc_fake, tf.ones_like(disc_fake) )) disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( disc_fake, tf.zeros_like(disc_fake) )) disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( disc_real, tf.ones_like(disc_real) )) disc_cost /= 2. gen_train_op = tf.train.AdamOptimizer( learning_rate=2e-4, beta1=0.5 ).minimize(gen_cost, var_list=gen_params) disc_train_op = tf.train.AdamOptimizer( learning_rate=2e-4, beta1=0.5 ).minimize(disc_cost, var_list=disc_params) clip_disc_weights = None # For saving samples fixed_noise = tf.constant(np.random.normal(size=(20, 128)).astype('float32')) fixed_noise_samples = Generator(20, noise=fixed_noise) def generate_image(frame, true_dist): samples = session.run(fixed_noise_samples) lib.save_images.save_images( samples.reshape((20, 100, 88)), '../data/midi_img/samples_{}.png'.format(frame) ) # Dataset iterator train_gen, dev_gen, test_gen = lib.midi.load(BATCH_SIZE, BATCH_SIZE) def inf_train_gen(): while True: for images,targets in train_gen(): yield images # Train loop saver = tf.train.Saver() with tf.device('/gpu:0'): with tf.Session() as session: session.run(tf.initialize_all_variables()) gen = inf_train_gen() for iteration in xrange(ITERS): start_time = time.time() if iteration > 0: _ = session.run(gen_train_op) if MODE == 'dcgan': disc_iters = 1 else: disc_iters = CRITIC_ITERS for i in xrange(disc_iters): _data = gen.next() _disc_cost, _ = session.run( [disc_cost, disc_train_op], feed_dict={real_data: _data} ) if clip_disc_weights is not None: _ = session.run(clip_disc_weights) lib.plot.plot('train disc cost', _disc_cost) lib.plot.plot('time', time.time() - start_time) # Calculate dev loss and generate samples every 100 iters if iteration % 100 == 99: dev_disc_costs = [] for images,_ in dev_gen(): _dev_disc_cost = session.run( disc_cost, feed_dict={real_data: images} ) dev_disc_costs.append(_dev_disc_cost) lib.plot.plot('dev disc cost', np.mean(dev_disc_costs)) if iteration % 1000 == 999: generate_image(iteration, _data) saver.save(session, '../data/model/midi_model', global_step=iteration) # Write logs every 100 iters if (iteration < 5) or (iteration % 100 == 99): lib.plot.flush() lib.plot.tick()
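A short sketch of how samples might be drawn from a saved checkpoint after training. The checkpoint filename (including the global-step suffix) is an assumption; the graph nodes reused here (saver, fixed_noise_samples) are the ones defined in the script above.

# Hedged post-training sampling sketch (assumes a checkpoint written by the
# training loop above exists at the given path).
with tf.Session() as session:
    saver.restore(session, '../data/model/midi_model-199999')
    samples = session.run(fixed_noise_samples)
    lib.save_images.save_images(
        samples.reshape((20, 100, 88)),
        '../data/midi_img/samples_restored.png'
    )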
It’s been a while since my last post. This is in part because I haven’t felt inspired by vegan food, which I’m eating about 85% of the time (the other 15% is human flesh). But I’ve also been very busy writing, job hunting, and vacillating between enjoying and worrying about life. And yesterday I watched the entire first season of Younger, starring Hilary Duff.

Now I’m back with a subpar dish, inspired by one of those instant Indian food meals that you throw in your cart for emergencies but then go home and eat immediately. Somehow, the contents of that plastic packet were rich, savory, and delicious. If red bean curry could taste so good after just a minute in the microwave, could I not make it from scratch? Well, I could definitely try. Read below for the results.

I decided to start with dried beans, because you can buy them in bulk to cut down on waste. What you don’t cut down on is time, because you need to soak them overnight and then still cook them for an eternity. In a sauté pan I added all the basics: onion, garlic, and ginger, along with the diced tomatoes. When my roommate left, he took the garam masala with him, which would have made my life and this dish better. Instead, I picked out what I thought would be a good blend of spices. I threw in a bunch of each without measuring. Could this have been my mistake?

I transferred everything to a pot, added the vegetable broth and beans, and started to simmer. And continued to simmer for like 7 hours!! Eventually, the onions and tomatoes completely broke down and the beans became soft. I seasoned them with salt and pepper, but even after that, they were nowhere near as delicious as the $4.00 convenience food I got from Giant. I ate the bean mush with naan, which made it only slightly more tasty.

If you have any tips, send them my way! What did I do wrong? Did I miss an important ingredient? Should I stick with canned beans? Please help me become a better person and a better bean cooker.
from typing import Any from django.core.management.base import CommandParser from zerver.lib.management import ZulipBaseCommand from zerver.lib.message import maybe_update_first_visible_message_id from zerver.models import Realm class Command(ZulipBaseCommand): help = """Calculate the value of first visible message ID and store it in cache""" def add_arguments(self, parser: CommandParser) -> None: self.add_realm_args(parser) parser.add_argument( '--lookback-hours', dest='lookback_hours', type=int, help="Period a bit larger than that of the cron job that runs " "this command so that the lookback periods are sure to overlap.", required=True, ) def handle(self, *args: Any, **options: Any) -> None: target_realm = self.get_realm(options) if target_realm is None: realms = Realm.objects.all() else: realms = [target_realm] for realm in realms: maybe_update_first_visible_message_id(realm, options['lookback_hours'])
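A minimal sketch of how this command might be invoked programmatically in a test or maintenance script, assuming the module lives under management/commands with the name calculate_first_visible_message_id.py (the filename is not shown here, so the command name is an assumption):

from django.core.management import call_command

# Recompute and cache the first visible message ID for all realms,
# looking back 30 hours; the command name is assumed from the module path.
call_command('calculate_first_visible_message_id', lookback_hours=30)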
Christmas Presence - Back in our day . . .

I have always loved Christmas. There was a time as a child when I believed in Santa Claus and fell asleep on the floor one cold winter's night, while watching out the window for Santa and his sleigh and reindeer to pass in front of the moon! I didn't see him, though, and fell asleep on the cold linoleum. My parents found me, of course, and put me in bed! The next morning I woke up early and sneaked out to the living room, found my Ricky Ricardo doll, and took it back to bed with me! There were tons of presents, but I didn't open anything....just the doll. Later on, they all woke up and we had the best Christmas ever!

My Grandmother spoiled us kids because my Dad was a coal miner and didn't make much money. She worked in Charleston, WV, and she'd bring home food and gifts almost every weekend. But for Christmas she went all out! I got a set of real china, a table and chairs, my doll and buggy, a high chair for the doll, and paper dolls and coloring books and crayons! I had so much I didn't know what to play with first. But, in spite of it all, I didn't get that spoiled. I loved my toys, but I shared with my cousin Sandra and my friends.

Then as I grew, I did for others! Especially my Mother. One year after our Dad left us, I felt so bad for her that I bought her a set of beautiful dinnerware, a set of pots and pans, a sewing chest, a new coffee maker, and so much more I don't remember. She insisted I didn't need to do all that, but I couldn't help in any other way. I wanted her to be happy, not realizing that things can't replace people. The pain she felt was intense. She had always been with Dad and followed him wherever he'd take us. He was in the Navy after the mines shut down and we moved a lot. But when the family followed me back to California from Minnesota, and he had to go back there and finish his tour of duty or request a change in duty, he lost interest in us. Said he felt empty. He then was put on a carrier and sent out for 9 months. When he came back home, he transferred again to Seattle and Mom refused to go. He stayed gone for many years. Christmas was calm and quiet...no arguments for a change. But she had no money and felt bad, so in spite of the things I bought for her and the family, Christmas was rather flat.

I met my husband when he came to work in our office. We didn't talk at first. But eventually we started talking, and he was a very interesting person. He told me things I'd never heard before, and they began making good sense to me. I changed the way I dealt with people, not saying a word, just playing out the rope so they could hang themselves and have no one but themselves to blame for it! I liked him a lot! We had great times together. We went to the desert and climbed the sand dunes and hills...I fell and slid down Diablo on my stomach for a few feet. He had shown me what to do and I did just that. He lay on his stomach at the top of the hill so he could reach my hand. He helped me get back up on top and I just started laughing myself silly! I think it was relief!! ha ha

But we hit it off, and in July of 1975 he asked me to marry him! Said we could have a baby! Something I'd always wanted. So we did get married on Christmas Eve of 1975, and by November of 1976 we were parents of twin boys! Christmas that year was mostly just showing the babies the pretty tree lights. We made a trip out to our local Saveon drug store and bought a little fake tree. There were a few gifts we bought for each other, but not a lot.
The grandparents bought things for the babies and bought a few things for us as well. But Christmas improved over the years and there were all sorts of things...big wheels, play dough, fireman hats and cars and all sorts of stuff. We stepped on them in the dark when we were checking on who needed a drink of water or who had to go to the potty. But time passed quickly, as it does, and before you knew it the kids were 7 or 8 and we bought what we could. We didn't have a lot even though we both worked. But things improved with time.

I'm skipping around a lot because I just want to keep Christmas in front of your eyes, as it is Christmas today, in 2014. Both boys got married and gave us 7 grandkids to spoil. Right now our oldest twin is asleep on the sofa and will get up early and go get his kids. He divorced his wife and the kids live with her. There are many presents under the tree and most of them are for the kids. It's fun and I am excited like a youngster even though I turned 68 just yesterday! Maybe I'm reverting back to my childhood! ha ha Whatever!!! I am determined to have fun for the rest of my life! I've been sick a lot, and it took the joy out of life for a long time. But now, well, I have my Christmas wish: a sound mind, a healthy heart, and a family to love and surprise with gifts and much love!

God is good to us, and I always make a point of celebrating the truth of Christmas with each new grandchild that comes along. My sons are happy with their children and don't plan to have any more. My daughter takes care of Jack and me and takes care of our son's kids during the week. She is one in a million and I hope she really enjoys her Christmas this year! Life has been good to us. There have been hard times, like there are for everyone, but we are survivors and overcomers and we are happy and content with our lives.

Christmas is a celebration of a miracle! A Baby born in a manger who would become the Savior of the whole world! Jesus, we love you so much, and we thank you for saving Jack's life last year. Accept our birthday greetings to you, and be with us here today and bless us with peace, love, and the joy of the Christmas Miracle, YOU! If you need good news, I couldn't offer any better! Jesus is the reason for Christmas, and we express our love for our families in the best way we know how....with gifts from the heart and kisses and hugs for everyone. I don't think Jesus feels slighted! He received gifts as well...and was the most well-adjusted child ever. But then, He was special, and Jesus, we know without you, we'd all be lost. Thank you for coming to earth to give us a way to paradise! Thank you for dying for us, Lord, and help us to never make light of what your life meant to the world! We love you, The Tyler Gang!
import json from django.http import HttpResponse, HttpResponseForbidden from django.http import HttpResponseRedirect from django.shortcuts import render, get_object_or_404 from django.views.decorators.http import require_POST from django.template.context import RequestContext from django.contrib.auth.decorators import login_required from django.utils.translation import ugettext as _ from django.contrib.syndication.views import Feed from django.utils.feedgenerator import Atom1Feed from django.contrib import messages from django.conf import settings from django.views.generic.detail import SingleObjectTemplateResponseMixin, BaseDetailView from qa.forms import AnswerForm, QuestionForm from .models import * from qa.mixins import JSONResponseMixin from user.views import edit_profile # the order options for the list views ORDER_OPTIONS = {'date': '-created_at', 'rating': '-rating'} class JsonpResponse(HttpResponse): def __init__(self, data, callback, *args, **kwargs): jsonp = "%s(%s)" % (callback, json.dumps(data)) super(JsonpResponse, self).__init__( content=jsonp, content_type='application/javascript', *args, **kwargs) def questions(request, entity_slug=None, entity_id=None, tags=None): """ list questions ordered by number of upvotes """ # TODO: cache the next lines if entity_id: entity = Entity.objects.get(pk=entity_id) else: entity = Entity.objects.get(slug=entity_slug) questions = Question.on_site.filter(entity=entity) context = {'entity': entity} order_opt = request.GET.get('order', 'rating') order = ORDER_OPTIONS[order_opt] if tags: tags_list = tags.split(',') questions = questions.filter(tags__name__in=tags_list) context['current_tags'] = tags_list questions = questions.order_by(order) # TODO: revive the tags! # context['tags'] = TaggedQuestion.on_site.values('tag__name').annotate(count=Count("tag")) context['questions'] = questions context['by_date'] = order_opt == 'date' context['by_rating'] = order_opt == 'rating' return render(request, "qa/question_list.html", RequestContext(request, context)) class QuestionDetail(JSONResponseMixin, SingleObjectTemplateResponseMixin, BaseDetailView): model = Question template_name = 'qa/question_detail.html' context_object_name = 'question' slug_field = 'unislug' def get_context_data(self, **kwargs): context = super(QuestionDetail, self).get_context_data(**kwargs) context['max_length_a_content'] = MAX_LENGTH_A_CONTENT context['answers'] = self.object.answers.all() context['entity'] = self.object.entity can_answer = self.object.can_answer(self.request.user) context['can_answer'] = can_answer if can_answer: try: user_answer = self.object.answers.get(author=self.request.user) context['my_answer_form'] = AnswerForm(instance=user_answer) context['my_answer_id'] = user_answer.id except self.object.answers.model.DoesNotExist: context['my_answer_form'] = AnswerForm() if self.request.user.is_authenticated() and \ not self.request.user.upvotes.filter(question=self.object).exists(): context['can_upvote'] = True else: context['can_upvote'] = False return context def render_to_response(self, context): # Look for a 'format=json' GET argument if self.request.GET.get('format', 'html') == 'json' or self.request.is_ajax(): data = { 'question': { 'subject': self.object.subject, 'content': self.object.content, 'author': self.object.author.username } } return JSONResponseMixin.render_to_response(self, data) else: return SingleObjectTemplateResponseMixin.render_to_response(self, context) @login_required def post_answer(request, q_id): context = {} question = 
Question.objects.get(id=q_id) if not question.can_answer(request.user): return HttpResponseForbidden(_("You must be logged in as a candidate to post answers")) try: # make sure the user haven't answered already answer = question.answers.get(author=request.user) except question.answers.model.DoesNotExist: answer = Answer(author=request.user, question=question) answer.content = request.POST.get("content") answer.save() return HttpResponseRedirect(question.get_absolute_url()) @login_required def post_q_router(request): user = request.user if user.is_anonymous(): return HttpResponseRedirect(settings.LOGIN_URL) else: profile = user.profile entity_slug = profile.locality and profile.locality.slug if entity_slug: return HttpResponseRedirect(reverse(post_question, args=(entity_slug, ))) else: # user must set locality return HttpResponseRedirect(reverse(edit_profile)) @login_required def post_question(request, entity_slug, slug=None): entity = Entity.objects.get(slug=entity_slug) if slug: q = get_object_or_404(Question, unislug=slug, entity=entity) if request.method == "POST": form = QuestionForm(request.POST) if form.is_valid(): if slug: if q.author != request.user: return HttpResponseForibdden(_("You can only edit your own questions.")) if q.answers.count(): return HttpResponseForbidden(_("Question has been answered, editing disabled.")) question = q question.subject = form.cleaned_data.get('subject', "") question.save() else: question = form.save(commit=False) question.author = request.user question.entity = entity question.save() form.save_m2m() return HttpResponseRedirect(question.get_absolute_url()) else: if slug: subject = q.subject else: subject = "" form = QuestionForm(initial={'entity': entity, 'subject': subject}) context = RequestContext(request, {"form": form, "entity": entity, "max_length_q_subject": MAX_LENGTH_Q_SUBJECT, "slug": slug, }) return render(request, "qa/post_question.html", context) @login_required def upvote_question(request, q_id): if request.method == "POST": q = get_object_or_404(Question, id=q_id) user = request.user if q.author == user or user.upvotes.filter(question=q): return HttpResponseForbidden(_("You already upvoted this question")) else: upvote = QuestionUpvote.objects.create(question=q, user=user) #TODO: use signals so the next line won't be necesary new_count = increase_rating(q) return HttpResponse(new_count) else: return HttpResponseForbidden(_("Use POST to upvote a question")) @transaction.commit_on_success def increase_rating(q): q = Question.objects.get(id=q.id) q.rating += 1 q.save() return q.rating class RssQuestionFeed(Feed): """Simple feed to get all questions""" title = _('OK QA Question Feed') link = "/" description = _('Questions from OKQA') def items(self): return Question.objects.order_by('-updated_at') def item_title(self, item): return item.subject def item_description(self, item): return item.content class AtomQuestionFeed(RssQuestionFeed): feed_type = Atom1Feed subtitle = RssQuestionFeed.description class RssQuestionAnswerFeed(Feed): """"Give question, get all answers for that question""" def get_object(self, request, q_id): return get_object_or_404(Question, pk=q_id) def title(self, obj): return _('Answers for the question') + ' "%s' % obj.subject + '"' def link(self, obj): return obj.get_absolute_url() def description(self, obj): return _('A feed of all answers for the question') + ' "%s' % obj.subject + '"' def items(self, obj): return Answer.objects.filter(question=obj).order_by('-updated_at') class 
AtomQuestionAnswerFeed(RssQuestionAnswerFeed): feed_type = Atom1Feed subtitle = RssQuestionAnswerFeed.description @require_POST def flag_question(request, q_id): q = get_object_or_404(Question, id=q_id) user = request.user ret = {} if user.is_anonymous(): messages.error(request, _('Sorry, you have to login to flag questions')) ret["redirect"] = '%s?next=%s' % (settings.LOGIN_URL, q.get_absolute_url()) elif (user.profile.is_editor and user.profile.locality == q.entity) or (user == q.author and not q.answers.all()): q.delete() messages.info(request, _('Question has been removed')) ret["redirect"] = reverse('qna', args=(q.entity.slug,)) elif user.flags.filter(question=q): ret["message"] = _('Thanks. You already reported this question') else: flag = QuestionFlag.objects.create(question=q, reporter=user) #TODO: use signals so the next line won't be necesary q.flagged() ret["message"] = _('Thank you for falgging the question. One of our editors will look at it shortly.') return HttpResponse(json.dumps(ret), content_type="application/json")
And you'll end up with a space that's uniquely you. Watch HGTV or read about interior or architectural design for any length of time and you're going to run into a show or an article about trends. Trends of all types. Color trends. Material trends. Paint vs. wallpaper trends. You-name-it trends. But you don't have to follow the crowd to make a statement, and one easy way to set your space apart is with glass. Take showers, for example. Those seamless glass showers add shine and sparkle to any bath. But they can also become the statement piece in the room if you think a bit creatively: use different glass (seedy, reeded, colored), etch a design into the glass, or cut the glass into interesting shapes rather than sticking with the traditional rectangle. The choices are limited only by your imagination. Once you decide, come to Don's Mobile Glass, where we have more than 50 years' experience producing glass to meet all sorts of needs (from windshields to mirrors to shelves to showers) and helping design dreams come to life. Don's Can Help You, Your Designer, Or Your Architect Make Your Space Unique!
from datetime import datetime

import jinja2
import flask

blueprint = flask.Blueprint('filters', __name__)


# http://flask.pocoo.org/snippets/33/
# and
# http://stackoverflow.com/questions/12288454/how-to-import-custom-jinja2-filters-from-another-file-and-using-flask
@jinja2.contextfilter
@blueprint.app_template_filter("timesince")
def friendly_time(context, dt, past_="ago", future_="from now", default="just now"):
    """
    Returns string representing "time since" or "time until",
    e.g. 3 days ago, 5 hours from now etc.
    """
    now = datetime.utcnow()

    # Accept strings like "2015-02-26 03:45:21..." by trimming to 19 characters.
    try:
        trimmed_time = dt[:19]
        dt = datetime.strptime(trimmed_time, "%Y-%m-%d %H:%M:%S")
    except (TypeError, ValueError):
        pass

    # Accept RFC 2822-style strings, e.g. "Thu, 26 Feb 2015 03:45:21 GMT".
    try:
        dt = datetime.strptime(dt, "%a, %d %b %Y %H:%M:%S %Z")
    except (TypeError, ValueError):
        pass

    if now > dt:
        diff = now - dt
        dt_is_past = True
    else:
        diff = dt - now
        dt_is_past = False

    # Integer division keeps each period a whole number of units.
    periods = (
        (diff.days // 365, "year", "years"),
        (diff.days // 30, "month", "months"),
        (diff.days // 7, "week", "weeks"),
        (diff.days, "day", "days"),
        (diff.seconds // 3600, "hour", "hours"),
        (diff.seconds // 60, "minute", "minutes"),
        (diff.seconds, "second", "seconds"),
    )

    for period, singular, plural in periods:
        if period:
            return "%d %s %s" % (period,
                                 singular if period == 1 else plural,
                                 past_ if dt_is_past else future_)

    return default
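A minimal usage sketch, assuming a small Flask application module (the app object, module name, and template string below are illustrative): registering the blueprint makes the timesince filter available application-wide.

import flask

from filters import blueprint  # module name assumed for this sketch

app = flask.Flask(__name__)
app.register_blueprint(blueprint)

@app.route("/")
def index():
    # The filter accepts datetimes or the string formats handled above.
    return flask.render_template_string(
        "Posted {{ created_at | timesince }}",
        created_at="Thu, 26 Feb 2015 03:45:21 GMT",
    )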
No pictures, videos, owners, or related websites have been added for plate number 7420CS in Connecticut, United States.
#!/usr/bin/env python # # @file generateRNG.py # @brief function for generating RNG schema # @author Frank Bergmann # @author Sarah Keating # # <!-------------------------------------------------------------------------- # # Copyright (c) 2013-2018 by the California Institute of Technology # (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK) # and the University of Heidelberg (Germany), with support from the National # Institutes of Health (USA) under grant R01GM070923. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Neither the name of the California Institute of Technology (Caltech), nor # of the European Bioinformatics Institute (EMBL-EBI), nor of the University # of Heidelberg, nor the names of any contributors, may be used to endorse # or promote products derived from this software without specific prior # written permission. # ------------------------------------------------------------------------ --> """Function for generating RNG schema""" import sys from ..parseXML import ParseXML from ..util import global_variables as gv from ..validation import RNGSchemaFiles def generate_rng_for(filename): # , overwrite=True): """ Parse XML file and then invokes RNG file generation code. :param filename: the XML file to parse :return: returns nothing. """ gv.running_tests = False parser = ParseXML.ParseXML(filename) ob = dict() if gv.code_returned == gv.return_codes['success']: # catch a problem in the parsing try: ob = parser.parse_deviser_xml() except Exception: gv.code_returned = gv.return_codes['parsing error'] if gv.code_returned == gv.return_codes['success']: # try: if gv.is_package: generate_rng(ob) # except Exception : # gv.code_returned = gv.return_codes['unknown error - please report'] def generate_rng(ob): """ Wrapper function. Creates RNG Schema file. :param ob: the big dictionary object produced by XML file parsing. :return: returns nothing. """ ex = RNGSchemaFiles.RNGSchemaFiles(ob) ex.write_file() def main(args): """ Checks correct number of arguments and then invokes RNG code. """ if len(args) != 2: gv.code_returned = gv.return_codes['missing function argument'] print('Usage: generateRNG.py xmlfile') else: generate_rng_for(args[1]) if gv.code_returned == gv.return_codes['success']: print('code successfully written') else: print('writing rng failed') return gv.code_returned if __name__ == '__main__': main(sys.argv)
Hello! My name is Maggie Ness. You may be familiar with some of my podcasts from Heart of Mary. I recently gave a talk at my home parish for a group called Monthly Dose of Grace, whose mission is to bring women together to learn more about their faith. This is an audio-only recording. The audio from the movie clips is included, but please feel free to follow the YouTube links below or follow along on Netflix using the time stamps provided.
#!/usr/bin/env python """ Test classes for writing and reading signals to and from HDF files. """ import unittest import os import numpy as np import bionet.utils.signal_io as s filename = 'test_signal_io_data.h5' block_size = 10000 class SignalIOTestCase(unittest.TestCase): def setUp(self): '''Generate and save test data.''' N = 1000000 self.u = np.random.rand(N) w = s.WriteArray(filename) w.write(self.u) w.close() def tearDown(self): '''Clean up test file.''' os.remove(filename) def testReadOneBlock(self): '''Test one-block read of saved data.''' r = s.ReadArray(filename) u_read = r.read() r.close() assert all(self.u==u_read),'read block does not match original block' def testReadManyBlocks(self): '''Test multi-block read of saved data.''' r = s.ReadArray(filename) temp = [] while True: data_block = r.read(block_size) if not len(data_block): break temp += data_block.tolist() u_read = np.array(temp) r.close() assert all(self.u==u_read),'read block does not match original block' if __name__ == "__main__": unittest.main()
970123 Called Morris on Windows project; confrontation to improve meetings.
Follow up ref SDS 40 0000, ref SDS 39 0000. This began as a Chips matter, but it turned out to be an Intel matter. [On 980307 Andy Grove's book "Only the Paranoid Survive" mentions confrontation methods used at Intel meetings.] This is somewhat analogous to our meetings on Asilomar, see 960223, ref SDS 38 0000, and the initial work on the SDS data structure, ref OF 1 0000.
# Copyright 2020, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia.""" from absl.testing import absltest import numpy as np from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import PrivacyReportMetadata from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import create_seq2seq_attacker_data from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import run_seq2seq_attack from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import Seq2SeqAttackInputData class Seq2SeqAttackInputDataTest(absltest.TestCase): def test_validator(self): valid_logits_train = iter([np.array([]), np.array([])]) valid_logits_test = iter([np.array([]), np.array([])]) valid_labels_train = iter([np.array([]), np.array([])]) valid_labels_test = iter([np.array([]), np.array([])]) invalid_logits_train = [] invalid_logits_test = [] invalid_labels_train = [] invalid_labels_test = [] self.assertRaises( ValueError, Seq2SeqAttackInputData(logits_train=valid_logits_train).validate) self.assertRaises( ValueError, Seq2SeqAttackInputData(labels_train=valid_labels_train).validate) self.assertRaises( ValueError, Seq2SeqAttackInputData(logits_test=valid_logits_test).validate) self.assertRaises( ValueError, Seq2SeqAttackInputData(labels_test=valid_labels_test).validate) self.assertRaises(ValueError, Seq2SeqAttackInputData(vocab_size=0).validate) self.assertRaises(ValueError, Seq2SeqAttackInputData(train_size=0).validate) self.assertRaises(ValueError, Seq2SeqAttackInputData(test_size=0).validate) self.assertRaises(ValueError, Seq2SeqAttackInputData().validate) # Tests that both logits and labels must be set. self.assertRaises( ValueError, Seq2SeqAttackInputData( logits_train=valid_logits_train, logits_test=valid_logits_test, vocab_size=0, train_size=0, test_size=0).validate) self.assertRaises( ValueError, Seq2SeqAttackInputData( labels_train=valid_labels_train, labels_test=valid_labels_test, vocab_size=0, train_size=0, test_size=0).validate) # Tests that vocab, train, test sizes must all be set. 
self.assertRaises( ValueError, Seq2SeqAttackInputData( logits_train=valid_logits_train, logits_test=valid_logits_test, labels_train=valid_labels_train, labels_test=valid_labels_test).validate) self.assertRaises( ValueError, Seq2SeqAttackInputData( logits_train=invalid_logits_train, logits_test=invalid_logits_test, labels_train=invalid_labels_train, labels_test=invalid_labels_test, vocab_size=0, train_size=0, test_size=0).validate) class Seq2SeqTrainedAttackerTest(absltest.TestCase): def test_create_seq2seq_attacker_data_logits_and_labels(self): attack_input = Seq2SeqAttackInputData( logits_train=iter([ np.array([ np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32), np.array([[0.4, 0.5, 0.1]], dtype=np.float32) ], dtype=object), np.array( [np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)], dtype=object), np.array([ np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32), np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32) ], dtype=object) ]), logits_test=iter([ np.array([ np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32) ], dtype=object), np.array([ np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32), np.array([[0.3, 0.35, 0.35]], dtype=np.float32) ], dtype=object) ]), labels_train=iter([ np.array([ np.array([2, 0], dtype=np.float32), np.array([1], dtype=np.float32) ], dtype=object), np.array([np.array([1, 0], dtype=np.float32)], dtype=object), np.array([ np.array([0, 1], dtype=np.float32), np.array([1, 2], dtype=np.float32) ], dtype=object) ]), labels_test=iter([ np.array([np.array([2, 1], dtype=np.float32)]), np.array([ np.array([2, 0], dtype=np.float32), np.array([1], dtype=np.float32) ], dtype=object) ]), vocab_size=3, train_size=3, test_size=2) privacy_report_metadata = PrivacyReportMetadata() attacker_data = create_seq2seq_attacker_data( attack_input_data=attack_input, test_fraction=0.25, balance=False, privacy_report_metadata=privacy_report_metadata) self.assertLen(attacker_data.features_train, 3) self.assertLen(attacker_data.features_test, 2) for _, feature in enumerate(attacker_data.features_train): self.assertLen(feature, 1) # each feature has one average rank # Tests that fields of PrivacyReportMetadata are populated. 
self.assertIsNotNone(privacy_report_metadata.loss_train) self.assertIsNotNone(privacy_report_metadata.loss_test) self.assertIsNotNone(privacy_report_metadata.accuracy_train) self.assertIsNotNone(privacy_report_metadata.accuracy_test) def test_balanced_create_seq2seq_attacker_data_logits_and_labels(self): attack_input = Seq2SeqAttackInputData( logits_train=iter([ np.array([ np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32), np.array([[0.4, 0.5, 0.1]], dtype=np.float32) ], dtype=object), np.array( [np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)], dtype=object), np.array([ np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32), np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32) ], dtype=object) ]), logits_test=iter([ np.array([ np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32) ], dtype=object), np.array([ np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32), np.array([[0.3, 0.35, 0.35]], dtype=np.float32) ], dtype=object), np.array([ np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32) ], dtype=object) ]), labels_train=iter([ np.array([ np.array([2, 0], dtype=np.float32), np.array([1], dtype=np.float32) ], dtype=object), np.array([np.array([1, 0], dtype=np.float32)], dtype=object), np.array([ np.array([0, 1], dtype=np.float32), np.array([1, 2], dtype=np.float32) ], dtype=object) ]), labels_test=iter([ np.array([np.array([2, 1], dtype=np.float32)]), np.array([ np.array([2, 0], dtype=np.float32), np.array([1], dtype=np.float32) ], dtype=object), np.array([np.array([2, 1], dtype=np.float32)]) ]), vocab_size=3, train_size=3, test_size=3) privacy_report_metadata = PrivacyReportMetadata() attacker_data = create_seq2seq_attacker_data( attack_input_data=attack_input, test_fraction=0.33, balance=True, privacy_report_metadata=privacy_report_metadata) self.assertLen(attacker_data.features_train, 4) self.assertLen(attacker_data.features_test, 2) for _, feature in enumerate(attacker_data.features_train): self.assertLen(feature, 1) # each feature has one average rank # Tests that fields of PrivacyReportMetadata are populated. 
self.assertIsNotNone(privacy_report_metadata.loss_train) self.assertIsNotNone(privacy_report_metadata.loss_test) self.assertIsNotNone(privacy_report_metadata.accuracy_train) self.assertIsNotNone(privacy_report_metadata.accuracy_test) def _get_batch_logits_and_labels(num_sequences, max_tokens_in_sequence, vocab_size): num_tokens_in_sequence = np.random.choice(max_tokens_in_sequence, num_sequences) + 1 batch_logits, batch_labels = [], [] for num_tokens in num_tokens_in_sequence: logits, labels = _get_sequence_logits_and_labels(num_tokens, vocab_size) batch_logits.append(logits) batch_labels.append(labels) return np.array( batch_logits, dtype=object), np.array( batch_labels, dtype=object) def _get_sequence_logits_and_labels(num_tokens, vocab_size): sequence_logits = [] for _ in range(num_tokens): token_logits = np.random.random(vocab_size) token_logits /= token_logits.sum() sequence_logits.append(token_logits) sequence_labels = np.random.choice(vocab_size, num_tokens) return np.array( sequence_logits, dtype=np.float32), np.array( sequence_labels, dtype=np.float32) def get_seq2seq_test_input(n_train, n_test, max_seq_in_batch, max_tokens_in_sequence, vocab_size, seed=None): """Returns example inputs for attacks on seq2seq models.""" if seed is not None: np.random.seed(seed=seed) logits_train, labels_train = [], [] for _ in range(n_train): num_sequences = np.random.choice(max_seq_in_batch, 1)[0] + 1 batch_logits, batch_labels = _get_batch_logits_and_labels( num_sequences, max_tokens_in_sequence, vocab_size) logits_train.append(batch_logits) labels_train.append(batch_labels) logits_test, labels_test = [], [] for _ in range(n_test): num_sequences = np.random.choice(max_seq_in_batch, 1)[0] + 1 batch_logits, batch_labels = _get_batch_logits_and_labels( num_sequences, max_tokens_in_sequence, vocab_size) logits_test.append(batch_logits) labels_test.append(batch_labels) return Seq2SeqAttackInputData( logits_train=iter(logits_train), logits_test=iter(logits_test), labels_train=iter(labels_train), labels_test=iter(labels_test), vocab_size=vocab_size, train_size=n_train, test_size=n_test) class RunSeq2SeqAttackTest(absltest.TestCase): def test_run_seq2seq_attack_size(self): result = run_seq2seq_attack( get_seq2seq_test_input( n_train=10, n_test=5, max_seq_in_batch=3, max_tokens_in_sequence=5, vocab_size=2)) self.assertLen(result.single_attack_results, 1) def test_run_seq2seq_attack_trained_sets_attack_type(self): result = run_seq2seq_attack( get_seq2seq_test_input( n_train=10, n_test=5, max_seq_in_batch=3, max_tokens_in_sequence=5, vocab_size=2)) seq2seq_result = list(result.single_attack_results)[0] self.assertEqual(seq2seq_result.attack_type, AttackType.LOGISTIC_REGRESSION) def test_run_seq2seq_attack_calculates_correct_auc(self): result = run_seq2seq_attack( get_seq2seq_test_input( n_train=20, n_test=10, max_seq_in_batch=3, max_tokens_in_sequence=5, vocab_size=3, seed=12345), balance_attacker_training=False) seq2seq_result = list(result.single_attack_results)[0] np.testing.assert_almost_equal( seq2seq_result.roc_curve.get_auc(), 0.63, decimal=2) def test_run_seq2seq_attack_calculates_correct_metadata(self): attack_input = Seq2SeqAttackInputData( logits_train=iter([ np.array([ np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32), np.array([[0.4, 0.5, 0.1]], dtype=np.float32) ], dtype=object), np.array( [np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)], dtype=object), np.array([ np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32), np.array([[0, 1, 0], [0.2, 0.1, 0.7]], 
dtype=np.float32) ], dtype=object), np.array([ np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32), np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32) ], dtype=object) ]), logits_test=iter([ np.array([ np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32) ], dtype=object), np.array([ np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32), np.array([[0.3, 0.35, 0.35]], dtype=np.float32) ], dtype=object), np.array([ np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32) ], dtype=object), np.array([ np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32) ], dtype=object) ]), labels_train=iter([ np.array([ np.array([2, 0], dtype=np.float32), np.array([1], dtype=np.float32) ], dtype=object), np.array([np.array([1, 0], dtype=np.float32)], dtype=object), np.array([ np.array([0, 1], dtype=np.float32), np.array([1, 2], dtype=np.float32) ], dtype=object), np.array([ np.array([0, 0], dtype=np.float32), np.array([0, 1], dtype=np.float32) ], dtype=object) ]), labels_test=iter([ np.array([np.array([2, 1], dtype=np.float32)]), np.array([ np.array([2, 0], dtype=np.float32), np.array([1], dtype=np.float32) ], dtype=object), np.array([np.array([2, 1], dtype=np.float32)]), np.array([np.array([2, 1], dtype=np.float32)]), ]), vocab_size=3, train_size=4, test_size=4) result = run_seq2seq_attack(attack_input, balance_attacker_training=False) metadata = result.privacy_report_metadata np.testing.assert_almost_equal(metadata.loss_train, 0.91, decimal=2) np.testing.assert_almost_equal(metadata.loss_test, 1.58, decimal=2) np.testing.assert_almost_equal(metadata.accuracy_train, 0.77, decimal=2) np.testing.assert_almost_equal(metadata.accuracy_test, 0.67, decimal=2) if __name__ == '__main__': absltest.main()
Windows Phone is the latest smartphone platform turning heads with its powerful social features. Here at iApps, we take full advantage of the brilliant platform Microsoft provides for developers by designing and developing premium applications for Windows smartphones. From applications made available to the public to powerful internal business tools, iApps are experts in developing mobile applications for this unique and user-friendly platform.
# -*- coding: utf-8 -*- """ Created on Tue Dec 1 08:04:28 2015 Module: bicm - Bipartite Configuration Model Author: Mika Straka Description: Implementation of the Bipartite Configuration Model (BiCM) for binary undirected bipartite networks [Saracco2015]_. Given the biadjacency matrix of a bipartite graph in the form of a binary array as input, the module allows the user to calculate the biadjacency matrix of the ensemble average graph :math:`<G>^*` of the BiCM null model. The matrix entries correspond to the link probabilities :math:`<G>^*_{rc} = p_{rc}` between nodes of the two distinct bipartite node sets. Subsequently, one can calculate the p-values of the node similarities for nodes in the same bipartite layer [Saracco2017]_. Usage: Be ``mat`` a two-dimensional binary NumPy array. The nodes of the two bipartite layers are ordered along the rows and columns, respectively. In the algorithm, the two layers are identified by the boolean values ``True`` for the **row-nodes** and ``False`` for the **column-nodes**. Import the module and initialize the Bipartite Configuration Model:: >>> from src.bicm import BiCM >>> cm = BiCM(bin_mat=mat) To create the biadjacency matrix of the BiCM, use:: >>> cm.make_bicm() .. note:: Note that ``make_bicm`` outputs a *status message* in the console, which informs the user whether the underlying numerical solver has converged to a solution. The function is based on the ``scipy.optimize.root`` routine of the `SciPy package <http://scipy.org>`_ to solve a log-likelihood maximization problem and uses thus the same arguments (except for *fun* and *args*, which are specified in our problem). This means that the user has full control over the selection of a solver, the initial conditions, tolerance, etc. As a matter of fact, it may happen that the default function call ``make_bicm()`` results in an unsuccessful solver, which requires adjusting the function arguments. In this case, please refer the description of the functions :func:`BiCM.make_bicm` and :func:`BiCM.solve_equations`, and the `scipy.optimize.root documentation <https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/ scipy.optimize.root.html>`_. The biadjacency matrix of the BiCM null model can be saved in *<filename>*:: >>> cm.save_biadjacency(filename=<filename>, delim='\t') By default, the file is saved in a human-readable ``.csv`` format with tab delimiters, which can be changed using the keyword ``delim``. The information can also be saved as a binary NumPy file ``.npy`` by using:: >>> cm.save_biadjacency(filename=<filename>, binary=True) If the file is not binary, it should end with ``.csv``. If it is binary instead, NumPy automatically attaches the ending ``.npy``. In order to analyze the similarity of the **row-nodes** and to save the p-values of the corresponding :math:`\\Lambda`-motifs (i.e. of the number of shared neighbors [Saracco2017]_), use:: >>> cm.lambda_motifs(True, filename=<filename>) For the **column-nodes**, use:: >>> cm.lambda_motifs(False, filename=<filename>) By default, the resulting p-values are saved as binary NumPy file to reduce the required disk space, and the format suffix ``.npy`` is appended. If the file should be saved in a human-readable ``.csv`` format, use:: >>> cm.lambda_motifs(True, filename=<filename>, delim='\\t', \ binary=False) or analogously:: >>> cm.lambda_motifs(False, filename=<filename>, delim='\\t', \ binary=False) .. 
note:: The p-values are saved as a one-dimensional array with index :math:`k \\in \\left[0, \\ldots, \\binom{N}{2} - 1\\right]` for a bipartite layer of :math:`N` nodes. The indices ``(i, j)`` of the nodes corresponding to entry ``k`` in the array can be reconstructed using the method :func:`BiCM.flat2_triumat_idx`. The number of nodes ``N`` can be recovered from the length of the array with :func:`BiCM.flat2_triumat_dim`. Subsequently, the p-values can be used to perform a multiple hypotheses testing of the node similarities and to obtain statistically validated monopartite projections [Saracco2017]_. The p-values are calculated in parallel by default, see :ref:`parallel` for details. .. note:: Since the calculation of the p-values is computationally demanding, the ``bicm`` module uses the Python `multiprocessing <https://docs.python.org/2/library/multiprocessing.html>`_ package by default for this purpose. The number of parallel processes depends on the number of CPUs of the work station (see variable ``num_procs`` in the method :func:`BiCM.get_pvalues_q`). If the calculation should **not** be performed in parallel, use:: >>> cm.lambda_motifs(<bool>, parallel=False) instead of:: >>> cm.lambda_motifs(<bool>) References: .. [Saracco2015] `F. Saracco, R. Di Clemente, A. Gabrielli, T. Squartini, Randomizing bipartite networks: the case of the World Trade Web, Scientific Reports 5, 10595 (2015) <http://www.nature.com/articles/srep10595>`_ .. [Saracco2017] `F. Saracco, M. J. Straka, R. Di Clemente, A. Gabrielli, G. Caldarelli, and T. Squartini, Inferring monopartite projections of bipartite networks: an entropy-based approach, New J. Phys. 19, 053022 (2017) <http://stacks.iop.org/1367-2630/19/i=5/a=053022>`_ """ import ctypes import multiprocessing import scipy.optimize as opt import numpy as np from poibin.poibin import PoiBin class BiCM(object): """Bipartite Configuration Model for undirected binary bipartite networks. This class implements the Bipartite Configuration Model (BiCM), which can be used as a null model for the analysis of undirected and binary bipartite networks. The class provides methods for calculating the biadjacency matrix of the null model and for quantifying node similarities in terms of p-values. """ def __init__(self, bin_mat): """Initialize the parameters of the BiCM. :param bin_mat: binary input matrix describing the biadjacency matrix of a bipartite graph with the nodes of one layer along the rows and the nodes of the other layer along the columns. :type bin_mat: numpy.array """ self.bin_mat = np.array(bin_mat, dtype=np.int64) self.check_input_matrix_is_binary() [self.num_rows, self.num_columns] = self.bin_mat.shape self.dseq = self.set_degree_seq() self.dim = self.dseq.size self.sol = None # solution of the equation system self.adj_matrix = None # biadjacency matrix of the null model self.input_queue = None # queue for parallel processing self.output_queue = None # queue for parallel processing def check_input_matrix_is_binary(self): """Check that the input matrix is binary, i.e. entries are 0 or 1. :raise AssertionError: raise an error if the input matrix is not binary """ assert np.all(np.logical_or(self.bin_mat == 0, self.bin_mat == 1)), \ "Input matrix is not binary." def set_degree_seq(self): """Return the node degree sequence of the input matrix. 
:returns: node degree sequence [degrees row-nodes, degrees column-nodes] :rtype: numpy.array :raise AssertionError: raise an error if the length of the returned degree sequence does not correspond to the total number of nodes """ dseq = np.empty(self.num_rows + self.num_columns) dseq[self.num_rows:] = np.squeeze(np.sum(self.bin_mat, axis=0)) dseq[:self.num_rows] = np.squeeze(np.sum(self.bin_mat, axis=1)) assert dseq.size == (self.num_rows + self.num_columns) return dseq def make_bicm(self, x0=None, method='hybr', jac=None, tol=None, callback=None, options=None): """Create the biadjacency matrix of the BiCM null model. Solve the log-likelihood maximization problem to obtain the BiCM null model which respects constraints on the degree sequence of the input matrix. The problem is solved using ``scipy``'s root function with the solver defined by ``method``. The status of the solver after running ``scipy.root``and the difference between the network and BiCM degrees are printed in the console. The default solver is the modified Powell method ``hybr``. Least-squares can be chosen with ``method='lm'`` for the Levenberg-Marquardt approach. Depending on the solver, keyword arguments ``kwargs`` can be passed to the solver. Please refer to the `scipy.optimize.root documentation <https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/ scipy.optimize.root.html>`_ for detailed descriptions. .. note:: It can happen that the solver ``method`` used by ``scipy.root`` does not converge to a solution. In this case, please try another ``method`` or different initial conditions and refer to the `scipy.optimize.root documentation <https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/ scipy.optimize.root.html>`_. :param x0: initial guesses for the solutions. The first entries are the initial guesses for the row-nodes, followed by the initial guesses for the column-nodes. :type x0: 1d numpy.array, optional :param method: type of solver, default is ‘hybr’. For other solvers, see the `scipy.optimize.root documentation <https://docs.scipy.org/doc/ scipy-0.19.0/reference/generated/scipy.optimize.root.html>`_. :type method: str, optional :param jac: Jacobian of the system :type jac: bool or callable, optional :param tol: tolerance for termination. For detailed control, use solver-specific options. :type tol: float, optional :param callback: optional callback function to be called at every iteration as ``callback(self.equations, x)``, see ``scipy.root`` documentation :type callback: function, optional :param options: a dictionary of solver options, e.g. ``xtol`` or ``maxiter``, see scipy.root documentation :type options: dict, optional :param kwargs: solver-specific options, please refer to the SciPy documentation :raise ValueError: raise an error if not enough initial conditions are provided """ self.sol = self.solve_equations(x0=x0, method=method, jac=jac, tol=tol, callback=callback, options=options) # create BiCM biadjacency matrix: self.adj_matrix = self.get_biadjacency_matrix(self.sol.x) # self.print_max_degree_differences() # assert self.test_average_degrees(eps=1e-2) # ------------------------------------------------------------------------------ # Solve coupled nonlinear equations and get BiCM biadjacency matrix # ------------------------------------------------------------------------------ def solve_equations(self, x0=None, method='hybr', jac=None, tol=None, callback=None, options=None): """Solve the system of equations of the maximum log-likelihood problem. 
The system of equations is solved using ``scipy``'s root function with the solver defined by ``method``. The solutions correspond to the Lagrange multipliers .. math:: x_i = \exp(-\\theta_i). Depending on the solver, keyword arguments ``kwargs`` can be passed to the solver. Please refer to the `scipy.optimize.root documentation <https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/ scipy.optimize.root.html>`_ for detailed descriptions. The default solver is the modified Powell method ``hybr``. Least-squares can be chosen with ``method='lm'`` for the Levenberg-Marquardt approach. .. note:: It can happen that the solver ``method`` used by ``scipy.root`` does not converge to a solution. In this case, please try another ``method`` or different initial conditions and refer to the `scipy.optimize.root documentation <https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/ scipy.optimize.root.html>`_. :param x0: initial guesses for the solutions. The first entries are the initial guesses for the row-nodes, followed by the initial guesses for the column-nodes. :type x0: 1d numpy.array, optional :param method: type of solver, default is ‘hybr’. For other solvers, see the `scipy.optimize.root documentation <https://docs.scipy.org/doc/ scipy-0.19.0/reference/generated/scipy.optimize.root.html>`_. :type method: str, optional :param jac: Jacobian of the system :type jac: bool or callable, optional :param tol: tolerance for termination. For detailed control, use solver-specific options. :type tol: float, optional :param callback: optional callback function to be called at every iteration as ``callback(self.equations, x)``, see ``scipy.root`` documentation :type callback: function, optional :param options: a dictionary of solver options, e.g. ``xtol`` or ``maxiter``, see scipy.root documentation :type options: dict, optional :param kwargs: solver-specific options, please refer to the SciPy documentation :returns: solution of the equation system :rtype: scipy.optimize.OptimizeResult :raise ValueError: raise an error if not enough initial conditions are provided """ # use Jacobian if the hybr solver is chosen if method is 'hybr': jac = self.jacobian # set initial conditions if x0 is None: x0 = self.dseq / np.sqrt(np.sum(self.dseq)) else: if not len(x0) == self.dim: msg = "One initial condition for each parameter is required." raise ValueError(msg) # solve equation system sol = opt.root(fun=self.equations, x0=x0, method=method, jac=jac, tol=tol, options=options, callback=callback) # check whether system has been solved successfully print "Solver successful:", sol.success print sol.message if not sol.success: errmsg = "Try different initial conditions and/or a" + \ "different solver, see documentation at " + \ "https://docs.scipy.org/doc/scipy-0.19.0/reference/" + \ "generated/scipy.optimize.root.html" print errmsg return sol def equations(self, xx): """Return the equations of the log-likelihood maximization problem. Note that the equations for the row-nodes depend only on the column-nodes and vice versa, see [Saracco2015]_. :param xx: Lagrange multipliers which have to be solved :type xx: numpy.array :returns: equations to be solved (:math:`f(x) = 0`) :rtype: numpy.array """ eq = -self.dseq for i in xrange(0, self.num_rows): for j in xrange(self.num_rows, self.dim): dum = xx[i] * xx[j] / (1. + xx[i] * xx[j]) eq[i] += dum eq[j] += dum return eq def jacobian(self, xx): """Return a NumPy array with the Jacobian of the equation system. 
:param xx: Lagrange multipliers which have to be solved :type xx: numpy.array :returns: Jacobian :rtype: numpy.array """ jac = np.zeros((self.dim, self.dim)) for i in xrange(0, self.num_rows): # df_c / df_c' = 0 for all c' != c for j in xrange(self.num_rows, self.dim): # df_c / dx_c != 0 xxi = xx[i] / (1.0 + xx[i] * xx[j]) ** 2 xxj = xx[j] / (1.0 + xx[i] * xx[j]) ** 2 jac[i, i] += xxj jac[i, j] = xxi jac[j, i] = xxj jac[j, j] += xxi return jac def get_biadjacency_matrix(self, xx): """ Calculate the biadjacency matrix of the null model. The biadjacency matrix describes the BiCM null model, i.e. the optimal average graph :math:`<G>^*` with the average link probabilities :math:`<G>^*_{rc} = p_{rc}` , :math:`p_{rc} = \\frac{x_r \\cdot x_c}{1 + x_r\\cdot x_c}.` :math:`x` are the solutions of the equation system which has to be solved for the null model. Note that :math:`r` and :math:`c` are taken from opposite bipartite node sets, thus :math:`r \\neq c`. :param xx: solutions of the equation system (Lagrange multipliers) :type xx: numpy.array :returns: biadjacency matrix of the null model :rtype: numpy.array :raises ValueError: raise an error if :math:`p_{rc} < 0` or :math:`p_{rc} > 1` for any :math:`r, c` """ mat = np.empty((self.num_rows, self.num_columns)) xp = xx[range(self.num_rows, self.dim)] for i in xrange(self.num_rows): mat[i, ] = xx[i] * xp / (1 + xx[i] * xp) # account for machine precision: mat += np.finfo(np.float).eps if np.any(mat < 0): errmsg = 'Error in get_adjacency_matrix: probabilities < 0 in ' \ + str(np.where(mat < 0)) raise ValueError(errmsg) elif np.any(mat > (1. + np.finfo(np.float).eps)): errmsg = 'Error in get_adjacency_matrix: probabilities > 1 in' \ + str(np.where(mat > 1)) raise ValueError(errmsg) assert mat.shape == self.bin_mat.shape, \ "Biadjacency matrix has wrong dimensions." return mat # ------------------------------------------------------------------------------ # Test correctness of results: # ------------------------------------------------------------------------------ def print_max_degree_differences(self): """Print the maximal differences between input network and BiCM degrees. Check that the degree sequence of the solved BiCM null model graph corresponds to the degree sequence of the input graph. """ ave_deg_columns =np.sum(self.adj_matrix, axis=0) ave_deg_rows = np.sum(self.adj_matrix, axis=1) print "Maximal degree differences between data and BiCM:" print "Columns:", np.abs(np.max( self.dseq[self.num_rows:] - ave_deg_columns)) print "Rows:", np.abs(np.max( self.dseq[:self.num_rows] - ave_deg_rows)) def test_average_degrees(self, eps=1e-2): """Test the constraints on the node degrees. Check that the degree sequence of the solved BiCM null model graph corresponds to the degree sequence of the input graph. 
:param eps: maximum difference between degrees of the real network and the BiCM :type eps: float """ ave_deg_columns = np.squeeze(np.sum(self.adj_matrix, axis=0)) ave_deg_rows = np.squeeze(np.sum(self.adj_matrix, axis=1)) c_derr = np.where(np.logical_or( # average degree too small: ave_deg_rows + eps < self.dseq[:self.num_rows], # average degree too large: ave_deg_rows - eps > self.dseq[:self.num_rows])) p_derr = np.where(np.logical_or( ave_deg_columns + eps < self.dseq[self.num_rows:], ave_deg_columns - eps > self.dseq[self.num_rows:])) # Check row-nodes degrees: if not np.array_equiv(c_derr, np.array([])): print '...inaccurate row-nodes degrees:' for i in c_derr[0]: print 'Row-node ', i, ':', print 'input:', self.dseq[i], 'average:', ave_deg_rows[i] return False # Check column-nodes degrees: if not np.array_equiv(p_derr, np.array([])): print '...inaccurate column-nodes degrees:' for i in c_derr[0]: print 'Column-node ', i, ':', print 'input:', self.dseq[i + self.num_rows], \ 'average:', ave_deg_columns[i] return False return True # ------------------------------------------------------------------------------ # Lambda motifs # ------------------------------------------------------------------------------ def lambda_motifs(self, bip_set, parallel=True, filename=None, delim='\t', binary=True, num_chunks=4): """Calculate and save the p-values of the :math:`\\Lambda`-motifs. For each node couple in the bipartite layer specified by ``bip_set``, calculate the p-values of the corresponding :math:`\\Lambda`-motifs according to the link probabilities in the biadjacency matrix of the BiCM null model. The results can be saved either as a binary ``.npy`` or a human-readable ``.csv`` file, depending on ``binary``. .. note:: * The total number of p-values that are calculated is split into ``num_chunks`` chunks, which are processed sequentially in order to avoid memory allocation errors. Note that a larger value of ``num_chunks`` will lead to less memory occupation, but comes at the cost of slower processing speed. * The output consists of a one-dimensional array of p-values. If the bipartite layer ``bip_set`` contains ``n`` nodes, this means that the array will contain :math:`\\binom{n}{2}` entries. The indices ``(i, j)`` of the nodes corresponding to entry ``k`` in the array can be reconstructed using the method :func:`BiCM.flat2_triumat_idx`. The number of nodes ``n`` can be recovered from the length of the array with :func:`BiCM.flat2_triumat_dim` * If ``binary == False``, the ``filename`` should end with ``.csv``. If ``binary == True``, it will be saved in binary NumPy ``.npy`` format and the suffix ``.npy`` will be appended automatically. By default, the file is saved in binary format. 
:param bip_set: select row-nodes (``True``) or column-nodes (``False``) :type bip_set: bool :param parallel: select whether the calculation of the p-values should be run in parallel (``True``) or not (``False``) :type parallel: bool :param filename: name of the output file :type filename: str :param delim: delimiter between entries in the ``.csv``file, default is ``\\t`` :type delim: str :param binary: if ``True``, the file will be saved in the binary NumPy format ``.npy``, otherwise as ``.csv`` :type binary: bool :param num_chunks: number of chunks of p-value calculations that are performed sequentially :type num_chunks: int :raise ValueError: raise an error if the parameter ``bip_set`` is neither ``True`` nor ``False`` """ if (type(bip_set) == bool) and bip_set: biad_mat = self.adj_matrix bin_mat = self.bin_mat elif (type(bip_set) == bool) and not bip_set: biad_mat = np.transpose(self.adj_matrix) bin_mat = np.transpose(self.bin_mat) else: errmsg = "'" + str(bip_set) + "' " + 'not supported.' raise NameError(errmsg) n = self.get_triup_dim(bip_set) pval = np.ones(shape=(n, ), dtype='float') * (-0.1) # handle layers of dimension 2 separately if n == 1: nlam = np.dot(bin_mat[0, :], bin_mat[1, :].T) plam = biad_mat[0, :] * biad_mat[1, :] pb = PoiBin(plam) pval[0] = pb.pval(nlam) else: # if the dimension of the network is too large, split the # calculations # of the p-values in ``m`` intervals to avoid memory # allocation errors if n > 100: kk = self.split_range(n, m=num_chunks) else: kk = [0] # calculate p-values for index intervals for i in range(len(kk) - 1): k1 = kk[i] k2 = kk[i + 1] nlam = self.get_lambda_motif_block(bin_mat, k1, k2) plam = self.get_plambda_block(biad_mat, k1, k2) pv = self.get_pvalues_q(plam, nlam, k1, k2) pval[k1:k2] = pv # last interval k1 = kk[len(kk) - 1] k2 = n - 1 nlam = self.get_lambda_motif_block(bin_mat, k1, k2) plam = self.get_plambda_block(biad_mat, k1, k2) # for the last entry we have to INCLUDE k2, thus k2 + 1 pv = self.get_pvalues_q(plam, nlam, k1, k2 + 1) pval[k1:] = pv # check that all p-values have been calculated # assert np.all(pval >= 0) and np.all(pval <= 1) if filename is None: fname = 'p_values_' + str(bip_set) if not binary: fname += '.csv' else: fname = filename # account for machine precision: pval += np.finfo(np.float).eps self.save_array(pval, filename=fname, delim=delim, binary=binary) def get_lambda_motif_block(self, mm, k1, k2): """Return a subset of :math:`\\Lambda`-motifs as observed in ``mm``. Given the binary input matrix ``mm``, count the number of :math:`\\Lambda`-motifs for all the node couples specified by the interval :math:`\\left[k_1, k_2\\right[`. .. note:: * The :math:`\\Lambda`-motifs are counted between the **row-nodes** of the input matrix ``mm``. * If :math:`k_2 \equiv \\binom{mm.shape[0]}{2}`, the interval becomes :math:`\\left[k_1, k_2\\right]`. :param mm: binary matrix :type mm: numpy.array :param k1: lower interval limit :type k1: int :param k2: upper interval limit :type k2: int :returns: array of observed :math:`\\Lambda`-motifs :rtype: numpy.array """ ndim = mm.shape[0] # if the upper limit is the largest possible index, i.e. 
corresponds to # the node couple (ndim - 2, ndim - 1), where node indices start from 0, # include the result if k2 == (ndim * (ndim - 1) / 2 - 1): flag = 1 else: flag = 0 aux = np.ones(shape=(k2 - k1 + flag, )) * (-1) # -1 as a test [i1, j1] = self.flat2triumat_idx(k1, ndim) [i2, j2] = self.flat2triumat_idx(k2, ndim) # if limits have the same row index if i1 == i2: aux[:k2 - k1] = np.dot(mm[i1, :], mm[j1:j2, :].T) # if limits have different row indices else: k = 0 # get values for lower limit row fi = np.dot(mm[i1, :], mm[j1:, :].T) aux[:len(fi)] = fi k += len(fi) # get values for intermediate rows for i in range(i1 + 1, i2): mid = np.dot(mm[i, :], mm[i + 1:, :].T) aux[k : k + len(mid)] = mid k += len(mid) # get values for upper limit row if flag == 1: aux[-1] = np.dot(mm[ndim - 2, :], mm[ndim - 1, :].T) else: la = np.dot(mm[i2, :], mm[i2 + 1 : j2, :].T) aux[k:] = la return aux def get_plambda_block(self, biad_mat, k1, k2): """Return a subset of the :math:`\\Lambda` probability matrix. Given the biadjacency matrix ``biad_mat`` with :math:`\\mathbf{M}_{rc} = p_{rc}`, which describes the probabilities of row-node ``r`` and column-node ``c`` being linked, the method returns the matrix :math:`P(\\Lambda)_{ij} = \\left(M_{i\\alpha_1} \\cdot M_{j\\alpha_1}, M_{i\\alpha_2} \\cdot M_{j\\alpha_2}, \\ldots\\right),` for all the node couples in the interval :math:`\\left[k_1, k_2\\right[`. :math:`(i, j)` are two **row-nodes** of ``biad_mat`` and :math:`\\alpha_k` runs over the nodes in the opposite layer. .. note:: * The probabilities are calculated between the **row-nodes** of the input matrix ``biad_mat``. * If :math:`k_2 \equiv \\binom{biad\\_mat.shape[0]}{2}`, the interval becomes :math:`\\left[k1, k2\\right]`. :param biad_mat: biadjacency matrix :type biad_mat: numpy.array :param k1: lower interval limit :type k1: int :param k2: upper interval limit :type k2: int :returns: :math:`\\Lambda`-motif probability matrix :rtype: numpy.array """ [ndim1, ndim2] = biad_mat.shape # if the upper limit is the largest possible index, i.e. corresponds to # the node couple (ndim - 2, ndim - 1), where node indices start from 0, # include the result if k2 == (ndim1 * (ndim1 - 1) / 2 - 1): flag = 1 else: flag = 0 paux = np.ones(shape=(k2 - k1 + flag, ndim2), dtype='float') * (-0.1) [i1, j1] = self.flat2triumat_idx(k1, ndim1) [i2, j2] = self.flat2triumat_idx(k2, ndim1) # if limits have the same row index if i1 == i2: paux[:k2 - k1, :] = biad_mat[i1, ] * biad_mat[j1:j2, :] # if limits have different indices else: k = 0 # get values for lower limit row fi = biad_mat[i1, :] * biad_mat[j1:, :] paux[:len(fi), :] = fi k += len(fi) # get values for intermediate rows for i in range(i1 + 1, i2): mid = biad_mat[i, :] * biad_mat[i + 1:, :] paux[k : k + len(mid), :] = mid k += len(mid) # get values for upper limit row if flag == 1: paux[-1, :] = biad_mat[ndim1 - 2, :] * biad_mat[ndim1 - 1, :] else: la = biad_mat[i2, :] * biad_mat[i2 + 1:j2, :] paux[k:, :] = la return paux def get_pvalues_q(self, plam_mat, nlam_mat, k1, k2, parallel=True): """Calculate the p-values of the observed :math:`\\Lambda`-motifs. For each number of :math:`\\Lambda`-motifs in ``nlam_mat`` for the node interval :math:`\\left[k1, k2\\right[`, construct the Poisson Binomial distribution using the corresponding probabilities in ``plam_mat`` and calculate the p-value. 
:param plam_mat: array containing the list of probabilities for the single observations of :math:`\\Lambda`-motifs :type plam_mat: numpy.array (square matrix) :param nlam_mat: array containing the observations of :math:`\\Lambda`-motifs :type nlam_mat: numpy.array (square matrix) :param k1: lower interval limit :type k1: int :param k2: upper interval limit :type k2: int :param parallel: if ``True``, the calculation is executed in parallel; if ``False``, only one process is started :type parallel: bool """ n = len(nlam_mat) # the array must be sharable to be accessible by all processes shared_array_base = multiprocessing.Array(ctypes.c_double, n) pval_mat = np.frombuffer(shared_array_base.get_obj()) # number of processes running in parallel has to be tested. # good guess is multiprocessing.cpu_count() +- 1 if parallel: num_procs = multiprocessing.cpu_count() - 1 elif not parallel: num_procs = 1 else: num_procs = 1 self.input_queue = multiprocessing.Queue() self.output_queue = multiprocessing.Queue() p_inqueue = multiprocessing.Process(target=self.add2inqueue, args=(num_procs, plam_mat, nlam_mat, k1, k2)) p_outqueue = multiprocessing.Process(target=self.outqueue2pval_mat, args=(num_procs, pval_mat)) ps = [multiprocessing.Process(target=self.pval_process_worker, args=()) for i in range(num_procs)] # start queues p_inqueue.start() p_outqueue.start() # start processes for p in ps: p.start() # each process has an id, p.pid p_inqueue.join() for p in ps: p.join() p_outqueue.join() return pval_mat def add2inqueue(self, nprocs, plam_mat, nlam_mat, k1, k2): """Add elements to the in-queue to calculate the p-values. :param nprocs: number of processes running in parallel :type nprocs: int :param plam_mat: array containing the list of probabilities for the single observations of :math:`\\Lambda`-motifs :type plam_mat: numpy.array (square matrix) :param nlam_mat: array containing the observations of :math:`\\Lambda`-motifs :type nlam_mat: numpy.array (square matrix) :param k1: lower interval limit :type k1: int :param k2: upper interval limit :type k2: int """ n = len(plam_mat) # add tuples of matrix elements and indices to the input queue for k in xrange(k1, k2): self.input_queue.put((k - k1, plam_mat[k - k1, :], nlam_mat[k - k1])) # add as many poison pills "STOP" to the queue as there are workers for i in xrange(nprocs): self.input_queue.put("STOP") def pval_process_worker(self): """Calculate p-values and add them to the out-queue.""" # take elements from the queue as long as the element is not "STOP" for tupl in iter(self.input_queue.get, "STOP"): pb = PoiBin(tupl[1]) pv = pb.pval(int(tupl[2])) # add the result to the output queue self.output_queue.put((tupl[0], pv)) # once all the elements in the input queue have been dealt with, add a # "STOP" to the output queue self.output_queue.put("STOP") def outqueue2pval_mat(self, nprocs, pvalmat): """Put the results from the out-queue into the p-value array.""" # stop the work after having met nprocs times "STOP" for work in xrange(nprocs): for val in iter(self.output_queue.get, "STOP"): k = val[0] pvalmat[k] = val[1] def get_triup_dim(self, bip_set): """Return the number of possible node couples in ``bip_set``. 
:param bip_set: selects row-nodes (``True``) or column-nodes (``False``) :type bip_set: bool :returns: return the number of node couple combinations corresponding to the layer ``bip_set`` :rtype: int :raise ValueError: raise an error if the parameter ``bip_set`` is neither ``True`` nor ``False`` """ if bip_set: return self.triumat2flat_dim(self.num_rows) elif not bip_set: return self.triumat2flat_dim(self.num_columns) else: errmsg = "'" + str(bip_set) + "' " + 'not supported.' raise NameError(errmsg) def split_range(self, n, m=4): """Split the interval :math:`\\left[0,\ldots, n\\right]` in ``m`` parts. :param n: upper limit of the range :type n: int :param m: number of part in which range should be split :type n: int :returns: delimiter indices for the ``m`` parts :rtype: list """ return [i * n / m for i in range(m)] # ------------------------------------------------------------------------------ # Auxiliary methods # ------------------------------------------------------------------------------ @staticmethod def triumat2flat_idx(i, j, n): """Convert an matrix index couple to a flattened array index. Given a square matrix of dimension ``n`` and the index couple ``(i, j)`` *of the upper triangular part* of the matrix, return the index which the matrix element would have in a flattened array. .. note:: * :math:`i \\in [0, ..., n - 1]` * :math:`j \\in [i + 1, ..., n - 1]` * returned index :math:`\\in [0,\\, n (n - 1) / 2 - 1]` :param i: row index :type i: int :param j: column index :type j: int :param n: dimension of the square matrix :type n: int :returns: flattened array index :rtype: int """ return int((i + 1) * n - (i + 2) * (i + 1) / 2. - (n - (j + 1)) - 1) @staticmethod def triumat2flat_dim(n): """Return the size of the triangular part of a ``n x n`` matrix. :param n: the dimension of the square matrix :type n: int :returns: number of elements in the upper triangular part of the matrix (excluding the diagonal) :rtype: int """ return n * (n - 1) / 2 @staticmethod def flat2triumat_dim(k): """Return the dimension of the matrix hosting ``k`` triangular elements. :param k: the number of elements in the upper triangular part of the corresponding square matrix, excluding the diagonal :type k: int :returns: dimension of the corresponding square matrix :rtype: int """ return int(0.5 + np.sqrt(0.25 + 2 * k)) @staticmethod def flat2triumat_idx(k, n): """Convert an array index into the index couple of a triangular matrix. ``k`` is the index of an array of length :math:`\\binom{n}{2}{2}`, which contains the elements of an upper triangular matrix of dimension ``n`` excluding the diagonal. The function returns the index couple :math:`(i, j)` that corresponds to the entry ``k`` of the flat array. .. note:: * :math:`k \\in \left[0,\\ldots, \\binom{n}{2} - 1\\right]` * returned indices: * :math:`i \\in [0,\\ldots, n - 1]` * :math:`j \\in [i + 1,\\ldots, n - 1]` :param k: flattened array index :type k: int :param n: dimension of the square matrix :type n: int :returns: matrix index tuple (row, column) :rtype: tuple """ # row index of array index k in the the upper triangular part of the # square matrix r = n - 2 - int(0.5 * np.sqrt(-8 * k + 4 * n * (n - 1) - 7) - 0.5) # column index of array index k in the the upper triangular part of the # square matrix c = k + 1 + r * (3 - 2 * n + r) / 2 return r, c def save_biadjacency(self, filename, delim='\t', binary=False): """Save the biadjacendy matrix of the BiCM null model. 
        The matrix can either be saved as a binary NumPy ``.npy`` file or as a
        human-readable ``.csv`` file.

        .. note::

            * The relative path has to be provided in the filename, e.g.
              *../data/pvalue_matrix.csv*.

            * If ``binary==True``, NumPy automatically appends the format
              ending ``.npy`` to the file.

        :param filename: name of the output file
        :type filename: str
        :param delim: delimiter between values in file
        :type delim: str
        :param binary: if ``True``, save as binary ``.npy``, otherwise as a
            ``.csv`` file
        :type binary: bool
        """
        self.save_array(self.adj_matrix, filename, delim, binary)

    @staticmethod
    def save_array(mat, filename, delim='\t', binary=False):
        """Save the array ``mat`` in the file ``filename``.

        The array can either be saved as a binary NumPy ``.npy`` file or as a
        human-readable ``.csv`` file.

        .. note::

            * The relative path has to be provided in the filename, e.g.
              *../data/pvalue_matrix.csv*.

            * If ``binary==True``, NumPy automatically appends the format
              ending ``.npy`` to the file.

        :param mat: array
        :type mat: numpy.array
        :param filename: name of the output file
        :type filename: str
        :param delim: delimiter between values in file
        :type delim: str
        :param binary: if ``True``, save as binary ``.npy``, otherwise as a
            ``.csv`` file
        :type binary: bool
        """
        if binary:
            np.save(filename, mat)
        else:
            np.savetxt(filename, mat, delimiter=delim)

################################################################################
# Main
################################################################################

if __name__ == "__main__":
    pass
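A minimal usage sketch for the p-value workflow above. It assumes the class is called BiCM, that its constructor takes the observed binary biadjacency matrix, and that a make_bicm() method solves for the Lagrange multipliers; none of those names appear in this excerpt, so all three are assumptions. Only lambda_motifs and save_biadjacency are taken directly from the code above.

import numpy as np

# Hypothetical toy network: 3 row-nodes x 3 column-nodes.
bin_mat = np.array([[1, 0, 1],
                    [1, 1, 0],
                    [0, 1, 1]])
cm = BiCM(bin_mat=bin_mat)        # assumed constructor signature
cm.make_bicm()                    # assumed solver; fills cm.adj_matrix with the p_rc
cm.save_biadjacency('biadjacency.csv', binary=False)
# p-values of the Lambda-motifs between row-nodes; written to p_values_True.npy
cm.lambda_motifs(True, binary=True)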
Remove 1 tablespoon yogurt from yogurt container. Stir in 1 tablespoon of the diced peaches and 2 teaspoons of the pistachio nuts. Top with remaining peaches and pistachio nuts.
import os import sys from threading import Thread try: from queue import Queue # python 3 from urllib.request import urlretrieve except: from Queue import Queue # python 2 from urllib import urlretrieve home = os.path.expanduser("~") maven_home = os.path.join(home, '.m2/repository') print('MAVEN HOME: %s' %maven_home) def listfiles(path, filter=None): if os.path.isfile(path) or not os.path.isdir(path): return [] files = [os.path.join(path, f) for f in os.listdir(path)] if filter: files = [f for f in files if filter(f)] return files def enqueue(dir_queue, files): for f in files: dir_queue.put(f) def isEnd(files): for f in files: if os.path.isdir(f): return False return True def check(path, files, rmdir=False): if os.path.isfile(path): return if rmdir: jars = listfiles(path, filter=lambda f: f.endswith('.jar')) if len(jars) == 0: print('-------------------%s' %path) for f in files: print(f) os.remove(f) os.rmdir(path) else: caches = listfiles(path, filter=lambda f: f.endswith('.lastUpdated')) for f in caches: print(f) os.remove(f) def clean(num_worker_threads=5, rmdir=False): dir_queue = Queue() files = listfiles(maven_home) enqueue(dir_queue, files) def worker(): while not dir_queue.empty(): path = dir_queue.get() files = listfiles(path) if isEnd(files): check(path, files, rmdir) else: enqueue(dir_queue, files) dir_queue.task_done() for i in range(num_worker_threads): # start threads worker_thread = Thread(target=worker) worker_thread.daemon = True worker_thread.start() dir_queue.join() # block until all tasks are done print print('clean done...') def download(url, path): urlretrieve(url, path) def test_listfiles(): print(listfiles('maven_cleaner.py')) print(listfiles('.')) print([f for f in os.listdir() if f.endswith('.txt')]) def test_main(): test_listfiles() def main(): if len(sys.argv) == 2 and sys.argv[1] == 'T': clean(rmdir=True) else: clean() if __name__ == '__main__': # test_main() main()
Ruskin studied engraving almost as much as drawing. Partly he admired the precision and workmanship required in creating the images, but also he saw them as an important stepping-stone in learning how to draw and how to see clearly. He wrote that the best engravings were kept simple; that the viewer could learn through them what was necessary to include in a drawing and what could be left out. He also encouraged people to look closely at them: to see the detail and learn about the patience and skill required in making great artworks. The small scale of many works in this display shows not just the dedication of the engraver, but also the commitment of Ruskin in looking at the tiniest of details. His comments were designed to help viewers find inspiration in workmanship and design. The works in this section are inspired by Ruskin’s series of lectures given at Oxford University in 1872 called Ariadne Florentina. He concentrated on Botticelli, Holbein and Thomas Bewick, and tried to show that their art was a comment or sign of religious and social reform.
""" Django settings for blog_cms project. Generated by 'django-admin startproject' using Django 1.8.5. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os from django.utils.translation import ugettext_lazy as _ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '(v4)3fy!x-u=%rrqdd_n$mo#pf-gx(!p_)f0^n(aez-r-&mfs@' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'disqus', 'djangobower', 'django_gravatar', 'thumbnailfield', 'blog', ) # Bower components definition BOWER_INSTALLED_APPS = ( 'uikit#2.24.3', 'jquery#2.2.0', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'blog_cms.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, os.path.dirname(__file__), "templates"), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.template.context_processors.debug", "django.template.context_processors.i18n", "django.template.context_processors.media", "django.template.context_processors.static", "django.template.context_processors.tz", "django.contrib.messages.context_processors.messages" ) WSGI_APPLICATION = 'blog_cms.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGES = ( ('en', _('English')), ('pl', _('Polish')), ) LOCALE_PATHS = ( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'locale'), os.path.join(BASE_DIR, 'articles', 'locale'), ) LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", "djangobower.finders.BowerFinder" # Needed for django-bower ) BOWER_COMPONENTS_ROOT = os.path.join(os.path.dirname(BASE_DIR), "components") # Path of bower components 
STATIC_URL = '/static/' STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_root") STATICFILES_DIRS = ( os.path.join(BASE_DIR, os.path.dirname(__file__), "static", "static_files"), ) MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, os.path.dirname(__file__), "static", "media") # Disqus settings DISQUS_API_KEY = 'FOOBARFOOBARFOOBARFOOBARFOOBARF' DISQUS_WEBSITE_SHORTNAME = 'foobar'
Fish Selection in Gravy is rated 1.0 out of 5 by 2. Rated 1 out of 5 by Keys from Lack of Quality I bought this about a month ago, & unfortunately i think it will stay there. Complete waste of money in my opinion, & I think my cats agree judging by how much they waste, & I have to throw away. I have attached a photo of said cats eating another popular cat food, which they would normally leave, over the Sheba.. not worth the money it's printed on in my opinion. Rated 1 out of 5 by Tittle mouse from Do I really have to eat THAT Bought two packs of the new "Craft Collection" this and another to try. Unfortunately, all he does is to sniff it from a distance and maybe a lick, but that's it. Waste of money, will donate the other pack to the cat's rescue.
import json import requests from django.conf import settings from pyconca2017.pycon_schedule.models import Speaker, Presentation class PapercallInterface(object): BASE_URL = 'https://www.papercall.io/api/v1' EVENT_URL = '/event' SUBMISSIONS_LIST_URL = '/submissions' SUBMISSION_GET_URL = '/submissions/{submission_id}' SUBMISSION_RATINGS_URL = '/submissions/{submission_id}/ratings' class SubmissionStates(object): ACCEPTED = 'accepted' SUBMITTED = 'submitted' REJECTED = 'rejected' WAITLIST = 'waitlist' def __init__(self): self.client = requests.Session() self.client.headers.update({'Authorization': settings.PAPERCALL_TOKEN}) def get_submissions(self, state=SubmissionStates.ACCEPTED): """ Iterator """ url = '{}{}'.format(self.BASE_URL, self.SUBMISSIONS_LIST_URL) params = { 'per_page': 100, 'page': 0, 'order': 'created_at', } if state: params['state'] = state while True: params['page'] += 1 response = self.client.get(url, params=params) response_pagination = json.loads(response.headers.get('x-pagination')) data = response.json() for item in data: yield item if response_pagination['last_page']: break class PresentationService(object): def __init__(self): self.papercall = PapercallInterface() def sync_proposals(self, update=False): """ Sync Papercall submissions with the database. :param update: If True, all values will be updated from Papercall. :return: """ for submission in self.papercall.get_submissions(): speaker_data = self._submission_to_speaker_data(submission) talk_data = self._submission_to_presentation_data(submission) speaker = self._sync_speaker(speaker_data, update=update) talk_data['speaker'] = speaker self._sync_presentation(talk_data, update=update) def _submission_to_speaker_data(self, submission): profile = submission['profile'] return { 'full_name': profile['name'], 'bio': profile['bio'], 'twitter_username': profile['twitter'], 'company_name': profile['company'], 'url': profile['url'], 'shirt_size': profile['shirt_size'], 'email': profile['email'], 'location': profile['location'], } def _sync_speaker(self, speaker_data, update=False): if update: speaker = Speaker.objects.update_or_create(email=speaker_data['email'], defaults=speaker_data)[0] else: speaker = Speaker.objects.get_or_create(email=speaker_data.pop('email'), defaults=speaker_data)[0] return speaker def _submission_to_presentation_data(self, submission): talk = submission['talk'] return { 'papercall_id': submission['id'], 'title': talk['title'], 'description': talk['description'], 'notes': talk['notes'], 'abstract': talk['abstract'], 'audience_level': talk['audience_level'], 'presentation_format': talk['talk_format'], } def _sync_presentation(self, data, update=False): if update: presentation = Presentation.objects.update_or_create(papercall_id=data['papercall_id'], defaults=data)[0] else: presentation = Presentation.objects.get_or_create(papercall_id=data.pop('papercall_id'), defaults=data)[0] return presentation
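A short usage sketch of the sync flow above. The import path is a guess based on the package name in the imports, and settings.PAPERCALL_TOKEN must be configured so the requests session can authenticate against the Papercall API.

# Assumed import path; adjust to wherever PresentationService actually lives.
from pyconca2017.pycon_schedule.services import PresentationService

service = PresentationService()
# Pull all accepted submissions and refresh existing Speaker/Presentation rows.
service.sync_proposals(update=True)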
LOVE AT FIRST SIGHT Here is a TV ad for independent jewelers looking to establish themselves as the Bridal and Diamond Superstore in your market. It’s available for $799 including a custom tag with your logo and tagline. “Hey Jimmy, how do you train my team?” This is the question I get most often about the Jewelry Store Training Video program. So I created this short explainer to show you how it works. Click HERE to Get going on training your team! All in all it doesn’t look like March went too well. It seems folks are loading up for April diamond events.
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more§ # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import with_statement import sys import unittest from libcloud.compute.ssh import ParamikoSSHClient from libcloud.compute.ssh import ShellOutSSHClient from libcloud.compute.ssh import have_paramiko from mock import patch, Mock if not have_paramiko: ParamikoSSHClient = None class ParamikoSSHClientTests(unittest.TestCase): @patch('paramiko.SSHClient', Mock) def setUp(self): """ Creates the object patching the actual connection. """ conn_params = {'hostname': 'dummy.host.org', 'port': 8822, 'username': 'ubuntu', 'key': '~/.ssh/ubuntu_ssh', 'timeout': '600'} self.ssh_cli = ParamikoSSHClient(**conn_params) @patch('paramiko.SSHClient', Mock) def test_create_with_password(self): """ Initialize object with password. Just to have better coverage, initialize the object with the 'password' value instead of the 'key'. """ conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'password': 'ubuntu'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'password': 'ubuntu', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) @patch('paramiko.SSHClient', Mock) def test_create_without_credentials(self): """ Initialize object with no credentials. Just to have better coverage, initialize the object without 'password' neither 'key'. """ conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'hostname': 'dummy.host.org', 'allow_agent': True, 'look_for_keys': True, 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) def test_basic_usage_absolute_path(self): """ Basic execution. """ mock = self.ssh_cli # script to execute sd = "/root/random_script.sh" # Connect behavior mock.connect() mock_cli = mock.client # The actual mocked object: SSHClient expected_conn = {'username': 'ubuntu', 'key_filename': '~/.ssh/ubuntu_ssh', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'timeout': '600', 'port': 8822} mock_cli.connect.assert_called_once_with(**expected_conn) mock.put(sd) # Make assertions over 'put' method mock_cli.open_sftp().chdir.assert_called_with('root') mock_cli.open_sftp().file.assert_called_once_with('random_script.sh', mode='w') mock.run(sd) # Make assertions over 'run' method mock_cli.get_transport().open_session().exec_command \ .assert_called_once_with(sd) mock.close() def test_delete_script(self): """ Provide a basic test with 'delete' action. 
""" mock = self.ssh_cli # script to execute sd = '/root/random_script.sh' mock.connect() mock.delete(sd) # Make assertions over the 'delete' method mock.client.open_sftp().unlink.assert_called_with(sd) mock.close() if not ParamikoSSHClient: class ParamikoSSHClientTests(unittest.TestCase): pass class ShellOutSSHClientTests(unittest.TestCase): def test_password_auth_not_supported(self): try: ShellOutSSHClient(hostname='localhost', username='foo', password='bar') except ValueError: e = sys.exc_info()[1] msg = str(e) self.assertTrue('ShellOutSSHClient only supports key auth' in msg) else: self.fail('Exception was not thrown') def test_ssh_executable_not_available(self): class MockChild(object): returncode = 127 def communicate(*args, **kwargs): pass def mock_popen(*args, **kwargs): return MockChild() with patch('subprocess.Popen', mock_popen): try: ShellOutSSHClient(hostname='localhost', username='foo') except ValueError: e = sys.exc_info()[1] msg = str(e) self.assertTrue('ssh client is not available' in msg) else: self.fail('Exception was not thrown') def test_connect_success(self): client = ShellOutSSHClient(hostname='localhost', username='root') self.assertTrue(client.connect()) def test_close_success(self): client = ShellOutSSHClient(hostname='localhost', username='root') self.assertTrue(client.close()) def test_get_base_ssh_command(self): client1 = ShellOutSSHClient(hostname='localhost', username='root') client2 = ShellOutSSHClient(hostname='localhost', username='root', key='/home/my.key') client3 = ShellOutSSHClient(hostname='localhost', username='root', key='/home/my.key', timeout=5) cmd1 = client1._get_base_ssh_command() cmd2 = client2._get_base_ssh_command() cmd3 = client3._get_base_ssh_command() self.assertEquals(cmd1, ['ssh', 'root@localhost']) self.assertEquals(cmd2, ['ssh', '-i', '/home/my.key', 'root@localhost']) self.assertEquals(cmd3, ['ssh', '-i', '/home/my.key', '-oConnectTimeout=5', 'root@localhost']) if __name__ == '__main__': sys.exit(unittest.main())
Please. My son. He's sick.
import logging import sys from config import constants from database import db from util import sendgrid_ class AppConfig(object): SECRET_KEY = constants.FLASK_SECRET_KEY CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI = constants.SQLALCHEMY_DATABASE_URI SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = False def init_app(app): db.init_app(app) def init_prod_app(app): app.config.from_object(__name__ + '.AppConfig') init_app(app) if not constants.DEBUG: setup_log_handlers(app) return app def setup_log_handlers(app): log_level = logging.WARNING log_formatter = logging.Formatter( '%(asctime)s %(levelname)s [in %(pathname)s:%(lineno)d]: %(message)s') log_handlers = [ logging.StreamHandler(sys.stdout) if constants.LOG_TO_STDOUT else logging.FileHandler(constants.APP_LOG_FILENAME), new_smtp_log_handler(), ] app.logger.setLevel(log_level) for log_handler in log_handlers: log_handler.setLevel(log_level) log_handler.setFormatter(log_formatter) app.logger.addHandler(log_handler) return app def new_smtp_log_handler(): return sendgrid_.SendgridEmailLogHandler( None, # No server needed, we're using sendgrid instead of SMTP None, constants.MONITORING_NOTIFICATION_EMAILS, 'WriteToGov Error')
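A minimal sketch of wiring this module into a Flask application. Only init_prod_app() comes from the code above; the Flask instance itself is assumed to be created by the surrounding project.

from flask import Flask

app = Flask(__name__)
# Loads AppConfig, binds SQLAlchemy via db.init_app() and, when DEBUG is off,
# attaches the stdout/file and Sendgrid log handlers.
app = init_prod_app(app)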
Is your storefront door not closing or locking properly? Is it time for a new look? Or do you need to meet current ADA codes on an aging building? We offer full service commercial storefront and storefront door design, installation and service. Contact us for more information.
#!/usr/bin/python ''' Temp/Humidity Monitor - written by Joshua Hughes, 10/20/14 Must be run as root. Records temperature and percent humidity from Adafruit DHT11/22 or AM2302 sensors once per minute Calculates absolute humidity and dumps data into a database Create an sqlite3 database and setup table with something like: create table data(temp INTEGER, relhum INTEGER, abshum INTEGER, stamp DATETIME default CURRENT_TIMESTAMP); ''' import time, datetime, sys, logging, Adafruit_DHT, math import sqlite3 as sql #Type of Adafruit sensor: #DHT11 = 11 #DHT22 = 22 #AM2302 = 22 sensor = 22 pin = 18 db = '/home/pi/recorder.db' log = '/var/log/temp.log' #Math Constants for Humidity conversion c1 = -7.85951783 c2 = 1.84408259 c3 = -11.7866497 c4 = 22.6807411 c5 = -15.9618719 c6 = 1.80122502 c7 = 2.16679 Tc = 647.096 # Critical Temp, K Pc = 22064000 # Critical Pressure, Pa #Calculate measured/saturation temp ratio def v(T, p): return math.pow(1 - (273.15 + T) / Tc, p) #Calculate Water Vapor Saturation Pressure, Pws def Pws(T): return Pc * math.exp( Tc * (c1*v(T,1) + c2*v(T,1.5) + c3*v(T,3) + c4*v(T,3.5) + c5*v(T,4) + c6*v(T,7.5)) / (273.15+T) ) #Calculate Water Vapor Pressure, Pw def Pw(T,RH): return Pws(T) * RH / 100 #Calculate Absolute Humidity def AbsHum(T,RH): return c7 * Pw(T,RH) / (273.15 + T) def InitLogger(): global logger logger = logging.getLogger('Temp') hdlr = logging.FileHandler(log) hdlr.setFormatter(logging.Formatter('%(message)s')) logger.addHandler(hdlr) logger.setLevel(logging.WARNING) if __name__ == "__main__": global logger InitLogger() con = sql.connect(db) ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') logger.warning('\n'+ts+' - Sensor Startup') while True: relhum, temperature = Adafruit_DHT.read_retry(sensor,pin) abshum = AbsHum(temperature, relhum) #convert temp from C to F: temperature = temperature * 9 / 5 + 32 ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') output = ts + ' - Temp={0:0.1f}*F Relative Humidity={1:0.1f}% Absolute Humidity={2:0.1f}'.format(temperature, relhum, abshum) logger.warning(output) print output sqlinsert = "INSERT INTO data(temp, relhum, abshum, stamp) VALUES("+"{0:.2f}".format(temperature)+","+"{0:.2f}".format(relhum)+","+"{0:.2f}".format(abshum)+",CURRENT_TIMESTAMP)" with con: cur = con.cursor() cur.execute(sqlinsert) #TODO - add averager instead of sleep? time.sleep(60)
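A small read-back sketch for the data this recorder writes. The table layout comes from the comment at the top of the script; the one-hour window is just an illustrative choice.

import sqlite3 as sql

con = sql.connect('/home/pi/recorder.db')
with con:
    cur = con.cursor()
    # One sample per minute, so the last 60 rows cover roughly the past hour.
    cur.execute("SELECT stamp, temp, relhum, abshum FROM data "
                "ORDER BY stamp DESC LIMIT 60")
    for stamp, temp, relhum, abshum in cur.fetchall():
        print('%s  %.1f F  %.1f %%  %.1f' % (stamp, temp, relhum, abshum))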
Tickera UV forecast issued today at 8:37 pm. Next forecast at approx. 8:37 pm.
#!/usr/bin/env python # -*- coding: utf8 -*- """ This module creates word trees, splices, prunes, and queries. """ __module__ = "Tree.py" __author__ = "Jonathan D. Lettvin" __copyright__ = "\ Copyright(C) 2016 Jonathan D. Lettvin, All Rights Reserved" __credits__ = ["Jonathan D. Lettvin"] __license__ = "GPLv3" __version__ = "0.0.3" __maintainer__ = "Jonathan D. Lettvin" __email__ = "[email protected]" __contact__ = "[email protected]" __status__ = "Demonstration" __date__ = "20161102" from CPT import CPT # CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC class Tree(set): """ Tree instances as unitree, a tree for fast specified word lookup. unitree(word) functor adds word to tree. unitree[word] getitem returns True if word is in tree. unitree(word, "delete") remove word from tree unitree.delete(word) remove word from tree unitree.word(word, variant) add variant targeting word to tree A word or list of words may be given while instancing. A word or list of words may be added after instancing by functor. For canonicalization of word variants, the terminal is a set such that common tree variations for dissimilar words can have multiple results. TODO; when deleting word, delete its variations (remove from word lists). """ def __init__(self, **kw): self.kw = kw self.wordlist = kw.get('wordlist', []) self.case = kw.get('ignorecase', False) self.end = kw.get('end', 0xFFFF) # non-codepoint usable as end key. self.tree = {} # CPT() # {} self(self.wordlist) def word(self, root, also=None): "For a word, insert it into tree or retrieve it from tree" if isinstance(also, list): for variation in also: self.word(root, variation) return self fork = self.tree if not also: also = root for o in (ord(c) for c in also): # iteration costs less than recursion fork[o] = fork.get(o, {}) fork = fork[o] if not fork.get(self.end): fork[self.end] = set([root]) else: fork[self.end].add(root) self.add(root) return self def __call__(self, word, *args): "word or delete a word or list of words to the tree" if isinstance(word, list): map(self, word) else: if "delete" in args: self.delete(word) else: self.word(word) self.add(word) # TODO: internal variations mechanism doesn't work yet. 
# for variant in Tree.variations(word): # self.word(word, variant) return self def delete(self, root, tree=False, level=0, N=0): "Prune a word or list of words from the tree" # TODO delete variations as well as root if tree is False: tree = self.tree N = len(root) level = 0 if N <= level: self.discard(root) unique = (tree and (len(tree) == 1)) terminal = tree and self.end in tree if terminal: tree[self.end].discard(root) return unique and terminal C = root[level] O = ord(C) if O in tree: if self.delete(root, tree[O], level + 1, N) and len(tree) == 1: del tree[O] return True return False def _graphviz(self, tree, token=u""): text = u"" for k, w in tree.iteritems(): if k == self.end: terminal = u','.join(w) text += u'"%s" -> "[%s]" [label="$"];\n' % (token, terminal) text += u'"[%s]" -> "STOP";\n' % (terminal) else: newtoken = token + unichr(k) text += u'"%s";\n' % (newtoken) if token is not u"": label = u'[label="' + unichr(k) + u'"];' text += u'"%s" -> "%s" %s\n' % (token, newtoken, label) text += self._graphviz(w, newtoken) if tree == self.tree: fmt = '"START" -> "%s" [label="%s"];\n' text += fmt % (newtoken, unichr(k)) return text def graphviz(self, dotname="Tree.dot"): "Produce .dot file for use by graphviz external program" head = 'digraph tree {\n rankdir=LR;\n concentrate=true;\n' tail = "}" with open(dotname, "w+b") as dotstream: try: print>>dotstream, head + self._graphviz(self.tree) + tail except Exception as why: # pylint: disable=broad-except print("Can't output: %s(%s)" % (dotname, str(why))) finally: pass def __getitem__(self, find): "Find in the tree" fork = self.tree for o in (ord(c) for c in find): fork = fork.get(o, {}) if fork == {}: break return fork.get(self.end, False)
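A usage sketch for the Tree class above, exercising only methods shown here: seed the tree with a canonical word, register a variant spelling that maps back to it, and query both.

t = Tree(wordlist=["colour"])      # seed the tree with a canonical word
t.word("colour", ["color"])        # variant spelling that resolves to "colour"
print(t["color"])                  # -> set(['colour'])
print(t["colou"])                  # -> False, incomplete words are not terminals
t("colour", "delete")              # prune the canonical word again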
Ideally located, Midtown Square is your destination for the best of apartment living in Illinois. Enjoy premium shopping, dining, and entertainment just minutes from home in downtown Glenview. Travel and commuting is made easy with our proximity to Illinois' Metra system, Interstate 294, and O'Hare International Airport. Enjoy educational pursuits and on-campus activities just 20 minutes away at Northwestern University. Our community is also surrounded by a variety of parks, camping, and hiking so you can enjoy the outdoors. Tee off at one of several surrounding golf clubs, or experience history and culture at one of many surrounding museums including the Illinois Holocaust Museum & Education Center. Our community is just minutes from downtown Chicago too! Visit our Glenview apartments at Midtown Square today!
# -*- coding: utf-8 -*- # Copyright (C) 2014 GRAP (http://www.grap.coop) # Copyright © 2017 Coop IT Easy (http://www.coopiteasy.be) # @author: Sylvain LE GAL (https://twitter.com/legalsylvain) # @author: Houssine BAKKALI (https://github.com/houssine78) # @author: Rémy TAYMANS <[email protected]> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from datetime import datetime from openerp import api, fields, models, _ import openerp.addons.decimal_precision as dp # TODO: scale_category is defined in beesdoo_product but this module do # not depend on it. Find a way to configure these fields. ADDITIONAL_FIELDS = ['list_price', 'scale_category', 'image_medium'] class ProductTemplate(models.Model): _inherit = 'product.template' scale_group_id = fields.Many2one( related="product_variant_ids.scale_group_id", string='Scale Group', store=True ) scale_sequence = fields.Integer( related="product_variant_ids.scale_sequence", string='Scale Sequence', store=True ) scale_tare_weight = fields.Float( related="product_variant_ids.scale_tare_weight", string='Scale Tare Weight', store=True, help="Set here Constant tare weight" " for the given product. This tare will be substracted when" " the product is weighted. Usefull only for weightable product.\n" "The tare is defined with kg uom." ) # View Section @api.multi def send_scale_create(self): for product in self: # TODO: Should check if the product has a scale group product._send_to_scale_bizerba('create', True) return True @api.multi def send_scale_write(self): for product in self: # TODO: Should check if the product has a scale group product._send_to_scale_bizerba('write', True) return True @api.multi def send_scale_unlink(self): for product in self: # TODO: Should check if the product has a scale group product._send_to_scale_bizerba('unlink') return True # Custom Section def _send_to_scale_bizerba(self, action, send_product_image=False): log_obj = self.env['product.scale.log'] log_obj.create({ 'log_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'scale_system_id': self.scale_group_id.scale_system_id.id, 'product_id': self.id, 'action': action, 'send_product_image': send_product_image, }) def _check_vals_scale_bizerba(self, vals): system = self.scale_group_id.scale_system_id system_fields = [x.name for x in system.field_ids] for product_field in ADDITIONAL_FIELDS: if product_field not in system_fields: system_fields.append(product_field) vals_fields = vals.keys() return set(system_fields).intersection(vals_fields) def is_in_scale(self): """Return True if the current product should be in the scale system. """ self.ensure_one() return self.active and self.sale_ok and self.scale_group_id def is_new_in_scale(self, vals): """Return True if the current product will be new in the scale system after the write. """ return not self.is_in_scale() and self.will_be_in_scale(vals) def will_be_in_scale(self, vals): """Return True if the current product will be in the scale system after the write. """ self.ensure_one() return (vals.get('active', self.active) and vals.get('sale_ok', self.sale_ok) and vals.get('scale_group_id', self.scale_group_id)) # Overload Section @api.model def create(self, vals): product = super(ProductTemplate, self).create(vals) if product.is_in_scale(): product._send_to_scale_bizerba('create') return product @api.multi def write(self, vals): defered = {} for product in self: if product.is_new_in_scale(vals): # Product is new to the scale system: create it. 
                defered[product.id] = 'create'
            elif product.is_in_scale() and product.will_be_in_scale(vals):
                # Product is in the scale system and will be in the
                # scale system after the write: if there are changes in
                # the fields related to the scale system, update it.
                if product._check_vals_scale_bizerba(vals):
                    defered[product.id] = 'write'
                # If the scale group has changed, the product must be updated.
                if ('scale_group_id' in vals
                        and vals['scale_group_id'] != product.scale_group_id.id):
                    # Remove it from the previous group
                    product._send_to_scale_bizerba('unlink')
                    # Send it to the new group
                    defered[product.id] = 'create'
            elif product.is_in_scale() and not product.will_be_in_scale(vals):
                # Product is in the scale system and will no longer be
                # in the scale system after the write: delete it.
                defered[product.id] = 'unlink'
        res = super(ProductTemplate, self).write(vals)
        for product_id, action in defered.iteritems():
            product = self.browse(product_id)
            product._send_to_scale_bizerba(action, True)
        return res

    @api.multi
    def unlink(self):
        for product in self:
            if product.is_in_scale():
                product._send_to_scale_bizerba('unlink')
        return super(ProductTemplate, self).unlink()
D. Graça Sousão Grande Reserve Douro - Red Wine 2013 - Kabaz, Lda.
Tasting notes: The color is typical of Sousão, making the wine almost opaque to light. The aroma is delicate, somewhat austere, without herbaceous or vegetal traces.
Palate: In the mouth, it surprises with the balance between a powerful body, a vibrant acidity and an attractive aroma, which gives it a charming finish.
Wine Pairing: It is an ideal wine for lamprey and for baked goods or game stews.
# -*- coding: utf-8 -*- import wx import wx.adv import application from pubsub import pub class mainWindow(wx.Frame): def makeMenu(self): mb = wx.MenuBar() app_ = wx.Menu() create = wx.Menu() self.audio_album = create.Append(wx.NewId(), _("Audio album")) self.video_album = create.Append(wx.NewId(), _("Video album")) app_.Append(wx.NewId(), _("Create"), create) delete = wx.Menu() self.delete_audio_album = delete.Append(wx.NewId(), _("Audio album")) self.delete_video_album = delete.Append(wx.NewId(), _("Video album")) app_.Append(wx.NewId(), _("Delete"), delete) self.blacklist = app_.Append(wx.NewId(), _("Blacklist")) self.accounts = app_.Append(wx.NewId(), _("Manage accounts")) self.settings_dialog = app_.Append(wx.NewId(), _("Preferences")) me = wx.Menu() profile = wx.Menu() self.view_profile = profile.Append(wx.NewId(), _("View profile")) # self.edit_profile = profile.Append(wx.NewId(), _("Edit profile")) self.open_in_browser = profile.Append(wx.NewId(), _("Open in browser")) me.Append(wx.NewId(), _("Profile"), profile) self.set_status = me.Append(wx.NewId(), _("Set status message")) buffer = wx.Menu() search = wx.Menu() self.search_audios = search.Append(wx.NewId(), _("Audio")) self.search_videos = search.Append(wx.NewId(), _("Video")) self.timeline = buffer.Append(wx.NewId(), _("&New timeline")) buffer.Append(wx.NewId(), _("Search"), search) self.update_buffer = buffer.Append(wx.NewId(), _("Update current buffer")) self.load_previous_items = buffer.Append(wx.NewId(), _("Load previous items")) self.remove_buffer_ = buffer.Append(wx.NewId(), _("&Remove buffer")) mb.Append(app_, _("Application")) mb.Append(me, _("Me")) mb.Append(buffer, _("Buffer")) player = wx.Menu() self.player_play = player.Append(wx.NewId(), _("Play/Pause")) self.player_play_all = player.Append(wx.NewId(), _("Play all")) self.player_previous = player.Append(wx.NewId(), _("Previous")) self.player_next = player.Append(wx.NewId(), _("Next")) self.player_shuffle = player.AppendCheckItem(wx.NewId(), _("Shuffle")) self.player_seek_left = player.Append(wx.NewId(), _("Seek backwards")) self.player_seek_right = player.Append(wx.NewId(), _("Seek forwards")) self.player_volume_up = player.Append(wx.NewId(), _("Volume up")) self.player_volume_down = player.Append(wx.NewId(), _("Volume down")) self.player_mute = player.Append(wx.NewId(), _("Mute")) help_ = wx.Menu() self.about = help_.Append(wx.NewId(), _("About {0}").format(application.name,)) self.documentation = help_.Append(wx.NewId(), _("Manual")) self.check_for_updates = help_.Append(wx.NewId(), _("Check for updates")) self.changelog = help_.Append(wx.NewId(), _("Chan&gelog")) self.open_logs = help_.Append(wx.NewId(), _("Open logs directory")) self.open_config = help_.Append(wx.NewId(), _("Open config directory")) self.report = help_.Append(wx.NewId(), _("Report an error")) mb.Append(player, _("Audio player")) mb.Append(help_, _("Help")) self.SetMenuBar(mb) self.accel_tbl = wx.AcceleratorTable([ # Assign keystrokes to control the player object. (wx.ACCEL_ALT, wx.WXK_LEFT, self.player_previous.GetId()), (wx.ACCEL_ALT, wx.WXK_RIGHT, self.player_next.GetId()), (wx.ACCEL_ALT, wx.WXK_DOWN, self.player_volume_down.GetId()), (wx.ACCEL_ALT, wx.WXK_UP, self.player_volume_up.GetId()), # Translators: Keystroke used to play/pause the current item in the playback queue. Use the latin alphabet, but you can match a different key here. For example if you want to assign this to the key "П", use G. 
(wx.ACCEL_CTRL, ord(_("P")), self.player_play.GetId()), (wx.ACCEL_CTRL|wx.ACCEL_SHIFT, ord(_("P")), self.player_play_all.GetId()), (wx.ACCEL_ALT|wx.ACCEL_SHIFT, wx.WXK_LEFT, self.player_seek_left.GetId()), (wx.ACCEL_ALT|wx.ACCEL_SHIFT, wx.WXK_RIGHT, self.player_seek_right.GetId()), ]) self.SetAcceleratorTable(self.accel_tbl) def __init__(self): super(mainWindow, self).__init__(parent=None, id=wx.NewId(), title=application.name) self.Maximize() self.makeMenu() self.panel = wx.Panel(self) self.sizer = wx.BoxSizer(wx.VERTICAL) self.sb = self.CreateStatusBar() self.tb = wx.Treebook(self.panel, -1) self.sizer.Add(self.tb, 1, wx.ALL|wx.EXPAND, 5) pub.subscribe(self.change_status, "change_status") def realize(self): self.panel.SetSizer(self.sizer) self.SetClientSize(self.sizer.CalcMin()) self.Layout() self.SetSize(self.GetBestSize()) def change_status(self, status): wx.CallAfter(self.sb.SetStatusText, status) def connection_error(self): wx.MessageDialog(self, _("There is a connection error. Check your internet connection and try again later."), _("Connection error"), wx.ICON_ERROR).ShowModal() def get_buffer_count(self): return self.tb.GetPageCount() def add_buffer(self, buffer, name): self.tb.AddPage(buffer, name) def insert_buffer(self, buffer, name, pos): return self.tb.InsertSubPage(pos, buffer, name) def insert_chat_buffer(self, buffer, name, pos): return self.tb.InsertPage(pos, buffer, name) def search(self, name_): for i in range(0, self.tb.GetPageCount()): if self.tb.GetPage(i).name == name_: return i def get_current_buffer(self): return self.tb.GetCurrentPage() def get_current_buffer_pos(self): return self.tb.GetSelection() def get_buffer(self, pos): return self.GetPage(pos) def change_buffer(self, position): self.tb.ChangeSelection(position) def get_buffer_text(self, pos=None): if pos == None: pos = self.tb.GetSelection() return self.tb.GetPageText(pos) def get_buffer_by_id(self, id): return self.nb.FindWindowById(id) def advance_selection(self, forward): self.tb.AdvanceSelection(forward) def about_dialog(self, channel="stable", *args, **kwargs): if channel == "stable": version = _("{version} (stable)").format(version=application.version) else: version = _("{version} (alpha)").format(version=application.update_next_version) info = wx.adv.AboutDialogInfo() info.SetName(application.name) info.SetVersion(version) info.SetDescription(application.description) info.SetCopyright(application.copyright) info.SetTranslators(application.translators) # info.SetLicence(application.licence) info.AddDeveloper(application.author) wx.adv.AboutBox(info) def remove_buffer(self, pos): self.tb.DeletePage(pos) def remove_buffer_from_position(self, pos): return self.tb.RemovePage(pos) def notify(self, title, text): self.notification = wx.adv.NotificationMessage(title=title, message=text, parent=self) self.notification.Show()
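A launch sketch for the window class above. It assumes wxPython with wx.adv available, the project's application module importable, and a gettext _() installed as a builtin, which the menu labels above require; none of that wiring is shown in this excerpt.

import gettext
import wx

gettext.install("app")     # makes _() available before makeMenu() runs
app = wx.App()
window = mainWindow()
window.realize()
window.Show()
app.MainLoop()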
It’s a Hard Luck Life, but not any more for Sunny | What's Up With My Pet?
Sunny had only 48 hours to live in a Nashville animal shelter. She also had a nasty abrasion on her back right paw, something that might have discouraged some prospective owners, but not the producers of “Annie”. Good thing they did: Sunny turned out not only to have the right look and temperament, she was also comfortable walking on her back legs. So Sunny is going to dance in the upcoming Broadway production. These days, Sunny is in an upper West Side apartment in New York and working hard to get ready for her new role. Sunny’s story is inspiring and unusual, but there is a common thread that Sunny shares with past canine stars of “Annie” – all were adopted from shelters. Now Pedigree has teamed with “Annie” to help more than just the one rare dog have their day: for each “Annie” ticket sold through to the end of next year, Pedigree will donate $2.00, up to $1 million, to a special fund to help more dogs like Sunny find homes.
from PIL import Image from PIL import ImageFont from PIL import ImageDraw from TransitionFunctions import * import copy import struct import serial from MessageClasses import * class Display(object): """ This class represents a display -- it has properties that specify its width and number of lines. It also has a property that represents its current state (e.g. what it is currently displaying). It has an update method which is fed a display object, a transition function and a delay. The transition function takes in a current display state and a desired display state, then returns a list of intermediate display states that need to be transitioned through. A display state is stored as a list of length num_lines that contains only strings of length num_chars. """ def __init__(self, num_lines, num_chars): self.num_lines = num_lines self.num_chars = num_chars self.currentstate = [' '*num_chars]*num_lines def determine_transition(self, transitionfunction, messageobject): messageobject.update(num_lines=self.num_lines, num_chars=self.num_chars) return transitionfunction(self.currentstate, messageobject.get_message()) def update(self, transitionfunction, messageobject): # this will be different for each display type, so must be overridden raise NotImplementedError class SimpleDisplay(Display): """ This class is the simplest display possible - it just prints the info to the console. """ def __init__self(self, num_lines, num_chars): Display.__init__(self, num_lines, num_chars) def update(self, transitionfunction, messageobject): states = self.determine_transition(transitionfunction, messageobject) for i in range(len(states)): print(states[i]) class SerialLCDDisplay(Display): """ This class is an LCD display controlled via serial -- it takes in a bytestring of length num_chars * num_lines and displays it. """ def __init__(self, num_lines, num_chars, device, frequency, reactiontime): """ :param num_lines: number of lines in the display :param num_chars: number of characters in each line of the display :param device: device location of the serial connection (e.g. '/dev/tty.usbserial') :param frequency: baud rate of the connection (e.g. 9600) :param reactiontime: delay between each update action, seconds """ Display.__init__(self, num_lines, num_chars) self.device = device self.frequency = frequency self.reactiontime = reactiontime def update(self, transitionfunction, messageobject): import serial import time ser = serial.Serial(self.device, self.frequency) time.sleep(self.reactiontime) states = self.determine_transition(transitionfunction, messageobject) print("Attempting to display ", messageobject) for i in range(len(states)): output = "" for z in range(self.num_lines): output += states[i][z] ser.write(output.encode(encoding='us-ascii', errors='strict')) time.sleep(self.reactiontime) ser.close() class FlipDotDisplay(Display): """ This class represents a rectangular array of flip dot displays from AlfaZeta, arranged in an arbitrary layout. """ def __init__(self, rows, columns, serialinterface, layout): """ Initializes the display. :param rows: Integer number of rows in the display :param columns: Integer number of columns in the display :param layout: A dictionary, with keys being (row,column) tuples. layout[(row,column)] should return a tuple (displayID,bytenum,powerof2) :param serialinterface: A python serial module object, representing the serial interface of the actual display """ # Make sure variables are of the right type. 
For layout, only make sure it has the right dimensions assert type(rows) == int assert type(columns) == int assert isinstance(serialinterface, serial.Serial) self.rows = rows self.columns = columns self.layout = copy.deepcopy(layout) self.serial = serialinterface self.invert = False # determine indices of displays and number of bytes # display is a dictionary of the displays, indexed by their identifier # each element of the dictionary is an integer (number of bytes) # this loop determines the largest bytenum for each display display = {} for pixel in layout: if layout[pixel][0] in display: if layout[pixel][1] > display[layout[pixel][0]]: display[layout[pixel][0]] = layout[pixel][1] else: display[layout[pixel][0]] = layout[pixel][1] # turn the display dictionary into a dictionary of lists, each list the length of the bytes in the display # default empty state is 0 (all pixels in column black) for disp in display: temp = display[disp] display[disp] = [0]*(temp + 1) self.emptystate = copy.deepcopy(display) # initialize current state to all black and then set the display to it self.currentstate = Image.new('1', (self.columns, self.rows), 0) self.show(self.currentstate) self.currentmessage = None def flip_invert(self): """ Swaps display from inverted to not inverted or vice versa :return: """ self.invert = not self.invert def get_invert(self): """ Safe way to determine whether the display is inverted :return: boolean, indicating whether colors are inverted or not """ return self.invert def show(self, desiredstate): """ Writes the desired state to the display. :param desiredstate: a PIL image object, of dimensions (rows,columns) :return: None """ # to optimize time, only going to check if the first row has the proper number of columns assert (self.columns, self.rows) == desiredstate.size # turn desiredstate into a list of lists, with desiredstate[row][column] returning the pixel direction pixel = list(desiredstate.getdata()) pixels = [pixel[i * self.columns: (i + 1) * self.columns] for i in range(self.rows)] # start with generic command strings head = b'\x80' tail = b'\x8F' cmd = b'\x84' refresh = head + b'\x82' + tail cmdstring = b'' display = copy.deepcopy(self.emptystate) # first need to use self.layout to turn the pixels array into the display IDs and byte values # iterate through all the rows and columns in the desired state for row in range(len(pixels)): for column in range(len(pixels[row])): # sometimes white will be 255, sometimes it will be 1 # this code needs white to be 1 for calculation purposes # sanitize 255 into 1, or just use as is if pixels[row][column] == 255: pixel = 1 else: pixel = pixels[row][column] # if display is inverted, turn 1 into 0 and vice versa, otherwise leave as is if self.invert: pixel = 1 - pixel # display[displaynum from layout] [ bytenum from layout] incremented by the pixels value * power # of 2 from layout display[self.layout[(row, column)][0]][self.layout[(row, column)][1]] +=\ pixel * 2 ** self.layout[(row, column)][2] # iterate through the displays and turn them into the proper byte arrays for disp in display: # start off each command with a head and command string # add the display address # to generate bytes for the address, use struct.pack('=b',ADDRESS) # add the actual command string- use the bytearray function to turn the list of integers into the byte array cmdstring += head + cmd + struct.pack('=b', disp) + bytearray(display[disp]) + tail # once done, add the refresh command to the end of the command string cmdstring += refresh # write the 
command to the serial interface self.serial.write(cmdstring) def update(self, transitionfunction, displayobject,font): # Ensure proper types assert isinstance(displayobject, Message) or isinstance(displayobject, Image.Image) if isinstance(displayobject, Message): assert isinstance(font, ImageFont.FreeTypeFont) assert callable(transitionfunction) assert transitionfunction.is_message_transition or transitionfunction.is_display_transition displaystates = [] # if an image if isinstance(displayobject, Image.Image): # either crop it to fit the display (keep top left) or pad to fill (center) # first check if either of the dimensions are too big image_for_transition = displayobject if displayobject.size[0] > self.columns or displayobject.size[1] > self.rows: horizontalcrop = max(displayobject.size[0] - self.columns, 0) verticalcrop = max(displayobject.size[1] - self.rows, 0) image_for_transition = displayobject.crop((0 + horizontalcrop // 2, 0 + verticalcrop // 2, displayobject.size[0] - horizontalcrop // 2 - horizontalcrop % 2, displayobject.size[1] - verticalcrop // 2 - verticalcrop % 2)) # now that any cropping has been done, need to check if the image needs to be padded if image_for_transition.size[0] < self.columns or displayobject.size[1] < self.rows: image_for_transition = pad_image(displayobject, self.rows, self.columns, fill=0) # if a message, we need to figure some things elif isinstance(displayobject, Message): # check the size of the phrase "ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz " to see how wide # and tall the display is in terms of characters with the specified font checkwidth, checkheight = font.getsize("ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz ") checkwidth //= 54 display_width_chars = self.columns // checkwidth display_height_chars = self.rows // checkheight if display_height_chars < 1 or display_width_chars < 1: raise ValueError("My font is too big! My font is TOO BIG!") # set foundissue to true to ensure we check at least once foundissue = True # purpose of this loop is to check ensure we get a message that fits in the display # the initial estimate of the number of characters is a guess - because some fonts do not have the same # width for each character, the actual size taken up can depend on the message # so, we check if the message fits. 
if it doesn't, we honor the font size provided and reduce the amount # of space available for the message while foundissue: # tell the message to update with the estimated number of characters displayobject.update(num_lines=display_height_chars, num_chars=display_width_chars) totalheight = 0 maxwidth = 0 foundissue = False # determine max dimensions of the image for line in displayobject.get_message(): width, height = font.getsize(line) totalheight += height maxwidth = max(maxwidth, width) # check against maximum display dimensions and update the "message size" if necessary if maxwidth > self.columns: foundissue = True display_width_chars = int(display_width_chars * self.columns / maxwidth) if totalheight > self.rows: foundissue = True display_height_chars = int(display_height_chars * self.rows / totalheight) # at the end of the loop, totalheight and maxwidth should contain the actual values for the message # if the provided transition function is messagetransition, apply it here to generate a message states list # then turn those message states into display states # otherwise, create a single-item list that is just the eventual message if transitionfunction.is_message_transition: # try to use transition function - if we get an assertion error, that means the current display state # is an image, so a message transition is not possible try: messagestates = transitionfunction(self.currentmessage, displayobject.get_message()) except AssertionError: messagestates = [displayobject.get_message()] # since our function is a message function, we create the displaystates list here for messagestate in messagestates: image = message_to_image(messagestate, self.columns, self.rows, maxwidth, totalheight, font, display_height_chars) displaystates.append(image) # since our function is not a message function, we just make the message into an image for transition else: image_for_transition = message_to_image(displayobject.get_message(), self.columns, self.rows, maxwidth, totalheight, font, display_height_chars) # write the message output to the self.currentmessage container, so future message transitions can work self.currentmessage = displayobject.get_message() else: # it's not a message or an image - technically this should not be possible because of the asserts raise AssertionError("Assertion not working") # if the provided transition function is a displaytransition, then use the transition function to generate # desired display states if transitionfunction.is_display_transition: displaystates = transitionfunction(self.currentstate, image_for_transition) # if we get this far and displaystates is still an empty list, then # we got an image to display, but combined with a message transition. 
just use simpletransition if displaystates == []: displaystates = SimpleTransition(self.currentstate, image_for_transition) # show the desired states on the display for state in displaystates: self.show(state) self.currentstate = displaystates[-1] class FakeFlipDotDisplay(FlipDotDisplay): def __init__(self, rows, columns, serialinterface, layout): self.file_number = 1 FlipDotDisplay.__init__(self, rows, columns, serialinterface, layout) def show(self, desiredstate): desiredstate.format = 'PNG' statepath = '/Users/cmcd/PycharmProjects/SignStorage/' desiredstate.save(statepath + str(self.file_number) + '.PNG', format='PNG') self.file_number += 1 def pad_image(Image, rows, columns, fill=0): """ Takes in an image file, returns a padded image to fit the rectangle given by the rows and columns dimensions :param Image: A PIL image object :param rows: integer, number of rows of pixels :param columns: integer, number of columns of pixels :param fill: an integer 1 or 0, indicating which color to fill the padded area with (1= white, 0 = black) :return: A PIL image object of dimensions (rows,columns) with the provided image in the center """ # create new image of the desired size, with the fill padded = Image.new('1', (columns, rows), fill) incolumns, inrows = Image.size if incolumns > columns or inrows > rows: raise ValueError("Input image must be less than or equal to the output size in all dimensions.") # paste provided image into created image, such that it is as centered as possible in the new area padded.paste(Image, ((columns - incolumns) // 2, (rows-inrows) // 2)) return padded def initialize_row_spacing_lookup(): """ Could not determine an algorithm for how to space the lines, so going to use a lookup table. lookuptable[num_lines][extra_spaces] will contain a tuple (top,per_line) which indicates how many spaces go at the top and how many go between each line. :return: a lookup table, which is a list of lists of tuples. """ output = [[(None, None)]*12 for i in range(12)] output[3][0] = (0, 0) output[3][1] = (0, 0) output[3][2] = (0, 1) output[3][3] = (0, 1) output[3][4] = (1, 1) output[3][5] = (1, 1) output[2][0] = (0, 0) output[2][1] = (0, 1) output[2][2] = (0, 1) output[2][3] = (1, 1) output[2][4] = (1, 2) output[2][5] = (1, 2) return output def message_to_image(message, columns, rows, max_width, total_height, font, display_height_chars): image = Image.new('1', (columns, rows), 0) # calculate x position to write the lines to - this is easy since it's just centering the stuff xposition = (columns - max_width) // 2 # calculate y position and spacing - more difficult since there are multiple lines and spacing between total_y_space = rows - total_height yposition, per_line = initialize_row_spacing_lookup()[display_height_chars][total_y_space] if yposition is None: yposition = total_y_space // 2 + 1 per_line = 0 line_height = font.getsize('A')[1] # iterate through the lines in the message, writing each line at the right position and then # incrementing the position for i in range(len(message)): ImageDraw.Draw(image).text((xposition, yposition), message[i], fill=1, font=font) yposition += line_height + per_line return image
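The layout dictionary is what ties the logical pixel grid to the AlfaZeta wire protocol, and its shape is easy to get wrong, so a small construction sketch may help. This is a hypothetical example, assuming a single 7-row by 28-column panel at display address 0 in which each column maps to one byte and row r contributes 2**r to that byte; real panel wiring may differ, so treat the numbers as placeholders.

# Hypothetical layout for one 7x28 panel at display address 0.
# layout[(row, column)] -> (displayID, bytenum, powerof2), the shape that
# FlipDotDisplay.show() expects when it accumulates pixel values per byte.
def build_single_panel_layout(rows=7, columns=28, display_id=0):
    layout = {}
    for row in range(rows):
        for column in range(columns):
            # the column index doubles as the byte number; the row picks the bit
            layout[(row, column)] = (display_id, column, row)
    return layout

if __name__ == '__main__':
    layout = build_single_panel_layout()
    # the pixel at row 3, column 5 lives in byte 5 of display 0 and adds 2**3 = 8
    print(layout[(3, 5)])

With a layout like this, FlipDotDisplay(7, 28, serial.Serial('/dev/ttyUSB0', 57600), layout) would be the natural constructor call, although the device path and baud rate here are only placeholders.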
At TDMC we combine strategic, creative, media and web development expertise in a quest to achieve tangible results for our clients. Digital marketing agencies are a dime a dozen these days, but we like to fly beneath the radar, using our understanding of user interactions and technology - fostered over years in the digital space, both locally and abroad - to deliver in-depth insight into what really does influence and move brands and their consumers. We strive to create dialogue with our clients and their customers by partnering with them to create meaningful interactions and to transform great ideas into successful results, challenging convention and innovating smartly. And we do all this because we can. We have a proven track record with a bouquet of clients for delivering a reliable, quality experience and for working together with them to build their businesses. Think of us as an extension of your team, allowing you to manage the ebb and flow of your work stream while TDMC delivers seamlessly to meet your deadlines. With a team of specialists based in Durban and our personal experience, we will ensure that quality is never compromised. The TDMC team has 40+ years of digital experience collectively. We are passionate about building partnerships through knowledge sharing, transparency and innovation.
from pyveplot import * import networkx as nx import csv import argparse import math import progressbar def coords( radius, angle, origin=(0,0)): """ Returns a tuple of tuples of coordinates given a radius, angle and origin tuple """ return (origin[0] + round((radius * math.cos(math.radians(angle))), 2), origin[1] + round((radius * math.sin(math.radians(angle))), 2)) from pprint import pprint parser = argparse.ArgumentParser(description='hiveplot of mirna-gene interactions') parser.add_argument('edgelist', type=argparse.FileType('r'), help="network in edgelist format") parser.add_argument('plot', type=argparse.FileType('w'), help="plot file") args = parser.parse_args() # load edges into graph g = nx.Graph() for edge in csv.DictReader(args.edgelist, delimiter="\t"): g.add_edge( edge['from'], edge['to'], w=float(edge['weight']) ) # sort nodes by degree by_degree = {} for n in g.nodes(): d = g.degree(n) if d in by_degree: by_degree[d].append(n) else: by_degree[d] = [n, ] degree_ordered = [] for d in sorted(by_degree.keys(), reverse=False): degree_ordered += by_degree[d] # if a gene doesn't have other genes as its first neighbors def only_talks_to_mrna( gene ): status = True for n in g.neighbors(gene): if not n.startswith('hsa'): return False return status mirna_genes = [] genes = [] mirnas = [] # classify nodes for n in degree_ordered: if not n.startswith('hsa'): genes.append(n) if only_talks_to_mrna(n): mirna_genes.append(n) else: mirnas.append(n) h = Hiveplot( args.plot.name ) args.plot.close() h.dwg.width=15000 h.dwg.height=15000 #centre = (len(mirnas)+30, len(mirnas)+30) centre = (516*2, 516*2) print "centre", centre # configure mirna axes m1 = Axis( coords(20, -90, centre), coords(len(mirnas)*2, -90, centre), stroke="#CBFF65", stroke_width=10) m2 = Axis( coords(20, -180, centre), coords(len(mirnas)*2, -180, centre), stroke="#CBFF65", stroke_width=10) pos = 0.0 delta = 1.0 / len(mirnas) for n in mirnas: node0 = Node(n) node1 = Node(n) m1.add_node( node0, pos ) m2.add_node( node1, pos ) pos += delta g1 = Axis(coords(20, 0, centre), coords(len(genes)*0.5, 0, centre), stroke="#00C598", stroke_width=10) g2 = Axis(coords(20, 90, centre), coords(len(genes)*0.5, 90, centre), stroke="#00C598", stroke_width=10) pos = 0.0 delta = 1.0 / len(genes) for n in genes: node0 = Node(n) node1 = Node(n) g1.add_node( node0, pos ) g2.add_node( node1, pos ) pos += delta h.axes = [m1, m2, g1, g2] bar = progressbar.ProgressBar() for e in bar(g.edges()): if e[0] in mirnas and e[1] in mirnas: # mirnas to mirnas h.connect(m2, e[0], 23, m1, e[1], -23, stroke_width = g.get_edge_data(*e)['w'] * 10, stroke_opacity = 0.035, stroke = 'grey') if e[0] in mirnas and e[1] in genes and e[1] not in mirna_genes: # mirnas to genes h.connect(m1, e[0], 23, g1, e[1], -23, stroke_width=g.get_edge_data(*e)['w'] * 10, stroke_opacity=0.035, stroke="grey") if e[1] in mirnas and e[0] in genes and e[0] not in mirna_genes: # mirnas to genes h.connect(m1, e[1], 23, g1, e[0], -23, stroke_width=g.get_edge_data(*e)['w'] * 10, stroke_opacity=0.035, stroke="grey") if e[0] in genes and e[1] in genes: # genes to genes h.connect(g1, e[0], 23, g2, e[1], -23, stroke_width = g.get_edge_data(*e)['w'] * 10, stroke_opacity = 0.035, stroke='grey') if e[0] in mirnas and e[1] in mirna_genes: # mirnas to mirna-genes h.connect(m2, e[0], -23, g2, e[1], 23, stroke_width=g.get_edge_data(*e)['w'] * 10, stroke_opacity=0.035, stroke="grey") if e[1] in mirnas and e[0] in mirna_genes: # mirnas to mirna-genes h.connect(m2, e[1], -23, g2, e[0], 23, 
stroke_width=g.get_edge_data(*e)['w'] * 10, stroke_opacity=0.035, stroke="grey") print "saving" h.save()
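The coords helper does all of the geometric work of placing axis endpoints around the plot centre, so a short, self-contained illustration may be useful. The sketch below re-implements the same polar-to-Cartesian conversion and prints the endpoints of one axis; the radii, angle and centre are arbitrary example values, not the ones computed in the script.

import math

def coords(radius, angle, origin=(0, 0)):
    # same polar-to-Cartesian conversion as the helper above
    return (origin[0] + round(radius * math.cos(math.radians(angle)), 2),
            origin[1] + round(radius * math.sin(math.radians(angle)), 2))

if __name__ == '__main__':
    centre = (1032, 1032)
    # an axis running from radius 20 to radius 200 at -90 degrees
    start = coords(20, -90, centre)
    end = coords(200, -90, centre)
    print("axis endpoints: %s -> %s" % (start, end))  # (1032.0, 1012.0) -> (1032.0, 832.0)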
Episode 73 – Post Draft Review And Fantasy Tips with Nick D. This week on Before And Laughter we go through our recent family league draft. We are joined by Nick D., who has been playing fantasy football since the days of sending in roster changes by fax! We also give some great player values and strategies for winning at fantasy! Have a great Labor Day!
''' @Author: Rohan Achar [email protected] ''' import sys from abc import * class Config: __metaclass__ = ABCMeta def __init__(self): #Number of Url Data Fetching Threads Allowed self.MaxWorkerThreads = 8 #Timeout(Seconds) for trying to get the next url from the frontier. self.FrontierTimeOut = 60 #Timeout(Seconds) for trying to get a free worker thread, (worker is taking too long maybe?) self.WorkerTimeOut = 60 #Timeout(Seconds) for getting data from the output queue self.OutBufferTimeOut = 60 #Timeout(Seconds) for getting data from a url self.UrlFetchTimeOut = 2 #The User Agent String that this crawler is going to identify itself as. http://tools.ietf.org/html/rfc2616#section-14.43 self.__UserAgentString = None #To allow resume of fetching from last point of closure. Set to False to always restart from seed set of urls. self.Resumable = True #Number of times to retry fetching a url if it fails self.MaxRetryDownloadOnFail = 5 #PolitenessDelay that the crawler is forced to adhere to. http://en.wikipedia.org/wiki/Web_crawler#Politeness_policy self.PolitenessDelay = 300 #The Persistent File to store current state of crawler for resuming (if Resumable is True) self.PersistentFile = "Persistent.shelve" #Total (Approximate) documents to fetch before stopping self.NoOfDocToFetch = -1 #The Max Depth of the page to go to while fetching (depth = distance of first discovery from seed urls) self.MaxDepth = -1 #Max size of page in bytes that is allowed to be fetched. (Only works for websites that send Content-Length in response header) self.MaxPageSize = 1048576 #Max size of output queue. If the HandleData function is slow, then output buffer might not clear up fast. #This enforces that the queue does not go beyond a certain size. #Set to 0 if you want unlimited size #Advantages of setting > 0: Fetch url waits for the buffer to become free when its full. If crawler crashes max of this size output is lost. #Disadvantage of setting > 0: Slows down the crawling. self.MaxQueueSize = 0 #This ignores the rules at robot.txt. Be very careful with this. Only make it True with permission of the host/API pulling that does not need robot rules. self.IgnoreRobotRule = False #This sets the mode of traversal: False -> Breadth First, True -> Depth First. self.DepthFirstTraversal = False def ValidateConfig(self): '''Validates the config to see if everything is in order. No need to extend this''' try: assert (self.UserAgentString != "" or self.UserAgentString != "Set This Value!") except AssertionError: print ("Set value of UserAgentString") sys.exit(1) try: assert (self.MaxWorkerThreads != 0) except AssertionError: print ("MaxWorkerThreads cannot be 0") sys.exit(1) @abstractmethod def GetSeeds(self): '''Returns the first set of urls to start crawling from''' return ["Sample Url 1", "Sample Url 2", "Etc"] @abstractmethod def HandleData(self, parsedData): '''Function to handle url data. Guaranteed to be Thread safe. parsedData = {"url" : "url", "text" : "text data from html", "html" : "raw html data"} Advisable to make this function light. Data can be massaged later. 
Storing data probably is more important''' print (parsedData["url"]) pass def AllowedSchemes(self, scheme): '''Function that allows the schemes/protocols in the set.''' return scheme.lower() in set(["http", "https", "ftp", b"http", b"https", b"ftp"]) @abstractmethod def ValidUrl(self, url): '''Function to determine if the url is a valid url that should be fetched or not.''' return True parsed = urlparse(url) try: return ".ics.uci.edu" in parsed.hostname.decode("utf-8") \ and not re.match(".*\.(css|js|bmp|gif|jpe?g|ico|png|tiff?|mid|mp2|mp3|mp4)$", parsed.path.decode("utf-8")) except TypeError: print ("TypeError for ", parsed) def GetTextData(self, htmlData): '''Function to clean up html raw data and get the text from it. Keep it small. Not thread safe, returns an object that will go into the parsedData["text"] field for HandleData function above''' import nltk return nltk.clean_html(htmlData) def ExtractNextLinks(self, url, rawData, outputLinks): '''Function to extract the next links to iterate over. No need to validate the links. They get validated at the ValudUrl function when added to the frontier Add the output links to the outputLinks parameter (has to be a list). Return Bool signifying success of extracting the links. rawData for url will not be stored if this function returns False. If there are no links but the rawData is still valid and has to be saved return True Keep this default implementation if you need all the html links from rawData''' from lxml import html,etree try: htmlParse = html.document_fromstring(rawData) htmlParse.make_links_absolute(url) except etree.ParserError: return False except etree.XMLSyntaxError: return False for element, attribute, link, pos in htmlParse.iterlinks(): outputLinks.append(link) return True def GetAuthenticationData(self): ''' Function that returns dict(top_level_url : tuple(username, password)) for basic authentication purposes''' return {}
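Because Config is an abstract base class, a concrete subclass makes the intended extension points clearer. The sketch below is hypothetical: the seed URL, the host filter and the idea of appending each page's text to a file are invented for illustration, and it assumes the Config class defined above is in scope (for example, in the same module).

class ExampleConfig(Config):
    """Hypothetical concrete configuration: crawl one site and dump its text."""

    def __init__(self):
        Config.__init__(self)
        # identify the crawler; the string here is only an example
        self.UserAgentString = "ExampleCrawler/0.1 ([email protected])"

    def GetSeeds(self):
        # starting points for the crawl (example URL)
        return ["http://www.example.com/"]

    def HandleData(self, parsedData):
        # keep this light: append the url and extracted text to a log file
        with open("crawl_output.txt", "a") as out:
            out.write(parsedData["url"] + "\n")
            out.write(parsedData["text"] + "\n\n")

    def ValidUrl(self, url):
        # only follow urls on the example host
        return "example.com" in url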
Charming in-town colonial within walking distance of Choate and downtown Wallingford. Features an open, spacious living room with fireplace, hardwood floors, an updated bathroom, a brand new roof, a 2-car garage and more. A perfect starter home, close to everything!
""" """ __author__ = "Xun Li <[email protected]>" __all__ = ['DataListCtrl','DataTablePanel','DataWidget'] import sys import wx import wx.lib.mixins.listctrl as listmix import wx.grid as gridlib import stars from stars.model import * from stars.visualization.EventHandler import * from stars.visualization.AbstractWidget import * from stars.visualization.AbstractCanvas import AbstractCanvas class DataGrid(wx.grid.Grid): def __init__(self, parent, dbf): wx.grid.Grid.__init__(self, parent, -1) self.dbf = dbf n_cols = len(dbf.header) n_rows = dbf.n_records self.CreateGrid(n_rows, n_cols) raw_dbf_data = [] for i in range(n_rows): row_data = dbf.read_record(i) for j in range(n_cols): self.SetCellValue(i,j, str(row_data[j])) raw_dbf_data.append(row_data) self.dbf.records = raw_dbf_data for i in range(n_cols): self.SetColLabelValue(i, dbf.header[i]) self.Bind(wx.EVT_IDLE, self.OnIdle) def OnIdle(self, event): pass class DataTablePanel1(wx.Panel, AbstractCanvas): """ Panel displaying dbf DataTable. The wxPanel container for DataList (wx.ListCtrl). """ def __init__(self, parent, shapefileObject, name): wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS) from stars.visualization.maps.BaseMap import PolygonLayer, PointLayer, LineLayer self.layer = shapefileObject self.dbf = self.layer.dbf self.name = name self.parent = parent self.current_selected = {} # {id: centroid} self.isEvtHandle = False self.table = DataGrid(self, self.dbf) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(self.table, 1, wx.EXPAND) if self.layer.shape_type == stars.SHP_POINT: self.draw_layer = PointLayer(self,self.layer) elif self.layer.shape_type == stars.SHP_LINE: self.draw_layer = LineLayer(self, self.layer) elif self.layer.shape_type == stars.SHP_POLYGON: self.draw_layer = PolygonLayer(self,self.layer) self.SetSizer(sizer) self.SetAutoLayout(True) # table events #self.table.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected) #self.table.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected) # register event_handler to THE OBSERVER while parent != None: if isinstance(parent, stars.Main): self.observer = parent.observer parent = parent.GetParent() self.Register(stars.EVT_OBJS_SELECT, self.OnRecordsSelect) self.Register(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect) self.parent.Bind(wx.EVT_CLOSE, self.OnClose) # OnClose Only send to Frame/Dialog def OnClose(self, event): self.Unregister(stars.EVT_OBJS_SELECT, self.OnRecordsSelect) self.Unregister(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect) event.Skip() def OnRecordsSelect(self, event): pass def OnNoRecordSelect(self, event): pass class DataListCtrl(wx.ListCtrl): """ Virtual ListCtrl for fast display on large DBF file """ def __init__(self, parent, ID, dbf, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES): wx.ListCtrl.__init__(self, parent, ID, pos, size, style) self.dbf = dbf self.SetItemCount(dbf.n_records) n_columns = len(dbf.header) self.InsertColumn(0, "") for i,item in enumerate(dbf.header): self.InsertColumn(i+1, item) self.il = wx.ImageList(16,16) open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, (16,16)) self.idx1 = self.il.Add(open_bmp) self.SetImageList(self.il,wx.IMAGE_LIST_NORMAL) def OnGetItemText(self, item, col): if col == 0: return str(item+1) #return self.dbf.read_record(item)[col] return self.dbf.read_record(item)[col-1] class DataTablePanel(wx.Panel, AbstractCanvas,listmix.ColumnSorterMixin): """ Panel displaying dbf DataTable. The wxPanel container for DataList (wx.ListCtrl). 
""" def __init__(self, parent, shapefileObject, name): wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS) from stars.visualization.maps.BaseMap import PolygonLayer, PointLayer, LineLayer self.layer = shapefileObject self.dbf = self.layer.dbf self.name = name self.parent = parent self.current_selected = {} # {id: centroid} self.isEvtHandle = False tID = wx.NewId() self.table = DataListCtrl( self, tID, self.dbf, style=wx.LC_REPORT | wx.LC_VIRTUAL #| wx.BORDER_SUNKEN | wx.BORDER_NONE | wx.LC_EDIT_LABELS #| wx.LC_SORT_ASCENDING #| wx.LC_NO_HEADER | wx.LC_VRULES | wx.LC_HRULES #| wx.LC_SINGLE_SEL ) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(self.table, 1, wx.EXPAND) if self.layer.shape_type == stars.SHP_POINT: self.draw_layer = PointLayer(self,self.layer) elif self.layer.shape_type == stars.SHP_LINE: self.draw_layer = LineLayer(self, self.layer) elif self.layer.shape_type == stars.SHP_POLYGON: self.draw_layer = PolygonLayer(self,self.layer) self.SetSizer(sizer) self.SetAutoLayout(True) # table events self.table.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected) self.table.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected) # register event_handler to THE OBSERVER while parent != None: if isinstance(parent, stars.Main): self.observer = parent.observer parent = parent.GetParent() self.Register(stars.EVT_OBJS_SELECT, self.OnRecordsSelect) self.Register(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect) self.parent.Bind(wx.EVT_CLOSE, self.OnClose) # OnClose Only send to Frame/Dialog def OnClose(self, event): self.Unregister(stars.EVT_OBJS_SELECT, self.OnRecordsSelect) self.Unregister(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect) event.Skip() def update_table(self, dbf): """ Get and display data from dbf File on DataList (wx.ListCtrl) """ self.table.ClearAll() self.table.SetItemCount(dbf.n_records) n_columns = len(dbf.header) self.table.InsertColumn(0, "ID") for i,item in enumerate(dbf.header): self.table.InsertColumn(i+1, item) def OnItemSelected(self, event): if self.isEvtHandle == False: # prevent backforce Event if self.table.SelectedItemCount == 1: self.current_selected = {} if not self.current_selected.has_key(event.m_itemIndex): dummy_region = [] # find centroid of current_select objec if self.layer.shape_type == stars.SHP_POLYGON: centroids = self.layer.centroids[event.m_itemIndex] for centroid in centroids: dummy_region += centroid + centroid else: point = list(self.layer.shape_objects[event.m_itemIndex]) dummy_region = point + point self.current_selected[event.m_itemIndex] = dummy_region # trigger Upadte Event to notify other # widgets to drawing the selected items self.OnRecordsSelect(None) event.Skip() def OnItemDeselected(self, event): if self.isEvtHandle == False: # prevent backforce Event if self.current_selected.has_key(event.m_itemIndex): self.current_selected.pop(event.m_itemIndex) # trigger Upadte Event to notify other # widgets to drawing the selected items self.OnRecordsSelect(None) event.Skip() def unhighlight_selected(self): for item in self.current_selected: self.table.SetItemState(item, 0, wx.LIST_STATE_SELECTED)# | wx.LIST_STATE_FOCUSED) def highlight_selected(self): if len(self.current_selected) > 0: first = self.current_selected.keys()[0] for item in self.current_selected: if item == first: self.table.EnsureVisible(item) self.table.SetItemState(item, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)#|wx.LIST_STATE_FOCUSED) #------------------------------ # Belows are Event handlers #------------------------------ def OnRecordsSelect(self, event): if event == 
None: # trigger other widgets data = AbstractData(self) data.shape_ids[self.name] = self.current_selected.keys() data.boundary = self.current_selected.values() self.UpdateEvt(stars.EVT_OBJS_SELECT, data) else: # trigged by other widgets self.isEvtHandle = True data = event.data if data.shape_ids.has_key(self.name): # unselect first self.unhighlight_selected() # then select trigged selected_id_list = data.shape_ids[self.name] self.current_selected = {} for i in selected_id_list: self.current_selected[i] = None self.highlight_selected() else: # unselect first self.unhighlight_selected() self.current_selected = {} # try to test if query regions can be used # to find shape ids query_regions = data.boundary if query_regions == None or len(query_regions) == 0: pass else: if isinstance(query_regions[0], float): query_regions = [query_regions] for region in query_regions: shape_ids, query_region = self.draw_layer.get_selected_by_region(None, region) for id in shape_ids: self.current_selected[id] = None self.highlight_selected() self.isEvtHandle = False def OnNoRecordSelect(self, event): self.isEvtHandle = True for item in self.current_selected: self.table.SetItemState(item, 0, wx.LIST_STATE_SELECTED)# | wx.LIST_STATE_FOCUSED) self.isEvtHandle = False class DataWidget(AbstractWidget): """ Widget for displaying dbf table, the layout should be like this: ------------------------- | toolbar | -------------------------- | | | | | Table | | | | | -------------------------- """ def __init__(self, parent, shp, name): self.shp= shp self.name = name AbstractWidget.__init__(self, parent, self.name, pos=(60, 60), size=(600, 350)) #self.toolbar = self._create_toolbar() #self.SetToolBar(self.toolbar) #self.toolbar.Realize() self.status_bar = self.CreateStatusBar() self.data_table = DataTablePanel(self,self.shp,self.name) self.canvas = self.data_table sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(self.data_table, 1, wx.EXPAND) self.SetSizer(sizer) self.SetAutoLayout(True) def _create_toolbar(self): tsize = (16,16) toolbar = self.CreateToolBar() open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize) close_bmp = wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_TOOLBAR, tsize) toolbar.AddLabelTool(1001, "Filter Data", open_bmp) toolbar.AddLabelTool(1002, "Close", close_bmp) toolbar.EnableTool(1002, False) self.Bind(wx.EVT_TOOL, self.OnFilterData, id=1001) return toolbar def OnFilterData(self,event): frame = SpaceTimeQuery(self.data_table, "SpaceTime Query", self.dbf) frame.Show()
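DataListCtrl relies on wxPython's virtual list mode, in which the control stores no rows itself and instead asks OnGetItemText for each visible cell on demand; that is what keeps large dbf files responsive. A stripped-down, self-contained sketch of the same pattern, with fabricated rows instead of a dbf file, might look like this (window title and row contents are illustrative only).

import wx

class VirtualList(wx.ListCtrl):
    """Minimal virtual list: rows are produced on demand in OnGetItemText."""

    def __init__(self, parent, n_rows):
        wx.ListCtrl.__init__(self, parent, style=wx.LC_REPORT | wx.LC_VIRTUAL)
        self.InsertColumn(0, "ID")
        self.InsertColumn(1, "Value")
        # tell the control how many rows exist; nothing is loaded up front
        self.SetItemCount(n_rows)

    def OnGetItemText(self, item, col):
        # called lazily by wx for each cell that becomes visible
        return str(item + 1) if col == 0 else "row %d data" % item

if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title="Virtual list sketch", size=(300, 400))
    VirtualList(frame, n_rows=1000000)
    frame.Show()
    app.MainLoop()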
Bell Helmets has been in the helmet business since 1950! That's over 60 years of research, development, technological advancement and innovative ideas that have made their helmets some of the most in-demand motorcycle helmets on the market. Sure, there are plenty of other companies that produce high-quality helmets to ride in, but Bell uses some of the best technology available to improve the protection, comfort and style of its helmets. Bell Helmets was founded by racing enthusiast Roy Richter, who saw a need for head protection in car racing. Bell makes hundreds of different helmets for road racing, cycling and motocross. The Riot helmet from Bell has great features that make it a strong option for motorcycle riders around the world. The Bell Riot has a low-profile fiberglass composite shell that gives it a sleek appearance. There are five different shell sizes and three different color schemes to choose from for a personalized fit. The interior is made with an anti-bacterial liner for comfort and hygiene, and the helmet comes with a NutraFog UV-protective shield with anti-fog and anti-scratch properties. This is a fairly priced Bell helmet that will help keep you protected throughout your ride. Crashes are an unfortunate part of our sport and hobby, and head injuries are among the most common injuries in it. Thanks to the technology Bell puts into its helmets, the Riot will help mitigate those injuries. Nothing is guaranteed to prevent injuries, but these helmets will definitely help reduce the chance of a concussion. Be smart out there and ride safe!
# -*- coding: utf-8 -*- """ Created on Tue Dec 30 19:08:33 2014 @author: Nik """ import json, requests, sys, codecs, nltk from HTMLParser import HTMLParser #function to strip html tags: taken from http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python """class MLStripper(HTMLParser): def __init__(self): self.reset() self.fed = [] def handle_data(self, d): self.fed.append(d) def get_data(self): return ''.join(self.fed) def strip_tags(html): s = MLStripper() s.feed(html) return s.get_data()""" # Checks title for spacing so that the space can be replaced with an underscore in the parameters for the URL. sys.argv[1] # is used so PATH variable isn't put into parameters for URL title = sys.argv[1] x = title.replace(" ", "_") if " " in title else title #Parameters to be passed into the url parameters = {'format' : 'json', 'action' : 'query', 'titles' : x, 'prop' : 'revisions', 'rvprop' : 'ids', 'continue' : '', 'rvlimit' : '10'} #getting the content of the url r = requests.get('http://en.wikipedia.org/w/api.php', params=parameters) #turning that content into json and loading it data = r.json() #writing json content to file with open('testedRevData.json', 'w') as outfile: json.dump(data, outfile) #writing plaintext to file """with codecs.open('testedRevData.txt', 'w', 'utf-8') as file2: ids = data['query']['pages'].keys() text = ' '.join([data['query']['pages'][idx]['extract'] for idx in ids]) text = strip_tags(text) file2.write(text)"""
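The request above fetches only the ten most recent revision IDs; paging through a full history relies on the 'continue' block that the MediaWiki API returns, which the client merges back into its parameters and resubmits. A hedged sketch of that loop is below -- it uses the same query parameters as the script and collects revision IDs into a list (the function name and the commented example are illustrative).

import requests

def fetch_all_revision_ids(title, limit_per_request=10):
    """Page through a Wikipedia article's revision IDs using 'continue'."""
    params = {'format': 'json', 'action': 'query', 'titles': title,
              'prop': 'revisions', 'rvprop': 'ids', 'continue': '',
              'rvlimit': limit_per_request}
    rev_ids = []
    while True:
        data = requests.get('https://en.wikipedia.org/w/api.php', params=params).json()
        for page in data['query']['pages'].values():
            rev_ids.extend(rev['revid'] for rev in page.get('revisions', []))
        if 'continue' not in data:
            break
        params.update(data['continue'])  # carries rvcontinue forward
    return rev_ids

# example (requires network access; a large article needs many requests):
# print(len(fetch_all_revision_ids('Python_(programming_language)')))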
Melinda Curtis is the award-winning author of both sweet and hot contemporary romances. She lives in Northern California and loves writing romances about women who don't know how strong they are until a hero comes along to show them. Of course, she also enjoys the wry, humorous power struggle of falling in love. After all, who wants the man to have the last word? Her work has been called "smart and sassy" by Jayne Ann Krentz.
import pyspark.ml.linalg as ml_linalg from pyspark.mllib.linalg.distributed import MatrixEntry from pyspark.ml import feature from pyspark.sql.functions import udf import numpy as np def _compute_bfs(vec_1, vec_2, sigma=0.42): return np.exp(-vec_1.squared_distance(vec_2) / sigma ** 2) def _tolerance_cut(value, tol=10e-10): if value <= tol: return 0 else: return value def _to_dense(x): try: return ml_linalg.DenseVector(x.toArray()) except Exception as e: print(e) return x def _make_feature_vector(df, feature_col=None): return 'features', feature.VectorAssembler(inputCols=feature_col, outputCol='features').transform(df) def _scale_data_frame(df, vector=None): if vector: df = df.withColumn(vector, udf(_to_dense, ml_linalg.VectorUDT())(vector)) scale = feature.StandardScaler( withMean=True, withStd=True, inputCol=vector, outputCol='std_vector') model = scale.fit(df) return (model .transform(df) .select([i for i in df.columns if i != vector] + [scale.getOutputCol()]) .withColumnRenamed(existing=scale.getOutputCol(), new=vector)) def do_cartesian(sc, df, id_col=None, feature_col=None, **kwargs): import functools sigma = kwargs.get('sigma', 0.42) tol = kwargs.get('tol', 10e-10) standardize = kwargs.get('standardize', True) if isinstance(feature_col, list): feature_col, scaled_df = _make_feature_vector(df=df, feature_col=feature_col) if standardize: scaled_df = _scale_data_frame(scaled_df, vector=feature_col) if id_col: vector_dict = scaled_df.select(id_col, feature_col).rdd.collectAsMap() else: vector_dict = (scaled_df.select(feature_col) .rdd.zipWithIndex().map(lambda x: (x[1], x[0][feature_col])) .collectAsMap()) bc_vec = sc.broadcast(vector_dict) index_rdd = df.rdd.map(lambda x: x[id_col]).cache() bfs = functools.partial(_compute_bfs) cartesian_demon = index_rdd.cartesian(index_rdd).filter(lambda x: x[0] >= x[1]) cartesian_distance_demon = cartesian_demon.map( lambda x: MatrixEntry(x[0], x[1], bfs( vec_1=bc_vec.value.get(x[0]), vec_2=bc_vec.value.get(x[1]), sigma=sigma)) ) index_rdd.unpersist() # Memory cleanup! tol_cut = functools.partial(_tolerance_cut, tol=tol) return cartesian_distance_demon.filter(lambda x: tol_cut(x.value))
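The heart of do_cartesian is the Gaussian (RBF) similarity computed in _compute_bfs, together with the tolerance cut that drops near-zero matrix entries. The pure-numpy sketch below reproduces that arithmetic outside of Spark so the effect of sigma and tol can be checked on two small vectors; the vectors themselves are arbitrary examples.

import numpy as np

def rbf_similarity(v1, v2, sigma=0.42):
    # same formula as _compute_bfs: exp(-||v1 - v2||^2 / sigma^2)
    return np.exp(-np.sum((v1 - v2) ** 2) / sigma ** 2)

def tolerance_cut(value, tol=10e-10):
    # same thresholding as _tolerance_cut
    return 0 if value <= tol else value

if __name__ == '__main__':
    a = np.array([0.0, 1.0])
    b = np.array([0.1, 1.2])
    near = rbf_similarity(a, b)            # close points -> similarity near 1
    far = rbf_similarity(a, np.array([5.0, 5.0]))
    print(near, tolerance_cut(near))
    print(far, tolerance_cut(far))         # distant points fall below tol and become 0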
Jim joined IPIECA in 2017 as Technical Director, Climate and Energy, after nearly 40 years in the oil and gas industry. He graduated in Chemistry from Oxford University and spent the first part of his career in various technology roles. During that time he worked in the UK, France and the USA, leading global teams that developed new fuels and lubricants and provided support to refining and marketing. More recently, he worked on environmental and safety functions for the upstream business, with a focus on policy development.
# !/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Basically copied line for line and adapted from Greg Neagle's Munki project. # See: https://github.com/munki/munki/blob/master/code/client/munkilib/munkicommon.py#L1507 import os import tempfile import subprocess import shutil from glob import glob from autopkglib import ProcessorError from DmgMounter import DmgMounter __all__ = ["FlatPkgVersioner"] class FlatPkgVersioner(DmgMounter): description = ("Expands PackageInfo and Distribution information from a flat package using xar," "then parses version information") input_variables = { "flat_pkg_path": { "required": True, "description": ("Path to a flat package. " "Can point to a globbed path inside a .dmg which will " "be mounted."), } } output_variables = { "version": { "description": "Version of the item.", }, } source_path = None def main(self): # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.env[ 'flat_pkg_path'].partition(".dmg/") dmg_path += ".dmg" try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) self.source_path = glob( os.path.join(mount_point, dmg_source_path))[0] else: # Straight copy from file system. self.source_path = self.env['flat_pkg_path'] infoarray = self.getFlatPackageInfo(self.source_path) self.output("Unpacked %s to %s" % (self.source_path, self.env['destination_path'])) finally: if dmg: self.unmount(dmg_path) def getFlatPackageInfo(pkgpath): """ returns array of dictionaries with info on subpackages contained in the flat package """ infoarray = [] # get the absolute path to the pkg because we need to do a chdir later abspkgpath = os.path.abspath(pkgpath) # make a tmp dir to expand the flat package into pkgtmp = tempfile.mkdtemp(dir=tmpdir) # record our current working dir cwd = os.getcwd() # change into our tmpdir so we can use xar to unarchive the flat package os.chdir(pkgtmp) cmd = ['/usr/bin/xar', '-xf', abspkgpath, '--exclude', 'Payload'] proc = subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (unused_output, err) = proc.communicate() if proc.returncode == 0: currentdir = pkgtmp packageinfofile = os.path.join(currentdir, 'PackageInfo') if os.path.exists(packageinfofile): infoarray = parsePkgRefs(packageinfofile) if not infoarray: # found no PackageInfo file # so let's look at the Distribution file distributionfile = os.path.join(currentdir, 'Distribution') if os.path.exists(distributionfile): infoarray = parsePkgRefs(distributionfile, path_to_pkg=pkgpath) if not infoarray: # No PackageInfo file or Distribution file # look for subpackages at the top level for item in listdir(currentdir): itempath = os.path.join(currentdir, item) if itempath.endswith('.pkg') and os.path.isdir(itempath): packageinfofile = os.path.join(itempath, 'PackageInfo') if os.path.exists(packageinfofile): infoarray.extend(parsePkgRefs(packageinfofile)) else: raise ProcessorError(err) # change back to original working dir os.chdir(cwd) shutil.rmtree(pkgtmp) return infoarray if 
__name__ == '__main__': processor = FlatPkgVersioner() processor.execute_shell()
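getFlatPackageInfo calls parsePkgRefs, which is not included in this excerpt (in Munki it walks the pkg-info and pkg-ref elements of the expanded XML). The stand-in below is a hedged sketch using only the standard library; it assumes the conventional attribute names ('identifier'/'id' and 'version') found in PackageInfo and Distribution files and is not the Munki original.

from xml.dom import minidom

def parsePkgRefs(filename, path_to_pkg=None):
    """Sketch of the missing helper: collect identifier/version pairs from
    PackageInfo or Distribution XML. Not Munki's implementation."""
    info = []
    dom = minidom.parse(filename)
    # PackageInfo files use <pkg-info ...>, Distribution files use <pkg-ref ...>
    for tag, id_attr in (('pkg-info', 'identifier'), ('pkg-ref', 'id')):
        for ref in dom.getElementsByTagName(tag):
            if ref.hasAttribute('version'):
                info.append({'packageid': ref.getAttribute(id_attr),
                             'version': ref.getAttribute('version')})
    return info

# usage sketch, assuming an expanded flat package in the current directory:
# print([item['version'] for item in parsePkgRefs('PackageInfo')])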
''' Provide a request handler that returns a page displaying a document. ''' from __future__ import absolute_import, print_function import logging log = logging.getLogger(__name__) import hashlib import random import time from tornado.web import RequestHandler from bokeh.embed import server_html_page_for_session from bokeh.settings import settings # Use the system PRNG for session id generation (if possible) # NOTE: secure random string generation implementation is adapted # from the Django project. Reference: # https://github.com/django/django/blob/0ed7d155635da9f79d4dd67e4889087d3673c6da/django/utils/crypto.py try: random = random.SystemRandom() using_sysrandom = True except NotImplementedError: import warnings warnings.warn('A secure pseudo-random number generator is not available ' 'on your system. Falling back to Mersenne Twister.') using_sysrandom = False def get_random_string(length=36, allowed_chars='abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'): """ Return a securely generated random string. The default length of 12 with the a-z, A-Z, 0-9 character set returns a 71-bit value. log_2((26+26+10)^12) =~ 71 bits """ if not using_sysrandom: # This is ugly, and a hack, but it makes things better than # the alternative of predictability. This re-seeds the PRNG # using a value that is hard for an attacker to predict, every # time a random string is required. This may change the # properties of the chosen random sequence slightly, but this # is better than absolute predictability. random.seed( hashlib.sha256( ("%s%s%s" % ( random.getstate(), time.time(), settings.SECRET_KEY)).encode('utf-8') ).digest()) return ''.join(random.choice(allowed_chars) for i in range(length)) class DocHandler(RequestHandler): ''' Implements a custom Tornado handler for document display page ''' def __init__(self, tornado_app, *args, **kw): self.application_context = kw['application_context'] self.bokeh_websocket_path = kw['bokeh_websocket_path'] # Note: tornado_app is stored as self.application super(DocHandler, self).__init__(tornado_app, *args, **kw) def initialize(self, *args, **kw): pass def get(self, *args, **kwargs): session_id = self.get_argument("bokeh-session-id", default=None) if session_id is None: session_id = get_random_string() session = self.application_context.create_session_if_needed(session_id) websocket_url = self.application.websocket_url_for_request(self.request, self.bokeh_websocket_path) page = server_html_page_for_session(session_id, self.application.resources(self.request), title=session.document.title, websocket_url=websocket_url) self.set_header("Content-Type", 'text/html') self.write(page)
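A quick way to sanity-check the session-id scheme above is to compute how much entropy the generated string carries. The sketch below does that arithmetic and produces an id the same way the handler does -- a 36-character draw from a 62-symbol alphabet using the system PRNG; it is a standalone illustration, not part of the Bokeh API.

import math
import random
import string

ALPHABET = string.ascii_letters + string.digits   # 62 symbols
LENGTH = 36

def make_session_id(length=LENGTH, alphabet=ALPHABET):
    rng = random.SystemRandom()                    # OS-backed entropy source
    return ''.join(rng.choice(alphabet) for _ in range(length))

if __name__ == '__main__':
    bits = LENGTH * math.log2(len(ALPHABET))       # roughly 214 bits for 36 chars
    print("entropy: %.1f bits" % bits)
    print("example id:", make_session_id())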
"Tammy Wynette, Mysteries from the Grave" my featured artist of this month for Insider Viewpoint Magazine is about Tammy Wynette. Tammy Wynette, the name is synonymous with being one of the greatest Country Music Singers and Writers of all time. She also became a pop-cross over artist with multiple songs she sang and penned; Grammy Award Recipient, CMA Female Vocalist of the Year (for several years), ACM Top Female Vocalist; and so many accomplishments in the music industry that it is staggering. The story begins in Itawamba County, Mississippi on May 5, 1942 as a frail little country girl was born, "Wynette Pugh." Her childhood was difficult and she was raised "picking cotton" for her grandparents. Tammy was one of the 2500+ artists with whom THE JORDANAIRES recorded their famed background vocals. They performed on almost every hit record that Tammy Wynette recorded, totaling an amazing forty-seven sessions of at least four singles per session, on Epic Records. They contributed on such great songs as "Stand by Your Man," "Your Good Girl’s Gonna Go Bad," "He Loves Me All the Way," "I Don’t Wanna Play House"; and a couple hundred others. Additional musical recording sessions with Tammy included seventeen sessions with her and George Jones ("Golden Ring," "We’re Gonna Hold On," "Near You" . . . and many others). Besides recording sessions, they recorded a Pepsi commercial, a special appearance at the White House, a Mardi Gras Event in New Orleans, and two Opryland Concerts with her. Gordon Stoker of the Jordanaires says Tammy was "a sweet, kind person who was always obliging to everyone including her fans." The Jordanaires greatly loved and admired Tammy and were great friends. She was always inviting them to her house to cook them a good country down-home meal! Gordon said, "Tammy always remembered draggin’ that bag of cotton behind her on her grandparents’ farm. Because of these humble beginnings, Tammy also chose to keep her hairdresser’s license as a just in case music business did not work out . . . even years after she had many hit records." Ray Walker, bass singer of the Jordanaires, sold Tammy her first stretch-limousine after she had a couple hit records. He had bought the royal-blue 1967 Cadillac Limousine from the Oak Ridge Boys and owned the car for a few months when Tammy and her second husband Don Chapel wanted it, so Ray sold them their first limousine. I, Sharon Haynes, had the pleasure of working with Don Chapel, it was my first "gig" in Nashville. Don and his son Mike offered me a singing job in a couple of Nashville clubs, one at the famed Printers Alley. For several days I did not realize I was working with Tammy Wynette’s Ex-husband and Stepson. Don and Mike both talked about how much they loved Tammy. Both felt Don never received his due from his direct involvement in the launching of her career. Don said, "I cleaned her up . . . when I found her, she was skinny and nearly starving to death with three young daughters. She had just moved to Nashville and was trying to pursue the path of Country Music Stardom. Tammy was broke and no place to go. Don said, I took them in, fixed her teeth and bought her new clothes for the introduction to Billy Sherrill, I insisted Billy hear her songs. Billie immediately signed her to Epic Records." Shortly afterwards George Jones wanted to record duets with Tammy and reportedly fell in love with Tammy and decided to take her away from Don. 
Don said, "George Jones walked into my house, took Tammy by the arm and said, I’m in love with your wife, and she’s leaving with me." Don and Tammy’s marriage ended, and she married the already famous George Jones. I had the pleasure of meeting Tammy Wynette in Wembley, England during a performance with The Jordanaires. They billed us with Tammy and several Opry stars on the "International Country Music Festival" held in Wembley Stadium. As they were introducing me to Tammy, she gave me a big smile and a hug. I was so impressed. That a star of her stature remained so open, warm and approachable was remarkable; she still revered the humbling effect of those "cotton pickin’" days! A few months ago, during a break from our show, I watched the Tammy Wynette biography on A&E. The very next morning at 6:00 a.m. I awoke to the news that Tammy Wynette was dead. "How can that be . . . I’ve seen her all my life . . . DEAD?!" A mother of four, she endured the tremendous pressures of five marriages and four divorces; coupled with the expectations of her work, the overall demands on her were monumental. Various illnesses had, at times, driven her to become addicted to prescription pain medication, for which she received therapy. What happened in her final moments of life, and immediately afterward, is still a mystery, and the investigation is ongoing even today. The investigation is going to be messy.
# encoding: utf-8 # This file is part of Guacamole. # # Copyright 2012-2015 Canonical Ltd. # Written by: # Zygmunt Krynicki <[email protected]> # # Guacamole is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License version 3, # as published by the Free Software Foundation. # # Guacamole is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Guacamole. If not, see <http://www.gnu.org/licenses/>. """ The essence of guacamole. This module defines the three essential core classes: :class:`Ingredient`, :class:`Bowl`, :class:`Context`. All of those have stable APIs. """ from __future__ import absolute_import, print_function, unicode_literals import logging import sys __all__ = ( 'Bowl', 'Context', 'Ingredient', ) _logger = logging.getLogger('guacamole') class Ingredient(object): """ Part of guacamole. Ingredients are a mechanism for inserting functionality into Guacamole. The sequence of calls to ingredient methods is as follows: - :meth:`added()` The added method is where an ingredient can advertise itself to other ingredients that it explicitly collaborates with. - :meth:`preparse()` The preparse method is where ingredients can have a peek at the command line arguments. This can serve to optimize further actions. Essentially guacamole allows applications to parse arguments twice and limit the actions needed to do that correctly to the essential minimum required. - :meth:`early_init()` The early initialization method can be used to do additional initialization. It can take advantage of the fact that the whole command line arguments are now known and may have been analyzed further by the preparse method. - :meth:`parse()` The parse method is where applications are expected to fully understand command line arguments. This method can abort subsequent execution if arguments are wrong in in some way. After parsing command line arguments the application should be ready for execution. - :meth:`late_init()` The late initialization method mimics the early initialization method but is called after parsing all of the command line arguments. Again, it can be used to prepare addiotional resources necessary for a given application. - :meth:`dispatch()` The dispatch method is where applications execute the bulk of their actions. Dispatching is typically done with one of the standard ingredients which will locate the appropriate method to call into the application. Depending on the outcome of the dispatch (if an exception is raised or not) one of :meth:`dispatch_succeeded()`` or :meth:`dispatch_failed()` is called. - :meth:`shutdown()` This is the last method called on all ingredients. Each of those methods is called with a context argument (:class:`Context:`). A context is a free-for-all environment where ingredients can pass data around. There is no name-spacing. Ingredients should advertise what they do with the context and what to expect. """ def __str__(self): """ Get the string representation of this ingredient. The string method just returns the class name. Since the ingredient is an implemenetation detail it does not have anything that applications should show to the user. 
""" return self.__class__.__name__ def added(self, context): """Ingredient method called before anything else.""" def build_early_parser(self, context): """Ingredient method called to build the early parser.""" def preparse(self, context): """Ingredient method called to pre-parse command line aruments.""" def early_init(self, context): """Ingredient method for early initialization.""" def build_parser(self, context): """Ingredient method called to build the full parser.""" def parse(self, context): """Ingredient method called to parse command line arguments.""" def late_init(self, context): """Ingredient method for late initialization.""" def dispatch(self, context): """ Ingredient method for dispatching (execution). .. note:: The first ingredient that implements this method and returns something other than None will stop command dispatch! """ def dispatch_succeeded(self, context): """Ingredient method called when dispatching is correct.""" def dispatch_failed(self, context): """Ingredient method called when dispatching fails.""" def shutdown(self, context): """Ingredient method called after all other methods.""" class Context(object): """ Context for making guacamole with ingredients. A context object is created and maintained throughout the life-cycle of an executing tool. A context is passed as argument to all ingredient methods. Since context has no fixed API anything can be stored and loaded. Particular ingredients document how they use the context object. """ def __repr__(self): """ Get a debugging string representation of the context. The debugging representation shows all of the *names* of objects added to the context by various ingredients. Since the actual object can have large and complex debugging representation containing that representation was considered as a step against understanding what is in the context. """ return "<Context {{{}}}>".format( ', '.join(sorted(self.__dict__.keys()))) class Bowl(object): """ A vessel for preparing guacamole out of ingredients. .. note:: Each Bowl is single-use. If you eat it you need to get another one as this one is dirty and cannot be reused. """ def __init__(self, ingredients): """Prepare a guacamole out of given ingredients.""" self.ingredients = ingredients self.context = Context() self.context.bowl = self self.context.spices = set() def add_spice(self, spice): """ Add a single spice the bowl. """ self.context.spices.add(spice) def has_spice(self, spice): """ Check if a given spice is being used. This method can be used to construct checks if an optional ingredient feature should be enabled or not. Spices are simply strings that describe optional features. """ return spice in self.context.spices def eat(self, argv=None): """ Eat the guacamole. :param argv: Command line arguments or None. None means that sys.argv is used :return: Whatever is returned by the first ingredient that agrees to perform the command dispatch. The eat method is called to run the application, as if it was invoked from command line directly. """ # The setup phase, here KeyboardInterrupt is a silent sign to exit the # application. Any error that happens here will result in a raw # backtrace being printed to the user. try: self.context.argv = argv self._added() self._build_early_parser() self._preparse() self._early_init() self._build_parser() self._parse() self._late_init() except KeyboardInterrupt: self._shutdown() return # The execution phase. Here we differentiate SystemExit from all other # exceptions. 
SystemExit is just re-raised as that's what any piece of # code can raise to ask to exit the currently running application. All # other exceptions are recorded in the context and the failure-path of # the dispatch is followed. In other case, when there are no # exceptions, the success-path is followed. In both cases, ingredients # are shut down. try: return self._dispatch() except SystemExit: raise except BaseException: (self.context.exc_type, self.context.exc_value, self.context.traceback) = sys.exc_info() self._dispatch_failed() else: self._dispatch_succeeded() finally: self._shutdown() def _added(self): """Run the added() method on all ingredients.""" for ingredient in self.ingredients: ingredient.added(self.context) def _build_early_parser(self): """Run build_early_parser() method on all ingredients.""" for ingredient in self.ingredients: ingredient.build_early_parser(self.context) def _preparse(self): """Run the peparse() method on all ingredients.""" for ingredient in self.ingredients: ingredient.preparse(self.context) def _early_init(self): """Run the early_init() method on all ingredients.""" for ingredient in self.ingredients: ingredient.early_init(self.context) def _build_parser(self): """Run build_parser() method on all ingredients.""" for ingredient in self.ingredients: ingredient.build_parser(self.context) def _parse(self): """Run the parse() method on all ingredients.""" for ingredient in self.ingredients: ingredient.parse(self.context) def _late_init(self): """Run the late_init() method on all ingredients.""" for ingredient in self.ingredients: ingredient.late_init(self.context) def _dispatch(self): """Run the dispatch() method on all ingredients.""" for ingredient in self.ingredients: result = ingredient.dispatch(self.context) if result is not None: return result def _dispatch_succeeded(self): """Run the dispatch_succeeded() method on all ingredients.""" for ingredient in self.ingredients: ingredient.dispatch_succeeded(self.context) def _dispatch_failed(self): """Run the dispatch_failed() method on all ingredients.""" for ingredient in self.ingredients: ingredient.dispatch_failed(self.context) def _shutdown(self): """Run the shutdown() method on all ingredients.""" for ingredient in self.ingredients: ingredient.shutdown(self.context)
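To see the ingredient life-cycle end to end, it helps to push a trivially small application through a Bowl. The sketch below assumes the classes above are importable (the import path guacamole.core is an assumption based on the package layout); the Greeter ingredient and its greeting are invented for illustration.

from guacamole.core import Bowl, Ingredient   # assumed import path

class Greeter(Ingredient):
    """Toy ingredient: stashes a name early on and dispatches a greeting."""

    def early_init(self, context):
        # anything stored on the context is visible to later phases
        context.name = "world"

    def dispatch(self, context):
        # returning a non-None value stops further dispatch
        return "Hello, {}!".format(context.name)

if __name__ == '__main__':
    bowl = Bowl([Greeter()])
    print(bowl.eat(argv=[]))   # -> Hello, world!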
Daylight Saving Time started on Sunday, March 11 at 2:00 a.m. That means this week will be a little rough: It will be hard to wake up, and there’ll be an increase in heart attacks and car crashes. To make the switch a little easier, you can take advantage of what scientists have learned about circadian rhythms. Daylight Saving Time in the US took effect in the early morning hours of Sunday, March 11. That means your alarm this morning probably felt even more invasive than normal. But it’s more serious than that – Daylight Saving Time is literally killing us. On Monday, there will likely be a 24% spike in heart attacks and a short-term increase in car crashes, strokes, and potentially even suicides. There’s nothing you can do to fully compensate for the sudden change that’s being forced on us, but you can take advantage of what scientists have learned about body clocks to adapt as quickly as possible. We all have a natural internal clock of sorts, our circadian rhythm. It’s what makes us feel tired when it’s time to sleep and wakes us up in the morning, provided we’re on a fairly regular schedule. As a species, humans’ clocks have evolved to mostly match the 24-hour natural light/dark schedule. (Our internal clock is actually a little longer than 24 hours, but gets naturally re-synchronised by environmental cues.) Exposure to light or darkness generally causes our bodies to produce hormones, particularly melatonin, that tell us when we should be alert or asleep – though artificial lighting can wreak some havoc on that system. Most of us are drowsiest around 5 a.m. Suddenly changing the clocks throws off our internal body clock. You won’t naturally suddenly feel tired an hour earlier at night. In the morning when the alarm rings, it’s still going to feel like you should be asleep. But we can manipulate our internal clocks to some degree: the most effective strategy is to get exposed to light at the right time. According to one study, the most effective way to reset your natural sleep schedule is to go camping. Even in the winter, there’s enough natural light to shift your internal rhythm. But it’s probably too late for a last-minute camping trip (and it’s still very cold in much of the US). A less planning-intensive method is to take in some bright sunlight early in the morning for the next few days. It will also help to avoid light in the evening, making sure you are in a dark environment by bedtime. “Full spectrum lighting is probably optimal in terms of the management of all these clockwork hormones that direct the complex physiology we have,” Richard Rosen, director of retina services at New York Eye and Ear Infirmary of Mount Sinai, previously told Business Insider. Even wearing sunglasses when you are trying to get your body ready for bed might help. Those who really feel the pain of the spring-forward clock change could also follow the lead of Florida residents, who are pushing to move clocks forward then never switch them back.
# ------------------------------------------------------------------------------- # Name: main - Proutpulsor # # Author: S.L-prog # # Licence: <GNU GENERAL PUBLIC LICENSE> # ------------------------------------------------------------------------------- import pygame from pygame.locals import * # Set the screen size. screen = pygame.display.set_mode((0,0), FULLSCREEN) #(800, 480)) # pygame.init() import classes from constantes import * import random scrrec = screen.get_rect() BACKGROUND = pygame.transform.scale(BACKGROUND, (scrrec.right, scrrec.bottom)) # Import the android module. If we can't import it, set it to None - this # lets us test it, and check to see if we want android-specific behavior. try: import android except ImportError: android = None # Event constant. TIMEREVENT = pygame.USEREVENT # The FPS the game runs at. FPS = 30 def universegenerator(number_ext, number_ast, number_hamb): extinguisher = [0]*number_ext for i in range(0,number_ext): extinguisher[i] = classes.Item(scrrec.center, EXTINGUISHER) asteroid = [0]*number_ast for i in range(0,number_ast): randomvar = random.randint(1,4) if randomvar == 1: asteroid[i] = classes.Item(scrrec.center, ASTEROID1) if randomvar == 2: asteroid[i] = classes.Item(scrrec.center, ASTEROID2) if randomvar == 3: asteroid[i] = classes.Item(scrrec.center, ASTEROID3) if randomvar == 4: asteroid[i] = classes.Item(scrrec.center, ASTEROID4) hamburger = [0]*number_hamb for i in range(0,number_hamb): hamburger[i] = classes.Item(scrrec.center, HAMBURGER) return extinguisher, asteroid, hamburger def displayuniverse(extinguisher, asteroid, hamburger, screen, astronautx, astronauty): for i in range(0,len(extinguisher)): extinguisher[i].display(screen, astronautx, astronauty) for i in range(0,len(asteroid)): asteroid[i].display(screen, astronautx, astronauty) for i in range(0,len(hamburger)): hamburger[i].display(screen, astronautx, astronauty) def main(): astronaut = classes.Astronaut(scrrec.center) extinguisher, asteroid, hamburger = universegenerator(50,200,50) # nombre d'items au depart # Map the back button to the escape key. 
if android: android.init() android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE) #a reactiver pour python 2.7 #pygame.time.set_timer(TIMEREVENT, 1000 / FPS) screenleft = screen.get_width()/2 screentop = screen.get_height()/2 game = True while game: # Android-specific: if android: if android.check_pause(): android.wait_for_resume() for ev in pygame.event.get(): if ev.type == pygame.MOUSEBUTTONDOWN: if ev.pos[0] <= screenleft: if ev.pos[1] <= screentop: astronaut.extinguisher_right = True if ev.pos[1] > screentop: astronaut.extinguisher_left = True if ev.pos[0] > screenleft: astronaut.fart = True if ev.type == pygame.MOUSEBUTTONUP: astronaut.extinguisher_right = False astronaut.extinguisher_left = False astronaut.fart = False if ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE: game = False if ev.type == pygame.KEYDOWN and ev.key == pygame.K_SPACE: if astronaut.takeextinguisher == False: astronaut.takeextinguisher = True else: astronaut.takeextinguisher = False astronaut.mouvement() screen.blit(BACKGROUND, (0,0)) pygame.draw.line(screen, (255, 0, 0), (screenleft, 0), (screenleft,screentop*2), 5) # afficher delimitation pygame.draw.line(screen, (255, 0, 0), (0, screentop), (screenleft,screentop), 5) # afficher delimitation displayuniverse(extinguisher, asteroid, hamburger, screen, astronaut.astroposition_x, astronaut.astroposition_y) astronaut.display(screen) pygame.display.flip() pygame.quit() #a reactiver pour python 2.7 #if __name__ == '__main__': # main() #a desactiver pour python 2.7 main()
The author introduces the notion of an ε-fundamental retraction which combines conditions that are used in Borsuk's shape theory, the reviewer's approximate shape theory, and Bogatyi's internal shape theory. This concept is used to define a class of compacta under the name of fundamental approximative absolute neighborhood retracts (FAANRs). It includes fundamental absolute neighborhood retracts (FANRs) and approximative absolute neighborhood retracts in the sense of M. H. Clapp (AANR_Cs) as proper subclasses. The paper presents several results about the FAANRs which are analogous to the corresponding results about FANRs and AANR_Cs and it gives several examples which are helpful in understanding the properties of this new class of compacta. For example, it is proved that FAANRs coincide with quasi strongly movable compacta. This statement should be compared with Borsuk's theorem (FANRs coincide with strongly movable compacta) and the reviewer's theorem (AANR_Cs coincide with approximatively movable compacta). Of course, the author's quasi strong movability is a hybrid of strong movability and approximative movability.
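For quick reference, the class relationships stated above can be written compactly (a notational summary only, with \subsetneq denoting proper inclusion):

\[
\mathrm{FANR} \subsetneq \mathrm{FAANR}, \qquad
\mathrm{AANR}_C \subsetneq \mathrm{FAANR}, \qquad
\mathrm{FAANR} = \{\, X : X \text{ is a quasi strongly movable compactum} \,\}.
\]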
from __future__ import absolute_import

import sure

from .. import Chain, NoApiKeyId, NoApiKeySecret, Webhook, create_webhook
from .mock_http_adapter import *


def test_create_webhook():
    create_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
                   api_key_id=api_key_id, api_key_secret=api_key_secret,
                   http_adapter=http_adapter) \
        .should.equal(webhook)


def test_create_webhook_using_class():
    Chain(api_key_id=api_key_id, api_key_secret=api_key_secret,
          http_adapter=http_adapter) \
        .create_webhook(webhook_id=webhook_id, webhook_url=webhook_url) \
        .should.equal(webhook)


def test_create_webhook_without_api_key_id():
    (lambda: create_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
                            http_adapter=no_http())) \
        .should.throw(NoApiKeyId)


def test_create_webhook_without_api_key_secret():
    (lambda: create_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
                            api_key_id=api_key_id,
                            http_adapter=no_http())) \
        .should.throw(NoApiKeySecret)


api_key_id = 'DEMO-4a5e1e4'
api_key_secret = 'DEMO-f8aef80'
webhook_id = 'FFA21991-5669-4728-8C83-74DEC4C93A4A'
webhook_url = 'https://username:[email protected]'

url = 'https://api.chain.com/v1/webhooks'

request_json = """
{
    "id": "FFA21991-5669-4728-8C83-74DEC4C93A4A",
    "url": "https://username:[email protected]"
}
"""

response_body = """
{
    "id": "FFA21991-5669-4728-8C83-74DEC4C93A4A",
    "url": "https://username:[email protected]"
}
"""

webhook = Webhook(
    id=webhook_id,
    url=webhook_url,
)

http_adapter = mock_post_json(url, request_json, response_body)
: How much in donations has the DPCA received because of the 501c3?
: What has the DPCA done with those donations?
: What is the GOOD for the membership OR THE BREED?
The NAIA, under the directorship of the AKC in the form of Patti Strand, a long-time AKC Director. Sponsorship of Meet the Breeds, in which Dr. DiNardo's daughter (an AKC executive) played a key role for the AKC. A sponsorship at the PennVet Working Dog Conference. A sponsor to the AKC. Are you seeing "red" yet? Under the 501c3 the Constitution has been perverted away from “Preserve and Protect” and toward the benefit of ALL dogs. In fact, the financial benefits seem to be primarily a benefit TO THE AKC. In summation, until proof is offered, it is questionable whether the Doberman Pinscher and the DPCA have benefited at all under the 501c3. Rather, the opposite could be true, with the Doberman Pinscher and the DPCA suffering reduced registrations of Dobermans and a decrease in club membership. How do the IRS and the State of Michigan feel about being the victims of fraud and other misrepresentations?
from django.conf.urls import patterns, url, include from serrano.conf import dep_supported # Patterns for the data namespace data_patterns = patterns( '', url(r'^export/', include('serrano.resources.exporter')), url(r'^preview/', include('serrano.resources.preview')), ) # Patterns for the serrano namespace serrano_patterns = patterns( '', url(r'^', include('serrano.resources')), url(r'^async/', include('serrano.resources.async', namespace='async')), url(r'^categories/', include('serrano.resources.category')), url(r'^concepts/', include('serrano.resources.concept')), url(r'^contexts/', include('serrano.resources.context', namespace='contexts')), url(r'^data/', include(data_patterns, namespace='data')), url(r'^fields/', include('serrano.resources.field')), url(r'^jobs/', include('serrano.resources.jobs', namespace='jobs')), url(r'^queries/', include('serrano.resources.query', namespace='queries')), url(r'^stats/', include('serrano.resources.stats', namespace='stats')), url(r'^views/', include('serrano.resources.view', namespace='views')), ) if dep_supported('objectset'): # Patterns for the 'sets' namespace serrano_patterns += patterns( '', url(r'^sets/', include('serrano.resources.sets', namespace='sets')) ) # Exported patterns urlpatterns = patterns( '', url(r'^', include(serrano_patterns, namespace='serrano')) )
According to Bill, who’s been with us since dinosaurs roamed the Earth, “everyone knows everything” about him. We would add that he once referred to himself as the Willy Loman of FreedomCar, but you’ll need to be versed in “Death of a Salesman” to get the reference. FreedomCar Fun Fact: Bill once held the Company record for error-free consecutive services at 1,066 trips. We don’t talk about number 1,067. Once again, we had a very pleasant trip from BWI with FreedomCar. Terry was in contact with us shortly after we landed to let us know he was on-site. The car was comfortable and the driving excellent. Altogether, a fine experience! David was courteous, helpful, very pleasant, and we had a nice ride to Phoenix listening to some “golden oldies” music, which I absolutely loved. Thanks again to both you and David. I remain mighty impressed by your customer service. Your reliability is priceless. Many thanks to a great driver for a safe trip to Mays Chapel. As always, FreedomCar’s service was great. Both the drivers to and from the Port drove safely and helped us with our bags. We feel very blessed to have your company transport us to the Port and Airport when we travel.
# Copyright (C) 2013 ABRT Team # Copyright (C) 2013 Red Hat, Inc. # # This file is part of faf. # # faf is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # faf is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with faf. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import import re from pyfaf.opsys import System from pyfaf.checker import DictChecker, IntChecker, ListChecker, StringChecker from pyfaf.common import FafError, log from pyfaf.queries import (get_archs, get_arch_by_name, get_opsys_by_name, get_package_by_nevra, get_releases, get_reportpackage, get_repos_for_opsys, get_unknown_package) from pyfaf.storage import (Arch, Build, OpSys, OpSysReleaseStatus, Package, ReportPackage, ReportUnknownPackage, column_len) from pyfaf.repos.yum import Yum __all__ = ["CentOS"] class CentOS(System): name = "centos" nice_name = "CentOS" packages_checker = ListChecker( DictChecker({ "name": StringChecker(pattern=r"^[a-zA-Z0-9_\-\.\+~]+$", maxlen=column_len(Package, "name")), "epoch": IntChecker(minval=0), "version": StringChecker(pattern=r"^[a-zA-Z0-9_\.\+]+$", maxlen=column_len(Build, "version")), "release": StringChecker(pattern=r"^[a-zA-Z0-9_\.\+]+$", maxlen=column_len(Build, "release")), "architecture": StringChecker(pattern=r"^[a-zA-Z0-9_]+$", maxlen=column_len(Arch, "name")), }), minlen=1 ) ureport_checker = DictChecker({ # no need to check name, version and architecture twice # the toplevel checker already did it # "name": StringChecker(allowed=[CentOS.name]) # "version": StringChecker() # "architecture": StringChecker() }) pkg_roles = ["affected", "related", "selinux_policy"] @classmethod def install(cls, db, logger=None): if logger is None: logger = log.getChildLogger(cls.__name__) logger.info("Adding CentOS") new = OpSys() new.name = cls.nice_name db.session.add(new) db.session.flush() @classmethod def installed(cls, db): return bool(get_opsys_by_name(db, cls.nice_name)) def __init__(self): super(CentOS, self).__init__() self.load_config_to_self("base_repo_url", ["centos.base-repo-url"], "http://vault.centos.org/centos/$releasever/" "os/Source/") self.load_config_to_self("updates_repo_url", ["centos.updates-repo-url"], "http://vault.centos.org/centos/$releasever/" "updates/Source/") def _save_packages(self, db, db_report, packages, count=1): for package in packages: role = "RELATED" if "package_role" in package: if package["package_role"] == "affected": role = "CRASHED" elif package["package_role"] == "selinux_policy": role = "SELINUX_POLICY" db_package = get_package_by_nevra(db, name=package["name"], epoch=package["epoch"], version=package["version"], release=package["release"], arch=package["architecture"]) if db_package is None: self.log_warn("Package {0}-{1}:{2}-{3}.{4} not found in " "storage".format(package["name"], package["epoch"], package["version"], package["release"], package["architecture"])) db_unknown_pkg = get_unknown_package(db, db_report, role, package["name"], package["epoch"], package["version"], package["release"], package["architecture"]) if db_unknown_pkg is None: db_arch = get_arch_by_name(db, 
package["architecture"]) if db_arch is None: continue db_unknown_pkg = ReportUnknownPackage() db_unknown_pkg.report = db_report db_unknown_pkg.name = package["name"] db_unknown_pkg.epoch = package["epoch"] db_unknown_pkg.version = package["version"] db_unknown_pkg.release = package["release"] db_unknown_pkg.arch = db_arch db_unknown_pkg.type = role db_unknown_pkg.count = 0 db.session.add(db_unknown_pkg) db_unknown_pkg.count += count continue db_reportpackage = get_reportpackage(db, db_report, db_package) if db_reportpackage is None: db_reportpackage = ReportPackage() db_reportpackage.report = db_report db_reportpackage.installed_package = db_package db_reportpackage.count = 0 db_reportpackage.type = role db.session.add(db_reportpackage) db_reportpackage.count += count def validate_ureport(self, ureport): CentOS.ureport_checker.check(ureport) return True def validate_packages(self, packages): CentOS.packages_checker.check(packages) for package in packages: if ("package_role" in package and package["package_role"] not in CentOS.pkg_roles): raise FafError("Only the following package roles are allowed: " "{0}".format(", ".join(CentOS.pkg_roles))) return True def save_ureport(self, db, db_report, ureport, packages, flush=False, count=1): self._save_packages(db, db_report, packages, count=count) if flush: db.session.flush() def get_releases(self): return {"7": {"status": "ACTIVE"}} def get_components(self, release): urls = [repo.replace("$releasever", release) for repo in [self.base_repo_url, self.updates_repo_url]] yum = Yum(self.name, *urls) components = list(set(pkg["name"] for pkg in yum.list_packages(["src"]))) return components #def get_component_acls(self, component, release=None): # return {} def get_build_candidates(self, db): return (db.session.query(Build) .filter(Build.release.like("%%.el%%")) .all()) def check_pkgname_match(self, packages, parser): for package in packages: if ("package_role" not in package or package["package_role"].lower() != "affected"): continue nvra = "{0}-{1}-{2}.{3}".format(package["name"], package["version"], package["release"], package["architecture"]) match = parser.match(nvra) if match is not None: return True return False
The Toughbook 19 falls under Panasonic's fully rugged moniker, meaning that it will stand up to almost anything. That includes being dropped from almost a metre, being showered with water, being thrown in the mud and being dragged through dust and sand. It's therefore no surprise that ToughBooks are used by the US military, where the conditions are often inhospitable to say the least. In fact Panasonic was keen to mention that a ToughBook saved a soldier's life during Desert Storm!
import sys import hashlib import trex from trex import redis from twisted.internet import defer from twisted.trial import unittest from twisted.internet import reactor from twisted.python import failure from .mixins import Redis26CheckMixin, REDIS_HOST, REDIS_PORT class TestScripting(unittest.TestCase, Redis26CheckMixin): _SCRIPT = "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}" # From redis example @defer.inlineCallbacks def setUp(self): self.db = yield redis.Connection(REDIS_HOST, REDIS_PORT, reconnect=False) self.db1 = None self.redis_2_6 = yield self.is_redis_2_6() yield self.db.script_flush() @defer.inlineCallbacks def tearDown(self): yield self.db.disconnect() if self.db1 is not None: yield self.db1.disconnect() @defer.inlineCallbacks def test_eval(self): self._skipCheck() keys = ('key1', 'key2') args = ('first', 'second') r = yield self.db.eval(self._SCRIPT, keys, args) self._check_eval_result(keys, args, r) r = yield self.db.eval("return 10") self.assertEqual(r, 10) r = yield self.db.eval("return {1,2,3.3333,'foo',nil,'bar'}") self.assertEqual(r, [1, 2, 3, "foo"]) # Test the case where the hash is in script_hashes, # but redis doesn't have it h = self._hash_script(self._SCRIPT) yield self.db.script_flush() conn = yield self.db._factory.getConnection(True) conn.script_hashes.add(h) r = yield self.db.eval(self._SCRIPT, keys, args) self._check_eval_result(keys, args, r) @defer.inlineCallbacks def test_eval_keys_only(self): self._skipCheck() keys = ['foo', 'bar'] args = [] r = yield self.db.eval("return {KEYS[1],KEYS[2]}", keys, args) self.assertEqual(r, keys) r = yield self.db.eval("return {KEYS[1],KEYS[2]}", keys=keys) self.assertEqual(r, keys) @defer.inlineCallbacks def test_eval_args_only(self): self._skipCheck() keys = [] args = ['first', 'second'] r = yield self.db.eval("return {ARGV[1],ARGV[2]}", keys, args) self.assertEqual(r, args) r = yield self.db.eval("return {ARGV[1],ARGV[2]}", args=args) self.assertEqual(r, args) @defer.inlineCallbacks def test_eval_error(self): self._skipCheck() try: result = yield self.db.eval('return {err="My Error"}') except trex.exceptions.ResponseError: pass except: raise self.failureException('%s raised instead of %s:\n %s' % (sys.exc_info()[0], 'trex.exceptions.ResponseError', failure.Failure().getTraceback())) else: raise self.failureException('%s not raised (%r returned)' % ('trex.exceptions.ResponseError', result)) @defer.inlineCallbacks def test_evalsha(self): self._skipCheck() r = yield self.db.eval(self._SCRIPT) h = self._hash_script(self._SCRIPT) r = yield self.db.evalsha(h) self._check_eval_result([], [], r) @defer.inlineCallbacks def test_evalsha_error(self): self._skipCheck() h = self._hash_script(self._SCRIPT) try: result = yield self.db.evalsha(h) except trex.exceptions.ScriptDoesNotExist: pass except: raise self.failureException('%s raised instead of %s:\n %s' % (sys.exc_info()[0], 'trex.exceptions.ScriptDoesNotExist', failure.Failure().getTraceback())) else: raise self.failureException('%s not raised (%r returned)' % ('trex.exceptions.ResponseError', result)) @defer.inlineCallbacks def test_script_load(self): self._skipCheck() h = self._hash_script(self._SCRIPT) r = yield self.db.script_exists(h) self.assertFalse(r) r = yield self.db.script_load(self._SCRIPT) self.assertEqual(r, h) r = yield self.db.script_exists(h) self.assertTrue(r) @defer.inlineCallbacks def test_script_exists(self): self._skipCheck() h = self._hash_script(self._SCRIPT) script1 = "return 1" h1 = self._hash_script(script1) r = yield self.db.script_exists(h) 
self.assertFalse(r) r = yield self.db.script_exists(h, h1) self.assertEqual(r, [False, False]) yield self.db.script_load(script1) r = yield self.db.script_exists(h, h1) self.assertEqual(r, [False, True]) yield self.db.script_load(self._SCRIPT) r = yield self.db.script_exists(h, h1) self.assertEqual(r, [True, True]) @defer.inlineCallbacks def test_script_kill(self): self._skipCheck() try: result = yield self.db.script_kill() except trex.exceptions.NoScriptRunning: pass except: raise self.failureException('%s raised instead of %s:\n %s' % (sys.exc_info()[0], 'trex.exceptions.NoScriptRunning', failure.Failure().getTraceback())) else: raise self.failureException('%s not raised (%r returned)' % ('trex.exceptions.ResponseError', result)) # Run an infinite loop script from one connection # and kill it from another. inf_loop = "while 1 do end" self.db1 = yield redis.Connection(REDIS_HOST, REDIS_PORT, reconnect=False) eval_deferred = self.db1.eval(inf_loop) reactor.iterate() r = yield self.db.script_kill() self.assertEqual(r, 'OK') try: result = yield eval_deferred except trex.exceptions.ResponseError: pass except: raise self.failureException('%s raised instead of %s:\n %s' % (sys.exc_info()[0], 'trex.exceptions.ResponseError', failure.Failure().getTraceback())) else: raise self.failureException('%s not raised (%r returned)' % ('trex.exceptions.ResponseError', result)) def _check_eval_result(self, keys, args, r): self.assertEqual(r, list(keys) + list(args)) def _hash_script(self, script): return hashlib.sha1(script).hexdigest()
A detailed roadmap to successfully launching your company and product. This final book of the series presents a detailed roadmap to successfully launching your company and product. From setting realistic goals for the launch, determining the key messages, and preparing the sales team – to briefing the press and analysts, finding the right venue, and enlisting the help of early customers. Book 5 takes you through all the necessary steps to a successful launch.
from bluebottle.recurring_donations.models import (MonthlyDonor, MonthlyDonorProject) from rest_framework import serializers from bluebottle.donations.models import Donation from bluebottle.projects.models import Project class MonthlyDonationProjectSerializer(serializers.ModelSerializer): project = serializers.SlugRelatedField(many=False, slug_field='slug', queryset=Project.objects) donation = serializers.PrimaryKeyRelatedField(source='donor', queryset=MonthlyDonor.objects) class Meta(): model = MonthlyDonorProject fields = ('id', 'donation', 'project') class MonthlyDonationSerializer(serializers.ModelSerializer): projects = MonthlyDonationProjectSerializer(many=True, read_only=True) class Meta(): model = MonthlyDonor fields = ('id', 'amount', 'iban', 'bic', 'active', 'name', 'city', 'country', 'projects')
“We’re giving black people power, royalty. ... We can be superheroes," he said, linking the party to the movie. Michael B. Jordan has stepped out of the “Black Panther” universe to portray another kind of Black Panther on British GQ’s March cover. The actor, who plays Erik Killmonger in Marvel’s upcoming “Black Panther” movie, believes it’s important to offer uplifting representations of blackness on and off screen. With a 100 percent Rotten Tomatoes rating and a Hollywood premiere that asked guests to dress like African royalty, the movie “Black Panther” has already become a symbol of black representation. And with this cover, Jordan ― wearing the traditional Black Panther Party dress of black turtleneck, leather jacket and beret ― makes the link between an inspiring fictional universe and the historical activism of the Black Panthers. The party was founded in Oakland, California, in 1966 as a way to protect and uplift the black community. Like the Marvel movie, the Black Panthers strove to represent blackness as it had not been seen before. Party members initiated breakfast programs for schools, promoted black beauty and monitored police behavior to protect their neighborhoods. Government officials, however, responded to the very idea of armed and organized black people as a threat to national security. The Black Panthers was targeted by the FBI’s secret counterintelligence program ― COINTELPRO ― aimed at discrediting black activist and other dissident groups. Now, artistic representations like Jordan’s cover celebrate the movement’s positive impact. Images in a mainstream magazine work to normalize its history and its narrative. This Black Panther-inspired cover isn’t a first for the GQ franchise. Last year, the U.S. version of the magazine named Colin Kaepernick its “Citizen of the Year” and featured him on the cover dressed in Black Panther Party attire. A year and another Black Panther cover later, Jordan told GQ that he looks at the present moment as one of progress and hope.
# -*- mode: python; coding: utf-8 -*- # # Copyright 2012, 2013 Andrej A Antonov <[email protected]>. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. assert str is not bytes import os, os.path, weakref, importlib from mako import lookup as mako_lookup from . import get_items class TplTxtGenEnviron(object): pass class ItemFunc(object): def __init__(self, get_iter): self._get_iter = get_iter self._group_map = {} def __call__(self, path, group=None): if group is None: return next(self._get_iter(path)) try: text = self._group_map[group] except KeyError: self._group_map[group] = text = next(self._get_iter(path)) return text class ItemFuncFactory(object): def __init__(self, environ_ref): self._environ_ref = environ_ref self._iter_map = {} def __call__(self): return ItemFunc(self._get_iter) def _resolve_path(self, path): root_dir = self._environ_ref().root_dir return os.path.join(root_dir, path) def _get_iter(self, path): try: it = self._iter_map[path] except KeyError: self._iter_map[path] = it = \ get_items.get_random_infinite_items(self._resolve_path(path)) return it class CustomFunc(object): def __init__(self, get_impl): self._get_impl = get_impl self._impl_map = {} def __call__(self, custom_name): try: impl = self._impl_map[custom_name] except KeyError: self._impl_map[custom_name] = impl = self._get_impl(custom_name)() return impl class CustomFuncFactory(object): def __init__(self, environ_ref): self._environ_ref = environ_ref self._impl_map = {} def __call__(self): return CustomFunc(self._get_impl) def _get_impl(self, custom_name): try: impl = self._impl_map[custom_name] except KeyError: func_name, module_name = custom_name.rsplit(':', 1) mod = importlib.import_module(module_name) factory = mod.FUNC_FACTORY_MAP[func_name] self._impl_map[custom_name] = impl = factory(self._environ_ref) return impl FUNC_FACTORY_MAP = { 'item': ItemFuncFactory, 'custom': CustomFuncFactory, } DEFAULT_FUNC_FACTORY_MAP = FUNC_FACTORY_MAP def count_iter(count): if count is not None: for i in range(count): # TODO: for Python-3.3+ -- need fix to PEP-0380 yield i else: while True: yield def tpl_txt_gen_iter(tpl_path, count=None, environ=None, func_factory_map=None): if environ is None: environ = TplTxtGenEnviron() environ.tpl_path = tpl_path environ.count = count environ.root_dir = os.path.dirname(environ.tpl_path) environ.tpl_name = os.path.basename(environ.tpl_path) environ.tpl_lookup = mako_lookup.TemplateLookup(directories=(environ.root_dir, )) environ.tpl = environ.tpl_lookup.get_template(environ.tpl_name) if func_factory_map is None: func_factory_map = DEFAULT_FUNC_FACTORY_MAP func_factories = { func_name: func_factory_map[func_name](weakref.ref(environ)) for func_name in func_factory_map } for i in count_iter(environ.count): tpl_kwargs = { func_name: func_factories[func_name]() for func_name in func_factories } yield environ.tpl.render(**tpl_kwargs) def tpl_txt_gen(tpl_path, out_path, count): out_path_created = False 
for i, text in enumerate(tpl_txt_gen_iter(tpl_path, count=count)): if not out_path_created: os.mkdir(out_path) out_path_created = True out_name = 'out-{}.txt'.format(i) full_out_path = os.path.join(out_path, out_name) with open(full_out_path, 'w', encoding='utf-8', newline='\n') as fd: fd.write(text)
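A hypothetical usage sketch of the two entry points above. The package path, template name and data files are invented for illustration (the real module lives inside a package and uses relative imports); the template is assumed to call the item() helper exposed through FUNC_FACTORY_MAP, and the data files are assumed to hold one candidate value per line, as consumed by get_items.get_random_infinite_items().

# greeting.tpl (Mako syntax) might contain:
#   Hello, ${item('names.txt')}! Today's colour is ${item('colours.txt')}.
# with names.txt and colours.txt sitting next to the template.

from mypackage.tpl_txt_gen import tpl_txt_gen, tpl_txt_gen_iter  # import path assumed

# Render five variants to out-0.txt .. out-4.txt inside ./generated
tpl_txt_gen('templates/greeting.tpl', 'generated', count=5)

# Or consume the rendered texts lazily, without writing any files
for text in tpl_txt_gen_iter('templates/greeting.tpl', count=2):
    print(text)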
I've been away visiting our oldest son in San Francisco so I apologize for the long gap in posts. I had limited internet access and was busy enjoying the sights and the food - what a great city! Now it is time to catch up and post a lot of things that have been waiting for me to have time to post! I participated in a mesh swap on the Cricut Message Board this month. Here is one of the files I contributed. It was shared with the other swap participants by email and I am posting it now so people who weren't involved in the swap can use it. The only cartridge needed is the Wedding Solutions cartridge. What a beautiful mesh! I hope you loved S.F. It was BEAUTIFUL there this week. I live one hour north. Love The City! I was in SF in June and most recently in September when I lost my dad to cancer. I have great memories of SF. Hope you enjoyed yourself. Thanks for sharing your awesome files. thanks for sharing!! cool design! Your designs are so beautiful! You definitely have a knack for this!! They are beautiful. Thanks so much for sharing. The Importance of Backing Up!
import gettext from Tools.Directories import SCOPE_LANGUAGE, resolveFilename, fileExists import language_cache class Language: def __init__(self): gettext.install('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), unicode=0, codeset="utf-8") self.activeLanguage = 0 self.lang = {} self.langlist = [] # FIXME make list dynamically # name, iso-639 language, iso-3166 country. Please don't mix language&country! # also, see "precalcLanguageList" below on how to re-create the language cache after you added a language language_path = "/usr/share/enigma2/po/%s/LC_MESSAGES/enigma2.mo" if fileExists(language_path % "en"): self.addLanguage(_("English"), "en", "EN") if fileExists(language_path % "de"): self.addLanguage(_("German"), "de", "DE") if fileExists(language_path % "ar"): self.addLanguage(_("Arabic"), "ar", "AE") if fileExists(language_path % "ca"): self.addLanguage(_("Catalan"), "ca", "AD") if fileExists(language_path % "hr"): self.addLanguage(_("Croatian"), "hr", "HR") if fileExists(language_path % "cs"): self.addLanguage(_("Czech"), "cs", "CZ") if fileExists(language_path % "da"): self.addLanguage(_("Danish"), "da", "DK") if fileExists(language_path % "nl"): self.addLanguage(_("Dutch"), "nl", "NL") if fileExists(language_path % "et"): self.addLanguage(_("Estonian"), "et", "EE") if fileExists(language_path % "fi"): self.addLanguage(_("Finnish"), "fi", "FI") if fileExists(language_path % "fr"): self.addLanguage(_("French"), "fr", "FR") if fileExists(language_path % "el"): self.addLanguage(_("Greek"), "el", "GR") if fileExists(language_path % "hu"): self.addLanguage(_("Hungarian"), "hu", "HU") if fileExists(language_path % "lt"): self.addLanguage(_("Lithuanian"), "lt", "LT") if fileExists(language_path % "lv"): self.addLanguage(_("Latvian"), "lv", "LV") if fileExists(language_path % "is"): self.addLanguage(_("Icelandic"), "is", "IS") if fileExists(language_path % "it"): self.addLanguage(_("Italian"), "it", "IT") if fileExists(language_path % "no"): self.addLanguage(_("Norwegian"), "no", "NO") if fileExists(language_path % "pl"): self.addLanguage(_("Polish"), "pl", "PL") if fileExists(language_path % "pt"): self.addLanguage(_("Portuguese"), "pt", "PT") if fileExists(language_path % "ru"): self.addLanguage(_("Russian"), "ru", "RU") if fileExists(language_path % "sr"): self.addLanguage(_("Serbian"), "sr", "YU") if fileExists(language_path % "sk"): self.addLanguage(_("Slovakian"), "sk", "SK") if fileExists(language_path % "sl"): self.addLanguage(_("Slovenian"), "sl", "SI") if fileExists(language_path % "es"): self.addLanguage(_("Spanish"), "es", "ES") if fileExists(language_path % "sv"): self.addLanguage(_("Swedish"), "sv", "SE") if fileExists(language_path % "tr"): self.addLanguage(_("Turkish"), "tr", "TR") if fileExists(language_path % "uk"): self.addLanguage(_("Ukrainian"), "uk", "UA") if fileExists(language_path % "fy"): self.addLanguage(_("Frisian"), "fy", "x-FY") # there is no separate country for frisian self.callbacks = [] def addLanguage(self, name, lang, country): try: self.lang[str(lang + "_" + country)] = ((_(name), lang, country)) self.langlist.append(str(lang + "_" + country)) except: print "Language " + str(name) + " not found" def activateLanguage(self, index): try: lang = self.lang[index] print "Activating language " + lang[0] gettext.translation('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), languages=[lang[1]]).install() self.activeLanguage = index for x in self.callbacks: x() except: print "Selected language does not exist!" 
lang = self.lang["en_EN"] print "Activating default language " + lang[0] gettext.translation('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), languages=[lang[1]]).install() self.activeLanguage = "en_EN" for x in self.callbacks: x() def activateLanguageIndex(self, index): if index < len(self.langlist): self.activateLanguage(self.langlist[index]) def getLanguageList(self): return [ (x, self.lang[x]) for x in self.langlist ] def getActiveLanguage(self): return self.activeLanguage def getActiveLanguageIndex(self): idx = 0 for x in self.langlist: if x == self.activeLanguage: return idx idx += 1 return 0 def getLanguage(self): try: return str(self.lang[self.activeLanguage][1]) + "_" + str(self.lang[self.activeLanguage][2]) except: return 'en_EN' def addCallback(self, callback): self.callbacks.append(callback) def precalcLanguageList(self): # excuse me for those T1, T2 hacks please. The goal was to keep the language_cache.py as small as possible, *and* # don't duplicate these strings. T1 = _("Please use the UP and DOWN keys to select your language. Afterwards press the OK button.") T2 = _("Language selection") l = open("language_cache.py", "w") print >>l, "# -*- coding: UTF-8 -*-" print >>l, "LANG_TEXT = {" for language in self.langlist: self.activateLanguage(language) print >>l, '"%s": {' % language for name, lang, country in self.lang.values(): print >>l, '\t"%s_%s": "%s",' % (lang, country, _(name)) print >>l, '\t"T1": "%s",' % (_(T1)) print >>l, '\t"T2": "%s",' % (_(T2)) print >>l, '},' print >>l, "}" language = Language()
This home has got dementia care right, with superb facilities for personalised, person-centred care. There are always two trained members of staff on shift, along with a well-established care team and an award-winning activities team. This is a super management role for a career-driven dementia specialist nurse, with the opportunity to undertake extra and appropriate study; 36 hours clinical, 4 hours supernumerary. As Unit Manager, you will be planning, running and ensuring that our residents' needs are met. This is our residents' home, and you will have the autonomy to execute personalised care plans to ensure their happiness and wellbeing. As Unit Manager you will be involved with recruitment, training and mentoring staff (do you have an interest in working with Student Nurses?). This is a GOOD home aiming for OUTSTANDING - can you be part of the Team?
# # Copyright (c) 2013-2014 Kevin Steves <[email protected]> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # from __future__ import print_function import sys import logging from . import DEBUG1, DEBUG2, DEBUG3 _valid_part = set([ 'device-and-network-excluded', 'policy-and-objects-excluded', 'shared-object-excluded', 'no-vsys', 'vsys', ]) _part_xml = { 'device-and-network-excluded': '<device-and-network>excluded</device-and-network>', 'policy-and-objects-excluded': '<policy-and-objects>excluded</policy-and-objects>', 'shared-object-excluded': '<shared-object>excluded</shared-object>', 'no-vsys': '<no-vsys></no-vsys>', 'vsys': '<member>%s</member>', } def valid_part(part): return part in _valid_part class PanCommit: def __init__(self, validate=False, force=False, commit_all=False, merge_with_candidate=False): self._log = logging.getLogger(__name__).log self._validate = validate self._force = force self._commit_all = commit_all self._merge_with_candidate = merge_with_candidate self.partial = set() self._vsys = set() self._device = None self._device_group = None def validate(self): self._validate = True def force(self): self._force = True def commit_all(self): self._commit_all = True def merge_with_candidate(self): self._merge_with_candidate = True def device_and_network_excluded(self): part = 'device-and-network-excluded' self.partial.add(part) def policy_and_objects_excluded(self): part = 'policy-and-objects-excluded' self.partial.add(part) def shared_object_excluded(self): part = 'shared-object-excluded' self.partial.add(part) def no_vsys(self): part = 'no-vsys' self.partial.add(part) def vsys(self, vsys): if not self._commit_all: part = 'vsys' self.partial.add(part) if type(vsys) == type(''): vsys = [vsys] for name in vsys: self._vsys.add(name) def device(self, serial): self._device = serial def device_group(self, device_group): self._device_group = device_group def cmd(self): if self._commit_all: return self.__commit_all() else: return self.__commit() def __commit_all(self): s = '<commit-all><shared-policy>' if self._device: s += '<device>%s</device>' % self._device if self._device_group: s += '<device-group>%s</device-group>' % self._device_group # default when no <merge-with-candidate-cfg/> is 'yes' # we default to 'no' like the Web UI merge_xml = '<merge-with-candidate-cfg>%s</merge-with-candidate-cfg>' if self._merge_with_candidate: merge = 'yes' else: merge = 'no' s += merge_xml % merge if self._vsys: s += '<vsys>%s</vsys>' % self._vsys.pop() s += '</shared-policy></commit-all>' self._log(DEBUG1, 'commit-all cmd: %s', s) return s def __commit(self): s = '<commit>' if self._validate: s += '<validate>' if self._force: s += '<force>' if self.partial: s += '<partial>' for part in self.partial: if part in _part_xml: if part == 'vsys': s += '<vsys>' for name in self._vsys: xml_vsys = _part_xml[part] % name s += xml_vsys s += 
'</vsys>' else: s += _part_xml[part] if self.partial: s += '</partial>' if self._force: s += '</force>' if self._validate: s += '</validate>' s += '</commit>' self._log(DEBUG1, 'commit cmd: %s', s) return s if __name__ == '__main__': import pan.commit c = pan.commit.PanCommit() c.force() c.device_and_network_excluded() c.policy_and_objects_excluded() c.shared_object_excluded() c.vsys(['vsys4', 'vsys5']) print('cmd:', c.cmd())
Majestic mountains, wondrous waterfalls and pretty ports vie for your attention when you sail along Norway’s corrugated coast. Its cities are compact gems of history and culture, its tiny villages burst with character, and its natural attractions are truly stupendous. Highlights include Bergen, a European City of Culture with opportunities for adventure in the heart of Norway’s fjords; and Hellesylt, a Viking village in a spectacular location. And you still have the gravity-defying Pulpit Rock to look forward to at the end! Titan’s VIP door-to-door travel service collects you from home. Embark Boudicca (3T+) in preparation for a late afternoon sailing. Spend today exploring Bergen’s 900-year-old Bryggen Wharf, a delightfully colourful quay of maritime heritage. The maritime museum or the cafés and shops within half-timbered houses are wonderful and welcoming. You may want to visit a museum devoted to Norway’s most famous composer, Edvard Grieg, or ride a funicular railway to the highest peak of the Seven Mountains. After sailing through Sognefjord - Europe’s longest and deepest fjord - we dock early this morning in Flåm. Why not take a stroll along the waterside, passing cottages, orchards and farmland? Or explore the beautiful Nærøyfjord nearby. Late this afternoon, you can view this UNESCO protected area from the water as we sail its length towards Gudvangen, a small Viking village at the head of the fjord. Ship to shore tender service. This morning, we cruise past soaring mountains, including popular skiing location Strandafjellet. In the afternoon we arrive in the tiny Viking port of Hellesylt, one of Norway’s most beautiful fjord locations. Easy walking trails survey the stunning Hellesylt Waterfall and the Kjellstaddalen and Moldskreddalen valleys. Our cruise continues through Geirangerfjord this evening, passing snow-capped mountains and the fabled Seven Sisters Waterfall. Early this morning, we dock in Olden, a charming village on the magnificent Nordfjord. This is a great opportunity to visit the glaciers of Jostedalsbreen or take in the landscape from the Loen Skylift. Later this afternoon we cruise peacefully along Nordfjord. Ship to shore tender service. Have a camera in hand as we cruise alongside Furebergfossen waterfall and the many gems of Hardangerfjord, on our way to Eidfjord. This picturesque village sits before snow-peaked mountains engraved with glistening rivers and tumbling waterfalls. Hiking is popular here, so don’t forget some suitable shoes to explore the network of marked trails. As a former European Capital of Culture, Stavanger has plenty of history, tradition and charm to discover. It’s also perfectly placed for Lysefjord, a stunning waterway famous for the Kjerag Mountain and Preikestolen (Pulpit Rock) - two of the country's most impressive attractions.
import sys, cPickle, pickle from stackless import * def run_pickled(func, *args): t = tasklet(func)(*args) print "starting tasklet" t.run() print "pickling" # hack for pickle if pickl == pickle: t.tempval = None pi = pickl.dumps(t) t.remove() #print pi file("temp.pickle", "wb").write(pi) print "unpickling" ip = pickl.loads(pi) print "starting unpickled tasklet" ip.run() def test(n, when): for i in range(n): if i==when: schedule() print i def rectest(nrec, lev=0): print lev*" " + "entering", lev+1 if lev < nrec: rectest(nrec, lev+1) else: schedule() print lev*" " + "leaving", lev+1 pickl = pickle # note that the refcounts are correct with pickle.py # but also note that in 2.2, pickle seems broken for extension types # which are referencing themselves... print print "testing pickled iteration" print 60*"-" print run_pickled(test, 20, 13) print print "testing pickled recursion" print 60*"-" print run_pickled(rectest, 13)
The Sampson “One-Man” Hydraulic extra heavy-duty casket and body lifter will easily handle up to 1000 lbs. With its nine 4″ swivel casters you can maneuver this body lifter through a doorway with ease. Features: New adjustable body carriage, which allows you to adjust all 4 body straps easily to accommodate any body from head to toe. Comes with four 2″ adjustable, washable nylon web body straps, which adjust to the girth of any body. The two straps that go under the body are attached to a wand - just slip the wand under the body and the straps follow with ease. Two 2″ nylon web straps are also included for lifting caskets. A heavy-duty hydraulic jack will lift the heaviest body from floor to cart or table with virtually no effort. Made of extra-sturdy, all-steel square tubular construction with a durable powder-coat finish.
from nile.common import log as logging from nile.common import exception from nile.common.i18n import _ from nile.common import models from nile.common import pagination from nile.common import utils from nile.db import db_query from nile.db import get_db_api LOG = logging.getLogger(__name__) class DatabaseModelBase(models.ModelBase): _auto_generated_attrs = ['id'] @classmethod def create(cls, **values): now_time = utils.utcnow() init_vals = { 'id': utils.generate_uuid(), 'created': now_time, } if hasattr(cls, 'deleted'): init_vals['deleted'] = False if hasattr(cls, 'updated'): init_vals['updated'] = now_time init_vals.update(values) instance = cls(**init_vals) if not instance.is_valid(): raise exception.InvalidModelError(errors=instance.errors) return instance.save() @property def db_api(self): return get_db_api() @property def preserve_on_delete(self): return hasattr(self, 'deleted') and hasattr(self, 'deleted_at') @classmethod def query(cls): return get_db_api()._base_query(cls) def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) self['updated'] = utils.utcnow() LOG.debug("Saving %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) return self.db_api.save(self) def delete(self): self['updated'] = utils.utcnow() LOG.debug("Deleting %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) if self.preserve_on_delete: self['deleted_at'] = utils.utcnow() self['deleted'] = True return self.db_api.save(self) else: return self.db_api.delete(self) def update(self, **values): for key in values: if hasattr(self, key): setattr(self, key, values[key]) self['updated'] = utils.utcnow() return self.db_api.save(self) def __init__(self, **kwargs): self.merge_attributes(kwargs) if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) def merge_attributes(self, values): """dict.update() behaviour.""" for k, v in values.iteritems(): self[k] = v @classmethod def find_by(cls, context=None, **conditions): model = cls.get_by(**conditions) if model is None: raise exception.ModelNotFoundError(_("%(s_name)s Not Found") % {"s_name": cls.__name__}) if ((context and not context.is_admin and hasattr(model, 'user_id') and model.user_id != context.user_id)): msg = _("User %(s_user_id)s tried to access " "%(s_name)s, owned by %(s_owner)s.") LOG.error(msg % ( {"s_user_id": context.user_id, "s_name": cls.__name__, "s_owner": model.user_id})) raise exception.ModelNotFoundError( _("User %(s_user_id)s cannot access %(s_name)s") % ( {"s_user_id": context.user_id, "s_name": cls.__name__})) return model @classmethod def get_by(cls, **kwargs): return get_db_api().find_by(cls, **cls._process_conditions(kwargs)) @classmethod def find_all(cls, **kwargs): return db_query.find_all(cls, **cls._process_conditions(kwargs)) @classmethod def _process_conditions(cls, raw_conditions): """Override in inheritors to format/modify any conditions.""" return raw_conditions @classmethod def find_by_pagination(cls, collection_type, collection_query, order_by=None, page_size=200, page_index=0): elements, count = collection_query.paginated_collection(order_by=order_by, page_size=page_size, page_index=page_index) return pagination.PaginatedDataView(collection_type, elements, page_index=page_index, page_size=page_size, total_size=count)
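A hypothetical sketch of how a concrete model built on DatabaseModelBase above would be used, assuming the db_api backend is configured elsewhere; the Instance class, its column mapping and the field values are invented for illustration, and how columns are declared depends on the underlying ModelBase/sqlalchemy layer, which is outside this file.

# Hypothetical subclass; column declaration is elided because it lives in
# the mapping layer, not in DatabaseModelBase itself.
class Instance(DatabaseModelBase):
    pass

# create() generates an id, stamps created (plus updated/deleted when the
# model defines them), validates, then saves through the configured db_api.
inst = Instance.create(name='demo', user_id='some-user')

# find_by() raises ModelNotFoundError when nothing matches, and enforces
# ownership when a non-admin context carrying user_id is supplied.
found = Instance.find_by(id=inst.id)

# delete() soft-deletes (sets deleted/deleted_at) when the model has those
# columns, and removes the row otherwise.
found.delete()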
The Prince of Wales has invited partners behind plans to regenerate Wisbech to his new housing development in Cornwall to learn more about how its principles for urban planning could support Wisbech Garden Town proposals. Prince Charles extended the invitation to Fenland District Council, Anglian Water and other organisations leading the Wisbech Garden Town proposals during his visit to Wisbech (November 27). An exhibition on plans for the town’s future was showcased at St Peter and St Paul Church, where His Royal Highness met a number of organisations, charities and volunteers to find out about the work they do in the local community, and learn more about projects in the area. Gary Garford, corporate director for Fenland District Council, Peter Simpson, chief executive of Anglian Water, and Russell Beal, Anglian Water’s Wisbech 2020 programme manager, spoke to The Prince about the garden town concept and how it was created from the partnership Wisbech 2020 Vision project, which aims to bring jobs, infrastructure and investment to the town. Developed in 2016, the Wisbech Garden Town proposals seek to regenerate Wisbech through growth of housing and the economy, with 13,200 homes over the next 40 years and 11,000 new jobs, as well as new shops, schools, public spaces and community facilities. During the meeting, The Prince invited partners to visit Nansledan, a 218-hectare urban extension of Newquay in Cornwall, led by the Duchy of Cornwall with support from The Prince’s Foundation for Building Community. Like the garden town’s ambitions, Nansledan has been designed to champion sustainable development environmentally, socially and economically. It will evolve into a community of more than 4,000 homes over the next 40 years to help meet Newquay’s future business, housing, educational and health needs. Mr Simpson said: “HRH was really keen to see how The Prince’s Foundation could support the development of the Garden Town proposal for Wisbech. The ambitious regeneration plans for the town will help the local economy to grow and prosper.” The Prince’s invitation comes just weeks after partners submitted a bid to Government for Wisbech Garden Town to join its Garden Communities programme. The Ministry of Housing, Communities and Local Government (MHCLG) launched the Garden Communities prospectus in August to offer local authorities and private sector partners renewed support for creating high-quality, locally supported, new communities. Successful bids will receive tailored government assistance, including resource funding, advice from Homes England and cross-government brokerage to overcome barriers to delivery. Successful proposals will be announced later in the New Year.
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011 Sybren A. Stüvel <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import setup if __name__ == '__main__': setup(name='rsa', version='3.4.2', description='Pure-Python RSA implementation', author='Sybren A. Stuvel', author_email='[email protected]', maintainer='Sybren A. Stuvel', maintainer_email='[email protected]', url='https://stuvel.eu/rsa', packages=['rsa'], license='ASL 2', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Information Technology', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Security :: Cryptography', ], install_requires=[ 'pyasn1 >= 0.1.3', ], entry_points={'console_scripts': [ 'pyrsa-priv2pub = rsa.util:private_to_public', 'pyrsa-keygen = rsa.cli:keygen', 'pyrsa-encrypt = rsa.cli:encrypt', 'pyrsa-decrypt = rsa.cli:decrypt', 'pyrsa-sign = rsa.cli:sign', 'pyrsa-verify = rsa.cli:verify', 'pyrsa-encrypt-bigfile = rsa.cli:encrypt_bigfile', 'pyrsa-decrypt-bigfile = rsa.cli:decrypt_bigfile', ]}, )
Access to higher education has been limited for many underrepresented racial minorities (URMs) in the United States. Part of the reason lies in weaknesses in the K-16 pipeline. This article takes a collective impact approach to understanding what K-16 personnel can do to assist URMs through proper curriculum development and implementation. Overall, this article will focus on the proficiency model, how it has been used in the mainstream, and why the proficiency model is important when working together within the K-16 continuum.
import json
import os.path
import random
import time

import boto
import boto.s3.connection
import bottlenose
from boto.s3.key import Key
from urllib.error import HTTPError

# Load the AWS key information
f = open(os.path.dirname(os.path.realpath(__file__)) + "/keys/aws_keys.json")
configs = json.loads(f.read())

s3conn = boto.connect_s3(aws_access_key_id=configs["aws_public_key"],
                         aws_secret_access_key=configs["aws_secret_key"])
bucket = s3conn.get_bucket("hootproject")


def error_handler(err):
    ex = err['exception']
    if isinstance(ex, HTTPError) and ex.code == 503:
        time.sleep(random.expovariate(0.1))
        return True


def setup_product_api():
    return bottlenose.Amazon(configs["aws_public_key"],
                             configs["aws_secret_key"],
                             configs["product_api_tag"],
                             ErrorHandler=error_handler,
                             MaxQPS=0.9)


def push_to_S3(filename, jsonToUpload):
    k = Key(bucket)
    k.key = filename
    k.set_contents_from_string(jsonToUpload)


def retrieve_from_S3(filename):
    key = bucket.new_key(filename)
    contents = key.get_contents_as_string().decode(encoding='UTF-8')
    return contents
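A short usage sketch for the helpers above. The key name and payload are made up for illustration, and it assumes keys/aws_keys.json is present next to the module and that the "hootproject" bucket is reachable with those credentials.

# Hypothetical round trip through the helpers defined above.
payload = json.dumps({"product": "example", "price": 9.99})

# Store the JSON document under the given key in the bucket...
push_to_S3("products/example.json", payload)

# ...and read it back as a unicode string.
roundtrip = retrieve_from_S3("products/example.json")
assert json.loads(roundtrip)["product"] == "example"

# The Product Advertising API client is created separately when needed.
amazon = setup_product_api()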
You can change your address, telephone number and email address on your account by simply selecting ‘My Account’ and then ‘Edit Your Details’. If you've recently moved, make sure you update your details before placing an order. As we process orders quickly, we can't change your address once you've placed your order. Orders placed before you updated your address details will be delivered to the address shown on your email confirmation. We're really sorry, but it's not possible for you to change the name on your account yourself. Please contact us and we can arrange this for you.
#!/usr/bin/env python3 from guizero import App, Text, Slider, Combo, PushButton, Box, Picture pause = True def readsensors(): return {"hlt" : 160, "rims" : 152, "bk" : 75} def handlepause(): global pause global pauseState print("Pause Button pressed") if pause: print("running") pause = not pause pauseState.value=("Running") hltFlame.visible=True rimsFlame.visible=True bkFlame.visible=True else: print("pausing") pause = not pause pauseState.value=("Paused") hltFlame.visible=False rimsFlame.visible=False bkFlame.visible=False return app = App(title="Brew GUI", width=1280, height=768, layout="grid") vertPad = Picture(app, image="blank_vert.gif", grid=[0,0]) hltBox = Box(app, layout="grid", grid=[1,0]) hltPad = Picture(hltBox, image="blank.gif", grid=[0,0]) hltTitle = Text(hltBox, text="HLT", grid=[0,1], align="top") hltText = Text(hltBox, text="180", grid=[0,2], align="top") hltSlider = Slider(hltBox, start=212, end=100, horizontal=False, grid=[0,3], align="top") hltSlider.tk.config(length=500, width=50) hltFlamePad = Picture(hltBox, image="blank_flame.gif", grid=[0,4]) hltFlame = Picture(hltBox, image="flame.gif", grid=[0,4]) rimsBox = Box(app, layout="grid", grid=[2,0]) rimsPad = Picture(rimsBox, image="blank.gif", grid=[0,0]) rimsTitle = Text(rimsBox, text="RIMS", grid=[0,1], align="top") rimsText = Text(rimsBox, text="180", grid=[0,2], align="top") rimsSlider = Slider(rimsBox, start=212, end=100, horizontal=False, grid=[0,3], align="top") rimsSlider.tk.config(length=500, width=50) rimsFlamePad = Picture(rimsBox, image="blank_flame.gif", grid=[0,4]) rimsFlame = Picture(rimsBox, image="flame.gif", grid=[0,4]) bkBox = Box(app, layout="grid", grid=[3,0]) bkPad = Picture(bkBox, image="blank.gif", grid=[0,0]) bkTitle = Text(bkBox, text="BK", grid=[0,1], align="top") bkText = Text(bkBox, text="75", grid=[0,2], align="top") bkSlider = Slider(bkBox, start=100, end=0, horizontal=False, grid=[0,3], align="top") bkSlider.tk.config(length=500, width=50) bkFlamePad = Picture(bkBox, image="blank_flame.gif", grid=[0,4]) bkFlame = Picture(bkBox, image="flame.gif", grid=[0,4]) modeBox = Box(app, layout="grid", grid=[4,0]) modePad = Picture(modeBox, image="blank.gif", grid=[0,0]) modeTitle = Text(modeBox, text="Mode", grid=[0,0], align="top") mode = Combo(modeBox, options=["HLT", "RIMS", "BK"], grid=[1,0]) pauseState = Text(modeBox, text="Paused", grid=[0,1]) pauseButton = PushButton(modeBox, icon="pause-play.gif", command=handlepause, grid=[1,1]) hltFlame.visible=False rimsFlame.visible=False bkFlame.visible=False app.display()
We have found 0 Daewoo dealers and Daewoo dealerships in the Peshtigo, Wisconsin area. You can filter your search results by state, city, make, or ZIP code to narrow down the Daewoo dealers and dealerships shown for Peshtigo. If you belong to a Daewoo dealership and wish to list or update your dealership details for Peshtigo, Wisconsin, it is FREE to submit your Daewoo dealership information to our Peshtigo, Wisconsin directory.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import stdimage.models


class Migration(migrations.Migration):

    dependencies = [
        ('bookswap', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='FAQItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.TextField()),
                ('text', models.TextField()),
                ('display_order', models.PositiveSmallIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LocationImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('photo', stdimage.models.StdImageField(upload_to=b'bookswap_photos')),
                ('super_caption', models.TextField(null=True, blank=True)),
                ('sub_caption', models.TextField()),
                ('display_order', models.PositiveSmallIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Twilight Rates Offer Good Golf on Challenging Courses--for Less Than You'd Pay on a Typical Public Course. Here Are 15 of the Area's Best Deals. Most people play golf with friends or business companions. But some of my most enjoyable rounds have been with strangers late in the afternoon. I've played some of the area's best public courses for little money by taking advantage of twilight rates. Because I'm usually paired with a stranger or two, I've gotten to know lots of people–priests, news anchors, actors, and more. Once I was paired with the CEO of Gannett. Drive a few miles north of the Beltway to Worthington Manor in Urbana during the summer and you can play from 4 until dark (except Fridays) for $35 or $40. I can get in 27 holes by playing the front nine twice before moving to the back nine. Worthington's twilight rate starts at 4, but many other courses begin reduced rates at 2. Twilight golfers don't usually need reservations–singles can usually be slipped in at the first tee. But call ahead to make sure the course is open to the public that day. Here are 15 of the best afternoon deals within an hour's drive of DC. 1. P.B. Dye Golf Club, 9526 Dr. Perry Rd., Ijamsville, Md.; 301-607-4653; pbdyegolf.com. The super twilight rate at this challenging course starts at 4. Monday through Thursday it's $35, Friday it's $39, Saturday, Sunday, and holidays $45. Regular twilight rate (2 to 4 PM) is $49 Monday through Thursday. It's a hard course for high handicappers, but it's usually not too crowded because it's not easy to find. Prices include greens fee, golf cart, and use of the driving range. 2. Musket Ridge Golf Club, 3555 Brethren Church Rd., Myersville, Md.; 301-293-9930; musketridge.com. Twilight rate at this nifty course west of Frederick starts at 3 in the spring and fall, 4 in summer season. It's $39 Monday through Thursday, $49 Friday through Sunday. Includes greens fee, cart, and use of the range before your tee time. 3. Worthington Manor Golf Club, 8329 Fingerboard Rd., Urbana, Md.; 301-874-5400; worthingtonmanor.com. Monday through Thursday the noon price is $50 and the twilight rate, starting at 4, is $35 for all you can play on a gorgeous course off I-270. Friday is $60 until 5, $40 thereafter. Saturday, Sunday, and holidays the noon price is $64 and twilight is $40. The play is very friendly and well paced. Price includes greens fee, cart, and use of the range. 4. Penderbrook Golf Club, 3700 Golf Trail La., Fairfax; 703-385-3700; www.penderbrook.com. Super twilight ($25) at 4 all week and includes greens fees, cart, and use of the putting green. The house-lined course is compact and good for high handicappers; it is newly under the oversight of Arnold Palmer Golf Management. No driving range, but there is a small practice area for chipping. There's an all-you-can-eat/all-you-can-play special with breakfast (7:30 to 10:30) and lunch (11 to 2) buffets included in earlier fees. Play can be slow, but it's a terrific deal. 5. Westfields Golf Club, 13940 Balmoral Greens Ave., Clifton; 703-631-3300; westfieldsgolf.com. At one of the finest courses in the area, the twilight rate, starting at 3, is $50 Monday through Thursday; that includes a cart, which you'll need. Saturday, Sunday, and holidays the twilight rate is $55 including free range balls. Westfields was designed by pro Fred Couples. To play this course during prime hours on weekends is nearly $100 a round. 6. Black Rock Golf Course, 20025 Mount Aetna Rd., Hagerstown; 240-313-2816; blackrockgolfcourse.com. 
This is in Washington County at the outer limits of our one-hour perimeter, but if traffic's not bad on I-270, you can make it. Twilight rate Monday through Thursday ($30) starts at noon, Friday through Sunday ($32) at 2; super twilight ($25) at 5 daily. That includes greens fees, cart, and all the golf you can play. The driving range is extra. 7. Augustine Golf Club, 76 Monument Dr., Stafford; 540-720-7374; augustinegolf.com. You have to battle I-95 south toward Fredericksburg, but if you live in that direction, this spectacular course is worth playing. The twilight rate, starting at 3, is $39 Monday through Friday, $49 Saturday, Sunday, and holidays. The fee includes greens fees, cart, and use of range. 8. Blue Mash Golf Course, 5821 Olney-Laytonsville Rd., Laytonsville; 301-670-1966; bluemash.com. This is a good walking course–almost perfectly flat. I favor it on wet days when other places will be cart path only. Twilight rate starts at 3–it's $44 Monday through Thursday, $53 Friday through Sunday and holidays with cart. It's even cheaper (by $6) if you walk. 9. Cross Creek Golf Club, 12800 Bay Hill Dr., Beltsville; 301-595-8901; crosscreekgolfclub.net. Just off I-95 north of the Beltway, Cross Creek is easy to get to. Twilight rate on this tight course is $35 ($45 Saturday and Sunday) with cart starting at 2, and if you start at 4 you can play until dark for $25 ($30 Saturday and Sunday). Watch for Internet ads offering further discounts or free balls, which you may need. 10. Swan Point Yacht & Country Club, 11550 Swan Point Blvd., Issue, Md.; 301-259-0047; swanpointgolf.com. This beautiful course with lots of water has the feel of playing in Myrtle Beach but is seven hours closer. Twilight rate starts at 2 and is $49 any day of the week. It's a great deal for a course of this quality. A play-all-day special that includes lunch is $79 weekdays. Prices include cart, greens fee, and range balls. 11. Raspberry Falls Golf & Hunt Club, 41601 Raspberry Dr., Leesburg; 703-779-2555; raspberryfalls.com. This course is pretty but tough and doesn't come cheap even late in the afternoon. Twilight rate starts at 3 and is $52 Monday through Thursday, $62 Friday through Sunday and holidays–cart, range balls, and greens fees included. 12. Stonewall Golf Club at Lake Manassas, 15601 Turtle Point Dr., Gainesville; 703-753-5101; stonewallgolfclub.com. This wonderful course across the lake from the exclusive Robert Trent Jones layout doesn't cut you much of a break. Summer twilight rate, starting at 4, is $55 Monday through Thursday, $65 Friday through Sunday, including cart, greens fee, and range balls. 13. Whiskey Creek Golf Club, 4804 Whiskey Ct., Ijamsville, Md.; 301-694-2900; whiskeycreekgolf.com. This hilly golf paradise off I-270 starts its twilight rate at 1:30 in April and May and 2:30 June through August. The price drops from $78 to $58 Monday through Thursday and from $93 to $62 Friday through Sunday and holidays, including greens fee, cart, and range balls. 14. Goose Creek Golf Club, 43001 Golf Club Rd., Leesburg; 703-729-2500; goosecreekgolf.com. This course is not in the same league as Westfields or Raspberry Falls, but the price is right, and it's great for beginners. Twilight rate, 3 to 5 Monday through Friday, is all you can play for $25; Saturday and Sunday it's $29. Super twilight, starting at 5, is $19 daily. 15. Reston National Golf Course, 11875 Sunrise Valley Dr., Reston; 703-620-9333; virginiagolf.com/restonnational.html. 
I've enjoyed this course for many years, but it seems to close too often for corporate outings. The course is hacker-friendly, fun to play, but usually slow. Twilight rate, 3 to 6 daily, is $35, $40 Friday. Super twilight starts at 6: Monday through Friday $30, Saturday and Sunday $25.
from django.db import models

from ..professors.models import Professor


def get_upload_file_name(instance, filename):
    return "static/uploaded_files/%s" % (filename)


class University(models.Model):
    name = models.CharField(max_length=50)
    city = models.CharField(max_length=25)
    emblem = models.FileField(upload_to=get_upload_file_name)
    slug = models.SlugField(null=False)

    def __unicode__(self):
        return u'%s %s' % (self.name, self.city)

    def save(self, *args, **kwargs):
        self.slug = self.slug.lower()
        super(University, self).save(*args, **kwargs)

    def count(instance):
        return Professor.objects.filter(university=instance).count()

    def get_grade(instance):
        professors = Professor.objects.filter(university=instance)
        count = 0
        percent = 0
        for p in professors:
            percent += p.get_percent()
            count += 1
        if count == 0:
            percent = 0
        else:
            percent = percent / count
        if percent >= 90:
            return 'A'
        elif percent >= 80:
            return 'B'
        elif percent >= 70:
            return 'C'
        elif percent >= 60:
            return 'D'
        else:
            return 'F'
Alternative method for quantitative enzyme histochemistry of muscle fibers. Application of photographic densitometry combined with atomic absorption spectrophotometry. The present study examines the use of photographic densitometry combined with atomic absorption spectrophotometry for the quantitation of enzyme activities (SDH and ATPase) in fresh frozen sections of rat tibialis anterior muscles. The technique eliminates some difficulties that are inherent in other methods. The reliability of the technique was found to be in the 98% range; the results were precise for all samples studied. The use of SDH to separate muscle fibers into "types" was found to be totally inaccurate, since a full spectrum of activities was observed. ATPase activities could easily be separated into two groups, but a continuum of ATPase activities was observed in the fast-twitch fibers. The simultaneous use of both enzymes was capable of separating the FG, FOG and SO fibers; however, variation within a single type was considerable and a great deal of information was lost when using any classification system. The continuum of SDH activities indicates that the motor units are arranged as a spectrum of fatigue-resistant contractile units. The range of ATPase activities observed is comparable to the range of motor unit contraction times, emphasizing the importance of this enzyme in the regulation of contraction speed.
#!/usr/bin/python2.6 # This file is a part of Metagam project. # # Metagam is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # # Metagam is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Metagam. If not, see <http://www.gnu.org/licenses/>. from mg.constructor import * import re re_double_slash = re.compile(r'//') re_valid_code = re.compile(r'^[a-z0-9][a-z0-9\-_]*(\/[a-z0-9\-_]*[a-z0-9_])*$') re_del = re.compile(r'^del\/(.+)$') re_valid_pgcode = re.compile(r'u_[a-z0-9_]+$') class DBLibraryPage(CassandraObject): clsname = "LibraryPage" indexes = { "all": [[], "code"], "code": [["code"]], } class DBLibraryPageList(CassandraObjectList): objcls = DBLibraryPage class DBLibraryGroup(CassandraObject): clsname = "LibraryGroup" indexes = { "all": [[], "code"], "code": [["code"]], "everywhere": [["everywhere"]], } class DBLibraryGroupList(CassandraObjectList): objcls = DBLibraryGroup class DBLibraryPageGroup(CassandraObject): clsname = "LibraryPageGroup" indexes = { "grp": [["grp"]], "page": [["page"]], } class DBLibraryPageGroupList(CassandraObjectList): objcls = DBLibraryPageGroup class Library(ConstructorModule): def register(self): self.rhook("gameinterface.buttons", self.gameinterface_buttons) self.rhook("ext-library.index", self.library_index, priv="public") self.rhook("ext-library.handler", self.library_handler, priv="public") self.rhook("socio.button-blocks", self.button_blocks) self.rhook("sociointerface.buttons", self.buttons) self.rhook("library-page-index.content", self.page_index) self.rhook("hook-lib.catalog", self.hook_catalog) self.rhook("library.page-groups", self.page_groups) self.rhook("library.icon", self.icon) self.rhook("admin-icons.list", self.icons_list) def icons_list(self, icons): icons.append({ "code": "library-icon", "title": self._("Library icon"), "default": "/st-mg/icons/library-icon.png", }) def icon(self, uri): img = self.call("icon.get", "library-icon", default_icon="/st-mg/icons/library-icon.png") return ' <a href="%s" target="_blank"><img src="%s" alt="?" 
class="library-icon" /></a>' % (uri, img) def button_blocks(self, blocks): blocks.append({"id": "library", "title": self._("Library"), "class": "library"}) def child_modules(self): return ["mg.constructor.library.LibraryAdmin"] def gameinterface_buttons(self, buttons): buttons.append({ "id": "library", "href": "/library", "target": "_blank", "icon": "library.png", "title": self._("Game library"), "block": "top-menu", "order": 8, }) def library_index(self): self.library_page("index") def library_handler(self): req = self.req() self.library_page(req.args) def library_page(self, code): if not re_valid_code.match(code): self.call("web.not_found") lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=code) lst.load() if len(lst): pent = lst[0] else: pent = self.call("library-page-%s.content" % code, render_content=True) if not pent: self.call("web.not_found") vars = { "title": htmlescape(pent.get("title")), "keywords": htmlescape(pent.get("keywords")), "description": htmlescape(pent.get("description")), "allow_bracket_hooks": True, } vars["library_content"] = self.call("web.parse_inline_layout", pent.get("content"), vars) # loading blocks blocks = {} lst = self.objlist(DBLibraryGroupList, query_index="everywhere", query_equal="1") lst.load() for ent in lst: blocks[ent.get("code")] = { "code": ent.get("code"), "content": ent.get("block_content"), "order": ent.get("block_order"), } lst = self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=code) lst.load(silent=True) for ent in lst: if ent.get("grp") not in blocks: grplst = self.objlist(DBLibraryGroupList, query_index="code", query_equal=ent.get("grp")) grplst.load() for grp in grplst: if grp.get("block"): blocks[grp.get("code")] = { "code": grp.get("code"), "content": grp.get("block_content"), "order": grp.get("block_order"), } if len(blocks): blocks = blocks.values() blocks.sort(cmp=lambda x, y: cmp(x.get("order"), y.get("order")) or cmp(x.get("code"), y.get("code"))) vars["library_blocks"] = [self.call("web.parse_inline_layout", blk["content"], vars) for blk in blocks] # loading parents menu_left = [{"html": vars["title"], "lst": True}] parent = pent.get("parent") shown = set() shown.add(pent.get("code")) while parent and parent not in shown: shown.add(parent) lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=parent) lst.load() if len(lst): parent_ent = lst[0] else: parent_ent = self.call("library-page-%s.content" % parent, render_content=False) if not parent_ent: break menu_left.insert(0, {"html": htmlescape(parent_ent.get("title")), "href": "/library" if parent == "index" else "/library/%s" % parent}) parent = parent_ent.get("parent") if menu_left: vars["menu_left"] = menu_left self.call("socio.response_template", "library.html", vars) def buttons(self, buttons): buttons.append({ "id": "forum-library", "href": "/library", "title": self._("Library"), "target": "_self", "block": "forum", "order": 10, "left": True, }) def page_index(self, render_content): pageinfo = { "title": self._("Library - %s") % self.app().project.get("title_short"), } if render_content: pageinfo["content"] = '[hook:lib.catalog grp="index"]' return pageinfo def page_groups(self, page_groups): page_groups.append({ "code": "index", "title": self._("Publish on the library indexpage"), }) lst = self.objlist(DBLibraryGroupList, query_index="all") lst.load() for ent in lst: page_groups.append({ "code": ent.get("code"), "title": ent.get("title"), "uuid": ent.uuid, "manual": True, "everywhere": ent.get("everywhere"), }) def 
hook_catalog(self, vars, grp, delim="<br />"): lst = self.objlist(DBLibraryPageGroupList, query_index="grp", query_equal=grp) lst.load(silent=True) pages = [] for ent in lst: pages.append({"page": ent.get("page"), "order": ent.get("order")}) self.call("library-grp-%s.pages" % grp, pages) pages.sort(cmp=lambda x, y: cmp(x["order"], y["order"]) or cmp(x["page"], y["page"])) page_info = {} lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=[ent["page"] for ent in pages]) lst.load(silent=True) for ent in lst: page_info[ent.get("code")] = ent result = [] for ent in pages: page = page_info.get(ent["page"]) or self.call("library-page-%s.content" % ent["page"], render_content=False) if page: code = page.get("code") result.append('<a href="%s">%s</a>' % ("/library" if code == "index" else "/library/%s" % code, htmlescape(page.get("title")))) return delim.join(result) class LibraryAdmin(ConstructorModule): def register(self): self.rhook("menu-admin-root.index", self.menu_root_index) self.rhook("menu-admin-library.index", self.menu_library_index) self.rhook("permissions.list", self.permissions_list) self.rhook("ext-admin-library.pages", self.admin_pages, priv="library.edit") self.rhook("headmenu-admin-library.pages", self.headmenu_pages) self.rhook("ext-admin-library.page-groups", self.admin_page_groups, priv="library.edit") self.rhook("headmenu-admin-library.page-groups", self.headmenu_page_groups) self.rhook("objclasses.list", self.objclasses_list) self.rhook("advice-admin-library.index", self.advice_library) self.rhook("admin-sociointerface.design-files", self.design_files) def design_files(self, files): files.append({"filename": "library.html", "description": self._("Library page layout"), "doc": "/doc/design/library"}) def advice_library(self, hook, args, advice): advice.append({"title": self._("Library documentation"), "content": self._('You can find detailed information on the library system in the <a href="//www.%s/doc/library" target="_blank">library page</a> in the reference manual.') % self.main_host}) def objclasses_list(self, objclasses): objclasses["LibraryPage"] = (DBLibraryPage, DBLibraryPageList) objclasses["LibraryGroup"] = (DBLibraryGroup, DBLibraryGroupList) objclasses["LibraryPageGroup"] = (DBLibraryPageGroup, DBLibraryPageGroupList) def menu_root_index(self, menu): menu.append({"id": "library.index", "text": self._("Library"), "order": 80}) def menu_library_index(self, menu): req = self.req() if req.has_access("library.edit"): menu.append({"id": "library/page-groups", "text": self._("Library page groups"), "order": 5, "leaf": True}) menu.append({"id": "library/pages", "text": self._("Library pages"), "order": 10, "leaf": True}) def permissions_list(self, perms): perms.append({"id": "library.edit", "name": self._("Library editing")}) def headmenu_pages(self, args): if args == "new": return [self._("New page"), "library/pages"] elif args: try: page = self.obj(DBLibraryPage, args) except ObjectNotFoundException: pass else: return [htmlescape(page.get("title")), "library/pages"] return self._("Library pages") def admin_pages(self): req = self.req() m = re_del.match(req.args) if m: uuid = m.group(1) try: page = self.obj(DBLibraryPage, uuid) except ObjectNotFoundException: pass else: page.remove() self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=page.get("code")).remove() self.call("admin.redirect", "library/pages") if req.args: if req.args != "new": try: page = self.obj(DBLibraryPage, req.args) except ObjectNotFoundException: 
self.call("admin.redirect", "library/pages") else: page = self.obj(DBLibraryPage) page_groups = [] self.call("library.page-groups", page_groups) page_groups = [pg for pg in page_groups if not pg.get("everywhere")] if req.ok(): errors = {} code = req.param("code").strip() if not code: errors["code"] = self._("This field is mandatory") elif code.startswith('/'): errors["code"] = self._("Code can't start with slash") elif code.endswith('/'): errors["code"] = self._("Code can't end with slash") elif re_double_slash.search(code): errors["code"] = self._("Code can't contain '//'") elif not re_valid_code.match(code): errors["code"] = self._("Invalid format") else: lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=code) if len(lst) and lst[0].uuid != page.uuid: errors["code"] = self._("There is a page with the same code already") else: page.set("code", code) title = req.param("title").strip() if not title: errors["title"] = self._("This field is mandatory") else: page.set("title", title) content = req.param("content").strip() page.set("content", content) keywords = req.param("keywords").strip() if not keywords: errors["keywords"] = self._("This field is mandatory") else: page.set("keywords", keywords) description = req.param("description").strip() if not description: errors["description"] = self._("This field is mandatory") else: page.set("description", description) page.set("parent", req.param("parent").strip()) if len(errors): self.call("web.response_json", {"success": False, "errors": errors}) page.store() self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=page.get("code")).remove() for grp in page_groups: order = req.param("grp-%s" % grp.get("code")) if order != "": obj = self.obj(DBLibraryPageGroup) obj.set("page", page.get("code")) obj.set("grp", grp.get("code")) obj.set("order", intz(order)) obj.store() self.call("admin.redirect", "library/pages") fields = [ {"name": "code", "label": self._("Page code (latin letters, slashes, digits and '-'). This page code is practically a component of the page URL. This library page will be available as '/library/&lt;code&gt;'. You may use slashes. For example, 'clans/wars' will be available at '/library/clans/wars'. Special code 'index' means library index page: '/library'"), "value": page.get("code")}, {"name": "title", "label": self._("Page title"), "value": page.get("title")}, {"name": "parent", "label": self._("Code of the parent page"), "value": page.get("parent")}, {"name": "content", "type": "htmleditor", "label": self._("Page content. You may use hooks to include any dynamic content"), "value": page.get("content")}, {"name": "keywords", "label": self._("Page keywords (visible to search engines). Comma delimited"), "value": page.get("keywords")}, {"name": "description", "label": self._("Page decription (visible to search engines only)"), "value": page.get("description")}, ] lst = self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=page.get("code")) lst.load() group_enabled = {} for ent in lst: group_enabled[ent.get("grp")] = ent.get("order") fields.append({"type": "header", "html": self._("Which groups this page belongs to. If you want any page to show in the group specify an integer value here. 
This value will be the sorting order of the page in the group")}) col = 0 for grp in page_groups: fields.append({"name": "grp-%s" % grp.get("code"), "label": htmlescape(grp.get("title")), "value": group_enabled.get(grp.get("code")), "inline": (col % 3 != 0)}) col += 1 while col % 3 != 0: fields.append({"type": "empty", "inline": True}) col += 1 self.call("admin.form", fields=fields, modules=["HtmlEditorPlugins"]) rows = [] lst = self.objlist(DBLibraryPageList, query_index="all") lst.load() for ent in lst: code = ent.get("code") rows.append([ code, '<hook:admin.link href="library/pages/%s" title="%s" />' % (ent.uuid, htmlescape(ent.get("title"))), '<hook:admin.link href="library/pages/del/%s" title="%s" confirm="%s" />' % (ent.uuid, self._("delete"), self._("Are you sure want to delete this page?")), '<a href="%s" target="_blank">%s</a>' % ("/library" if code == "index" else "/library/%s" % code, self._("view")), ]) vars = { "tables": [ { "links": [ {"hook": "library/pages/new", "text": self._("New library page"), "lst": True}, ], "header": [ self._("Page code"), self._("Title"), self._("Deletion"), self._("Viewing"), ], "rows": rows } ] } self.call("admin.response_template", "admin/common/tables.html", vars) def headmenu_page_groups(self, args): if args == "new": return [self._("New page group"), "library/page-groups"] elif args: try: page_group = self.obj(DBLibraryGroup, args) except ObjectNotFoundException: pass else: return [htmlescape(page_group.get("title")), "library/page-groups"] return self._("Library page groups") def admin_page_groups(self): req = self.req() m = re_del.match(req.args) if m: uuid = m.group(1) try: page_group = self.obj(DBLibraryGroup, uuid) except ObjectNotFoundException: pass else: page_group.remove() self.objlist(DBLibraryPageGroupList, query_index="grp", query_equal=page_group.get("code")).remove() self.call("admin.redirect", "library/page-groups") if req.args: if req.args != "new": try: page_group = self.obj(DBLibraryGroup, req.args) except ObjectNotFoundException: self.call("admin.redirect", "library/page-groups") else: page_group = self.obj(DBLibraryGroup) if req.ok(): errors = {} code = req.param("code").strip() if not code: errors["code"] = self._("This field is mandatory") elif not code.startswith("u_"): errors["code"] = self._("Identifier must start with 'u_'") elif not re_valid_pgcode.match(code): errors["code"] = self._("Invalid format") else: lst = self.objlist(DBLibraryGroupList, query_index="code", query_equal=code) if len(lst) and lst[0].uuid != page_group.uuid: errors["code"] = self._("There is a page group with the same code already") else: page_group.set("code", code) title = req.param("title").strip() if not title: errors["title"] = self._("This field is mandatory") else: page_group.set("title", title) if req.param("block"): page_group.set("block", 1) page_group.set("block_order", intz(req.param("block_order"))) if req.param("block_everywhere"): page_group.set("everywhere", 1) else: page_group.delkey("everywhere") page_group.set("block_content", req.param("block_content")) else: page_group.delkey("block") page_group.delkey("block_order") page_group.delkey("block_everywhere") page_group.delkey("block_content") if len(errors): self.call("web.response_json", {"success": False, "errors": errors}) page_group.store() self.call("admin.redirect", "library/page-groups") fields = [ {"name": "code", "label": self._("Page group code (must start with u_ and contain latin letters, digits and '_' symbols)"), "value": page_group.get("code")}, {"name": "title", 
"label": self._("Page group title"), "value": page_group.get("title")}, {"name": "block", "label": self._("This group is a block (HTML portion that will be shown on every page in the group)"), "type": "checkbox", "checked": page_group.get("block")}, {"name": "block_content", "type": "htmleditor", "label": self._("Block content. You may use hooks to include any dynamic content"), "value": page_group.get("block_content"), "condition": "[block]"}, {"name": "block_order", "label": self._("Block sorting order"), "value": page_group.get("block_order"), "condition": "[block]"}, {"name": "block_everywhere", "type": "checkbox", "label": self._("This block is shown on the every library page"), "checked": page_group.get("everywhere"), "condition": "[block]"}, ] self.call("admin.form", fields=fields, modules=["HtmlEditorPlugins"]) page_groups = [] self.call("library.page-groups", page_groups) rows = [] for ent in page_groups: code = ent.get("code") manual = ent.get("manual") title = htmlescape(ent.get("title")) rows.append([ code, '<hook:admin.link href="library/page-groups/%s" title="%s" />' % (ent.get("uuid"), title) if manual else title, '<hook:admin.link href="library/page-groups/del/%s" title="%s" confirm="%s" />' % (ent.get("uuid"), self._("delete"), self._("Are you sure want to delete this page group?")) if manual else None, ]) vars = { "tables": [ { "links": [ {"hook": "library/page-groups/new", "text": self._("New library page group"), "lst": True}, ], "header": [ self._("Page group code"), self._("Title"), self._("Deletion"), ], "rows": rows } ] } self.call("admin.response_template", "admin/common/tables.html", vars)
Draw a line to keep the good guys alive and let the bad guys fall. Collect stars for points. Use the mouse: click and hold to draw a line.
#=========================================#
#                BEAUTIFUL                #
#=========================================#

#=========================================#
#                 HEADER                  #
#=========================================#

def header(text="MINESWEEPER", vp=1, p=6, m=7, s="#", s2="="):
    """ Display a nicely formatted title. """
    padding = p*" "
    margin = m*" "
    void = margin + s + " "*(len(text) + 2*p) + s
    print("\n" + margin + s + s2*(len(text) + 2*p) + s)
    for i in range(vp):
        print(void)
    print(margin + s + padding + text + padding + s)
    for i in range(vp):
        print(void)
    print(margin + s + s2*(len(text) + 2*p) + s + "\n")

#=========================================#
#                INPUT INT                #
#=========================================#

def input_int(txt="Please enter a number: ", error="!! You did not enter a number"):
    """ Return an int entered by the user, with exception handling. """
    while True:
        try:
            n = int(input(txt))
            break
        except ValueError:
            print(error + "\n")
    return n
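A short usage sketch for the two helpers above (illustrative only; the prompt text and title are arbitrary examples, not part of the original module):

# Illustrative usage of header() and input_int().
if __name__ == "__main__":
    header("MINESWEEPER")
    size = input_int("Please enter the grid size: ")
    print("Grid size:", size)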
This is the perfect fall baby dress! The top is nice and stretchy so it will last for a nice long time, and the skirt is silky smooth. It is oh-so-soft from top to bottom, and cute as a button too! We love this fun and whimsical print, and we think you will too!
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.metastore_v1alpha.services.dataproc_metastore import pagers from google.cloud.metastore_v1alpha.types import metastore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import DataprocMetastoreTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DataprocMetastoreGrpcTransport from .transports.grpc_asyncio import DataprocMetastoreGrpcAsyncIOTransport class DataprocMetastoreClientMeta(type): """Metaclass for the DataprocMetastore client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[DataprocMetastoreTransport]] _transport_registry["grpc"] = DataprocMetastoreGrpcTransport _transport_registry["grpc_asyncio"] = DataprocMetastoreGrpcAsyncIOTransport def get_transport_class( cls, label: str = None, ) -> Type[DataprocMetastoreTransport]: """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class DataprocMetastoreClient(metaclass=DataprocMetastoreClientMeta): """Configures and manages metastore services. Metastore services are fully managed, highly available, auto-scaled, auto-healing, OSS-native deployments of technical metadata management software. Each metastore service exposes a network endpoint through which metadata queries are served. Metadata queries can originate from a variety of sources, including Apache Hive, Apache Presto, and Apache Spark. 
The Dataproc Metastore API defines the following resource model: - The service works with a collection of Google Cloud projects, named: ``/projects/*`` - Each project has a collection of available locations, named: ``/locations/*`` (a location must refer to a Google Cloud ``region``) - Each location has a collection of services, named: ``/services/*`` - Dataproc Metastore services are resources with names of the form: ``/projects/{project_number}/locations/{location_id}/services/{service_id}``. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "metastore.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: DataprocMetastoreClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: DataprocMetastoreClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> DataprocMetastoreTransport: """Returns the transport used by the client instance. Returns: DataprocMetastoreTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def backup_path(project: str, location: str, service: str, backup: str,) -> str: """Returns a fully-qualified backup string.""" return "projects/{project}/locations/{location}/services/{service}/backups/{backup}".format( project=project, location=location, service=service, backup=backup, ) @staticmethod def parse_backup_path(path: str) -> Dict[str, str]: """Parses a backup path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)/backups/(?P<backup>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def metadata_import_path( project: str, location: str, service: str, metadata_import: str, ) -> str: """Returns a fully-qualified metadata_import string.""" return "projects/{project}/locations/{location}/services/{service}/metadataImports/{metadata_import}".format( project=project, location=location, service=service, metadata_import=metadata_import, ) @staticmethod def parse_metadata_import_path(path: str) -> Dict[str, str]: """Parses a metadata_import path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)/metadataImports/(?P<metadata_import>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def network_path(project: str, network: str,) -> str: """Returns a fully-qualified network string.""" return "projects/{project}/global/networks/{network}".format( project=project, network=network, ) @staticmethod def parse_network_path(path: str) -> Dict[str, str]: """Parses a network path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path ) return m.groupdict() if m else {} @staticmethod def service_path(project: str, location: str, service: str,) -> str: """Returns a fully-qualified service string.""" return "projects/{project}/locations/{location}/services/{service}".format( project=project, location=location, service=service, ) @staticmethod def parse_service_path(path: str) -> Dict[str, str]: """Parses a service path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m 
else {} @staticmethod def common_project_path(project: str,) -> str: """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, DataprocMetastoreTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the dataproc metastore client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, DataprocMetastoreTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. 
use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, DataprocMetastoreTransport): # transport is a DataprocMetastoreTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) def list_services( self, request: metastore.ListServicesRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListServicesPager: r"""Lists services in a project and location. Args: request (google.cloud.metastore_v1alpha.types.ListServicesRequest): The request object. Request message for [DataprocMetastore.ListServices][google.cloud.metastore.v1alpha.DataprocMetastore.ListServices]. parent (str): Required. The relative resource name of the location of metastore services to list, in the following form: ``projects/{project_number}/locations/{location_id}``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.metastore_v1alpha.services.dataproc_metastore.pagers.ListServicesPager: Response message for [DataprocMetastore.ListServices][google.cloud.metastore.v1alpha.DataprocMetastore.ListServices]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.ListServicesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.ListServicesRequest): request = metastore.ListServicesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_services] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListServicesPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def get_service( self, request: metastore.GetServiceRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> metastore.Service: r"""Gets the details of a single service. Args: request (google.cloud.metastore_v1alpha.types.GetServiceRequest): The request object. Request message for [DataprocMetastore.GetService][google.cloud.metastore.v1alpha.DataprocMetastore.GetService]. name (str): Required. The relative resource name of the metastore service to retrieve, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.metastore_v1alpha.types.Service: A managed metastore service that serves metadata queries. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.GetServiceRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.GetServiceRequest): request = metastore.GetServiceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_service] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def create_service( self, request: metastore.CreateServiceRequest = None, *, parent: str = None, service: metastore.Service = None, service_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a metastore service in a project and location. Args: request (google.cloud.metastore_v1alpha.types.CreateServiceRequest): The request object. Request message for [DataprocMetastore.CreateService][google.cloud.metastore.v1alpha.DataprocMetastore.CreateService]. parent (str): Required. The relative resource name of the location in which to create a metastore service, in the following form: ``projects/{project_number}/locations/{location_id}``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. service (google.cloud.metastore_v1alpha.types.Service): Required. The Metastore service to create. The ``name`` field is ignored. The ID of the created metastore service must be provided in the request's ``service_id`` field. This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided, this should not be set. service_id (str): Required. The ID of the metastore service, which is used as the final component of the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin with a letter, end with a letter or number, and consist of alpha-numeric ASCII characters or hyphens. This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.Service` A managed metastore service that serves metadata queries. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, service, service_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.CreateServiceRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.CreateServiceRequest): request = metastore.CreateServiceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if service is not None: request.service = service if service_id is not None: request.service_id = service_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.create_service] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.Service, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def update_service( self, request: metastore.UpdateServiceRequest = None, *, service: metastore.Service = None, update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates the parameters of a single service. Args: request (google.cloud.metastore_v1alpha.types.UpdateServiceRequest): The request object. Request message for [DataprocMetastore.UpdateService][google.cloud.metastore.v1alpha.DataprocMetastore.UpdateService]. service (google.cloud.metastore_v1alpha.types.Service): Required. The metastore service to update. The server only merges fields in the service if they are specified in ``update_mask``. The metastore service's ``name`` field is used to identify the metastore service to be updated. This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A field mask used to specify the fields to be overwritten in the metastore service resource by the update. Fields specified in the ``update_mask`` are relative to the resource (not to the full request). A field is overwritten if it is in the mask. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.Service` A managed metastore service that serves metadata queries. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([service, update_mask]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.UpdateServiceRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.UpdateServiceRequest): request = metastore.UpdateServiceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if service is not None: request.service = service if update_mask is not None: request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.update_service] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("service.name", request.service.name),) ), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.Service, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def delete_service( self, request: metastore.DeleteServiceRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Deletes a single service. Args: request (google.cloud.metastore_v1alpha.types.DeleteServiceRequest): The request object. Request message for [DataprocMetastore.DeleteService][google.cloud.metastore.v1alpha.DataprocMetastore.DeleteService]. name (str): Required. The relative resource name of the metastore service to delete, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.DeleteServiceRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.DeleteServiceRequest): request = metastore.DeleteServiceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_service] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. 
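        # Editor's note (added): deletion produces no payload, so the operation
        # future built below resolves to `empty_pb2.Empty`; callers typically
        # just call `.result()` on it to block until the service is removed.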
response = operation.from_gapic( response, self._transport.operations_client, empty_pb2.Empty, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def list_metadata_imports( self, request: metastore.ListMetadataImportsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListMetadataImportsPager: r"""Lists imports in a service. Args: request (google.cloud.metastore_v1alpha.types.ListMetadataImportsRequest): The request object. Request message for [DataprocMetastore.ListMetadataImports][google.cloud.metastore.v1alpha.DataprocMetastore.ListMetadataImports]. parent (str): Required. The relative resource name of the service whose metadata imports to list, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}/metadataImports``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.metastore_v1alpha.services.dataproc_metastore.pagers.ListMetadataImportsPager: Response message for [DataprocMetastore.ListMetadataImports][google.cloud.metastore.v1alpha.DataprocMetastore.ListMetadataImports]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.ListMetadataImportsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.ListMetadataImportsRequest): request = metastore.ListMetadataImportsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_metadata_imports] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListMetadataImportsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def get_metadata_import( self, request: metastore.GetMetadataImportRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> metastore.MetadataImport: r"""Gets details of a single import. Args: request (google.cloud.metastore_v1alpha.types.GetMetadataImportRequest): The request object. 
Request message for [DataprocMetastore.GetMetadataImport][google.cloud.metastore.v1alpha.DataprocMetastore.GetMetadataImport]. name (str): Required. The relative resource name of the metadata import to retrieve, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}/metadataImports/{import_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.metastore_v1alpha.types.MetadataImport: A metastore resource that imports metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.GetMetadataImportRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.GetMetadataImportRequest): request = metastore.GetMetadataImportRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_metadata_import] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def create_metadata_import( self, request: metastore.CreateMetadataImportRequest = None, *, parent: str = None, metadata_import: metastore.MetadataImport = None, metadata_import_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a new MetadataImport in a given project and location. Args: request (google.cloud.metastore_v1alpha.types.CreateMetadataImportRequest): The request object. Request message for [DataprocMetastore.CreateMetadataImport][google.cloud.metastore.v1alpha.DataprocMetastore.CreateMetadataImport]. parent (str): Required. The relative resource name of the service in which to create a metastore import, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. metadata_import (google.cloud.metastore_v1alpha.types.MetadataImport): Required. The metadata import to create. The ``name`` field is ignored. The ID of the created metadata import must be provided in the request's ``metadata_import_id`` field. This corresponds to the ``metadata_import`` field on the ``request`` instance; if ``request`` is provided, this should not be set. metadata_import_id (str): Required. 
The ID of the metadata import, which is used as the final component of the metadata import's name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or number, and consist of alpha-numeric ASCII characters or hyphens. This corresponds to the ``metadata_import_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.MetadataImport` A metastore resource that imports metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, metadata_import, metadata_import_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.CreateMetadataImportRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.CreateMetadataImportRequest): request = metastore.CreateMetadataImportRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if metadata_import is not None: request.metadata_import = metadata_import if metadata_import_id is not None: request.metadata_import_id = metadata_import_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_metadata_import] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.MetadataImport, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def update_metadata_import( self, request: metastore.UpdateMetadataImportRequest = None, *, metadata_import: metastore.MetadataImport = None, update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates a single import. Only the description field of MetadataImport is supported to be updated. Args: request (google.cloud.metastore_v1alpha.types.UpdateMetadataImportRequest): The request object. Request message for [DataprocMetastore.UpdateMetadataImport][google.cloud.metastore.v1alpha.DataprocMetastore.UpdateMetadataImport]. metadata_import (google.cloud.metastore_v1alpha.types.MetadataImport): Required. The metadata import to update. The server only merges fields in the import if they are specified in ``update_mask``. 
The metadata import's ``name`` field is used to identify the metastore import to be updated. This corresponds to the ``metadata_import`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A field mask used to specify the fields to be overwritten in the metadata import resource by the update. Fields specified in the ``update_mask`` are relative to the resource (not to the full request). A field is overwritten if it is in the mask. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.MetadataImport` A metastore resource that imports metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([metadata_import, update_mask]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.UpdateMetadataImportRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.UpdateMetadataImportRequest): request = metastore.UpdateMetadataImportRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if metadata_import is not None: request.metadata_import = metadata_import if update_mask is not None: request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_metadata_import] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("metadata_import.name", request.metadata_import.name),) ), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.MetadataImport, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def export_metadata( self, request: metastore.ExportMetadataRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Exports metadata from a service. Args: request (google.cloud.metastore_v1alpha.types.ExportMetadataRequest): The request object. Request message for [DataprocMetastore.ExportMetadata][google.cloud.metastore.v1alpha.DataprocMetastore.ExportMetadata]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.MetadataExport` The details of a metadata export operation. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a metastore.ExportMetadataRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.ExportMetadataRequest): request = metastore.ExportMetadataRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.export_metadata] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("service", request.service),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.MetadataExport, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def restore_service( self, request: metastore.RestoreServiceRequest = None, *, service: str = None, backup: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Restores a service from a backup. Args: request (google.cloud.metastore_v1alpha.types.RestoreServiceRequest): The request object. Request message for [DataprocMetastore.Restore][]. service (str): Required. The relative resource name of the metastore service to run restore, in the following form: ``projects/{project_id}/locations/{location_id}/services/{service_id}`` This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided, this should not be set. backup (str): Required. The relative resource name of the metastore service backup to restore from, in the following form: ``projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}`` This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.Restore` The details of a metadata restore operation. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([service, backup]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.RestoreServiceRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
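        # Editor's note (added, assuming standard proto-plus behaviour): request
        # classes also accept a plain dict, so `request` may arrive in either
        # form here; the constructor below normalizes it into a
        # RestoreServiceRequest.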
if not isinstance(request, metastore.RestoreServiceRequest): request = metastore.RestoreServiceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if service is not None: request.service = service if backup is not None: request.backup = backup # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.restore_service] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("service", request.service),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.Restore, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def list_backups( self, request: metastore.ListBackupsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsPager: r"""Lists backups in a service. Args: request (google.cloud.metastore_v1alpha.types.ListBackupsRequest): The request object. Request message for [DataprocMetastore.ListBackups][google.cloud.metastore.v1alpha.DataprocMetastore.ListBackups]. parent (str): Required. The relative resource name of the service whose backups to list, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}/backups``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.metastore_v1alpha.services.dataproc_metastore.pagers.ListBackupsPager: Response message for [DataprocMetastore.ListBackups][google.cloud.metastore.v1alpha.DataprocMetastore.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.ListBackupsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.ListBackupsRequest): request = metastore.ListBackupsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_backups] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. 
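        # Editor's note (illustrative, added): the pager built from this response
        # can be iterated directly and fetches further pages on demand, e.g.
        #   for backup in client.list_backups(parent=parent):
        #       print(backup.name)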
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListBackupsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def get_backup( self, request: metastore.GetBackupRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> metastore.Backup: r"""Gets details of a single backup. Args: request (google.cloud.metastore_v1alpha.types.GetBackupRequest): The request object. Request message for [DataprocMetastore.GetBackup][google.cloud.metastore.v1alpha.DataprocMetastore.GetBackup]. name (str): Required. The relative resource name of the backup to retrieve, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}/backups/{backup_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.metastore_v1alpha.types.Backup: The details of a backup resource. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.GetBackupRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.GetBackupRequest): request = metastore.GetBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_backup] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def create_backup( self, request: metastore.CreateBackupRequest = None, *, parent: str = None, backup: metastore.Backup = None, backup_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a new Backup in a given project and location. Args: request (google.cloud.metastore_v1alpha.types.CreateBackupRequest): The request object. Request message for [DataprocMetastore.CreateBackup][google.cloud.metastore.v1alpha.DataprocMetastore.CreateBackup]. parent (str): Required. 
The relative resource name of the service in which to create a backup of the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. backup (google.cloud.metastore_v1alpha.types.Backup): Required. The backup to create. The ``name`` field is ignored. The ID of the created backup must be provided in the request's ``backup_id`` field. This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. backup_id (str): Required. The ID of the backup, which is used as the final component of the backup's name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or number, and consist of alpha-numeric ASCII characters or hyphens. This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.metastore_v1alpha.types.Backup` The details of a backup resource. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, backup, backup_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.CreateBackupRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.CreateBackupRequest): request = metastore.CreateBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if backup is not None: request.backup = backup if backup_id is not None: request.backup_id = backup_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_backup] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, metastore.Backup, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response def delete_backup( self, request: metastore.DeleteBackupRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Deletes a single backup. Args: request (google.cloud.metastore_v1alpha.types.DeleteBackupRequest): The request object. 
Request message for [DataprocMetastore.DeleteBackup][google.cloud.metastore.v1alpha.DataprocMetastore.DeleteBackup]. name (str): Required. The relative resource name of the backup to delete, in the following form: ``projects/{project_number}/locations/{location_id}/services/{service_id}/backups/{backup_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a metastore.DeleteBackupRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, metastore.DeleteBackupRequest): request = metastore.DeleteBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_backup] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, empty_pb2.Empty, metadata_type=metastore.OperationMetadata, ) # Done; return the response. return response try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-metastore",).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("DataprocMetastoreClient",)
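# ---------------------------------------------------------------------------
# Editor's usage sketch (an addition, not part of the generated client): one
# way the synchronous DataprocMetastoreClient defined above could be driven
# end to end. The project, location, and service IDs are hypothetical
# placeholders, and the empty Service message is a minimal stand-in.
# ---------------------------------------------------------------------------
def _example_create_and_fetch_service():
    # Imported lazily so this sketch does not alter module import behaviour.
    from google.cloud import metastore_v1alpha

    client = metastore_v1alpha.DataprocMetastoreClient()

    # create_service returns a long-running operation; result() blocks until
    # the Dataproc Metastore service has actually been provisioned.
    operation = client.create_service(
        parent="projects/my-project/locations/us-central1",
        service=metastore_v1alpha.Service(),  # populate fields as needed
        service_id="my-metastore",
    )
    service = operation.result()

    # Fetch the service back by its full resource name.
    return client.get_service(name=service.name)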
The FloraFlex™ Dart Dripper is a first-of-its-kind subsurface, non-pressure-compensating dripper. Insert the Dart directly into the medium wherever you would like to deliver water and nutrients: on the sides, the bottom, or the top; the choice is yours. The subsurface design of the dripper delivers water and nutrients straight to the root zone of your plants.
# # DBus structures for validation. # # Copyright (C) 2019 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from dasbus.structure import DBusData from dasbus.typing import * # pylint: disable=wildcard-import __all__ = ["ValidationReport"] class ValidationReport(DBusData): """The validation report.""" def __init__(self): self._error_messages = [] self._warning_messages = [] def is_valid(self): """Is the validation successful? :return: True or False """ return not self._error_messages def get_messages(self): """Get all messages. :return: a list of strings """ return self.error_messages + self.warning_messages @property def error_messages(self) -> List[Str]: """List of error messages. :return: a list of strings """ return self._error_messages @error_messages.setter def error_messages(self, messages: List[Str]): self._error_messages = list(messages) @property def warning_messages(self) -> List[Str]: """List of warning messages. :return: a list of strings """ return self._warning_messages @warning_messages.setter def warning_messages(self, messages: List[Str]): self._warning_messages = list(messages)
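# Editor's usage sketch (an addition): a brief demonstration of the report
# API defined above. The message strings are illustrative only.
if __name__ == "__main__":
    report = ValidationReport()

    # Warnings alone do not invalidate the report.
    report.warning_messages = ["The device is unusually small."]
    assert report.is_valid()

    # Any error message makes the report invalid.
    report.error_messages = ["The device does not exist."]
    assert not report.is_valid()

    # get_messages lists errors before warnings.
    for message in report.get_messages():
        print(message)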
Plants, being immobile, have developed various adaptive responses to interpret and utilize the directionality, quantity, and quality of light. One such adaptive response is phototropism, in which plant organs bend toward a directional light source. In Arabidopsis, the NPH3 protein is absolutely required for phototropism, and it interacts with the phot1 photoreceptor. Despite the unique properties of this critical protein in mediating phototropism, little is known about how phot1 signals through NPH3. This dissertation presents results that provide a better understanding of how NPH3 functions in mediating phototropism. In brief, it is demonstrated that NPH3 has reversible phosphorylation states and that dephosphorylation of NPH3 is dependent on phot1. Lastly, my dissertation work has led to the identification of three additional novel components of the phototropic pathway: 1) CUL3, which directly interacts with NPH3; 2) a protein kinase that phosphorylates NPH3 in the dark; and 3) a protein phosphatase that dephosphorylates NPH3 in the light.