#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# For line features, finds the bearing angle from the first point to the last point.

import os, sys, traceback, math
import arcpy


def Geo2Arithmetic(inAngle):
    inAngle = math.fmod(inAngle, 360.0)
    # 0 to 90
    if (inAngle >= 0.0 and inAngle <= 90.0):
        outAngle = math.fabs(inAngle - 90.0)
    # 90 to 360
    if (inAngle >= 90.0 and inAngle < 360.0):
        outAngle = 360.0 - (inAngle - 90.0)
    return float(outAngle)


inputFeatures = arcpy.GetParameterAsText(0)    # C:\Workspace\ArcGIS Defense 10.1\path slope\default.gdb\roads
inputAngleField = arcpy.GetParameterAsText(1)  # aoo
deleteme = []
debug = False

try:
    arcpy.AddMessage("Updating " + inputAngleField + " field for " +
                     str(arcpy.GetCount_management(inputFeatures).getOutput(0)) + " rows ...")
    with arcpy.da.UpdateCursor(inputFeatures, ["OID@", "SHAPE@", inputAngleField]) as rows:
        for row in rows:
            angle = None
            geometry = row[1]
            # firstPoint, lastPoint
            firstPoint = geometry.firstPoint
            lastPoint = geometry.lastPoint
            xdiff = (lastPoint.X - firstPoint.X)
            ydiff = (lastPoint.Y - firstPoint.Y)
            # distance = math.sqrt(math.pow(xdiff,2.0) + math.pow(ydiff,2.0))

            # Convert from quadrants to arithmetic
            if (xdiff == 0.0 and ydiff > 0.0):
                # vertical line, slope infinity
                angle = 90.0
            if (xdiff == 0.0 and ydiff < 0.0):
                # vertical line, slope infinity
                angle = 270.0
            if (xdiff > 0.0 and ydiff == 0.0):
                angle = 0.0
            if (xdiff < 0.0 and ydiff == 0.0):
                angle = 180.0
            if (xdiff > 0.0 and ydiff > 0.0):    # Quadrant I (+,+)
                angle = math.degrees(math.atan(ydiff / xdiff))
            if (xdiff < 0.0 and ydiff > 0.0):    # Quadrant II (-,+)
                angle = 180.0 - math.fabs(math.degrees(math.atan(ydiff / xdiff)))
            if (xdiff < 0.0 and ydiff < 0.0):    # Quadrant III (-,-)
                angle = 180.0 + math.fabs(math.degrees(math.atan(ydiff / xdiff)))
            if (xdiff > 0.0 and ydiff < 0.0):    # Quadrant IV (+,-)
                angle = 360.0 - math.fabs(math.degrees(math.atan(ydiff / xdiff)))
            # if debug == True: arcpy.AddMessage(str(xdiff) + " -- " + str(angle) + " -- " + str(ydiff))

            if not angle == None:
                row[2] = Geo2Arithmetic(angle)
            else:
                arcpy.AddWarning("Empty angle for feature " + str(row[0]) +
                                 ". This could be a closed loop feature.")
                row[2] = None
            # if debug == True: arcpy.AddMessage(" " + str(row))
            rows.updateRow(row)

    arcpy.SetParameter(2, inputFeatures)

except arcpy.ExecuteError:
    # Get the tool error messages
    msgs = arcpy.GetMessages()
    arcpy.AddError(msgs)
    print(msgs)

except:
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    # Return python error messages for use in script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)
    # Print Python error messages for use in Python / Python Window
    print(pymsg + "\n")
    print(msgs)

finally:
    # Cleanup intermediate datasets
    if debug == True:
        arcpy.AddMessage("Removing intermediate datasets...")
    for i in deleteme:
        if debug == True:
            arcpy.AddMessage("Removing: " + str(i))
        arcpy.Delete_management(i)
    if debug == True:
        arcpy.AddMessage("Done")
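The quadrant-by-quadrant branching above can be collapsed with math.atan2, which handles the vertical, horizontal and four-quadrant cases in a single call. The sketch below is not part of the Esri script: it is a minimal, hypothetical helper that works on plain (x, y) tuples instead of arcpy geometries, and it assumes a planar coordinate system (no geodesic correction).

import math

def bearing_first_to_last(first, last):
    """Planar bearing (degrees, 0-360, clockwise from north) from first to last point.
    Returns None when the points coincide, e.g. for a closed loop."""
    xdiff = last[0] - first[0]
    ydiff = last[1] - first[1]
    if xdiff == 0.0 and ydiff == 0.0:
        return None
    # atan2 gives the arithmetic angle (counter-clockwise from east);
    # converting it to a geographic bearing mirrors what Geo2Arithmetic does above.
    return (90.0 - math.degrees(math.atan2(ydiff, xdiff))) % 360.0

print(bearing_first_to_last((0.0, 0.0), (1.0, 1.0)))  # 45.0 (north-east)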
We know that when we evaluate an absolute value, the result is never negative, regardless of the sign of what is inside the absolute value bars. Thus, when solving an equation that contains an absolute value, we must consider that the variable could be either positive or negative. The definition of absolute value tells us that if the absolute value of x is 4, then x is either 4 or -4.
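Restating that case split compactly, using the standard piecewise definition of absolute value:

\[
|x| \;=\; \begin{cases} x, & x \ge 0 \\ -x, & x < 0 \end{cases}
\qquad\Longrightarrow\qquad
|x| = 4 \;\Longleftrightarrow\; x = 4 \ \text{or}\ x = -4 .
\]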
from django.db import models
import string


class Tweet(models.Model):
    user = models.BigIntegerField()
    tid = models.BigIntegerField()
    lat = models.FloatField()
    lon = models.FloatField()
    text = models.TextField(max_length=256)
    time = models.DateField()
    kwd = models.CharField(max_length=50)


class Tweets(models.Model):
    id = models.IntegerField(primary_key=True)  # AutoField?
    longitude = models.CharField(max_length=255, blank=True)
    latitude = models.CharField(max_length=255, blank=True)
    tweet_date = models.CharField(max_length=255, blank=True)
    tweet_time = models.CharField(max_length=255, blank=True)
    user_id = models.CharField(max_length=255, blank=True)
    user_location = models.CharField(max_length=255, blank=True)
    user_lang = models.CharField(max_length=255, blank=True)
    text_id = models.CharField(max_length=255, blank=True)
    text_msg = models.CharField(max_length=255, blank=True)

    class Meta:
        managed = False
        db_table = 'tweets'


class Grids(models.Model):
    id_grid = models.IntegerField(primary_key=True)
    lat_ini = models.FloatField(blank=True, null=True)
    lat_fin = models.FloatField(blank=True, null=True)
    long_ini = models.FloatField(blank=True, null=True)
    long_fin = models.FloatField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'grids'


class UserTweets(models.Model):
    id = models.TextField(primary_key=True)
    number = models.TextField(blank=True)
    n = models.BigIntegerField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'user_tweets'


class TwTabTime(models.Model):
    id_time = models.IntegerField(primary_key=True)
    lapso = models.CharField(max_length=30)
    cantidad_twts = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'tw_tab_time'
Sedona is a YA/New Adult sci-fi fantasy. Its heroine is a surfer and ballet dancer whose posse includes a black lab, a pelican, a cousin with an evil twin, an old surfer dude, and a huge black bear. The old dude exudes calm and shares wisdom beyond anything she’s heard from the family therapist. With fewer words. So why does the rest of the world seem to conspire against her? Including herself, with her high self-expectations and relentless self-reproach. Sedona has reached the end of her junior year at her performing arts high school in Malibu, California. Almost… Exams are over, but here comes the final dance performance. Rehearsals are intense. Class is just – tense. She has had a helluva year without the boyfriend of several previous years. She also lost her best friend to that same boyfriend. Her eleventh-grade year was bleak, and date-free. Except for the piano music in ballet class at school and the pounding of the surf outside at home, she doesn’t want to hear much from anyone anymore. Not from the bratty bunch of girls, the stalking fan club of the gorgeous guy she’s in dance class with, an old friend and a buddy since third grade. Those snarky sophomores waste their long glares and cyberbullying tweets on her. And seriously, not from the sarcastic dance teacher who really has it in for Sedona most days. What is up with that? Navigating a simple conversation with The Mom can get – complicated! When a teen has to stay vigilant to avoid the triggers that lead to frustration, anger, depression or worse, well, that doesn’t always work out, right? The Dad adores her and supports her in every way. But not so close up and personal, like when she was little. Days long gone. Beach time, surfing, and cross-training (weight lifting, kettlebells, extra stretching… especially those ankle and foot joints) fill every day. What better R&R than binge-watching Battlestar Galactica with the Cuz and the sweet, supportive nanny (now general manager of the household and the only reliable person in charge)? With fresh popcorn and churros? Even the pelican doesn’t poop in her world, unlike the bratties at school. Trained since he was a baby, he chills on the deck with the dog. The best of life – surfing, running, binge-watching – and dancing every day. All distractions from lost love, peripheral parents, and mean girls. Sedona could have her pick of the best ballet summer intensive programs, but her self-deprecating guilt has motivated her to work over the summer instead. She knows she plays “poor little rich girl” in her head way too much. Working in a hospice is her way of balancing life, and she can still keep up her ballet training schedule locally. Not that her summer plans turn out remotely like she thought they would! Buxton Deporter seeks representation for Sedona, the first in this YA/New Adult series.
from django.db import models
from django.contrib.auth.models import User


class Product(models.Model):
    """A product is an entity related to any food having nutritional consistency."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=30)
    # Proteins/fats/carbohydrates
    pfc = models.CharField(max_length=10)
    cal_value = models.IntegerField(default=0)

    # Just get a listed nutritional consistency
    def get_pfc(self):
        return self.pfc.split("/")


class Record(models.Model):
    """A record. It is a main entity in a diary; contains what and when a user ate."""
    id = models.AutoField(primary_key=True)
    datetime = models.DateTimeField()
    user = models.ForeignKey(User)
    products = models.ManyToManyField(Product, through='RecordProducts')


class RecordProducts(models.Model):
    """Extending the default many-to-many relationship table to add a weight for each product."""
    record = models.ForeignKey(Record, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    weight = models.IntegerField()
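Because RecordProducts is a custom through model, rows in the many-to-many table are created explicitly so the extra weight column can be filled in. The snippet below is a usage sketch only; the module path diary.models and the sample values are assumptions, not part of the original project (and Record.user is declared without on_delete, which suggests a pre-2.0 Django).

from django.contrib.auth.models import User
from django.utils import timezone

from diary.models import Product, Record, RecordProducts  # hypothetical app path

user = User.objects.create_user(username="alice")
oatmeal = Product.objects.create(name="Oatmeal", pfc="13/7/68", cal_value=370)

record = Record.objects.create(datetime=timezone.now(), user=user)
# With a custom through model, the intermediate row is created directly so the
# per-product weight (grams here, by assumption) can be stored alongside it.
RecordProducts.objects.create(record=record, product=oatmeal, weight=150)

print(oatmeal.get_pfc())  # ['13', '7', '68']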
I started noticing my love for photography when I was a teenager. There was a photo of our family during Hari Raya taken at our kampung in Pahang, Malaysia, that struck a chord. My father travelled quite a lot, so it was quite hard for us to take a nice, proper family photograph together. I remember wanting to hold on to that memory forever because I knew that things would not always stay as they were then. As cliché as it sounds, I enjoy how photographs freeze these precious moments. To me, photographing a place or a person you care for is like saying “I love you” to it, because you want to keep it to be remembered. Till now, I still love photography for its seamlessness and its instant ability to preserve the present. Back then, I used to borrow cameras from my friends, or just use a phone. I’d take photographs of the people around me and my neighbourhood and copy the editing styles I saw online. As a teenager, I was an avid explorer of art, design and photography websites. Seeing beautiful creations and visuals from across the globe opened up my mind to international cultures, and made me want to go out and see the world. That was something I managed to achieve after I graduated with my degree in Architecture in 2014. I worked hard to save up for my first trip abroad alone, as well as my first camera, the EOS 600D. Travelling motivated me to get deeper into photography, and I began to travel even more when I was studying for a Master’s Degree in Architecture in London from 2014 to 2017. At one point, I visited a different city almost every month! Capturing images of the places I went to started off as a way for me to record my experiences. But when I started sharing them on my social media platforms, my family, friends, and even strangers gave positive feedback and encouragement. With that, I soon found myself looking forward to and planning my trips based on the photos that I wanted to take. Slowly, local brands in the UK and Europe started reaching out and asking me to photograph lifestyle content for their products and events. To this day I’m still attached to certain brands. Right now, I’m working with a local travel company (Awesome Adventure) to photograph their trips. My biggest reward and motivation in photography is being able to travel, connect with others, share experiences, and expand my work through visuals. My architectural training taught me that how something works and what it conveys are just as important as how it looks. It still influences how I approach my shots, from the composition to the back story, the lighting, the colours that catch my eye, and even the subjects. And I still love taking photos of buildings, streets, industrial details, structural compositions, and shots that show the scale of architecture in comparison with humans. For me, understanding the technical aspects of photography is the biggest hurdle. It’s something I’m still trying to learn. But in a world where everyone feels like they should show that they know everything, it’s a relief to admit you’re a work in progress. I absolutely love not knowing everything, and having the chance to keep learning is fantastic. There are so many things I want to do, and it all just boils down to more constant and continuous learning. This was one of my first-ever night shots, and I was quite unprepared. We were in the middle of the desert, my battery was low, and I hadn’t brought a spare battery or a tripod.
My friend helped me figure out the settings for this, and we didn’t even shoot in RAW as a backup. I had to pull out the colours, adjust the brightness and reduce the noise on the JPEG to arrive at this final image. From this experience, I learned to be more prepared: to read up on recommended set-ups for the scenes, and to pack so that I’d have the appropriate equipment and a spare battery on hand. My EOS 600D was a great camera for a beginner. I’m not a very sentimental person, but it will always have a special place in my heart. It was the only camera I used throughout my early years of travelling—not only was it lightweight and portable, its basic functionality made it easy to use, and most importantly, it got the job done. I also used nothing other than my EF-S18-55mm f/3.5-5.6 IS STM kit lens for three years. But on one work trip, I got to try out a full-frame camera and was stunned by how much more of the scene I could capture. The level of detail and the quality of the low-light shots were also amazing. It was also around that time that I started experimenting with other lenses. I felt like a child who had just discovered the candy store. I was blown away by these experiences! There were so many possibilities for manipulating and controlling my photography with all these permutations. It has been a great four years with the EOS 600D, but now that I’ve experimented with my photography and tried out a full-frame camera and different lenses such as fish-eye lenses, it does feel like it’s time for an upgrade. With the nature of my work, I definitely need a camera that’s easy to travel with—one that’s portable, not too bulky, and can withstand a lot of moving and varying weather conditions. It also needs to have a flip screen, Wi-Fi and Bluetooth connectivity, as well as excellent low-light performance. I was spellbound by the EOS R when I got to try it on a recent trip to the United States. It ticks all the boxes and more. Not only did it fit nicely into my satchel, the low-light capabilities also stood out: I was impressed at how my shots of the Arizona night sky and the Manhattan skyline came out—the image quality was excellent all the way to the edges. I also loved how customisable the camera is, with three shooting pre-sets (C1, C2 and C3), the touch bar and the customisable control ring on the RF lenses. I loved the size, weight and image quality of the EOS R (pictured here with the EF16-35mm f/2.8L II USM and EF-EOS R Mount Adapter). The solid grip makes it really easy to hold. Having worked almost exclusively with DSLR cameras, I would have to get used to using a mirrorless camera. The new features on the EOS R and the RF lenses are also something I would have to familiarise myself with. But I’d like to see how I can establish myself with it. It feels like completely new territory, and I’m always up for a good adventure. An architectural graduate, designer and freelance travel photographer, Acacia Mardiana decided to explore other aspects of the visual arts after completing her Master’s degree in 2017. Since then, she has worked on various branding and marketing projects as a photographer and designer, as well as a number of her own projects that seek to motivate, inspire, and share her travel experiences. She was shortlisted as a Canon Malaysia EOS Youth Ambassador in 2018, and continues to be engaged with the Canon photography community.
"""Utilities for loading modules, supporting programmatic imports of types:: >>>from import_utils import import_module_from, import_module >>>import x as mod_x >>>mod_x = import_module('x') >>> >>>import x.y as mod_xy >>>mod_xy = import_module('x.y') >>> >>>from x.y import z >>>z = import_module_from('x.y.z') """ import sys __version__ = '0.0.1' def import_module_from(mod_path): """``mod_path`` is python path to module. Examples: 1) call with dotted path: >>>import_module_from('x.y.z') is equivalent to >>>from x.y import z 2) call with path without dots: >>>import_module_from('x') is the same as >>>import x """ if '.' in mod_path: bits = mod_path.split('.') mod_name = bits.pop() mod_path = '.'.join(bits) return import_module(mod_path, mod_name) else: return import_module(mod_path) def import_module(mod_path, mod_name = None): """first parameter must be a dotted python module path, second parameter is optional - module name. Examples: 1) call with one parameter: >>>import_module('x.y.z') is equivalent to >>>import x.y.z 2) call with two parameters >>>import_module('x.y', 'z') is equivalent to >> from x.y import z Relative imports are not supported """ if mod_name is None: try: return sys.modules[mod_path] except KeyError: __import__(mod_path) return sys.modules[mod_path] else: if mod_name.find('.') != -1: raise ValueError('second argument to import_module must not contain dots') mod_ = __import__(mod_path, globals(), locals(), [mod_name,], -1) return getattr(mod_, mod_name)
President Barack Obama on Wednesday announced the creation of the Next Generation Power Electronics Institute at North Carolina State University's Centennial Campus in Raleigh. The U.S. Department of Energy is dedicating $70 million to the institute over the next five years; the institute will work to develop the next generation of energy-saving electronic chips and devices. NCSU will lead a $140-million consortium of six universities and 18 businesses in the effort. The $140 million figure includes an additional $70 million in nonfederal money provided by the state of North Carolina and by the businesses and universities in the consortium. The announcement is part of Obama's push to fulfill a promise made last year to set up high-tech manufacturing hubs around the country. NCSU's institute is only the first of three such hubs to be announced, so the coming weeks should reveal which other universities and regions are involved in the effort.
# -*- coding: utf-8 -*- # (c) 2016 Alfredo de la Fuente - AvanzOSC # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html from openerp import models, fields, api, _ class EventEvent(models.Model): _inherit = 'event.event' no_employee_registration_ids = fields.One2many( comodel_name='event.registration', inverse_name='event_id', string='Registered students', readonly=False, states={'done': [('readonly', True)]}, domain=[('employee', '=', False)]) employee_registration_ids = fields.One2many( comodel_name='event.registration', inverse_name='event_id', string='Registered teachers', readonly=False, states={'done': [('readonly', True)]}, domain=[('employee', '!=', False)]) count_all_registrations = fields.Integer( string='All assistants', compute='_count_registrations') count_teacher_registrations = fields.Integer( string='Teacher assistants', compute='_count_registrations') count_pickings = fields.Integer( string='Pickings', compute='_compute_count_teacher_pickings_moves') count_moves = fields.Integer( string='Moves', compute='_compute_count_teacher_pickings_moves') seats_canceled = fields.Integer( string='Canceled registrations', store=True, readonly=True, compute='_compute_seats') count_presences = fields.Integer( string='Presences', compute='_compute_count_presences') count_parents = fields.Integer( string='Parents', compute='_compute_count_parents') @api.multi @api.depends('registration_ids') def _count_registrations(self): for record in self: super(EventEvent, record)._count_registrations() record.count_registrations =\ len(record.no_employee_registration_ids) record.count_all_registrations = len(record.registration_ids) record.count_teacher_registrations =\ len(record.employee_registration_ids) @api.multi @api.depends('no_employee_registration_ids', 'no_employee_registration_ids.state', 'no_employee_registration_ids.partner_id', 'no_employee_registration_ids.partner_id.parent_id') def _compute_count_parents(self): for event in self: reg = event.no_employee_registration_ids.filtered( lambda x: x.state in ('done', 'open')) event.count_parents = len(reg.mapped('partner_id.parent_id')) @api.multi def _compute_count_teacher_pickings_moves(self): picking_obj = self.env['stock.picking'] move_obj = self.env['stock.move'] for event in self: partners = event.mapped('employee_registration_ids.partner_id') cond = [('partner_id', 'in', partners.ids)] pickings = picking_obj.search(cond) event.count_pickings = len(pickings) cond = [('picking_id.partner_id', 'in', partners.ids)] moves = move_obj.search(cond) event.count_moves = len(moves) @api.multi def _compute_count_presences(self): for event in self: event.count_presences = len(event.mapped('track_ids.presences')) @api.multi @api.depends('seats_max', 'registration_ids', 'registration_ids.state', 'registration_ids.nb_register') def _compute_seats(self): super(EventEvent, self)._compute_seats() for event in self: event.seats_unconfirmed = len( event.no_employee_registration_ids.filtered( lambda x: x.state == 'draft')) event.seats_reserved = len( event.no_employee_registration_ids.filtered( lambda x: x.state in ('open', 'done'))) event.seats_canceled = len( event.no_employee_registration_ids.filtered( lambda x: x.state == 'cancel')) event.seats_available = (event.seats_unconfirmed + event.seats_reserved) def _create_event_from_sale(self, by_task, sale, line=False): event = super(EventEvent, self)._create_event_from_sale( by_task, sale, line=line) if by_task: self._create_event_ticket(event, line) else: sale_lines = sale.order_line.filtered( 
lambda x: x.recurring_service) for line in sale_lines: self._create_event_ticket(event, line) return event def _create_event_ticket(self, event, line): ticket_obj = self.env['event.event.ticket'] line.product_id.event_ok = True ticket_vals = {'event_id': event.id, 'product_id': line.product_id.id, 'name': line.name, 'price': line.price_subtotal, 'sale_line': line.id} ticket_obj.create(ticket_vals) @api.multi def write(self, vals): if (vals.get('employee_registration_ids', False) and vals.get('no_employee_registration_ids', False)): new_lines = [] for line in vals.get('no_employee_registration_ids'): if line[0] != 2 and line[2] is not False: new_lines.append(line) if new_lines: vals['no_employee_registration_ids'] = new_lines else: vals.pop('no_employee_registration_ids') new_lines = [] for line in vals.get('employee_registration_ids'): if line[0] != 2 and line[2] is not False: new_lines.append(line) if new_lines: vals['employee_registration_ids'] = new_lines else: vals.pop('employee_registration_ids') return super(EventEvent, self).write(vals) @api.multi def show_all_registrations(self): self.ensure_one() return {'name': _('Teacher assistants'), 'type': 'ir.actions.act_window', 'view_mode': 'tree,form,calendar,graph', 'view_type': 'form', 'res_model': 'event.registration', 'domain': [('id', 'in', self.registration_ids.ids)]} @api.multi def show_teacher_registrations(self): self.ensure_one() return {'name': _('Teacher assistants'), 'type': 'ir.actions.act_window', 'view_mode': 'tree,form,calendar,graph', 'view_type': 'form', 'res_model': 'event.registration', 'domain': [('id', 'in', self.employee_registration_ids.ids)]} @api.multi def button_show_parents(self): self.ensure_one() reg = self.no_employee_registration_ids.filtered( lambda x: x.state in ('done', 'open')) parents = reg.mapped('partner_id.parent_id') return {'name': _('Parents'), 'type': 'ir.actions.act_window', 'view_mode': 'tree,form', 'view_type': 'form', 'res_model': 'res.partner', 'domain': [('id', 'in', parents.ids)]} @api.multi def show_presences(self): self.ensure_one() context = self.env.context.copy() context.update({'search_default_students_filter': 1}) if context.get('group_by', False): context.pop('group_by') return {'name': _('Event presences'), 'type': 'ir.actions.act_window', 'view_mode': 'tree,form', 'view_type': 'form', 'res_model': 'event.track.presence', 'context': context, 'domain': [('id', 'in', self.mapped('track_ids.presences').ids)]} @api.multi def show_teacher_pickings(self): partners = self.mapped('employee_registration_ids.partner_id') return {'name': _('Teachers pickings'), 'type': 'ir.actions.act_window', 'view_mode': 'tree,form,calendar', 'view_type': 'form', 'res_model': 'stock.picking', 'domain': [('partner_id', 'in', partners.ids)]} @api.multi def show_teacher_moves(self): partners = self.mapped('employee_registration_ids.partner_id') return {'name': _('Teachers moves'), 'type': 'ir.actions.act_window', 'view_mode': 'tree,form', 'view_type': 'form', 'res_model': 'stock.move', 'domain': [('picking_id.partner_id', 'in', partners.ids)]} def _delete_canceled_presences_registrations(self): for event in self: presences = event.mapped('track_ids.presences').filtered( lambda x: x.state == 'canceled') presences.unlink() registrations = event.registration_ids.filtered( lambda x: x.state == 'cancel') for registration in registrations: presences = event.mapped('track_ids.presences').filtered( lambda x: x.state != 'canceled' and x.partner.id == registration.partner_id.id) if not presences: 
registration.analytic_account.unlink() registration.write({'state': 'draft'}) registration.unlink() class EventRegistration(models.Model): _inherit = 'event.registration' @api.depends('event_id', 'event_id.sale_order', 'event_id.sale_order.project_id', 'event_id.sale_order.project_id.recurring_invoices', 'employee', 'analytic_account') def _calculate_required_account(self): for reg in self: reg.required_account = True if (reg.employee or reg.analytic_account or reg.event_id.sale_order.project_id.recurring_invoices): reg.required_account = False required_account = fields.Boolean( string='Required account', compute='_calculate_required_account', store=True) analytic_account = fields.Many2one( comodel_name='account.analytic.account', string='Analytic account') employee = fields.Many2one( comodel_name='hr.employee', string='Employee', related='partner_id.employee_id', store=True) parent_num_bank_accounts = fields.Integer( string='# bank accounts', store=True, related='partner_id.parent_num_bank_accounts') parent_num_valid_mandates = fields.Integer( string='# valid mandates', store=True, related='partner_id.parent_num_valid_mandates') @api.onchange('partner_id') def _onchange_partner(self): result = super(EventRegistration, self)._onchange_partner() self.employee = self.partner_id.employee_id return result def _prepare_wizard_registration_open_vals(self): wiz_vals = super(EventRegistration, self)._prepare_wizard_registration_open_vals() wiz_vals.update({'create_account': self.required_account}) return wiz_vals @api.multi def button_reg_cancel(self): self.mapped('analytic_account').set_cancel() super(EventRegistration, self).button_reg_cancel() class EventEventTicket(models.Model): _inherit = 'event.event.ticket' sale_line = fields.Many2one( comodel_name='sale.order.line', string='Sale line') class EventTrackPresence(models.Model): _inherit = 'event.track.presence' employee = fields.Many2one( comodel_name='hr.employee', string='Employee', related='partner.employee_id', store=True) class EventTrack(models.Model): _inherit = 'event.track' @api.depends('presences', 'presences.real_duration') def _compute_real_duration(self): for track in self: track.real_duration = (max(track.mapped('presences.real_duration')) if track.presences else 0) no_employee_presences = fields.One2many( comodel_name='event.track.presence', inverse_name='session', string='Student presences', readonly=False, domain=[('employee', '=', False)]) employee_presences = fields.One2many( comodel_name='event.track.presence', inverse_name='session', string='Teacher presences', readonly=False, domain=[('employee', '!=', False)]) @api.multi def write(self, vals): if 'no_employee_presences' in vals and 'employee_presences' in vals: vals.pop('presences', None) return super(EventTrack, self).write(vals)
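For readers unfamiliar with the (command, id, values) triples that the write() override above filters, here is an illustrative sketch of the standard Odoo x2many write commands; the ids and field values are made up, and the stated purpose is an interpretation rather than documented behaviour.

vals = {
    'no_employee_registration_ids': [
        (0, 0, {'partner_id': 7, 'state': 'draft'}),  # create -> kept (first element != 2, values truthy)
        (1, 42, {'state': 'open'}),                   # update id 42 -> kept
        (2, 43, False),                               # delete id 43 -> dropped (first element == 2)
        (4, 44, False),                               # link id 44 -> dropped (third element is False)
    ],
}
# The override keeps only the first two commands, presumably so that deletions
# implied by one filtered view (students or teachers) do not wipe out the
# registrations that are visible only in the other view.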
Portable Toilet Rental is known for providing our customers with the best service in the state. We offer the highest quality and best selection of porta potties, dumpsters, showers and sinks, and restroom trailers in Nebraska. Taking care of your specific needs is our top priority, and we have both short- and long-term rental solutions. Portable Toilet Rental has private and roomy porta potties for every occasion you’re planning in Nebraska. Planning special events takes a lot of time and effort. You've got so much to manage and so many logistics to juggle, so let us take the stress out of this part of the planning process. We have flushing units with individual toilets and sinks, as well as hand wash stations and hand sanitizers. Our porta potties are always delivered to your location complete with all necessary features and accessories. You're sure to get the best customer experience when you use our local preferred vendors in Nebraska. Using a local provider guarantees your porta potty will arrive promptly, be top quality, and comply with any local regulations and requirements that might apply. The restroom will also be serviced regularly, as needed, and your dedicated provider will be easy to contact and quick to respond. Portable Toilet Rental pre-screens all the listed local vendors to look out for you. We do our homework so you don’t have to. Get in touch with our preferred local vendor in NE. We are committed to giving our customers exactly what they deserve: the highest level of service available!
# -*- coding: utf-8 -*- from datetime import datetime from dateutil import relativedelta import json import random from openerp import tools from openerp.exceptions import Warning from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.translate import _ from openerp.tools import ustr from openerp.osv import osv, fields class MassMailingCategory(osv.Model): """Model of categories of mass mailing, i.e. marketing, newsletter, ... """ _name = 'mail.mass_mailing.category' _description = 'Mass Mailing Category' _order = 'name' _columns = { 'name': fields.char('Name', required=True), } class MassMailingList(osv.Model): """Model of a contact list. """ _name = 'mail.mass_mailing.list' _order = 'name' _description = 'Mailing List' def _get_contact_nbr(self, cr, uid, ids, name, arg, context=None): result = dict.fromkeys(ids, 0) Contacts = self.pool.get('mail.mass_mailing.contact') for group in Contacts.read_group(cr, uid, [('list_id', 'in', ids), ('opt_out', '!=', True)], ['list_id'], ['list_id'], context=context): result[group['list_id'][0]] = group['list_id_count'] return result _columns = { 'name': fields.char('Mailing List', required=True), 'contact_nbr': fields.function( _get_contact_nbr, type='integer', string='Number of Contacts', ), } class MassMailingContact(osv.Model): """Model of a contact. This model is different from the partner model because it holds only some basic information: name, email. The purpose is to be able to deal with large contact list to email without bloating the partner base.""" _name = 'mail.mass_mailing.contact' _inherit = 'mail.thread' _description = 'Mass Mailing Contact' _order = 'email' _rec_name = 'email' _columns = { 'name': fields.char('Name'), 'email': fields.char('Email', required=True), 'create_date': fields.datetime('Create Date'), 'list_id': fields.many2one( 'mail.mass_mailing.list', string='Mailing List', ondelete='cascade', required=True, ), 'opt_out': fields.boolean('Opt Out', help='The contact has chosen not to receive mails anymore from this list'), } def _get_latest_list(self, cr, uid, context={}): lid = self.pool.get('mail.mass_mailing.list').search(cr, uid, [], limit=1, order='id desc', context=context) return lid and lid[0] or False _defaults = { 'list_id': _get_latest_list } def get_name_email(self, name, context): name, email = self.pool['res.partner']._parse_partner_name(name, context=context) if name and not email: email = name if email and not name: name = email return name, email def name_create(self, cr, uid, name, context=None): name, email = self.get_name_email(name, context=context) rec_id = self.create(cr, uid, {'name': name, 'email': email}, context=context) return self.name_get(cr, uid, [rec_id], context)[0] def add_to_list(self, cr, uid, name, list_id, context=None): name, email = self.get_name_email(name, context=context) rec_id = self.create(cr, uid, {'name': name, 'email': email, 'list_id': list_id}, context=context) return self.name_get(cr, uid, [rec_id], context)[0] def message_get_default_recipients(self, cr, uid, ids, context=None): res = {} for record in self.browse(cr, uid, ids, context=context): res[record.id] = {'partner_ids': [], 'email_to': record.email, 'email_cc': False} return res class MassMailingStage(osv.Model): """Stage for mass mailing campaigns. 
""" _name = 'mail.mass_mailing.stage' _description = 'Mass Mailing Campaign Stage' _order = 'sequence' _columns = { 'name': fields.char('Name', required=True, translate=True), 'sequence': fields.integer('Sequence'), } _defaults = { 'sequence': 0, } class MassMailingCampaign(osv.Model): """Model of mass mailing campaigns. """ _name = "mail.mass_mailing.campaign" _description = 'Mass Mailing Campaign' def _get_statistics(self, cr, uid, ids, name, arg, context=None): """ Compute statistics of the mass mailing campaign """ results = {} cr.execute(""" SELECT c.id as campaign_id, COUNT(s.id) AS total, COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent, COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled, COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed, COUNT(CASE WHEN s.id is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered, COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened, COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied , COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced FROM mail_mail_statistics s RIGHT JOIN mail_mass_mailing_campaign c ON (c.id = s.mass_mailing_campaign_id) WHERE c.id IN %s GROUP BY c.id """, (tuple(ids), )) for row in cr.dictfetchall(): results[row.pop('campaign_id')] = row total = row['total'] or 1 row['delivered'] = row['sent'] - row['bounced'] row['received_ratio'] = 100.0 * row['delivered'] / total row['opened_ratio'] = 100.0 * row['opened'] / total row['replied_ratio'] = 100.0 * row['replied'] / total return results _columns = { 'name': fields.char('Name', required=True), 'stage_id': fields.many2one('mail.mass_mailing.stage', 'Stage', required=True), 'user_id': fields.many2one( 'res.users', 'Responsible', required=True, ), 'category_ids': fields.many2many( 'mail.mass_mailing.category', 'mail_mass_mailing_category_rel', 'category_id', 'campaign_id', string='Categories'), 'mass_mailing_ids': fields.one2many( 'mail.mass_mailing', 'mass_mailing_campaign_id', 'Mass Mailings', ), 'unique_ab_testing': fields.boolean( 'AB Testing', help='If checked, recipients will be mailed only once, allowing to send' 'various mailings in a single campaign to test the effectiveness' 'of the mailings.'), 'color': fields.integer('Color Index'), # stat fields 'total': fields.function( _get_statistics, string='Total', type='integer', multi='_get_statistics' ), 'scheduled': fields.function( _get_statistics, string='Scheduled', type='integer', multi='_get_statistics' ), 'failed': fields.function( _get_statistics, string='Failed', type='integer', multi='_get_statistics' ), 'sent': fields.function( _get_statistics, string='Sent Emails', type='integer', multi='_get_statistics' ), 'delivered': fields.function( _get_statistics, string='Delivered', type='integer', multi='_get_statistics', ), 'opened': fields.function( _get_statistics, string='Opened', type='integer', multi='_get_statistics', ), 'replied': fields.function( _get_statistics, string='Replied', type='integer', multi='_get_statistics' ), 'bounced': fields.function( _get_statistics, string='Bounced', type='integer', multi='_get_statistics' ), 'received_ratio': fields.function( _get_statistics, string='Received Ratio', type='integer', multi='_get_statistics', ), 'opened_ratio': fields.function( _get_statistics, string='Opened Ratio', type='integer', multi='_get_statistics', ), 'replied_ratio': fields.function( 
_get_statistics, string='Replied Ratio', type='integer', multi='_get_statistics', ), } def _get_default_stage_id(self, cr, uid, context=None): stage_ids = self.pool['mail.mass_mailing.stage'].search(cr, uid, [], limit=1, context=context) return stage_ids and stage_ids[0] or False _defaults = { 'user_id': lambda self, cr, uid, ctx=None: uid, 'stage_id': lambda self, *args: self._get_default_stage_id(*args), } def get_recipients(self, cr, uid, ids, model=None, context=None): """Return the recipients of a mailing campaign. This is based on the statistics build for each mailing. """ Statistics = self.pool['mail.mail.statistics'] res = dict.fromkeys(ids, False) for cid in ids: domain = [('mass_mailing_campaign_id', '=', cid)] if model: domain += [('model', '=', model)] stat_ids = Statistics.search(cr, uid, domain, context=context) res[cid] = set(stat.res_id for stat in Statistics.browse(cr, uid, stat_ids, context=context)) return res class MassMailing(osv.Model): """ MassMailing models a wave of emails for a mass mailign campaign. A mass mailing is an occurence of sending emails. """ _name = 'mail.mass_mailing' _description = 'Mass Mailing' # number of periods for tracking mail_mail statistics _period_number = 6 _order = 'sent_date DESC' def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, date_begin, context=None): """ Generic method to generate data for bar chart values using SparklineBarWidget. This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field). :param obj: the target model (i.e. crm_lead) :param domain: the domain applied to the read_group :param list read_fields: the list of fields to read in the read_group :param str value_field: the field used to compute the value of the bar slice :param str groupby_field: the fields used to group :return list section_result: a list of dicts: [ { 'value': (int) bar_column_value, 'tootip': (str) bar_column_tooltip, } ] """ date_begin = date_begin.date() section_result = [{'value': 0, 'tooltip': ustr((date_begin + relativedelta.relativedelta(days=i)).strftime('%d %B %Y')), } for i in range(0, self._period_number)] group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context) field = obj._fields.get(groupby_field.split(':')[0]) pattern = tools.DEFAULT_SERVER_DATE_FORMAT if field.type == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT for group in group_obj: group_begin_date = datetime.strptime(group['__domain'][0][2], pattern).date() timedelta = relativedelta.relativedelta(group_begin_date, date_begin) section_result[timedelta.days] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field)} return section_result def _get_daily_statistics(self, cr, uid, ids, field_name, arg, context=None): """ Get the daily statistics of the mass mailing. This is done by a grouping on opened and replied fields. Using custom format in context, we obtain results for the next 6 days following the mass mailing date. 
""" obj = self.pool['mail.mail.statistics'] res = {} for mailing in self.browse(cr, uid, ids, context=context): res[mailing.id] = {} date = mailing.sent_date if mailing.sent_date else mailing.create_date date_begin = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT) date_end = date_begin + relativedelta.relativedelta(days=self._period_number - 1) date_begin_str = date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) date_end_str = date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) domain = [('mass_mailing_id', '=', mailing.id), ('opened', '>=', date_begin_str), ('opened', '<=', date_end_str)] res[mailing.id]['opened_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['opened'], 'opened_count', 'opened:day', date_begin, context=context)) domain = [('mass_mailing_id', '=', mailing.id), ('replied', '>=', date_begin_str), ('replied', '<=', date_end_str)] res[mailing.id]['replied_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['replied'], 'replied_count', 'replied:day', date_begin, context=context)) return res def _get_statistics(self, cr, uid, ids, name, arg, context=None): """ Compute statistics of the mass mailing """ results = {} cr.execute(""" SELECT m.id as mailing_id, COUNT(s.id) AS total, COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent, COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled, COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed, COUNT(CASE WHEN s.sent is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered, COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened, COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied, COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced FROM mail_mail_statistics s RIGHT JOIN mail_mass_mailing m ON (m.id = s.mass_mailing_id) WHERE m.id IN %s GROUP BY m.id """, (tuple(ids), )) for row in cr.dictfetchall(): results[row.pop('mailing_id')] = row total = row['total'] or 1 row['received_ratio'] = 100.0 * row['delivered'] / total row['opened_ratio'] = 100.0 * row['opened'] / total row['replied_ratio'] = 100.0 * row['replied'] / total return results def _get_mailing_model(self, cr, uid, context=None): res = [] for model_name in self.pool: model = self.pool[model_name] if hasattr(model, '_mail_mass_mailing') and getattr(model, '_mail_mass_mailing'): res.append((model._name, getattr(model, '_mail_mass_mailing'))) res.append(('mail.mass_mailing.contact', _('Mailing List'))) return res # indirections for inheritance _mailing_model = lambda self, *args, **kwargs: self._get_mailing_model(*args, **kwargs) _columns = { 'name': fields.char('Subject', required=True), 'email_from': fields.char('From', required=True), 'create_date': fields.datetime('Creation Date'), 'sent_date': fields.datetime('Sent Date', oldname='date', copy=False), 'body_html': fields.html('Body'), 'attachment_ids': fields.many2many( 'ir.attachment', 'mass_mailing_ir_attachments_rel', 'mass_mailing_id', 'attachment_id', 'Attachments' ), 'mass_mailing_campaign_id': fields.many2one( 'mail.mass_mailing.campaign', 'Mass Mailing Campaign', ondelete='set null', ), 'state': fields.selection( [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')], string='Status', required=True, copy=False, ), 'color': fields.related( 'mass_mailing_campaign_id', 'color', type='integer', string='Color Index', ), # mailing options 'reply_to_mode': 
fields.selection( [('thread', 'In Document'), ('email', 'Specified Email Address')], string='Reply-To Mode', required=True, ), 'reply_to': fields.char('Reply To', help='Preferred Reply-To Address'), # recipients 'mailing_model': fields.selection(_mailing_model, string='Recipients Model', required=True), 'mailing_domain': fields.char('Domain', oldname='domain'), 'contact_list_ids': fields.many2many( 'mail.mass_mailing.list', 'mail_mass_mailing_list_rel', string='Mailing Lists', ), 'contact_ab_pc': fields.integer( 'AB Testing percentage', help='Percentage of the contacts that will be mailed. Recipients will be taken randomly.' ), # statistics data 'statistics_ids': fields.one2many( 'mail.mail.statistics', 'mass_mailing_id', 'Emails Statistics', ), 'total': fields.function( _get_statistics, string='Total', type='integer', multi='_get_statistics', ), 'scheduled': fields.function( _get_statistics, string='Scheduled', type='integer', multi='_get_statistics', ), 'failed': fields.function( _get_statistics, string='Failed', type='integer', multi='_get_statistics', ), 'sent': fields.function( _get_statistics, string='Sent', type='integer', multi='_get_statistics', ), 'delivered': fields.function( _get_statistics, string='Delivered', type='integer', multi='_get_statistics', ), 'opened': fields.function( _get_statistics, string='Opened', type='integer', multi='_get_statistics', ), 'replied': fields.function( _get_statistics, string='Replied', type='integer', multi='_get_statistics', ), 'bounced': fields.function( _get_statistics, string='Bounced', type='integer', multi='_get_statistics', ), 'received_ratio': fields.function( _get_statistics, string='Received Ratio', type='integer', multi='_get_statistics', ), 'opened_ratio': fields.function( _get_statistics, string='Opened Ratio', type='integer', multi='_get_statistics', ), 'replied_ratio': fields.function( _get_statistics, string='Replied Ratio', type='integer', multi='_get_statistics', ), # daily ratio 'opened_daily': fields.function( _get_daily_statistics, string='Opened', type='char', multi='_get_daily_statistics', ), 'replied_daily': fields.function( _get_daily_statistics, string='Replied', type='char', multi='_get_daily_statistics', ) } def default_get(self, cr, uid, fields, context=None): res = super(MassMailing, self).default_get(cr, uid, fields, context=context) if 'reply_to_mode' in fields and not 'reply_to_mode' in res and res.get('mailing_model'): if res['mailing_model'] in ['res.partner', 'mail.mass_mailing.contact']: res['reply_to_mode'] = 'email' else: res['reply_to_mode'] = 'thread' return res _defaults = { 'state': 'draft', 'email_from': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx), 'reply_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx), 'mailing_model': 'mail.mass_mailing.contact', 'contact_ab_pc': 100, 'mailing_domain': [], } #------------------------------------------------------ # Technical stuff #------------------------------------------------------ def copy_data(self, cr, uid, id, default=None, context=None): mailing = self.browse(cr, uid, id, context=context) default = dict(default or {}, name=_('%s (copy)') % mailing.name) return super(MassMailing, self).copy_data(cr, uid, id, default, context=context) def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True): """ Override read_group to always display all states. 
""" if groupby and groupby[0] == "state": # Default result structure # states = self._get_state_list(cr, uid, context=context) states = [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')] read_group_all_states = [{ '__context': {'group_by': groupby[1:]}, '__domain': domain + [('state', '=', state_value)], 'state': state_value, 'state_count': 0, } for state_value, state_name in states] # Get standard results read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby) # Update standard results with default results result = [] for state_value, state_name in states: res = filter(lambda x: x['state'] == state_value, read_group_res) if not res: res = filter(lambda x: x['state'] == state_value, read_group_all_states) res[0]['state'] = [state_value, state_name] result.append(res[0]) return result else: return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby) #------------------------------------------------------ # Views & Actions #------------------------------------------------------ def on_change_model_and_list(self, cr, uid, ids, mailing_model, list_ids, context=None): value = {} if mailing_model == 'mail.mass_mailing.contact': mailing_list_ids = set() for item in list_ids: if isinstance(item, (int, long)): mailing_list_ids.add(item) elif len(item) == 3: mailing_list_ids |= set(item[2]) if mailing_list_ids: value['mailing_domain'] = "[('list_id', 'in', %s), ('opt_out', '=', False)]" % list(mailing_list_ids) else: value['mailing_domain'] = "[('list_id', '=', False)]" else: value['mailing_domain'] = [] return {'value': value} def action_duplicate(self, cr, uid, ids, context=None): copy_id = None for mid in ids: copy_id = self.copy(cr, uid, mid, context=context) if copy_id: return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'mail.mass_mailing', 'res_id': copy_id, 'context': context, } return False def action_test_mailing(self, cr, uid, ids, context=None): ctx = dict(context, default_mass_mailing_id=ids[0]) return { 'name': _('Test Mailing'), 'type': 'ir.actions.act_window', 'view_mode': 'form', 'res_model': 'mail.mass_mailing.test', 'target': 'new', 'context': ctx, } def action_edit_html(self, cr, uid, ids, context=None): if not len(ids) == 1: raise ValueError('One and only one ID allowed for this action') mail = self.browse(cr, uid, ids[0], context=context) url = '/website_mail/email_designer?model=mail.mass_mailing&res_id=%d&template_model=%s&return_action=%d&enable_editor=1' % (ids[0], mail.mailing_model, context['params']['action']) return { 'name': _('Open with Visual Editor'), 'type': 'ir.actions.act_url', 'url': url, 'target': 'self', } #------------------------------------------------------ # Email Sending #------------------------------------------------------ def get_recipients(self, cr, uid, mailing, context=None): if mailing.mailing_domain: domain = eval(mailing.mailing_domain) res_ids = self.pool[mailing.mailing_model].search(cr, uid, domain, context=context) else: res_ids = [] domain = [('id', 'in', res_ids)] # randomly choose a fragment if mailing.contact_ab_pc < 100: contact_nbr = self.pool[mailing.mailing_model].search(cr, uid, domain, count=True, context=context) topick = int(contact_nbr / 100.0 * mailing.contact_ab_pc) if mailing.mass_mailing_campaign_id and mailing.mass_mailing_campaign_id.unique_ab_testing: already_mailed = 
self.pool['mail.mass_mailing.campaign'].get_recipients(cr, uid, [mailing.mass_mailing_campaign_id.id], context=context)[mailing.mass_mailing_campaign_id.id] else: already_mailed = set([]) remaining = set(res_ids).difference(already_mailed) if topick > len(remaining): topick = len(remaining) res_ids = random.sample(remaining, topick) return res_ids def send_mail(self, cr, uid, ids, context=None): author_id = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id for mailing in self.browse(cr, uid, ids, context=context): # instantiate an email composer + send emails res_ids = self.get_recipients(cr, uid, mailing, context=context) if not res_ids: raise Warning('Please select recipients.') comp_ctx = dict(context, active_ids=res_ids) composer_values = { 'author_id': author_id, 'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids], 'body': mailing.body_html, 'subject': mailing.name, 'model': mailing.mailing_model, 'email_from': mailing.email_from, 'record_name': False, 'composition_mode': 'mass_mail', 'mass_mailing_id': mailing.id, 'mailing_list_ids': [(4, l.id) for l in mailing.contact_list_ids], 'no_auto_thread': mailing.reply_to_mode != 'thread', } if mailing.reply_to_mode == 'email': composer_values['reply_to'] = mailing.reply_to composer_id = self.pool['mail.compose.message'].create(cr, uid, composer_values, context=comp_ctx) self.pool['mail.compose.message'].send_mail(cr, uid, [composer_id], context=comp_ctx) self.write(cr, uid, [mailing.id], {'sent_date': fields.datetime.now(), 'state': 'done'}, context=context) return True
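The contact_ab_pc handling in get_recipients() above boils down to sampling a percentage of the candidate records while skipping anyone the campaign has already mailed. A standalone sketch with plain integers instead of ORM records; the function name and sample numbers are mine, not Odoo's.

import random

def pick_ab_fragment(candidate_ids, already_mailed, percentage):
    # Mail only `percentage` % of the candidates, never re-picking ids that
    # were already mailed when unique AB testing is enabled on the campaign.
    topick = int(len(candidate_ids) / 100.0 * percentage)
    remaining = set(candidate_ids) - set(already_mailed)
    topick = min(topick, len(remaining))
    return random.sample(sorted(remaining), topick)

print(pick_ab_fragment(range(1, 11), {1, 2}, 30))  # e.g. [4, 7, 9]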
Sports coaching requires trained professionals because it is demanding work that aims to bridge the gap between sport and science. Like other disciplines, sports coaching has a great deal of theory incorporated into it. What sets it apart from purely theoretical fields, however, is that it also requires hands-on experience, which can only come from a proper coach who can speak to current topics and relate live experiences to past ones. These experiences are essential because they allow one to take a holistic approach to sport.
# Name: mapper_radarsat2 # Purpose: Nansat mapping for Radarsat2 data # Authors: Morten W. Hansen, Knut-Frode Dagestad, Anton Korosov # Licence: This file is part of NANSAT. You can redistribute it or modify # under the terms of GNU General Public License, v.3 # http://www.gnu.org/licenses/gpl-3.0.html from __future__ import unicode_literals, division, absolute_import import os import zipfile import json from dateutil.parser import parse import numpy as np try: import scipy.ndimage except: IMPORT_SCIPY = False else: IMPORT_SCIPY = True import pythesint as pti from nansat.nsr import NSR from nansat.vrt import VRT from nansat.domain import Domain from nansat.node import Node from nansat.utils import initial_bearing, gdal from nansat.exceptions import WrongMapperError, NansatReadError class Mapper(VRT): ''' Create VRT with mapping of WKV for Radarsat2 ''' def __init__(self, inputFileName, gdalDataset, gdalMetadata, xmlonly=False, **kwargs): ''' Create Radarsat2 VRT ''' fPathName, fExt = os.path.splitext(inputFileName) if zipfile.is_zipfile(inputFileName): # Open zip file using VSI fPath, fName = os.path.split(fPathName) filename = '/vsizip/%s/%s' % (inputFileName, fName) if not 'RS' in fName[0:2]: raise WrongMapperError('%s: Provided data is not Radarsat-2' % fName) gdalDataset = gdal.Open(filename) gdalMetadata = gdalDataset.GetMetadata() else: filename = inputFileName # if it is not RADARSAT-2, return if (not gdalMetadata or not 'SATELLITE_IDENTIFIER' in list(gdalMetadata.keys())): raise WrongMapperError(filename) elif gdalMetadata['SATELLITE_IDENTIFIER'] != 'RADARSAT-2': raise WrongMapperError(filename) if zipfile.is_zipfile(inputFileName): # Open product.xml to get additional metadata zz = zipfile.ZipFile(inputFileName) productXmlName = os.path.join(os.path.basename( inputFileName).split('.')[0], 'product.xml') productXml = zz.open(productXmlName).read() else: # product.xml to get additionali metadata productXmlName = os.path.join(filename, 'product.xml') if not os.path.isfile(productXmlName): raise WrongMapperError(filename) productXml = open(productXmlName).read() if not IMPORT_SCIPY: raise NansatReadError('Radarsat-2 data cannot be read because scipy is not installed') # parse product.XML rs2_0 = Node.create(productXml) if xmlonly: self.init_from_xml(rs2_0, filename) return # Get additional metadata from product.xml rs2_1 = rs2_0.node('sourceAttributes') rs2_2 = rs2_1.node('radarParameters') if rs2_2['antennaPointing'].lower() == 'right': antennaPointing = 90 else: antennaPointing = -90 rs2_3 = rs2_1.node('orbitAndAttitude').node('orbitInformation') passDirection = rs2_3['passDirection'] # create empty VRT dataset with geolocation only self._init_from_gdal_dataset(gdalDataset) self.dataset.SetGCPs(self.dataset.GetGCPs(), NSR().wkt) # define dictionary of metadata and band specific parameters pol = [] metaDict = [] # Get the subdataset with calibrated sigma0 only for dataset in gdalDataset.GetSubDatasets(): if dataset[1] == 'Sigma Nought calibrated': s0dataset = gdal.Open(dataset[0]) s0datasetName = dataset[0][:] band = s0dataset.GetRasterBand(1) s0datasetPol = band.GetMetadata()['POLARIMETRIC_INTERP'] for i in range(1, s0dataset.RasterCount+1): iBand = s0dataset.GetRasterBand(i) polString = iBand.GetMetadata()['POLARIMETRIC_INTERP'] suffix = polString # The nansat data will be complex # if the SAR data is of type 10 dtype = iBand.DataType if dtype == 10: # add intensity band metaDict.append( {'src': {'SourceFilename': ('RADARSAT_2_CALIB:SIGMA0:' + filename + '/product.xml'), 
'SourceBand': i, 'DataType': dtype}, 'dst': {'wkv': 'surface_backwards_scattering_coefficient_of_radar_wave', 'PixelFunctionType': 'intensity', 'SourceTransferType': gdal.GetDataTypeName(dtype), 'suffix': suffix, 'polarization': polString, 'dataType': 6}}) # modify suffix for adding the compled band below suffix = polString+'_complex' pol.append(polString) metaDict.append( {'src': {'SourceFilename': ('RADARSAT_2_CALIB:SIGMA0:' + filename + '/product.xml'), 'SourceBand': i, 'DataType': dtype}, 'dst': {'wkv': 'surface_backwards_scattering_coefficient_of_radar_wave', 'suffix': suffix, 'polarization': polString}}) if dataset[1] == 'Beta Nought calibrated': b0dataset = gdal.Open(dataset[0]) b0datasetName = dataset[0][:] for j in range(1, b0dataset.RasterCount+1): jBand = b0dataset.GetRasterBand(j) polString = jBand.GetMetadata()['POLARIMETRIC_INTERP'] if polString == s0datasetPol: b0datasetBand = j ############################### # Add SAR look direction ############################### d = Domain(ds=gdalDataset) lon, lat = d.get_geolocation_grids(100) ''' (GDAL?) Radarsat-2 data is stored with maximum latitude at first element of each column and minimum longitude at first element of each row (e.g. np.shape(lat)=(59,55) -> latitude maxima are at lat[0,:], and longitude minima are at lon[:,0]) In addition, there is an interpolation error for direct estimate along azimuth. We therefore estimate the heading along range and add 90 degrees to get the "satellite" heading. ''' if str(passDirection).upper() == 'DESCENDING': sat_heading = initial_bearing(lon[:, :-1], lat[:, :-1], lon[:, 1:], lat[:, 1:]) + 90 elif str(passDirection).upper() == 'ASCENDING': sat_heading = initial_bearing(lon[:, 1:], lat[:, 1:], lon[:, :-1], lat[:, :-1]) + 90 else: print('Can not decode pass direction: ' + str(passDirection)) # Calculate SAR look direction look_direction = sat_heading + antennaPointing # Interpolate to regain lost row look_direction = np.mod(look_direction, 360) look_direction = scipy.ndimage.interpolation.zoom( look_direction, (1, 11./10.)) # Decompose, to avoid interpolation errors around 0 <-> 360 look_direction_u = np.sin(np.deg2rad(look_direction)) look_direction_v = np.cos(np.deg2rad(look_direction)) look_u_VRT = VRT.from_array(look_direction_u) look_v_VRT = VRT.from_array(look_direction_v) # Note: If incidence angle and look direction are stored in # same VRT, access time is about twice as large lookVRT = VRT.from_lonlat(lon, lat) lookVRT.create_band( [{'SourceFilename': look_u_VRT.filename, 'SourceBand': 1}, {'SourceFilename': look_v_VRT.filename, 'SourceBand': 1}], {'PixelFunctionType': 'UVToDirectionTo'}) # Blow up to full size lookVRT = lookVRT.get_resized_vrt(gdalDataset.RasterXSize, gdalDataset.RasterYSize) # Store VRTs so that they are accessible later self.band_vrts['look_u_VRT'] = look_u_VRT self.band_vrts['look_v_VRT'] = look_v_VRT self.band_vrts['lookVRT'] = lookVRT # Add band to full sized VRT lookFileName = self.band_vrts['lookVRT'].filename metaDict.append({'src': {'SourceFilename': lookFileName, 'SourceBand': 1}, 'dst': {'wkv': 'sensor_azimuth_angle', 'name': 'look_direction'}}) ############################### # Create bands ############################### self.create_bands(metaDict) ################################################### # Add derived band (incidence angle) calculated # using pixel function "BetaSigmaToIncidence": ################################################### src = [{'SourceFilename': b0datasetName, 'SourceBand': b0datasetBand, 'DataType': dtype}, {'SourceFilename': 
s0datasetName, 'SourceBand': 1, 'DataType': dtype}] dst = {'wkv': 'angle_of_incidence', 'PixelFunctionType': 'BetaSigmaToIncidence', 'SourceTransferType': gdal.GetDataTypeName(dtype), '_FillValue': -10000, # NB: this is also hard-coded in # pixelfunctions.c 'dataType': 6, 'name': 'incidence_angle'} self.create_band(src, dst) self.dataset.FlushCache() ################################################################### # Add sigma0_VV - pixel function of sigma0_HH and beta0_HH # incidence angle is calculated within pixel function # It is assummed that HH is the first band in sigma0 and # beta0 sub datasets ################################################################### if 'VV' not in pol and 'HH' in pol: s0datasetNameHH = pol.index('HH')+1 src = [{'SourceFilename': s0datasetName, 'SourceBand': s0datasetNameHH, 'DataType': 6}, {'SourceFilename': b0datasetName, 'SourceBand': b0datasetBand, 'DataType': 6}] dst = {'wkv': 'surface_backwards_scattering_coefficient_of_radar_wave', 'PixelFunctionType': 'Sigma0HHBetaToSigma0VV', 'polarization': 'VV', 'suffix': 'VV'} self.create_band(src, dst) self.dataset.FlushCache() ############################################ # Add SAR metadata ############################################ if antennaPointing == 90: self.dataset.SetMetadataItem('ANTENNA_POINTING', 'RIGHT') if antennaPointing == -90: self.dataset.SetMetadataItem('ANTENNA_POINTING', 'LEFT') self.dataset.SetMetadataItem('ORBIT_DIRECTION', str(passDirection).upper()) # set valid time self.dataset.SetMetadataItem('time_coverage_start', (parse(gdalMetadata['FIRST_LINE_TIME']). isoformat())) self.dataset.SetMetadataItem('time_coverage_end', (parse(gdalMetadata['LAST_LINE_TIME']). isoformat())) # Get dictionary describing the instrument and platform according to # the GCMD keywords mm = pti.get_gcmd_instrument("C-SAR") ee = pti.get_gcmd_platform('radarsat-2') # TODO: Validate that the found instrument and platform are indeed what we # want.... 
self.dataset.SetMetadataItem('instrument', json.dumps(mm)) self.dataset.SetMetadataItem('platform', json.dumps(ee)) self.dataset.SetMetadataItem('entry_title', 'Radarsat-2 SAR') self.dataset.SetMetadataItem('provider', 'MDA/GSI') self.dataset.SetMetadataItem('dataset_parameters', json.dumps( ['surface_backwards_scattering_coefficient_of_radar_wave'])) self.dataset.SetMetadataItem('entry_id', os.path.basename(filename)) def init_from_xml(self, productXml, filename): ''' Fast init from metada in XML only ''' numberOfLines = int(productXml .node('imageAttributes') .node('rasterAttributes') .node('numberOfLines') .value) numberOfSamples = int(productXml .node('imageAttributes') .node('rasterAttributes') .node('numberOfSamplesPerLine') .value) VRT.__init__(self, srcRasterXSize=numberOfSamples, srcRasterYSize=numberOfLines) gcps = [] geogrid = productXml.node( 'imageAttributes').node('geographicInformation').node('geolocationGrid') for child in geogrid.children: pix = float(child.node('imageCoordinate').node('pixel').value) lin = float(child.node('imageCoordinate').node('line').value) lon = float(child.node('geodeticCoordinate').node('longitude').value) lat = float(child.node('geodeticCoordinate').node('latitude').value) gcps.append(gdal.GCP(lon, lat, 0, pix, lin)) self.dataset.SetGCPs(gcps, NSR().wkt) dates = list(map(parse, [child.node('timeStamp').value for child in (productXml.node('sourceAttributes') .node('orbitAndAttitude') .node('orbitInformation') .nodeList('stateVector'))])) self.dataset.SetMetadataItem('time_coverage_start', min(dates).isoformat()) self.dataset.SetMetadataItem('time_coverage_end', max(dates).isoformat()) self.dataset.SetMetadataItem('platform', json.dumps(pti.get_gcmd_platform('radarsat-2'))) self.dataset.SetMetadataItem('instrument', json.dumps(pti.get_gcmd_instrument('C-SAR'))) self.dataset.SetMetadataItem('Entry Title', 'Radarsat-2 SAR') self.dataset.SetMetadataItem('Data Center', 'CSA') self.dataset.SetMetadataItem('ISO Topic Category', 'Oceans') self.dataset.SetMetadataItem('Summary', 'Radarsat-2 SAR data') self.dataset.SetMetadataItem('provider', 'MDA/GSI') self.dataset.SetMetadataItem('dataset_parameters', json.dumps( 'surface_backwards_scattering_coefficient_of_radar_wave')) self.dataset.SetMetadataItem('entry_id', os.path.basename(filename))
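A note on the look-direction handling above: to avoid interpolation artefacts where the heading wraps from 360 back to 0 degrees, the mapper interpolates the sine and cosine components separately and recombines them. A minimal NumPy sketch of that idea, independent of the VRT machinery (the array values are made up):

import numpy as np
import scipy.ndimage

# Hypothetical heading row that wraps across the 0/360 boundary
look_direction = np.array([[358.0, 359.0, 1.0, 2.0]])

# Interpolating the angles directly would pass through ~180 between 359 and 1
naive = scipy.ndimage.zoom(look_direction, (1, 2.0), order=1)

# Decompose into unit-vector components, interpolate each, then recombine
u = np.sin(np.deg2rad(look_direction))
v = np.cos(np.deg2rad(look_direction))
u_i = scipy.ndimage.zoom(u, (1, 2.0), order=1)
v_i = scipy.ndimage.zoom(v, (1, 2.0), order=1)
recombined = np.mod(np.rad2deg(np.arctan2(u_i, v_i)), 360)  # stays near 358..2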
Kottonmouth Kings frontman Daddy X says his former business partner totally bogarted him ... but not out of a fat joint ... out of millions of dollars. X says for more than 5 years Kevin Zinger -- co-founder of his record label -- illegally paid himself a total of $600k more than his approved salary. And Daddy claims his ex-partner used their record label to pimp out Zinger's clothing line -- and he raked in tons of money -- but didn't hook Daddy or the company up with any cheddar. The frontman for the popular stoner-rap group says he asked Zinger for the company financials in 2011 ... but never got them -- and two years later, was fired from his own company in retaliation. The rapper wants his ex-partner to ante up $15 mil. Zinger -- who is still the label's C.E.O. -- says NOT GUILTY in a lengthy YouTube video.
import unicodedata import regex import roman import datetime import pytz import time from pynab import log import pynab.util from pynab.interfaces.movie import INTERFACES as MOVIE_INTERFACES from pynab.interfaces.tv import INTERFACES as TV_INTERFACES from pynab.db import db_session, windowed_query, Release, MetaBlack, Category, Movie, TvShow, DBID, DataLog, Episode import config CLEANING_REGEX = regex.compile(r'\b(hdtv|dvd|divx|xvid|mpeg2|x264|aac|flac|bd|dvdrip|10 bit|264|720p|1080p\d+x\d+)\b', regex.I) def process(type, interfaces=None, limit=None, online=True): """ Process ID fetching for releases. :param type: tv/movie :param interfaces: interfaces to use or None will use all :param limit: optional limit :param online: whether to check online apis :return: """ expiry = datetime.datetime.now(pytz.utc) - datetime.timedelta(config.postprocess.get('fetch_blacklist_duration', 7)) with db_session() as db: # noinspection PyComparisonWithNone,PyComparisonWithNone db.query(MetaBlack).filter((MetaBlack.movie != None)|(MetaBlack.tvshow != None)).filter(MetaBlack.time <= expiry).delete(synchronize_session='fetch') if type == 'movie': # noinspection PyComparisonWithNone query = db.query(Release).filter(Release.movie == None).join(Category).filter(Category.parent_id == 2000) if online: # noinspection PyComparisonWithNone query = query.filter(Release.movie_metablack_id == None) elif type == 'tv': # noinspection PyComparisonWithNone query = db.query(Release).filter(Release.tvshow == None).join(Category).filter(Category.parent_id == 5000) if online: # noinspection PyComparisonWithNone query = query.filter(Release.tvshow_metablack_id == None) else: raise Exception('wrong release type') query = query.order_by(Release.posted.desc()) if limit: releases = query.limit(limit) else: releases = windowed_query(query, Release.id, config.scan.get('binary_process_chunk_size')) if type == 'movie': parse_func = parse_movie iface_list = MOVIE_INTERFACES obj_class = Movie attr = 'movie' def extract_func(data): return {'name': data.get('name'), 'genre': data.get('genre', None), 'year': data.get('year', None)} elif type == 'tv': parse_func = parse_tv iface_list = TV_INTERFACES obj_class = TvShow attr = 'tvshow' def extract_func(data): return {'name': data.get('name'), 'country': data.get('country', None)} else: raise Exception('wrong release type') for release in releases: method = 'local' data = parse_func(release.search_name) if data: if type == 'movie': q = db.query(Movie).filter(Movie.name.ilike('%'.join(clean_name(data['name']).split(' ')))).filter(Movie.year == data['year']) elif type == 'tv': q = db.query(TvShow).filter(TvShow.name.ilike('%'.join(clean_name(data['name']).split(' ')))) else: q = None entity = q.first() if not entity and online: method = 'online' ids = {} for iface in iface_list: if interfaces and iface.NAME not in interfaces: continue exists = q.join(DBID).filter(DBID.db==iface.NAME).first() if not exists: id = iface.search(data) if id: ids[iface.NAME] = id if ids: entity = obj_class(**extract_func(data)) db.add(entity) for interface_name, id in ids.items(): i = DBID() i.db = interface_name i.db_id = id setattr(i, attr, entity) db.add(i) if entity: log.info('{}: [{}] - [{}] - data added: {}'.format( attr, release.id, release.search_name, method )) if type == 'tv': # episode processing ep = db.query(Episode).filter(Episode.tvshow_id == entity.id).filter(Episode.series_full == data['series_full']).first() if not ep: ep = Episode( season=data.get('season'), episode=data.get('episode'), 
series_full=data.get('series_full'), air_date=data.get('air_date'), year=data.get('year'), tvshow=entity ) release.episode = ep setattr(release, attr, entity) db.add(release) else: log.info('{}: [{}] - data not found: {}'.format( attr, release.search_name, method )) if online: mb = MetaBlack(status='ATTEMPTED') setattr(mb, attr, release) db.add(mb) else: log.info('{}: [{}] - {} data not found: no suitable regex for {} name'.format( attr, release.id, release.search_name, attr )) mb = MetaBlack(status='IMPOSSIBLE') setattr(mb, attr, release) db.add(mb) db.add(DataLog(description='parse_{} regex'.format(attr), data=release.search_name)) db.commit() if method != 'local': time.sleep(1) def clean_name(name): """ Cleans a show/movie name for searching. :param name: release name :return: cleaned name """ name = unicodedata.normalize('NFKD', name) name = regex.sub('[._\-]', ' ', name) name = regex.sub('[\':!"#*’,()?]', '', name) name = regex.sub('\s{2,}', ' ', name) name = regex.sub('\[.*?\]', '', name) replace_chars = { '$': 's', '&': 'and', 'ß': 'ss' } for k, v in replace_chars.items(): name = name.replace(k, v) name = CLEANING_REGEX.sub('', name) return name.lower() def parse_tv(search_name): """ Parse a TV show name for episode, season, airdate and name information. :param search_name: release name :return: show data (dict) """ # i fucking hate this function and there has to be a better way of doing it # named capturing groups in a list and semi-intelligent processing? show = {} match = pynab.util.Match() if match.match('^(.*?)[\. \-]s(\d{1,2})\.?e(\d{1,3})(?:\-e?|\-?e)(\d{1,3})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': [int(match.match_obj.group(3)), int(match.match_obj.group(4))], } elif match.match('^(.*?)[\. \-]s(\d{2})\.?e(\d{2})(\d{2})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': [int(match.match_obj.group(3)), int(match.match_obj.group(4))], } elif match.match('^(.*?)[\. \-]s(\d{1,2})\.?e(\d{1,3})\.?', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': int(match.match_obj.group(3)), } elif match.match('^(.*?)[\. \-]s(\d{1,2})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': 'all', } elif match.match('^(.*?)[\. \-]s(\d{1,2})d\d{1}\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': 'all', } elif match.match('^(.*?)[\. \-](\d{1,2})x(\d{1,3})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': int(match.match_obj.group(3)), } elif match.match('^(.*?)[\. \-](19|20)(\d{2})[\.\-](\d{2})[\.\-](\d{2})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': match.match_obj.group(2) + match.match_obj.group(3), 'episode': '{}/{}'.format(match.match_obj.group(4), match.match_obj.group(5)), 'air_date': '{}{}-{}-{}'.format(match.match_obj.group(2), match.match_obj.group(3), match.match_obj.group(4), match.match_obj.group(5)) } elif match.match('^(.*?)[\. 
\-](\d{2}).(\d{2})\.(19|20)(\d{2})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': match.match_obj.group(4) + match.match_obj.group(5), 'episode': '{}/{}'.format(match.match_obj.group(2), match.match_obj.group(3)), 'air_date': '{}{}-{}-{}'.format(match.match_obj.group(4), match.match_obj.group(5), match.match_obj.group(2), match.match_obj.group(3)) } elif match.match('^(.*?)[\. \-](\d{2}).(\d{2})\.(\d{2})\.', search_name, regex.I): # this regex is particularly awful, but i don't think it gets used much # seriously, > 15? that's going to be a problem in 2 years if 15 < int(match.match_obj.group(4)) <= 99: season = '19' + match.match_obj.group(4) else: season = '20' + match.match_obj.group(4) show = { 'name': match.match_obj.group(1), 'season': season, 'episode': '{}/{}'.format(match.match_obj.group(2), match.match_obj.group(3)), 'air_date': '{}-{}-{}'.format(season, match.match_obj.group(2), match.match_obj.group(3)) } elif match.match('^(.*?)[\. \-]20(\d{2})\.e(\d{1,3})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': '20' + match.match_obj.group(2), 'episode': int(match.match_obj.group(3)), } elif match.match('^(.*?)[\. \-]20(\d{2})\.Part(\d{1,2})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': '20' + match.match_obj.group(2), 'episode': int(match.match_obj.group(3)), } elif match.match('^(.*?)[\. \-](?:Part|Pt)\.?(\d{1,2})\.', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': 1, 'episode': int(match.match_obj.group(2)), } elif match.match('^(.*?)[\. \-](?:Part|Pt)\.?([ivx]+)', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': 1, 'episode': roman.fromRoman(str.upper(match.match_obj.group(2))) } elif match.match('^(.*?)[\. \-]EP?\.?(\d{1,3})', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': 1, 'episode': int(match.match_obj.group(2)), } elif match.match('^(.*?)[\. \-]Seasons?\.?(\d{1,2})', search_name, regex.I): show = { 'name': match.match_obj.group(1), 'season': int(match.match_obj.group(2)), 'episode': 'all' } elif match.match('^(.+)\s{1,3}(\d{1,3})\s\[([\w\d]+)\]', search_name, regex.I): # mostly anime show = { 'name': match.match_obj.group(1), 'season': 1, 'episode': int(match.match_obj.group(2)) } if 'name' in show and show['name']: # check for country code or name (Biggest Loser Australia etc) country = regex.search('[\._ ](US|UK|AU|NZ|CA|NL|Canada|Australia|America)', show['name'], regex.I) if country: if str.lower(country.group(1)) == 'canada': show['country'] = 'CA' elif str.lower(country.group(1)) == 'australia': show['country'] = 'AU' elif str.lower(country.group(1)) == 'america': show['country'] = 'US' else: show['country'] = str.upper(country.group(1)) if not isinstance(show['season'], int) and len(show['season']) == 4: show['series_full'] = '{}/{}'.format(show['season'], show['episode']) else: year = regex.search('[\._ ](19|20)(\d{2})', search_name, regex.I) if year: show['year'] = year.group(1) + year.group(2) show['season'] = 'S{:02d}'.format(show['season']) # check to see what episode ended up as if isinstance(show['episode'], list): show['episode'] = ''.join(['E{:02d}'.format(s) for s in show['episode']]) elif isinstance(show['episode'], int): show['episode'] = 'E{:02d}'.format(int(show['episode'])) # if it's a date string, leave it as that show['series_full'] = show['season'] + show['episode'] return show return None def parse_movie(search_name): """ Parse a movie name into name/year. 
:param search_name: release name :return: (name, year) """ result = regex.search('^(?P<name>.*)[\.\-_\( ](?P<year>19\d{2}|20\d{2})', search_name, regex.I) if result: result = result.groupdict() if 'year' not in result: result = regex.search( '^(?P<name>.*)[\.\-_ ](?:dvdrip|bdrip|brrip|bluray|hdtv|divx|xvid|proper|repack|real\.proper|sub\.?fix|sub\.?pack|ac3d|unrated|1080i|1080p|720p|810p)', search_name, regex.I) if result: result = result.groupdict() if 'name' in result: name = regex.sub('\(.*?\)|\.|_', ' ', result['name']) if 'year' in result: year = result['year'] else: year = '' return {'name': name, 'year': year} return None
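As a compact illustration of the naming conventions handled above (not a replacement for pynab's much more thorough parse_tv()), a single-pattern parser for the common SxxEyy release form looks like this:

import re

def parse_simple_tv(search_name):
    # Illustrative only: handles just the "Name.SxxEyy...." convention
    m = re.match(r'^(?P<name>.*?)[. _-]s(?P<season>\d{1,2})\.?e(?P<episode>\d{1,3})\b',
                 search_name, re.I)
    if not m:
        return None
    season = int(m.group('season'))
    episode = int(m.group('episode'))
    return {
        'name': m.group('name').replace('.', ' ').strip(),
        'season': season,
        'episode': episode,
        'series_full': 'S{:02d}E{:02d}'.format(season, episode),
    }

# parse_simple_tv('Some.Show.S01E02.720p.HDTV.x264') ->
#   {'name': 'Some Show', 'season': 1, 'episode': 2, 'series_full': 'S01E02'}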
Santa Ana is the hipster mecca of OC these days. When I found out a new sandwich shop had arrived I was overjoyed. San Francisco had more delis in one district than South OC has in its entire vicinity. C4 is definitely a welcoming sight!
1. Clean, yet art deco appearance. When you think hipsters, you usually think unshowered, unclean, vintage clothes, and obnoxious tats. Well C4 has done a great job at giving you a retro vibe while keeping it clean and classy.
2. 16 rotating handles and wines on tap as well as bottles you can purchase for in-store consumption. I love a place that has more than just Sierra Nevada on tap. After spending a night with frat boys ordering Bud Light, I could not be more thankful that C4 is as far from the Peninsula as possible.
3. The price is right. Delis, especially "hipster" ones, can be super expensive. If this place were located in CDM it would easily be twice as expensive. But C4 keeps it reasonable by having sandwiches priced from $5.99-$9.99, and they even have combos.
4. Food is hella good. Can't believe I just said that, but SF has changed me. The sandwiches aren't too big, but they weren't small either. The Reuben and corned beef were just incredibly moist and tasty, and the sauces were like nothing I have had before. They pickle almost everything, and their coleslaw was quite unique.
4.5/5 sandwiches - my only complaint was not enough meat on the sandwich. Labels: Best of Series, Best Sandwiches, Coffee and Tea, Lunch Time, Sandwiches, Time for Happy Hour, Weekend Wrap Up, What's for Dinner?
#!/usr/bin/env python3 ''' Chapter 11 showed you how to use the requests module to scrape data from http://weather.gov/. Write a program that runs just before you wake up in the morning and checks whether it’s raining that day. If so, have the program text you a reminder to pack an umbrella before leaving the house. This script gets the first full-day verbal weather forecast in Turkish at Turkish Meteorological Institute's (MGM) website for Istanbul and emails it to the specified address. ''' import getpass import sys import smtplib import bs4 from selenium import webdriver rain_words = ["yağış", "yağmur", "sağanak"] # see stackoverflow 45448994 for phantomjs if sys.platform == 'win32': print('Install PhantomJS') sys.exit() elif sys.platform == 'darwin': # Executable path specified as otherwise PhantomJS headless browser # does not work. # Service log path spedified as otherwise script can not be run from # a .plist file due to the permission problems with the ghostdriver.log # file. browser = webdriver.PhantomJS(executable_path= '/usr/local/Cellar/phantomjs211/bin/phantomjs', service_log_path='/tmp/ghostdriver.log') else: print('Warning - Unknown OS:', sys.platform) print("Install PhantomJS") sys.exit() url = 'https://www.mgm.gov.tr/?il=Istanbul' browser.get(url) html = browser.page_source soup = bs4.BeautifulSoup(html, 'html.parser') # Get day of the week elems_day = soup.select('div.tahminTarih.ng-binding') # Get city and district names in order to make sure query # returned correct results. elems_il = soup.select('ziko.ng-binding') # The weather forecasts at MGM's site is per city and district ("ilce") # but many district names in Turkish have non-ascii characters. # Therefore, district-based queries not implemented. # elems_ilce = soup.select('span.ng-binding') # Get weather verbal elems_tahmin = soup.select('div.tahminHadise.ng-binding') # Reading of weather off of internat completed. Quit browser. browser.quit() # Check if the verbal weather forecast in Turkish includes words # implying rain umbrella = False for keyword in rain_words: if elems_tahmin[0].getText().replace("I", "ı").replace("İ", "i").\ lower().find(keyword) > 0: umbrella = True break if umbrella: # Send email to yourself about the weather smtpObj = smtplib.SMTP('smtp.gmail.com', 587) smtpObj.ehlo() smtpObj.starttls() from_addr = '[email protected]' pswd = 'your_password' # getpass.getpass() not useful when run scheduled smtpObj.login(from_addr, pswd) to_addr = '[email protected]' subject = 'Dışarı çıkarsan Şemsiye almayı unutma!' body_text = elems_day[0].getText() + '\n' + \ elems_il[0].getText() + '\n' + \ elems_tahmin[0].getText() body = ('\r\n').join([ 'From: %s' % from_addr, 'To: %s' % to_addr, 'Subject: %s' % subject , '', body_text] ) smtpObj.sendmail(from_addr, to_addr, body.encode('utf-8')) # log out of email server smtpObj.quit()
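The keyword check above first maps the Turkish capital letters 'I' and 'İ' to their lower-case forms by hand, because plain str.lower() turns 'I' into 'i' and would miss words like 'yağış'. A self-contained sketch of that check (it uses `in`, which also catches a keyword at position 0, an edge case the original find(...) > 0 test would miss):

rain_words = ["yağış", "yağmur", "sağanak"]

def mentions_rain(forecast_text):
    # Handle the Turkish dotted/dotless I before lower-casing
    normalized = forecast_text.replace("I", "ı").replace("İ", "i").lower()
    return any(keyword in normalized for keyword in rain_words)

# mentions_rain("Aralıklı SAĞANAK yağış bekleniyor") -> True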
Overall result - chip.com.tr IS family friendly. All sources consider chip.com.tr trustworthy and family friendly. Website chip.com.tr has a credible reputation. Norton ConnectSafe evaluates chip.com.tr for any unsafe and insecure content. The results are critical for families with young children. Google SafeSearch ALLOWS chip.com.tr in its search results. McAfee assesses chip.com.tr for a meaningful set of security threats. Detected dangers, from annoying pop-ups to hidden Trojans that can steal your identity, will be revealed. McAfee does not analyze chip.com.tr for mature or inappropriate content; only security checks are evaluated. McAfee SiteAdvisor results for chip.com.tr can be consulted here. WOT calculates the reputation of chip.com.tr. This reputation system receives ratings from users and information from third-party sources, assesses chip.com.tr for safety features and confirms whether chip.com.tr is suitable for children. WOT results for chip.com.tr can be consulted here. Still concerned about chip.com.tr? Please contact our team. We will carry out a full audit of the chip.com.tr site and update the analysis.

# Author(s): Silvio Gregorini ([email protected]) # Copyright 2019 Openforce Srls Unipersonale (www.openforce.it) # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). from odoo import _, api, fields, models from odoo.exceptions import ValidationError class DepLineType(models.Model): _name = 'asset.depreciation.line.type' _description = "Depreciation Line Type" _table = 'asset_dep_line_type' _order = 'name asc, code asc' @api.model def get_default_company_id(self): return self.env.user.company_id code = fields.Char( string="Code" ) company_id = fields.Many2one( 'res.company', default=get_default_company_id, string="Company" ) name = fields.Char( required=True, string="Name" ) type = fields.Selection( [('in', 'In'), ('out', 'Out')], string="Type", ) @api.multi def unlink(self): for line_type in self: if self.env['asset.depreciation.line'].search([ ('depreciation_line_type_id', '=', line_type.id) ]): raise ValidationError( _("Cannot remove type {}: there is some depreciation" " line linked to it.".format(line_type.name)) ) return super().unlink() @api.multi def name_get(self): return [(line_type.id, line_type.make_name()) for line_type in self] def make_name(self): self.ensure_one() name = "" if self.code: name += "[{}] ".format(self.code) name += self.name type_name = dict(self._fields['type'].selection).get(self.type) if type_name: name += " - " + type_name return name.strip()
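For reference, a standalone mirror of the make_name() logic above, useful for seeing the display format it produces (the record values are made up):

def make_name_example(code, name, type_label):
    # Mirrors DepLineType.make_name(): optional "[CODE] " prefix, the name,
    # then " - <Type>" when a type label is set.
    result = ""
    if code:
        result += "[{}] ".format(code)
    result += name
    if type_label:
        result += " - " + type_label
    return result.strip()

# make_name_example('DEP', 'Ordinary depreciation', 'In')
#   -> '[DEP] Ordinary depreciation - In'
# make_name_example(False, 'Ordinary depreciation', False)
#   -> 'Ordinary depreciation'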
Average Appliance Repair Cost in Yaphank (call 631-526-7049): this page will give you some appliance repair estimates for appliances including refrigerator repair cost, dryer repair cost, washer repair cost, oven repair cost, stove repair cost and dishwasher repair cost. Please note that appliance repair costs will vary depending on the brand. GE, Frigidaire, LG, Whirlpool, Maytag, Kenmore, Samsung, KitchenAid, Bosch, Sub-Zero, Viking and many more will have a wide range of average repair costs.
# Copyright (c) 2019 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. from typing import Optional from ..BaseModel import BaseModel ## Model for the types of changes that are needed before a print job can start class ClusterPrintJobConfigurationChange(BaseModel): ## Creates a new print job constraint. # \param type_of_change: The type of configuration change, one of: "material", "print_core_change" # \param index: The hotend slot or extruder index to change # \param target_id: Target material guid or hotend id # \param origin_id: Original/current material guid or hotend id # \param target_name: Target material name or hotend id # \param origin_name: Original/current material name or hotend id def __init__(self, type_of_change: str, target_id: str, origin_id: str, index: Optional[int] = None, target_name: Optional[str] = None, origin_name: Optional[str] = None, **kwargs) -> None: self.type_of_change = type_of_change self.index = index self.target_id = target_id self.origin_id = origin_id self.target_name = target_name self.origin_name = origin_name super().__init__(**kwargs)
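A minimal usage sketch for the model above; the GUIDs and material names below are made up, not real cluster data:

change = ClusterPrintJobConfigurationChange(
    type_of_change="material",                            # or "print_core_change"
    target_id="11111111-1111-1111-1111-111111111111",     # hypothetical material GUID
    origin_id="22222222-2222-2222-2222-222222222222",     # hypothetical material GUID
    index=0,                                              # first extruder slot
    target_name="Generic PLA",
    origin_name="Generic ABS",
)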
Yesterday was one of those days. Normally I'm up before the alarm rings at 5:00 AM, but after a restless night I slept until the alarm went off. For some reason I didn't go out the back door; instead I walked into the kitchen to put on a pot of coffee. That's when my day really began. As I reached the end of the hall I could see Mom at the front window, peeking outside and trying to open the curtain. At that moment I knew it was going to be one of those days.

I asked Mom if she wanted some coffee, which she did, then I asked if she wanted breakfast and again she did. So I made her some coffee, went out and retrieved the paper and made her some breakfast. With Mom's hunger needs satisfied I went outside to enjoy a cup of coffee. After about ten minutes, needing a refill, I came back inside only to discover Mom had eaten about two bites of her breakfast and then quietly crept back to her room. Well OK, it was about 5 hours early for her to get up, so it didn't surprise me that she went back to bed. I cleaned her leftovers off the table and put them in the fridge for later. That ended up being a pretty darn good move. For some reason Mom only stayed in bed for about ten minutes and came out hungry and looking for food again. At that point she actually seemed pretty sharp and she stayed up a couple hours reading the paper. It seemed today would be a good day. Boy, was I wrong.

I had an appointment in town so one of the girls stayed with Mom while I was gone. She went back to bed and got up again a couple times during my outing and was sitting in her rocker by the window when I came in. I hadn't been in the house two minutes and she tried telling me about something; what, I didn't know. I had no idea what she was trying to say. I heated her up some coffee and after a few minutes of probing her for information I found out she was telling me about an article that I had written in the daily paper. It's been 20 years since I'd done anything like that, but she didn't believe me and kept telling me her thoughts. Finally, frustrated, she shuffled off to bed. I didn't know it, but this was just the tip of the iceberg compared to what was coming.

Since Mom had been up about four hours longer than normal, I figured she would take a nap. Oh no, not today. She was only in her room about ten minutes this time. She shuffled over to her rocker and picked up the paper. I asked her if she was hungry and would like something to eat. Wrong question. She looked up from her paper, glared at me and snarled, "It's about supper time, isn't it?" I told her dinner was about two hours away and this time I asked if she'd like a snack. Wrong question again. She snapped at me again and informed me sternly that she would wait for supper. Things went pretty smoothly after that, well, until dinner was over.

Once dinner was over the real fun began. First it was that her mother had been here and just left. Then it was that Aunt Laura's house was just around the corner; she had been there, went inside and it was empty. Next it was "Where's my car?" Followed by "It's almost winter, it's time to go home." So that's not so bad. Well, it ended up as a four-hour circle of the same questions over and over again. At one time, as Paula and I were explaining that her mother had passed away 19 years ago, Mom became angry and said "NO, NO, NO, NO, I just talked to her!" Finally around 10:30 Mom asked us how she had gotten here today. We again explained that she has lived with us for a year now. With that she said, "OK, then I'm going to bed."
And it ended as quickly as it started. It has taken me a few days to write this post, but as a side note, Mom slept 22 hours the next day. It took me almost all day to get her out of bed once for dinner. I always read your posts and always gain a little insight into some of the things I experienced with my father. Your ability to share yours with all of us, I think, helps greatly to understand something that is very difficult to understand. Thank you for finding the words that I cannot. Thanks for the kind words, Dana. Writing helps me deal with it too.
# Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid and the ShinySDR contributors # # This file is part of ShinySDR. # # ShinySDR is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ShinySDR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ShinySDR. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function, unicode_literals import math from zope.interface import implementer # available via Twisted from gnuradio import analog from gnuradio import blocks from gnuradio import channels from gnuradio import gr from gnuradio.filter import rational_resampler from shinysdr.devices import Device, IRXDriver from shinysdr.filters import make_resampler from shinysdr.interfaces import IModulator from shinysdr.math import dB, rotator_inc, to_dB from shinysdr.i.modes import lookup_mode from shinysdr.i.pycompat import defaultstr from shinysdr.signals import SignalType, no_signal from shinysdr.types import RangeT, ReferenceT from shinysdr import units from shinysdr.values import CellDict, CollectionState, ExportedState, LooseCell, exported_value, setter __all__ = [] # appended later def SimulatedDevice( name='Simulated RF', freq=0.0, allow_tuning=False): """ See documentation in shinysdr/i/webstatic/manual/configuration.html. """ return SimulatedDeviceForTest( name=name, freq=freq, allow_tuning=allow_tuning, add_transmitters=True) __all__.append('SimulatedDevice') def SimulatedDeviceForTest( name='Simulated RF', freq=0.0, allow_tuning=False, add_transmitters=False): """Identical to SimulatedDevice except that the defaults are arranged to be minimal for fast testing rather than to provide a rich simulation.""" rx_driver = _SimulatedRXDriver(name, add_transmitters=add_transmitters) return Device( name=name, vfo_cell=LooseCell( value=freq, type=RangeT([(-1e9, 1e9)]) if allow_tuning else RangeT([(freq, freq)]), # TODO kludge magic numbers writable=True, persists=False, post_hook=rx_driver._set_sim_freq), rx_driver=rx_driver) __all__.append('SimulatedDeviceForTest') @implementer(IRXDriver) class _SimulatedRXDriver(ExportedState, gr.hier_block2): # TODO: be not hardcoded; for now this is convenient audio_rate = 1e4 rf_rate = 200e3 def __init__(self, name, add_transmitters): gr.hier_block2.__init__( self, defaultstr(type(self).__name__ + ' ' + name), gr.io_signature(0, 0, 0), gr.io_signature(1, 1, gr.sizeof_gr_complex * 1), ) rf_rate = self.rf_rate audio_rate = self.audio_rate self.__noise_level = -22 self.__transmitters = CellDict(dynamic=True) self.__transmitters_cs = CollectionState(self.__transmitters) self.__bus = blocks.add_vcc(1) self.__channel_model = channels.channel_model( noise_voltage=dB(self.__noise_level), frequency_offset=0, epsilon=1.01, # TODO: expose this parameter # taps=..., # TODO: apply something here? 
) self.__rotator = blocks.rotator_cc() self.__throttle = blocks.throttle(gr.sizeof_gr_complex, rf_rate) self.connect( self.__bus, self.__throttle, self.__channel_model, self.__rotator, self) signals = [] def add_modulator(freq, key, mode_or_modulator_ctor, **kwargs): if isinstance(mode_or_modulator_ctor, type): mode = None ctor = mode_or_modulator_ctor else: mode = mode_or_modulator_ctor mode_def = lookup_mode(mode) if mode_def is None: # missing plugin, say return ctor = mode_def.mod_class context = None # TODO implement context modulator = ctor(context=context, mode=mode, **kwargs) tx = _SimulatedTransmitter(modulator, audio_rate, rf_rate, freq) self.connect(audio_signal, tx) signals.append(tx) self.__transmitters[key] = tx # Audio input signal pitch = analog.sig_source_f(audio_rate, analog.GR_SAW_WAVE, -1, 2000, 1000) audio_signal = vco = blocks.vco_f(audio_rate, 1, 1) self.connect(pitch, vco) # Channels if add_transmitters: add_modulator(0.0, 'usb', 'USB') add_modulator(10e3, 'am', 'AM') add_modulator(30e3, 'fm', 'NFM') add_modulator(50e3, 'rtty', 'RTTY', message='The quick brown fox jumped over the lazy dog.\n') add_modulator(80e3, 'chirp', ChirpModulator) if signals: for bus_input, signal in enumerate(signals): self.connect(signal, (self.__bus, bus_input)) else: # kludge up a correct-sample-rate no-op self.connect( audio_signal, blocks.multiply_const_ff(0), make_resampler(audio_rate, rf_rate), blocks.float_to_complex(), self.__bus) self.__signal_type = SignalType( kind='IQ', sample_rate=rf_rate) self.__usable_bandwidth = RangeT([(-rf_rate / 2, rf_rate / 2)]) @exported_value(type=ReferenceT(), changes='never') def get_transmitters(self): return self.__transmitters_cs # implement IRXDriver @exported_value(type=SignalType, changes='never') def get_output_type(self): return self.__signal_type def _set_sim_freq(self, freq): self.__rotator.set_phase_inc(rotator_inc(rate=self.rf_rate, shift=-freq)) # implement IRXDriver def get_tune_delay(self): return 0.0 # implement IRXDriver def get_usable_bandwidth(self): return self.__usable_bandwidth # implement IRXDriver def close(self): pass @exported_value(type=RangeT([(-50, 0)]), changes='this_setter', label='White noise') def get_noise_level(self): return self.__noise_level @setter def set_noise_level(self, value): self.__channel_model.set_noise_voltage(dB(value)) self.__noise_level = value def notify_reconnecting_or_restarting(self): # The throttle block runs on a clock which does not stop when the flowgraph stops; resetting the sample rate restarts the clock. 
# The necessity of this kludge has been filed as a gnuradio bug at <http://gnuradio.org/redmine/issues/649> self.__throttle.set_sample_rate(self.__throttle.sample_rate()) class _SimulatedTransmitter(gr.hier_block2, ExportedState): """provides frequency parameters""" def __init__(self, modulator, audio_rate, rf_rate, freq): modulator = IModulator(modulator) gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(1, 1, gr.sizeof_float * 1), gr.io_signature(1, 1, gr.sizeof_gr_complex * 1), ) self.__freq = freq self.__rf_rate = rf_rate self.__modulator = modulator modulator_input_type = modulator.get_input_type() if modulator_input_type.get_kind() == 'MONO': audio_resampler = make_resampler(audio_rate, modulator_input_type.get_sample_rate()) self.connect(self, audio_resampler, modulator) elif modulator_input_type.get_kind() == 'NONE': self.connect(self, blocks.null_sink(gr.sizeof_float)) else: raise Exception('don\'t know how to supply input of type %s' % modulator_input_type) rf_resampler = rational_resampler.rational_resampler_ccf( interpolation=int(rf_rate), decimation=int(modulator.get_output_type().get_sample_rate())) self.__rotator = blocks.rotator_cc(rotator_inc(rate=rf_rate, shift=freq)) self.__mult = blocks.multiply_const_cc(dB(-10)) self.connect(modulator, rf_resampler, self.__rotator, self.__mult, self) @exported_value(type=ReferenceT(), changes='never') def get_modulator(self): return self.__modulator @exported_value( type_fn=lambda self: RangeT([(-self.__rf_rate / 2, self.__rf_rate / 2)], unit=units.Hz, strict=False), changes='this_setter', label='Frequency') def get_freq(self): return self.__freq @setter def set_freq(self, value): self.__freq = float(value) self.__rotator.set_phase_inc(rotator_inc(rate=self.__rf_rate, shift=self.__freq)) @exported_value( type=RangeT([(-50.0, 0.0)], unit=units.dB, strict=False), changes='this_setter', label='Gain') def get_gain(self): return to_dB(self.__mult.k().real) @setter def set_gain(self, value): self.__mult.set_k(dB(value)) @implementer(IModulator) class ChirpModulator(gr.hier_block2, ExportedState): def __init__(self, context, mode, chirp_rate=0.1, output_rate=10000): gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(0, 0, 0), gr.io_signature(1, 1, gr.sizeof_gr_complex)) self.__output_rate = output_rate self.__chirp_rate = chirp_rate self.__control = analog.sig_source_f(output_rate, analog.GR_SAW_WAVE, chirp_rate, output_rate * 2 * math.pi, 0) chirp_vco = blocks.vco_c(output_rate, 1, 1) self.connect( self.__control, chirp_vco, self) def get_input_type(self): return no_signal def get_output_type(self): return SignalType(kind='IQ', sample_rate=self.__output_rate) @exported_value( parameter='chirp_rate', type=RangeT([(-10.0, 10.0)], unit=units.Hz, strict=False), changes='this_setter', label='Chirp rate') def get_chirp_rate(self): return self.__chirp_rate @setter def set_chirp_rate(self, value): self.__chirp_rate = value self.__control.set_frequency(value)
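rotator_inc() from shinysdr.math supplies the per-sample phase increment used by blocks.rotator_cc above. Assuming it implements the usual relation (check shinysdr.math for the authoritative definition), the idea is simply:

import math

def rotator_inc_sketch(rate, shift):
    # Phase increment in radians per sample that shifts a complex stream
    # sampled at `rate` Hz by `shift` Hz: one full 2*pi turn every
    # rate/shift samples.
    return 2 * math.pi * shift / rate

# A +50 kHz shift on the 200 kHz simulated RF stream advances the phase
# by pi/2 per sample: rotator_inc_sketch(200e3, 50e3) == math.pi / 2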
It was an easy 3-hour drive up to Quebec, where the pine trees begin to take over from deciduous fall colours. We're now in hotels for 10 days and on our own. Time to spread out a bit, more reading and exploring, probably determined to some extent by variable weather. The dominance of French speakers, French signage and culture seems somehow misplaced in this huge American continent, but history tells you otherwise. The Quebecois language is, however, fairly impenetrable, with a strong accent and local vocabulary. They think French French rather posh. Communicating in English is a fail-safe! Our small hotel was within walking distance of the old town and surrounded by useful shops and restaurants. We followed the hotelier's advice for restaurants, not knowing any better, and had two great American brunches and three evening meals, influenced by French, American (super-burgers) and Italian cuisine - the last to celebrate Peter's significant birthday. or New France) as a British colony. The French have been campaigning for significant independence and the freedom for the French language to dominate public life ever since. Halloween displays in the park looked atmospheric amongst falling leaves, ghosts hidden in trees, a gallows, witches, spiders, pumpkins and scarecrows - quite a show. We worked hard the next two days to be good tourists, learning the history from exhibits and the excellent Museum of Civilization - a great place to spend a cold Sunday afternoon - and walking round the old town, reading all the plaques, buying food delicacies from the food market and retreating back to the hotel when the driving rain finally set in. I managed to replace my leaking Merrell shoes with a similar pair - much needed. the docks, so one can only imagine how rammed it must be in summer. We also visited the lovely old station and chatted to a Via Canada man (that's the railway company) about train travel; there are still trains from Halifax, Nova Scotia to Quebec, and 4 trains a day to Montreal, then on to Toronto to board the train to Vancouver. 4 days and 3 nights - 5300 Canadian dollars for 2 in a sleeper. Sounds like a bargain to me!
#!/usr/bin/env python ''' decode RCDA messages from a log and optionally play back to a serial port. The RCDA message is captures RC input bytes when RC_OPTIONS=16 is set ''' import struct from argparse import ArgumentParser parser = ArgumentParser(description=__doc__) parser.add_argument("--condition", default=None, help="select packets by condition") parser.add_argument("--baudrate", type=int, default=115200, help="baudrate") parser.add_argument("--port", type=str, default=None, help="port") parser.add_argument("--delay-mul", type=float, default=1.0, help="delay multiplier") parser.add_argument("log", metavar="LOG") import time import serial args = parser.parse_args() from pymavlink import mavutil print("Processing log %s" % args.log) mlog = mavutil.mavlink_connection(args.log) if args.port: port = serial.Serial(args.port, args.baudrate, timeout=1.0) tlast = -1 counter = 0 while True: msg = mlog.recv_match(type=['RCDA'], condition=args.condition) if msg is None: mlog.rewind() tlast = -1 continue tnow = msg.TimeUS if tlast == -1: tlast = tnow buf = struct.pack("<IIIIIIIIII", msg.U0, msg.U1, msg.U2, msg.U3, msg.U4, msg.U5, msg.U6, msg.U7, msg.U8, msg.U9)[0:msg.Len] ibuf = [ ord(b) for b in buf ] dt = tnow - tlast tlast = tnow print(len(ibuf), ibuf, dt) if args.port: time.sleep(dt*1.0e-6*args.delay_mul) port.write(buf)
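The RCDA log message carries up to 40 raw RC bytes packed into ten little-endian uint32 fields (U0..U9) plus a Len field; the struct.pack call above re-serialises those fields and trims to that length. A standalone sketch of the same round trip with made-up values:

import struct

# Hypothetical logged words and byte count
words = [0x64636261, 0x00006665, 0, 0, 0, 0, 0, 0, 0, 0]
length = 6

buf = struct.pack("<10I", *words)[0:length]   # "<10I" == "<IIIIIIIIII"; buf == b'abcdef'
ibuf = list(buf)                              # [97, 98, 99, 100, 101, 102] on Python 3;
                                              # the ord() comprehension above is the Python 2 spelling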
Spring is Here! Have you booked your stay with Rusty Hook yet? Book your Spring/Summer getaway with us today. Modern Cabin rentals and Convenience Store at Tobin Lake. Location: Hwy 255 @ Resort Village of Tobin Lake - We are the very first property upon entering the village at the top of the hill. Book one of our modern Cabins today! See what we offer at our store. Fish the renowned Tobin Lake for your "record" experience summer or winter. Snowmobile, quad, golf, boat, swim or just relax in your chair with a good book. We offer modern two to three bedroom cabin rentals. Our full-service convenience store is situated within walking distance to accommodate your needs during your stay. Cabin and Store open daily. VISA, M/C, American Express & Debit payments accepted. Have fun in the snow at Rusty Hook! Modern Cabin rentals and Service Store convenience at Tobin Lake, SK.
# -*-mode: python; py-indent-offset: 4; indent-tabs-mode: nil; encoding: utf-8-dos; coding: utf-8 -*- import sys from optparse import OptionParser, make_option from OpenTrader.deps.cmd2plus import remaining_args, ParsedString lOPTIONS_DEFINED = [] def options(option_list, arg_desc="arg", usage=None): '''Used as a decorator and passed a list of optparse-style options, alters a cmd2 method to populate its ``opts`` argument from its raw text argument. Example: transform def do_something(self, arg): into @options([make_option('-q', '--quick', action="store_true", help="Makes things fast")], "source dest") def do_something(self, arg, opts): if opts.quick: self.fast_button = True ''' global lOPTIONS_DEFINED import optparse import pyparsing if not isinstance(option_list, list): option_list = [option_list] for opt in option_list: # opt is an optparse Option lOPTIONS_DEFINED.append(pyparsing.Literal(opt.get_opt_string())) def option_setup(func): optionParser = OptionParser(usage=usage) optionParser.disable_interspersed_args() for opt in option_list: # opt is an optparse Option optionParser.add_option(opt) optionParser.set_usage("%s [options] %s" % (func.__name__[3:], arg_desc)) optionParser._func = func def oUpdateOptionParser(instance): if func.__name__.startswith('do_'): sName = func.__name__[3:] if hasattr(instance, 'oConfig') and sName in instance.oConfig: oConfigSection = instance.oConfig[sName] # iterate over optionParser for sKey, gVal in oConfigSection.iteritems(): sOption = '--' +sKey if optionParser.has_option(sOption): oOption = optionParser.get_option(sOption) # FixMe: only if the default is optparse.NO_DEFAULT? if oOption.default is optparse.NO_DEFAULT: # FixMe: does this set the default? oOption.default = gVal # FixMe: how about this? optionParser.defaults[oOption.dest] = oOption.default return optionParser def new_func(instance, arg): try: # makebe return a list and prepend it optionParser = oUpdateOptionParser(instance) opts, newArgList = optionParser.parse_args(arg.split()) # Must find the remaining args in the original argument list, but # mustn't include the command itself #if hasattr(arg, 'parsed') and newArgList[0] == arg.parsed.command: # newArgList = newArgList[1:] newArgs = remaining_args(arg, newArgList) if isinstance(arg, ParsedString): arg = arg.with_args_replaced(newArgs) else: arg = newArgs except optparse.OptParseError as e: print (e) optionParser.print_help() return if hasattr(opts, '_exit'): return None result = func(instance, arg, opts) return result func._optionParser = optionParser if func.__doc__ is None and usage is None: func.__doc__ = "" elif func.__doc__ is None and usage: func.__doc__ = usage elif usage: func.__doc__ = '%s\n%s' % (usage, func.__doc__, ) new_func.__doc__ = '%s\n%s' % (func.__doc__, optionParser.format_help()) return new_func return option_setup
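A minimal usage sketch following the decorator's own docstring (the command and option names are illustrative; in practice the decorated method lives on a cmd2-style Cmd subclass):

from optparse import make_option

class ExampleCmd(object):
    @options([make_option('-q', '--quick', action='store_true',
                          help='Makes things fast')],
             'source dest')
    def do_something(self, arg, opts):
        # `arg` holds the remaining positional text, `opts` the parsed options
        if opts.quick:
            print('fast path for', arg)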
Gallery is a kind of Photography apps for Android, 9Apps official website provides download and walkthrough for Gallery. Exacoin ICO is a kind of Finance apps for Android, 9Apps official website provides download and walkthrough for Exacoin ICO, Play free Exacoin ICO online. Security Gallery is a kind of Productivity apps for Android, 9Apps official website provides download and walkthrough for Security Gallery, Play free Security Gallery. Cryptocurrency Trading Course is a kind of Education apps for Android, 9Apps official website provides download and walkthrough for Cryptocurrency Trading Course. CNET Download.com is your best guide to find free downloads of safe, trusted, and secure Windows software, utilities, and games. Gallery is a kind of Personalization apps for Android, 9Apps official website provides download and walkthrough for Gallery, Play free Gallery online. GuessSneaker is a kind of Trivia apps for Android, 9Apps official website provides download and walkthrough for GuessSneaker, Play free GuessSneaker online. CodyCross Game is a kind of Arcade apps for Android, 9Apps official website provides download and walkthrough for CodyCross Game, Play free CodyCross Game online. TabTrader is a kind of Finance apps for Android, 9Apps official website provides download and walkthrough for TabTrader, Play free TabTrader online. I-Do is a kind of Finance apps for Android, 9Apps official website provides download and walkthrough for I-Do, Play free I-Do online. Satellite Director PRO is a kind of Tools apps for Android, 9Apps official website provides download and walkthrough for Satellite Director PRO, Play free Satellite. Re-Download the mining software from our mining page and setup your miner. Gallery is a kind of Tools apps for Android, 9Apps official website provides download and walkthrough for Gallery, Play free Gallery online. SP Immo is a kind of Tools apps for Android, 9Apps official website provides download and walkthrough for SP Immo, Play free SP Immo online. Humanico is a kind of Business apps for Android, 9Apps official website provides download and walkthrough for Humanico, Play free Humanico online.
#!/usr/bin/env python # Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function import sys import time from . import roslogging from . import masterapi from .impl import graph def fullusage(): print("""rosgraph is a command-line tool for debugging the ROS Computation Graph. Usage: \trosgraph """) def rosgraph_main(): if len(sys.argv) == 1: pass elif len(sys.argv) == 2 and (sys.argv[1] == '-h' or sys.argv[1] == '--help'): fullusage() return else: fullusage() sys.exit(-1) roslogging.configure_logging('rosgraph') # make sure master is available master = masterapi.Master('rosgraph') try: master.getPid() except: print("ERROR: Unable to communicate with master!", file=sys.stderr) return g = graph.Graph() try: while 1: g.update() if not g.nn_nodes and not g.srvs: print("empty") else: print('\n') if g.nn_nodes: print('Nodes:') for n in g.nn_nodes: prefix = n + '|' print(' ' + n + ' :') print(' Inbound:') for k in g.nn_edges.edges_by_end.keys(): if k.startswith(prefix): for c in g.nn_edges.edges_by_end[k]: print(' ' + c.start) print(' Outbound:') for k in g.nn_edges.edges_by_start.keys(): if k.startswith(prefix): for c in g.nn_edges.edges_by_start[k]: print(' ' + c.end) if g.srvs: print('Services:') for s in g.srvs: print(' ' + s) time.sleep(1.0) except KeyboardInterrupt: pass
#!/usr/bin/python # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries """ Implementing logistic regression for classification problem Helpful resources: Coursera ML course https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac """ import numpy as np from matplotlib import pyplot as plt from sklearn import datasets # get_ipython().run_line_magic('matplotlib', 'inline') # In[67]: # sigmoid function or logistic function is used as a hypothesis function in # classification problems def sigmoid_function(z): return 1 / (1 + np.exp(-z)) def cost_function(h, y): return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() def log_likelihood(X, Y, weights): scores = np.dot(X, weights) return np.sum(Y * scores - np.log(1 + np.exp(scores))) # here alpha is the learning rate, X is the feature matrix,y is the target matrix def logistic_reg(alpha, X, y, max_iterations=70000): theta = np.zeros(X.shape[1]) for iterations in range(max_iterations): z = np.dot(X, theta) h = sigmoid_function(z) gradient = np.dot(X.T, h - y) / y.size theta = theta - alpha * gradient # updating the weights z = np.dot(X, theta) h = sigmoid_function(z) J = cost_function(h, y) if iterations % 100 == 0: print(f"loss: {J} \t") # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": iris = datasets.load_iris() X = iris.data[:, :2] y = (iris.target != 0) * 1 alpha = 0.1 theta = logistic_reg(alpha, X, y, max_iterations=70000) print("theta: ", theta) # printing the theta i.e our weights vector def predict_prob(X): return sigmoid_function( np.dot(X, theta) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0") plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1") (x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max()) (x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max()) (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max)) grid = np.c_[xx1.ravel(), xx2.ravel()] probs = predict_prob(grid).reshape(xx1.shape) plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black") plt.legend() plt.show()
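The update inside logistic_reg() is the standard batch gradient step theta := theta - alpha * X^T (h - y) / m. A tiny hand-checkable sketch of one such step on made-up data:

import numpy as np

X = np.array([[1.0, 2.0],
              [2.0, 1.0]])            # two samples, two features (made-up numbers)
y = np.array([1.0, 0.0])
theta = np.zeros(2)
alpha = 0.1

h = 1 / (1 + np.exp(-X.dot(theta)))   # sigmoid(0) = 0.5 for both samples
gradient = X.T.dot(h - y) / y.size    # -> [0.25, -0.25]
theta = theta - alpha * gradient      # -> [-0.025, 0.025]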
Don't miss the season's lowest prices now at Pleasurements. Popular now: 15% Off Any Order. Click here to start shopping! Get the latest offers and promotional codes for your local Pleasurements store when you decide to enjoy the product at Pleasurements. Don't miss this amazing offer! Shop Pleasurements today for great products at a great price! Popular now: Up to 56% Off Lingerie Sale. Be quick, as the offer is going fast. Pleasurements is the best place to help you save more money today! Top Discount: Pleasurements sale - up to 30% off sitewide. Don't miss this fantastic deal! A few clicks of the mouse allow you to enjoy the most popular promotional codes at Pleasurements. Make it a point to save even more at Pleasurements by using Pleasurements coupon codes and spend less with select purchases. You can shop Pleasurements without breaking your budget by using Pleasurements coupon codes to score items from some highly coveted brands. Using cashbackscotland to shop at Pleasurements not only saves you time, but it also saves you money and gives you a great experience when shopping online.
from daiquiri.core.renderers import XMLRenderer
from daiquiri.core.renderers.datacite import DataciteRendererMixin
from daiquiri.core.renderers.dublincore import DublincoreRendererMixin
from daiquiri.core.renderers.voresource import VoresourceRendererMixin


class OaiRenderer(DublincoreRendererMixin, DataciteRendererMixin,
                  VoresourceRendererMixin, XMLRenderer):

    def render_document(self, data, accepted_media_type=None, renderer_context=None):
        self.start('oai:OAI-PMH', {
            'xmlns:oai': 'http://www.openarchives.org/OAI/2.0/',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:schemaLocation': 'http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd'
        })
        self.node('oai:responseDate', {}, data['responseDate'])

        request_arguments = data['arguments']
        for error_code, _ in data['errors']:
            if error_code in ['badVerb', 'badArgument']:
                request_arguments = {}

        self.node('oai:request', request_arguments, data['baseUrl'])

        if data['errors']:
            self.render_errors(data['errors'])
        elif data['verb'] == 'GetRecord':
            self.render_get_record(data['response'])
        elif data['verb'] == 'Identify':
            self.render_identify(data['response'], data['baseUrl'])
        elif data['verb'] == 'ListIdentifiers':
            self.render_list_identifiers(data['response']['items'],
                                         data['response']['resumptionToken'])
        elif data['verb'] == 'ListMetadataFormats':
            self.render_list_metadata_formats(data['response'])
        elif data['verb'] == 'ListRecords':
            self.render_list_records(data['response']['items'],
                                     data['response']['resumptionToken'])
        elif data['verb'] == 'ListSets':
            self.render_list_sets(data['response'])

        self.end('oai:OAI-PMH')

    def render_errors(self, errors):
        for error_code, error_message in errors:
            self.node('error', {'code': error_code}, error_message)

    def render_get_record(self, item):
        self.start('oai:GetRecord')
        self.render_record(item)
        self.end('oai:GetRecord')

    def render_identify(self, repository_metadata, base_url):
        self.start('oai:Identify')
        self.node('oai:repositoryName', {}, repository_metadata.get('repository_name'))
        self.node('oai:baseURL', {}, base_url)
        self.node('oai:protocolVersion', {}, '2.0')
        self.node('oai:adminEmail', {}, repository_metadata['admin_email'])
        self.node('oai:earliestDatestamp', {},
                  repository_metadata.get('earliest_datestamp').strftime('%Y-%m-%dT%H:%M:%SZ'))
        self.node('oai:deletedRecord', {}, repository_metadata.get('deleted_record'))
        self.node('oai:granularity', {}, 'YYYY-MM-DDThh:mm:ssZ')
        self.render_identify_description(repository_metadata)
        self.end('oai:Identify')

    def render_identify_description(self, repository_metadata):
        self.start('oai:description')
        if repository_metadata['identifier'] is not None:
            self.render_oai_identifier(repository_metadata.get('identifier'))
        self.end('oai:description')

        self.start('oai:description')
        if repository_metadata['registry'] is not None:
            self.render_voresource(repository_metadata.get('registry'))
        self.end('oai:description')

    def render_oai_identifier(self, identifier_metadata):
        self.start('oai-identifier', {
            'xmlns': 'http://www.openarchives.org/OAI/2.0/oai-identifier',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:schemaLocation': 'http://www.openarchives.org/OAI/2.0/oai-identifier http://www.openarchives.org/OAI/2.0/oai-identifier.xsd'
        })
        self.node('scheme', {}, identifier_metadata.get('scheme'))
        self.node('repositoryIdentifier', {}, identifier_metadata.get('repository_identifier'))
        self.node('delimiter', {}, identifier_metadata.get('delimiter'))
        self.node('sampleIdentifier', {}, identifier_metadata.get('sample_identifier'))
        self.end('oai-identifier')

    def render_list_identifiers(self, items, resumption_token):
        self.start('oai:ListIdentifiers')
        for item in items:
            self.render_header(item['header'])
        if resumption_token:
            self.node('oai:resumptionToken', {
                'oai:expirationDate': resumption_token.get('expirationDate'),
                'oai:completeListSize': resumption_token.get('completeListSize'),
                'oai:cursor': resumption_token.get('cursor')
            }, resumption_token['token'])
        self.end('oai:ListIdentifiers')

    def render_list_metadata_formats(self, metadata_formats):
        self.start('oai:ListMetadataFormats')
        for metadata_format in metadata_formats:
            self.start('oai:metadataFormat')
            self.node('oai:metadataPrefix', {}, metadata_format['prefix'])
            self.node('oai:schema', {}, metadata_format.get('schema'))
            self.node('oai:metadataNamespace', {}, metadata_format.get('namespace'))
            self.end('oai:metadataFormat')
        self.end('oai:ListMetadataFormats')

    def render_list_records(self, items, resumption_token):
        self.start('oai:ListRecords')
        for item in items:
            self.render_record(item)
        if resumption_token:
            self.node('oai:resumptionToken', {
                'oai:expirationDate': resumption_token.get('expirationDate'),
                'oai:completeListSize': resumption_token.get('completeListSize'),
                'oai:cursor': resumption_token.get('cursor')
            }, resumption_token['token'])
        self.end('oai:ListRecords')

    def render_list_sets(self, data):
        self.start('oai:ListSets')
        for oai_set in data['oai_sets']:
            self.start('oai:set')
            self.node('oai:setSpec', {}, oai_set['setSpec'])
            self.node('oai:setName', {}, oai_set['setName'])
            if oai_set['setDescription'] is not None:
                self.node('oai:setDescription', {}, oai_set['setDescription'])
            self.end('oai:set')
        self.end('oai:ListSets')

    def render_record(self, record):
        self.start('oai:record')
        self.render_header(record['header'])
        if record['metadata'] is not None:
            self.start('oai:metadata')
            self.render_metadata(record['metadata'])
            self.end('oai:metadata')
        self.end('oai:record')

    def render_header(self, header):
        self.start('oai:header', {'status': 'deleted'} if header['deleted'] else {})
        self.node('oai:identifier', {}, header['identifier'])
        self.node('oai:datestamp', {}, header['datestamp'])
        for spec in header.get('setSpec', []):
            self.node('oai:setSpec', {}, spec)
        self.end('oai:header')

    def render_metadata(self, metadata):
        raise NotImplementedError()


class DublincoreRenderer(OaiRenderer):

    def render_metadata(self, metadata):
        self.render_dublincore(metadata)


class OaiDataciteRenderer(OaiRenderer):

    def render_metadata(self, metadata):
        self.start('oai_datacite', {
            'xmlns': 'http://schema.datacite.org/oai/oai-1.0/',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:schemaLocation': 'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd'
        })
        self.start('payload')
        self.render_datacite(metadata)
        self.end('payload')
        self.end('oai_datacite')


class DataciteRenderer(OaiRenderer):

    def render_metadata(self, metadata):
        self.render_datacite(metadata)


class VoresourceRenderer(OaiRenderer):

    def render_metadata(self, metadata):
        self.render_voresource(metadata)
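For orientation, here is a minimal sketch (not part of the module above) of the response dictionary that render_document() expects, using a hypothetical 'Identify' payload. The key names are taken from the renderer code above; the example values, and the note on how the dictionary reaches the renderer, are assumptions, since the daiquiri view layer and the XMLRenderer base class are not shown here.

from datetime import datetime

# Hypothetical 'Identify' payload of the shape that render_document() reads;
# every key below is one the renderer code above actually accesses.
identify_data = {
    'responseDate': '2020-01-01T00:00:00Z',
    'baseUrl': 'https://example.org/oai/',
    'arguments': {'verb': 'Identify'},
    'errors': [],
    'verb': 'Identify',
    'response': {
        'repository_name': 'Example repository',
        'admin_email': 'admin@example.org',
        'earliest_datestamp': datetime(2000, 1, 1),
        'deleted_record': 'no',
        'identifier': None,   # skips the oai-identifier description block
        'registry': None,     # skips the VOResource description block
    },
}

# A concrete subclass such as DublincoreRenderer would then be driven by the
# XMLRenderer base class (not shown above), which is expected to call
# render_document(identify_data) once its XML writer is set up, e.g.:
#
#     DublincoreRenderer().render(identify_data)
#
# The exact entry point is part of daiquiri and may differ.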
Does anyone have a fightpad I could buy off them?

Was just at Microplay in Guelph yesterday and they were selling a Marvel fightpad for 360. You should check it out.
#!/usr/bin/env python
# find_differential_primers.py
#
# A Python script that identifies pairs of forward and reverse primers which
# are capable of amplifying either individual organisms, or a particular
# family of organisms, from a set of genome sequences. Primers are expected
# to be located within CDS features, in an attempt to maximise sequence
# stability of the primers.
#
# The script reads from a configuration file containing sequence names and,
# at a minimum, the location of a complete genome sequence. Optionally, the
# configuration file may also indicate:
# - the location of a GenBank file containing CDS feature locations,
#   or an equivalent output file from the Prodigal genefinder
#   (http://compbio.ornl.gov/prodigal/)
# - the locations on the genome, and sequences of, primers predicted in
#   EMBOSS ePrimer3 output format
#   (http://emboss.bioinformatics.nl/cgi-bin/emboss/help/eprimer3)
#
# The first step of the script, if no primer file is specified, is to use
# the sequence file as the basis for a call to EMBOSS ePrimer3
# (http://emboss.bioinformatics.nl/cgi-bin/emboss/help/eprimer3), which must
# be installed and either on the $PATH, or its location specified at the
# command line. This will generate an output file with the same stem as the
# sequence file, but with the extension '.eprimer3'. Some ePrimer3 settings,
# such as the number of primers to find, are command-line options.
#
# If no CDS feature file is specified, and the --noCDS flag is not set,
# the script will attempt first to use Prodigal
# (http://compbio.ornl.gov/prodigal/) to predict CDS locations, placing the
# output in the same directory as the sequence source. If Prodigal cannot be
# found, a warning will be given, and the script will proceed as if the
# --noCDS flag is set. If this flag is set, then all primers are carried
# through to a query with the EMBOSS PrimerSearch package
# (http://emboss.bioinformatics.nl/cgi-bin/emboss/help/primersearch) against
# all other sequences in the dataset. If the flag is not set, then all
# primers that are not located within a CDS feature are excluded from the
# PrimerSearch input. To enable this, the PrimerSearch input is written to
# an intermediate file with the same stem as the input sequence, but the
# extension '.primers'.
#
# A run of PrimerSearch is carried out with every set of primers against
# all other sequences in the dataset. The output of this search is written to
# a file with the following naming convention:
#   <query>_primers_vs_<target>.primersearch
# Where <query> is the name given to the query sequence in the config file,
# and <target> is the name given to the target sequence in the config file.
# This step is not carried out if the --noprimersearch flag is set. When this
# flag is set, the script will look for the corresponding PrimerSearch output
# in the same directory as the sequence file, and will report an error if it
# is not present.
#
# Finally, the script uses the PrimerSearch results to identify primers that
# are unique to each query sequence, and to each family named in the config
# file. These are reported in files with the following naming convention:
#   <query>_specific_primers.eprimer3
#   <family>_specific_primers.primers
# We use ePrimer3 format for the family-specific primers, even though the
# start and end positions are meaningless, as they will amplify at different
# sites in each family member. However, the source sequence is indicated in a
# comment line, and the primer sequences and T_m/GC% values should be the
# same, regardless.
# Primers that are universal to all sequences in the sample are written in
# ePrimer3 format to the file:
#   universal_primers.eprimer3
# This file has the same caveats as the family-specific file above.
#
# (c) The James Hutton Institute 2011
# Authors: Leighton Pritchard, Benjamin Leopold, Michael Robeson
#
# Contact:
# [email protected]
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# script version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '0.1.2'


###
# IMPORTS

import logging
import logging.handlers
import multiprocessing
import os
import subprocess
import sys
import time
import traceback
import re

from collections import defaultdict   # Syntactic sugar
from optparse import OptionParser     # Cmd-line parsing

try:
    from Bio import SeqIO  # Parsing biological sequence data
    from Bio.Blast.Applications import NcbiblastnCommandline
    from Bio.Blast import NCBIXML  # BLAST XML parser
    from Bio.Emboss.Applications import Primer3Commandline, \
        PrimerSearchCommandline
    from Bio.Emboss import Primer3, PrimerSearch  # EMBOSS parsers
    from Bio.GenBank import _FeatureConsumer  # For GenBank locations
    from Bio.Seq import Seq  # Represents a sequence
    from Bio.SeqRecord import SeqRecord  # Represents annotated record
    from Bio.SeqFeature import SeqFeature  # Represents annotated record
except ImportError:
    sys.stderr.write("Biopython required for script, but not found (exiting)")
    sys.exit(1)

try:
    from bx.intervals.cluster import ClusterTree  # Interval tree building
except ImportError:
    sys.stderr.write("bx-python required for script, but not found (exiting)")
    sys.exit(1)


###
# CLASSES

# Class describing an organism's genome, and associated data.
class GenomeData(object):
    """ Describes an organism's genome, and has attributes:

        name - short, unique (not enforced) identification string
        families - string indicating family memberships
        seqfilename - location of representative genome sequence file
        ftfilename - location of GBK/Prodigal feature file
        primerfilename - location of ePrimer3 format primers file
        primers - dictionary collection of Bio.Emboss.Primer3.Primer
                  objects, keyed by primer name

        Exposed methods are:

    """
    def __init__(self, name, families=None, seqfilename=None,
                 ftfilename=None, primerfilename=None,
                 primersearchfilename=None):
        """ Expects at minimum a name to identify the organism.  Optionally
            filenames describing the location of sequence, feature, and
            primer data may be specified, along with a family classification.
            name - short, unique (not enforced) identification string
            family - string indicating a family membership
            seqfilename - location of representative genome sequence file
            ftfilename - location of GBK/Prodigal feature file
            primerfilename - location of ePrimer3 format primers file
            primersearchfilename - location of PrimerSearch format primers
                                   file

            Rather hackily, passing '-' to any of the keyword arguments also
            sets them to None; this is to aid in config file parsing, and is
            a wee bit ugly.
        """
        self.name = name  # Short identifier
        self.families = families.split(',') if families != '-' else None
        self.seqfilename = seqfilename if seqfilename != '-' else None
        self.ftfilename = ftfilename if ftfilename != '-' else None
        self.primerfilename = primerfilename if primerfilename != '-' \
            else None
        self.primersearchfilename = primersearchfilename if\
            primersearchfilename != '-' else None
        self.primers = {}     # Dict of Primer objects, keyed by name
        self.sequence = None  # Will hold genome sequence
        self.load_sequence()

    def load_sequence(self):
        """ Load the sequence defined in self.seqfile into memory.  We
            assume it's FASTA format.  This can then be used to calculate
            amplicons when loading primers in.
        """
        if self.seqfilename is not None:
            try:
                self.sequence = SeqIO.read(open(self.seqfilename, 'rU'),
                                           'fasta')
            except ValueError:
                logger.error("Loading sequence file %s failed",
                             self.seqfilename)
                logger.error(last_exception())
                sys.exit(1)

    def write_primers(self):
        """ Write the primer pairs in self.primers out to file in an
            appropriate format for PrimerSearch.  If the filename is not
            already defined, the filestem of the source sequencefile is
            used for the output file, with the extension '.primers'.
            The method returns the number of lines written.
        """
        # Define output filename, if not already defined
        if self.primersearchfilename is None:
            self.primersearchfilename = \
                os.path.splitext(self.seqfilename)[0] + '.primers'
        time_start = time.time()
        logger.info("Writing primers to file %s ...",
                    self.primersearchfilename)
        # Open handle and write data
        outfh = open(self.primersearchfilename, 'w')
        outfh.write("# Primers for %s\n" % self.name)
        outfh.write("# Automatically generated by find_differential_primers\n")
        for primers in self.primers.values():
            outfh.write("%s\t%s\t%s\n" %
                        (primers.name, primers.forward_seq,
                         primers.reverse_seq))
        if not len(self.primers):
            logger.warning("WARNING: no primers written to %s!",
                           self.primersearchfilename)
        # Being tidy
        outfh.close()
        logger.info("... wrote %d primers to %s (%.3fs)",
                    len(self.primers), self.primersearchfilename,
                    time.time() - time_start)

    def get_unique_primers(self, cds_overlap=False, oligovalid=False,
                           blastfilter=False):
        """ Returns a list of primers that have the .amplifies_organism
            attribute, but where this is an empty set.  If cds_overlap is
            True, then this list is restricted to those primers whose
            .cds_overlap attribute is also True
        """
        return self.get_primers_amplify_count(0, cds_overlap, blastfilter)

    def get_family_unique_primers(self, family_members, cds_overlap=False,
                                  blastfilter=False):
        """ Returns a list of primers that have the .amplifies_organism
            attribute, and where the set of organisms passed in
            family_members is the same as that in .amplifies_organism, with
            the addition of self.name.
If cds_overlap is True, then this list is restricted to those primers whose .cds_overlap attribute is also True """ primerlist = [] for primer in self.primers.values(): if family_members == \ set([self.name]).union(primer.amplifies_organism): primerlist.append(primer) logger.info("[%s] %d family primers", self.name, len(primerlist)) if cds_overlap: primerlist = [p for p in primerlist if p.cds_overlap] logger.info("[%s] %d primers after CDS filter", self.name, len(primerlist)) if options.filtergc3prime: primerlist = [p for p in primerlist if p.gc3primevalid] logger.info("[%s] %d primers after GC 3` filter", self.name, len(primerlist)) if options.hybridprobe: primerlist = [p for p in primerlist if p.oligovalid] logger.info("[%s] %d primers after oligo filter", self.name, len(primerlist)) if blastfilter: primerlist = [p for p in primerlist if p.blastpass] logger.info("[%s] %d primers after BLAST filter", self.name, len(primerlist)) if options.single_product: primerlist = [p for p in primerlist if p.negative_control_amplimers == 1] logger.info("[%s] %d primers after single_product filter", self.name, len(primerlist)) logger.info("[%s] returning %d primers", self.name, len(primerlist)) return primerlist def get_primers_amplify_count(self, count, cds_overlap=False, blastfilter=False): """ Returns a list of primers that have the .amplifies_organism attribute and the length of this set is equal to the passed count. If cds_overlap is True, then this list is restricted to those primers whose .cds_overlap attribute is also True """ primerlist = [p for p in self.primers.values() if count == len(p.amplifies_organism)] logger.info("[%s] %d family primers that amplify %d orgs", self.name, len(primerlist), count) if cds_overlap: primerlist = [p for p in primerlist if p.cds_overlap] logger.info("[%s] %d primers after CDS filter", self.name, len(primerlist)) if options.filtergc3prime: primerlist = [p for p in primerlist if p.gc3primevalid] logger.info("[%s] %d primers after GC 3` filter", self.name, len(primerlist)) if options.hybridprobe: primerlist = [p for p in primerlist if p.oligovalid] logger.info("[%s] %d primers after oligo filter", self.name, len(primerlist)) if blastfilter: primerlist = [p for p in primerlist if p.blastpass] logger.info("[%s] %d primers after BLAST filter", self.name, len(primerlist)) if options.single_product: primerlist = [p for p in primerlist if p.negative_control_amplimers == 1] logger.info("[%s] %d primers after single_product filter", self.name, len(primerlist)) logger.info("[%s] returning %d primers", self.name, len(primerlist)) return primerlist # Filter primers on the basis of CDS feature overlap def filter_primers(self, psizemin): """ Takes the minimum size of an amplified region, and then uses a ClusterTree to find clusters of CDS and primer regions that overlap by this minimum size. There is a possibility that, by stacking primer regions, some of the reported overlapping primers may in fact not overlap CDS regions directly, so this method may overreport primers. - psizemin (int): minimum size of an amplified region """ # Load in the feature data. 
This is done using either SeqIO for # files with the .gbk extension, or an ad hoc parser for # .prodigalout prediction files time_start = time.time() logger.info("Loading feature data from %s ...", self.ftfilename) if os.path.splitext(self.ftfilename)[-1] == '.gbk': # GenBank seqrecord = [r for r in SeqIO.parse(open(self.ftfilename, 'rU'), 'genbank')] elif os.path.splitext(self.ftfilename)[-1] == '.prodigalout': seqrecord = parse_prodigal_features(self.ftfilename) else: raise IOError("Expected .gbk or .prodigalout file extension") logger.info("... loaded %d features ...", len(seqrecord.features)) # Use a ClusterTree as an interval tree to identify those # primers that overlap with features. By setting the minimum overlap # to the minimum size for a primer region, we ensure that we capture # every primer that overlaps a CDS feature by this amount, but we may # also extend beyond the CDS by stacking primers, in principle. logger.info("... adding CDS feature locations to ClusterTree ...") ctree = ClusterTree(-psizemin, 2) # Loop over CDS features and add them to the tree with ID '-1'. This # allows us to easily separate the features from primers when # reviewing clusters. for feature in [f for f in seqrecord.features if f.type == 'CDS']: ctree.insert(feature.location.nofuzzy_start, feature.location.nofuzzy_end, -1) # ClusterTree requires us to identify elements on the tree by integers, # so we have to relate each primer added to an integer in a temporary # list of the self.primers values logger.info("... adding primer locations to cluster tree ...") aux = {} for i, primer in enumerate(self.primers.values()): ctree.insert(primer.forward_start, primer.reverse_start + primer.reverse_length, i) aux[i] = primer # Now we find the overlapping regions, extracting all element ids # that are not -1. These are the indices for aux, and we modify the # self.cds_overlap attribute directly logger.info("... finding overlapping primers ...") overlap_primer_ids = set() # CDS overlap primers for (s, e, ids) in ctree.getregions(): primer_ids = set([i for i in ids if i != -1]) # get non-ft ids overlap_primer_ids = overlap_primer_ids.union(primer_ids) logger.info("... %d primers overlap CDS features (%.3fs)", len(overlap_primer_ids), time.time() - time_start) for i in overlap_primer_ids: aux[i].cds_overlap = True # Filter primers on the basis of internal oligo characteristics def filter_primers_oligo(self): """ Loops over the primer pairs in this GenomeData object and mark primer.oligovalid as False if the internal oligo corresponds to any of the following criteria: - G at 5` end or 3` end - two or more counts of 'CC' - G in second position at 5` end """ time_start = time.time() logger.info("Filtering %s primers on internal oligo...", self.name) invalidcount = 0 for primer in self.primers.values(): primer.oligovalid = not(primer.internal_seq.startswith('G') or primer.internal_seq.endswith('G') or primer.internal_seq[1:-1].count('CC') > 1 or primer.internal_seq[1] == 'G') if not primer.oligovalid: invalidcount += 1 #if (primer.internal_seq.startswith('G') or # primer.internal_seq.endswith('G') or # primer.internal_seq[1:-1].count('CC') > 1 or # primer.internal_seq[1] == 'G'): # primer.oligovalid = False # invalidcount += 1 logger.info("... 
%d primers failed (%.3fs)", invalidcount, time.time() - time_start) # Filter primers on the basis of GC content at 3` end def filter_primers_gc_3prime(self): """ Loops over the primer pairs in the passed GenomeData object and, if either primer has more than 2 G+C in the last five nucleotides, sets the .gc3primevalid flag to False. """ time_start = time.time() logger.info("Filtering %s primers on 3` GC content ...", self.name) invalidcount = 0 for primer in self.primers.values(): fseq, rseq = primer.forward_seq[-5:], primer.reverse_seq[-5:] if (fseq.count('C') + fseq.count('G') > 2) or \ (rseq.count('C') + fseq.count('G') > 2): primer.gc3primevalid = False invalidcount += 1 logger.info("... %d primers failed (%.3fs)", invalidcount, time.time() - time_start) # Concatenate multiple fragments of a genome to a single file def concatenate_sequences(self): """ Takes a GenomeData object and concatenates sequences with the spacer sequence NNNNNCATCCATTCATTAATTAATTAATGAATGAATGNNNNN (this contains start and stop codons in all frames, to cap individual sequences). We write this data out to a new file For filename convention, we just add '_concatenated' to the end of the sequence filestem, and use the '.fas' extension. """ # Spacer contains start and stop codons in all six frames spacer = 'NNNNNCATCCATTCATTAATTAATTAATGAATGAATGNNNNN' time_start = time.time() logger.info("Concatenating sequences from %s ...", self.seqfilename) newseq = SeqRecord(Seq(spacer.join([s.seq.data for s in SeqIO.parse(open(self.seqfilename, 'rU'), 'fasta')])), id=self.name + "_concatenated", description="%s, concatenated with spacers" % self.name) outfilename = ''.join([os.path.splitext(self.seqfilename)[0], '_concatenated', '.fas']) SeqIO.write([newseq], open(outfilename, 'w'), 'fasta') logger.info("... 
wrote concatenated data to %s (%.3fs)", outfilename, time.time() - time_start) return outfilename def __str__(self): """ Pretty string description of object contents """ outstr = ['GenomeData object: %s' % self.name] outstr.append('Families: %s' % list(self.families)) outstr.append('Sequence file: %s' % self.seqfilename) outstr.append('Feature file: %s' % self.ftfilename) outstr.append('Primers file: %s' % self.primerfilename) outstr.append('PrimerSearch file: %s' % self.primersearchfilename) outstr.append('Primers: %d' % len(self.primers)) if len(self.primers): outstr.append('Primers overlapping CDS: %d' % len([p for p in self.primers.values() if p.cds_overlap])) return os.linesep.join(outstr) + os.linesep ### # FUNCTIONS # Parse command-line options def parse_cmdline(): """ Parse command line, accepting args obtained from sys.argv """ usage = "usage: %prog [options] arg" parser = OptionParser(usage) parser.add_option("-i", "--infile", dest="filename", action="store", help="location of configuration file", default=None) parser.add_option("-o", "--outdir", dest="outdir", action="store", help="directory for output files", default="differential_primer_results") parser.add_option("--numreturn", dest="numreturn", action="store", help="number of primers to find", default=20, type="int") parser.add_option("--hybridprobe", dest="hybridprobe", action="store_true", help="generate internal oligo as a hybridisation probe", default=False) parser.add_option("--filtergc3prime", dest="filtergc3prime", action="store_true", help="allow no more than two GC at the 3` " + "end of primers", default=False) parser.add_option("--single_product", dest="single_product", action="store", help="location of FASTA sequence file containing " + "sequences from which a sequence-specific " + "primer must amplify exactly one product.", default=None) parser.add_option("--prodigal", dest="prodigal_exe", action="store", help="location of Prodigal executable", default="prodigal") parser.add_option("--eprimer3", dest="eprimer3_exe", action="store", help="location of EMBOSS eprimer3 executable", default="eprimer3") parser.add_option("--blast_exe", dest="blast_exe", action="store", help="location of BLASTN/BLASTALL executable", default="blastn") parser.add_option("--blastdb", dest="blastdb", action="store", help="location of BLAST database", default=None) parser.add_option("--useblast", dest="useblast", action="store_true", help="use existing BLAST results", default=False) parser.add_option("--nocds", dest="nocds", action="store_true", help="do not restrict primer prediction to CDS", default=False) parser.add_option("--noprodigal", dest="noprodigal", action="store_true", help="do not carry out Prodigal prediction step", default=False) parser.add_option("--noprimer3", dest="noprimer3", action="store_true", help="do not carry out ePrimer3 prediction step", default=False) parser.add_option("--noprimersearch", dest="noprimersearch", action="store_true", help="do not carry out PrimerSearch step", default=False) parser.add_option("--noclassify", dest="noclassify", action="store_true", help="do not carry out primer classification step", default=False) parser.add_option("--osize", dest="osize", action="store", help="optimal size for primer oligo", default=20, type="int") parser.add_option("--minsize", dest="minsize", action="store", help="minimum size for primer oligo", default=18, type="int") parser.add_option("--maxsize", dest="maxsize", action="store", help="maximum size for primer oligo", default=22, type="int") 
parser.add_option("--otm", dest="otm", action="store", help="optimal melting temperature for primer oligo", default=59, type="int") parser.add_option("--mintm", dest="mintm", action="store", help="minimum melting temperature for primer oligo", default=58, type="int") parser.add_option("--maxtm", dest="maxtm", action="store", help="maximum melting temperature for primer oligo", default=60, type="int") parser.add_option("--ogcpercent", dest="ogcpercent", action="store", help="optimal %GC for primer oligo", default=55, type="int") parser.add_option("--mingc", dest="mingc", action="store", help="minimum %GC for primer oligo", default=30, type="int") parser.add_option("--maxgc", dest="maxgc", action="store", help="maximum %GC for primer oligo", default=80, type="int") parser.add_option("--psizeopt", dest="psizeopt", action="store", help="optimal size for amplified region", default=100, type="int") parser.add_option("--psizemin", dest="psizemin", action="store", help="minimum size for amplified region", default=50, type="int") parser.add_option("--psizemax", dest="psizemax", action="store", help="maximum size for amplified region", default=150, type="int") parser.add_option("--maxpolyx", dest="maxpolyx", action="store", help="maximum run of repeated nucleotides in primer", default=3, type="int") parser.add_option("--mismatchpercent", dest="mismatchpercent", action="store", help="allowed percentage mismatch in primersearch", default=10, type="int") parser.add_option("--oligoosize", dest="oligoosize", action="store", help="optimal size for internal oligo", default=20, type="int") parser.add_option("--oligominsize", dest="oligominsize", action="store", help="minimum size for internal oligo", default=13, type="int") parser.add_option("--oligomaxsize", dest="oligomaxsize", action="store", help="maximum size for internal oligo", default=30, type="int") parser.add_option("--oligootm", dest="oligootm", action="store", help="optimal melting temperature for internal oligo", default=69, type="int") parser.add_option("--oligomintm", dest="oligomintm", action="store", help="minimum melting temperature for internal oligo", default=68, type="int") parser.add_option("--oligomaxtm", dest="oligomaxtm", action="store", help="maximum melting temperature for internal oligo", default=70, type="int") parser.add_option("--oligoogcpercent", dest="oligoogcpercent", action="store", help="optimal %GC for internal oligo", default=55, type="int") parser.add_option("--oligomingc", dest="oligomingc", action="store", help="minimum %GC for internal oligo", default=30, type="int") parser.add_option("--oligomaxgc", dest="oligomaxgc", action="store", help="maximum %GC for internal oligo", default=80, type="int") parser.add_option("--oligomaxpolyx", dest="oligomaxpolyx", action="store", help="maximum run of repeated nt in internal oligo", default=3, type="int") parser.add_option("--cpus", dest="cpus", action="store", help="number of CPUs to use in multiprocessing", default=multiprocessing.cpu_count(), type="int") parser.add_option("--sge", dest="sge", action="store_true", help="use SGE job scheduler", default=False) parser.add_option("--clean", action="store_true", dest="clean", help="clean up old output files before running", default=False) parser.add_option("--cleanonly", action="store_true", dest="cleanonly", help="clean up old output files and exit", default=False) parser.add_option("-l", "--logfile", dest="logfile", action="store", default=None, help="script logfile location") parser.add_option("-v", "--verbose", 
action="store_true", dest="verbose", help="report progress to log", default=False) parser.add_option("--debug", action="store_true", dest="debug", help="report extra progress to log for debugging", default=False) parser.add_option("--keep_logs", action="store_true", dest="keep_logs", help="store log files from each process", default=False) parser.add_option("--log_dir", action="store", dest="log_dir", help="store called process log files in this directory", default=None) (optsparsed, argsparsed) = parser.parse_args() return (optsparsed, argsparsed, parser) # Report last exception as string def last_exception(): """ Returns last exception as a string, or use in logging. """ exc_type, exc_value, exc_traceback = sys.exc_info() return ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) # Create a list of GenomeData objects corresponding to config file entries def create_gd_from_config(filename): """ Parses data from a configuration file into a list of GenomeData objects. Returns a list of GenomeData objects. Each line of the config file describes a single genome. The config file format is six tab-separated columns, where columns may be separated by multiple tabs. 'Empty' data values are indicated by the '-' symbol, and these are converted into None objects in parsing. Comment lines start with '#', as in Python. The five columns are: 1) Genome name 2) Genome family 3) Location of FASTA format sequence data 4) Location of GENBANK/PRODIGAL format feature data 5) Location of EPRIMER3 format primer data 6) Location of PRIMERSEARCH input format primer data The data would, of course, be better presented as an XML file, but it might be useful to maintain both tab- and XML-formatted approaches to facilitate human construction as well as computational. """ time_start = time.time() logger.info("Creating list of genomes from config file %s ...", filename) gd_list = [] # Hold GenomeData objects # Ignore blank lines and comments... for line in [l.strip() for l in open(filename, 'rU') if l.strip() and not l.startswith('#')]: # Split data and create new GenomeData object, adding it to the list data = [e.strip() for e in line.strip().split('\t') if e.strip()] name, family, sfile, ffile, pfile, psfile = tuple(data) gd_list.append(GenomeData(name, family, sfile, ffile, pfile, psfile)) logger.info("... created GenomeData object for %s ...", name) logger.info(gd_list[-1]) logger.info("... created %d GenomeData objects (%.3fs)", len(gd_list), time.time() - time_start) return gd_list # Check whether each GenomeData object has multiple sequence and, if so, # concatenate them sensibly, resetting feature and primer file locations to # None def check_single_sequence(gd_list): """ Loops over the GenomeData objects in the passed list and, where the sequence file contains multiple sequences, concatenates them into a single sequence using a spacer that facilitates gene-finding. As this process changes feature and primer locations, the ftfilename and primerfilename attributes are reset to None, and these are recalculated later on in the script, where necessary. """ time_start = time.time() logger.info("Checking for multiple sequences ...") for gd_obj in gd_list: # Verify that the sequence file contains a single sequence seqdata = [s for s in SeqIO.parse(open(gd_obj.seqfilename, 'rU'), 'fasta')] if len(seqdata) != 1: logger.info("... %s describes multiple sequences ...", gd_obj.seqfilename) gd_obj.seqfilename = gd_obj.concatenate_sequences() # Concatenate logger.info("... 
clearing feature and primer file locations ...") (gd_obj.ftfilename, gd_obj.primerfilename, gd_obj.primersearchfilename) = \ (None, None, None) logger.info("... checked %d GenomeData objects (%.3fs)", len(gd_list), time.time() - time_start) # Check for each GenomeData object in a passed list, the existence of # the feature file, and create one using Prodigal if it doesn't exist already def check_ftfilenames(gd_list): """ Loop over the GenomeData objects in gdlist and, where no feature file is specified, add the GenomeData object to the list of packets to be processed in parallel by Prodigal using multiprocessing. """ logger.info("Checking and predicting features for GenomeData files ...") # We split the GenomeData objects into those with, and without, # defined feature files, but we don't test the validity of the files # that were predefined, here. # We don't use the objects with features here, though #gds_with_ft = [gd_obj for gd_obj in gd_list if # (gd_obj.ftfilename is not None and # os.path.isfile(gd_obj.ftfilename))] gds_no_ft = [gd_obj for gd_obj in gd_list if (gd_obj.ftfilename is None or not os.path.isfile(gd_obj.ftfilename))] # Predict features for those GenomeData objects with no feature file logger.info("... %d GenomeData objects have no feature file ...", len(gds_no_ft)) logger.info("... running %d Prodigal jobs to predict CDS ...", len(gds_no_ft)) # Create a list of command-line tuples, for Prodigal # gene prediction applied to each GenomeData object in gds_no_ft. clines = [] for gd_obj in gds_no_ft: gd_obj.ftfilename = os.path.splitext(gd_obj.seqfilename)[0] +\ '.prodigalout' seqfilename = os.path.splitext(gd_obj.seqfilename)[0] + '.features' cline = "%s -a %s < %s > %s" % (options.prodigal_exe, seqfilename, gd_obj.seqfilename, gd_obj.ftfilename) clines.append(cline + log_output(gd_obj.name + ".prodigal")) logger.info("... Prodigal jobs to run:") logger.info("Running:\n" + "\n".join(clines)) # Depending on the type of parallelisation required, these command-lines # are either run locally via multiprocessing, or passed out to SGE if not options.sge: multiprocessing_run(clines) else: sge_run(clines) # Check whether GenomeData objects have a valid primer definition file def check_primers(gd_list): """ Loop over GenomeData objects in the passed gdlist and, if they have a defined primerfilename attribute, attempt to parse it. If this is successful, do nothing. If it fails, set the primerfilename attribute to None. """ logger.info("Checking ePrimer3 output files ...") for gd_obj in [g for g in gd_list if g.primerfilename]: try: Primer3.read(open(gd_obj.primerfilename, 'rU')) logger.info("... %s primer file %s OK ...", gd_obj.name, gd_obj.primerfilename) except IOError: logger.info("... %s primer file %s not OK ...", gd_obj.name, gd_obj.primerfilename) gd_obj.primerfilename = None # Check for each GenomeData object in a passed list, the existence of # the ePrimer3 file, and create one using ePrimer3 if it doesn't exist already def predict_primers(gd_list, emboss_version): """ Loop over the GenomeData objects in gdlist and, where no primer file is specified, add the GenomeData object to the list of packets to be processed in parallel by Prodigal using multiprocessing. 
""" logger.info("Checking and predicting primers for GenomeData files ...") # We need to split the GenomeData objects into those with, and without, # defined primer files, but we don't test the validity of these files # We don't use the gds #gds_with_primers = [g for g in gd_list if g.primerfilename is not None] gds_no_primers = [g for g in gd_list if g.primerfilename is None] # Predict primers for those GenomeData objects with no primer file logger.info("... %d GenomeData objects have no primer file ...", len(gds_no_primers)) logger.info("... running %d ePrimer3 jobs to predict CDS ...", len(gds_no_primers)) # Create command-lines to run ePrimer3 clines = [] for gd_obj in gds_no_primers: # Create ePrimer3 command-line. cline = Primer3Commandline(cmd=options.eprimer3_exe) cline.sequence = gd_obj.seqfilename cline.auto = True cline.osize = "%d" % options.osize # Optimal primer size cline.minsize = "%d" % options.minsize # Min primer size cline.maxsize = "%d" % options.maxsize # Max primer size # Optimal primer Tm option dependent on EMBOSS version if float('.'.join(emboss_version.split('.')[:2])) >= 6.5: cline.opttm = "%d" % options.otm # Optimal primer Tm else: cline.otm = "%d" % options.otm cline.mintm = "%d" % options.mintm # Min primer Tm cline.maxtm = "%d" % options.maxtm # Max primer Tm cline.ogcpercent = "%d" % options.ogcpercent # Optimal primer %GC cline.mingc = "%d" % options.mingc # Min primer %GC cline.maxgc = "%d" % options.maxgc # Max primer %GC cline.psizeopt = "%d" % options.psizeopt # Optimal product size # Longest polyX run in primer cline.maxpolyx = "%d" % options.maxpolyx # Allowed product sizes cline.prange = "%d-%d" % (options.psizemin, options.psizemax) # Number of primers to predict cline.numreturn = "%d" % options.numreturn cline.hybridprobe = options.hybridprobe # Predict internal oligo? # Internal oligo parameters; cline.osizeopt = "%d" % options.oligoosize # We use EMBOSS v6 parameter names, here. cline.ominsize = "%d" % options.oligominsize cline.omaxsize = "%d" % options.oligomaxsize cline.otmopt = "%d" % options.oligootm cline.otmmin = "%d" % options.oligomintm cline.otmmax = "%d" % options.oligomaxtm cline.ogcopt = "%d" % options.oligoogcpercent cline.ogcmin = "%d" % options.oligomingc cline.ogcmax = "%d" % options.oligomaxgc cline.opolyxmax = "%d" % options.oligomaxpolyx cline.outfile = os.path.splitext(gd_obj.seqfilename)[0] + '.eprimer3' gd_obj.primerfilename = cline.outfile clines.append(str(cline) + log_output(gd_obj.name + ".eprimer3")) logger.info("... ePrimer3 jobs to run:") logger.info("Running:\n" + '\n'.join(clines)) # Parallelise jobs if not options.sge: multiprocessing_run(clines) else: sge_run(clines) # Load primers from ePrimer3 files into each GenomeData object def load_primers(gd_list): """ Load primer data from an ePrimer3 output file into a dictionary of Bio.Emboss.Primer3.Primer objects (keyed by primer name) in a GenomeData object, for each such object in the passed list. Each primer object is given a new ad hoc attribute 'cds_overlap' which takes a Boolean, indicating whether the primer is found wholly within a CDS defined in the GenomeData object's feature file; this status is determined using an interval tree approach. """ logger.info("Loading primers, %sfiltering on CDS overlap", 'not ' if options.nocds else '') # Load in the primers, assigning False to a new, ad hoc attribute called # cds_overlap in each for gd_obj in gd_list: logger.info("... 
loading primers into %s from %s ...", gd_obj.name, gd_obj.primerfilename) try: os.path.isfile(gd_obj.primerfilename) except TypeError: raise IOError("Primer file %s does not exist." % gd_obj.primerfilename) primers = Primer3.read(open(gd_obj.primerfilename, 'rU')).primers # Add primer pairs to the gd.primers dictionary primercount = 0 for primer in primers: primercount += 1 primer.cds_overlap = False # default state primer.name = "%s_primer_%04d" % (gd_obj.name, primercount) primer.amplifies_organism = set() # Organisms amplified primer.amplifies_family = set() # Organism families amplified primer.gc3primevalid = True # Passes GC 3` test primer.oligovalid = True # Oligo passes filter primer.blastpass = True # Primers pass BLAST screen gd_obj.primers.setdefault(primer.name, primer) primer.amplicon = \ gd_obj.sequence[primer.forward_start - 1: primer.reverse_start - 1 + primer.reverse_length] primer.amplicon.description = primer.name logger.info("... loaded %d primers into %s ...", len(gd_obj.primers), gd_obj.name) # Now that the primers are in the GenomeData object, we can filter # them on location, if necessary if not options.nocds: gd_obj.filter_primers(options.psizemin) # We also filter primers on the basis of GC presence at the 3` end if options.filtergc3prime: gd_obj.filter_primers_gc_3prime() # Filter primers on the basis of internal oligo characteristics if options.hybridprobe: gd_obj.filter_primers_oligo() # Screen passed GenomeData primers against BLAST database def blast_screen(gd_list): """ The BLAST screen takes three stages. Firstly we construct a FASTA sequence file containing all primer forward and reverse sequences, for all primers in each GenomeData object of the list. We then use the local BLAST+ (not legacy BLAST) interface to BLASTN to query the named database with the input file. The multiprocessing of BLASTN is handled by either our multiprocessing threading approach, or by SGE; we don't use the built-in threading of BLAST so that we retain flexibility when moving to SGE. It's a small modification to revert to using the BLAST multithreading code. The output file is named according to the GenomeData object. The final step is to parse the BLAST output, and label the primers that make hits as not having passed the BLAST filter. """ build_blast_input(gd_list) run_blast(gd_list) parse_blast(gd_list) # Write BLAST input files for each GenomeData object def build_blast_input(gd_list): """ Loops over each GenomeData object in the list, and writes forward and reverse primer sequences out in FASTA format to a file with filename derived from the GenomeData object name. """ time_start = time.time() logger.info("Writing files for BLAST input ...") for gd_obj in gd_list: gd_obj.blastinfilename =\ os.path.join(os.path.split(gd_obj.seqfilename)[0], "%s_BLAST_input.fas" % gd_obj.name) seqrecords = [] for name, primer in gd_obj.primers.items(): seqrecords.append(SeqRecord(Seq(primer.forward_seq), id=name + '_forward')) seqrecords.append(SeqRecord(Seq(primer.reverse_seq), id=name + '_reverse')) logger.info("... writing %s ...", gd_obj.blastinfilename) SeqIO.write(seqrecords, open(gd_obj.blastinfilename, 'w'), 'fasta') logger.info("... done (%.3fs)", time.time() - time_start) # Run BLAST screen for each GenomeData object def run_blast(gd_list): """ Loop over the GenomeData objects in the passed list, and run a suitable BLASTN query with the primer sequences, writing to a file with name derived from the GenomeData object, in XML format. 
""" logger.info("Compiling BLASTN command-lines ...") clines = [] for gd_obj in gd_list: gd_obj.blastoutfilename =\ os.path.join(os.path.split(gd_obj.seqfilename)[0], "%s_BLAST_output.xml" % gd_obj.name) cline = NcbiblastnCommandline(query=gd_obj.blastinfilename, db=options.blastdb, task='blastn', # default: MEGABLAST out=gd_obj.blastoutfilename, num_alignments=1, num_descriptions=1, outfmt=5, perc_identity=90, ungapped=True) clines.append(str(cline) + log_output(gd_obj.name + ".blastn")) logger.info("... BLASTN+ jobs to run:") logger.info("Running:\n" + '\n'.join(clines)) if not options.sge: multiprocessing_run(clines) else: sge_run(clines) # Parse BLAST output for each GenomeData object def parse_blast(gd_list): """ Loop over the GenomeData objects in the passed list, and parse the BLAST XML output indicated in the .blastoutfilename attribute. For each query that makes a suitable match, mark the appropriate primer's .blastpass attribute as False """ time_start = time.time() logger.info("Parsing BLASTN output with multiprocessing ...") # Here I'm cheating a bit and using multiprocessing directly so that # we can speed up the parsing process a bit pool = multiprocessing.Pool(processes=options.cpus) pool_results = [pool.apply_async(process_blastxml, (g.blastoutfilename, g.name)) for g in gd_list] pool.close() pool.join() # Process the results returned from the BLAST searches. Create a # dictionary of GenomeData objects, keyed by name, and loop over the # result sets, setting .blastpass attributes for the primers as we go gddict = {} for gd_obj in gd_list: gddict.setdefault(gd_obj.name, gd_obj) failcount = 0 for result in [r.get() for r in pool_results]: for name in result: gd_obj = gddict[name.split('_primer_')[0]] gd_obj.primers[name].blastpass = False failcount += 1 logger.info("... %d primers failed BLAST screen ...", failcount) logger.info("... multiprocessing BLAST parsing complete (%.3fs)", time.time() - time_start) # BLAST XML parsing function for multiprocessing def process_blastxml(filename, name): """ Takes a BLAST output file, and a process name as input. Returns a set of query sequence names that make a suitably strong hit to the database. We are using the database as a screen, so *any* hit that passes our criteria will do; BLAST+ reports the hits in quality order, so we only need to see this top hit. We care if the screening match is identical for at least 90% of the query, and we're using ungapped alignments, so we check the alignment HSP identities against the length of the query. """ time_start = time.time() logger.info("[process name: %s] Parsing BLAST XML ...", name) # List to hold queries that hit the database matching_primers = set() recordcount = 0 # Parse the file try: for record in NCBIXML.parse(open(filename, 'rU')): recordcount += 1 # Increment our count of matches # We check whether the number of identities in the alignment is # greater than our (arbitrary) 90% cutoff. 
If so, we add the # query name to our set of failing/matching primers if len(record.alignments): identities = float(record.alignments[0].hsps[0].identities) / \ float(record.query_letters) if 0.9 <= identities: matching_primers.add('_'.join( record.query.split('_')[:-1])) logger.info("[process name: %s] Parsed %d records", name, recordcount) except IOError: logger.info("[process name: %s] Error reading BLAST XML file", name) logger.info("[process name: %s] Time spent in process: (%.3fs)", name, time.time() - time_start) # Return the list of matching primers return matching_primers # A function for parsing features from Prodigal output def parse_prodigal_features(filename): """ Parse Prodigal 'GenBank' output. We try to emulate SeqIO.read() SeqRecord output as much as possible, but the information provided by Prodigal is limited to feature type and location, on a single line. Amended: Newer versions of Prodigal write closer match to GenBank format, and thus if the first line matches "DEFINITION" we use SeqIO. RE-amended: Latest version of Prodigal is still not good enough for SeqIO, so a new function is created to parse line-by-line. """ record = SeqRecord(None) # record gets a dummy sequence # Open filehandle and parse contents handle = open(filename, 'rU') # init feature list from file parsing record.features = seqrecord_parse(handle) return record # Parse record features from the lines of prodigal or genbank format file def seqrecord_parse(filehandle): """ Parses the head lines of CDS features from a Prodigal or GenBank file. This is still necessary, as Prodigal's GenBank output is not SeqIO.read() friendly. """ features = [] for line in filehandle: if re.search("CDS", line): data = [e.strip() for e in line.split()] feature = gb_string_to_feature(data[-1]) feature.type = data[0] features.append(feature) return features # Parse record features from sequence file, using SeqIO def seqrecord_parse_seqio(filehandle, seqformat): """ NOTE: Latest version of prodigal output is *closer* to GenBank format but not close enough for SeqIO to find the genome.features Thus: this function NOT USED (until potential update to prodigal or SeqIO). """ features = [] seqrecord = list(SeqIO.parse(filehandle, seqformat)) for record in seqrecord: logger.debug("record seq: [%s]...", record.seq[0:12]) features.append(record.features) return features # Code (admittedly hacky) from Brad Chapman to parse a GenBank command line def gb_string_to_feature(content, use_fuzziness=True): """Convert a GenBank location string into a SeqFeature. """ consumer = _FeatureConsumer(use_fuzziness) consumer._cur_feature = SeqFeature() consumer.location(content) return consumer._cur_feature # Run PrimerSearch all-against-all on a list of GenomeData objects def primersearch(gd_list): """ Loop over the GenomeData objects in the passed list, and construct command lines for an all-against-all PrimerSearch run. Output files are of the format <query name>_vs_<target name>.primersearch Where <query name> and <target name> are the gd.name attributes of the source and target GenomeData objects, respectively. The output file goes in the same location as the source sequence file. 
""" logger.info("Constructing all-against-all PrimerSearch runs " + "for %d objects ...", len(gd_list)) # Create list of command-lines clines = [] for query_gd in gd_list: query_gd.primersearch_output = [] for target_gd in gd_list: if query_gd != target_gd: # Location of PrimerSearch output outdir = os.path.split(query_gd.seqfilename)[0] outfilename = os.path.join(outdir, "%s_vs_%s.primersearch" % (query_gd.name, target_gd.name)) query_gd.primersearch_output.append(outfilename) # Create command-line cline = PrimerSearchCommandline() cline.auto = True cline.seqall = target_gd.seqfilename cline.infile = query_gd.primersearchfilename cline.outfile = outfilename cline.mismatchpercent = options.mismatchpercent clines.append(str(cline) + log_output(os.path.basename(outfilename))) logger.info("... PrimerSearch jobs to run: ...") logger.info("Running:\n" + '\n'.join(clines)) # Parallelise jobs if not options.sge: multiprocessing_run(clines) else: sge_run(clines) # Load in existing PrimerSearch output def load_existing_primersearch_results(gd_list): """ Associates PrimerSearch output files with each GenomeData object and returns a list of (name, filename) tuples for all GenomeData objects """ time_start = time.time() logger.info("Locating existing PrimerSearch input files ...") primersearch_results = [] for gd_obj in gd_list: gd_obj.primersearch_output = [] filedir = os.path.split(gd_obj.seqfilename)[0] primersearch_files = [f for f in os.listdir(filedir) if os.path.splitext(f)[-1] == '.primersearch' and f.startswith(gd_obj.name)] for filename in primersearch_files: logger.info("... found %s for %s ...", filename, gd_obj.name) gd_obj.primersearch_output.append(os.path.join(filedir, filename)) logger.info("... found %d PrimerSearch input files (%.3fs)", len(primersearch_results), time.time() - time_start) # Run primersearch to find whether and where the predicted primers amplify # our negative target (the one we expect exactly one match to) def find_negative_target_products(gd_list): """ We run primersearch using the predicted primers as queries, with options.single_product as the target sequence. We exploit multiprocessing, and use the prescribed number of CPUs. Happily, primersearch accepts multiple sequence FASTA files. """ logger.info("Constructing negative control PrimerSearch runs " + "for %d objects ...", len(gd_list)) # Create list of command-lines clines = [] for query_gd in gd_list: query_gd.primersearch_output = [] outdir = os.path.split(query_gd.seqfilename)[0] outfilename = os.path.join(outdir, "%s_negative_control.primersearch" % query_gd.name) query_gd.primersearch_output.append(outfilename) # Create command-line cline = PrimerSearchCommandline() cline.auto = True cline.seqall = options.single_product cline.infile = query_gd.primersearchfilename cline.outfile = outfilename cline.mismatchpercent = options.mismatchpercent clines.append(str(cline) + log_output(os.path.basename(outfilename))) logger.info("... PrimerSearch jobs to run: ...") logger.info("Running:\n" + '\n'.join(clines)) # Parallelise jobs and run if not options.sge: multiprocessing_run(clines) else: sge_run(clines) # Classify the primers in a list of GenomeData objects according to the # other sequences that they amplify def classify_primers(gd_list): """ Takes a list of GenomeData objects and loops over the primersearch results, loading in the primersearch results and applying them to the associated query GenomeData object. 
If a primer is reported, by PrimerSearch, to amplify a region of the target genome, two changes are made to the corresponding Primer object in the amplifies_object and amplifies_family ad hoc attributes, with the target name and family, respectively, being added to those sets. """ time_start = time.time() logger.info("Classifying primers by PrimerSearch results ...") # Convenience dictionary, keying each GenomeData object by name # We need to load this fully before checking the PrimerSearch output # below. gddict = {} for gd_obj in gd_list: gddict.setdefault(gd_obj.name, gd_obj) # Parse the PrimerSearch output, updating the primer contents of the # appropriate GenomeData object, for each set of results for gd_obj in gd_list: logger.info("... GenomeData for %s ...", gd_obj.name) for filename in gd_obj.primersearch_output: logger.info("... processing %s ...", filename) # Identify the target organism targetname = \ os.path.splitext(os.path.split( filename)[-1])[0].split('_vs_')[-1] # Only classify amplimers to sequences in the gdlist dataset # This avoids problems with recording counts of matches to # sequences that we're not considering, artifically lowering the # specificity counts. if targetname in gddict: # Load the contents of the PrimerSearch output psdata = PrimerSearch.read(open(filename, 'rU')) # We loop over each primer in psdata and, if the primer has a # length this indicates that it amplifies the target. When # this is the case we add the organism name and the family # name to the appropriate primer in the query GenomeData object for pname, pdata in psdata.amplifiers.items(): if len(pdata): # Primer amplifies gd_obj.primers[pname].amplifies_organism.add( targetname) for family in gddict[targetname].families: gd_obj.primers[pname].amplifies_family.add(family) # Consider the negative control primersearch output elif 'negative_control' in filename: # Load PrimerSearch data psdata = PrimerSearch.read(open(filename, 'rU')) # We loop over each primer, and find the number of amplimers. # We note the number of amplimers as an attribute of the primer for pname, pdata in psdata.amplifiers.items(): gd_obj.primers[pname].negative_control_amplimers =\ len(pdata) logger.info("Found %d amplimers in negative control", len(pdata)) logger.info("... processed %d Primersearch results for %s ...", len(gd_obj.primersearch_output), gd_obj.name) logger.info("... processed PrimerSearch results (%.3fs)", time.time() - time_start) # Write analysis data to files def write_report(gd_list, blastfilter): """ Write a tab-separated table of information to the passed filename, summarising the distribution of unique, family-unique, and universal (for this set) primers amongst the GenomeData objects in gdlist. Also write out to this file the locations of the files containing the data used to generate the information. 
In addition, write out the following files in ePrimer3 format: i) <query_name>_specific.eprimer3 - unique primers for each query GenomeData object ii) <family>_specific.eprimer3 - unique primers for each family in the GenomeData set iii) universal_primers.eprimer3 - primers that amplify all members of the GenomeData set """ time_start = time.time() logger.info("Creating summary output ...") # First we need to generate a dictionary of GenomeData object names, keyed # by family families = defaultdict(set) for gd_obj in gd_list: for family in gd_obj.families: families[family].add(gd_obj.name) # Rectify nocds flag cds_overlap = not options.nocds # Check whether output directory exists and, if not, create it if not os.path.isdir(options.outdir): os.mkdir(options.outdir) # Open output file, and write header outfh = open(os.path.join(options.outdir, 'differential_primer_results.tab'), 'w') outfh.write(os.linesep.join([ "# Summary information table", "# Generated by find_differential_primers", "# Columns in the table:", "# 1) Query organism ID", "# 2) Query organism families", "# 3) Count of organism-unique primers", "# 4) Count of universal primers", "# 5) Query sequence filename", "# 6) Query feature filename", "# 7) Query ePrimer3 primers filename"]) + '\n') # Write data for each GenomeData object other_org_count = len(gd_list) - 1 # Amplifications for 'universal' set # We store 'universal' primers in their own list, and family-specific # primers in a dicitonary, keyed by family all_universal_primers = [] family_specific_primers = defaultdict(list) # Loop over each GenomeData object and populate family-specific and # universal primer collections, as well as organism-specific and # summary information for gd_obj in gd_list: logger.info('\n'.join([ "... writing data for %s ..." % gd_obj.name, "... cds_overlap: %s ..." % cds_overlap, "... gc3primevalid: %s ..." % options.filtergc3prime, "... oligovalid: %s ..." % options.hybridprobe, "... blastpass: %s ..." % blastfilter, "... single_product %s ..." % (options.single_product is not None), "... retrieving primer pairs ...", "... finding strain-specific primers for %s ..." % gd_obj.name ])) unique_primers = gd_obj.get_unique_primers(cds_overlap, blastfilter) logger.info("... finding family-specific primers for %s ...", gd_obj.name) family_unique_primers = {} for family in gd_obj.families: logger.info("Checking family: %s" % family) logger.info("families[%s]: %s" % (family, families[family])) family_unique_primers[family] = \ gd_obj.get_family_unique_primers(families[family], cds_overlap, blastfilter) family_specific_primers[family] += family_unique_primers[family] logger.info("family_unique_primers[%s]: %d" % (family, len(family_unique_primers[family]))) logger.info("family_specific_primers[%s]: %d" % (family, len(family_specific_primers[family]))) logger.info("... 
finding universal primers for %s ...", gd_obj.name) universal_primers = \ gd_obj.get_primers_amplify_count(other_org_count, cds_overlap, blastfilter) all_universal_primers.extend(universal_primers) # Write summary data to file outfh.write('\t'.join([gd_obj.name, ','.join(gd_obj.families), str(len(unique_primers)), str(len(universal_primers)), str(gd_obj.seqfilename), str(gd_obj.ftfilename), str(gd_obj.primerfilename)]) + '\n') # Write organism-specific primers to file write_eprimer3(unique_primers, os.path.join(options.outdir, "%s_specific_primers.eprimer3" % gd_obj.name), gd_obj.seqfilename) # Write organism-specific amplicons to file SeqIO.write([p.amplicon for p in unique_primers], os.path.join(options.outdir, "%s_specific_amplicons.fas" % gd_obj.name), 'fasta') outfh.close() # Write universal primers to file write_eprimer3(universal_primers, os.path.join(options.outdir, "universal_primers.eprimer3"), '', append=True) # Write organism-specific amplicons to file SeqIO.write([p.amplicon for p in universal_primers], open(os.path.join(options.outdir, "universal_amplicons.fas"), 'w'), 'fasta') # Write family-specific primers to files outfh = open(os.path.join(options.outdir, 'differential_primer_results-families.tab'), 'w') outfh.write(os.linesep.join([ "# Summary information table", "# Generated by find_differential_primers", "# Columns in the table:", "# 1) Family", "# 2) Count of family-specific primers", "# 3) Family-specific primer file", "# 4) Family-specific amplicon file"]) + '\n') for family, primers in family_specific_primers.items(): outstr = [family, str(len(primers))] fname = os.path.join(options.outdir, "%s_family-specific_primers.eprimer3" % family) write_eprimer3(primers, fname, '') outstr.append(fname) # Write family-specific amplicons to file fname = os.path.join(options.outdir, "%s_family-specific_amplicons.fas" % family) SeqIO.write([p.amplicon for p in primers], open(fname, 'w'), 'fasta') outstr.append(fname) outfh.write('\t'.join(outstr) + '\n') # Being tidy... outfh.close() logger.info("... data written (%.3fs)", time.time() - time_start) # Write ePrimer3 format primer file def write_eprimer3(primers, filename, sourcefilename, append=False): """ Write the passed primers to the passed file, in ePrimer3- compatible format. """ logger.info("Writing %d primer pairs to %s ...", len(primers), filename) # Open file filemode = 'a' if append else 'w' # Do we append or write anew? 
outfh = open(filename, filemode) # Write header outfh.write(os.linesep.join([ "# EPRIMER3 PRIMERS %s " % filename, "# Start Len Tm GC% Sequence", os.linesep]) + '\n') primercount = 0 for primer in primers: primercount += 1 outfh.write("# %s %s\n" % (primer.name, sourcefilename)) outfh.write("%-4d PRODUCT SIZE: %d\n" % (primercount, primer.size)) outfh.write(" FORWARD PRIMER %-9d %-3d %.02f %.02f %s\n" % (primer.forward_start, primer.forward_length, primer.forward_tm, primer.forward_gc, primer.forward_seq)) outfh.write(" REVERSE PRIMER %-9d %-3d %.02f %.02f %s\n" % (primer.reverse_start, primer.reverse_length, primer.reverse_tm, primer.reverse_gc, primer.reverse_seq)) if hasattr(primer, 'internal_start'): outfh.write(" INTERNAL OLIGO %-9d %-3d %.02f %.02f %s\n" % (primer.internal_start, primer.internal_length, primer.internal_tm, primer.internal_gc, primer.internal_seq)) outfh.write(os.linesep * 3) # Be tidy outfh.close() # Run the passed list of command-lines using a multiprocessing.Pool def multiprocessing_run(clines): """ We create a multiprocessing Pool to handle command-lines We pass the (unique) GenomeData object name, and the location of the sequence file. The called function returns the GenomeData name and the corresponding location of the generated feature file. The GenomeData objects are stored in a temporary dictionary, keyed by gd.name, to allow association of the results of the asynchronous pool jobs with the correct GenomeData object """ time_start = time.time() logger.info("Running %d jobs with multiprocessing ...", len(clines)) pool = multiprocessing.Pool(processes=options.cpus) # create process pool completed = [] if options.verbose: callback_fn = multiprocessing_callback else: callback_fn = completed.append for cline in clines: pool.apply_async(subprocess.call, (str(cline), ), {'stderr': subprocess.PIPE, 'shell': sys.platform != "win32"}, callback=callback_fn) pool.close() # Run jobs pool.join() logger.info("Completed:\n" + '\n'.join([str(e) for e in completed])) logger.info("... all multiprocessing jobs ended (%.3fs)", time.time() - time_start) # Add a multiprocessing callback function here def multiprocessing_callback(val): """ A verbose callback function for multiprocessing runs. It uses the return value to indicate run completion or failure. Failure is indicated by a nonzero return from the multiprocessing call. """ if 0 == val: logger.info("... multiprocessing run completed (status: %s) ...", val) else: logger.error("... problem with multiprocessing run (status: %s) ...", val) # Clean output for each GenomeData object in the passed list def clean_output(gd_list): """ Remove .eprimer3, .primers, .prodigalout, and .primersearch files from the same directory as the sequence file for each passed PrimerSearch object """ time_start = time.time() logger.info("Cleaning up output files for GenomeData objects ...") # Loop over each GenomeData object, and remove each output file for gd_obj in gd_list: seqdir = os.path.split(gd_obj.seqfilename)[0] for filename in [f for f in os.listdir(seqdir) if os.path.splitext(f)[-1] in ['.eprimer3', 'primers', '.prodigalout', '.primersearch', '.xml']]: abspath = os.path.join(seqdir, filename) logger.info("... deleting %s ...", abspath) os.remove(abspath) # You can never go back after this point logger.info("... done (%.3fs)", time.time() - time_start) # construct str to concat on end of cline if option.keep_logs is set def log_output(filename): """ predefine file extension and stream to print to. 
if log_dir exists, join it to filename else output to base filename. """ log_extension = ".log" log_out_handle = " 2> " if options.keep_logs and options.log_dir: return log_out_handle + os.path.join(options.log_dir, filename) +\ log_extension elif options.keep_logs: return log_out_handle + filename + log_extension else: return "" # run list of command-line jobs with SGE def sge_run(*args): """ Function intended to compile a passed list of command lines, and run them on SGE. """ raise NotImplementedError ### # SCRIPT if __name__ == '__main__': # Parse cmd-line options, arguments, optparser = parse_cmdline() # Set up logging, and modify loglevel according to whether we need # verbosity or not # err_handler points to sys.stderr # err_handler_file points to a logfile, if named logger = logging.getLogger('find_differential_primers.py') logger.setLevel(logging.DEBUG) err_handler = logging.StreamHandler(sys.stderr) err_formatter = logging.Formatter('%(levelname)s: %(message)s') err_handler.setFormatter(err_formatter) if options.logfile is not None: try: logstream = open(options.logfile, 'w') err_handler_file = logging.StreamHandler(logstream) err_handler_file.setFormatter(err_formatter) err_handler_file.setLevel(logging.INFO) logger.addHandler(err_handler_file) except IOError: logger.error("Could not open %s for logging", options.logfile) sys.exit(1) if options.verbose: err_handler.setLevel(logging.INFO) else: err_handler.setLevel(logging.WARNING) logger.addHandler(err_handler) logger.info('# find_differential_primers.py logfile') logger.info('# Run: %s', time.asctime()) # Report arguments, if verbose logger.info(options) logger.info(arguments) # Create our GenomeData objects. If there is no configuration file # specified, raise an error and exit. Otherwise we end up with a list # of GenomeData objects that are populated only with the data from the # config file if options.filename is None: optparser.print_help() raise IOError("No configuration file specified") gdlist = create_gd_from_config(options.filename) # If the user wants to clean the directory before starting, do so if options.clean or options.cleanonly: clean_output(gdlist) if options.cleanonly: sys.exit(0) # It is possible that the sequence file for a GenomeData object might # be a multi-sequence file describing scaffolds or contigs. We create a # concatenated sequence to facilitate further analyses, if this is the # case. Where a sequence needs to be concatenated, this will affect the # placement of features and/or primers, so any specified files are # reset to None check_single_sequence(gdlist) # What EMBOSS version is available? This is important as the ePrimer3 # command-line changes in v6.6.0, which is awkward for the Biopython # interface. embossversion = \ subprocess.check_output("embossversion", stderr=subprocess.PIPE, shell=sys.platform != "win32").strip() logger.info("EMBOSS version reported as: %s", embossversion) # We need to check the existence of a prescribed feature file and, if # there is not one, create it. We don't bother if the --nocds flag is set. if not (options.nocds or options.noprodigal): logger.info("--nocds option not set: " + "Checking existence of features...") check_ftfilenames(gdlist) elif options.nocds: logger.warning("--nocds option set: Not checking or " + "creating feature files") else: logger.warning("--noprodigal option set: Not predicting new CDS") # We need to check for the existence of primer sequences for the organism # and, if they do not exist, create them using ePrimer3. 
If the # --noprimer3 flag is set, we do not create new primers, but even if the # --noprimersearch flag is set, we still need to check whether the # primer files are valid if not options.noprimer3: logger.info("--noprimer3 flag not set: Predicting new primers") check_primers(gdlist) predict_primers(gdlist, embossversion) else: logger.warning("--noprimer3 flag set: Not predicting new primers") # With a set of primers designed for the organism, we can load them into # the GenomeData object, filtering for those present only in the CDS, # if required. This step is necessary, whether or not a new ePrimer3 # prediction is made. We also filter on GC content at the primer 3' end, # if required. logger.info("Loading primers...") load_primers(gdlist) # At this point, we can check our primers against a prescribed BLAST # database. How we filter these depends on the user's preference. # We screen against BLAST here so that we can flag an attribute on # each primer to say whether or not it passed the BLAST screen. if options.blastdb and not options.useblast: logger.info("--blastdb options set: BLAST screening primers...") blast_screen(gdlist) elif options.useblast: logger.warning("--useblast option set: " + "using existing BLAST results...") else: logger.warning("No BLAST options set, not BLAST screening primers...") # Having a set of (potentially CDS-filtered) primers for each organism, # we then scan these primers against each of the other organisms in the # set, using the EMBOSS PrimerSearch package # (http://embossgui.sourceforge.net/demo/manual/primersearch.html) # Now we have all the data we need to run PrimerSearch in an all-vs-all # manner, so make a cup of tea, put your feet up, and do the comparisons # with EMBOSS PrimerSearch # (http://embossgui.sourceforge.net/demo/manual/primersearch.html) if options.noprimersearch: logger.warning("--noprimersearch flag set: Not running PrimerSearch") # Load the appropriate primersearch output files for each # GenomeData object load_existing_primersearch_results(gdlist) else: logger.info("--noprimersearch flag not set: Running PrimerSearch") # We write input for PrimerSearch ignoring all the filters; this lets # us turn off PrimerSearch and rerun the analysis with alternative # filter settings for gd in gdlist: gd.write_primers() # Run PrimerSearch primersearch(gdlist) # If the --single_product option is specified, we load in the sequence # file to which the passed argument refers, and filter the primer # sequences on the basis of how many amplification products are produced # from these sequences. We expect exactly one amplification product per # primer set, if it's not degenerate on the target sequence # (note that this filter is meaningless for family-specific primers) if options.single_product: find_negative_target_products(gdlist) logger.info("--blastdb options set: BLAST screening primers...") blast_screen(gdlist) # Now we classify the primer sets according to which sequences they amplify if not options.noclassify: logger.info("Classifying primers and writing output files ...") # Classify the primers in each GenomeData object according to # the organisms and families that they amplify, using the # PrimerSearch results. classify_primers(gdlist) # All the data has been loaded and processed, so we can now create our # plaintext summary report of the number of unique, family-unique and # universal primers in each of the organisms write_report(gdlist, (options.blastdb is not None or options.useblast))
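The classification step above leans on Biopython's Bio.Emboss.PrimerSearch parser: PrimerSearch.read() returns a record whose amplifiers attribute maps each primer name to a list of amplimers, and an empty list means the primer pair did not amplify the target. A minimal sketch of that behaviour, independent of the GenomeData machinery (the filename used here is a made-up example):

from Bio.Emboss import PrimerSearch

def count_amplimers(filename):
    """Map each primer name to its number of amplimers in one PrimerSearch output file."""
    with open(filename) as handle:
        record = PrimerSearch.read(handle)
    # record.amplifiers: {primer_name: [Amplifier, ...]}; an empty list means no hit
    return {name: len(amps) for name, amps in record.amplifiers.items()}

counts = count_amplimers("query_vs_target.primersearch")  # hypothetical file name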
Sociologists of education rooted in social realism have for more than a decade argued that knowledge matters in education, that there are different kinds of knowledge, that not all forms of knowledge are equal, and that these differentiations have significant implications for curriculum. While this argument has made an important contribution to both theoretical and policy debate, the implications for curriculum have not been sufficiently addressed. In other words, a theory of differentiated knowledge has not translated into an adequate theory of differentiated curriculum. Drawing on Basil Bernstein's work on knowledge differentiation and Karl Maton's Legitimation Code Theory, this paper offers an empirically derived emerging framework for conceptualizing differentiated higher education curricula, with a particular interest in occupationally and professionally oriented curricula. The framework illuminates the principles underlying curriculum differentiation, thus enabling a richer conversation about epistemological access and progression. Shay, S. 2012. Conceptualizing curriculum differentiation in higher education: a sociology of knowledge point of view. British Journal of Sociology of Education. This is an Accepted Manuscript of an article published by Taylor & Francis in British Journal of Sociology of Education on 27 July 2012, available online: http://www.tandfonline.com/10.1080/01425692.2012.722285.
# -*- coding: utf-8 -*- # These tests don't work at the moment, due to the security_groups multi select not working # in selenium (the group is selected then immediately reset) from textwrap import dedent import fauxfactory import pytest from riggerlib import recursive_update from widgetastic_patternfly import CheckableBootstrapTreeview as Check_tree from cfme import test_requirements from cfme.cloud.provider import CloudProvider from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.gce import GCEProvider from cfme.cloud.provider.openstack import OpenStackProvider from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import providers from cfme.utils import normalize_text from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.generators import random_vm_name from cfme.utils.log import logger from cfme.utils.providers import ProviderFilter from cfme.utils.update import update from cfme.utils.version import LOWEST from cfme.utils.version import VersionPicker from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.meta(server_roles="+automate +notifier"), test_requirements.provision, pytest.mark.tier(2), pytest.mark.provider(gen_func=providers, filters=[ProviderFilter(classes=[CloudProvider, InfraProvider], required_flags=['provision'])], scope="function"), pytest.mark.usefixtures('setup_provider') ] @pytest.fixture() def vm_name(): return random_vm_name(context='prov', max_length=12) @pytest.fixture() def instance_args(request, provider, provisioning, vm_name): """ Fixture to prepare instance parameters for provisioning """ inst_args = dict(template_name=provisioning.get('image', {}).get('image') or provisioning.get( 'template')) # Base instance info inst_args['request'] = { 'notes': 'Testing provisioning from image {} to vm {} on provider {}' .format(inst_args.get('template_name'), vm_name, provider.key), } # Check whether auto-selection of environment is passed auto = False # By default provisioning will be manual try: parameter = request.param auto = parameter except AttributeError: # in case nothing was passed just skip pass if auto: inst_args.update({'environment': {'automatic_placement': auto}}) yield vm_name, inst_args @pytest.fixture() def provisioned_instance(provider, instance_args, appliance): """ Checks provisioning status for instance """ vm_name, inst_args = instance_args collection = appliance.provider_based_collection(provider) instance = collection.create(vm_name, provider, form_values=inst_args) if not instance: raise Exception("instance returned by collection.create is 'None'") yield instance logger.info('Instance cleanup, deleting %s', instance.name) try: instance.cleanup_on_provider() except Exception as ex: logger.warning('Exception while deleting instance fixture, continuing: {}' .format(ex.message)) @pytest.mark.rhel_testing @pytest.mark.parametrize('instance_args', [True, False], ids=["Auto", "Manual"], indirect=True) def test_provision_from_template(provider, provisioned_instance): """ Tests instance provision from template via CFME UI Metadata: test_flag: provision Polarion: assignee: jhenner caseimportance: high casecomponent: Provisioning initialEstimate: 1/4h """ assert provisioned_instance.exists_on_provider, "Instance wasn't provisioned successfully" @pytest.mark.provider([GCEProvider], required_fields=[['provisioning', 'image']], override=True) @pytest.mark.usefixtures('setup_provider') def 
test_gce_preemptible_provision(appliance, provider, instance_args, soft_assert): """ Polarion: assignee: jhenner caseimportance: high casecomponent: Provisioning initialEstimate: 1/6h """ vm_name, inst_args = instance_args inst_args['properties']['is_preemptible'] = True instance = appliance.collections.cloud_instances.create(vm_name, provider, form_values=inst_args) view = navigate_to(instance, "Details") preemptible = view.entities.summary("Properties").get_text_of("Preemptible") soft_assert('Yes' in preemptible, "GCE Instance isn't Preemptible") soft_assert(instance.exists_on_provider, "Instance wasn't provisioned successfully") @pytest.mark.rhv2 @pytest.mark.parametrize("edit", [True, False], ids=["edit", "approve"]) def test_provision_approval(appliance, provider, vm_name, smtp_test, request, edit): """ Tests provisioning approval. Tests couple of things. * Approve manually * Approve by editing the request to conform Prerequisities: * A provider that can provision. * Automate role enabled * User with e-mail set so you can receive and view them Steps: * Create a provisioning request that does not get automatically approved (eg. ``num_vms`` bigger than 1) * Wait for an e-mail to come, informing you that the auto-approval was unsuccessful. * Depending on whether you want to do manual approval or edit approval, do: * MANUAL: manually approve the request in UI * EDIT: Edit the request in UI so it conforms the rules for auto-approval. * Wait for an e-mail with approval * Wait until the request finishes * Wait until an email, informing about finished provisioning, comes. Metadata: test_flag: provision suite: infra_provisioning Polarion: assignee: jhenner caseimportance: high casecomponent: Provisioning initialEstimate: 1/8h """ # generate_tests makes sure these have values # template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore')) # It will provision two of them vm_names = [vm_name + "001", vm_name + "002"] if BZ(1628240).blocks and provider.one_of(CloudProvider): requester = "" else: requester = "[email protected] " collection = appliance.provider_based_collection(provider) inst_args = {'catalog': { 'vm_name': vm_name, 'num_vms': '2' }} vm = collection.create(vm_name, provider, form_values=inst_args, wait=False) try: if provider.one_of(CloudProvider): vm_type = "instance" else: vm_type = "virtual machine" subject = VersionPicker({ LOWEST: "your request for a new vms was not autoapproved", "5.10": "your {} request is pending".format(vm_type) }).pick() wait_for( lambda: len(filter( lambda mail: normalize_text(subject) in normalize_text(mail["subject"]), smtp_test.get_emails())) == 1, num_sec=90, delay=5) subject = VersionPicker({ LOWEST: "virtual machine request was not approved", "5.10": "{} request from {}pending approval".format(vm_type, requester) }).pick() wait_for( lambda: len(filter( lambda mail: normalize_text(subject) in normalize_text(mail["subject"]), smtp_test.get_emails())) == 1, num_sec=90, delay=5) except TimedOutError: subjects = ",".join([normalize_text(m["subject"]) for m in smtp_test.get_emails()]) logger.error("expected: %s, got emails: %s", subject, subjects) raise smtp_test.clear_database() cells = {'Description': 'Provision from [{}] to [{}###]'.format(vm.template_name, vm.name)} provision_request = appliance.collections.requests.instantiate(cells=cells) navigate_to(provision_request, 'Details') if edit: # Automatic approval after editing the request to conform new_vm_name = '{}-xx'.format(vm_name) modifications = { 'catalog': {'num_vms': 
"1", 'vm_name': new_vm_name}, 'Description': 'Provision from [{}] to [{}]'.format(vm.template_name, new_vm_name)} provision_request.edit_request(values=modifications) vm_names = [new_vm_name] # Will be just one now request.addfinalizer( lambda: collection.instantiate(new_vm_name, provider).cleanup_on_provider() ) else: # Manual approval provision_request.approve_request(method='ui', reason="Approved") vm_names = [vm_name + "001", vm_name + "002"] # There will be two VMs request.addfinalizer( lambda: [appliance.collections.infra_vms.instantiate(name, provider).cleanup_on_provider() for name in vm_names] ) subject = VersionPicker({ LOWEST: "your virtual machine configuration was approved", "5.10": "your {} request was approved".format(vm_type) }).pick() try: wait_for( lambda: len(filter( lambda mail: normalize_text(subject) in normalize_text(mail["subject"]), smtp_test.get_emails())) == 1, num_sec=120, delay=5) except TimedOutError: subjects = ",".join([normalize_text(m["subject"]) for m in smtp_test.get_emails()]) logger.error("expected: %s, got emails: %s", subject, subjects) raise smtp_test.clear_database() # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup logger.info('Waiting for vms %s to appear on provider %s', ", ".join(vm_names), provider.key) wait_for( lambda: all(map(provider.mgmt.does_vm_exist, vm_names)), handle_exception=True, num_sec=600) provision_request.wait_for_request(method='ui') msg = "Provisioning failed with the message {}".format(provision_request.row.last_message.text) assert provision_request.is_succeeded(method='ui'), msg subject = VersionPicker({ LOWEST: "your virtual machine request has completed vm {}".format(vm_name), "5.10": "your {} request has completed vm name {}".format(vm_type, vm_name) }).pick() # Wait for e-mails to appear def verify(): return ( len(filter( lambda mail: normalize_text(subject) in normalize_text(mail["subject"]), smtp_test.get_emails())) == len(vm_names) ) try: wait_for(verify, message="email receive check", delay=5) except TimedOutError: subjects = ",".join([normalize_text(m["subject"]) for m in smtp_test.get_emails()]) logger.error("expected: %s, got emails: %s", subject, subjects) raise @pytest.mark.parametrize('auto', [True, False], ids=["Auto", "Manual"]) def test_provision_from_template_using_rest(appliance, request, provider, vm_name, auto): """ Tests provisioning from a template using the REST API. 
Metadata: test_flag: provision, rest Polarion: assignee: pvala casecomponent: Rest caseimportance: high initialEstimate: 1/30h """ if auto: form_values = {"vm_fields": {"placement_auto": True}} else: form_values = None collection = appliance.provider_based_collection(provider) instance = collection.create_rest(vm_name, provider, form_values=form_values) wait_for( lambda: instance.exists, num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name)) VOLUME_METHOD = (""" prov = $evm.root["miq_provision"] prov.set_option( :clone_options, {{ :block_device_mapping => [{}] }}) """) ONE_FIELD = """{{:volume_id => "{}", :device_name => "{}"}}""" @pytest.fixture(scope="module") def domain(request, appliance): domain = appliance.collections.domains.create(name=fauxfactory.gen_alphanumeric(), enabled=True) request.addfinalizer(domain.delete_if_exists) return domain @pytest.fixture(scope="module") def original_request_class(appliance): return (appliance.collections.domains.instantiate(name='ManageIQ') .namespaces.instantiate(name='Cloud') .namespaces.instantiate(name='VM') .namespaces.instantiate(name='Provisioning') .namespaces.instantiate(name='StateMachines') .classes.instantiate(name='Methods')) @pytest.fixture(scope="module") def modified_request_class(request, domain, original_request_class): with pytest.raises(Exception, match="error: Error during 'Automate Class copy'"): # methods of this class might have been copied by other fixture, so this error can occur original_request_class.copy_to(domain) klass = (domain .namespaces.instantiate(name='Cloud') .namespaces.instantiate(name='VM') .namespaces.instantiate(name='Provisioning') .namespaces.instantiate(name='StateMachines') .classes.instantiate(name='Methods')) request.addfinalizer(klass.delete_if_exists) return klass @pytest.fixture(scope="module") def copy_domains(original_request_class, domain): methods = ['openstack_PreProvision', 'openstack_CustomizeRequest'] for method in methods: original_request_class.methods.instantiate(name=method).copy_to(domain) # Not collected for EC2 in generate_tests above @pytest.mark.parametrize("disks", [1, 2]) @pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']], override=True) def test_cloud_provision_from_template_with_attached_disks( appliance, request, instance_args, provider, disks, soft_assert, domain, modified_request_class, copy_domains, provisioning): """ Tests provisioning from a template and attaching disks Metadata: test_flag: provision Polarion: assignee: jhenner caseimportance: high casecomponent: Provisioning initialEstimate: 1/4h """ vm_name, inst_args = instance_args # Modify availiability_zone for Azure provider if provider.one_of(AzureProvider): recursive_update(inst_args, {'environment': {'availability_zone': provisioning("av_set")}}) device_name = "/dev/sd{}" device_mapping = [] with provider.mgmt.with_volumes(1, n=disks) as volumes: for i, volume in enumerate(volumes): device_mapping.append((volume, device_name.format(chr(ord("b") + i)))) # Set up automate method = modified_request_class.methods.instantiate(name="openstack_PreProvision") with update(method): disk_mapping = [] for mapping in device_mapping: disk_mapping.append(ONE_FIELD.format(*mapping)) method.script = VOLUME_METHOD.format(", ".join(disk_mapping)) def _finish_method(): with update(method): method.script = """prov = $evm.root["miq_provision"]""" request.addfinalizer(_finish_method) instance = appliance.collections.cloud_instances.create(vm_name, provider, form_values=inst_args) 
for volume_id in volumes: soft_assert(vm_name in provider.mgmt.volume_attachments(volume_id)) for volume, device in device_mapping: soft_assert(provider.mgmt.volume_attachments(volume)[vm_name] == device) instance.mgmt.delete() # To make it possible to delete the volume wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5) # Not collected for EC2 in generate_tests above @pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']], override=True) def test_provision_with_boot_volume(request, instance_args, provider, soft_assert, modified_request_class, appliance, copy_domains): """ Tests provisioning from a template and attaching one booting volume. Metadata: test_flag: provision, volumes Polarion: assignee: jhenner caseimportance: high casecomponent: Provisioning initialEstimate: 1/4h """ vm_name, inst_args = instance_args image = inst_args.get('template_name') with provider.mgmt.with_volume(1, imageRef=provider.mgmt.get_template_id(image)) as volume: # Set up automate method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest") with update(method): method.script = dedent('''\ $evm.root["miq_provision"].set_option( :clone_options, {{ :image_ref => nil, :block_device_mapping_v2 => [{{ :boot_index => 0, :uuid => "{}", :device_name => "vda", :source_type => "volume", :destination_type => "volume", :volume_size => 1, :delete_on_termination => false }}] }} ) '''.format(volume)) @request.addfinalizer def _finish_method(): with update(method): method.script = """prov = $evm.root["miq_provision"]""" instance = appliance.collections.cloud_instances.create(vm_name, provider, form_values=inst_args) request_description = 'Provision from [{}] to [{}]'.format(image, instance.name) provision_request = appliance.collections.requests.instantiate(request_description) try: provision_request.wait_for_request(method='ui') except Exception as e: logger.info( "Provision failed {}: {}".format(e, provision_request.request_state)) raise msg = "Provisioning failed with the message {}".format( provision_request.row.last_message.text) assert provision_request.is_succeeded(method='ui'), msg soft_assert(instance.name in provider.mgmt.volume_attachments(volume)) soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == "/dev/vda") instance.mgmt.delete() # To make it possible to delete the volume wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5) # Not collected for EC2 in generate_tests above @pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']], override=True) def test_provision_with_additional_volume(request, instance_args, provider, small_template, soft_assert, modified_request_class, appliance, copy_domains): """ Tests provisioning with setting specific image from AE and then also making it create and attach an additional 3G volume. 
Metadata: test_flag: provision, volumes Polarion: assignee: jhenner caseimportance: high casecomponent: Provisioning initialEstimate: 1/4h """ vm_name, inst_args = instance_args # Set up automate method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest") try: image_id = provider.mgmt.get_template(small_template.name).uuid except KeyError: pytest.skip("No small_template in provider data!") with update(method): method.script = dedent('''\ $evm.root["miq_provision"].set_option( :clone_options, {{ :image_ref => nil, :block_device_mapping_v2 => [{{ :boot_index => 0, :uuid => "{}", :device_name => "vda", :source_type => "image", :destination_type => "volume", :volume_size => 3, :delete_on_termination => false }}] }} ) '''.format(image_id)) def _finish_method(): with update(method): method.script = """prov = $evm.root["miq_provision"]""" request.addfinalizer(_finish_method) instance = appliance.collections.cloud_instances.create(vm_name, provider, form_values=inst_args) request_description = 'Provision from [{}] to [{}]'.format(small_template.name, instance.name) provision_request = appliance.collections.requests.instantiate(request_description) try: provision_request.wait_for_request(method='ui') except Exception as e: logger.info( "Provision failed {}: {}".format(e, provision_request.request_state)) raise assert provision_request.is_succeeded(method='ui'), ( "Provisioning failed with the message {}".format( provision_request.row.last_message.text)) instance.mgmt.refresh() prov_instance_raw = instance.mgmt.raw try: assert hasattr(prov_instance_raw, 'os-extended-volumes:volumes_attached') volumes_attached = getattr(prov_instance_raw, 'os-extended-volumes:volumes_attached') assert len(volumes_attached) == 1 volume_id = volumes_attached[0]["id"] assert provider.mgmt.volume_exists(volume_id) volume = provider.mgmt.get_volume(volume_id) assert volume.size == 3 finally: instance.cleanup_on_provider() wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5) if "volume_id" in locals(): # To handle the case of 1st or 2nd assert if provider.mgmt.volume_exists(volume_id): provider.mgmt.delete_volume(volume_id) def test_provision_with_tag(appliance, vm_name, tag, provider, request): """ Tests tagging instance using provisioning dialogs. Steps: * Open the provisioning dialog. * Apart from the usual provisioning settings, pick a tag. * Submit the provisioning request and wait for it to finish. * Visit instance page, it should display the selected tags Metadata: test_flag: provision Polarion: assignee: anikifor casecomponent: Tagging initialEstimate: 1/4h """ inst_args = {'purpose': { 'apply_tags': Check_tree.CheckNode( ['{} *'.format(tag.category.display_name), tag.display_name])}} collection = appliance.provider_based_collection(provider) instance = collection.create(vm_name, provider, form_values=inst_args) request.addfinalizer(instance.cleanup_on_provider) tags = instance.get_tags() assert any( instance_tag.category.display_name == tag.category.display_name and instance_tag.display_name == tag.display_name for instance_tag in tags), ( "{}: {} not in ({})".format(tag.category.display_name, tag.display_name, str(tags)))
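The instance_args fixture above receives its Auto/Manual flag through pytest's indirect parametrization, i.e. the parametrize value is handed to the fixture as request.param rather than to the test function itself. A stripped-down sketch of that pattern, with hypothetical names and no CFME dependencies:

import pytest

@pytest.fixture()
def placement(request):
    # getattr() covers tests that use the fixture without parametrizing it
    auto = getattr(request, "param", False)
    return {"environment": {"automatic_placement": auto}}

@pytest.mark.parametrize("placement", [True, False], ids=["Auto", "Manual"], indirect=True)
def test_placement_form_values(placement):
    assert "environment" in placement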
Hailing from Rome, Nagual has been an original contributor to the Trivmvirate concept, and we’ve had the pleasure of working closely with him in our previous life, so his debut on Trivmvirate couldn’t make us any happier. Nagual’s productions indeed follow our ethos: evolution based on past knowledge. Deeply crafted beats and sound design are coupled with acidiferous synth lines and storytelling arrangements, so as usual with Trivmvirate… you can expect something different!
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Telegram bot to play UNO in group chats # Copyright (c) 2016 Jannes Höke <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from telegram import ReplyKeyboardMarkup from telegram.ext import CommandHandler, RegexHandler from utils import send_async from user_setting import UserSetting from shared_vars import dispatcher from locales import available_locales from internationalization import _, user_locale @user_locale def show_settings(bot, update): chat = update.message.chat if update.message.chat.type != 'private': send_async(bot, chat.id, text=_("Please edit your settings in a private chat with " "the bot.")) return us = UserSetting.get(id=update.message.from_user.id) if not us: us = UserSetting(id=update.message.from_user.id) if not us.stats: stats = '📊' + ' ' + _("Enable statistics") else: stats = '❌' + ' ' + _("Delete all statistics") kb = [[stats], ['🌍' + ' ' + _("Language")]] send_async(bot, chat.id, text='🔧' + ' ' + _("Settings"), reply_markup=ReplyKeyboardMarkup(keyboard=kb, one_time_keyboard=True)) @user_locale def kb_select(bot, update, groups): chat = update.message.chat user = update.message.from_user option = groups[0] if option == '📊': us = UserSetting.get(id=user.id) us.stats = True send_async(bot, chat.id, text=_("Enabled statistics!")) elif option == '🌍': kb = [[locale + ' - ' + descr] for locale, descr in sorted(available_locales.items())] send_async(bot, chat.id, text=_("Select locale"), reply_markup=ReplyKeyboardMarkup(keyboard=kb, one_time_keyboard=True)) elif option == '❌': us = UserSetting.get(id=user.id) us.stats = False us.first_places = 0 us.games_played = 0 us.cards_played = 0 send_async(bot, chat.id, text=_("Deleted and disabled statistics!")) @user_locale def locale_select(bot, update, groups): chat = update.message.chat user = update.message.from_user option = groups[0] if option in available_locales: us = UserSetting.get(id=user.id) us.lang = option _.push(option) send_async(bot, chat.id, text=_("Set locale!")) _.pop() def register(): dispatcher.add_handler(CommandHandler('settings', show_settings)) dispatcher.add_handler(RegexHandler('^([' + '📊' + '🌍' + '❌' + ']) .+$', kb_select, pass_groups=True)) dispatcher.add_handler(RegexHandler(r'^(\w\w_\w\w) - .*', locale_select, pass_groups=True))
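The register() call above routes keyboard replies through a single RegexHandler whose first capture group is the leading emoji, which kb_select then switches on. A small, self-contained check of that pattern (plain re, no Telegram objects involved):

import re

pattern = re.compile('^([' + '📊' + '🌍' + '❌' + ']) .+$')

for reply in ['📊 Enable statistics', '🌍 Language', '❌ Delete all statistics']:
    match = pattern.match(reply)
    assert match is not None
    print(match.group(1))  # prints the emoji that kb_select branches on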
In this Instructable I will be using 3 iPhone apps to create compelling comics. These are the best apps I have found for iPhone, but I am sure there are equally worthy apps in the App Store. I use the Google browser for finding images on the web, but you can use Safari for the same task. You can also use images you have taken yourself with your built-in camera. I create the storyboard with ComicBook, which has a bunch of great features, and use Comic Tycoon to render comic book-type images. There are a bunch of photo editors that can be used to create the perfect image to place in your comics; I just prefer Comic Tycoon's comic toon imager. I have created a bunch of comics with my phone and developed some tips along the way. I had an idea one day on a road trip and decided that it needed to be in comic book form. I jotted down my thoughts until I felt the idea was funny enough to share with my friends. I then figured out the best way to convey my message in as few words as possible. People these days do a lot of their browsing on their phones, so I wanted a comic that was easy to read on a small screen. Ideas can come from anywhere. Maybe you saw a funny post or comment on a social media site, or a news article seemed ridiculous enough to share. My first comics were about our road trip and became a way to share our adventures with our friends. It all just snowballed from there. Telling you where to find funny ideas for your comic is difficult. I believe that once you come up with an idea, other ideas will come easily; that's what happened for me. I ended up creating comics by searching for trending articles. Ben Affleck and Miley Cyrus were trending when I started my comics. I also began creating comics for holidays and for friends' birthdays. Photo 1 - My first comic. This one is pretty rough, but I was able to convey my idea. Photo 2 - Miley Cyrus twerked her way into the headlines. Photos 3, 4 & 5 - Ben Affleck was just announced to play the lead role in the next Batman movie. Once you have a funny idea, it's best to draw it out to see how many frames you will need. I suggest not using more than 5. This is an important step but not really necessary after you get a feel for creating your own cartoons; it really helped me get a feel for layout in the beginning. Once you have the concept of your comic, it is time to add compelling images. I usually start with a header (the top image). The header is not necessary but will give the viewer an idea of your cartoon. I then use 4 frames or fewer to get my idea across. If you intend on creating a whole series of comics, a header is a great way to help your audience identify your comics right away. If you plan on creating just one comic, I suggest not adding a header because you will then have more room to convey your thoughts. When I created my Facebook Trends comic series, I had at least three ideas ready to go, so I knew I wanted a header. You will be able to save your template within the ComicBook app for later use. Look for the briefcase icon at the bottom to save your template. Find an appropriate image and add a title. Create your first comic and save it for your next one. This part of the project is wide open and based on your own tastes. As I stated earlier, you can use a variety of programs to get your desired effect. For this step, I am going to use Comic Tycoon. Choose camera roll or take a new photo. Add the photo and pick your desired effect. Add text and save to your phone or social media sites. I would love to see what you create.
Please share your comics in the comment section below. This is a series I created to make fun of the items I see when scrolling through Facebook. I post them back on Facebook; I can't help myself, haha. As an avid bicyclist, I decided to create a series for my fellow bikers to laugh at. I got great feedback for this one. Colorado recently (January 1st, 2014) allowed everyone to purchase marijuana. I decided to put my spin on the industry with a series of comics. These did quite well. Thank you so much for this tutorial, and although I have Android gear the basics are translatable. You are welcome. Thank you for the comment. Neat techniques, definitely looks like you're having a lot of fun! Thanks for sharing!
from celery.decorators import task import sendgrid from django.conf import settings from micro_blog.models import Subscribers, Post, Category from .send_grid import * import datetime from micro_admin.models import User from django.template import loader @task def create_contact_in_category(category_name, email_address): ''' Checks whether the category exists on not if it does then it will create a contact and save in database under sendgrid_user_id field and then add contact to list. If not create a category list then contact then adds contact to the new category list which eliminates the duplicates, also if any contact or list already exists then it return the object which avoids creating duplicates. Tested Cases: existing category new user -PASS existing category existing user -PASS new category existing user - PASS new catergory new user -PASS ''' contact_lists = get_contact_lists() if category_name in contact_lists: contact_id = create_contact(email_address) CONTACTS_ENDPOINT = "https://api.sendgrid.com/v3/contactdb/lists/" + contact_lists[category_name] + \ "/" + "recipients/" + contact_id response = requests.post(CONTACTS_ENDPOINT, headers=headers) else: contact_list_id = create_contact_list(category_name) contact_id = create_contact(email_address) CONTACTS_ENDPOINT = "https://api.sendgrid.com/v3/contactdb/" + \ "lists/{0}/recipients/{1}".format(contact_list_id, contact_id) response = requests.post(CONTACTS_ENDPOINT, headers=headers) @task def sending_mail_to_subscribers(): blog_posts = Post.objects.filter(published_on=datetime.datetime.today(), status='P') subscribers = Subscribers.objects.filter(blog_post=True) for blog_post in blog_posts: blog_url = 'https://www.micropyramid.com/blog/' + str(blog_post.slug) + '/' for subscriber in subscribers: sg = sendgrid.SendGridClient(settings.SG_USER, settings.SG_PWD) contact_msg = sendgrid.Mail() contact_msg.set_subject("New Blog Post | MicroPyramid") message_reply = 'Hello ' + str(subscriber.email) + ',\n\n' message_reply = '<p>New blog post has been created by ' + str(blog_post.author) + \ ' with the name ' + str(blog_post.title) + ' in the category ' + str(blog_post.category.name) + '.</p>' message_reply = message_reply + 'Regards<br>' message_reply = message_reply + 'The MicroPyramid Team.<br>' contact_msg.set_html(message_reply) contact_msg.set_from("[email protected]") contact_msg.add_to(subscriber.email) sg.send(contact_msg) @task def report_on_blog_post_published_limit(): import datetime date = datetime.date.today() start_week = date - \ datetime.timedelta(date.weekday()) - datetime.timedelta(1) end_week = start_week + datetime.timedelta(6) posts = Post.objects.filter(published_on__range=(start_week, end_week)) blog_posts = Post.objects.filter(created_on__range=(start_week, end_week)) from django.db.models import Sum, Count, Q, F incomplete_categories = Category.objects.filter(blog_posts__published_on__range=(start_week, end_week)).annotate(total_blog_posts=Count('blog_posts')).filter(total_blog_posts__lt=F('min_published_blogs')) categories = Category.objects.filter() incomplete_categories = [] for each in categories: blog_posts = each.blog_posts.filter(published_on__range=(start_week, end_week)) each_dict = {} if blog_posts.count() < each.min_published_blogs: each_dict['category'] = each each_dict['total_blog_posts'] = blog_posts.count() incomplete_categories.append(each_dict) complete_categories = Category.objects.filter(blog_posts__published_on__range=(start_week, 
end_week)).annotate(total_blog_posts=Count('blog_posts')).filter(total_blog_posts__gte=F('min_published_blogs')) users = User.objects.filter(is_admin=True) formatted_start_week = datetime.datetime.strptime( str(start_week), "%Y-%m-%d").strftime("%d-%m-%Y") formatted_end_week = datetime.datetime.strptime( str(end_week), "%Y-%m-%d").strftime("%d-%m-%Y") min_blogposts = 0 for user in users: sg = sendgrid.SendGridClient(settings.SG_USER, settings.SG_PWD) contact_msg = sendgrid.Mail() temp = loader.get_template('admin/blogposts_report.html') rendered = temp.render({'posts': posts, 'blog_posts': blog_posts, 'start_week': start_week, 'end_week': end_week, 'user': user, 'complete_categories': complete_categories, 'incomplete_categories': incomplete_categories}) contact_msg.set_html(rendered) contact_msg.set_text("Report") contact_msg.set_subject('Blog Post Report '+ formatted_start_week + ' - ' + formatted_end_week + ' - MicroPyramid') contact_msg.set_from("[email protected]") contact_msg.add_to(user.email) sg.send(contact_msg)
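report_on_blog_post_published_limit derives its reporting window with a little weekday arithmetic: subtracting date.weekday() lands on Monday, and one more day back gives the Sunday that starts the week, with end_week six days later on Saturday. Pulled out on its own (the function name is mine, not the project's):

import datetime

def report_window(today=None):
    """Return the Sunday-to-Saturday span containing 'today'."""
    today = today or datetime.date.today()
    start_week = today - datetime.timedelta(today.weekday()) - datetime.timedelta(1)
    end_week = start_week + datetime.timedelta(6)
    return start_week, end_week

print(report_window(datetime.date(2018, 7, 4)))  # (2018-07-01, 2018-07-07)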
Webster University team members Brodie Dakin and Tyler Thorman were named 2014-15 Cleveland Golf/Srixon All-American Scholars by the Golf Coaches Association of America (GCAA). The twosome became the first Gorloks in the golf program's history to earn the distinction, and the first in the St. Louis Intercollegiate Athletic Conference since 2009 to be honored. Thorman (Macomb, IL/Macomb HS), who will be a senior in 2015-16, had a team-high 76.6 scoring average for 22.5 rounds in 2014-15 and has maintained a 3.5 cumulative grade point average (GPA) as he works toward a degree in management. Dakin (Macomb, IL/Macomb HS) graduated in May with a B.S. in business administration and had a cumulative 3.72 GPA at Webster. He had a scoring average of 78.0 for 22.5 rounds in 2014-15. Dakin and Thorman were two of only 70 golfers nationwide in NCAA III to be named All-American Scholars. To be considered for NCAA III Cleveland Golf/Srixon All-America Scholar status, an individual must be a junior or senior academically, compete in at least three full years at the collegiate level, participate in 50% of his team's competitive rounds, have a stroke average under 79.0, and maintain a minimum cumulative GPA of 3.2. A recipient must also be of high moral character and be in good standing at his college or university. "Though Tyler and Brodie were the team's scoring leaders this past year, they were also great academic examples of the golf team in 2014-15 in general, as the whole squad maintained a stellar 3.65 cumulative GPA," noted head coach Andrew Belsky. "We will miss Brodie next year but look forward to Tyler and our other returnees, plus our new members of the team, keeping up that academic standard."
########################################################################## # This file is part of WTFramework. # # WTFramework is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # WTFramework is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with WTFramework. If not, see <http://www.gnu.org/licenses/>. ########################################################################## import re """ This module contains functions for working with files. """ import os import tempfile from wtframework.wtf.utils.data_utils import generate_timestamped_string import urllib import codecs def temp_path(file_name=None): """ Gets a temp path. Kwargs: file_name (str) : if file name is specified, it gets appended to the temp dir. Usage:: temp_file_path = temp_path("myfile") copyfile("myfile", temp_file_path) # copies 'myfile' to '/tmp/myfile' """ if file_name is None: file_name = generate_timestamped_string("wtf_temp_file") return os.path.join(tempfile.gettempdir(), file_name) def create_temp_file(file_name=None, string_or_another_file=""): """ Creates a temp file using a given name. Temp files are placed in the Project/temp/ directory. Any temp files being created with an existing temp file, will be overridden. This is useful for testing uploads, where you would want to create a temporary file with a desired name, upload it, then delete the file when you're done. Kwargs: file_name (str): Name of file string_or_another_file: Contents to set this file to. If this is set to a file, it will copy that file. If this is set to a string, then it will write this string to the temp file. Return: str - Returns the file path to the generated temp file. Usage:: temp_file_path = create_temp_file("mytestfile", "The nimble fox jumps over the lazy dog.") file_obj = open(temp_file_path) os.remove(temp_file_path) """ temp_file_path = temp_path(file_name) if isinstance(string_or_another_file, file): # attempt to read it as a file. temp_file = open(temp_file_path, "wb") temp_file.write(string_or_another_file.read()) else: # handle as a string type if we can't handle as a file. temp_file = codecs.open(temp_file_path, "w+", "utf-8") temp_file.write(string_or_another_file) temp_file.close() return temp_file_path def download_to_tempfile(url, file_name=None, extension=None): """ Downloads a URL contents to a tempfile. This is useful for testing downloads. It will download the contents of a URL to a tempfile, which you then can open and use to validate the downloaded contents. Args: url (str) : URL of the contents to download. Kwargs: file_name (str): Name of file. extension (str): Extension to use. Return: str - Returns path to the temp file. """ if not file_name: file_name = generate_timestamped_string("wtf_temp_file") if extension: file_path = temp_path(file_name + extension) else: ext = "" try: ext = re.search(u"\\.\\w+$", file_name).group(0) except: pass file_path = temp_path(file_name + ext) webFile = urllib.urlopen(url) localFile = open(file_path, 'w') localFile.write(webFile.read()) webFile.close() localFile.close() return file_path
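create_temp_file above tests isinstance(string_or_another_file, file), which relies on the Python 2 built-in file type (the module also uses urllib.urlopen, another Python 2 API). A hedged sketch of a duck-typed equivalent that would behave the same way on Python 3 (the helper name is mine, not the framework's):

import codecs

def write_string_or_file(path, string_or_another_file=""):
    if hasattr(string_or_another_file, "read"):
        # treat anything with a read() method as a file-like source
        data = string_or_another_file.read()
        mode = "wb" if isinstance(data, bytes) else "w"
        with open(path, mode) as out:
            out.write(data)
    else:
        # otherwise treat it as a string and write it out as UTF-8
        with codecs.open(path, "w+", "utf-8") as out:
            out.write(string_or_another_file)
    return path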
Volname returns the volume name for a device formatted with an ISO-9660 file system, typically a CD-ROM. The device file name can be specified on the command line. If omitted, it defaults to /dev/cdrom. See the file COPYING and the notes in the source code for details.
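No library binding is shown here, so using volname from a script means shelling out to the command described above; a hedged sketch in Python (the device path is simply the documented default):

import subprocess

label = subprocess.check_output(['volname', '/dev/cdrom']).decode().strip()
print(label)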
""" - `File`: sanitization.py - `Description`: xss challenge 1: How importatn to do sanitization """ import webapp2 from google.appengine.ext.webapp import template import re import os import constants import random import logging import urlparse import urllib class ResultVerifyHandler(webapp2.RequestHandler): def get(self): url = urllib.unquote(self.request.url).decode('utf8') url = url.replace(" ", "") parsed = urlparse.urlparse(url) logging.info('ResultVerifyHandler %s ' % url) user = urlparse.parse_qs(parsed.query)['user'] logging.info('user=%s' % user[0]) ctx = SanitizationHandler.getBreadcrumbContext() ctx['isSucceeded'] = True if(user[0] != 'mary'): ctx['isSucceeded'] = False ctx['userError'] = True self.response.write( template.render( os.path.join( constants.TPL_DIR, 'sanitization.tpl' ), ctx ) ) else: logging.info('ResultVerifyHandler') self.response.write( template.render( os.path.join( constants.TPL_DIR, 'sanitization.tpl' ), ctx ) ) class ReviewFormHandler(webapp2.RequestHandler): """Handler for /xss/sanitization/reviewForm""" def deleteSID(self): # Also delete cookie set by browser's console self.response.delete_cookie( 'sid', path='/xss/sanitization' ) self.response.delete_cookie( 'sid', path='/xss/sanitization/reviewForm' ) def get(self): sid = self.request.cookies.get('sid') if isinstance(sid, basestring) and re.match('^john:[0-9]{1,2}', sid): ctx = SanitizationHandler.getBreadcrumbContext() ctx['sid'] = sid ctx['owner'] = 'john' self.response.write( template.render( os.path.join( constants.TPL_DIR, 'sanitization.tpl' ), ctx ) ) else: # Back to /xss/sanitization if there is no valid cookie # present self.deleteSID() self.redirect('/xss/sanitization') def post(self): sid = self.request.cookies.get('sid') action = self.request.POST['action'] logging.info('sid=%s' % sid) if action == 'logout': # Delete cookie and back to /transaction/sessionHijack self.deleteSID() self.redirect('/xss/sanitization') else: review = self.request.get('comment') ctx = SanitizationHandler.getBreadcrumbContext() ctx['sid'] = sid ctx['review'] = review; logging.info('review=%s' % review) # Transfer on behalf of john user = self.request.get('user') ctx['owner'] = self.request.get('user') self.response.set_cookie( 'user', user, max_age=60, path='/xss/sanitization/reviewForm' ) self.response.headers['X-XSS-Protection'] = '0' self.response.write( template.render( os.path.join( constants.TPL_DIR, 'sanitization.tpl' ), ctx ) ) class SanitizationHandler(webapp2.RequestHandler): """Handler for /xss/sanitization""" @classmethod def getBreadcrumbContext(cls): return { 'breadcrumb': [{ 'name': 'Home', 'href': '/', 'active': False, }, { 'name': 'XSS', 'href': '/xss', 'active': False, }, { 'name': 'Sanitizing Input', 'href': '/xss/sanitization', 'active': True, }], } def get(self): ctx = SanitizationHandler.getBreadcrumbContext() self.response.write( template.render( os.path.join(constants.TPL_DIR, 'sanitization.tpl'), ctx ) ) def post(self): # Get username and password name = self.request.get('name') pw = self.request.get('pw') if (name == 'john' and pw == 'john'): redirectPath = '/xss/sanitization/reviewForm' # Redirect to /xss/sanitization/reviewForm sid = 'john:%d' % random.randint(1, 10) logging.info('sid=%s' % sid) self.response.status = 302 self.response.set_cookie( 'sid', sid, max_age=60, path=redirectPath ) self.redirect(redirectPath) else: passwordIncorrect = True ctx = SanitizationHandler.getBreadcrumbContext() ctx['passwordIncorrect'] = passwordIncorrect self.response.write( template.render( 
os.path.join(constants.TPL_DIR, 'sanitization.tpl'), ctx ) )
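The review handler above deliberately reflects the submitted comment unescaped and sets X-XSS-Protection to 0, which is the point of the challenge. A minimal sketch of the sanitization step it omits, using only the Python 2 standard library this code targets (html.escape is the Python 3 counterpart):

import cgi

def sanitize(comment):
    # escapes &, <, > and, with quote=True, double quotes, so injected
    # markup is rendered as text rather than interpreted by the browser
    return cgi.escape(comment, quote=True)

print(sanitize('<script>alert(document.cookie)</script>'))
# -> &lt;script&gt;alert(document.cookie)&lt;/script&gt;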
Shopping the bulk section is one of my favorite ways to grocery shop, especially in the spice section. The prices are on point and you can get just the right amount of the ingredient you need. One secret of bulk shopping is bringing your own containers. It helps you stay organized and you don't have to unload your groceries from the flimsy provided bags, not to mention it cuts down on waste. I do the majority of my shopping at Sprouts, so this post outlines the best way to bulk shop there, but make sure to check with your grocery store on their policies. You don't have to go out and buy a ton of mason jars or containers for bulk shopping. Save old spice jars, jam jars, pickle jars, or to-go soup containers and have them on hand for bulk shopping and storing at home. Just make sure to give them a good cleaning before you use them. Prior to shopping, run through the checkout line with your containers to have them weighed, taking note of each weight. Or weigh each container with the lid on using your own scale at home beforehand, and write the weight on the lid or side of the jar. Weighing them at home is by far the more efficient option. It saves you a ton of time. I stick a piece of tape on the lid of my containers so I can easily label them at the store (masking tape works great). That way when you've used up your item, you can just replace the tape for the next grocery shopping session. My favorite food storage items are deli containers. Yes, they are plastic, but the pros outweigh the cons in my book. They can be used for storing and giving away leftovers. You can get them on Amazon, save them the next time you get takeout, or find a friend with a restaurant depot account so you can get them for super cheap in bulk. 5 tips to get you started.
import datetime import os import requests import shutil import subprocess import time """ NOTE: This gets used in initial setup of console by the setup program ** Don't add any dependencies on other parts of the console (E.g., no logging """ def StageVersion(vdir, tag, label): logf = open('stagelog.log', 'w') print(datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y"), file=logf) print("Staging " + tag + " in " + vdir + ' because ' + label, file=logf) cwd = os.getcwd() try: os.chdir(vdir) except Exception as E: print("Staging directory {} doesn't exist - try to create it ({})".format(vdir, E)) os.mkdir(vdir) os.chdir(vdir) shutil.rmtree('stagedversion', True) os.mkdir('stagedversion') os.chdir('stagedversion') if tag == '*live*': subprocess.call('wget https://github.com/kevinkahn/softconsole/tarball/master', shell=True, stdout=logf, stderr=logf) subprocess.call('tar -zxls --strip-components=1 < master', shell=True, stdout=logf, stderr=logf) subprocess.call('chown -R pi: *', shell=True, stdout=logf, stderr=logf) os.remove('master') else: subprocess.call('wget https://github.com/kevinkahn/softconsole/archive/' + tag + '.tar.gz', shell=True, stdout=logf, stderr=logf) subprocess.call('tar -zxls --strip-components=1 < ' + tag + '.tar.gz', shell=True, stdout=logf, stderr=logf) sha, cdate = GetSHA(tag) with open('versioninfo', 'w') as f: f.writelines(['{0}\n'.format(tag), '{0}\n'.format(sha), label + ': ' + time.strftime('%m-%d-%y %H:%M:%S\n'), 'Commit of: {0}\n'.format(cdate)]) os.remove(tag + '.tar.gz') # noinspection PyBroadException try: os.chmod('runconsole.py', 0o555) except: pass # noinspection PyBroadException try: os.chmod('console.py', 0o555) except: pass os.chdir(cwd) logf.close() # noinspection PyBroadException def InstallStagedVersion(d): logf = open('stagelog.log', 'a') print("Installing", file=logf) shutil.rmtree(d + '/previousversion', True) # don't keep multiple previous version in tree os.rename(d, d + '.TMP') # move active directory to temp os.rename(d + '.TMP/stagedversion', d) # move new version into place os.rename(d + '.TMP', d + '/previousversion') # save previous version os.chdir(d) if os.path.exists('../homesystem'): # noinspection PyBroadException try: subprocess.call('cp -u -r -p "example configs"/* ../Console', shell=True, stdout=logf, stderr=logf) except: print('Copy of example configs failed on homesystem', file=logf) if not os.path.exists('../Console/termshortenlist'): try: os.rename('example configs/termshortenlist', '../Console/termshortenlist') print("Initialized termshortenlist", file=logf) except: print("Couldn't move termshortenlist in " + str(os.getcwd()), file=logf) print('Process upgrade extras script', file=logf) subprocess.call('sudo bash ' + './scripts/upgradeprep.sh', shell=True, stdout=logf, stderr=logf) print('End upgrade extras script', file=logf) logf.close() os.chdir('..') def GetSHA(tag): r = requests.get('https://api.github.com/repos/kevinkahn/softconsole/tags') d = r.json() sha = 'not found' url = 'none' for i in d: if i['name'] == tag: sha = i['commit']['sha'] url = i['commit']['url'] break if sha == 'not found': return 'no current sha', 'no release info' r = requests.get(url) d = r.json() c = d['commit']['committer']['date'] return sha, c
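A hedged usage sketch of the two functions above, as the setup program might call them (the directory, tag and module name are examples, not taken from the console code):

from githubutil import StageVersion, InstallStagedVersion  # module name assumed

versions_dir = '/home/pi/console'                      # example install location
StageVersion(versions_dir, 'v2.0', 'manual upgrade')   # unpack the release into <dir>/stagedversion
InstallStagedVersion(versions_dir)                     # swap stagedversion in, keep the old tree as previousversion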
A Development Application (DA-2018/158) has been lodged to redevelop 475 – 501 Victoria Avenue. The property is located between the Pacific Highway and the railway, diagonally opposite Chatswood RSL. The proposal is to demolish the buildings on the site and erect two new buildings: one a four-storey commercial building, the other a fifteen-storey hotel. Whilst Chatswood has a variety of accommodation styles, only one traditional-style overnight hotel is listed on the accommodation search engines. The current cross-block pedestrian link is to be retained. An interesting element of the development is a ‘trafficable roof area’ on top of the four-storey building; it is unclear what the actual use of this area would be. The cost of work is given as in excess of $75 million. The application can be viewed online at willoughby.nsw.gov.au. The closing date for comments is 2 July 2018.
from gi import require_version require_version("Gtk", "3.0") from gi.repository import Gtk, GdkPixbuf, GLib, Gdk import helpers from stream_select import FileChooserWindow, NetworkStream class PlaylistManager(Gtk.Window): def __init__(self, playlist, enable_web, transcoder, probe, preferred_transcoder, counter): self.win = Gtk.Window(type=Gtk.WindowType.TOPLEVEL) theme = Gtk.IconTheme.get_default() self.playimage = theme.load_icon("media-playback-start", 16,0) self.store = Gtk.ListStore(GdkPixbuf.Pixbuf, str, str, int, int, str, str, str, str) self.selection_index = None self.create_model(playlist) if counter: self.store[counter][0] = self.playimage self.playlist_counter = None self.play_now = False self.playlist_changed = False self.double_clicked = False self.drag_index = None self.transcoder = transcoder self.number_clicked = 0 self.double_clicked_index = None self.probe = probe self.preferred_transcoder = preferred_transcoder self.enable_web = enable_web self.show_image = True self.sorted_index = None def exit(self, *args): self.win.close() def check_uris(self, play_uri): uri_win = [] item = self.store.get_iter_first() while (item != None): uri_win.append(self.store.get_value(item, 1)) item = self.store.iter_next(item) player_uri = [pl[0] for pl in play_uri] if uri_win != player_uri: self.create_model(play_uri) def main(self): self.win.set_title("Manage playlist") vboxall = Gtk.Box(orientation=Gtk.Orientation.VERTICAL) vboxmanager = Gtk.Box(orientation=Gtk.Orientation.VERTICAL) hboxbuttons = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL) filebutton = Gtk.Button('_Open', use_underline=True) filebutton.connect('clicked', self._on_file_clicked) self.netbutton = Gtk.Button('_Open network stream', use_underline=True) self.netbutton.connect('clicked', self._on_net_stream_clicked) deletebutton = Gtk.Button() deleteButtonImage = Gtk.Image() deleteButtonImage.set_from_stock(Gtk.STOCK_REMOVE, Gtk.IconSize.BUTTON) deletebutton.add(deleteButtonImage) topbutton = Gtk.Button() topButtonImage = Gtk.Image() topButtonImage.set_from_stock(Gtk.STOCK_GOTO_TOP, Gtk.IconSize.BUTTON) topbutton.add(topButtonImage) upbutton = Gtk.Button() upButtonImage = Gtk.Image() upButtonImage.set_from_stock(Gtk.STOCK_GO_UP, Gtk.IconSize.BUTTON) upbutton.add(upButtonImage) bottombutton = Gtk.Button() bottomButtonImage = Gtk.Image() bottomButtonImage.set_from_stock(Gtk.STOCK_GOTO_BOTTOM, Gtk.IconSize.BUTTON) bottombutton.add(bottomButtonImage) downbutton = Gtk.Button() downButtonImage = Gtk.Image() downButtonImage.set_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.BUTTON) downbutton.add(downButtonImage) okbutton = Gtk.Button('_Close', use_underline=True) okbutton.connect("clicked", self.exit) mainmenu = Gtk.Menu() filem = Gtk.MenuItem("Open") self.streamm = Gtk.MenuItem("Open network stream") if not self.enable_web: self.streamm.set_sensitive(False) exit = Gtk.MenuItem("Close") root_menu = Gtk.MenuItem('File') root_menu.set_submenu(mainmenu) menu_bar = Gtk.MenuBar() mainmenu.append(filem) mainmenu.append(self.streamm) mainmenu.append(exit) menu_bar.append(root_menu) sw = Gtk.ScrolledWindow() sw.set_shadow_type(Gtk.ShadowType.IN) sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) self.treeView = Gtk.TreeView(self.store) self.treeView.set_grid_lines(Gtk.TreeViewGridLines.BOTH) self.create_columns(self.treeView) targets = [] self.treeView.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, self.treeView, Gdk.DragAction.MOVE) self.treeView.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.MOVE) 
self.treeView.connect("drag-data-received", self._on_drag_data_received) self.treeView.connect("drag-drop", self._drag_dropped) self.treeView.connect("drag-end", self._drag_finished) self.drag_finished = False sw.add(self.treeView) self.treeView.set_reorderable(True) okbutton.set_margin_right(10) filebutton.set_margin_left(10) deletebutton.set_margin_left(200) hboxbuttons.pack_start(filebutton, False, False, 0) hboxbuttons.pack_start(self.netbutton, False, False, 10) hboxbuttons.pack_start(deletebutton, False, False, 0) hboxbuttons.pack_start(bottombutton, False, False, 10) hboxbuttons.pack_start(downbutton, False, False, 0) hboxbuttons.pack_start(upbutton, False, False, 10) hboxbuttons.pack_start(topbutton, False, False, 0) hboxbuttons.pack_end(okbutton, False, False, 0) vboxmanager.pack_start(sw, True, True, 0) vboxall.pack_start(vboxmanager, True, True, 0) vboxall.pack_end(hboxbuttons, False, False, 10) vboxall.pack_start(menu_bar, False, False, 0) deletebutton.connect("clicked", self._on_delete_clicked) upbutton.connect("clicked", self._on_up_clicked) downbutton.connect("clicked", self._on_down_clicked) topbutton.connect("clicked", self._on_top_clicked) bottombutton.connect("clicked", self._on_bottom_clicked) filem.connect('activate', self._on_file_clicked) self.streamm.connect('activate', self._on_net_stream_clicked) self.treeView.connect("row-activated", self._double_clicked) exit.connect("activate", self.exit) self.win.set_size_request(1200, 700) self.win.add(vboxall) self.win.show_all() def _on_drag_data_received(self, *args): if not self.drag_finished: return else: self.drag_finished = False index_source = self.get_selected_index() self.index_source = index_source self.source_uri = self.store[index_source][1] def _drag_finished(self, *args): if self.index_source is None: return for i, row in enumerate(self.store): if row[1] == self.source_uri: index_drop = i break index_source = self.index_source self.index_source = None self.index_drop = None if self.playlist_counter is not None: if self.playlist_counter == index_source: self.store[index_source][0] = None self.store[index_drop][0] = self.playimage self.sorted_index = index_drop elif index_source < self.playlist_counter and index_drop >= self.playlist_counter: self.store[self.playlist_counter][0] = None self.store[self.playlist_counter-1][0] = self.playimage self.sorted_index = self.playlist_counter -1 elif index_source > self.playlist_counter and index_drop <= self.playlist_counter: self.store[self.playlist_counter][0] = None self.store[self.playlist_counter+1][0] = self.playimage self.sorted_index = self.playlist_counter + 1 popped = self.play_uri.pop(index_source) self.play_uri.insert(index_drop, popped) self.selection_index = index_drop self.playlist_changed = True self.treeView.set_cursor(index_drop) def _drag_dropped(self, *args): self.remove_sort_indicator() self.drag_finished = True self.index_source = None self.index_drop = None def _on_delete_clicked(self, *args): if len(self.store) == 1: self.play_uri = [] self.delete_at_index(0) self.playlist_changed = True return index = self.get_selected_index() if self.playlist_counter is not None: plc = self.playlist_counter + self.number_clicked if plc == index and self.show_image: self.number_clicked += -1 elif index < plc: self.number_clicked += -1 self.delete_at_index(index) if plc == index and self.show_image: self.show_image = False self.selection_index = index - 1 popped = self.play_uri.pop(index) self.playlist_changed = True self.remove_sort_indicator() def 
_on_up_clicked(self, *args): index = self.get_selected_index() if self.playlist_counter is not None: plc = self.playlist_counter + self.number_clicked else: plc = None if not index == 0: if self.playlist_counter is not None: if plc == index: self.number_clicked += -1 elif plc == index - 1: self.number_clicked += 1 self.move_item_up() if plc == index: self.store[index][0] = None self.store[index-1][0] = self.playimage elif plc == index - 1: self.store[index-1][0] = None self.store[index][0] = self.playimage self.selection_index = index - 1 popped = self.play_uri.pop(index) self.play_uri.insert(index-1, popped) self.playlist_changed = True self.remove_sort_indicator() def _on_down_clicked(self, *args): index = self.get_selected_index() if self.playlist_counter is not None: plc = self.playlist_counter + self.number_clicked else: plc = None if not index == len(self.store)-1: if self.playlist_counter is not None: if plc == index: self.number_clicked += 1 elif plc == index + 1: self.number_clicked += -1 self.move_item_down() if plc == index: self.store[index][0] = None self.store[index+1][0] = self.playimage elif plc == index + 1: self.store[index+1][0] = None self.store[index][0] = self.playimage self.selection_index = index + 1 popped = self.play_uri.pop(index) self.play_uri.insert(index+1, popped) self.playlist_changed = True self.remove_sort_indicator() def _on_top_clicked(self, *args): index = self.get_selected_index() if self.playlist_counter is not None: plc = self.playlist_counter + self.number_clicked else: plc = None if not index == 0: if self.playlist_counter is not None: if plc == index: self.number_clicked += -plc elif index > plc: self.number_clicked += 1 self.move_item_top() if plc == index: self.store[plc][0] = None self.store[0][0] = self.playimage elif plc and index > plc: self.store[plc][0] = None self.store[plc+1][0] = self.playimage self.selection_index = 0 popped = self.play_uri.pop(index) self.play_uri.insert(0, popped) self.playlist_changed = True self.remove_sort_indicator() def _on_bottom_clicked(self, *args): index = self.get_selected_index() if self.playlist_counter is not None: plc = self.playlist_counter + self.number_clicked else: plc = None if not index == len(self.store)-1: if self.playlist_counter is not None: if plc == index: self.number_clicked += len(self.store) - plc - 1 elif index < plc: self.number_clicked += -1 self.move_item_bottom() if plc == index: self.store[plc][0] = None self.store[-1][0] = self.playimage elif plc and index < plc: self.store[plc][0] = None self.store[plc-1][0] = self.playimage self.selection_index = len(self.store)-1 popped = self.play_uri.pop(index) self.play_uri.append(popped) self.playlist_changed = True self.remove_sort_indicator() def _double_clicked(self, *args): index = args[1].get_indices()[0] self.double_clicked_index = index self.double_clicked = True self.show_image = True def _on_file_clicked(self, *args): win = FileChooserWindow() ret = win.main() playlist = self.play_uri.copy() if ret: if ret[1] == 1: self.play_now = True self.play_uri = [] for i,u in enumerate(ret[0]): self.play_uri.append(helpers.decode_local_uri(u, self.transcoder, self.probe, self.preferred_transcoder)) else: for i, u in enumerate(ret[0]): self.play_uri.append(helpers.decode_local_uri(u, self.transcoder, self.probe, self.preferred_transcoder)) self.playlist_changed = True self.remove_sort_indicator() def _on_net_stream_clicked(self, *args): win = NetworkStream() ret = win.main() playlist = self.play_uri.copy() if ret: if ret[1] == 1: self.play_now 
= True self.play_uri = [] n = helpers.decode_network_uri(ret[0]) if n: self.play_uri.append(n) else: n = helpers.decode_network_uri(ret[0]) if n: self.play_uri.append(n) self.playlist_changed = True self.remove_sort_indicator() def _on_column_clicked(self, *args): column = args[0] column_index = args[1] index = self.playlist_counter order = column.get_sort_order() self.sort_rows(column, column_index, order) uri_win = [] item = self.store.get_iter_first() while (item != None): uri_win.append(self.store.get_value(item, 1)) item = self.store.iter_next(item) player_uri = [pl[0] for pl in self.play_uri] indices = [] for uri in player_uri: indices.append(uri_win.index(uri)) l = [x for (y,x) in sorted(zip(indices,self.play_uri))] if index is not None: self.store[index][0] = None new_index = indices[index] self.store[new_index][0] = self.playimage self.sorted_index = new_index self.play_uri = l self.playlist_changed = True column.set_sort_indicator(True) def sort_rows(self, column, index, sortorder): """ Sort the rows based on the given column """ self.remove_sort_indicator() rows = [tuple(r) + (i,) for i, r in enumerate(self.store)] if sortorder == Gtk.SortType.ASCENDING: sortorder = Gtk.SortType.DESCENDING reverse = False else: sortorder = Gtk.SortType.ASCENDING reverse = True rows.sort(key=lambda x: x[index], reverse=reverse) self.store.reorder([r[-1] for r in rows]) column.set_sort_order(sortorder) def remove_sort_indicator(self): for k in self.sort_columns: k[0].set_sort_indicator(False) def create_model(self, playlist): self.store.clear() self.play_uri = playlist[:] if playlist: for k in playlist: self.store.append([None] + self.add_to_playlist(k)) if self.selection_index: self.treeView.set_cursor(self.selection_index) self.selection_index = None def create_columns(self, treeView): rendererPixbuf = Gtk.CellRendererPixbuf() pixcolumn = Gtk.TreeViewColumn(None, rendererPixbuf, pixbuf=0) pixcolumn.set_fixed_width(20) pixcolumn.set_resizable(False) treeView.append_column(pixcolumn) self.sort_columns = [[pixcolumn, 0]] rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("URI", rendererText, text=1) column.set_fixed_width(180) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 1) treeView.append_column(column) self.sort_columns.append([column, 0]) rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("Title", rendererText, text=2) column.set_fixed_width(180) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 2) treeView.append_column(column) self.sort_columns.append([column, 0]) rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("Nr", rendererText, text=3) column.set_fixed_width(40) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 3) treeView.append_column(column) self.sort_columns.append([column, 0]) rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("CD", rendererText, text=4) column.set_fixed_width(40) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 4) treeView.append_column(column) self.sort_columns.append([column, 0]) rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("Album", rendererText, text=5) column.set_fixed_width(180) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 5) treeView.append_column(column) self.sort_columns.append([column, 0]) 
rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("Artist", rendererText, text=6) column.set_fixed_width(180) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 6) treeView.append_column(column) self.sort_columns.append([column, 0]) rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("AlbumArtist", rendererText, text=7) column.set_fixed_width(180) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 7) treeView.append_column(column) self.sort_columns.append([column, 0]) rendererText = Gtk.CellRendererText() column = Gtk.TreeViewColumn("Composer", rendererText, text=8) column.set_fixed_width(180) column.set_resizable(True) column.set_clickable(True) column.connect("clicked", self._on_column_clicked, 8) treeView.append_column(column) self.sort_columns.append([column, 0]) def get_selected_index(self): sel = self.treeView.get_selection() model, i = sel.get_selected() res = model[i].path.get_indices() return res[0] def delete_at_index(self, index): for row in self.store: if row.path.get_indices()[0] == index: self.store.remove(row.iter) break def move_item_down(self): selection = self.treeView.get_selection() selections, model = selection.get_selected_rows() for row in selections: if selection.iter_is_selected(row.iter) and row.next: self.store.swap(row.iter, row.next.iter) break def move_item_up(self): selection = self.treeView.get_selection() selections, model = selection.get_selected_rows() for row in selections: if selection.iter_is_selected(row.iter) and row.previous: self.store.swap(row.iter, row.previous.iter) break def move_item_top(self): selection = self.treeView.get_selection() selections, model = selection.get_selected_rows() for row in selections: if selection.iter_is_selected(row.iter): self.store.move_after(row.iter) def move_item_bottom(self): selection = self.treeView.get_selection() selections, model = selection.get_selected_rows() for row in selections: if selection.iter_is_selected(row.iter): self.store.move_before(row.iter) def add_to_playlist(self, data): uri = data[0] metadata = data[4] title = None album = None artist = None albumartist = None composer = None track = None cdnumber = None if metadata: if 'title' in metadata.keys(): title = metadata['title'] if 'artist' in metadata.keys(): artist = metadata['artist'] if 'albumArtist' in metadata.keys(): albumartist = metadata['albumArtist'] if 'composer' in metadata.keys(): composer = metadata['composer'] if 'albumName' in metadata.keys(): album = metadata['albumName'] if 'trackNumber' in metadata.keys(): track = metadata['trackNumber'] if 'cdNumber' in metadata.keys(): cdnumber = metadata['cdNumber'] return [uri, title, track, cdnumber, album, artist, albumartist, composer]
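For context, here is a minimal sketch of how this window might be opened by the owning player application. The playlist entry shape mirrors what add_to_playlist expects (index 0 is the URI, index 4 a metadata dict); the file paths, transcoder and probe values are placeholders rather than the objects the real helpers module produces, and a Gtk main loop is assumed.

# Hypothetical caller -- entry shape and constructor arguments follow
# PlaylistManager.__init__ and add_to_playlist above; the values are
# placeholders for illustration only.
playlist = [
    ['file:///music/track1.ogg', None, None, None,
     {'title': 'Track 1', 'artist': 'Somebody', 'albumName': 'Demo'}],
    ['file:///music/track2.ogg', None, None, None, {}],
]

manager = PlaylistManager(
    playlist,
    enable_web=True,             # allow "Open network stream"
    transcoder='ffmpeg',         # placeholder; normally chosen by the app
    probe='ffprobe',             # placeholder
    preferred_transcoder=None,
    counter=None,                # index of the row currently playing, if any
)
manager.main()                   # build and show the window

# The owning application would then watch manager.playlist_changed and
# manager.play_uri to pick up reordering, deletions and newly added streams.
Gtk.main()                       # Gtk is imported at the top of this module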
The tour embraces all three Baltic countries, Estonia, Latvia and Lithuania, each of which has its own distinctive cultural tradition and historical heritage. The three capitals are renowned for their charming old town areas, full of architectural gems. The captivating countryside is worth discovering for its quaint towns and unique landscapes. Special attention has been paid to the choice of hotels; they are located in or close to the old parts of the cities. Private transfers and walking tours with local guides are included. NOTE: This is an independent tour but includes private transfers and local English guides for city tours, as outlined below. Arrive in Tallinn Airport or harbor. Meet and greet by your local driver, transfer by private car to your hotel. Overnight at a four- or five-star hotel in the Old City of Tallinn. After breakfast you will explore the city’s most important sights on a 3-hour walking tour of the old parts of the city. You have time for a stroll to enjoy the wealth of its medieval architecture. From Toompea Castle Hill, you have a magnificent view over the city and the Baltic Sea. Overnight in Tallinn. After breakfast, you travel for 5 hours by coach along the famous Via Baltica from Tallinn to Riga—the capital of Latvia. In August 1989 the Estonians, Latvians and Lithuanians formed a human chain through all the three countries, and Via Baltica came to symbolize the road to freedom. Arrive in Riga in the afternoon/evening. Overnight at a four- or five-star hotel in the city center. After breakfast this morning, a three-hour walking tour introduces you to Riga’s historic sights, which display a wide variety of architectural styles. Overnight in Riga. After breakfast this morning, you travel 5 hours by coach to Vilnius, where you’ll stay for 2 nights in a four- or five-star hotel located in the city center. In the morning after breakfast you’ll get a close view of the monumental sights of Vilnius, all bearing witness to the rich history of the city. They include the Cathedral, the University, which dates back to the 1570’s, and the white church of Saints Peter and Paul. Overnight in Vilnius. After breakfast, transfer by private car to Vilnius airport for your flight home. NOTE: Price does not include extra activities and sights. An optional ferry can be added from/to Helsinki & Stockholm, plus St. Petersburg tour extensions. VISA NOTE: Nationals of some countries require a visa for entry to one or all three of the Baltic States. Transfers between the Baltic States are not guided but performed by a driver only, therefore it is of extreme importance that you check what the current visa requirements are at the time of booking. Child discount available! Children under 12 years sharing room with 2 adults in extra bed: on request.
# -*- coding: utf-8 -*-
import bf_logging


class BetfairError(Exception):
    def __init__(self, message):
        bf_logging.main_logger.exception(message)
        # pass
        pass


class BetfairLoginError(BetfairError):
    def __init__(self, response, data):
        self.response = response
        self.message = data.get('loginStatus', 'UNKNOWN')
        super(BetfairLoginError, self).__init__(self.message)


class BetfairAuthError(BetfairError):
    def __init__(self, response, data):
        self.response = response
        self.message = data.get('error', 'UNKNOWN')
        super(BetfairAuthError, self).__init__(self.message)


class BetfairAPIError(BetfairError):
    def __init__(self, response, data):
        self.response = response
        try:
            error_data = data['error']['data']['APINGException']
            self.message = error_data.get('errorCode', 'UNKNOWN')
            self.details = error_data.get('errorDetails')
        except KeyError:
            self.message = 'UNKNOWN'
            self.details = None
        super(BetfairAPIError, self).__init__(self.message)
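A short usage sketch for the exception hierarchy above. The fake response object and JSON body are illustrative stand-ins for what a Betfair client would pass in; only the constructor signatures shown above are assumed.

# Illustrative only: a fake parsed response standing in for Betfair's JSON.
class FakeResponse(object):
    status_code = 200

data = {'loginStatus': 'INVALID_USERNAME_OR_PASSWORD'}

try:
    if data.get('loginStatus') != 'SUCCESS':
        raise BetfairLoginError(FakeResponse(), data)
except BetfairLoginError as exc:
    # The base class has already logged the failure via bf_logging;
    # exc.message carries the loginStatus code, exc.response the raw response.
    print('Login failed:', exc.message)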
Overall, catches of flounder and red drum are good in local estuaries. On the south end, Capt. Mike McDonald of Gul-R-Boy Guide Service in Georgetown had a solid trip on Winyah Bay in the rain on Monday, catching seven red drum including a five-pounder plus a black drum while fishing cut shrimp on floats in the grass. Capt. Jason Burton of Fly Girl Charters has had good success this week with flounder and reds in Murrells Inlet. Even on Wednesday after the cold front passed the previous evening, Burton produced 18 flounder with six keepers plus four reds on a falling tide using mud minnows. In the Little River area, Capt. Mark Dickson of Shallow-Minded Inshore Charters has caught flounder in Cherry Grove Inlet and reds with a few trout in the Sunset Beach area using mud minnows. Dickson had a great trip Monday, catching 18 reds, and caught 10 flounder with two keepers in less than two hours of fishing on Saturday in Cherry Grove.

Best bets: Bluefish, Spanish mackerel, whiting, black sea bass, sheepshead, flounder, black drum, croaker. On Sunday, boats trolling near Paradise Reef out of Murrells Inlet encountered some very good news for the spring fishing season when they caught Spanish mackerel, including some keepers over the 12-inch minimum size limit. Bluefish are also on the scene in the inshore waters and the artificial reefs are producing black sea bass, weakfish and sheepshead. A few Spanish have also been caught off Grand Strand piers over the past week, but whiting and blues are the top catch. Size of the whiting is improving, with a few over one pound being caught off the Apache Pier. Also look for flounder, black drum and croaker off the piers. The surface ocean water temperature was 63.59 degrees Thursday at 4 p.m. at 2nd Ave. Pier in Myrtle Beach.

Best bets: Wahoo, blackfin tuna, dolphin, black sea bass, vermilion snapper, porgy, amberjack, triggerfish. Offshore trolling action kicked up a notch – with a few dolphin showing up in the catches – last weekend before the latest cold front rolled through on Tuesday. Capt. Derek Tressinger of Ocean Isle Fishing Center reports Get Floor’d out of Ocean Isle Beach found a weedline offshore of the Blackjack Hole and caught a 50-pound wahoo to go with a 25-pound class dolphin. Get Floor’d moved back inshore to a depth of 160-220 feet and landed a blackfin tuna to complete the Carolina Slam. Water temperatures were in the lower 70s near the break. “Once the water temperature hits 74 (the dolphin) will really start showing up,” said Tressinger. Bottom fishing is producing black sea bass, vermilion snapper, triggerfish, porgy and amberjack. In less than two weeks, on May 1, the annual shallow-water grouper spawning season closure will be over. Red snapper must be released until further notice in the South Atlantic region.

Best bets: Bream, crappie, catfish, bass. Reporting from the water, Jamie Dunn of Fisherman’s Headquarters in Conway was in the middle of a great trip for catfish on Thursday afternoon. Dunn and his boys had caught 15 catfish and counting in three hours on the Great Pee Dee River, using marinated cut eel for bait. Dunn reports good catches of bream before the cold front in depths of 2-3 feet of water with fish hitting both crickets and worms. Top areas for bream and shellcracker are the Ricefields area and the Waccamaw between Bucksport and Conway. Bass action is good, with many fish bedding, or close to being on the beds.
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 21:15
from __future__ import unicode_literals

from django.db import migrations, models
import media.models
import videokit.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MediaItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video', videokit.models.VideoField(duration_field='video_duration', height_field='video_height', mimetype_field='video_mimetype', rotation_field='video_rotation', thumbnail_field='video_thumbnail', upload_to=media.models.upload_to, width_field='video_width')),
                ('video_width', models.IntegerField(blank=True, null=True)),
                ('video_height', models.IntegerField(blank=True, null=True)),
                ('video_rotation', models.FloatField(blank=True, null=True)),
                ('video_mimetype', models.CharField(blank=True, max_length=32, null=True)),
                ('video_duration', models.IntegerField(blank=True, null=True)),
                ('video_thumbnail', models.ImageField(blank=True, null=True, upload_to=b'')),
                ('video_mp4', videokit.models.VideoSpecField(blank=True, null=True, upload_to=b'')),
                ('video_ogg', videokit.models.VideoSpecField(blank=True, null=True, upload_to=b'')),
            ],
        ),
    ]
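For readers who want to see what this initial migration corresponds to, here is a plausible reconstruction of the media/models.py it was generated from. The videokit field arguments are copied from the migration itself; the body of upload_to and any options not visible in the migration are assumptions.

# Reconstructed sketch of media/models.py, inferred from the migration above.
from django.db import models
from videokit.models import VideoField, VideoSpecField


def upload_to(instance, filename):
    # Placeholder: the real project defines its own upload path logic.
    return 'media/%s' % filename


class MediaItem(models.Model):
    video = VideoField(
        upload_to=upload_to,
        width_field='video_width',
        height_field='video_height',
        rotation_field='video_rotation',
        mimetype_field='video_mimetype',
        duration_field='video_duration',
        thumbnail_field='video_thumbnail',
    )
    video_width = models.IntegerField(null=True, blank=True)
    video_height = models.IntegerField(null=True, blank=True)
    video_rotation = models.FloatField(null=True, blank=True)
    video_mimetype = models.CharField(max_length=32, null=True, blank=True)
    video_duration = models.IntegerField(null=True, blank=True)
    video_thumbnail = models.ImageField(null=True, blank=True)
    video_mp4 = VideoSpecField(null=True, blank=True)
    video_ogg = VideoSpecField(null=True, blank=True)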
The CFA Institute Research Challenge is a worldwide competition through which CFA Institute sets up the proper environment for undergraduates and graduates enrolled in a university program to compete in delivering equity research coverage of a publicly traded subject company. The winning team at the local level goes on to compete at the regional level, and the regional winners meet at the global level. More about the CFA Institute Research Challenge. CFA Society Romania started organizing the Research Challenge in 2012 with the participation of universities from Romania and the Republic of Moldova.
from __future__ import absolute_import, unicode_literals

import os
import sys
from abc import ABCMeta, abstractmethod

from six import add_metaclass

from virtualenv.util.six import ensure_text

from .activator import Activator

if sys.version_info >= (3, 7):
    from importlib.resources import read_binary
else:
    from importlib_resources import read_binary


@add_metaclass(ABCMeta)
class ViaTemplateActivator(Activator):
    @abstractmethod
    def templates(self):
        raise NotImplementedError

    def generate(self, creator):
        dest_folder = creator.bin_dir
        replacements = self.replacements(creator, dest_folder)
        generated = self._generate(replacements, self.templates(), dest_folder, creator)
        if self.flag_prompt is not None:
            creator.pyenv_cfg["prompt"] = self.flag_prompt
        return generated

    def replacements(self, creator, dest_folder):
        return {
            "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
            "__VIRTUAL_ENV__": ensure_text(str(creator.dest)),
            "__VIRTUAL_NAME__": creator.env_name,
            "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
            "__PATH_SEP__": ensure_text(os.pathsep),
        }

    def _generate(self, replacements, templates, to_folder, creator):
        generated = []
        for template in templates:
            text = self.instantiate_template(replacements, template, creator)
            dest = to_folder / self.as_name(template)
            # use write_bytes to avoid platform specific line normalization (\n -> \r\n)
            dest.write_bytes(text.encode("utf-8"))
            generated.append(dest)
        return generated

    def as_name(self, template):
        return template.name

    def instantiate_template(self, replacements, template, creator):
        # read content as binary to avoid platform specific line normalization (\n -> \r\n)
        binary = read_binary(self.__module__, str(template))
        text = binary.decode("utf-8", errors="strict")
        for key, value in replacements.items():
            value = self._repr_unicode(creator, value)
            text = text.replace(key, value)
        return text

    @staticmethod
    def _repr_unicode(creator, value):
        # by default we just let it be unicode
        return value
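A minimal illustration of what a concrete activator has to supply, namely the iterable of template names that ship next to its module. This is a sketch under assumptions, not virtualenv's real bash or PowerShell activator.

# Hypothetical subclass -- for illustration only.
from pathlib import Path


class ExampleShellActivator(ViaTemplateActivator):
    def templates(self):
        # Template files are looked up via read_binary(self.__module__, name),
        # so they must ship as package data alongside this module; placeholders
        # such as __VIRTUAL_ENV__ inside the file are replaced by replacements().
        yield Path("activate.example")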
Before we got Peru, Fred and I decided how we’d surprise our kids. We were going to send a photo of her every day leading up to the Monday they were all going to be introduced to her. Of course, things don’t always go as planned. I sent one photo and that was it. Fred sent 3? I can’t remember. The day we introduced her to the kids, as they sat in the driveway, we sent a photo of her face to all of them. Actually, let me go back a few days to when we were delivering bags of groceries to families in need. Fred, Doodle, and I were just leaving a house when Fred mentioned that the homeowner’s dog could probably smell the treats that he’d had in his coat pocket for his dog. Doodle looked at me and said, “Fred has a dog?” I had hoped he hadn’t heard, but yeah, he had! He had to keep a secret for two days. I wasn’t worried about it. He’s good about those kinds of things. A boy and his dog. Peru meeting the entire family today. Bulldoggin' with Ryan and Peru. Peru was more than a little loved on by all of the kids. Fred’s oldest (his daughter) and my oldest son both kind of wanted the same kind of dog. I had really wanted to get Fred a greyhound, but because he’s always doing so much for everyone, I had hoped he could have a dog of his own choosing. His daughter had wanted a Boxer and mine had wanted a Bulldog, so an American Bulldog mix was perfect! The kids loved on her and took pictures of her. We took pictures of them with her. After a while we took her to the park so the kids could all run around with her. Let me tell you, that park is her happy place and she loves playing with the kids. She sleeps in Kgirl’s room a lot of the time. She came over to our house for the day, once so far, and she did rather well. Peru only wanted to eat the cats once or twice. We’re hoping to desensitize her to the cats. As mischievous as Peru is, we love her to death. Have you ever surprised your family with a pet?
#!/usr/bin/env python3 import numpy as np import pyqtgraph.opengl as gl from pyqtgraph.Qt import QtCore w = gl.GLViewWidget() w.opts['distance'] = 20 g = gl.GLGridItem() w.addItem(g) ## ## First example is a set of points with pxMode=False ## These demonstrate the ability to have points with real size down to a very small scale ## pos = np.empty((53, 3)) size = np.empty((53)) color = np.empty((53, 4)) pos[0] = (1,0,0); size[0] = 0.5; color[0] = (1.0, 0.0, 0.0, 0.5) pos[1] = (0,1,0); size[1] = 0.2; color[1] = (0.0, 0.0, 1.0, 0.5) pos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5) z = 0.5 d = 6.0 for i in range(3,53): pos[i] = (0,0,z) size[i] = 2./d color[i] = (0.0, 1.0, 0.0, 0.5) z *= 0.5 d *= 2.0 sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False) sp1.translate(5,5,0) w.addItem(sp1) ## ## Second example shows a volume of points with rapidly updating color ## and pxMode=True ## pos = np.random.random(size=(100000,3)) pos *= [10,-10,10] pos[0] = (0,0,0) color = np.ones((pos.shape[0], 4)) d2 = (pos**2).sum(axis=1)**0.5 size = np.random.random(size=pos.shape[0])*10 sp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size) phase = 0. w.addItem(sp2) ## ## Third example shows a grid of points with rapidly updating position ## and pxMode = False ## pos3 = np.zeros((100, 100, 3)) pos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1] pos3 = pos3.reshape(10000, 3) d3 = (pos3**2).sum(axis=1)**0.5 sp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False) w.addItem(sp3) def update(): ## update volume colors global phase, sp2, d2 s = -np.cos(d2*2+phase) color = np.empty((len(d2),4), dtype=np.float32) color[:, 3] = np.clip(s * 0.1, 0, 1) color[:, 0] = np.clip(s * 3.0, 0, 1) color[:, 1] = np.clip(s * 1.0, 0, 1) color[:, 2] = np.clip(s ** 3, 0, 1) sp2.setData(color=color) phase -= 0.1 ## update surface positions and colors global sp3, d3, pos3 z = -np.cos(d3*2+phase) pos3[:, 2] = z color = np.empty((len(d3),4), dtype=np.float32) color[:, 3] = 0.3 color[:, 0] = np.clip(z * 3.0, 0, 1) color[:, 1] = np.clip(z * 1.0, 0, 1) color[:, 2] = np.clip(z ** 3, 0, 1) sp3.setData(pos=pos3, color=color) t = QtCore.QTimer() t.timeout.connect(update) class MyPlugin: def __init__(self, project, plugin_position): self.name = 'OpenGL Showcase' self.widget = w self.widget.setWhatsThis("This is just a quick showcase that 3D visualization is possible for future developers " "looking to extend the application's functionality") t.start(50) def run(self): t.start(50) def stop(self): t.stop()
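The module above builds its widgets at import time, so a QApplication must already exist before it is imported. A hypothetical standalone smoke test might look like the following; the module name opengl_showcase and the None placeholders for the host's (project, plugin_position) arguments are assumptions, since the real host application supplies both.

# Standalone smoke test for the plugin above -- illustration only.
import sys
from pyqtgraph.Qt import QtGui

app = QtGui.QApplication(sys.argv)   # must exist before any widgets are created
import opengl_showcase              # assumed module name for the file above

plugin = opengl_showcase.MyPlugin(project=None, plugin_position=0)
plugin.widget.show()                # the GLViewWidget built at import time
plugin.run()                        # starts the 50 ms update timer
sys.exit(app.exec_())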
Puremagnetik has announced the release of 3 Free Sound Sets and HarvestPak, a collection of patches programmed from The Harvestman synthesizer modules. Puremagnetik has just added 3 free new instruments to their PakBytes library. PakBytes are quick downloads that include handpicked instruments from Puremagnetik’s Micropak catalog. They are available in Ableton Live, Logic and Kontakt formats. HarvestPak – a collection of patches programmed from The Harvestman synthesizer modules. HackShop – a glitchy collection of basses, pads and percussion elements sampled from circuit bent toys and drum machines. Artifact – a library of sounds created from digital anomalies and malfunctions. PakBytes are free to download and require only a valid email address by registering with Puremagnetik. Puremagnetik has also released HarvestPak, The Sounds of The Harvestman. To celebrate the fall season, Puremagnetik has just released HarvestPak – a comprehensive library built from The Harvestman synthesizer modules. HarvestPak comes loaded with 34 multisampled instruments that explore the unique character of The Harvestman analog/digital hybrid machines. HarvestPak is available in Live 8, Kontakt 3/4 and Logic 8/9 formats. It includes a unique assortment of bass, atmospheric, percussive, lead, “chaotic” and keyboard programs. 34 multisampled instruments sourced directly from Harvestman synthesizer modules. A library of basses, leads, keys, percussives, pads and “chaotic” patches. Over 500 expertly recorded 48k 24-bit samples. Completely integrated Ableton Live effects racks with advanced Macro mapping. Native Kontakt effects and custom KSP GUI for easy editing. The HarvestPak is now available to download for Puremagnetik subscribers.
# -*- coding: utf-8 -*- from django.db import models, migrations import sa_api_v2.models.caching import django.contrib.gis.db.models.fields import sa_api_v2.models.core import django.utils.timezone import sa_api_v2.models.mixins from django.conf import settings import sa_api_v2.apikey.models import django.core.files.storage import django.core.validators class Migration(migrations.Migration): dependencies = [ ('auth', '0001_initial'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])), ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)), ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)), ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ], options={ 'db_table': 'auth_user', }, bases=(sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='Action', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)), ('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)), ('action', models.CharField(default='create', max_length=16)), ('source', models.TextField(null=True, blank=True)), ], options={ 'ordering': ['-created_datetime'], 'db_table': 'sa_api_activity', }, bases=(sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='ApiKey', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('key', models.CharField(default=sa_api_v2.apikey.models.generate_unique_api_key, unique=True, max_length=32)), ('logged_ip', models.IPAddressField(null=True, blank=True)), ('last_used', models.DateTimeField(default=django.utils.timezone.now, blank=True)), ], options={ 'db_table': 'apikey_apikey', }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model), ), migrations.CreateModel( name='Attachment', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)), ('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)), ('file', 
models.FileField(storage=django.core.files.storage.FileSystemStorage(), upload_to=sa_api_v2.models.core.timestamp_filename)), ('name', models.CharField(max_length=128, null=True, blank=True)), ], options={ 'db_table': 'sa_api_attachment', }, bases=(sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='DataIndex', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('attr_name', models.CharField(max_length=100, verbose_name='Attribute name', db_index=True)), ('attr_type', models.CharField(default='string', max_length=10, verbose_name='Attribute type', choices=[('string', 'String')])), ], options={ }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model), ), migrations.CreateModel( name='DataSet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('display_name', models.CharField(max_length=128)), ('slug', models.SlugField(default='', max_length=128)), ('owner', models.ForeignKey(on_delete=models.CASCADE, related_name='datasets', to=settings.AUTH_USER_MODEL)), ], options={ 'db_table': 'sa_api_dataset', }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='DataSetPermission', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)), ('can_retrieve', models.BooleanField(default=True)), ('can_create', models.BooleanField(default=False)), ('can_update', models.BooleanField(default=False)), ('can_destroy', models.BooleanField(default=False)), ('priority', models.PositiveIntegerField(blank=True)), ('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.DataSet')), ], options={ }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='DataSnapshot', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('json', models.TextField()), ('csv', models.TextField()), ], options={ 'db_table': 'sa_api_datasnapshot', }, bases=(models.Model,), ), migrations.CreateModel( name='DataSnapshotRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('submission_set', models.CharField(max_length=128)), ('include_private', models.BooleanField(default=False)), ('include_invisible', models.BooleanField(default=False)), ('include_submissions', models.BooleanField(default=False)), ('requested_at', models.DateTimeField(auto_now_add=True)), ('status', models.TextField(default='', blank=True)), ('fulfilled_at', models.DateTimeField(null=True)), ('guid', models.TextField(default='', unique=True, blank=True)), ('dataset', models.ForeignKey(on_delete=models.CASCADE, to='sa_api_v2.DataSet')), ('requester', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ], options={ 'db_table': 'sa_api_datasnapshotrequest', }, bases=(models.Model,), ), migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name of the group to which users with 
this group belong? For example: "judges", "administrators", "winners", ...', max_length=32)), ('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='groups', to='sa_api_v2.DataSet', help_text='Which dataset does this group apply to?')), ('submitters', models.ManyToManyField(related_name='_groups', to=settings.AUTH_USER_MODEL, blank=True)), ], options={ 'db_table': 'sa_api_group', }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model), ), migrations.CreateModel( name='GroupPermission', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)), ('can_retrieve', models.BooleanField(default=True)), ('can_create', models.BooleanField(default=False)), ('can_update', models.BooleanField(default=False)), ('can_destroy', models.BooleanField(default=False)), ('priority', models.PositiveIntegerField(blank=True)), ('group', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.Group')), ], options={ }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='IndexedValue', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('value', models.CharField(max_length=100, null=True, db_index=True)), ('index', models.ForeignKey(on_delete=models.CASCADE, related_name='values', to='sa_api_v2.DataIndex')), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='KeyPermission', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)), ('can_retrieve', models.BooleanField(default=True)), ('can_create', models.BooleanField(default=False)), ('can_update', models.BooleanField(default=False)), ('can_destroy', models.BooleanField(default=False)), ('priority', models.PositiveIntegerField(blank=True)), ('key', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.ApiKey')), ], options={ }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='Origin', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('pattern', models.CharField(help_text='The origin pattern, e.g., https://*.github.io, http://localhost:*, http*://map.phila.gov', max_length=100)), ('logged_ip', models.IPAddressField(null=True, blank=True)), ('last_used', models.DateTimeField(default=django.utils.timezone.now, blank=True)), ('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='origins', blank=True, to='sa_api_v2.DataSet')), ], options={ 'db_table': 'cors_origin', }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model), ), migrations.CreateModel( name='OriginPermission', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". 
Leave blank to refer to all things.', max_length=128, blank=True)), ('can_retrieve', models.BooleanField(default=True)), ('can_create', models.BooleanField(default=False)), ('can_update', models.BooleanField(default=False)), ('can_destroy', models.BooleanField(default=False)), ('priority', models.PositiveIntegerField(blank=True)), ('origin', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.Origin')), ], options={ }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='SubmittedThing', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)), ('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)), ('data', models.TextField(default='{}')), ('visible', models.BooleanField(default=True, db_index=True)), ], options={ 'db_table': 'sa_api_submittedthing', }, bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model), ), migrations.CreateModel( name='Submission', fields=[ ('submittedthing_ptr', models.OneToOneField(on_delete=models.CASCADE, parent_link=True, auto_created=True, primary_key=True, serialize=False, to='sa_api_v2.SubmittedThing')), ('set_name', models.TextField(db_index=True)), ], options={ 'ordering': ['-updated_datetime'], 'db_table': 'sa_api_submission', }, bases=('sa_api_v2.submittedthing',), ), migrations.CreateModel( name='Place', fields=[ ('submittedthing_ptr', models.OneToOneField(on_delete=models.CASCADE, parent_link=True, auto_created=True, primary_key=True, serialize=False, to='sa_api_v2.SubmittedThing')), ( ('geometry', django.contrib.gis.db.models.fields.GeometryField(srid=4326)) if settings.USE_GEODB else ('geometry', models.TextField()) ), ], options={ 'ordering': ['-updated_datetime'], 'db_table': 'sa_api_place', }, bases=('sa_api_v2.submittedthing',), ), migrations.CreateModel( name='Webhook', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)), ('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)), ('submission_set', models.CharField(max_length=128)), ('event', models.CharField(default='add', max_length=128, choices=[('add', 'On add')])), ('url', models.URLField(max_length=2048)), ('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='webhooks', to='sa_api_v2.DataSet')), ], options={ 'db_table': 'sa_api_webhook', }, bases=(models.Model,), ), migrations.AddField( model_name='submittedthing', name='dataset', field=models.ForeignKey(on_delete=models.CASCADE, related_name='things', blank=True, to='sa_api_v2.DataSet'), preserve_default=True, ), migrations.AddField( model_name='submittedthing', name='submitter', field=models.ForeignKey(on_delete=models.CASCADE, related_name='things', blank=True, to=settings.AUTH_USER_MODEL, null=True), preserve_default=True, ), migrations.AddField( model_name='submission', name='place', field=models.ForeignKey(on_delete=models.CASCADE, related_name='submissions', to='sa_api_v2.Place'), preserve_default=True, ), migrations.AddField( model_name='indexedvalue', name='thing', field=models.ForeignKey(on_delete=models.CASCADE, related_name='indexed_values', to='sa_api_v2.SubmittedThing'), 
preserve_default=True, ), migrations.AlterUniqueTogether( name='group', unique_together=set([('name', 'dataset')]), ), migrations.AddField( model_name='datasnapshot', name='request', field=models.OneToOneField(on_delete=models.CASCADE, related_name='fulfillment', to='sa_api_v2.DataSnapshotRequest'), preserve_default=True, ), migrations.AlterUniqueTogether( name='dataset', unique_together=set([('owner', 'slug')]), ), migrations.AddField( model_name='dataindex', name='dataset', field=models.ForeignKey(on_delete=models.CASCADE, related_name='indexes', to='sa_api_v2.DataSet'), preserve_default=True, ), migrations.AddField( model_name='attachment', name='thing', field=models.ForeignKey(on_delete=models.CASCADE, related_name='attachments', to='sa_api_v2.SubmittedThing'), preserve_default=True, ), migrations.AddField( model_name='apikey', name='dataset', field=models.ForeignKey(on_delete=models.CASCADE, related_name='keys', blank=True, to='sa_api_v2.DataSet'), preserve_default=True, ), migrations.AddField( model_name='action', name='thing', field=models.ForeignKey(on_delete=models.CASCADE, related_name='actions', db_column='data_id', to='sa_api_v2.SubmittedThing'), preserve_default=True, ), migrations.AddField( model_name='user', name='groups', field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups'), preserve_default=True, ), migrations.AddField( model_name='user', name='user_permissions', field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions'), preserve_default=True, ), ]
Configuring two comparable cloud storage solutions allowed the customer to judge the cost between the two platforms and test which configuration would work best for their storage needs. A rapidly growing consumer lender had the foresight to create hybrid cloud-ready active/active data centers to improve reliability, performance and increase efficiency. This included Dell EMC’s DPS Suite, Avamar and Data Domain data protection solutions. However, the customer had a variety of backup strategies among its data centers and sought WWT’s assistance to craft a comprehensive data protection strategy. In evaluating the requirements to ensure data protection of their data sets in the new data centers, it was determined that more capacity was required. Together with the customer, it was decided that the best course of action was to “flip the switch” on the hybrid cloud and offload the older backups to the cloud. In reviewing how often the customer restored files, the customer determined that they could easily offload data older than 3-6 months to the cloud. WWT configured 100TB of cloud storage via Virtustream Storage Cloud. Then onsite with the customer, WWT configured a comparable cloud storage solution with another top tier provider. This allowed the customer to judge the cost between the two platforms. In this instance, the Virtustream solution best met the customer’s needs. WWT and the Dell EMC team also provided analysis to prove that utilizing Data Domain to control the tiering would provide superior data reduction capabilities of the customer data sets and proving the advantages of using Data Domain Cloud Tier to Virtustream. By providing the customer with several options and empirical data, WWT enabled the customer to make an informed decision. The solution is currently being initiated, and with WWT’s support and expertise, the organization is taking full advantage of its hybrid cloud infrastructure.
# -*- encoding: utf-8 -*-
import pickle
from abjad import *


def test_documentationtools_GraphvizGraph_pickle_01():

    graph = documentationtools.GraphvizGraph()
    graph.append(documentationtools.GraphvizSubgraph())
    graph[0].append(documentationtools.GraphvizNode())
    graph[0].append(documentationtools.GraphvizNode())
    graph[0].append(documentationtools.GraphvizNode())
    graph[0].append(documentationtools.GraphvizSubgraph())
    graph[0][-1].append(documentationtools.GraphvizNode())
    graph.append(documentationtools.GraphvizNode())
    edge = documentationtools.GraphvizEdge()(graph[0][1], graph[1])
    edge = documentationtools.GraphvizEdge()(graph[0][0], graph[0][-1][0])

    assert str(graph) == systemtools.TestManager.clean_string(
        r'''
        digraph G {
            subgraph cluster_0 {
                node_0_0;
                node_0_1;
                node_0_2;
                subgraph cluster_0_3 {
                    node_0_3_0;
                }
                node_0_0 -> node_0_3_0;
            }
            node_1;
            node_0_1 -> node_1;
        }
        '''
        )

    pickled = pickle.loads(pickle.dumps(graph))
    assert str(graph) == str(pickled)
Clear Lake is always a first of the season place to ice fish. The lake is very shallow throughout and freezes very quickly. The beginning of the ice season is also the perfect time to fish the shallow weeds that surround much of the lake. This was our target today as I was joined by David Gissel and Cameron Arnold. Like many anglers on Clear Lake this year many small perch were caught on the little lake, so after several small moves we made a big move to the big lake. Different locations but same strategy, hit the weeds and find the pickets that the fish were using to move and feed. The water was very clear so sight fishing became the method of choice. Several nice bluegills were had on this day as well as a few yellow bass. The action was not fast and furious, but the size of the gills and yellows surprised us all. This technique gets overlooked at Clear Lake and it shouldn't. Fishing shallow for these moving fish is a fun way to get on some quality fish as we did on this day. Clear Lake didn't disappoint but next time I hope to be chasing down schools of yellows...which is an adventure every time! November 18th marked the first time on the ice for me this year. This was the earliest I have ever ice fished. The unusual fall weather brought enough cold weather to northern Iowa to allow the ice to form rapidly over a week-long stretch. A group of 5 of us traveled to Northern Iowa in search of good ice. We tried 7 locations over the day and ended up fishing on 3 of them. Bluegills, Perch, Largemouth Bass and Catfish were caught. The VEXILAR showed us that the action was steady all day and we all had a good time. It was good to see some ice fishing friends from Des Moines once again. First ice is very exciting, however CAUTION has to be on forefront of any ice adventure, especially early in the season. Traveling in groups is first line of defense, as well as safety equipment; spikes, rope, flotation devices and cell phones. Using a spud bar and or drilling many holes to check the ice is something we all need to do this time of year. Don't take someones word that the ice is ok, check it yourself or simply stay home. NO FISH is worth taking a chance on unstable ice. Be prepared, travel in small groups and always take in your surroundings in this early ice season. Ice fishing shows in Iowa are in full swing, I was sent to Clear Lake Bait and Tackle last weekend to give a seminar on using flashers to catch more fish through the ice. No matter the crowd there are always a few things that really get the attention of the crowd. Below you will see the most common thing I get asked while doing shows at the beginning of winter. This graphic shows the math behind the lines on your flasher. We need to train our minds that the colors are not the "size" of fish as a lot of anglers assume. The graphic shows how the colors can create a mental picture for us as we decipher what our flasher is telling us. Use the colors to present the baits at the appropriate depths, never go to a GREEN or YELLOW line, always fish above it. Also, always go after the RED lines, those fish are right beneath you and it also tells you at what depth the fish are moving through your water column. Note, the crappies are swimming all at the same depth, however the lines on the Vexilar will appear deeper as they are farther away from the ice-ducer. This is the reason why we need fish above the yellow and green lines. Have fun, be safe and enjoy the upcoming ice season...it might be here before many think!
# -*- coding: utf-8 -*-
# @Author: Patrick Bos
# @Date: 2016-11-16 16:23:55
# @Last Modified by: Patrick Bos
# @Last Modified time: 2016-11-21 18:10:00

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# fn = "timings.json"
# fn = "timings_stbc_20161117.json"
fn = "timings_stbc_20161121.json"

with open(fn, 'r') as fh:
    json_array_inside_text = fh.read()
    json_array_text = "[" + json_array_inside_text[:-2] + "]"  # [:-2] strips the trailing ",\n"

df = pd.read_json(json_array_text)

df.groupby([u'N_events', u'N_gaussians', u'N_parameters', u'num_cpu',
            u'parallel_interleave']).mean().timing_ns / 1e9

# almost perfectly linear:
df.groupby(['N_events']).mean().timing_ns.plot()
plt.show()

# 20161117: very strange, maxes out, then drops again
# 20161121: strangeness gone, just goes up. Maybe faster than linear.
df.groupby(['N_parameters']).mean().plot(y='timing_ns')
plt.show()

# should probably do something with a pivot here...
df.groupby(['N_events', 'N_parameters', 'num_cpu']).mean().timing_ns.plot()
plt.show()

# should probably do something with a pivot here...
df[df.N_events == 10000].groupby(['num_cpu']).mean().timing_ns.plot()
plt.show()

### WITH WOUTER, 21 Nov 2016

t = np.array([115.835, 67.6071, 51.3018, 44.8939, 31.6365, 33.413, 28.5969, 24.7553])
t_ideal = 115.835 / np.arange(1, 9)
c = range(1, 9)
plt.plot(c, t, c, t_ideal)
plt.show()
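One way to act on the "pivot" notes above (a sketch added for illustration, not part of the original analysis) is to average the timings per (num_cpu, N_events) pair and plot one scaling curve per problem size:

pivoted = (df.groupby(['num_cpu', 'N_events'])
             .timing_ns.mean()
             .unstack('N_events'))      # rows: num_cpu, columns: N_events
(pivoted / 1e9).plot(marker='o')        # one line per N_events value
plt.xlabel('num_cpu')
plt.ylabel('mean wall time (s)')
plt.show()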
Ash Fontana leads fundraising products like online investing, syndicates and funds at AngelList that drive over $20 million per month to startup companies. Ash previously co-founded and sold Topguest, a Founders Fund-backed company that built marketing technology for companies like IHG, United and Caesars Entertainment. Earlier in his career, Ash was a top-ranked analyst in private equity and investment banking at Macquarie Capital, and graduated at the top of his law school class at the University of Sydney, his hometown. He enjoys discovering and advising promising new startup companies including Canva, Automa and Mozio.
from . import utils from .utils import cbc from requests.auth import HTTPBasicAuth from urllib.parse import urljoin __all__ = [ # constants 'SCRIPT_KIND', 'EXPLICIT_KIND', 'IMPLICIT_KIND', 'APPLICATION_INSTALLED_KIND', 'APPLICATION_EXPLICIT_KIND', 'ALL_SCOPES', # Classes 'OAuth', 'Authorization' ] SCRIPT_KIND = "script" EXPLICIT_KIND = "explicit" IMPLICIT_KIND = "implicit" APPLICATION_INSTALLED_KIND = "application/installed" APPLICATION_EXPLICIT_KIND = "application/explicit" ALL_KINDS = (SCRIPT_KIND, EXPLICIT_KIND, IMPLICIT_KIND, APPLICATION_EXPLICIT_KIND, APPLICATION_INSTALLED_KIND) ALL_SCOPES = () REVERSE_KINDS = { SCRIPT_KIND: "SCRIPT_KIND", EXPLICIT_KIND: "EXPLICIT_KIND", IMPLICIT_KIND: "IMPLICIT_KIND", APPLICATION_INSTALLED_KIND: "APPLICATION_INSTALLED_KIND", APPLICATION_EXPLICIT_KIND: "APPLICATION_EXPLICIT_KIND" } # Different kinds of authentication require different parameters. This is a mapping of # kind to required parameter keys for use in OAuth's __init__ method. KIND_PARAMETER_MAPPING = { SCRIPT_KIND: ('client_id', 'secret_id', 'username', 'password'), EXPLICIT_KIND: ('client_id', 'secret_id', 'redirect_uri'), APPLICATION_EXPLICIT_KIND: ('client_id', 'secret_id'), IMPLICIT_KIND: ('client_id', 'redirect_uri'), APPLICATION_INSTALLED_KIND: ('client_id',) } class OAuth(object): """Class representing a set of OAuth credentials. May be authorized. This class is used to represent a complete set of credentials to log in to Reddit's OAuth API using one of the script, explicit, implicit, or application authentication forms. An object of this kind can be passed to the :class:`~snooble.Snooble` intializer, or via the :meth:`~snooble.Snooble.oauth` method. An OAuth object may also be returned by the :meth:`~snooble.Snooble.oauth` method. .. seealso:: :meth:`~snooble.oauth.OAuth.__init__`: All arguments passed in to this class will also be available as attributes to read and modify. """ def __init__(self, kind, scopes, **kwargs): """Intialise the object with the correct keyword arguments. Arguments: kind (str): This should be one of the five kind strings. These are all available as constants in this module - use these constants! If this kind is wrong, initialisation will fail with a ValueError scopes (list[str]): A list of all of the requested scopes from the API. For your convenience, the constant `ALL_SCOPES` is made available in this module, which will provide the correct scopes for all possible API requests. client_id (str): Always needed. Client ID as provided on the apps preferences page. secret_id (str): Needed for script kind, explicit kind, and application/explicit kind. As provided on the apps preferences page. username/password (str): Only needed for script kind. Username and password of the user to log in to. redirect_uri (str): Needed for explicit and implicit kinds. When the user has authenticated with Reddit, they will be sent to this uri. *Must* be the same as provided on the apps preferences page. mobile (bool): If ``True``, for explicit and implicit kinds, this will cause any generated authentication links to use Reddit's mobile-friendly page. Defaults to ``False``. duration (str): One of ``'temporary'`` or ``'permanent'``. Only applicable for explicit authentication kinds. Defaults to ``'temporary'``. device_id (str): A unique string to identify a user, used to help Reddit track unique users and improve their analytics. If the user does not want to be tracked, use ``'DO_NOT_TRACK_THIS_USER'``. Defaults to ``'DO_NOT_TRACK_THIS_USER'``. 
""" if kind not in ALL_KINDS: raise ValueError("Invalid oauth kind {kind}".format(kind=kind)) self.kind = kind self.scopes = scopes self.mobile = kwargs.pop('mobile', False) self.duration = kwargs.pop('duration', 'temporary') self.device_id = kwargs.pop('device_id', 'DO_NOT_TRACK_THIS_USER') utils.assign_parameters(self, kwargs, KIND_PARAMETER_MAPPING[self.kind]) self.authorization = None """The details of this account's authorization request, or ``None``. Will be ``None`` by default. If an authorization request has been successfully completed, the :class:`~snooble.Snooble` class will set this to the corresponding :class:`~snooble.oauth.Authorization` object. """ def __repr__(self): cls = self.__class__.__name__ kind = REVERSE_KINDS.get(self.kind) args = ((k, v) for k, v in self.__dict__.items() if k != 'kind') args = ", ".join("{k}={v!r}".format(k=k, v=v) for k, v in args) return '{cls}({kind}, {args})'.format(cls=cls, kind=kind, args=args) @property def authorized(self): """True if this instance has an authorization property. Does not fully check the validity of the authorization property, only that it exists. """ return self.authorization is not None class Authorization(object): """A class containing the details of a successful authorization attempt. Contains the :attr:`~.token_type`, and the :attr:`~.token`. It also stores the time the token was :attr:`~.recieved`, and the :attr:`~.length` that this token will last. Note that these last two attributes are not currently used by Snooble, but may be useful in future, or to users. """ def __init__(self, token_type, token, recieved, length): self.token_type = token_type "*(str)* Should always be the string ``'bearer'``." self.token = token "*(str)* A Reddit session token." self.recieved = recieved "*(int)* When the token was recieved in seconds since the epoch. (Always UTC)." self.length = length "*(int)* The length of time the token will last in seconds." 
def __repr__(self): cls = self.__class__.__name__ args = ("{k}={v}".format(k=k, v=v) for k, v in self.__dict__.items()) return "{cls}({args})".format(cls=cls, args=", ".join(args)) def __eq__(self, other): if type(self) == type(other): return self.__dict__ == other.__dict__ return False class AUTHORIZATION_METHODS(cbc.CallbackClass): @cbc.CallbackClass.key(SCRIPT_KIND) def authorize_script(snoo, auth, session, code): client_auth = HTTPBasicAuth(auth.client_id, auth.secret_id) post_data = {"scope": ",".join(auth.scopes), "grant_type": "password", "username": auth.username, "password": auth.password} url = urljoin(snoo.domain.www, 'api/v1/access_token') return session.post(url, auth=client_auth, data=post_data) @cbc.CallbackClass.key(EXPLICIT_KIND) def authorize_explicit(snoo, auth, session, code): client_auth = HTTPBasicAuth(auth.client_id, auth.secret_id) post_data = {"grant_type": "authorization_code", "code": code, "redirect_uri": auth.redirect_uri} url = urljoin(snoo.domain.www, 'api/v1/access_token') return session.post(url, auth=client_auth, data=post_data) @cbc.CallbackClass.key(IMPLICIT_KIND) def authorize_implicit(snoo, auth, session, code): return None @cbc.CallbackClass.key(APPLICATION_EXPLICIT_KIND) def authorize_application_explicit(snoo, auth, session, code): client_auth = HTTPBasicAuth(auth.client_id, auth.secret_id) post_data = {"grant_type": "client_credentials"} url = urljoin(snoo.domain.www, 'api/v1/access_token') return session.post(url, auth=client_auth, data=post_data) @cbc.CallbackClass.key(APPLICATION_INSTALLED_KIND) def authorize_application_implicit(snoo, auth, session, code): client_auth = HTTPBasicAuth(auth.client_id, '') post_data = {"grant_type": "https://oauth.reddit.com/grants/installed_client", "device_id": auth.device_id} url = urljoin(snoo.domain.www, 'api/v1/access_token') return session.post(url, auth=client_auth, data=post_data)
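A minimal usage sketch for the OAuth class above, assuming the surrounding snooble package as described in its docstrings; the client id, secret and account details are placeholders:

from snooble import oauth

creds = oauth.OAuth(
    oauth.SCRIPT_KIND,
    scopes=['identity', 'read'],       # explicit scope list; ALL_SCOPES is empty by default
    client_id='CLIENT_ID',
    secret_id='SECRET_ID',
    username='some_user',
    password='hunter2')

print(creds.authorized)  # False until a Snooble instance completes the authorization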
I vaguely remember the first time I played Magic: the Gathering. I think it was on a camping trip with some family friends. I was handed a black deck and given basic descriptions of the various colors. This deck made rats. Lots of rats. I played some cards, was told what tapping was, and made rats. I don’t think I understood when I should attack and when I should hold back to defend, and I recall being quite annoyed that attacking required tapping the creature. I didn’t know the immense popularity of the game at the time, and I didn’t think much of it after that trip, especially when I found out how expensive it could be. In college I once again found myself with friends who played Magic. A group of us decided to go to the local game store and create decks on the cheap to play with casually. I built a red deck this time, though I think I really wanted to build a green one, because I liked the big creatures. None of us really put much blue or control into our decks, which made larger creatures quite powerful. Later I bought one of those random boxes that stores put all of the bad, non-valuable cards in and had some fun making various decks. Since college I’ve dipped into that pile of cards a couple of times, and we’ve done a couple of drafts, which is undoubtedly my favorite way to play the game (I love drafting in any context, really). But last week I discovered that there’s a new online system for MtG called Arena, and it piqued my curiosity. Every time I’ve seen a new digital MtG product it’s seemed annoyingly limited. I remember demoing one at PAX East a few years ago and it felt anemic, which is exactly the opposite of what I want from that brand. The excitement of Magic, for a casual player like me, is in exploring the cardpool and getting a sense of freedom. Arena does that, though it’s certainly not a free-for-all. I believe the cardpool is limited to what is currently legal in the normal competitive format (called…standard?), but you earn a good number of cards through simple play and it takes only a couple of days to earn enough in-game coin to enter a draft. All in all it feels a lot like Hearthstone in what you can unlock through the grind (though you can always pay actual money to accelerate ahead). I’m not here to review Arena, but to talk about how I enjoy games like this. It happened with Hearthstone before this, Hearts in college, and various other games in the meantime. I play games like this primarily as a relaxation tool, though that frequently ends up as a procrastination tool. I don’t know what to make of it. I think there’s a part of me that has convinced myself that spending a lot of time with games like this is somehow better than other “time wasters” because it’s mentally engaging, even though I only engage on a minimal level. Yes I’m playing this game when I ought to be doing something else, but at least I’m not watching reality TV. It’s a pretentious mindset. I’m not criticizing MtG as a game, only the way I play it on the computer. I think what draws me in is that it’s relatively easy, especially after playing other card games, to create a simple heuristic and play out that heuristic with few difficult decisions. Back in the day when I was really into Hearthstone, I ended up learning quite a bit about MtG through comparison. Mana flood/starvation isn’t an issue with Hearthstone, but card draw is still incredibly important. I learned about creating positive trades, and why healing is often overrated by beginners. 
I understood how to value board wipes and direct damage and all of the basic things you’d tell a beginner about basic strategy. Then I got into Netrunner, with which I couldn’t do this kind of idle heuristic thinking, but the more advanced lessons I got from it still apply–always draw first to front-load uncertainty and pay attention to tempo-generating moves. So when I returned to MtG through Arena, I had built up a number of heuristic patterns I could follow and turn it into a near-idle game. I can hop in, go through some youtube videos, and spend a couple of hours in minimal thought. At night I’ve gotten in the habit of playing Madden while watching TV to try to get to sleep. I can imagine people who know anything about sleep aids screaming in horror at the prospect of someone watching two screens simultaneously at midnight, but it’s sort of worked, I guess? Sports games have always had a similar appeal to me as an escapist activity. I can play Madden or MLB The Show or Out Of The Park Baseball or this browser-based golf game I enjoy without committing my full attention to them. Somehow I’ve convinced myself that it’s better if I play them while also watching something else. It seems less wasteful that way. At the same time I leave other games I own by the wayside because I would have to dedicate more attention to them (and turn on the sound). This article is more a way to get some thoughts out of my head rather than to make any kind of point. I think MtG is actually quite brilliant, but I can’t be bothered (and I don’t have the cash) to engage in it in a way that would truly challenge me. I know that my habits, especially over the past couple of weeks, haven’t been the best, but there’s a part of me that understands that this is simply how I relax. What are your idle games–what do you play to unwind and let your mind wander? Or am I the only one like this?
''' figures.py - Create all the ColorPy sample figures. Description: Creates the sample figures. This can also create the figures with some non-default initialization conditions. Functions: figures() - Create all the sample figures. figures_clip_clamp_to_zero () - Adjust the color clipping method, and create the sample figures. figures_gamma_245 () - Adjust the gamma correction to a power law gamma = 2.45 and create samples. figures_white_A () - Adjust the white point (for Luv/Lab) and create sample figures. License: Copyright (C) 2008 Mark Kness Author - Mark Kness - [email protected] This file is part of ColorPy. ColorPy is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ColorPy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ColorPy. If not, see <http://www.gnu.org/licenses/>. ''' import colormodels import illuminants import plots import blackbody import rayleigh import thinfilm import misc def figures (): '''Create all the ColorPy sample figures.''' # no figures for colormodels and ciexyz colormodels.init() # default illuminants.figures() plots.figures() blackbody.figures() rayleigh.figures() thinfilm.figures() misc.figures() def figures_clip_clamp_to_zero (): '''Adjust the color clipping method, and create the sample figures.''' colormodels.init() colormodels.init_clipping (colormodels.CLIP_CLAMP_TO_ZERO) figures() def figures_gamma_245 (): '''Adjust the gamma correction to a power law gamma = 2.45 and create samples.''' colormodels.init() colormodels.init_gamma_correction ( display_from_linear_function = colormodels.simple_gamma_invert, linear_from_display_function = colormodels.simple_gamma_correct, gamma = 2.45) figures() def figures_white_A (): '''Adjust the white point (for Luv/Lab) and create sample figures.''' colormodels.init() colormodels.init_Luv_Lab_white_point (colormodels.WhiteA) figures() if __name__ == '__main__': figures()
A CHARMING and SPACIOUS property bursting with CHARACTER, very well placed in the delightful semi-rural hamlet of Hatton. To the ground floor the accommodation comprises FOUR separate reception rooms plus a BEAUTIFUL kitchen with WC. To the first floor there are THREE DOUBLE bedrooms, a further single bedroom and a family bathroom. Externally the property offers off road parking to the front, rear garden and views over fields to the front. NO CHAIN.
import time import tests recorded_events = [ ] def event(self, name, args, kwargs): global recorded_events print "*EVENT*", time.time(), self, name, args, kwargs recorded_events.append((time.time(), self, name, args, kwargs)) def eventfnc(f): name = f.__name__ def wrapper(self, *args, **kwargs): event(self, name, args, kwargs) return f(self, *args, **kwargs) return wrapper def get_events(): global recorded_events r = recorded_events recorded_events = [ ] return r def start_log(): global base_time base_time = time.time() def end_log(test_name): global base_time results = "" for (t, self, method, args, kwargs) in get_events(): results += "%s T+%f: %s::%s(%s, *%s, *%s)\n" % (time.ctime(t), t - base_time, str(self.__class__), method, self, args, kwargs) expected = None try: f = open(test_name + ".results", "rb") expected = f.read() f.close() except: print "NO TEST RESULT FOUND, creating new" f = open(test_name + ".new_results", "wb") f.write(results) f.close() print results if expected is not None: print "expected:" if expected != results: f = open(test_name + ".bogus_results", "wb") f.write(results) f.close() raise tests.TestError("test data does not match") else: print "test compared ok" else: print "no test data to compare with." def log(fnc, base_time = 0, test_name = "test", *args, **kwargs): import fake_time fake_time.setTime(base_time) start_log() try: fnc(*args, **kwargs) event(None, "test_completed", [], {"test_name": test_name}) except tests.TestError,c: event(None, "test_failed", [], {"test_name": test_name, "reason": str(c)}) end_log(test_name)
Working directly on the structure of the building, Williams will allow the visitor to have access to it through the original museum entrance and, by a balanced interplay of additions and removals, he will partially rediscover the itineraries, disposition of masses, explanatory structures and original outlay as they were at the time the museum was opened (May 1st, 1975). In the galleries themselves, but also in the old entrance lobby, the corridors and the service areas normally closed to the public, the artist’s works will outline the course of a unique journey “backwards in time”. By documenting the progressive transformations of the Gallery, designed by architect Leone Pancaldi adjacent to the pavilions of the Bologna Fair, the artist puts into mutual relationship the present and the past of the institution, its imminent closure with the events which politically motivated its creation and accompanied its inauguration, enabling these two opposite events – inauguration and closure – as it were to discourse between themselves, and almost merging the one with the other. In his photographs, videos, films, installations, sculptures and performances Christopher Williams analyses the complex and stratified mechanisms by means of which contemporary communication and aesthetic conventions structure our ways of perceiving and understanding reality. Conceived independently by the artist and dedicated to the memory of the French film director Danièle Huillet (1936-2006), a play list project curated by Williams and John Kelsey (Radio Danièle) will be broadcast throughout the period of the exhibition by Città del Capo – Radio Metropolitana.
import matplotlib.pyplot as plt


class PlaceFieldVisualizer:
    def __init__(self, fields):
        self.fields = fields
        self.callbacks = {}
        fig = plt.figure()
        # Hook up mouse and keyboard handlers (names must match the methods defined below).
        cid = fig.canvas.mpl_connect('button_press_event', self.onClick)
        cid_k = fig.canvas.mpl_connect('key_press_event', self.onKeyPress)

        #layout_rows=2
        #layout_cols=max(len(self.arm.fields_sm),len(self.arm.fields_hidden)+1)
        #layout_cols=max(layout_cols,4)
        layout_rows = 2
        layout_cols = 2
        panel_layout = layout_rows * 100 + 10 * layout_cols

        # One activation subplot per field, laid out on a 2x2 grid.
        self.subplot_activation = {}
        for i, f in enumerate(fields):
            self.subplot_activation[f] = plt.subplot(panel_layout + i + 1)
            self.subplot_activation[f].set_title(f.name)
            f.plot_activation('green')

        '''
        self.subplot_arm_schematic=plt.subplot(panel_layout+len(self.arm.fields_sm)+1)
        self.subplot_arm_schematic.set_xlim(-self.arm.field_ee.radius_max,self.arm.field_ee.radius_max)
        self.subplot_arm_schematic.set_ylim(-self.arm.field_ee.radius_max,self.arm.field_ee.radius_max)
        self.subplot_arm_target_line=plt.Line2D([0.0,0.0],[0.0,0.0])
        self.subplot_arm_schematic.add_line(self.subplot_arm_target_line)
        '''

        '''
        self.subplot_poses={}
        for i,hidden in enumerate(self.arm.fields_hidden):
            field_sum,field_summand0,field_summand1=hidden.fields_sm
            radius_max=field_sum.radius_max
            plot_id=len(self.arm.fields_sm)+2+i
            self.subplot_poses[hidden]=plt.subplot(layout_rows,layout_cols,plot_id)
            self.subplot_poses[hidden].set_xlim(-radius_max,radius_max)
            self.subplot_poses[hidden].set_ylim(-radius_max,radius_max)
            self.subplot_poses[hidden].set_title(hidden.name)
        '''

        plt.show()

    def onClick(self, event):
        pass

    def onKeyPress(self, event):
        # Dispatch to the callback registered for this key, then redraw all panels.
        if event.key in self.callbacks:
            self.callbacks[event.key]()
            self.refreshPlot()

    def addCallback(self, key, callback):
        self.callbacks[key] = callback

    def refreshPlot(self):
        for field, subplot in self.subplot_activation.items():
            subplot.cla()
            subplot.set_title(field.name)
            field.plot_activation('green', subplot)
        plt.draw()
Terma provides customized electronic systems for space missions as well as more standardized solutions such as Terma's well-proven modular power supply system. Terma is also contracted to deliver the Power Conditioning and Distribution Unit for the first four satellites of the European Galileo satellite navigation system. Terma's Power Management and Distribution (PMAD) system is a flight-proven modular concept qualified for deep space and Earth orbit missions. The unique system maximizes power utilization in satellite missions. The system scales to a power capacity of up to 3 kW and offers Autonomous Maximum Power Point Tracking (MPPT), high power efficiency, and half the weight of traditional PMAD electronics. The MPPT function is designed with the latest state-of-the-art power technology to enhance reliability and power-to-weight performance, while utilizing high frequency switched-mode power conversion technology to minimize unit mass. The Power Management and Distribution system architecture is modular, offering a unique opportunity for tailoring the design to mission-specific requirements, while at the same time still benefiting from a standardized and qualified solution. The enhanced flexibility and adaptability facilitates convenient assembly, integration, and testing of the satellite. The MPPT function continuously optimizes the power available to the various payloads and subsystems on board the satellite. This function also improves the power management performance for missions with widely varying payload power consumption. The improved margins on the satellite design may be traded with smaller solar panels, lower satellite mass, reduced launch constraints, etc. The MPPT simplifies the design of the solar panels through reduced coupling between the power management system and the particular design options for the solar panels and the batteries.
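The MPPT function described above continuously adjusts the solar-array operating point so that the extracted power stays at its maximum. As an illustration of the general idea only, here is a generic perturb-and-observe sketch in Python; it is not Terma's flight implementation, and read_panel is a hypothetical telemetry function returning the array voltage and current:

def mppt_step(v_ref, step, last_power, read_panel):
    # One perturb-and-observe iteration: nudge the operating voltage and keep
    # the direction of the perturbation that increased the extracted power.
    voltage, current = read_panel()
    power = voltage * current
    if power < last_power:
        step = -step           # power dropped, so reverse the perturbation
    return v_ref + step, step, power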
from gourmet.plugin import ToolPlugin from . import fieldEditor from gi.repository import Gtk from gettext import gettext as _ class FieldEditorPlugin (ToolPlugin): menu_items = '''<placeholder name="DataTool"> <menuitem action="FieldEditor"/> </placeholder> ''' def setup_action_groups (self): self.action_group = Gtk.ActionGroup(name='FieldEditorPluginActionGroup') self.action_group.add_actions([ ('FieldEditor',None,_('Field Editor'), None,_('Edit fields across multiple recipes at a time.'),self.show_field_editor ), ]) self.action_groups.append(self.action_group) def show_field_editor (self, *args): from gourmet.GourmetRecipeManager import get_application self.app = get_application() self.field_editor = fieldEditor.FieldEditor(self.app.rd, self.app) self.field_editor.valueDialog.connect('response',self.response_cb) self.field_editor.show() def response_cb (self, d, r): if r==Gtk.ResponseType.APPLY: self.app.update_attribute_models() plugins = [FieldEditorPlugin]
Sticking to the principle of "Superior Quality, Satisfactory Service," we are striving to become an excellent business partner for Street Pole Lighting, street lighting, and LED Street Lighting. We are ready to cooperate with business friends at home and overseas and build a wonderful future together. With a positive and progressive attitude toward our customers' interests, our enterprise continually improves product quality to meet customer demands, with a further focus on the safety, reliability, environmental performance, and innovation of Street Pole Lighting, street lighting, and LED Street Lighting. We are a growing manufacturer, supplier, and exporter of these products, with a team of dedicated, trained professionals who take care of quality and timely supply. If you are looking for good quality at a good price with timely delivery, do contact us.
from functools import partial from toolz.compatibility import range, map def hashable(x): try: hash(x) return True except TypeError: return False def transitive_get(key, d): """ Transitive dict.get >>> d = {1: 2, 2: 3, 3: 4} >>> d.get(1) 2 >>> transitive_get(1, d) 4 """ while hashable(key) and key in d: key = d[key] return key def raises(err, lamda): try: lamda() return False except err: return True # Taken from theano/theano/gof/sched.py # Avoids licensing issues because this was written by Matthew Rocklin def _toposort(edges): """ Topological sort algorithm by Kahn [1] - O(nodes + vertices) inputs: edges - a dict of the form {a: {b, c}} where b and c depend on a outputs: L - an ordered list of nodes that satisfy the dependencies of edges >>> _toposort({1: (2, 3), 2: (3, )}) [1, 2, 3] Closely follows the wikipedia page [2] [1] Kahn, Arthur B. (1962), "Topological sorting of large networks", Communications of the ACM [2] http://en.wikipedia.org/wiki/Toposort#Algorithms """ incoming_edges = reverse_dict(edges) incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items()) S = set((v for v in edges if v not in incoming_edges)) L = [] while S: n = S.pop() L.append(n) for m in edges.get(n, ()): assert n in incoming_edges[m] incoming_edges[m].remove(n) if not incoming_edges[m]: S.add(m) if any(incoming_edges.get(v, None) for v in edges): raise ValueError("Input has cycles") return L def reverse_dict(d): """Reverses direction of dependence dict >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()} >>> reverse_dict(d) # doctest: +SKIP {1: ('a',), 2: ('a', 'b'), 3: ('b',)} :note: dict order are not deterministic. As we iterate on the input dict, it make the output of this function depend on the dict order. So this function output order should be considered as undeterministic. """ result = {} for key in d: for val in d[key]: result[val] = result.get(val, tuple()) + (key, ) return result def xfail(func): try: func() raise Exception("XFailed test passed") # pragma:nocover except: pass def freeze(d): """ Freeze container to hashable form >>> freeze(1) 1 >>> freeze([1, 2]) (1, 2) >>> freeze({1: 2}) # doctest: +SKIP frozenset([(1, 2)]) """ if isinstance(d, dict): return frozenset(map(freeze, d.items())) if isinstance(d, set): return frozenset(map(freeze, d)) if isinstance(d, (tuple, list)): return tuple(map(freeze, d)) return d
Ulysse Nardin - Paul Nardin small 5-minute repeater with original box and certificate, circa 1900. Fancy white enamel dial with black and gold markings and fancy gold hands. Plain polished case with repeater slide in the band. Nickel 27-jewel movement, jeweled to the center and the hammers, and with wolf-tooth winding. Signed Ulysse Nardin along the gongs. When activated, this watch chimes the time, using different tones to designate the hours and five-minute intervals. Particularly fine condition. Ulysse Nardin was born in 1823 in Le Locle, Switzerland. He was an accomplished watchmaker, having trained under his father, Leonard-Frederic Nardin, and later under two master watchmakers, Frederic William Dubois and Louis JeanRichard-dit-Bressel. The company Ulysse Nardin was founded in 1846 and remained under Ulysse's control until his passing in 1876, when his 21-year-old son, Paul-David Nardin, took over. 1876 was also the year in which Paul-David Nardin received the only Gold Medal ever awarded at the Concours international de réglage de Genève, as well as the Prix d'Honneur. The firm went on to receive numerous awards all over the world. The company name went through a number of changes and reverted back to Ulysse Nardin in 1922.
#!/usr/bin/env python2 """ Recursive Bayes for POMDP belief-state tracking. Max-a-posteriori estimation. """ # Dependencies from __future__ import division import numpy as np; npl = np.linalg import matplotlib.pyplot as plt # State, action, measurement, and time cardinalities nS = 3; nA = 2; nM = 2; nT = 100 # Transition conditional probability matrix, A by S by S' P = np.array([[[ 1, 0, 0], [ 1, 0, 0], [ 0, 0.3, 0.7]], [[0.4, 0, 0.6], [0.1, 0.6, 0.3], [ 0, 0.1, 0.9]]], dtype=np.float64) # Sensor conditional probability matrix, A by S by O Qr = 0.5 Q = np.array([[[ 1, 0], [ 1, 0], [1-Qr, Qr]], [[ 1, 0], [ 1, 0], [ 0, 1]]], dtype=np.float64) # Cost function, c(a,x) c = np.array([[-1, -1, -3], [ 0, 0, -2]], dtype=np.float64) # State, estimate, measurement, belief, and cost histories x = np.zeros(nT, dtype=np.int64) xh = np.zeros(nT, dtype=np.int64) y = np.zeros(nT, dtype=np.int64) b = np.zeros((nT, nS), dtype=np.float64) cost = np.zeros(nT, dtype=np.float64) # Initial conditions x[0] = 0 b[0] = [1, 0, 0] # Function for randomly sampling with a given discrete probability density sample_from = lambda p: np.argwhere(np.random.sample() < np.cumsum(p))[0][0] # Simulation time = np.arange(nT) for t in time[1:]: # Estimate state as the posterior maximizer xh[t-1] = np.argmax(b[t-1]) # Randomly choose action, accept cost u = sample_from([0.5, 0.5]) cost[t] = cost[t-1] + c[u, x[t-1]] # Advance state, obtain measurement x[t] = sample_from(P[u, x[t-1]]) y[t] = sample_from(Q[u, x[t]]) # Update belief b[t] = (b[t-1].dot(P[u]))*Q[u, :, y[t]] b[t] = b[t] / np.sum(b[t]) # Plot estimation error print("Accuracy: {}%".format(100*len(np.argwhere(np.logical_not(np.abs(x - xh))))/nT)) plt.title("Estimation Error", fontsize=22) plt.ylabel("x - xh", fontsize=22) plt.xlabel("Time (iteration)", fontsize=22) plt.scatter(time, x-xh) plt.xlim([0, nT]) plt.grid(True) plt.show()
June marks the sad second anniversary of Downey Civic Light Opera being forced out of existence by VenueTech, which began (mis)managing the civic theater on Jan. 26, 2011. The Downey Theater was literally built for DCLO in 1970 as a result of their efforts. Most of us will not soon forget the high quality musicals that were produced there by Marsha Moode and her team. A resident of Downey, Marsha always had a friendly and personal rapport with the audience. Something that is very much lacking there today. I have read the business report that VenueTech submitted to the city hall as contract renewal time approaches. At first glance the report paints a pretty picture of success. This is deceptive on several levels. Many of the rental clients listed were already renters far in advance of VT's arrival. They were given no choice but to play ball with VT since they are the only game in town. The report omits any figures related to DCLO and the lucrative amount of rent they paid for use of the theater. DCLO was still an operating client for the first two-plus years of VT's reign. Surely VT must know how much was collected. Regarding the vast improvements to the theater, I haven't seen much except for the stringing of the lights over the outdoor patio. The computerized ticketing system pretty much benefits VT only. Finally, and this is key, I see revenue figures but absolutely no expense figures. This renders the report completely meaningless. One expense that will not be ignored is the looming million dollar lawsuit filed by the lead singer of Los Lonely Boys for injuries he sustained at the theater as a result of VT's incompetence and laziness. The city of Downey now pays an unprecedented $454,000 per year to subsidize the theater. The job could be done more efficiently for much less money if a responsible and ethical management company were in charge. There is no reason theater goers could not be offered one-night acts, symphony concerts, travel films, the beloved DCLO musicals, and much more.
# # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Aparajita Fishman # Copyright (c) 2015 The SublimeLinter Community # # License: MIT # """This module exports the PHPLint plugin class.""" from SublimeLinter.lint import Linter class PHPLint(Linter): """Provides an interface to the phplint executable.""" syntax = ('php', 'html') cmd = 'phpl --php-version 5 --print-path relative --print-column-number --tab-size 4 --no-overall' version_args = '--version' version_re = r'PHPLint (?P<version>\d+\.\d+)' version_requirement = '>= 2.0' regex = ( r'(?i)^(?:' r'\t.*?\r?\n)?' r'==== (?P<line>\d+):(?P<col>.*): ' r'(?:(?P<error>error)|(?P<warning>warning|notice)): ' r'(?P<message>[^`\r\n]*(?:`(?P<near>[^\']+)\')?[^\r\n]*)' ) multiline = True tempfile_suffix = 'php' def split_match(self, match): """Return the match with ` quotes transformed to '.""" match, line, col, error, warning, message, near = super().split_match(match) if message == 'no PHP code found at all': match = None else: message = message.replace('`', '\'') # If the message contains a complaint about a function # and near looks like a function reference, remove the trailing # () so it can be found. if 'function \'' in message and near and near.endswith('()'): near = near[:-2] return match, line, col, error, warning, message, near
I was around seven or eight years old. I was with my dad on the University of Illinois campus. We stopped at the university store and I found a small basketball. I took it out of the bin and excitedly showed it to my dad. "Alright" he said. We left the store to walk back to my dad's office. "Can I hold the ball?" I asked. I was so excited. "Yes" said my dad, "but don't bounce it." Easy enough. About a block away from the store I let my excitement get the best of me. I bounced the ball. But, instead of falling on the pavement and returning to my hand, I made a mistake. The ball hit my shoe. It rolled into the street and under the wheel of a public bus. Fortunately, the bus was at a stop light. My dad saw the ball roll under and heard me ask for help. He jogged over to the bus, bent down to pry the ball from the wheel but it was too late. The light turned green, the bus started rolling, then BOOM. When the traffic had passed I ran to my dad who then picked up the rubber pancake that was my basketball. Tears ensued. I love my father. He could have relished this moment and made me suffer for my disobedience. He clearly told me not to bounce the ball, yet I did it anyway. Instead, he dried my tears, walked me back to the sports store, and helped me pick out a new ball. This time, I let him carry it. I've never forgotten this incident. It's had a profound impact on my life. Sometimes s**t happens as Forrest Gump says. Things that are beyond our control. My initial instinct was to run after my ball but it rolled into the street (a place I was also forbidden to tread). My dad tried to rescue the ball in my place but it was too late and I had to watch helplessly as my toy literally exploded before my eyes. Sometimes we don't have a choice in what happens, but as long as we're alive, we have a choice in how we respond. Victor Frankl says, "when we are no longer able to change a situation, we are challenged to change ourselves." This is profound. "Everyone you meet is fighting a battle" says Ian Maclaren, yet people hide it so well. Nobody wants to see our ugly scars, we tell ourselves, so we conceal them with smiles. I'm amazed by both the beauty and suffering of life, yet neither would exist without the other. Suffering is an uncontrollable given. What we can control is how we cope with suffering. Sometimes it takes time for us to find meaning in our suffering, but it is possible, and knowing that it's possible gives hope. Somewhere in my parent's garage in a bucket sits that basketball, or maybe it's been given away by now at a garage sale or to a neighbor (we don't have a basketball hoop anymore). But I still remember that day. I remember the helpless feeling as I watched the ball crush under the weight of the bus, and I remember my father consoling my guilt and sadness. Know there is power in choice. "What we think, we become," says Buddha. Choose to see the positive in the world and you will see it. Strive to find meaning in suffering. And remember to be kind, for everyone you meet is fighting a battle.
# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT single-SVM storage driver. This driver requires a Data ONTAP (Cluster-mode) storage system with installed CIFS and/or NFS licenses, as well as a FlexClone license. This driver does not manage share servers, meaning it uses a single Data ONTAP storage virtual machine (i.e. 'vserver') as defined in manila.conf to provision shares. This driver supports NFS & CIFS protocols. """ from manila.share import driver from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver): """NetApp Cluster-mode single-SVM share driver.""" DRIVER_NAME = 'NetApp_Cluster_SingleSVM' def __init__(self, *args, **kwargs): super(NetAppCmodeSingleSvmShareDriver, self).__init__( False, *args, **kwargs) self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary( self.DRIVER_NAME, **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def get_pool(self, share): return self.library.get_pool(share) def create_share(self, context, share, **kwargs): return self.library.create_share(context, share, **kwargs) def create_share_from_snapshot(self, context, share, snapshot, **kwargs): return self.library.create_share_from_snapshot(context, share, snapshot, **kwargs) def create_snapshot(self, context, snapshot, **kwargs): return self.library.create_snapshot(context, snapshot, **kwargs) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, **kwargs): return self.library.revert_to_snapshot(context, snapshot, **kwargs) def delete_share(self, context, share, **kwargs): self.library.delete_share(context, share, **kwargs) def delete_snapshot(self, context, snapshot, **kwargs): self.library.delete_snapshot(context, snapshot, **kwargs) def extend_share(self, share, new_size, **kwargs): self.library.extend_share(share, new_size, **kwargs) def shrink_share(self, share, new_size, **kwargs): self.library.shrink_share(share, new_size, **kwargs) def ensure_share(self, context, share, **kwargs): pass def manage_existing(self, share, driver_options): return self.library.manage_existing(share, driver_options) def unmanage(self, share): self.library.unmanage(share) def manage_existing_snapshot(self, snapshot, driver_options): return self.library.manage_existing_snapshot(snapshot, driver_options) def unmanage_snapshot(self, snapshot): self.library.unmanage_snapshot(snapshot) def update_access(self, context, share, access_rules, add_rules, delete_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, delete_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats( filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function()) super(NetAppCmodeSingleSvmShareDriver, self)._update_share_stats( data=data) def 
get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def get_share_server_pools(self, share_server): return self.library.get_share_server_pools(share_server) def get_network_allocations_number(self): return self.library.get_network_allocations_number() def get_admin_network_allocations_number(self): return self.library.get_admin_network_allocations_number() def _setup_server(self, network_info, metadata=None): return self.library.setup_server(network_info, metadata) def _teardown_server(self, server_details, **kwargs): self.library.teardown_server(server_details, **kwargs) def create_replica(self, context, replica_list, replica, access_rules, replica_snapshots, **kwargs): return self.library.create_replica(context, replica_list, replica, access_rules, replica_snapshots, **kwargs) def delete_replica(self, context, replica_list, replica_snapshots, replica, **kwargs): self.library.delete_replica(context, replica_list, replica, replica_snapshots, **kwargs) def promote_replica(self, context, replica_list, replica, access_rules, share_server=None): return self.library.promote_replica(context, replica_list, replica, access_rules, share_server=share_server) def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): return self.library.update_replica_state(context, replica_list, replica, access_rules, replica_snapshots, share_server=share_server) def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.create_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.delete_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): return self.library.update_replicated_snapshot( replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=share_server) def revert_to_replicated_snapshot(self, context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, share_access_rules, snapshot_access_rules, **kwargs): return self.library.revert_to_replicated_snapshot( context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, **kwargs) def migration_check_compatibility(self, context, source_share, destination_share, share_server=None, destination_share_server=None): return self.library.migration_check_compatibility( context, source_share, destination_share, share_server=share_server, destination_share_server=destination_share_server) def migration_start(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_start( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_continue(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_continue( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, 
destination_share_server=destination_share_server) def migration_get_progress(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_get_progress( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_cancel(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_cancel( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_complete(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_complete( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def create_share_group_snapshot(self, context, snap_dict, share_server=None): fallback_create = super(NetAppCmodeSingleSvmShareDriver, self).create_share_group_snapshot return self.library.create_group_snapshot(context, snap_dict, fallback_create, share_server) def delete_share_group_snapshot(self, context, snap_dict, share_server=None): fallback_delete = super(NetAppCmodeSingleSvmShareDriver, self).delete_share_group_snapshot return self.library.delete_group_snapshot(context, snap_dict, fallback_delete, share_server) def create_share_group_from_share_group_snapshot( self, context, share_group_dict, snapshot_dict, share_server=None): fallback_create = super( NetAppCmodeSingleSvmShareDriver, self).create_share_group_from_share_group_snapshot return self.library.create_group_from_snapshot(context, share_group_dict, snapshot_dict, fallback_create, share_server) def get_configured_ip_versions(self): return self.library.get_configured_ip_versions()
One perk to being unemployed for the summer is I get to have days off with the Frenchman. Which means days off for the whole family which means family days out! Last week we headed down to Limerick to visit King John’s Castle.
from progressivis.core.utils import filepath_to_buffer from . import ProgressiveTest, skip, skipIf import requests, tempfile, os HTTP_URL = ('http://s3.amazonaws.com/h2o-release/h2o/master' '/1193/docs-website/resources/publicdata.html') S3_URL = ('s3://h2o-release/h2o/master/1193/docs-website' '/resources/publicdata.html') @skipIf(os.getenv('TRAVIS'),'skipped on Travis=>avoids: "certificate verify failed: IP address mismatch, certificate is not valid"') class TestFileBuffer(ProgressiveTest): def setUp(self): req = requests.get(HTTP_URL) _, self.tmp_file = tempfile.mkstemp(prefix='p10s_', suffix='.html') with open(self.tmp_file, 'wb') as f: f.write(req.content) def tearDown(self): os.remove(self.tmp_file) def test_file_buffer(self): reader_http, _, _, size_http = filepath_to_buffer( HTTP_URL ) self.assertGreater(size_http, 0) reader_s3, _, _, size_s3 = filepath_to_buffer( S3_URL ) self.assertEqual(size_http, size_s3) reader_file, _, _, size_file = filepath_to_buffer(self.tmp_file) self.assertEqual(size_file, size_s3) n1 = 12 n2 = 34 n3 = 56 _ = reader_http.read(n1) _ = reader_http.read(n2) _ = reader_http.read(n3) self.assertEqual(reader_http.tell(), n1 + n2 + n3) _ = reader_s3.read(n1) _ = reader_s3.read(n2) _ = reader_s3.read(n3) self.assertEqual(reader_s3.tell(), n1 + n2 + n3) _ = reader_file.read(n1) _ = reader_file.read(n2) _ = reader_file.read(n3) self.assertEqual(reader_file.tell(), n1 + n2 + n3) try: reader_s3.close() except: pass try: reader_file.close() except: pass if __name__ == '__main__': ProgressiveTest.main()
Marin Katusa is the author of the New York Times bestseller, The Colder War. Over the last decade, he has become one of the most successful portfolio managers in the resource sector, such as his 2009 Fund Partnership (KC50 Fund, LLC) which has outperformed the comparable index, the TSX.V by over 500% post fees. Katusa has been involved in raising over $1 billion in financing for resource companies. He has visited over 400 resource projects in over 100 countries. Katusa publishes his thoughts and research at www.katusaresearch.com. Energy investors are no strangers to boom-and-bust cycles. In fact, a number of the experts interviewed by The Energy Report in 2015 took a certain amount of glee in the opportunities they knew would open up in low-price markets for oil and gas, uranium and lithium. Let's take a trip around the world as we recall the words of wisdom these experts shared, and see if they can spark a better understanding of what we can expect during the next turn around the sun. Investors have spoken, and they said they don't want a merger of Fission Uranium and Denison Mines. In this interview with The Energy Report, Marin Katusa, founder of Katusa Research, shares his insight on why Fission investors rejected the deal and where he is finding value in the uranium and oil sector today. What if you could tell before a press release comes out that a company is poised to be bought out, possibly at a nice premium? Sounds good, right? By watching historic patterns, that might just be possible. The Gold Report reached out to experts who have been around through enough cycles to know and asked for the clues they watch that an acquisition might be imminent. From location and early investment to derisking levels, you just have to know what to look for to position yourself for a liquidity event. "I think FCU will keep drilling fantastic numbers." "The team at NXE has done an incredible job." "NMI has been putting dollars into drilling and hitting very high-grade gold." "Investors looking at the top-tier companies also have to consider the royalty streaming companies like SLW." "NGN is one of my favorite stocks." "FCU has one of the highest-grade projects in the world." "FIS has the right management team that made not one, but two major discoveries." "Major producers like RDS can find financing."
import tensorflow as tf def shape_probe(tensor): return tf.Print(tensor, [tf.shape(tensor)], message='Shape=', summarize=10) def min_max_probe(tensor): return tf.Print(tensor, [tf.reduce_min(tensor), tf.reduce_max(tensor)], message='Min, max=', summarize=10) def conv_map_montage(conv_maps): """ Montage of convolutional feature maps. Args: conv_maps: 4D tensor [B x H x W x C] maxWidth: maximum output width maxHeight: maximum output height Return: montage: [B x H' x W'] """ raise NotImplementedError # shape = tf.shape(conv_maps) # B, H, W, C = shape[0], shape[1], shape[2], shape[3] # maps = tf.transpose(conv_maps, [0,3,1,2]) # tf.gather(maps, ) def activation_summary(x): tensor_name = x.op.name tf.histogram_summary('activations/' + tensor_name, x) # tf.scalar_summary(tensor_name + '/max', tf.reduce_max(x)) # tf.scalar_summary(tensor_name + '/min', tf.reduce_min(x)) # tf.scalar_summary(tensor_name + '/mean', tf.reduce_mean(x)) tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) def histogram_summary_for_all_variables(): for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) def add_loss_summaries(total_loss): """Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses. """ # Compute the moving average of all individual losses and the total loss. loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summmary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.scalar_summary(l.op.name +' (raw)', l) tf.scalar_summary(l.op.name, loss_averages.average(l)) return loss_averages_op
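The conv_map_montage stub above is left unimplemented. Below is one possible way to lay the feature maps of each batch element out in a grid, given statically known height, width and channel dimensions and a channel count divisible by the chosen number of grid columns; this is a sketch, not necessarily the layout the original author intended:

def conv_map_montage_static(conv_maps, grid_cols):
    """Tile the C feature maps of each batch element into a rows-by-cols grid."""
    _, H, W, C = conv_maps.get_shape().as_list()   # H, W, C must be statically known
    grid_rows = C // grid_cols                     # assumes C % grid_cols == 0
    maps = tf.transpose(conv_maps, [0, 3, 1, 2])                  # [B, C, H, W]
    maps = tf.reshape(maps, [-1, grid_rows, grid_cols, H, W])     # split channels into a grid
    maps = tf.transpose(maps, [0, 1, 3, 2, 4])                    # [B, rows, H, cols, W]
    return tf.reshape(maps, [-1, grid_rows * H, grid_cols * W])   # [B, rows*H, cols*W]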
The Kalette looks like a miniature Brussels sprout with wings. It is slightly smaller in size than the standard sprout, yet more leafy, like kale. Bay Area artists created a pop-up art installation honoring the Ethiopian and Eritrean new year.
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 17:21:27 2017
@author: carlos.arana

Description: Processes the parameter data and returns the integrity variable.
If the integrity variable is of type:
    Type 1 (Binary): the function looks for empty cells in a single column and returns 0 if the
    cell is empty or 1 if the cell has data.
    Type 2: the function looks for empty cells across several columns and computes an integrity
    percentage in the range 0 to 1, where zero means no data at all (0%) and one means all data
    are present (100%).
    Type 3: the function assumes the dataset is complete and assigns integrity 1 to every record.
    Used for parameters

Input:
    par_dataset: [pandas dataframe] indexed by CVE_MUN, a single column with the data used to
    build the parameter.
    dataset: [pandas dataframe] standard dataset, indexed by CVE_MUN, containing the full series
    of available data; only columns with parameter data, avoiding description columns.
"""
import pandas as pd

def VarInt(par_dataset, dataset, tipo=1):  # integrity type: [1] binary, [2] series
    if tipo == 1:
        par_dataset['EXISTE'] = ~par_dataset.isnull()  # the ~ operator inverts the True/False values
        par_dataset['VAR_INTEGRIDAD'] = par_dataset['EXISTE'].astype(int)
    if tipo == 2:
        par_dataset['NUM_REGISTROS'] = len(list(dataset))  # how many records should each case have?
        par_dataset['REGISTROS_EXISTEN'] = dataset.notnull().sum(axis=1)  # how many records have data?
        par_dataset['VAR_INTEGRIDAD'] = par_dataset['REGISTROS_EXISTEN'] / par_dataset['NUM_REGISTROS']
    if tipo == 3:
        # Additionally, for this case the integrity is reassigned in the compiler during the
        # integrity review, after running SUN_Integridad
        par_dataset['EXISTE'] = True
        par_dataset['VAR_INTEGRIDAD'] = par_dataset['EXISTE'].astype(int)

    variables_par_dataset = list(par_dataset)
    par_dataset['CVE_MUN'] = dataset.index
    return par_dataset, variables_par_dataset
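A minimal usage sketch for VarInt; the column names and index values below are invented purely for illustration, and only the function signature comes from the module above:

import pandas as pd

dataset = pd.DataFrame(
    {'A2010': [1.0, None, 3.0], 'A2011': [2.0, None, None]},
    index=[1001, 1002, 1003])              # the index plays the role of CVE_MUN
par_dataset = dataset[['A2011']].copy()    # single parameter column

par_dataset, cols = VarInt(par_dataset, dataset, tipo=2)
print(par_dataset['VAR_INTEGRIDAD'])       # expected: 1.0, 0.0 and 0.5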
The taste of fresh sliced melon washes the palate and refreshes the spirit. A special blend of organic white tea with all the antioxidant power that white tea leaves have to offer and fresh cantaloupe essence and jasmine. Perfect as an iced tea or after a meal. As far as I know, there are no figs in it, but, there is something about the aroma of this tea that mentally transports me to the fig orchard that I spent many an hour in as a young child, picking sun-ripened figs and eating them right there in the orchard. But, I do want to clarify – this tea DOES NOT taste like figs. It doesn’t even really smell like figs. There is just something in the aroma that brought that memory to the surface for me. The white tea base is – not surprisingly – rather delicate but I can definitely taste it – it tastes refreshing and light. There is a pleasant balance between tea and melon taste. The melon tastes juicy and sweet – a very pleasant fruity taste that complements some of the softer features of the white tea. As tasty as the melon flavor is here, there is a part of me that wishes it were not quite so strong so that I could taste more of the jasmine, which just sort of lingers – whispering ever so softly – in the background. It is a nice touch, but, this jasmine lover wants just a little more of jasmine’s sweet song to dance upon my palate. This is a delicious blend that is very refreshing. It is very good hot, but, I strongly suggest trying it iced. It’s truly amazing as an iced tea and has become one of my FAVORITE teas to serve iced. And I’ve tried a lot of teas!
## @package csnVisualStudio2010 # Definition of the csnVisualStudio2010 compilers. # \ingroup compiler import csnCompiler import os class Compiler(csnCompiler.Compiler): """ Abstract Visual Studio 2010 compiler. """ def __init__(self): csnCompiler.Compiler.__init__(self) self.postProcessor = PostProcessor() def GetCompileFlags(self): return [""] def IsForPlatform(self, _WIN32, _NOT_WIN32): return _WIN32 or (not _WIN32 and not _NOT_WIN32) def GetOutputSubFolder(self, _configuration = "${CMAKE_CFG_INTDIR}"): """ Returns the folder where the compiler should place binaries for _configuration. The default value for _configuration returns the output folder for the current configuration. for storing binaries. """ if _configuration == "DebugAndRelease": return "bin" else: return "bin/%s" % (_configuration) def GetBuildSubFolder(self, _projectType, _projectName): return "%s/%s" % (_projectType, _projectName) def GetThirdPartySubFolder(self): return "" def GetThirdPartyCMakeParameters(self): return [] def GetProjectCMakeParameters(self): return [] def GetAllowedConfigurations(self): return ["DebugAndRelease"] def GetPostProcessor(self): return self.postProcessor def TargetIsMac(self): return False def TargetIsLinux(self): return False class Compiler32(Compiler): """ Visual Studio 2010 32bits compiler. """ def GetName(self): return "Visual Studio 10" def TargetIs32Bits(self): return True def TargetIs64Bits(self): return False class Compiler64(Compiler): """ Visual Studio 2010 64bits compiler. """ def GetName(self): return "Visual Studio 10 Win64" def TargetIs32Bits(self): return False def TargetIs64Bits(self): return True class PostProcessor: def Do(self, _project): """ Post processes the vcproj file generated for _project. """ # vc proj to patch if not _project.dependenciesManager.isTopLevel: slnFilename = "%s/%s.sln" % (_project.GetBuildFolder(), _project.name) if os.path.exists(slnFilename): os.remove(slnFilename)
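A brief, hypothetical usage sketch of these classes (it assumes the surrounding CSnake framework, i.e. the csnCompiler module, is importable; the configuration name passed in is just an example):

# Hypothetical usage; the return values in the comments follow from the methods defined above.
compiler = Compiler64()
print(compiler.GetName())                              # "Visual Studio 10 Win64"
print(compiler.GetOutputSubFolder("Release"))          # "bin/Release"
print(compiler.GetOutputSubFolder("DebugAndRelease"))  # "bin"
print(compiler.GetAllowedConfigurations())             # ["DebugAndRelease"]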
According to the latest breaking news, the president of the USA, Mr. Donald Trump, has been facing large-scale criticism and threats from the governments of six states along with Texas over his administration's struggling campaign to bring the in-effect DACA to a complete end. Deferred Action for Childhood Arrivals, or DACA for short, is a policy that gives children who were brought into the country as illegal immigrants the ability to renew a two-year reprieve from deportation. It was initiated by the previous president, Barack Obama, in June 2012. You can watch videos online to learn more about it. The push to end this policy is gathering attention, and anything concerning it is hitting the world news headlines.

Why is it being opposed? According to Mr. Trump and like-minded supporters, the influx of these immigrants poses a challenge and a threat to native Americans. They further say that jobs that should have been available to US citizens are being taken away and that wages are declining because of this allowance. Although several administrations have appealed to the general public that Americans are victimized in many respects under this policy, there are forces that oppose any action leading to the termination of DACA. Current affairs commentary in the US argues that any attempt to bring an end to DACA is nothing but a misuse of presidential powers. People are also worried about the future of the children who were brought in illegally. The courts have strongly condemned the forceful move, are demanding a solid reason for stopping the policy, and are asking the government to arrange an alternative for those currently residing in the nation. Mr. Trump is urging Congress to unite with him to end DACA, while there are protestors out demanding the termination.
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from sqlalchemy import Boolean, Column, ForeignKey, Integer, String from sqlalchemy.orm import relationship from pulseguardian.model.base import Base from pulseguardian.model.binding import Binding class Queue(Base): __tablename__ = 'queues' name = Column(String(255), primary_key=True) owner_id = Column(Integer, ForeignKey('pulse_users.id'), nullable=True) size = Column(Integer) # whether the queue can grow beyond the deletion size without being deleted unbounded = Column(Boolean, default=False) warned = Column(Boolean) durable = Column(Boolean, nullable=False, default=False) bindings = relationship(Binding, cascade='save-update, merge, delete') def __repr__(self): return "<Queue(name='{0}', owner='{1}')>".format(self.name, self.owner) __str__ = __repr__
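A rough usage sketch, assuming the pulseguardian package is importable (the module path for Queue and the in-memory SQLite engine are illustrative assumptions; the real application wires up its own engine and session):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from pulseguardian.model.base import Base
from pulseguardian.model.queue import Queue  # assumed module path for the class above

# Illustrative in-memory database; SQLite will not enforce the pulse_users foreign key here.
engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

q = Queue(name='queue/example/test', size=0, unbounded=False, warned=False, durable=True)
session.add(q)
session.commit()
print(session.query(Queue).count())  # 1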
This book applies to you, the hard worker, up against chaotic projects, poor communication, and other barriers to getting things done. Because work doesn’t have to be a four-letter word.

You may work for a company that claims to have an “open workspace” until you notice that the cube walls are taller than you and the floor looks like a labyrinth. Or the workspace is physically open but the people interact as if they’re surrounded by walls.

Every industry fights the battle against jargon and meaningless words. The people within an industry have a weird zombie or Stepford-wife-like acceptance of these words. We believe that deep down we all rationally know these words have no intrinsic meaning, yet we all irrationally accept these words and phrases in the context of our jobs. We accept their use, we use them ourselves, and we subconsciously add new ones to our vocabulary all the time. These absurd words and phrases even leak into our personal lives.

Creative directors, account planners, strategists, and everyone else: no one is bad because of their title. These positions grew out of a need to get the work done as the type of work we do changes.

If your job at a creative agency or business consultancy involves producing work, your schedule and how you spend your days are out of your control most days. Your time is often booked with endless meetings and constant reviews, and if you find yourself working somewhere that has adopted a twisted form of “agile,” then you are screwed, really screwed.

You would never try to hammer a nail with a pencil. Yet a common source of frustration at many companies is that they are using the wrong tool for the job, or using the same tool to do two very different things as a cost-cutting measure. You need a hammer to hammer and a pencil to write, but the two are not interchangeable.

Rena Tom is the founder of Makeshift Society, a coworking space and clubhouse in San Francisco, with a new location in Brooklyn. She believes in camaraderie and creativity as essentials to make a great workplace and to do great work. Her support of independent professionals has been featured in publications like The New York Times, The Wall Street Journal, and Real Simple.

such as best-selling author Sara Paretsky and Oscar-winning musicians The Swell Season. He conceived, designed, and self-published BROKEN, in addition to sharing how it was made at his blog.

Stephanie Di Biase is a designer who runs the design studio Ways & Means. Never shy of business problems, she delivers an aromatic blend of user-experience street smarts and creative bling for her clients. She loves designing things that people actually want or need, most of which have been digital in nature.

Diligently tested and enhanced for reading with Amazon Kindle and the Apple iBooks app on iPad. It should work on most eBook readers, including Nook and Kobo. You will be brought to print-on-demand service TheBookPatch.com to get a well-made copy: tactile, lean, handy, and especially in this case, potentially productive.
from __future__ import division import math from datetime import datetime from pyglow.pyglow import Point # using back-port for python2 (enum34 package) --- this is in the # python3 standard library (>=3.4) from enum import Enum class OplusType(Enum): ne = 1 charge_neutrality = 2 """ The following (alpha1 through BETA_1D) are from Table 2.2 from Makela's dissertation. They are in turn from: Link and Cogger, A reexamination of the O I 6300 \AA nightglow, Journal of Geophysical Research, vol. 93, pp. 9883--9892, 1988. """ def alpha1(Te): """[cm^3 / s]""" return 1.95e-7 * (Te / 300.)**(-0.7) def alpha2(Te): """[cm^3 / s]""" return 4.00e-7 * (Te / 300.)**(-0.9) def k1(Ti, exp=math.exp): """[cm^3 / s]""" return 3.23e-12 * exp(3.72/(Ti/300) - 1.87/(Ti/300)**2) def k2(Ti, exp=math.exp): """[cm^3 / s]""" return 2.78e-13 * exp(2.07/(Ti/300) - 0.61/(Ti/300)**2) def k3(Tn, exp=math.exp): """[cm^3 / s]""" return 2.0e-11 * exp(111.8/Tn) def k4(Tn, exp=math.exp): """[cm^3 / s]""" return 2.9e-11 * exp(67.5/Tn) def k5(Tn): """[cm^3 / s]""" return 1.6e-12 * Tn**(0.91) A_1D = 7.45e-3 """[1/s]""" A_6300 = 5.63e-3 """[1/s]""" BETA_1D = 1.1 def Oplus_simple(ne): """ """ return ne def Oplus(ne, Te, Ti, O2, N2, exp=math.exp): """ """ return ne / (1 \ + k1(Ti, exp=exp) * O2 / (alpha1(Te) * ne) \ + k2(Ti, exp=exp) * N2 / (alpha2(Te) * ne)) def emission_v6300(ne, Te, Ti, Tn, O2, N2, oplus_type=OplusType.charge_neutrality, exp=math.exp): """ """ if oplus_type == OplusType.ne: oplus = Oplus_simple(ne) elif oplus_type == OplusType.charge_neutrality: oplus = Oplus(ne, Te, Ti, O2, N2, exp=exp) else: raise NotImplemented('oplus_type = ' + str(oplus_type)) N = (A_1D / A_6300) * BETA_1D * k1(Ti, exp=exp) * O2 * oplus D = 1 + (k3(Tn, exp=exp) * N2 + k4(Tn, exp=exp) * O2 + k5(Tn) * ne) / A_1D return N / D
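A small numerical sketch of calling emission_v6300 directly; the plasma and neutral values below are made-up, roughly F-region-like numbers chosen only for illustration (in practice they would come from a pyglow Point):

# Hypothetical inputs: number densities in cm^-3, temperatures in K.
ne = 5.0e5    # electron density
Te = 1000.0   # electron temperature
Ti = 950.0    # ion temperature
Tn = 900.0    # neutral temperature
O2 = 1.0e7    # molecular oxygen density
N2 = 1.0e8    # molecular nitrogen density

V_simple = emission_v6300(ne, Te, Ti, Tn, O2, N2, oplus_type=OplusType.ne)
V_full = emission_v6300(ne, Te, Ti, Tn, O2, N2,
                        oplus_type=OplusType.charge_neutrality)
print(V_simple, V_full)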
British sprinter Sonia Lannaman talks about the triumphs and failures of her career during the 1970s. Scene had previously met Sonia in the programme Sonia and the Olympic Games prior to the Munich Olympics. This follow-up programme was shown in 1979 and features the athlete talking about her career up to the 1978 European Championships. It was slightly re-edited in autumn 1980 to include her thoughts on the 1980 Moscow Olympics, at which she won a bronze medal.
"""Limited-thrust circular orbit transfer.""" import functools import numpy as np import sympy from sympy import sin, cos from scipy import constants, integrate, interpolate import sym2num.model import sym2num.utils import sym2num.var from ceacoest import oc from ceacoest.modelling import symoc @symoc.collocate(order=2) class CircularOrbit: """Symbolic limited-thrust circular orbit transfer optimal control model.""" @sym2num.utils.classproperty @functools.lru_cache() def variables(cls): """Model variables definition.""" consts = ['mu', 've', 'T_max', 'R_final'] obj = sym2num.var.SymbolObject( 'self', sym2num.var.SymbolArray('consts', consts) ) vars = [obj, sym2num.var.SymbolArray('x', ['X', 'Y', 'vx', 'vy', 'm']), sym2num.var.SymbolArray('u', ['T', 'Txn', 'Tyn']), sym2num.var.SymbolArray('p', ['tf'])] return sym2num.var.make_dict(vars) @sym2num.model.collect_symbols def f(self, x, u, p, *, s): """ODE function.""" R3 = (s.X**2 + s.Y**2) ** 1.5 gx = - s.mu * s.X / R3 gy = - s.mu * s.Y / R3 Tx = s.T * s.Txn * s.T_max Ty = s.T * s.Tyn * s.T_max ax = gx + Tx / s.m ay = gy + Ty / s.m mdot = - s.T * s.T_max / s.ve f = [s.vx, s.vy, ax, ay, mdot] return sympy.Array(f) * s.tf @sym2num.model.collect_symbols def g(self, x, u, p, *, s): """Path constraints.""" return sympy.Array([s.Txn**2 + s.Tyn**2 - 1]) @sym2num.model.collect_symbols def h(self, xe, p, *, s): """Endpoint constraints.""" R_error = (s.X_final ** 2 + s.Y_final ** 2)/s.R_final - s.R_final v_dot_r = (s.X_final * s.vx_final + s.Y_final * s.vy_final) / s.R_final r_cross_v = s.X_final * s.vy_final - s.Y_final * s.vx_final V = sympy.sqrt(s.vx_final**2 + s.vy_final**2) V_error = r_cross_v * V - s.mu return sympy.Array([R_error, v_dot_r, V_error]) @sym2num.model.collect_symbols def M(self, xe, p, *, s): """Mayer (endpoint) cost.""" return sympy.Array(s.tf) @sym2num.model.collect_symbols def L(self, x, u, p, *, s): """Lagrange (running) cost.""" return sympy.Array(0) if __name__ == '__main__': symb_mdl = CircularOrbit() GeneratedCircularOrbit = sym2num.model.compile_class(symb_mdl) mu = 1 ve = 50 T_max = 0.025 R_final = 2 mdl_consts = dict(mu=mu, ve=ve, T_max=T_max, R_final=R_final) mdl = GeneratedCircularOrbit(**mdl_consts) t = np.linspace(0, 1, 500) problem = oc.Problem(mdl, t) tc = problem.tc dec_bounds = np.repeat([[-np.inf], [np.inf]], problem.ndec, axis=-1) dec_L, dec_U = dec_bounds problem.set_decision_item('tf', 0, dec_L) #problem.set_decision_item('tf', 10, dec_U) problem.set_decision_item('m', 0, dec_L) problem.set_decision_item('T', 0, dec_L) problem.set_decision_item('T', 1, dec_U) problem.set_decision_item('Txn', -1.5, dec_L) problem.set_decision_item('Txn', 1.5, dec_U) problem.set_decision_item('Tyn', -1.5, dec_L) problem.set_decision_item('Tyn', 1.5, dec_U) problem.set_decision_item('X_initial', 1, dec_L) problem.set_decision_item('X_initial', 1, dec_U) problem.set_decision_item('Y_initial', 0, dec_L) problem.set_decision_item('Y_initial', 0, dec_U) problem.set_decision_item('vx_initial', 0, dec_L) problem.set_decision_item('vx_initial', 0, dec_U) problem.set_decision_item('vy_initial', 1, dec_L) problem.set_decision_item('vy_initial', 1, dec_U) problem.set_decision_item('m_initial', 1, dec_L) problem.set_decision_item('m_initial', 1, dec_U) constr_bounds = np.zeros((2, problem.ncons)) constr_L, constr_U = constr_bounds dec_scale = np.ones(problem.ndec) problem.set_decision_item('m', 1, dec_scale) constr_scale = np.ones(problem.ncons) problem.set_constraint('h', 10, constr_scale) problem.set_defect_scale('m', 1, dec_scale) 
obj_scale = 1 dec0 = np.zeros(problem.ndec) problem.set_decision_item('m', 1, dec0) problem.set_decision_item('tf', 2*np.pi, dec0) problem.set_decision_item('X', np.cos(2*np.pi*tc), dec0) problem.set_decision_item('Y', np.sin(2*np.pi*tc), dec0) problem.set_decision_item('vx', -np.sin(2*np.pi*tc), dec0) problem.set_decision_item('vy', np.cos(2*np.pi*tc), dec0) problem.set_decision_item('Txn', 1, dec0) with problem.ipopt(dec_bounds, constr_bounds) as nlp: nlp.add_str_option('linear_solver', 'ma57') nlp.add_num_option('tol', 1e-6) nlp.add_int_option('max_iter', 3000) nlp.set_scaling(obj_scale, dec_scale, constr_scale) decopt, info = nlp.solve(dec0) opt = problem.variables(decopt) xopt = opt['x'] uopt = opt['u'] Topt = opt['p'] iopt = mdl.g(xopt, uopt, Topt) topt = problem.tc * Topt
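A hedged follow-up sketch for inspecting the solution (it assumes matplotlib is available, that xopt, uopt and topt from the script above are in scope, and that the state and control columns follow the declared order X, Y, vx, vy, m and T, Txn, Tyn):

import matplotlib.pyplot as plt

# Transfer trajectory in the orbital plane and throttle history over time.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
ax1.plot(xopt[:, 0], xopt[:, 1])   # X vs Y (assumed column order)
ax1.set_xlabel('X')
ax1.set_ylabel('Y')
ax1.set_aspect('equal')
ax2.plot(topt, uopt[:, 0])         # throttle T in [0, 1] versus time
ax2.set_xlabel('t')
ax2.set_ylabel('T')
fig.tight_layout()
plt.show()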
Osmium is very hard and has the highest specific weight of all elements (twice as heavy as lead). It has the highest melting point of the platinum-group metals. Its bulk modulus is the highest of all known metals and compounds, making it even less compressible than diamond. Osmium is considered chemically inert. Technical applications for osmium are relatively scarce owing to its rarity and its complicated manufacturing process. It is used in part in catalysts for the synthesis of fine chemicals and organometallic compounds. In the jewelry industry, osmium is processed into crystals.
from django.contrib.auth.models import AnonymousUser, User from django.template import Context, Template from django.test import SimpleTestCase, TestCase from django.urls import reverse from rest_framework import status from ..api.serializers import SnippetSerializer from ..models import Bookmark, Language, Snippet from ..templatetags.markup import safe_markdown # @skip("These tests don't test production code.") # @override_settings(ROOT_URLCONF='cab.tests.urls') class BaseCabTestCase(TestCase): def setUp(self): """ Because tags and ratings use GFKs which require content-type-ids, and as I am running 1.1.X at the moment, do all this stuff in the setUp() """ self.user_a = User.objects.create_user('a', 'a', 'a') self.user_b = User.objects.create_user('b', 'b', 'b') self.python = Language.objects.create( name='Python', slug='python', language_code='python', mime_type='text/x-python', file_extension='py') self.sql = Language.objects.create( name='SQL', slug='sql', language_code='sql', mime_type='text/x-sql', file_extension='sql') self.snippet1 = Snippet.objects.create( title='Hello world', language=self.python, author=self.user_a, description='A greeting\n==========', code='print "Hello, world"') self.snippet1.tags.add('hello', 'world') self.snippet2 = Snippet.objects.create( title='Goodbye world', language=self.python, author=self.user_b, description='A farewell\n==========', code='print "Goodbye, world"') self.snippet2.tags.add('goodbye', 'world') self.snippet3 = Snippet.objects.create( title='One of these things is not like the others', language=self.sql, author=self.user_a, description='Haxor some1z db', code='DROP TABLE accounts;') self.snippet3.tags.add('haxor') self.bookmark1 = Bookmark.objects.create(snippet=self.snippet1, user=self.user_a) self.bookmark2 = Bookmark.objects.create(snippet=self.snippet1, user=self.user_b) self.bookmark3 = Bookmark.objects.create(snippet=self.snippet3, user=self.user_a) self.snippet1.ratings.rate(self.user_a, 1) self.snippet1.ratings.rate(self.user_b, 1) self.snippet2.ratings.rate(self.user_a, -1) self.snippet2.ratings.rate(self.user_b, -1) self.snippet3.ratings.rate(self.user_a, 1) self.snippet3.ratings.rate(self.user_b, -1) self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk) self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk) self.snippet3 = Snippet.objects.get(pk=self.snippet3.pk) def ensure_login_required(self, url, username, password): """ A little shortcut that will hit a url, check for a login required redirect, then after logging in return the logged-in response """ self.client.logout() resp = self.client.get(url) self.assertRedirects(resp, '/accounts/login/?next=%s' % url, fetch_redirect_response=False) self.client.login(username=username, password=password) resp = self.client.get(url) self.client.logout() return resp class ManagerTestCase(BaseCabTestCase): """ Tests covering manager methods -- currently most "popular" this or that are handled by managers. 
""" def test_top_languages(self): top_languages = Language.objects.top_languages() self.assertEqual(top_languages[0], self.python) self.assertEqual(top_languages[1], self.sql) self.assertEqual(top_languages[0].score, 2) self.assertEqual(top_languages[1].score, 1) def test_top_authors(self): top_authors = Snippet.objects.top_authors() self.assertEqual(top_authors[0], self.user_a) self.assertEqual(top_authors[1], self.user_b) self.assertEqual(top_authors[0].score, 2) self.assertEqual(top_authors[1].score, 1) def test_top_tags(self): top_tags = Snippet.objects.top_tags() self.assertEqual(top_tags[0].name, 'world') self.assertEqual(top_tags[0].num_times, 2) self.assertEqual(top_tags[1].name, 'goodbye') self.assertEqual(top_tags[2].name, 'haxor') self.assertEqual(top_tags[3].name, 'hello') def test_top_rated(self): top_rated = Snippet.objects.top_rated() self.assertEqual(top_rated[0], self.snippet1) self.assertEqual(top_rated[1], self.snippet3) self.assertEqual(top_rated[2], self.snippet2) def test_most_bookmarked(self): most_bookmarked = Snippet.objects.most_bookmarked() self.assertEqual(most_bookmarked[0], self.snippet1) self.assertEqual(most_bookmarked[1], self.snippet3) self.assertEqual(most_bookmarked[2], self.snippet2) class ModelTestCase(BaseCabTestCase): """ Tests to make sure that custom model signal handlers, denormalized fields, work as expected """ def test_snippet_escaping(self): self.snippet1.description = '<script>alert("hacked");</script>' self.snippet1.save() self.assertEqual( self.snippet1.description_html, '&lt;script&gt;alert("hacked");&lt;/script&gt;' ) def test_ratings_hooks(self): # setUp() will actually fire off most of these hooks self.assertEqual(self.snippet1.rating_score, 2) # calling the hooks manually doesn't affect the results self.snippet1.update_rating() self.assertEqual(self.snippet1.rating_score, 2) # check the other snippets self.assertEqual(self.snippet2.rating_score, -2) self.assertEqual(self.snippet3.rating_score, 0) self.snippet1.ratings.rate(self.user_a, -1) # refresh from the db self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk) self.assertEqual(self.snippet1.rating_score, 0) self.snippet3.ratings.rate(self.user_a, -1) # refresh from the db self.snippet3 = Snippet.objects.get(pk=self.snippet3.pk) self.assertEqual(self.snippet3.rating_score, -2) def test_bookmark_hooks(self): self.assertEqual(self.snippet1.bookmark_count, 2) # update_bookmark_count() doesn't screw things up self.snippet1.update_bookmark_count() self.assertEqual(self.snippet1.bookmark_count, 2) self.assertEqual(self.snippet2.bookmark_count, 0) self.assertEqual(self.snippet3.bookmark_count, 1) # create a new bookmark and check that the count got updated b = Bookmark.objects.create(user=self.user_b, snippet=self.snippet2) # refresh from the db self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk) self.assertEqual(self.snippet2.bookmark_count, 1) # delete a bookmark and check that the count got updated b.delete() # refresh from the db self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk) self.assertEqual(self.snippet2.bookmark_count, 0) def test_snippet_description(self): # these may be pointless, but make sure things get marked-down on save self.assertEqual(self.snippet1.description_html, '<h1>A greeting</h1>') self.snippet1.description = '**Booyakasha**' self.snippet1.save() self.assertTrue('<strong>Booyakasha</strong>' in self.snippet1.description_html) def test_tag_string(self): # yes. 
test a list comprehension self.assertEqual(self.snippet1.get_tagstring(), 'hello, world') self.assertEqual(self.snippet2.get_tagstring(), 'goodbye, world') self.assertEqual(self.snippet3.get_tagstring(), 'haxor') class ViewTestCase(BaseCabTestCase): def test_bookmark_views(self): # gotta have it user_bookmarks = reverse('cab_user_bookmarks') self.assertEqual(user_bookmarks, '/bookmarks/') # test for the login-required bits resp = self.ensure_login_required(user_bookmarks, 'a', 'a') self.assertCountEqual(resp.context['object_list'], [self.bookmark1, self.bookmark3]) resp = self.ensure_login_required(user_bookmarks, 'b', 'b') self.assertCountEqual(resp.context['object_list'], [self.bookmark2]) add_bookmark = reverse('cab_bookmark_add', args=[self.snippet2.pk]) self.assertEqual(add_bookmark, '/bookmarks/add/%d/' % self.snippet2.pk) # add a bookmark -- this does *not* require a POST for some reason so # this test will need to be amended when I get around to fixing this resp = self.ensure_login_required(add_bookmark, 'a', 'a') self.assertRedirects(resp, '/snippets/%d/' % self.snippet2.pk) new_bookmark = Bookmark.objects.get(user=self.user_a, snippet=self.snippet2) resp = self.ensure_login_required(user_bookmarks, 'a', 'a') self.assertCountEqual(resp.context['object_list'], [self.bookmark1, self.bookmark3, new_bookmark]) # make sure we have to log in to delete a bookmark delete_bookmark = reverse('cab_bookmark_delete', args=[self.snippet2.pk]) self.assertEqual(delete_bookmark, '/bookmarks/delete/%d/' % self.snippet2.pk) resp = self.ensure_login_required(delete_bookmark, 'a', 'a') # login and post to delete the bookmark self.client.login(username='a', password='a') resp = self.client.post(delete_bookmark) self.assertRedirects(resp, '/snippets/%d/' % self.snippet2.pk) # the bookmark is gone! 
self.assertRaises(Bookmark.DoesNotExist, Bookmark.objects.get, user=self.user_a, snippet=self.snippet2) # check the bookmark list view and make sure resp = self.ensure_login_required(user_bookmarks, 'a', 'a') self.assertCountEqual(resp.context['object_list'], [self.bookmark1, self.bookmark3]) def test_language_views(self): # where would we be without you language_url = reverse('cab_language_list') self.assertEqual(language_url, '/languages/') resp = self.client.get(language_url) self.assertEqual(resp.status_code, 200) self.assertCountEqual(resp.context['object_list'], [self.python, self.sql]) language_detail = reverse('cab_language_detail', args=['python']) self.assertEqual(language_detail, '/languages/python/') resp = self.client.get(language_detail) self.assertEqual(resp.status_code, 200) self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2]) self.assertEqual(resp.context['language'], self.python) def test_popular_views(self): top_authors = reverse('cab_top_authors') self.assertEqual(top_authors, '/users/') resp = self.client.get(top_authors) self.assertEqual(resp.status_code, 200) user_a, user_b = resp.context['object_list'] self.assertEqual(user_a, self.user_a) self.assertEqual(user_b, self.user_b) top_languages = reverse('cab_top_languages') self.assertEqual(top_languages, '/popular/languages/') resp = self.client.get(top_languages) self.assertEqual(resp.status_code, 200) python, sql = resp.context['object_list'] self.assertEqual(python, self.python) self.assertEqual(sql, self.sql) top_tags = reverse('cab_top_tags') self.assertEqual(top_tags, '/tags/') resp = self.client.get(top_tags) self.assertEqual(resp.status_code, 200) tag_names = [tag.name for tag in resp.context['object_list']] self.assertEqual(tag_names, ['world', 'goodbye', 'haxor', 'hello']) top_bookmarked = reverse('cab_top_bookmarked') self.assertEqual(top_bookmarked, '/popular/bookmarked/') resp = self.client.get(top_bookmarked) self.assertEqual(resp.status_code, 200) s1, s3, s2 = resp.context['object_list'] self.assertEqual(s1, self.snippet1) self.assertEqual(s3, self.snippet3) self.assertEqual(s2, self.snippet2) top_rated = reverse('cab_top_rated') self.assertEqual(top_rated, '/popular/rated/') resp = self.client.get(top_rated) self.assertEqual(resp.status_code, 200) s1, s3, s2 = resp.context['object_list'] self.assertEqual(s1, self.snippet1) self.assertEqual(s3, self.snippet3) self.assertEqual(s2, self.snippet2) def test_tag_detail(self): tag_detail = reverse('cab_snippet_matches_tag', args=['world']) self.assertEqual(tag_detail, '/tags/world/') resp = self.client.get(tag_detail) self.assertEqual(resp.status_code, 200) self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2]) def test_author_detail(self): author_detail = reverse('cab_author_snippets', args=['a']) self.assertEqual(author_detail, '/users/a/') resp = self.client.get(author_detail) self.assertEqual(resp.status_code, 200) self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet3]) def test_feeds(self): # I don't want to put much time into testing these since the response # is kind of fucked up. 
resp = self.client.get('/feeds/latest/') self.assertEqual(resp.status_code, 200) resp = self.client.get('/feeds/author/a/') self.assertEqual(resp.status_code, 200) resp = self.client.get('/feeds/author/c/') self.assertEqual(resp.status_code, 404) resp = self.client.get('/feeds/tag/world/') self.assertEqual(resp.status_code, 200) resp = self.client.get('/feeds/tag/nothing/') self.assertEqual(resp.status_code, 404) resp = self.client.get('/feeds/language/python/') self.assertEqual(resp.status_code, 200) resp = self.client.get('/feeds/language/java/') self.assertEqual(resp.status_code, 404) class SnippetViewsTestCase(BaseCabTestCase): def test_index(self): snippet_index = reverse('cab_snippet_list') self.assertEqual(snippet_index, '/snippets/') resp = self.client.get(snippet_index) self.assertEqual(resp.status_code, 200) self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2, self.snippet3]) def test_snippet_detail(self): snippet_detail = reverse('cab_snippet_detail', args=[self.snippet1.pk]) self.assertEqual(snippet_detail, '/snippets/%d/' % self.snippet1.pk) resp = self.client.get(snippet_detail) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.context['object'], self.snippet1) def test_snippet_download(self): snippet_download = reverse('cab_snippet_download', args=[self.snippet1.pk]) self.assertEqual(snippet_download, '/snippets/%d/download/' % self.snippet1.pk) resp = self.client.get(snippet_download) self.assertEqual(resp['content-type'], 'text/x-python') self.assertEqual(resp.content, b'print "Hello, world"') def test_snippet_rate(self): self.snippet1.ratings.clear() self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk) self.assertEqual(self.snippet1.rating_score, 0) self.assertEqual(self.snippet1.ratings.count(), 0) snippet_rate = reverse('cab_snippet_rate', args=[self.snippet1.pk]) self.assertEqual(snippet_rate, '/snippets/%d/rate/' % self.snippet1.pk) resp = self.client.get(snippet_rate + '?score=up') self.assertEqual(resp.status_code, 302) self.assertTrue('accounts/login' in resp['location']) self.client.login(username='a', password='a') resp = self.client.get(snippet_rate + '?score=NaN') self.assertEqual(self.snippet1.ratings.count(), 0) resp = self.client.get(snippet_rate + '?score=up') self.assertEqual(self.snippet1.ratings.count(), 1) self.assertEqual(self.snippet1.ratings.cumulative_score(), 1) resp = self.client.get(snippet_rate + '?score=down') self.assertEqual(self.snippet1.ratings.count(), 1) self.assertEqual(self.snippet1.ratings.cumulative_score(), -1) def test_snippet_unrate_up(self): """ Sending the score "reset" should remove a user's vote. 
""" self.snippet1.ratings.clear() self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk) snippet_rate = reverse('cab_snippet_rate', args=[self.snippet1.pk]) self.assertEqual(self.snippet1.rating_score, 0) self.assertEqual(self.snippet1.ratings.count(), 0) self.client.login(username='a', password='a') self.client.get(snippet_rate + '?score=up') self.assertEqual(self.snippet1.ratings.count(), 1) self.snippet1.update_rating() self.assertEqual(self.snippet1.rating_score, 1) self.client.get(snippet_rate + '?score=reset') self.assertEqual(self.snippet1.ratings.count(), 0) self.snippet1.update_rating() self.assertEqual(self.snippet1.rating_score, 0) def test_snippet_edit(self): snippet_edit = reverse('cab_snippet_edit', args=[self.snippet1.pk]) self.assertEqual(snippet_edit, '/snippets/%d/edit/' % self.snippet1.pk) resp = self.client.get(snippet_edit) self.assertEqual(resp.status_code, 302) self.assertTrue('accounts/login' in resp['location']) self.client.login(username='b', password='b') resp = self.client.get(snippet_edit) self.assertEqual(resp.status_code, 403) self.client.login(username='a', password='a') resp = self.client.get(snippet_edit) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.context['form'].instance, self.snippet1) payload = {'title': 'Hi', 'version': '1.1', 'language': str(self.python.pk), 'description': 'wazzah\n======', 'code': 'print "Hi"', 'tags': 'hi, world'} resp = self.client.post(snippet_edit, payload) snippet1 = Snippet.objects.get(pk=self.snippet1.pk) self.assertEqual(snippet1.title, 'Hi') self.assertEqual(snippet1.description_html, '<h1>wazzah</h1>') self.assertEqual(snippet1.code, 'print "Hi"') self.assertEqual([t.name for t in snippet1.tags.all()], ['world', 'hi']) self.assertRedirects(resp, '/snippets/%d/' % snippet1.pk) def test_snippet_edit_no_tags(self): """ The user should be able to create/edit a snippet and remove all tags or create it without any. 
""" snippet_edit = reverse('cab_snippet_edit', args=[self.snippet1.pk]) self.assertEqual(snippet_edit, '/snippets/%d/edit/' % self.snippet1.pk) resp = self.client.get(snippet_edit) self.assertEqual(resp.status_code, 302) self.assertTrue('accounts/login' in resp['location']) self.client.login(username='a', password='a') resp = self.client.get(snippet_edit) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.context['form'].instance, self.snippet1) payload = {'title': 'Hi', 'version': '1.1', 'language': str(self.python.pk), 'description': 'wazzah\n======', 'code': 'print "Hi"'} resp = self.client.post(snippet_edit, payload) snippet1 = Snippet.objects.get(pk=self.snippet1.pk) self.assertEqual(snippet1.title, 'Hi') self.assertEqual(snippet1.description_html, '<h1>wazzah</h1>') self.assertEqual(snippet1.code, 'print "Hi"') self.assertEqual(0, snippet1.tags.count()) self.assertRedirects(resp, '/snippets/%d/' % snippet1.pk) def test_snippet_add(self): snippet_add = reverse('cab_snippet_add') self.assertEqual(snippet_add, '/snippets/add/') resp = self.ensure_login_required(snippet_add, 'a', 'a') self.client.login(username='a', password='a') payload = {'title': 'Hi', 'version': '1.1', 'language': str(self.python.pk), 'description': 'wazzah\n======', 'code': 'print "Hi"', 'tags': 'hi, world'} resp = self.client.post(snippet_add, payload) new_snippet = Snippet.objects.get(title='Hi') self.assertEqual(new_snippet.title, 'Hi') self.assertEqual(new_snippet.description_html, '<h1>wazzah</h1>') self.assertEqual(new_snippet.code, 'print "Hi"') self.assertEqual([t.name for t in new_snippet.tags.all()], ['world', 'hi']) self.assertRedirects(resp, '/snippets/%d/' % new_snippet.pk) class TemplatetagTestCase(BaseCabTestCase): def test_cab_tags(self): t = Template("""{% load cab_tags %}{% if snippet|is_bookmarked:user %}Y{% else %}N{% endif %}""") c = Context({'snippet': self.snippet1, 'user': self.user_a}) rendered = t.render(c) self.assertEqual(rendered, 'Y') Bookmark.objects.filter(user=self.user_a, snippet=self.snippet1).delete() rendered = t.render(c) self.assertEqual(rendered, 'N') c = Context({'snippet': self.snippet1, 'user': AnonymousUser()}) rendered = t.render(c) self.assertEqual(rendered, 'N') def test_core_tags(self): t = Template('''{% load core_tags %}{% for s in "cab.snippet"|latest:2 %}{{ s.title }}|{% endfor %}''') rendered = t.render(Context({})) self.assertEqual(rendered, '%s|%s|' % (self.snippet3.title, self.snippet2.title)) t = Template( '{% load core_tags %}{% for t in "cab.snippet"|call_manager:"top_tags"|slice:":2" %}' '{{ t.name }}|{% endfor %}' ) rendered = t.render(Context({})) self.assertEqual(rendered, 'world|goodbye|') class MarkupTests(SimpleTestCase): def test_safe_markdown(self): self.assertEqual(safe_markdown('<p>foo</p>'), '<p>foo</p>') self.assertEqual(safe_markdown('<pre>foo</pre>'), '<pre>foo</pre>') class SearchViewsTestCase(BaseCabTestCase): def test_index(self): search_index = reverse('cab_search') self.assertEqual(search_index, '/search/') resp = self.client.get(search_index) self.assertEqual(resp.status_code, 200) self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2, self.snippet3]) def test_q_search(self): search_index = reverse('cab_search') resp = self.client.get(search_index + '?q=greeting') self.assertCountEqual(resp.context['object_list'], [self.snippet1]) resp = self.client.get(search_index + '?q=doesnotexistforsure') self.assertCountEqual(resp.context['object_list'], []) class ApiTestCase(TestCase): def setUp(self): """ 
Because tags and ratings use GFKs which require content-type-ids, and as I am running 1.1.X at the moment, do all this stuff in the setUp() """ self.user_a = User.objects.create_user('a', 'a', 'a') self.user_b = User.objects.create_user('b', 'b', 'b') self.python = Language.objects.create( name='Python', slug='python', language_code='python', mime_type='text/x-python', file_extension='py') self.sql = Language.objects.create( name='SQL', slug='sql', language_code='sql', mime_type='text/x-sql', file_extension='sql') self.snippet1 = Snippet.objects.create( title='Hello world', language=self.python, author=self.user_a, description='A greeting\n==========', code='print "Hello, world"') self.snippet1.tags.add('hello', 'world') self.snippet2 = Snippet.objects.create( title='Goodbye world', language=self.python, author=self.user_b, description='A farewell\n==========', code='print "Goodbye, world"') self.snippet2.tags.add('goodbye', 'world') self.snippet3 = Snippet.objects.create( title='One of these things is not like the others', language=self.sql, author=self.user_a, description='Haxor some1z db', code='DROP TABLE accounts;') self.snippet3.tags.add('haxor') self.bookmark1 = Bookmark.objects.create(snippet=self.snippet1, user=self.user_a) self.bookmark2 = Bookmark.objects.create(snippet=self.snippet1, user=self.user_b) self.bookmark3 = Bookmark.objects.create(snippet=self.snippet3, user=self.user_a) self.snippet1.ratings.rate(self.user_a, 1) self.snippet1.ratings.rate(self.user_b, 1) self.snippet2.ratings.rate(self.user_a, -1) self.snippet2.ratings.rate(self.user_b, -1) self.snippet3.ratings.rate(self.user_a, 1) self.snippet3.ratings.rate(self.user_b, -1) self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk) self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk) self.snippet3 = Snippet.objects.get(pk=self.snippet3.pk) def test_get_all_snippets(self): # get API response response = self.client.get(reverse('api_snippet_list')) # get data from db snippets = Snippet.objects.all() serializer = SnippetSerializer(snippets, many=True) self.assertEqual(response.data, serializer.data) self.assertEqual(response.status_code, status.HTTP_200_OK)
The TWENTY Tour. Broadcast LIVE from Croke Park stadium, Dublin. The final spectacular show of the UK and Ireland leg of the ‘The Twenty Tour’ will be broadcast LIVE to cinemas from Dublin’s iconic Croke Park stadium. Irish heartthrobs Shane, Nicky, Mark and Kian will perform brand-new music alongside their greatest hits and all 14 of their UK No.1 singles, including ‘Uptown Girl’ ‘Flying Without Wings’, ‘You Raise Me Up’ and ‘If I Let You Go’. For one unforgettable night Westlife fans from coast to coast will raise the roof once more, in celebration of one of pop’s most sensational comebacks.
""" So far, very little is actually tested here, because there aren't many functions that can be tested outside of actually running them on Travis. """ import tempfile import os from os.path import join import pytest from ..travis import sync_from_log @pytest.mark.parametrize("src", ["src"]) @pytest.mark.parametrize("dst", ['.', 'dst']) def test_sync_from_log(src, dst): with tempfile.TemporaryDirectory() as dir: try: old_curdir = os.path.abspath(os.curdir) os.chdir(dir) # Set up a src directory with some files os.makedirs(src) with open(join(src, 'test1'), 'w') as f: f.write('test1') os.makedirs(join(src, 'testdir')) with open(join(src, 'testdir', 'test2'), 'w') as f: f.write('test2') # Test that the sync happens added, removed = sync_from_log(src, dst, 'logfile') assert added == [ join(dst, 'test1'), join(dst, 'testdir', 'test2'), 'logfile', ] assert removed == [] with open(join(dst, 'test1')) as f: assert f.read() == 'test1' with open(join(dst, 'testdir', 'test2')) as f: assert f.read() == 'test2' with open('logfile') as f: assert f.read() == '\n'.join([ join(dst, 'test1'), join(dst, 'testdir', 'test2'), ]) # Create a new file with open(join(src, 'test3'), 'w') as f: f.write('test3') added, removed = sync_from_log(src, dst, 'logfile') assert added == [ join(dst, 'test1'), join(dst, 'test3'), join(dst, 'testdir', 'test2'), 'logfile', ] assert removed == [] with open(join(dst, 'test1')) as f: assert f.read() == 'test1' with open(join(dst, 'testdir', 'test2')) as f: assert f.read() == 'test2' with open(join(dst, 'test3')) as f: assert f.read() == 'test3' with open('logfile') as f: assert f.read() == '\n'.join([ join(dst, 'test1'), join(dst, 'test3'), join(dst, 'testdir', 'test2'), ]) # Delete a file os.remove(join(src, 'test3')) added, removed = sync_from_log(src, dst, 'logfile') assert added == [ join(dst, 'test1'), join(dst, 'testdir', 'test2'), 'logfile', ] assert removed == [ join(dst, 'test3'), ] with open(join(dst, 'test1')) as f: assert f.read() == 'test1' with open(join(dst, 'testdir', 'test2')) as f: assert f.read() == 'test2' assert not os.path.exists(join(dst, 'test3')) with open('logfile') as f: assert f.read() == '\n'.join([ join(dst, 'test1'), join(dst, 'testdir', 'test2'), ]) # Change a file with open(join(src, 'test1'), 'w') as f: f.write('test1 modified') added, removed = sync_from_log(src, dst, 'logfile') assert added == [ join(dst, 'test1'), join(dst, 'testdir', 'test2'), 'logfile', ] assert removed == [] with open(join(dst, 'test1')) as f: assert f.read() == 'test1 modified' with open(join(dst, 'testdir', 'test2')) as f: assert f.read() == 'test2' assert not os.path.exists(join(dst, 'test3')) with open('logfile') as f: assert f.read() == '\n'.join([ join(dst, 'test1'), join(dst, 'testdir', 'test2'), ]) finally: os.chdir(old_curdir)
This short is inspired by my love of fishing, particularly for freshwater perch on Lake Huron. Nearly every summer of my existence has included trips to my grandparents' summer house in the U.P., where I have enjoyed many days fishing for these little fish off the dock. The character is inspired by Puddleglum the marsh-wiggle from The Chronicles of Narnia. I imagine marsh-wiggles looking something like this while they are out fishing on the marshes. I animated this for the final project in the multiplane section of my Animation Workshop class.