text
stringlengths 29
850k
|
---|
import serial
import time
import matplotlib.pyplot as plt
plt.interactive(True)
print 'import'
# open up dummy serial to reset the arduino with
s = serial.Serial(port='/dev/ttyUSB1')
# reset the arduino
s.flushInput()
s.setDTR(level=False)
time.sleep(0.5)
# ensure there is no stale data in the buffer
s.flushInput()
s.setDTR()
time.sleep(1)
# now open up a new serial line for communication
s = serial.Serial(baudrate=115200, port='/dev/ttyUSB1', timeout=0.01)
#initializes plotting axis
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# initializes data
data=[]
# time for system to settle after opening serial port
time.sleep(1)
# initial read command
s.write('r')
#continuous loop that will plot the data anew each time it runs, as well as
#pass the read command to the arduino
while True:
s.write('r') #read command
#loop which iterates through the serial being read, only taking
#non-empty values and appending them to the data set
while True:
value=s.readline()
if value !='':
data.append(float(value.rstrip()))
#determines the length of the dataset to observe
if len(data)==800:
break
#plots the dataset
ax1.clear()
ax1.plot( range(len(data)), data )
plt.draw()
data=[]
|
Two one-bedroom apartments on the first floor and two apartments on the ground floor are all located in this beautiful private house, set in a quiet location not far from the sea. Guests of these apartments in Čižići have access to a balcony with sea views and outdoor parking. All apartments are air-conditioned, have internet access, and accept dogs. The famous beach with healing mud, Soline, is about 1000 meters away, and the island's attraction, Biserujka Cave, is about 2000 m away.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-11 11:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the 'atividades' app (auto-generated by Django 1.11.6).

    Creates the agenda-related tables plus their lookup tables, and wires
    the foreign keys to the configured (swappable) AUTH_USER_MODEL.
    """

    # First migration for this app.
    initial = True

    dependencies = [
        # The user model is swappable, so depend on whatever is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AgendaAdministrativa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('compartilhada', models.BooleanField(choices=[(True, 'Sim'), (False, 'Não')], verbose_name='Compartilhar agenda?')),
                ('dt_referencia', models.DateField(verbose_name='data de referência')),
                ('pauta', models.TextField(verbose_name='pauta')),
                ('inicio_acao', models.DateField(verbose_name='ínicio da ação')),
                ('status', models.BooleanField(choices=[(True, 'aberta'), (False, 'encerrada')], default=True, verbose_name='status')),
                ('prioridade', models.IntegerField(choices=[(0, 'baixa'), (1, 'média'), (2, 'alta')], default=1, verbose_name='prioridade')),
                ('fim_acao', models.DateField(blank=True, null=True, verbose_name='fim da ação')),
                ('dt_prev_dis_agenda', models.DateField(blank=True, null=True, verbose_name='data prev. discussão da agenda')),
                ('dt_prev_fim_agenda', models.DateField(blank=True, null=True, verbose_name='data prev. fim agenda')),
                ('dt_fim_agenda', models.DateField(blank=True, null=True, verbose_name='data finalização agenda')),
            ],
            options={
                'verbose_name': 'Agenda Administrativa',
                'verbose_name_plural': 'Agendas Administrativas',
                'db_table': 'tb_agenda_administrativa',
            },
        ),
        migrations.CreateModel(
            name='AgendaAnexos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('descricao', models.CharField(max_length=80, verbose_name='descrição')),
                ('anexo', models.FileField(blank=True, help_text='anexos para agendas', max_length=200, null=True, upload_to='uploads/anexos/', verbose_name='enviar arquivo')),
                ('dt_atualizacao', models.DateTimeField(auto_now_add=True, verbose_name='data atualizacao')),
                ('agenda_administrativa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='anexos', to='atividades.AgendaAdministrativa')),
                ('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='anexos', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Agenda Anexo',
                'verbose_name_plural': 'Agenda Anexos',
                'db_table': 'tb_agenda_anexo',
            },
        ),
        migrations.CreateModel(
            name='AgendaMovimentacao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('desc_movimentacao', models.TextField(blank=True, null=True, verbose_name='Movimentação')),
                ('dt_atualizacao', models.DateTimeField(auto_now_add=True, verbose_name='data atualizacao')),
                ('agenda_administrativa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.AgendaAdministrativa')),
                ('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='movimentacao', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Agenda Movimentacao',
                'verbose_name_plural': 'Agendas Movimentacao',
                'db_table': 'tb_agenda_movimentacao',
            },
        ),
        migrations.CreateModel(
            name='AgendaTipo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tipo', models.CharField(max_length=60, verbose_name='tipo')),
            ],
            options={
                'verbose_name': 'Agenda Tipo',
                'verbose_name_plural': 'Agendas Tipo',
                'db_table': 'tb_agenda_agenda_tipo',
            },
        ),
        migrations.CreateModel(
            name='DepartamentoSetor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=80, verbose_name='nome')),
            ],
            options={
                'verbose_name': 'Departamento ou Setor',
                'verbose_name_plural': 'Departamentos ou Setores',
                'db_table': 'tb_agenda_departamento_setor',
            },
        ),
        migrations.CreateModel(
            name='Esfera',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('esfera', models.CharField(max_length=60, verbose_name='esfera')),
            ],
            options={
                'verbose_name': 'Esfera',
                'verbose_name_plural': 'Esfera',
                'db_table': 'tb_agenda_esfera',
            },
        ),
        migrations.CreateModel(
            name='OrgaoDemandante',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('orgao', models.CharField(max_length=60, verbose_name='orgão')),
                ('cidade', models.CharField(max_length=80, verbose_name='cidade')),
                # Brazilian federative units (two-letter codes).
                ('uf', models.CharField(choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')], max_length=2, verbose_name='uf')),
            ],
            options={
                'verbose_name': 'Orgão demandante',
                'verbose_name_plural': 'Orgãos demandantes',
                'db_table': 'tb_agenda_orgao',
            },
        ),
        migrations.CreateModel(
            name='PessoasEnvolvidasAgenda',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=80, unique=True, verbose_name='nome')),
                ('telefone', models.CharField(max_length=15, verbose_name='telefone')),
                ('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='email')),
                ('funcionario', models.BooleanField(choices=[(True, 'sim'), (False, 'não')], verbose_name='é funcionario?')),
            ],
            options={
                'verbose_name': 'Pessoa envolvida',
                'verbose_name_plural': 'Pessoas envolvidas',
                'db_table': 'tb_pessoa_envolvida',
            },
        ),
        # Foreign keys added after AgendaAdministrativa because the target
        # models are created above in the same migration.
        migrations.AddField(
            model_name='agendaadministrativa',
            name='coordenador_agenda',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='coordenador', to='atividades.PessoasEnvolvidasAgenda'),
        ),
        migrations.AddField(
            model_name='agendaadministrativa',
            name='dpto_setor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.DepartamentoSetor'),
        ),
        migrations.AddField(
            model_name='agendaadministrativa',
            name='esfera',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.Esfera'),
        ),
        migrations.AddField(
            model_name='agendaadministrativa',
            name='orgao_demandante',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.OrgaoDemandante'),
        ),
        migrations.AddField(
            model_name='agendaadministrativa',
            name='pessoas_envolvidas',
            field=models.ManyToManyField(related_name='pessoas', to='atividades.PessoasEnvolvidasAgenda'),
        ),
        migrations.AddField(
            model_name='agendaadministrativa',
            name='tipo_agenda',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.AgendaTipo'),
        ),
        migrations.AddField(
            model_name='agendaadministrativa',
            name='usuario',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='agendas', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
In stock and shipping. Prices jump to $1899 once this batch of 50 bikes sell out.
Ride for 25-30 miles depending on riding conditions, rider weight, etc.
The Moto was designed specifically for rider comfort. Its laid back riding position and plush motorcycle bench seat make it a pleasure to ride. The big wide 4" tires make any road imperfections disappear.
The Moto comes with an industry leading 13 amp hour (Ah) Panasonic powered battery, which gives you a range of 25-30 miles per charge depending on riding conditions.
Spend more time riding and less time charging! The Moto's battery recharges from empty to a full charge in 3-4 hours. Also, you can unlock your battery and take it with you anywhere.
In almost all of North America, the moto is considered an electric bicycle. So, you can conveniently ride anywhere a bicycle is allowed. Plus, in most cases you won't need a motorcycle license, road insurance or bike registration.
The Moto is designed for moderate acceleration and hill climbing torque (45Nm). Powered by a high-performance 750 watt electric motor that puts out peaks of 1200W when needed, it is an absolute blast to ride and will take you anywhere you want!
The Moto can be ridden in 1-5 levels of pedal assist (where level 1 gives the least assist and level 5 gives the most assist), or simply twist the throttle and go without pedaling at all. Pedal assist mode gives you more range.
|
#!/usr/bin/env python
class Solution:
    def isValidSudoku(self, board: 'List[List[str]]') -> 'bool':
        """Return True if no row, column, or 3x3 box of the 9x9 board
        contains a duplicate digit.

        Empty cells are marked "." and are ignored; the board does not
        have to be solvable, only free of conflicts.

        Improvement over the original: one pass over the board with sets
        instead of three separate passes using dicts as sets.
        """
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        for i in range(9):
            for j in range(9):
                digit = board[i][j]
                if digit == ".":
                    continue
                # Index of the 3x3 box containing cell (i, j).
                b = (i // 3) * 3 + j // 3
                if digit in rows[i] or digit in cols[j] or digit in boxes[b]:
                    return False
                rows[i].add(digit)
                cols[j].add(digit)
                boxes[b].add(digit)
        return True
# Demo: run the validator on a sample board.
# The original script assigned a first board literal here and immediately
# overwrote it with this one; the dead assignment has been removed.
board = [
    ["5","3",".",".","7",".",".",".","."],
    ["6",".",".","1","9","5",".",".","."],
    [".","9","8",".",".",".",".","6","."],
    ["8",".",".",".","6",".",".",".","3"],
    ["4",".",".","8",".","3",".",".","1"],
    ["7",".",".",".","2",".",".",".","6"],
    [".","6",".",".",".",".","2","8","."],
    [".",".",".","4","1","9",".",".","5"],
    [".",".",".",".","8",".",".","7","9"]
]
sol = Solution()
print(sol.isValidSudoku(board))
|
A set of door protection plates for Land Rover Defender 90.
This is the new line of body protection with a classic design.
Designed to provide maximum protection in all terrains.
To ensure maximum effectiveness, each piece of these side guards is made of 2mm aluminum, powder coated.
Drilling is required for the installation.
The fitting material is included.
|
# NOTE(review): assigning __file__ shadows the module's real file path;
# this appears to be used only as a script name tag — confirm.
__file__ = 'CountyMapbook_v1'
__date__ = '6/18/2014'
__author__ = 'ABREZNIC'

import os, arcpy,datetime
from arcpy import env

# date: build a YYYY_MM_DD stamp used to name the output .mdb copy.
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay

# variables: local scratch workspace (one folder per year) and the
# Working.gdb file geodatabase everything is copied into.
cofolder = "C:\\TxDOT\\CountyMapbook"
workspace = cofolder + "\\" + curYear
database = workspace + "\\Working.gdb"
# Name of the SDE connection file; all source layers below are read
# through this connection.
comanche = "Connection to Comanche.sde"
restareas = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Travel\\TPP_GIS.APP_TPP_GIS_ADMIN.REST_AREA_PNT"
parks = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Park\\TPP_GIS.APP_TPP_GIS_ADMIN.Public_Lands_2014"
cemeteries = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery"
cemeteriesPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery_Points"
roads = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
counties = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.County\\TPP_GIS.APP_TPP_GIS_ADMIN.County"
airports = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport"
airportsPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport_Points"
prisons = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Prisons"
military = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Military"
schools = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Education"
cities = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
citiesPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City_Points"
lakes = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Water\\TPP_GIS.APP_TPP_GIS_ADMIN.Water_Bodies"
railroads = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Railroad\\TPP_GIS.APP_TPP_GIS_ADMIN.Railroads"
rivers = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Water\\TPP_GIS.APP_TPP_GIS_ADMIN.Streams"
# 1:120,000 state grid shapefile used as the mapbook page index.
grid = "T:\\DATAMGT\\MAPPING\\Mapping Products\\CountyMapbook\\Calendar year 2014\\District Grids\\State_Grid_120K.shp"
def preparation():
    """Build a fresh Working.gdb and copy/filter all source layers into it.

    Side effects only: (re)creates the yearly workspace folder, deletes any
    previous Working.gdb, then selects/copies each SDE source layer with
    its layer-specific WHERE clause.  Raises if any arcpy geoprocessing
    call fails.
    """
    print "Creating database..."
    if not os.path.exists(workspace):
        os.makedirs(workspace)
    else:
        # Workspace exists from a prior run: drop the old geodatabase
        # (best-effort) and clear out any loose files.
        try:
            arcpy.Delete_management(database)
        except:
            pass
        for file in os.listdir(workspace):
            thefile = os.path.join(workspace, file)
            os.remove(thefile)
    arcpy.CreateFileGDB_management(workspace, "Working.gdb")
    print "Copying rest areas..."
    # Only Travel Information Centers are kept, and given a fixed label.
    arcpy.Select_analysis(restareas, database + "\\restarea", "RA_TYPE_NM = 'TIC'")
    arcpy.AddField_management(database + "\\restarea", "label", "TEXT", "", "", 100)
    cursor = arcpy.UpdateCursor(database + "\\restarea")
    for row in cursor:
        row.setValue("label", "Travel Information Center")
        cursor.updateRow(row)
    del cursor
    # NOTE(review): `del row` raises NameError if the cursor was empty —
    # assumes at least one TIC rest area exists; confirm.
    del row
    print "Copying parks..."
    arcpy.Select_analysis(parks, database + "\\parks", "(GOVT_JURIS = '3' OR GOVT_JURIS = '4') AND LAND_NM IS NOT NULL AND LAND_NM <> ''")
    print "Copying cemeteries..."
    arcpy.Select_analysis(cemeteries, database + "\\cemetery", "CEMETERY_NM IS NOT NULL AND CEMETERY_NM <> ''")
    arcpy.Select_analysis(cemeteriesPT, database + "\\cemetery_point", "CEMETERY_NM IS NOT NULL AND CEMETERY_NM <> ''")
    print "Copying highways..."
    arcpy.Select_analysis(roads, database + "\\highways", "(( RTE_CLASS = '1' OR RTE_CLASS = '6' ) AND RDBD_TYPE = 'KG' AND RTE_OPEN = 1 ) OR (RTE_NM = 'SL0008' AND RDBD_TYPE = 'KG' AND RTE_OPEN = 1 )")
    print "Copying counties..."
    arcpy.Copy_management(counties, database + "\\counties")
    print "Copying airports..."
    arcpy.Select_analysis(airports, database + "\\airports", "ARPRT_NM <> '' AND ARPRT_NM IS NOT NULL")
    arcpy.Select_analysis(airportsPT, database + "\\airports_point", "DISPLAY = 'Yes'")
    print "Copying county roads..."
    arcpy.Select_analysis(roads, database + "\\countyroads", "RTE_CLASS = '2' AND RTE_OPEN = 1 AND RDBD_TYPE = 'KG'")
    print "Copying prisons..."
    arcpy.Copy_management(prisons, database + "\\prison")
    print "Copying military..."
    arcpy.Copy_management(military, database + "\\military")
    print "Copying schools..."
    arcpy.Copy_management(schools, database + "\\school")
    print "Copying cities..."
    arcpy.Copy_management(cities, database + "\\cities")
    arcpy.Select_analysis(citiesPT, database + "\\cities_point", "INC = 'N'")
    print "Copying lakes..."
    arcpy.Select_analysis(lakes, database + "\\lakes", "BODY_NM IS NOT NULL AND BODY_NM <> '' AND BODY_TYPE = '1'")
    print "Copying railroads..."
    arcpy.Select_analysis(railroads, database + "\\railroad", "RR_TYPE = 'M' AND RR_STAT = 'A'")
    print "Fixing railroad names..."
    # Build a code -> display-name lookup from the domain reference table,
    # then write a human-readable name into a new field.
    names = {}
    cursor = arcpy.SearchCursor("T:\\DATAMGT\\MAPPING\\Railroad\\DomainRef.dbf")
    for row in cursor:
        curnum = row.domainTXT
        names[curnum] = row.domainNM
    del cursor
    del row
    arcpy.AddField_management(database + "\\railroad", "new_name", "TEXT", "", "", 100)
    cursor = arcpy.UpdateCursor(database + "\\railroad")
    for row in cursor:
        curname = str(row.RR_NM)
        if curname in names.keys():
            row.setValue("new_name", names[curname])
        else:
            # No domain entry: fall back to the raw railroad name.
            row.setValue("new_name", row.RR_NM)
        cursor.updateRow(row)
    del cursor
    del row
    print "Copying rivers..."
    arcpy.Select_analysis(rivers, database + "\\rivers", "STRM_TYPE = '1' AND STRM_NM <> '' AND STRM_NM IS NOT NULL")
    print "Copying federal roads..."
    arcpy.Select_analysis(roads, database + "\\federal", "RTE_CLASS = '7' AND RTE_OPEN = 1 AND RDBD_TYPE = 'KG' AND FULL_ST_NM <> ''")
    print "Copying grid..."
    arcpy.Copy_management(grid, database + "\\grid")
    # arcpy.Copy_management(grid, database + "\\grid")
    # #
    # print "Renumbering grid..."
    # cursor = arcpy.UpdateCursor(database + "\\grid")
    # for row in cursor:
    #     row.setValue("ID", row.ID - 66)
    #     row.setValue("STATE_ID", row.STATE_ID - 66)
    #     if row.NORTH != 0:
    #         row.setValue("NORTH", row.NORTH - 66)
    #     if row.SOUTH != 0:
    #         row.setValue("SOUTH", row.SOUTH - 66)
    #     if row.EAST != 0:
    #         row.setValue("EAST", row.EAST - 66)
    #     if row.WEST != 0:
    #         row.setValue("WEST", row.WEST - 66)
    #     cursor.updateRow(row)
    # del cursor
    # del row
    print "Creating union..."
    # Union the page grid with counties, then drop slivers that fall
    # outside either input (blank county name or zero grid id).
    arcpy.Union_analysis([database + "\\grid", database + "\\counties"], database + "\\union")
    cursor = arcpy.UpdateCursor(database + "\\union")
    for row in cursor:
        if row.CNTY_NM == "" or row.CNTY_NM is None or row.STATE_ID == 0:
            cursor.deleteRow(row)
    del cursor
    del row
def intersects():
    """Intersect every copied layer with the grid/county union and summarize.

    For each feature class (except the union and raw grid) this produces a
    <name>__INTERSECT feature class, tags each intersected feature with a
    UNIQUE key of grid id + county name + display name, and writes a
    <name>_SUMMARIZED statistics table.  Point-based variants are then
    merged with their polygon counterparts.
    """
    env.workspace = database
    print "Creating field dictionary..."
    # Maps feature-class name -> the attribute used as its display name.
    # NOTE(review): `dict` shadows the builtin; kept as-is to leave the
    # code byte-identical.
    dict = {}
    dict["restarea"] = "label"
    dict["parks"] = "LAND_NM"
    dict["cemetery"] = "CEMETERY_NM"
    dict["cemetery_point"] = "CEMETERY_NM"
    dict["highways"] = "FULL_ST_NM"
    dict["counties"] = "CNTY_NM"
    dict["airports"] = "ARPRT_NM"
    dict["airports_point"] = "ARPRT_NM"
    dict["countyroads"] = "FULL_ST_NM"
    dict["prison"] = "PRISON_NM"
    dict["military"] = "BASE_NM"
    dict["school"] = "SCHOOL_NM"
    dict["cities"] = "CITY_NM"
    dict["cities_point"] = "CITY_NM"
    dict["lakes"] = "BODY_NM"
    dict["railroad"] = "new_name"
    dict["rivers"] = "STRM_NM"
    dict["federal"] = "FULL_ST_NM"
    print "Performing intersects..."
    fcList = arcpy.ListFeatureClasses()
    for fc in fcList:
        if fc != "union" and fc != "grid":
            print str(fc)
            arcpy.Intersect_analysis(["union", fc], fc + "__INTERSECT")
    del fcList
    del fc
    print "Summarizing..."
    fcList = arcpy.ListFeatureClasses()
    for fc in fcList:
        if fc.split("__")[-1] == "INTERSECT":
            dictname = fc.split("__")[0]
            print dictname
            field = dict[dictname]
            # Composite key: grid page + county + feature display name.
            arcpy.AddField_management(fc, "UNIQUE", "TEXT", "", "", 250)
            cursor = arcpy.UpdateCursor(fc)
            for row in cursor:
                value = row.getValue(field)
                if value is None:
                    value = ""
                row.setValue("UNIQUE", str(row.STATE_ID) + row.CNTY_NM + value)
                cursor.updateRow(row)
            del cursor
            del row
            # One output row per UNIQUE key, keeping page id, county and name.
            arcpy.Statistics_analysis(fc, dictname + "_SUMMARIZED", [["STATE_ID", "MIN"], ["CNTY_NM", "FIRST"], [dict[dictname], "FIRST"]], ["UNIQUE"])
    print "Merging with point tables..."
    arcpy.Merge_management(["cemetery_SUMMARIZED", "cemetery_point_SUMMARIZED"], "cemetery_all_SUMMARIZED")
    arcpy.Merge_management(["airports_SUMMARIZED", "airports_point_SUMMARIZED"], "airports_all_SUMMARIZED")
    arcpy.Merge_management(["cities_SUMMARIZED", "cities_point_SUMMARIZED"], "cities_all_SUMMARIZED")
    print "Renaming tables..."
    # Rename the pre-merge tables so only the *_all_SUMMARIZED (and other
    # *_SUMMARIZED) tables are picked up by merge() below.
    arcpy.Rename_management("cemetery_SUMMARIZED", "cemetery_SUMpreMERGE")
    arcpy.Rename_management("cemetery_point_SUMMARIZED", "cemetery_point_SUMpreMERGE")
    arcpy.Rename_management("airports_SUMMARIZED", "airports_SUMpreMERGE")
    arcpy.Rename_management("airports_point_SUMMARIZED", "airports_point_SUMpreMERGE")
    arcpy.Rename_management("cities_SUMMARIZED", "cities_SUMpreMERGE")
    arcpy.Rename_management("cities_point_SUMMARIZED", "cities_point_SUMpreMERGE")
def merge():
    """Copy the template .mdb and overwrite its tables with the new summaries.

    Creates a date-stamped copy of the template geodatabase, then copies
    every *_SUMMARIZED table into it under a title-cased name.
    """
    env.workspace = database
    env.overwriteOutput = True
    print "Copying mdb..."
    newDbase = "T:\\DATAMGT\\MAPPING\\Mapping Products\\CountyMapbook\\Calendar year 2014\\Feature Indicies\\Working\\2014_INDEXS_Geodatabase"+today+".mdb"
    arcpy.Copy_management("T:\\DATAMGT\\MAPPING\\Mapping Products\\CountyMapbook\\Calendar year 2014\\Feature Indicies\\Working\\2014_INDEXS_Geodatabase.mdb", newDbase)
    print "Overwriting tables..."
    tList = arcpy.ListTables()
    for table in tList:
        if table.split("_")[-1] == "SUMMARIZED":
            # e.g. "cemetery_all_SUMMARIZED" -> output table "Cemetery".
            name = table.split("_")[0]
            capname = name.title()
            arcpy.Copy_management(table, newDbase + "\\" + capname)


# Run the full pipeline: copy sources, intersect/summarize, export.
preparation()
intersects()
merge()
print "That's all folks!"
|
8 bananas, peeled, cubed and frozen.
4 tbsp. sugar-free peanut butter.
In a blender, add the frozen bananas, peanut butter, maple syrup together and blitz until a nice smooth mixture is obtained.
Pop into the freezer for 30 minutes to get hard again.
Oh my! I am craving now. Thanks for sharing this. I have not tried eating a banana and peanut butter ice cream. I would love to try this soon as I have peanut butter. I still have to buy bananas though.
Hope you will enjoy it Thelma, and thanks for the follow !
Thank you very much. You are welcome.
|
# Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
import mock
from injector import Injector, singleton, provides
import pytest
from midonet_sandbox.configuration import Config
from midonet_sandbox.logic.builder import Builder
from midonet_sandbox.logic.dispatcher import Dispatcher
from midonet_sandbox.logic.injection import SandboxModule
from midonet_sandbox.wrappers.docker_wrapper import Docker
from midonet_sandbox.logic.composer import Composer
class DockerMock(object):
    """In-memory stand-in for the Docker wrapper used by the builder tests."""

    def __init__(self):
        self.existing_images = []

    def set_existing_images(self, existing_images):
        """Replace the list of images the fake daemon will report."""
        self.existing_images = existing_images

    def list_images(self, prefix):
        """Return known images whose RepoTags start with *prefix*.

        Mirrors the real wrapper's behaviour: an image is reported once
        per matching tag, and a falsy prefix matches nothing.
        """
        if not prefix:
            return []
        matches = []
        for image in self.existing_images:
            tags = image.get('RepoTags')
            if tags is None:
                continue
            matches.extend(image for tag in tags if tag.startswith(prefix))
        return matches
class BuilderMock(object):
    # Empty Builder stand-in.  NOTE(review): not referenced anywhere in
    # this module's visible code — confirm whether it is still needed.
    pass
class ComposerMock(object):
    """Stub Composer returning a fixed component set per flavour."""

    def get_components_by_flavour(self, flavour):
        """Return the component dict for *flavour*, or None if unknown.

        Bug fix: the original compared strings with ``is``, which tests
        object identity and only worked by accident of CPython string
        interning; ``==`` compares the actual values.
        """
        if flavour == 'with_external_component':
            return {'external1:master': None,
                    'sandbox/internal1:master': None}
        elif flavour == 'without_external_component':
            return {'sandbox/internal1:master': None,
                    'sandbox/internal2:master': None}
        return None
class SandboxModuleTest(SandboxModule):
    """SandboxModule wired with mock providers for Config, Composer and Docker."""

    def __init__(self):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(SandboxModuleTest, self).__init__(dict())

    @singleton
    @provides(Config)
    def configuration_provider(self):
        # A throwaway config; the path 'mock' is never read by the mocks.
        return Config('mock')

    @singleton
    @provides(Composer)
    def composer_provider(self):
        return ComposerMock()

    @singleton
    @provides(Docker)
    def docker_provider(self):
        return DockerMock()
class TestBuilder(object):
    """Tests for the image build dispatch logic (Dispatcher.build_all)."""

    def setup_method(self, method):
        # Fresh injector per test so mock state cannot leak between tests.
        self.injector = Injector([SandboxModuleTest()])
        self.dispatcher = self.injector.get(Dispatcher)
        self.builder = self.injector.get(Builder)
        self._composer = self.injector.get(Composer)
        self._docker = self.injector.get(Docker)
        # Replace the real build step with a Mock so calls can be asserted.
        self._build = Mock()
        self.builder.build = self._build
        self.builder._composer = self._composer

    def test_build_not_sandbox_image(self):
        # Only the sandbox/-prefixed component is built; 'external1' is not.
        options = {
            '<flavour>': 'with_external_component',
            '--force': False
        }
        self.dispatcher.build_all(options)
        self._build.assert_called_once_with(u'internal1:master')

    def test_existing_image_not_build(self):
        # An image already reported by docker is skipped.
        exists = [{'RepoTags': ['sandbox/internal1:master']}]
        options = {
            '<flavour>': 'without_external_component',
            '--force': False
        }
        self._docker.set_existing_images(exists)
        self.dispatcher.build_all(options)
        self._build.assert_called_once_with(u'internal2:master')

    def test_existing_image_not_build_with_extra_tag(self):
        # Extra repository tags on the same image must not trigger a rebuild.
        exists = [{'RepoTags': ['sandbox/internal1:master',
                                'repo/sandbox/internal1:master']}]
        options = {
            '<flavour>': 'without_external_component',
            '--force': False
        }
        self._docker.set_existing_images(exists)
        self.dispatcher.build_all(options)
        self._build.assert_called_once_with(u'internal2:master')

    def test_force_build_existing_image(self):
        # --force rebuilds even images that already exist.
        exists = [{'RepoTags': ['sandbox/internal1:master',
                                'repo/sandbox/internal1:master']}]
        options = {
            '<flavour>': 'without_external_component',
            '--force': True
        }
        self._docker.set_existing_images(exists)
        self.dispatcher.build_all(options)
        self._build.assert_has_calls([mock.call(u'internal1:master'),
                                      mock.call(u'internal2:master')],
                                     any_order=True)


if __name__ == '__main__':
    pytest.main()
|
A J bolt is a J-shaped bolt which can be used in a variety of applications. Our J Hook Bolts find usage in engineering, automobile, construction and other allied industries for several purposes. They are manufactured using high-grade raw material and advanced technology. These J Hook Bolts are available in various dimensions. These J-bolts are precision engineered and are thus easy to use in any industry.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* File Name : JSON-Exporter.py
* Purpose : Writing JSON output by checking the chapters and images for the
"https://gitlab.com/greatdeveloper/kitab" project.
* Creation Date : 15-08-2016 (Independence Day Special Hackathon)
* Copyright (c) 2016 Mandeep Singh <[email protected]>
"""
from __future__ import print_function
import os
import re
import json
def sort_nicely(targetList):
    """Return *targetList* sorted in natural (human) order.

    Numeric runs inside each string are compared as integers, so e.g.
    'page2' sorts before 'page10'.
    """
    def natural_key(text):
        # Split into alternating text/digit chunks; digits compare as ints.
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split('([0-9]+)', text)]

    return sorted(targetList, key=natural_key)
# Directory in which chapters are added.
directory = "/home/mandeep/work/json-export/book"
# Declaration.
bookName = "Design Aids"
totalPages = 0
# Maps each chapter directory path -> naturally-sorted list of its files.
storage = {}
# Get total number of chapters and pages available.
# NOTE(review): assumes `directory` exists and contains chapter
# subdirectories; `chapters` is only bound on the first walk step.
for root, dirs, files in os.walk(directory):
    # print root, dirs, files
    if root == directory:
        # Top level: the subdirectories are the chapters.
        chapters = sorted(dirs, key=str)
    else:
        # Chapter level: remember its page files in natural order.
        storage[root] = sort_nicely(files)
    """if root != directory:
        print("Files:", root, files)
    """
    # Calculates total number of pages available.
    totalPages = totalPages + len(files)
print("\nTotal number of Pages:", totalPages)
print("\nTotal number of chapters:", chapters)
print("\nStorage:", storage)
# Basic list structure to be exported as json.
data = {"Book": {"Name": bookName, "totalPages": totalPages,
                 "info": {}}}
# Updating the json list to contain the images, keyed by chapter name.
for item in chapters:
    data['Book']['info'][item] = storage[os.path.join(directory, item)]
print("\nFinal Output:", data)
# Writing data as json format to a file.
with open(os.path.join(directory, "output.json"), 'w') as outputfile:
    json.dump(data, outputfile)
|
For a buyer or refinancer of commercial and residential real estate title insurance will typically, at some point in the transaction, become a focal point for their attorney and potentially for them as well if a problem crops up that needs to be resolved!
But that said, for many the only concept that they will have concerning this extremely important insurance policy is that they will be asked to cut a check for it at the closing.
There are titleholders in beauty pageants, spelling bees and boxing matches. But only one title matters when buying a home.
Before a sale is complete, both the bank and the borrower want to be sure that the title—the formal document that shows proof of ownership—is free and clear. That means there are no delinquent taxes, unpaid liens, undisclosed heirs or other disputes that must be resolved before the house can be sold.
A title search and title insurance protect both lenders and borrowers.
“Whether it’s a person buying their first house at $250,000 or someone buying a home at $10 million, you have to ask, ‘How devastating would it be to your life if you lost your entire investment?’” says Rafael Castellanos, managing partner at New York-based Expert Title Insurance Agency.
After a house goes into contract, a title company searches public records, typically going back a number of years, to look for any problems with the home’s title. More than a third of all title searches reveal a problem, according to the American Land Title Association (ALTA), the largest trade association for title insurance providers. The title company is able to correct some of the problems, such as an outdated survey of property lines, but some issues may have to be resolved by the seller.
Most mortgage lenders require borrowers to pay for a title search and title insurance on the loan. But this insurance policy is purchased to protect the lender—not the borrower—when unforeseen problems with the title emerge. A loan policy covers the property’s loan amount and decreases over time as the mortgage is paid off. Both the title search and the lender’s title insurance premium are one-time fees paid by the buyer at the closing.
For extra protection, homeowners can buy a separate owner’s policy, says Diane Evans, president of ALTA and vice president of Denver-based Land Title Guarantee Co. This form of title insurance, which insurers often discount if purchased with the loan policy at closing, covers holders for the duration of their home ownership—even after the mortgage is paid off. Borrowers may wonder why they need an owner’s policy after the closing, but issues may come up after the house has been paid off.
“For example, at the back of your yard, you planted a garden, and all of a sudden a public service company is tearing up the back fence to replace a utility line,” she explains. In such a case, title insurance may pay for repairs and legal fees to remove the utility’s easement, Ms. Evans says.
For the lender’s title insurance, the bank will typically recommend a particular title insurance provider or list some options. Under federal law, it cannot require the borrower use that provider.
As with other types of insurance, title-insurance rates typically rise with home value, so a higher purchase price means a higher premium because it is covering more, says Daniel D. Mennenoh, president of Galena, Ill.-based H.B. Wilkinson Title Co.
In Manhattan, a lender’s policy will run about $2,607 for a $650,000 jumbo mortgage amount on a $810,000 purchase price, just above the upper threshold for government-backed loans in this high-price area (up to $417,000 in most parts of the country), Mr. Castellanos says. However, the amount could be as low as $1,825, if the property has been title-insured in the prior 10 years.
A separate owner’s policy for this property would cost $782 if bundled with a lender’s policy at closing or $3,207 if purchased on its own, he adds.
Homeowners can save by shopping around for both lender and owner policies. But since rates are regulated in all states, prices tend to be similar among reputable insurers in specific geographic areas.
While the owner’s policy isn’t mandatory, it protects the borrower’s equity interest in the property, says John Walsh, CEO of Milford, Conn.-based Total Mortgage Services. Some owner’s policies also cover appreciation if home values rise during ownership, he adds.
• Check for conflicts. Borrowers should ensure that a lender doesn’t own or have a financial relationship with the recommended title provider, Mr. Castellanos says. Pressure from the lender to close by a specific date could cause the title company to overlook issues that could come back and haunt the owner later, he adds.
• What’s included. Sometimes the title company will also provide other services, such as conducting the closing, preparing and notarizing documents, so when comparing rates among providers, borrowers should ask for a breakdown of expenses, ALTA recommends.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from relatives.utils import object_edit_link
from wirecloud.platform import models
# Register simple models with the default ModelAdmin (no customization needed).
admin.site.register(models.Reporter) #sixuan
admin.site.register(models.Article) #sixuan
admin.site.register(models.Constant)
admin.site.register(models.Position)
admin.site.register(models.IWidget)
admin.site.register(models.Variable)
class MarketAdmin(admin.ModelAdmin):
    """Admin for Market entries, listed and sorted by owner then name."""
    list_display = ('user', 'name')
    ordering = ('user', 'name')
admin.site.register(models.Market, MarketAdmin)
class MarketUserDataAdmin(admin.ModelAdmin):
    """Admin for per-user market data; one row per (market, user, key)."""
    list_display = ('market', 'user', 'name', 'value')
    ordering = ('market', 'user', 'name')
admin.site.register(models.MarketUserData, MarketUserDataAdmin)
class VariableDefInline(admin.StackedInline):
    """Edit a widget's variable definitions inline on the Widget page."""
    model = models.VariableDef
    extra = 0
class WidgetAdmin(admin.ModelAdmin):
    """Admin for Widget with its VariableDefs edited inline."""
    inlines = (VariableDefInline,)
admin.site.register(models.Widget, WidgetAdmin)
admin.site.register(models.XHTML)
admin.site.register(models.PlatformPreference)
class WorkspacePreferenceInline(admin.TabularInline):
    """Inline editor for a workspace's preferences (used by WorkspaceAdmin)."""
    model = models.WorkspacePreference
    extra = 1
class TabPreferenceInline(admin.TabularInline):
    """Inline editor for a tab's preferences (used by TabAdmin)."""
    model = models.TabPreference
    extra = 1
class TabInline(admin.TabularInline):
    """Inline listing of a workspace's tabs with a link to each tab's page."""
    model = models.Tab
    # Read-only per-row link to the Tab's own change page.
    edit_link = object_edit_link(_("Edit"))
    fields = ('name', 'position', edit_link)
    readonly_fields = (edit_link,)
    ordering = ('position',)
    extra = 1
class TabAdmin(admin.ModelAdmin):
    """Standalone admin for Tab (also reachable via TabInline's edit links)."""
    list_display = ('workspace', 'name', 'position')
    list_display_links = ('name',)
    ordering = ('workspace', 'position')
    inlines = (TabPreferenceInline,)
class WorkspaceAdmin(admin.ModelAdmin):
    """Admin for Workspace; preferences and tabs are edited inline."""
    list_display = ('creator', 'name')
    ordering = ('creator', 'name')
    inlines = (WorkspacePreferenceInline, TabInline,)
admin.site.register(models.Workspace, WorkspaceAdmin)
admin.site.register(models.Tab, TabAdmin)
admin.site.register(models.UserWorkspace)
|
With the slew of devices powered by Windows 8, it was only a matter of time before Logitech, a brand renowned for its high-quality computer peripherals, introduces some new touch-and-gesture friendly peripherals as companions to Windows 8. And so it proved recently as Logitech launched three new peripherals – optimised for Windows 8.
First up was the Logitech Zone Touch Mouse T400. Designed to ease consumers into a touch-friendly device, the T400 has the traditional mouse button layout, but replaces the scroll wheel with a glass touch surface, which can still be used for scrolling. A quick click on the touch zone brings you back to the Start Screen for quick access from any app. The mouse is also wrapped with rubber on the sides, giving consumers greater control for any kind of grip. The T400 uses two AA batteries and has an estimated battery life of 18 months.
Next, there is the Touch Mouse T620, which features a full-touch glass surface. With its unconventionally comfortable design, the focus is on the glass touch surface. The buttonless layout makes it gesture-friendly, similar in the way a touchpad works. With up to 10 gestures supported, the Touch Mouse T620 negates any need for users to reach out to touch displays, as users can use intuitive and natural gestures to switch applications, show the desktop, or even open the Charms sidebar. The T620 can use one or two AA batteries at any one time, with an estimated battery life of between three months (one AA battery) or six months (two AA batteries).
Both mice feature Logitech’s Advanced Optical Tracking for more precise tracking on more surfaces, as well as Advanced Wireless technology that is more reliable than standard wireless mice. It also utilises the Unifying receiver, meaning that one USB receiver can connect up to six compatible Logitech peripherals, freeing up precious USB ports.
Finally, Logitech also announced a successor to the K400 keyboard, the Wireless Touch Keyboard K400r. The K400r replaces the numpad of a traditional keyboard in favour of a 3.5-inch touchpad complete with left and right-click buttons. This compact and stylish keyboard allows users to use it virtually anywhere, and still be in complete control of Windows 8, with its touchpad supporting all of Windows 8’s gestures. The K400r is different from the two mice in that it recharges via USB instead of AA batteries. However, the battery in the K400r is slated for up to two years of use.
Also, all three peripherals are not only ready for Windows 8, but are also compatible with older versions of Windows.
The Logitech Touch Mouse T620 will retail for RM199, while the Touch Mouse T400 is priced at RM149. The Wireless Touch Keyboard K400r will also be sold at RM149. All three peripherals are available in stores now. For more information, head on to Logitech Malaysia’s website here.
|
#
# ComparePermissionAllowed.py
#
# This program compares a generated file of permissions (PermissionsRequest.txt)
# that I generated to an apiallowed.csv file given by FCBH. The purpose is to
# identify any permissions that I am not allowed.
#
# Layout of apiallowed.csv
# column 0 - line number
# column 1 - LangName
# column 2 - Reg NT Text DamId
# column 3 - Reg OT Text DamId
# column 4 - ND NT Text DamId
# column 5 - ND OT Text DamId
# column 6 - Reg NT Audio DamId
# column 7 - Reg OT Audio DamId
# column 8 - ND NT Audio DamId
# column 9 - ND OT Audio DamId
#
# Process
# 1. Read the csv file one line at a time
# 2. Create Set of Col 2 NT Text
# 3. Create Set of Col 3 OT Text
# 4. Create Set of Col 6 and 8 NT Audio
# 5. Create Set of Col 7 and 9 OT Audio
# 6. Read PermissionsRequest.txt, and parse records
# 7. For each row lookup bibleId in NT Text
# 8. For each text lookup textId in NT Text
# 9. For each audio lookup NT damId in NT Audio
# 10. For each audio lookup OT damId in OT Audio
# 11. Report any differences in bibleId to textId
# 12. Report any differences in damId to textId
# 13. Report any differences in language id.
import io
import os
import csv
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
    # Generator wrapping csv.reader that decodes every cell from UTF-8.
    # NOTE(review): Python 2 only -- relies on the builtin `unicode`; under
    # Python 3 csv.reader already yields text and this wrapper would raise
    # NameError.
    csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
    for row in csv_reader:
        yield [unicode(cell, 'utf-8') for cell in row]
def add(value, expect_len, add_len, aset):
    """Validate a DamId string and collect its prefix into a set.

    Empty strings and the literal "NA" are silently skipped.  Strings whose
    length differs from expect_len are reported and skipped.  Otherwise the
    first add_len characters are added to aset.

    Fixes: the first parameter previously shadowed the builtin ``str``; the
    Python-2 print statement is replaced by a single-argument form that
    behaves identically under Python 2 and 3.

    @param value DamId string taken from one CSV column
    @param expect_len length the raw string must have to be valid
    @param add_len number of leading characters to keep
    @param aset set collecting accepted prefixes (mutated in place)
    @return None in all cases; the set is updated by side effect
    """
    if not value or value == "NA":
        return None
    if len(value) != expect_len:
        # Printed rather than raised so one malformed row does not stop the scan.
        print("Length Error %s %s" % (expect_len, value))
        return None
    aset.add(value[0:add_len])
# Sets of allowed DamId prefixes built from the apiallowed.csv columns.
# Text DamIds are compared on their 6-char prefix, audio on all 10 chars.
ntTextSet = set()
otTextSet = set()
ntAudioSet = set()
otAudioSet = set()
filename = os.environ['HOME'] + "/ShortSands/DBL/FCBH/apiallowed.csv"
reader = unicode_csv_reader(open(filename))
for row in reader:
    add(row[2], 10, 6, ntTextSet)
    add(row[3], 10, 6, otTextSet)
    add(row[6], 10, 10, ntAudioSet)
    add(row[8], 10, 10, ntAudioSet)
    add(row[7], 10, 10, otAudioSet)
    add(row[9], 10, 10, otAudioSet)
# Closes the generator. NOTE(review): the file object opened above is never
# closed explicitly; it is only released on interpreter exit.
reader.close()
#print otAudioSet
# Scan the generated permissions file and report any requested id that is
# absent from the allowed sets.  Lines look like quoted s3 ARNs split on "/":
# row[1] = media type, row[2]/row[3]/row[4] = bible/damId path components.
input1 = io.open("PermissionsRequest.txt", mode="r", encoding="utf-8")
for line in input1:
    if line.startswith('\"arn:aws:s3:::'):
        row = line.split("/")
        #print row[1]
        if row[1] == "text":
            # Strip a trailing "_..." qualifier from the 4th path component.
            row[4] = row[4].split("_")[0]
            # NOTE(review): only ntTextSet is consulted for text rows; OT text
            # ids would always be reported -- confirm this is intended.
            if row[2] not in ntTextSet and row[3] not in ntTextSet and row[4] not in ntTextSet:
                print row[1], row[2], row[3], row[4]
        elif row[1] == "audio":
            #print row[2], row[3], row[4]
            if row[3] not in ntAudioSet and row[3] not in otAudioSet:
                print row[1], row[2], row[3], row[4]
|
These fluffy flowers fill up the gap when you are looking for a rare colour among flowers.
They are not only beautiful but also have a very long flowering time that lasts for months and months from spring to late autumn and in warmer climates through the whole year. These characteristics make them ideal for your purple flower garden designs and purple flower landscaping. Ageratum cannot tolerate frost and cold temperatures.
You can grow them from seeds or buy them from nurseries.
They need regular watering and full sun; however, they will do fine in part shade as well. If they do not get enough sun, though, they can develop fungal diseases such as powdery mildew. If that happens, move them out of the shade and place them where they get more sun exposure. You can cut off the affected parts and let the plant regenerate new leaves.
Make sure the soil drains well and is rich. Fertilize the soil every few months with a balanced organic fertilizer. Ageratums are heavy feeders. When you see them showing yellow leaves they are telling you they need more food and you have to fertilize them.
To suppress weeds and retain moisture, place a layer of mulch. Ageratums do not like to dry out. Watch this video to learn more about Ageratum.
|
from unittest.mock import patch
from orchestra.models import Iteration
from orchestra.models import Project
from orchestra.tests.helpers import OrchestraTestCase
from orchestra.tests.helpers.fixtures import setup_models
from orchestra.utils.project_properties import completed_projects
from orchestra.utils.task_lifecycle import assign_task
from orchestra.utils.task_lifecycle import submit_task
class ProjectPropertiesTestCase(OrchestraTestCase):
    """Tests for project property helpers such as completed_projects."""

    def setUp(self):
        super().setUp()
        setup_models(self)

    def _submit_skipping_review(self, task_id):
        """Submit the given task with review forced off; returns the task."""
        with patch('orchestra.utils.task_lifecycle._is_review_needed',
                   return_value=False):
            return submit_task(task_id, {},
                               Iteration.Status.REQUESTED_REVIEW,
                               self.workers[6])

    def _newest_task_id(self, task):
        """Id of the most recently started task on the same project."""
        return task.project.tasks.order_by('-start_datetime')[0].id

    def test_completed_projects(self):
        projects = Project.objects.all()
        worker = self.workers[6]

        # Complete the project's three tasks one by one; only after the
        # last submission should the project count as completed.
        task = assign_task(worker.id, self.tasks['awaiting_processing'].id)
        task = self._submit_skipping_review(task.id)
        self.assertEqual(completed_projects(projects).count(), 0)

        next_task = assign_task(worker.id, self._newest_task_id(task))
        task = self._submit_skipping_review(next_task.id)
        self.assertEqual(completed_projects(projects).count(), 0)

        next_task = assign_task(worker.id, self._newest_task_id(task))
        task = self._submit_skipping_review(next_task.id)
        self.assertEqual(completed_projects(projects).count(), 1)
|
CIVIL WAR NOV 1861 CDV CAPT CHARLES WILKES IN HAVANA CUBA PLOTTING TRENT AFFAIR. Vintage, original albumen photograph on CDV during the Civil War (first couple of days of November 1861) of U.S. Navy Captain Charles Wilkes, along with the American consul general to Cuba, Robert Wilson Shufeldt, as photographed by Charles D. Fredricks. Wilkes had assumed command of the USS San Jacinto off the coast of Africa on 27 August and spent considerable time in pursuit of the Confederate raider CSS Sumter under the command of Capt. Raphael Semmes. Late in October, when he stopped for coal at Cienfuegos, Cuba, he learned that James M. Mason and John Slidell, new Confederate envoys to Britain and France, had escaped from Charleston aboard the speedy coastal packet Theodora on 12 October and were then in Havana awaiting passage to Europe. Wilkes raced around the island and reached Havana on 31 October only to learn the Theodora had sailed the previous day.
But Mason and Slidell were still in Havana. On 8 November Wilkes stopped the Trent and seized the envoys. They were imprisoned in Boston as Britain threatened war (12,000 British soldiers were dispatched to Canada).
The envoys were released and war averted. For information only a scan of a drawing of the encounter added. In fine condition (upper corners clipped), and extremely rare.
Listing and template services provided by inkFrog. The item "CIVIL WAR NOV 1861 CDV CAPT CHARLES WILKES IN HAVANA CUBA PLOTTING TRENT AFFAIR" is in sale since Tuesday, January 15, 2019. This item is in the category "Collectibles\Militaria\Civil War (1861-65)\Original Period Items\Photographs". The seller is "linglelobo" and is located in Houston, Texas. This item can be shipped worldwide.
|
#!/usr/bin/python
"""
Creators GET API interface v0.3.2
Full API docs: http://get.creators.com/docs/wiki
@author Brandon Telle <[email protected]>
@copyright (c) 2015 Creators <www.creators.com>
"""
import subprocess, shlex, re, urllib, os.path
# Python 3+ puts urlencode in urllib.parse
try:
from urllib import parse
use_parse = True
except:
use_parse = False
# We need some way to parse JSON responses
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
raise ImportError("A JSON library is required to use Creators_API")
# Try to use pycURL instead of system calls
try:
import pycurl
from io import BytesIO
use_pycurl = True
except ImportError:
use_pycurl = False
# User's API Key (40-character token; set directly or via authenticate()).
api_key = ""
# Base URL prepended to every endpoint path.
api_url = "http://get.creators.com/"
# API version sent in the X_API_VERSION request header.
api_version = 0.32
# Expected length of a valid API key (see authenticate()).
api_key_length = 40
# Make an API request
# @param endpoint string API url
# @param parse_json bool if True, parse the result as JSOn and return the parsed object
# @throws ApiError if an error code is returned by the API
# @return parsed JSON object, or raw return string
def __api_request(endpoint, parse_json=True, post_data={}, destination=''):
    """Perform a GET/POST request against the Creators API.

    Uses pycurl when available, otherwise shells out to the curl binary.

    @param endpoint string API path relative to api_url
    @param parse_json bool if True, try to parse the result as JSON
    @param post_data dict if non-empty, send a POST with these url-encoded
           fields (NOTE: mutable default, but it is only read, never mutated)
    @param destination string if set, a binary (non-UTF-8) pycurl response is
           written to this path and True is returned instead of the body
    @throws ApiError if no API key is set, the response cannot be handled,
            or an error code is returned by the API
    @return parsed JSON object, raw response string, or True (see above)
    """
    # An API key is required except for the key-less auth call itself,
    # which is identified by supplying post_data.
    if api_key == "" and len(post_data) == 0:
        raise ApiError('API key must be set')
    # Url-encode POST fields; urlencode moved to urllib.parse in Python 3.
    data = ''
    if len(post_data) > 0:
        try:
            data = urllib.urlencode(post_data)
        except:
            try:
                data = urllib.parse.urlencode(post_data)
            except:
                raise ApiError('Cannot parse post string')
    if use_pycurl:
        c = pycurl.Curl()
        c.setopt(c.URL, api_url+endpoint)
        if data != '':
            c.setopt(c.POSTFIELDS, data)
        c.setopt(c.HTTPHEADER, ['X_API_KEY: '+api_key, 'X_API_VERSION: '+str(api_version)])
        c.setopt(c.FOLLOWLOCATION, True)
        buffer = BytesIO()
        c.setopt(c.WRITEDATA, buffer)
        c.perform()
        c.close()
        ret = buffer.getvalue()
        try:
            ret = ret.decode('UTF-8')
        except:
            # Binary payload (e.g. a zip): write it to `destination` when
            # given, otherwise treat it as an unparseable response.
            if destination != '':
                f = open(destination, 'wb')
                f.write(ret)
                f.close()
                ret = True
            else:
                raise ApiError('Cannot parse API response')
    else:
        # No pycurl available: build an equivalent curl(1) command line.
        cmd = 'curl --silent -L --header "X_API_KEY: '+api_key+\
            '" --header "X_API_VERSION: '+str(api_version)+'" '
        if data != '':
            cmd += '-X POST --data "'+data+'" '
        cmd += api_url+endpoint
        ret = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE).stdout.read()
    # Check for HTTP error messages embedded in the body.
    if type(ret) is str:
        err = re.search('Error ([0-9]+): (.*)', ret)
        if err != None:
            # NOTE(review): err.group(1) is a *string* error code here,
            # while JSON errors below carry ints (see ApiError).
            raise ApiError(err.group(2), err.group(1))
    # Parse JSON if required; non-JSON bodies fall through unchanged.
    if parse_json:
        try:
            ret = json.loads(ret)
        except:
            pass
    # Check for API-generated error messages, throw exception.
    try:
        if type(ret) is dict and ret['error'] > 0:
            raise ApiError(ret['message'], ret['error'])
    except KeyError:
        pass
    return ret
def authenticate(username, password):
    """Exchange a username/password for an API key.

    On success the module-level api_key is stored (used by every later
    request) and True is returned; on any failure False is returned.

    Fixes: the bare ``except:`` no longer swallows SystemExit/KeyboardInterrupt,
    and a response dict without an 'api_key' field now yields False instead of
    leaking a KeyError to the caller.

    @param username account username
    @param password account password
    @return bool True if authentication succeeded
    """
    credentials = {'username': username, 'password': password}
    try:
        ret = __api_request('api/users/auth', post_data=credentials)
    except Exception:
        return False
    if type(ret) is dict:
        key = ret.get('api_key')
        # Accept only a well-formed key of the expected length.
        if type(key) is str and len(key) == api_key_length:
            global api_key
            api_key = key
            return True
    return False
# SYN the server
# @return string "ack"
def syn():
    """SYN the server; returns the string "ack" when reachable and authorized."""
    return __api_request('api/etc/syn')
def get_features(limit=1000, get_all=False):
    """Get a list of available active features.

    @param limit int number of results to return
    @param get_all bool if true, results will include inactive features
    @return list of features
    """
    endpoint = 'api/features/get_list/json/NULL/%s/0?get_all=%s' % (limit, int(get_all))
    return __api_request(endpoint)
# Get details on a feature
# @param filecode string unique filecode for the feature
# @return dict feature info
def get_feature_details(filecode):
    """Get details on a feature.

    @param filecode string unique filecode for the feature
    @return dict feature info
    """
    return __api_request('api/features/details/json/'+str(filecode))
def get_releases(filecode, offset=0, limit=10, start_date='', end_date=''):
    """Get a list of releases for a feature.

    @param filecode string unique filecode for a feature
    @param offset int offset, default 0
    @param limit int limit, default 10
    @param start_date string start date: YYYY-MM-DD, default none
    @param end_date string end date: YYYY-MM-DD, default none
    @return list of releases
    """
    endpoint = 'api/features/get_list/json/%s/%s/%s?start_date=%s&end_date=%s' % (
        filecode, limit, offset, start_date, end_date)
    return __api_request(endpoint)
def download_file(url, destination):
    """Download a file to a local path.

    @param url string URL provided in the files section of a release result
    @param destination string path the file should be saved to
    @throws ApiError if destination is not a writable file location, the url
            is unavailable, or the server answered with a JSON error payload
    @return bool True if the file is downloaded successfully
    """
    if os.path.isdir(destination):
        raise ApiError("Destination is a directory")
    try:
        contents = __api_request(url, parse_json=False, destination=destination)
        # A boolean means the pycurl path already wrote the binary payload
        # straight to destination.
        if type(contents) is bool:
            return contents
        if len(contents) > 0 and contents[0] == '{':  # Poor man's JSON check
            # A JSON body here is an error report, never file content.
            try:
                parsed = json.loads(contents)
            except ValueError:
                raise ApiError("Unexpected content type: JSON")
            if type(parsed) is dict and parsed.get('error', 0) > 0:
                # Preserve the server's own message; the old code caught this
                # very exception with a bare except and masked it with a
                # generic one.
                raise ApiError(parsed['message'], parsed['error'])
            raise ApiError("Unexpected content type: JSON")
        # Open the output only once the payload is known to be real file
        # data, so a failed download no longer leaves an empty file behind.
        f = open(destination, 'w')
        f.write(contents)
        f.close()
        return True
    except IOError:
        raise ApiError("Destination is unavailable or unwriteable")
def download_zip(release_id, destination):
    """Download a zip archive of the entire contents of a release.

    @param release_id int the unique ID of the release to download
    @param destination string path the archive should be saved to
    @throws ApiError if destination is not a writable file location, the
            release is not found, or the server answered with a JSON error
    @return bool True if the file is downloaded successfully
    """
    if os.path.isdir(destination):
        raise ApiError("Destination is a directory")
    try:
        contents = __api_request('/api/files/zip/'+str(release_id), parse_json=False, destination=destination)
        # A boolean means the pycurl path already wrote the binary payload
        # straight to destination.
        if type(contents) is bool:
            return contents
        if len(contents) > 0 and contents[0] == '{':  # Poor man's JSON check
            # A JSON body here is an error report, never archive content.
            try:
                parsed = json.loads(contents)
            except ValueError:
                raise ApiError("Unexpected content type: JSON")
            if type(parsed) is dict and parsed.get('error', 0) > 0:
                # Preserve the server's own message; the old code caught this
                # very exception with a bare except and masked it with a
                # generic one.
                raise ApiError(parsed['message'], parsed['error'])
            raise ApiError("Unexpected content type: JSON")
        # Open the output only once the payload is known to be real archive
        # data, so a failed download no longer leaves an empty file behind.
        f = open(destination, 'w')
        f.write(contents)
        f.close()
        return True
    except IOError:
        raise ApiError("Destination is unavailable or unwriteable")
class ApiError(Exception):
    """Exception raised for any API-level failure.

    Carries a human-readable message and an optional error code.  The code
    may arrive as an int (from a JSON error payload) or as a string (scraped
    from an HTTP error page by __api_request).
    """
    def __init__(self, value, errno=0):
        self.value = value
        self.errno = errno
    def __str__(self):
        val = ''
        # Truthiness instead of `> 0`: errno may be a str as well as an int,
        # and comparing str with int raises TypeError on Python 3.
        if self.errno:
            val += '[Errno ' + str(self.errno) + '] '
        val += self.value
        return repr(val)
|
In the era of Industry 4.0, SMEs are forced to optimize the production system, by reducing time and costs.
Thanks to our technicians and patents, Innovative LinUp Startup, has created an integrated hardware and software system which allows to introduce in SMEs, the concept of methodologies and instruments of analysis of the production process, until now only utilized in Large Enterprises.
These concepts, fall under the name of Lean Manufacturing; they are revolutionary as they completely change the organization of production as they make them lean.
The objective is to produce only what is necessary for the client: zero stocks, zero mistakes and zero interruptions in the work cycle.
We have developed algorithms in order to obtain a mapping of waste.
We have also created training modules to train resources in developing waste maps, which are then ready to be applied to the production model, both current and future.
|
# The MIT License (MIT)
#
# Copyright (c) 2010-2015 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp import util
import registration
class CleanUpReg(webapp.RequestHandler):
    """Cron handler that deletes duplicated Registration entities."""
    def get(self):
        # execute only when request comes from appengine.com
        # (App Engine sets this header on cron-initiated requests only).
        if self.request.headers.get('X-AppEngine-Cron') == 'true':
            # Newest-first within each key so the most recent entry of a
            # duplicate run is the one that survives.
            query = registration.Registration.all().order('key_id').order('-inserted')
            num = 0
            dup_regs = []
            keys = set()
            # find duplicated entries in all registrations, keeping all unique most recent ones.
            # NOTE(review): only *adjacent* rows with the same
            # (key_id, registration_id) pair are treated as duplicates;
            # confirm the sort order above really makes all duplicates
            # adjacent, otherwise some are missed.
            duplicate = 0
            lastKeyId = None
            lastRegId = None
            for r in query:
                num += 1
                keys.add(r.key_id)
                if r.registration_id == lastRegId:
                    if r.key_id == lastKeyId:
                        dup_regs.append(r)
                        duplicate += 1
                lastRegId = r.registration_id
                lastKeyId = r.key_id
            # remove duplicates, record our action
            db.delete(dup_regs)
            logging.info('cleanup: duplicate reg=%i (total: %i regs, %i keys)' % (duplicate, num, keys.__len__()))
def main():
    """Wire the cron URL to its handler and hand control to the WSGI runner."""
    routes = [('/cron/cleanup_reg', CleanUpReg)]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
There’s no better way to celebrate St. Patrick’s Day than with a festive green bouquet! Make someone smile with an all green bouquet or a green plant from Bouquets Unlimited. We can deliver your bouquet locally to Cheyenne, WY or nationwide.
|
# -*- coding: utf-8 -*-
import json
import sys
# see http://python3porting.com/noconv.html#import-errors
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.parse import quote
except ImportError:
# Fall back to Python 2's urllib
from urllib import urlopen, quote
def get_links(start_page, wikipedia_language='de'):
    """Return the titles of all article links on a Wikipedia page.

    Follows redirects and pages through the API's `continue` mechanism
    until every link batch has been collected.

    @param start_page page title to query (e.g. "Liste von Vornamen")
    @param wikipedia_language language subdomain of the wiki to query
    @return list of link titles
    """
    print('get_links(%s)' % start_page)
    # parameters for building a string later:
    # pllimit limits the number of links to return (max is 500 | 5000 for bots see http://en.wikipedia.org/w/api.php )
    # the plcontinue value returned from the api can be used to continue of this is exceeded
    # plnamespace: see here: http://en.wikipedia.org/wiki/Wikipedia:Namespace
    def get_more_links(more_parameters=()):
        # Fetch one batch of links; returns (titles, raw response dict).
        parameters = {"format": "json",
                      "action": "query",
                      "prop": "links",
                      "pllimit": 500,
                      "plnamespace": 0,
                      "continue" : "",
                      "titles": quote(start_page.encode("utf8"))}
        parameters.update(more_parameters)
        queryString = "&".join("%s=%s" % (k, v) for k, v in parameters.items())
        # This ensures that redirects are followed automatically, documented here:
        # http://www.mediawiki.org/wiki/API:Query#Resolving_redirects
        queryString = queryString+"&redirects"
        url = "http://%s.wikipedia.org/w/api.php?%s" % (wikipedia_language, queryString)
        #get json data and make a dictionary out of it:
        request = urlopen(url)
        try:
            # Python 2 header API ...
            encoding = request.headers.getparam('charset')
        except:
            # ... falling back to the Python 3 equivalent.
            encoding = request.headers.get_param('charset')
        jsonData = request.read().decode(encoding)
        data = json.loads(jsonData)
        pageId = list(data['query']['pages'])[0]
        if int(pageId)<=0:
            # Non-positive page ids mean the page is missing.
            # NOTE(review): sys.exit inside a helper aborts the whole
            # program; consider raising an exception instead.
            sys.exit("Page doesn't exist.")
        link_list = data['query']['pages'][str(pageId)]['links']
        return [entry["title"] for entry in link_list], data
    all_links, data = get_more_links()
    # Each time we get the dictionary we need to check if "continue"
    # exists and then repeat the stuff from before:
    while 'continue' in data.keys():
        continue_dict=dict()
        for key in list(data['continue'].keys()):
            if key == 'continue':
                continue_dict.update({key: data['continue'][key]})
            else:
                # Percent-encode each pipe-separated continuation token.
                val= "|".join([quote(e) for e in data['continue'][key].split('|')])
                continue_dict.update({key: val})
        new_links, data=get_more_links(continue_dict)
        all_links+=new_links
    return all_links
raw_names=get_links("Liste von Vornamen")
print(len(raw_names))
# Index pages linked from the list that are not given names themselves;
# they are stripped from the result below.
to_remove=["Liste albanischer Vornamen",
"Birmanischer Name",
"Chinesischer Name",
"Liste tibetischer Namen und Titel",
"Liste der bairischen Vornamen",
"Liste deutscher Vornamen aus der Bibel",
"Liste deutscher Vornamen germanischer Herkunft",
"Ostfriesischer Vorname",
"Obersorbische Vornamen",
"Liste finnischer Vornamen",
"Gambische Personennamen",
"Akan-Vorname",
"Liste griechischer Vornamen",
"Indischer Name",
"Römische Vornamen",
"Japanischer Name",
"Koreanischer Name",
"Liste der Namenstage in Lettland",
"Malaysischer Name",
"Personennamen der Sherpa",
"Polnischer Name",
"Spanischer Name",
"Liste türkischer Vornamen",
"Liste kurdischer Vornamen",
"Schreibung vietnamesischer Namen",
"Arabischer Name",
"Jüdischer Name",
"Slawische Vornamen"]
# remove this ^^^ ballast
names_only=set(raw_names)-set(to_remove)
#remove ' (Name)' and ' (Vorname)':
# NOTE(review): splitting on the first space keeps only the first word,
# which would also truncate any remaining multi-word title -- confirm all
# remaining entries are single-word names.
names=[entry.split(" ")[0] for entry in names_only]
with open('given_names.tsv', 'w') as namesfile:
    for name in names:
        namesfile.write(name+'\n')
|
Bacteria live all around us in colonies and reproduce quickly. Bacteria are so small that we cannot see them without a microscope. Bacteria are single-celled microorganisms that live in the air and even in our bodies. There are 10,000 known species of bacteria. There are three types of bacteria: cocci bacteria are round, bacilli bacteria are straight in shape, and spiral bacteria look like corkscrew pasta.
Some bacteria are very dangerous. The bubonic plague, which spread throughout Europe and killed millions of people, was caused by bacteria. But not all bacteria are bad; in fact, you couldn't survive without some bacteria. Probiotics are bacteria that live in your stomach and help you digest food. Good bacteria are also known as beneficial bacteria and can enhance your health. An adult will have 3 pounds of active bacteria in their body at any one time.
|
#
# Mini-project # 5: "Memory".
#
# Author: Aristotelis Metsinis
# Email: [email protected]
# Mini-project # 5: An Introduction to Interactive Programming in Python
# @ https://www.coursera.org/course/interactivepython
# Date: 26 Oct 2014
# Version: 10.0
#
# Implementation of card game: "Memory".
#
# Two game "modes": play with "textual numbers" or
# "images.
#
#---------------------------------------------------------
# Import the "simple gui" module.
import simplegui
# Import module, which contains functions that involve
# randomness.
import random
# Import module that contains additional mathematical
# operations.
import math
#---------------------------------------------------------
# Define and initialize global constants.
# Initialize global constants that will hold the "width"
# and "height" of the "canvas" ("deck of cards" - grid of
# 16 "cards").
CANVAS_WIDTH = 800
CANVAS_HEIGHT = 140
# "Memory" game of 16 "cards" (as global constant).
CARDS_NUMBER = 16
# Compute the "width" of a single cell of this grid;
# "placeholder" for a single "card" (cells distributed
# evenly).
CARD_PLACEHOLDER_WIDTH = (CANVAS_WIDTH // CARDS_NUMBER)
# Set general "draw" properties.
FONT_SIZE = 50
FONT_FACE = 'sans-serif'
FONT_COLOR = 'White'
MARGIN_Y = 19
# Compute the (global constant) "vertical" position to
# draw a "card", presenting a "textual number" and taking
# into consideration the height of the "deck of cards"
# plus a "margin".
CARD_VALUE_POINT_Y = (CANVAS_HEIGHT // 2) + MARGIN_Y
# More general "draw" properties.
CARD_PLACEHOLDER_LINE_COLOR = 'Black'
CARD_PLACEHOLDER_FILL_COLOR = 'Green'
CARD_PLACEHOLDER_LINE_WIDTH = 2
# Initialize a "dictionary" as global constant, mapping
# numbers from 0-7 (acting as "keys") to "urls" (acting
# as "values"). In practice, the business logic of the
# program models generally the "deck of cards" as a
# "shuffled" list consisting of 16 numbers with each
# number lying in the range [0,8) and appearing twice.
# The following "urls" (links to images)
# are just being used at the "presentation" layer,
# drawing the proper "image" instead of "number" (text).
# NOTE(review): simplegui loads images asynchronously, so cards may render
# blank until the downloads complete -- confirm this is acceptable.
IMAGES = {}
IMAGES[0] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/riemann.jpg')
IMAGES[1] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/aristotle.jpg')
IMAGES[2] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/euler.jpg')
IMAGES[3] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/gauss.jpg')
IMAGES[4] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/newton.jpg')
IMAGES[5] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/einstein.jpg')
IMAGES[6] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/hilbert.jpg')
IMAGES[7] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/lagrange.jpg')
#---------------------------------------------------------
# Define and initialize global variables.
# Boolean flag: play the game with "images" (True) or
# with "textual numbers" (False).
play_with_images = False
#---------------------------------------------------------
def new_game():
    """
    Start or restart a game: reshuffle the deck, hide every card,
    reset the turn counter and forget any partially exposed pair.
    """
    # Model the deck as a list of CARDS_NUMBER values in which every number
    # in [0, CARDS_NUMBER // 2) appears exactly twice.  The same model is
    # used whether the cards are drawn as numbers or as images.
    # List multiplication replaces the old `range(...) + range(...)`
    # concatenation: it builds the identical list but also works on
    # Python 3, where range objects cannot be added.
    global deck_of_cards
    deck_of_cards = list(range(CARDS_NUMBER // 2)) * 2
    # Shuffle the "deck".
    random.shuffle(deck_of_cards)
    # Remove comment if in DEBUG mode.
    #print deck_of_cards
    # deck_of_cards_exposed[i] is True when card i is face up.
    global deck_of_cards_exposed
    deck_of_cards_exposed = [False] * CARDS_NUMBER
    # Game state machine: 0 = beginning of the game, 1 = a single exposed
    # unpaired card, 2 = end of a turn (see mouseclick() for transitions).
    global state
    state = 0
    # Number of turns played so far, mirrored in the GUI label.
    global turn
    turn = 0
    label.set_text("Turns = " + str(turn))
    # Indices of the (up to two) cards exposed during the current turn;
    # -1 marks an unused slot.
    global index_of_cards_exposed_in_a_turn
    index_of_cards_exposed_in_a_turn = [-1, -1]
    return None
#---------------------------------------------------------
def mouseclick(pos):
    """
    Canvas mouse-click handler; implements the game's three-state
    turn logic.

    pos -- tuple of two non-negative integers: the (x, y) canvas
    coordinates of the click.
    """
    # Map the click position to a card index: the cards entirely
    # fill the canvas as evenly sized placeholders, so integer
    # division by the placeholder width identifies the cell.
    card_index = int(math.floor(float(pos[0]) / CARD_PLACEHOLDER_WIDTH))
    # Clicks on a card that is already face up are ignored.
    if deck_of_cards_exposed[card_index]:
        return None
    # Both the "turn" counter and the game "state" are module-level
    # and updated in place.
    global turn
    global state
    # Whatever the state, a legal click always turns the clicked
    # card face up first.
    deck_of_cards_exposed[card_index] = True
    if state == 0:
        # Start of the game: remember this card as the first of the
        # pair, count a new turn and wait for the second card.
        index_of_cards_exposed_in_a_turn[0] = card_index
        turn += 1
        label.set_text("Turns = " + str(turn))
        state = 1
    elif state == 1:
        # One unpaired card showing: remember the second card of the
        # pair; the match test is deferred to the next click.
        index_of_cards_exposed_in_a_turn[1] = card_index
        state = 2
    else:
        # End of the previous turn: test whether the two cards
        # exposed during that turn match; if they do not, flip them
        # back face down.
        first, second = index_of_cards_exposed_in_a_turn
        if deck_of_cards[first] != deck_of_cards[second]:
            deck_of_cards_exposed[first] = False
            deck_of_cards_exposed[second] = False
        # The current click starts a fresh turn with this card as
        # its first exposed card.
        index_of_cards_exposed_in_a_turn[0] = card_index
        turn += 1
        label.set_text("Turns = " + str(turn))
        state = 1
    return None
#---------------------------------------------------------
def draw(canvas):
    """
    Draw handler responsible for all rendering.

    Draws the "deck" as a horizontal row of 16 evenly distributed
    placeholder cells and, for each exposed card, either its textual
    number or its image depending on the current game mode.

    canvas -- the simplegui canvas object to draw on.
    """
    for index in range(CARDS_NUMBER):
        # Horizontal extent of this card's placeholder cell.
        left_x = CARD_PLACEHOLDER_WIDTH * index
        right_x = CARD_PLACEHOLDER_WIDTH * (index + 1)
        if not deck_of_cards_exposed[index]:
            # Hidden card: just draw a blank filled rectangle
            # covering the whole cell.
            corners = [[left_x, 0],
                       [right_x, 0],
                       [right_x, CANVAS_HEIGHT],
                       [left_x, CANVAS_HEIGHT]]
            canvas.draw_polygon(corners,
                                CARD_PLACEHOLDER_LINE_WIDTH,
                                CARD_PLACEHOLDER_LINE_COLOR,
                                CARD_PLACEHOLDER_FILL_COLOR)
            continue
        # Exposed card: both rendering modes centre their content on
        # the middle of the cell.
        middle_x = (right_x + left_x) // 2
        if play_with_images:
            # "Images" mode: the card value doubles as the key into
            # the preloaded IMAGES mapping.
            image = IMAGES[deck_of_cards[index]]
            canvas.draw_image(image,
                              (image.get_width() // 2, image.get_height() // 2),
                              (image.get_width(), image.get_height()),
                              (middle_x, CANVAS_HEIGHT // 2),
                              (image.get_width(), image.get_height()))
        else:
            # "Textual numbers" mode: measure the rendered text so it
            # can be centre-justified within the cell.
            text = str(deck_of_cards[index])
            text_width = frame.get_canvas_textwidth(text, FONT_SIZE, FONT_FACE)
            text_point_x = middle_x - (text_width // 2)
            canvas.draw_text(text, (text_point_x, CARD_VALUE_POINT_Y),
                             FONT_SIZE, FONT_COLOR, FONT_FACE)
    return None
#---------------------------------------------------------
def switch_game_mode():
    """
    Button handler toggling between the two game modes: "textual
    numbers" (play_with_images False) and "images" (True).

    The button caption is rewritten to advertise the mode the NEXT
    press would switch back to, and the on-going game is reset.
    """
    # Flip the module-level mode flag in place.
    global play_with_images
    play_with_images = not play_with_images
    # The caption always names the opposite mode.
    if play_with_images:
        caption = "Reset and Play with numbers"
    else:
        caption = "Reset and Play with images"
    switch_game_mode_button.set_text(caption)
    # Switching modes always restarts the game.
    new_game()
    return None
#---------------------------------------------------------
# Create the application frame sized to the card grid.
frame = simplegui.create_frame("Memory", CANVAS_WIDTH,
                               CANVAS_HEIGHT)
# Register event handlers for "control" elements and
# frame buttons to "restart" and if necessary "switch"
# the mode of the game. Once the game is over, you should
# hit the "Reset" button to restart the game.
frame.add_button("Reset", new_game)
# Blank labels act as vertical spacers in the control panel.
frame.add_label("")
# Keep a handle on this label: the click handlers rewrite its
# text to show the running turn counter.
label = frame.add_label("Turns = 0")
frame.add_label("")
switch_game_mode_button = frame.add_button("Reset and Play with images",
                                           switch_game_mode, 200)
# Register "event handler" that is responsible for the
# management of the mouse clicks on the "canvas".
frame.set_mouseclick_handler(mouseclick)
# Register the "event handler" that is responsible
# for all drawing.
frame.set_draw_handler(draw)
# Call "new_game()" ensuring that all variables are
# always initialized when the program starts running.
new_game()
# Start frame.
frame.start()
#---------------------------------------------------------
|
To excel at Taekwondo sparring, you will need to learn combination kicks. This video will teach you how to do a roundhouse and back kick combination in Taekwondo sparring.
For additional sparring techniques, please visit the Black Belt Wiki’s section on Taekwondo Sparring. To learn various kicking techniques (i.e. axe kicks or spinning hook kicks), please visit our Kicking Techniques section.
|
# ----------------------------------------------------------------------
# Copyright (c) 2015 Rafael Gonzalez.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
# ========================== DESIGN NOTES ==============================
# A generic MQTT subscriber client to be used by my tiny server framework
# Must be subclassed a least do customize the onMessage() method.
#
# This class inherits from Lazy to periodically execute a work() procedure
# responsible for:
# 1. Managing connection to MQTT Broker. No disconnections are ever requested.
# 2. Managing subscriptions.
# 3. Delivering data to backend objects like databases
#
# The work() procedure executes twice as fast as
# the keepalive timeout specified to the client MQTT library.
#
# ======================================================================
import logging
import paho.mqtt.client as paho
import socket
import datetime
from abc import ABCMeta, abstractmethod
from server import Lazy
import utils
# MQTT Connection Status
# State-machine values stored in MQTTGenericSubscriber.__state to
# track the broker connection lifecycle.
NOT_CONNECTED = 0    # no session; work() will (re)try connect()
CONNECTING = 1       # connect() issued, CONNACK not yet received
CONNECTED = 2        # CONNACK received with rc == 0
FAILED = 3           # not referenced by the code in this file
DISCONNECTING = 4    # not referenced by the code in this file
# Default QoS
# Applied to every subscription built in reload().
QOS = 1
# Module-level logger; its level is (re)configured in reload().
log = logging.getLogger('mqtt')
# Callback when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """Delegate CONNACK handling to the subscriber object that was
    registered as paho 'userdata'."""
    userdata.onConnect(flags, rc)

def on_disconnect(client, userdata, rc):
    """Delegate disconnection handling to the subscriber object."""
    userdata.onDisconnect(rc)

# Callback when a PUBLISH message is received from the server.
# The default message callback
def on_message(client, userdata, msg):
    """Deliver an incoming PUBLISH to the subscriber, stamping it
    with the UTC arrival time."""
    userdata.onMessage(msg, datetime.datetime.utcnow())

# Callback subscriptions
def on_subscribe(client, userdata, mid, granted_qos):
    """Delegate SUBACK handling to the subscriber object."""
    userdata.onSubscribe(mid, granted_qos)

def on_unsubscribe(client, userdata, mid):
    """Delegate UNSUBACK handling to the subscriber object."""
    userdata.onUnsubscribe(mid)
class MQTTGenericSubscriber(Lazy):
    """
    Generic MQTT subscriber client for the tiny server framework.
    Must be subclassed, at least to customize the onMessage() method.

    Inherits from Lazy so that work() is executed periodically by the
    parent server; work() manages (re)connection to the broker, while
    the module-level paho callbacks route CONNACK / SUBACK / PUBLISH
    events back into the on*() methods of this object (which is
    registered with paho as 'userdata').

    NOTE(review): @abstractmethod below has no effect because no
    ABCMeta metaclass is set on this class, so instantiating it
    directly will not raise — confirm whether that is intended.
    """

    # Maximum reconnection retry period, in seconds; cap for the
    # exponential backoff applied in handleConnErrors().
    MAX_PERIOD = 2*60*60

    def __init__(self, srv, parser):
        """
        srv    -- parent server object; must provide addLazy(),
                  addReadable() and delReadable().
        parser -- ConfigParser-style object holding an [MQTT] section.
        """
        # Provisional 60 s period; reload() below replaces it with
        # half of the configured keepalive.
        Lazy.__init__(self, 60)
        self.__parser = parser
        self.srv = srv
        self.__state = NOT_CONNECTED
        self.__topics = []
        srv.addLazy(self)
        # We do not allow to reconfigure an existing connection
        # to a broker as we would loose incoming data
        self.id = parser.get("MQTT", "mqtt_id")
        self.__host = parser.get("MQTT", "mqtt_host")
        self.__port = parser.getint("MQTT", "mqtt_port")
        # clean_session=False: the broker keeps this client id's
        # subscriptions (and queued QoS>0 messages) across reconnects.
        self.paho = paho.Client(client_id=self.id+'@'+ socket.gethostname(),
                                clean_session=False, userdata=self)
        self.paho.on_connect = on_connect
        self.paho.on_disconnect = on_disconnect
        self.paho.on_message = on_message
        self.paho.on_subscribe = on_subscribe
        self.paho.on_unsubscribe = on_unsubscribe
        self.reload()
        log.info("MQTT client created")

    # we only allow to reconfigure the topic list and keepalive period
    def reload(self):
        '''Reloads and reconfigures itself: log level, keepalive period
        and topic list, all read from the [MQTT] config section.'''
        parser = self.__parser # shortcut
        lvl = parser.get("MQTT", "mqtt_log")
        log.setLevel(lvl)
        self.__keepalive = parser.getint("MQTT", "mqtt_period")
        # Run work() twice as fast as the keepalive handed to paho
        # (see the design notes at the top of the file).
        self.__initial_T = self.__keepalive / 2
        self.__period = self.__initial_T
        self.setPeriod(self.__initial_T )
        topics = utils.chop(parser.get("MQTT", "mqtt_topics"),',')
        self.__newtopics = [ (topic, QOS) for topic in topics ]
        # Apply subscription changes immediately when already
        # connected; otherwise onConnect() subscribes later.
        if self.__state == CONNECTED:
            self.subscribe()
        log.debug("Reload complete")

    # ----------------------------------------
    # Implement MQTT Callbacks
    # -----------------------------------------

    def onConnect(self, flags, rc):
        '''Handle CONNACK: on success (rc == 0) reset the retry
        backoff and (re)subscribe; on failure schedule a retry.'''
        if rc == 0:
            self.__state = CONNECTED
            self.__period = self.__initial_T
            self.setPeriod(self.__initial_T)
            log.info("Connected successfully")
            self.subscribe()
        else:
            self.handleConnErrors()

    def onDisconnect(self, rc):
        '''Handle a broker disconnection: drop the active topic list
        and deregister this object from the server's readables.'''
        log.warning("Unexpected disconnection, rc =%d", rc)
        self.__state = NOT_CONNECTED
        self.__topics = []
        try:
            self.srv.delReadable(self)
        except ValueError as e:
            # The mqtt library may deliver the disconnect callback
            # twice; the second delReadable() then raises because we
            # were already removed.
            log.warning("Recovered from mqtt library 'double disconnection' bug")

    @abstractmethod
    def onMessage(self, msg, tstamp):
        '''
        Process incoming messages from subscribed topics.
        Typically will pass the message to a backend object via
        the parent server object

        msg    -- the paho message object received.
        tstamp -- UTC datetime recorded on arrival (see on_message).
        '''
        pass

    def onSubscribe(self, mid, granted_qos):
        '''Log a subscription acknowledgement (SUBACK).'''
        log.info("Subscriptions ok with MID = %s, granted QoS = %s",
                 mid, granted_qos)

    def onUnsubscribe(self, mid):
        '''Log an unsubscription acknowledgement (UNSUBACK).'''
        log.info("Unsubscribe ok with MID = %s", mid)

    # ---------------------------------
    # Implement the Event I/O Interface
    # ---------------------------------

    def onInput(self):
        '''
        Read from message buffer and notify handlers if message complete.
        Called from Server object
        '''
        self.paho.loop_read()

    def fileno(self):
        '''Implement this interface to be added in select() system call'''
        return self.paho.socket().fileno()

    # ----------------------------------------
    # Implement The Lazy interface
    # -----------------------------------------

    def work(self):
        '''
        Called periodically from a Server object.
        Write blocking behaviour.
        While disconnected, retry the connection; once connected, let
        paho run its periodic housekeeping (keepalive pings etc.).
        '''
        log.debug("work()")
        if self.__state == NOT_CONNECTED:
            self.connect()
            return
        self.paho.loop_misc()

    # --------------
    # Helper methods
    # --------------

    def subscribe(self):
        '''Subscribe smartly to a list of topics: unsubscribe only
        from topics no longer wanted, subscribe only to new ones.'''
        # Unsubscribe first if necessary
        topics = [ t[0] for t in (set(self.__topics) - set(self.__newtopics)) ]
        if len(topics):
            self.paho.unsubscribe(topics)
            log.info("Unsubscribing from topics %s", topics)
        else:
            log.info("no need to unsubscribe")
        # Now subscribe
        topics = [ t for t in (set(self.__newtopics) - set(self.__topics)) ]
        if len(topics):
            log.info("Subscribing to topics %s", topics)
            self.paho.subscribe(topics)
        else:
            log.info("no need to subscribe")
        # The new topic list becomes the active one.
        self.__topics = self.__newtopics

    def connect(self):
        '''
        Connect to MQTT Broker with parameters passed at creation time.
        Add MQTT library to the (external) EMA I/O event loop.
        '''
        try:
            log.info("Connecting to MQTT Broker %s:%s", self.__host, self.__port)
            self.__state = CONNECTING
            self.paho.connect(self.__host, self.__port, self.__keepalive)
            self.srv.addReadable(self)
        except IOError as e:
            log.error("%s", e)
            self.handleConnErrors()

    def handleConnErrors(self):
        '''Exponential backoff: double the retry period (capped at
        MAX_PERIOD) and return to NOT_CONNECTED so work() retries.'''
        self.__state = NOT_CONNECTED
        self.__period *= 2
        self.__period = min(self.__period, MQTTGenericSubscriber.MAX_PERIOD)
        self.setPeriod(self.__period)
        log.info("Connection failed, next try in %d sec.", self.__period)
|
I am an English teacher from the Low Countries. Like everyone else I aspire to one day write a Booker Prize Winner, but until then I content myself with writing bitchy reviews.
Share the post "Writing 101: how to induce nausea, fits of rage and earth-shattering desperation in your audience"
Share the post "Envision that Circumcision and Feel that Eel"
Share the post "Breasting Boobily"
Share the post "Small Misunderstandings and Major Desillusions"
Share the post "Psycho Killer, Qu’est-ce que c’est?"
Share the post "Our House, in the Middle of our Heath"
Share the post "Love of the Common People"
|
import collections
import logging
import pyaas
# Map from the short module aliases accepted in the [modules]
# section of the pyaas configuration to the dotted import paths of
# the pyaas packages implementing them.
moduleImports = dict(
    auth = 'pyaas.web.auth',
    storage = 'pyaas.storage.engines',
    cache = 'pyaas.storage.cache',
)
def load():
    """Import every module listed in the [modules] section of the
    pyaas configuration, then let each registered PyaasModule
    subclass initialise itself."""
    if not pyaas.config.has_section('modules'):
        logging.debug('No modules defined')
        return
    configured = dict(pyaas.config.items('modules'))
    for name in configured:
        # Resolve the short alias to a real import path; unknown
        # aliases are reported and skipped.
        try:
            importPath = pyaas.module.moduleImports[name]
        except KeyError:
            logging.error('Unknown module: %s', name)
            continue
        # on import PyaasModules register themselves
        __import__(importPath)
    # Every PyaasModule subclass registered itself on import; give
    # each one the chance to initialise.
    for name, moduleClass in pyaas.module.PyaasModule.registry.items():
        moduleClass.load()
    return
class RegisterModuleMeta(type):
    """
    Metaclass maintaining a registry of module classes.

    The first class created with this metaclass (the base) receives an
    empty ``registry`` dict; every class derived from it afterwards is
    recorded in that shared dict under its class name.
    """
    def __init__(cls, name, bases, dct):
        if hasattr(cls, 'registry'):
            # Derived class: record it in the registry shared with
            # the base class.
            cls.registry[name] = cls
        else:
            # Base class: create the shared, initially empty registry.
            cls.registry = {}
        super(RegisterModuleMeta, cls).__init__(name, bases, dct)

class PyaasModule(object):
    """
    Base class for pluggable pyaas modules.

    Subclasses are auto-registered via RegisterModuleMeta and must
    implement load(). loadModule() resolves a module name to a class,
    preferring pyaas built-ins over user-supplied top-level modules,
    and caches the result per PyaasModule subclass.
    """
    PKG_PATH = 'pyaas'

    # Per-subclass cache of classes already resolved by loadModule(),
    # keyed by the name of the PyaasModule subclass doing the lookup.
    CLASSES = collections.defaultdict(dict)

    __metaclass__ = RegisterModuleMeta

    @classmethod
    def load(cls):
        """Initialise the module; concrete subclasses must override."""
        raise NotImplementedError

    @classmethod
    def loadModule(cls, moduleName):
        """Resolve *moduleName* to its implementing class and cache it.

        Lookup order: cache, then ``pyaas.<moduleName>``, then a
        top-level module of the same name. Within the imported leaf
        module the class is expected under the capitalized module name,
        with 'Database' as a legacy fallback.
        """
        cache = cls.CLASSES[cls.__name__]
        try:
            return cache[moduleName]
        except KeyError:
            pass  # not resolved yet; import and resolve below
        # First preference: a module shipped inside the pyaas package.
        try:
            path = cls.PKG_PATH + '.' + moduleName
            module = __import__(path)
        except ImportError:
            # Second preference: a user-supplied top-level module.
            try:
                path = moduleName
                module = __import__(path)
            except ImportError:
                raise pyaas.error('Unknown module: %s', moduleName)
        # __import__ returns the top-level package; walk down the
        # dotted path to reach the leaf submodule.
        subPackageName = path
        for subPackageName in subPackageName.split('.')[1:]:
            module = getattr(module, subPackageName)
        classname = subPackageName.capitalize()
        moduleClass = getattr(module, classname, None)
        if moduleClass is None:
            try:
                moduleClass = getattr(module, 'Database')
            except AttributeError:
                raise pyaas.error('Bad module: %s', moduleName)
        cache[moduleName] = moduleClass
        return moduleClass
|
1 How Much Surgery Has Rose McGowan Had?
1.2 How Much Work Was Really Done?
2 Rose McGowan – Before and After Pictures?
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013-2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import orm, fields
class SaleOrder(orm.Model):
    """sale.order extension: syncs CRM lead stages with order state
    changes and adds a partner contact field."""
    _inherit = 'sale.order'

    def _get_connected_sale_order(self, cr, uid, ids, field_name, model_name, context=None):
        """Function-field getter for 'connected_sale_order'.

        Flags an order as the user's "own" sale when it is the order
        referenced by context['own_sale_id'], either directly or --
        when the optional 'sale_version_id' column is installed --
        through its version parent.

        :return: dict mapping order id -> bool
        """
        context = context or self.pool['res.users'].context_get(cr, uid)
        result = {}
        order_id = context.get('own_sale_id')
        for sale in self.browse(cr, uid, ids, context):
            result[sale.id] = sale.id == order_id
            # Only consult the version chain if the (optional)
            # versioning module added the 'sale_version_id' column.
            if self.pool['sale.order']._columns.get('sale_version_id', False):
                if sale.sale_version_id and sale.sale_version_id.id == order_id:
                    result[sale.id] = True
        return result

    _columns = {
        'connected_sale_order': fields.function(_get_connected_sale_order, string='Own Sale', type='boolean'),
        'contact_id': fields.many2one('res.partner.address.contact', 'Contact'),
    }

    def onchange_partner_id(self, cr, uid, ids, part):
        """Extend the partner onchange: a new partner invalidates the
        previously selected contact, so clear it."""
        res = super(SaleOrder, self).onchange_partner_id(cr, uid, ids, part)
        res['value']['contact_id'] = False
        return res

    def hook_sale_state(self, cr, uid, orders, vals, context):
        """On an order state change, move every CRM lead attached to
        the order to the stage configured for (shop, new state) in
        crm.sale.stage, optionally syncing the planned revenue.

        :param orders: browse records of the orders being written
        :param vals: values passed to write(); only 'state' is used
        """
        crm_model = self.pool['crm.lead']
        crm_sale_stage_model = self.pool['crm.sale.stage']
        state = vals.get('state', False)
        for order in orders:
            lead_ids = crm_model.search(cr, uid, [('sale_order_id', '=', order.id)], context=context)
            # When invoked from a lead's own form, include that lead.
            if context.get('active_model', '') == 'crm.lead':
                lead_ids.append(context.get('active_id'))
            lead_ids = list(set(lead_ids))
            if lead_ids:
                crm_sale_stage_ids = crm_sale_stage_model.search(cr, uid, [('shop_id', '=', order.shop_id.id), ('name', '=', state)], context=context)
                if crm_sale_stage_ids:
                    crm_sale_stage = crm_sale_stage_model.browse(cr, uid, crm_sale_stage_ids[0], context)
                    stage_id = crm_sale_stage.stage_id.id
                    crm_value = {'stage_id': stage_id}
                    crm_value.update(crm_model.onchange_stage_id(cr, uid, lead_ids, stage_id)['value'])
                    if crm_sale_stage.update_amount:
                        crm_value.update({
                            'planned_revenue': order.amount_untaxed
                        })
                    # BUG FIX: dict.update() returns None, so the old
                    # call write(..., context.update({...})) passed
                    # context=None to write(). Mutate the context
                    # first, then pass it explicitly.
                    context.update({'force_stage_id': True})
                    crm_model.write(cr, uid, lead_ids, crm_value, context)
        return super(SaleOrder, self).hook_sale_state(cr, uid, orders, vals, context)
|
The Vision Technology Service is a Regional Assessment Centre for High Technology Vision Aids for the Assistive Devices Program, a funding program run by the Ontario Ministry of Health and Long-Term Care.
Introduce you to technical solutions to address the reading and writing challenges faced by individuals with low vision, blindness and/or multiple disabilities.
Identifying difficulties experienced in reading and writing encountered in personal independence needs, school or work productivity and leisure activities. For example, if you find any of the following tasks difficult, technology may help!
Providing comprehensive trials with appropriate technologies to compensate for these challenges.
Providing handouts, a summary report and funding documentation to support equipment purchase and training.
Closed Circuit Televisions (CCTV's) - simple magnification devices which assist in reading by enlarging items up to 40 times on a monitor.
Computers with Screen Reading and/or Screen Magnification software - enable people to write, edit, manage finances and access the Internet.
Scanners with special software that will read printed material out loud, such as schoolwork, magazines and books.
Braille Displays and Portable Notetaking Devices - to allow access to information when and how it is needed.
Ontario residents are eligible for a portion of the assessment fee and the equipment costs covered through the Assistive Devices Program (ADP). Eligible clients are those who are unable to perform every day age-related visual tasks in spite of conventional medical, surgical and/or routine refractive interventions (such as glasses). Clients who also present with physical and/or cognitive disability with vision loss can be seen at Vision Technology Service. Trials and recommendations can be made for alternative computer access, such as voice recognition or alternative keyboards.
To qualify, you must have a referral from your eye or family doctor, or a vision report dated within the past 6 months. This must include a diagnosis of your condition and a record of your visual acuity. For those who are financially assisted with the assessment fee by ODSP, Ontario Works or any other government assistance, proof of registration is required at the time of your initial assessment.
Where can this technology be purchased?
There are many vendors of adaptive technology for people with low vision or blindness. Please refer to SNOW's Assistive Technology glossary for AT manufacturers and/or vendors as well as the list of Ontario vendors for the Assistive Devices Program.
Our professional staff of Occupational Therapists are registered authorizers with the Assistive Devices Program. Authorizers at the Vision technology Service facilitate complex integration needs with alternative access equipment for individuals with physical disabilities.
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
from PyQt4 import QtCore, QtGui
from openlp.core.lib import build_icon
from openlp.core.lib.ui import create_widget_action
log = logging.getLogger(__name__)
class SearchEdit(QtGui.QLineEdit):
    """
    This is a specialised QLineEdit with a "clear" button inside for searches.

    A "search type" drop-down button can additionally be installed on
    the left via set_search_types(). The widget emits the old-style Qt
    signals ``cleared()`` and ``searchTypeChanged(int)``.
    """
    def __init__(self, parent):
        """
        Constructor.

        :param parent: The parent widget.
        """
        super(SearchEdit, self).__init__(parent)
        # -1 means "no search type selected yet"; maintained by
        # set_search_types() / set_current_search_type().
        self._current_search_type = -1
        self.clear_button = QtGui.QToolButton(self)
        self.clear_button.setIcon(build_icon(':/system/clear_shortcut.png'))
        self.clear_button.setCursor(QtCore.Qt.ArrowCursor)
        self.clear_button.setStyleSheet('QToolButton { border: none; padding: 0px; }')
        self.clear_button.resize(18, 18)
        # Hidden until there is text to clear; see
        # _on_search_edit_text_changed().
        self.clear_button.hide()
        self.clear_button.clicked.connect(self._on_clear_button_clicked)
        self.textChanged.connect(self._on_search_edit_text_changed)
        self._update_style_sheet()
        self.setAcceptDrops(False)

    def _update_style_sheet(self):
        """
        Internal method to update the stylesheet depending on which widgets are available and visible.

        Pads the text area so typed text never runs underneath the
        embedded clear (right) and menu (left) buttons, and raises the
        widget's minimum size to fit them.
        """
        frame_width = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
        right_padding = self.clear_button.width() + frame_width
        # The menu button only exists after set_search_types() ran.
        if hasattr(self, 'menu_button'):
            left_padding = self.menu_button.width()
            stylesheet = 'QLineEdit { padding-left: %spx; padding-right: %spx; } ' % (left_padding, right_padding)
        else:
            stylesheet = 'QLineEdit { padding-right: %spx; } ' % right_padding
        self.setStyleSheet(stylesheet)
        msz = self.minimumSizeHint()
        self.setMinimumSize(max(msz.width(), self.clear_button.width() + (frame_width * 2) + 2),
                            max(msz.height(), self.clear_button.height() + (frame_width * 2) + 2))

    def resizeEvent(self, event):
        """
        Reimplemented method to react to resizing of the widget. Keeps
        the clear button glued to the right edge and the menu button to
        the left edge, both vertically centred.

        :param event: The event that happened.
        """
        size = self.clear_button.size()
        frame_width = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
        self.clear_button.move(self.rect().right() - frame_width - size.width(),
                               (self.rect().bottom() + 1 - size.height()) // 2)
        if hasattr(self, 'menu_button'):
            size = self.menu_button.size()
            self.menu_button.move(self.rect().left() + frame_width + 2, (self.rect().bottom() + 1 - size.height()) // 2)

    def current_search_type(self):
        """
        Readonly property to return the current search type.

        :return: The identifier (int) supplied to set_search_types(),
            or -1 when none has been selected yet.
        """
        return self._current_search_type

    def set_current_search_type(self, identifier):
        """
        Set a new current search type and emit ``searchTypeChanged(int)``.

        :param identifier: The search type identifier (int).
        :return: True when the identifier matched a menu action;
            implicitly None otherwise.
        """
        menu = self.menu_button.menu()
        for action in menu.actions():
            if identifier == action.data():
                # setPlaceholderText has been implemented in Qt 4.7 and in at least PyQt 4.9 (I am not sure, if it was
                # implemented in PyQt 4.8).
                try:
                    self.setPlaceholderText(action.placeholder_text)
                except AttributeError:
                    pass
                self.menu_button.setDefaultAction(action)
                self._current_search_type = identifier
                # Old-style (PyQt4) signal emission.
                self.emit(QtCore.SIGNAL('searchTypeChanged(int)'), identifier)
                return True

    def set_search_types(self, items):
        """
        A list of tuples to be used in the search type menu. The first item in the list will be preselected as the
        default.

        :param items: The list of tuples to use. The tuples should contain an integer identifier, an icon (QIcon
            instance or string) and a title for the item in the menu. In short, they should look like this::

                (<identifier>, <icon>, <title>, <place holder text>)

            For instance::

                (1, <QIcon instance>, "Titles", "Search Song Titles...")

            Or::

                (2, ":/songs/authors.png", "Authors", "Search Authors...")
        """
        menu = QtGui.QMenu(self)
        for identifier, icon, title, placeholder in items:
            action = create_widget_action(
                menu, text=title, icon=icon, data=identifier, triggers=self._on_menu_action_triggered)
            # Remembered so set_current_search_type() can install it
            # when this action becomes the current one.
            action.placeholder_text = placeholder
            if first is None:
                first = action
                self._current_search_type = identifier
        # Create the menu button lazily: only on the first call.
        if not hasattr(self, 'menu_button'):
            self.menu_button = QtGui.QToolButton(self)
            self.menu_button.setIcon(build_icon(':/system/clear_shortcut.png'))
            self.menu_button.setCursor(QtCore.Qt.ArrowCursor)
            self.menu_button.setPopupMode(QtGui.QToolButton.InstantPopup)
            self.menu_button.setStyleSheet('QToolButton { border: none; padding: 0px 10px 0px 0px; }')
            self.menu_button.resize(QtCore.QSize(28, 18))
        self.menu_button.setMenu(menu)
        self.menu_button.setDefaultAction(first)
        self.menu_button.show()
        self._update_style_sheet()

    def _on_search_edit_text_changed(self, text):
        """
        Internally implemented slot to react to when the text in the line edit has changed so that we can show or hide
        the clear button.

        :param text: A :class:`~PyQt4.QtCore.QString` instance which represents the text in the line edit.
        """
        self.clear_button.setVisible(bool(text))

    def _on_clear_button_clicked(self):
        """
        Internally implemented slot to react to the clear button being clicked to clear the line edit. Once it has
        cleared the line edit, it emits the ``cleared()`` signal so that an application can react to the clearing of the
        line edit.
        """
        self.clear()
        self.emit(QtCore.SIGNAL('cleared()'))

    def _on_menu_action_triggered(self):
        """
        Internally implemented slot to react to the select of one of the search types in the menu. Once it has set the
        correct action on the button, and set the current search type (using the list of identifiers provided by the
        developer), the ``searchTypeChanged(int)`` signal is emitted with the identifier.
        """
        for action in self.menu_button.menu().actions():
            # Why is this needed?
            action.setChecked(False)
        # The sender is the triggered QAction; its data() is the
        # identifier registered in set_search_types().
        self.set_current_search_type(self.sender().data())
|
BUYING FOR A LARGER ORGANISATION?
C-Tec are due to launch their new offering into the fire alarm system market place. This system is known as the C-Tec ZFP. The C-Tec ZFP fire alarm control panels will be available from 2 to 8 loops in size and will be networkable. This system will support the Apollo analogue addressable fire alarm protocol devices. For more information on the C-Tec ZFP Fire Alarm Panels call us NOW on 01200 428 410.
This range of control panels is designed to complement the successful range of C-Tec XFP Fire Alarm Control panels and will allow C-Tec to offer fire detection solutions for much larger buildings without the need to network smaller panels together.
The C-Tec ZFP Panel features an attractive touch screen display which allows the user and engineer to control the functions of the fire alarm system once the enable key has been turned. This touch screen is featured on both the main control panel and also the compact controller units which can also be networked to the fire system. These compact controllers are ideal for use in reception areas where they give control functions to the ZFP fire system and also provide full network information.
If you’re interested in the C-Tec ZFP Fire Alarm Panels you may also be interested in the C-Tec XFP Fire Alarm Panel and C-Tec EFP Panel ranges.
Are you looking for high quality fire alarm systems? At The Safety Centre, we have an exceptional range of the best systems and equipment for a variety of different requirements. Whether you are looking for wireless alarm systems, call systems, a sealed lead acid battery, fire extinguishers or domestic fire detectors, you will find what you are looking for when you browse through our collection.
Here at The Safety Centre we are fire safety specialists and believe in providing our customers with the most dependable fire alarms and products in the industry. It is this commitment to first class equipment that has made us one of the leading providers of fire alarms and other safety equipment. We believe that when it comes to alarm systems that you won't find better elsewhere, so you should be sure to browse through our competitively priced collections today.
As we are passionate about only providing our customers with the finest fire alarms on the market, you can trust that you will be purchasing the best and most reliable systems. Due to the efficiency and dependability of our outstanding equipment, it has been incorporated in a range of high profile locations - so you can rest assured that our alarms won't let you down.
We offer a broad range of wireless alarms which are becoming increasingly sought-after across the world, and these innovative fire alarm systems are now as effective as wired alarms due to the improvements in radio technology over the past 20 years. Wireless fire alarm systems can be incorporated into both commercial and industrial settings and are extremely easy to install. Our wireless systems are extremely affordable and competitively-priced, and they also incorporate a range of features, such as sounders, call points, beacons, control panels and repeater panels. You should be sure to come to us for the best wireless fire alarms in the industry.
If you are looking for a premium sealed lead acid battery then you will be delighted with our range. We offer a sealed lead acid battery from some of the most popular and respected providers in the fire alarm industry, including Yuasa, Powersonic and Enersys. We ensure that all the sealed lead acid battery products we provide have been manufactured to the highest standard using some of the most state-of-the-art manufacturing techniques. Also, if we do not stock the sealed battery that you are looking for, then you should contact us today to see if we can find it for you.
Fire extinguishers are a must-have on any commercial or industrial site, so it is important that you purchase effective and reliable extinguishers. At The Safety Centre, we only stock fire extinguishers which have been manufactured to the highest standard, so you can choose from our ranges of Thomas Glover (Chubb Fire) extinguishers. As well as commercial and industrial fire extinguishers, we can also supply you with extinguisher cabinets, fire blankets, home and car fire extinguishers and accessories. So whether you are looking for wireless fire alarm systems, wired fire alarms, a sealed lead acid battery or fire extinguishers, you are sure to find the perfect fire safety product for your needs with us here at The Safety Centre.
" Good package, saved the hassle of selecting all the products individually. Arrived a couple of days after ordering. "
Feel free to fill out our quick contact form if you have any issues.
The Safety Centre has an extensive range of fire and safety products that we supply to companies and individuals worldwide, with over 30 years experience you can trust us to deliver a bespoke solution for all your fire and safety requirements.
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the 'diseased_vrelt_matriarch' mobile template with the
    spawn service."""
    template = MobileTemplate()

    # Identity, level and spawning behaviour.
    template.setCreatureName('diseased_vrelt_matriarch')
    template.setLevel(6)
    template.setDifficulty(Difficulty.NORMAL)
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setDeathblow(True)
    template.setScale(1)

    # Harvestable resources.
    template.setMeatType("Carnivore Meat")
    template.setMeatAmount(10)
    template.setHideType("Bristly Hide")
    template.setHideAmount(10)
    template.setBoneType("Animal Bones")
    template.setBoneAmount(10)

    # Social behaviour and aggression flags.
    template.setSocialGroup("vrelt")
    template.setAssistRange(10)
    template.setStalker(False)
    template.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    # Visual model(s).
    models = Vector()
    models.add('object/mobile/shared_vrelt.iff')
    template.setTemplates(models)

    # Combat configuration: natural (unarmed) weapon plus special attacks.
    weapons = Vector()
    weapons.add(WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff',
                               WeaponType.UNARMED, 1.0, 6, 'kinetic'))
    template.setWeaponTemplateVector(weapons)

    attacks = Vector()
    attacks.add('bm_bite_2')
    attacks.add('bm_bolster_armor_2')
    attacks.add('bm_enfeeble_2')
    template.setDefaultAttack('creatureRangedAttack')
    template.setAttacks(attacks)

    core.spawnService.addMobileTemplate('diseased_vrelt_matriarch', template)
|
Budget 2018: where is the desire to reform?
The Budget delivered some help for the High Street and this is welcome for many in retail. However, despite some short term relief there was no evidence of looking to reform an archaic business rates system, according to industry commenters.
The Budget’s main points for the retail sector included the £675 million Future High Streets Fund – helping local authorities shape the High Street of the future.
A 2% digital services tax from April 2020 on, for example, online market place providers (which are both profitable and have global revenue in excess of £500 million).
Accountancy firm Wilkins Kennedy head of retail and wholesale Phil Mullis commented: “The Chancellor acknowledged the High Street was under threat and ultimately fundamentally changing. For those retailers mentioned above, a business rates saving over the next two years will be more than welcome. However, there is no evidence that a total reform of business rates is on the agenda. Many retailers with larger property portfolios will certainly feel left out in the cold by this budget – where was the rates reduction for them?”
Phil added: “The Chancellor acknowledged that the High Street has to change to adopt the new ways in which people shop. Furthermore, he wishes to relax change of property use e.g. making it easier to convert from retail to housing. The availability of a £675 million Future High Streets Fund will go toward helping local authorities to make the transition.”
NFU Mutual retail sector specialist Frank Woods also commented on the Budget.
“The government’s commitment to reduce business rates for the smallest retailers in the UK is welcome, but is not going to satisfy those calling for a more fundamental reform of business rates across the country,” he noted.
“And while the £900m relief will result in short term savings for the smallest shops across the UK, the larger retailers who employ the majority of people across the sector will not receive a direct benefit from this. And of course it is those larger retailers who have been grabbing the headlines over the past 18 months, for all the wrong reasons.”
He added: “Taking steps to invest in transformation of city centres supports the widely recognised need for change. Making it easier for properties to be developed as homes to encourage more people to live in central urban areas will also help to build communities in areas where boarded up shops now dominate. Whether it will be enough to staunch the bad news that has been emanating from the sector in recent years is not so clear.”
|
import logging
logging.basicConfig(filename='/tmp/unittest')
import auxo.executor
import auxo.test.mocks
import unittest
class TestExecutor(unittest.TestCase):
    """Tests for auxo.executor.run()."""

    def testNoAgents(self):
        # Running with no agents yields no reports.
        reports = auxo.executor.run([])
        self.assertEqual(len(reports), 0)

    def testGoodAgents(self):
        # Each successful agent produces a report carrying its own text.
        expected = [("A", "hello"), ("B", "apple"), ("C", "orange")]
        agents = [auxo.test.mocks.mockAgent(name, text)
                  for name, text in expected]
        reports = auxo.executor.run(agents)
        self.assertEqual(len(reports), len(expected))
        for report, (name, text) in zip(reports, expected):
            self.assertEqual(report.name, name)
            self.assertEqual(report.text, text)

    def testBadAgents(self):
        # Agents that fail still produce a report, with a failure message.
        agents = [
            auxo.test.mocks.mockAgent("A", None),
            auxo.test.mocks.mockAgent("B", "apple"),
            auxo.test.mocks.mockAgent("C", None),
        ]
        reports = auxo.executor.run(agents)
        self.assertEqual(len(reports), 3)
        failure = 'Failed to complete.\n'
        expected = [("A", failure), ("B", "apple"), ("C", failure)]
        for report, (name, text) in zip(reports, expected):
            self.assertEqual(report.name, name)
            self.assertEqual(report.text, text)
|
CHAMPS Analytics helps the world’s largest organizations unleash the power of their most valuable assets: their data and their people.
CHAMPS Analytics specializes in showing companies how to turn their data into actionable decisions that impact the bottom line. Through the power of data and analytics, our solutions provide you the insights you need for better decision making in real-time.
By partnering with the leading Business Intelligence (BI) vendors, CHAMPS Analytics delivers high quality solutions in a quarter of the time and half the cost of traditional BI vendors.
Our customers realize that data is the new currency and there are millions of dollars of ideas that can be unleashed through the power of analytics.
|
from collections import defaultdict
from unittest.mock import patch
import structlog
from gevent.event import AsyncResult
from raiden.message_handler import MessageHandler
from raiden.messages.abstract import Message
from raiden.raiden_event_handler import EventHandler
from raiden.raiden_service import RaidenService
from raiden.tests.utils.events import check_nested_attrs
from raiden.transfer.architecture import Event as RaidenEvent
from raiden.transfer.mediated_transfer.events import SendSecretRequest, SendUnlock
from raiden.transfer.state import ChainState
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import Callable, Dict, List, NamedTuple, SecretHash, Set
log = structlog.get_logger(__name__)
class MessageWaiting(NamedTuple):
    # Attribute name -> expected value pairs an incoming message must match
    # (compared with check_nested_attrs) for this wait to fire.
    attributes: dict
    # The type of message being waited for.
    message_type: type
    # Set to the matching message once it arrives.
    async_result: AsyncResult
class HoldWait(NamedTuple):
    # RaidenEvent subclass to intercept.
    event_type: type
    # Set to the matching event when it is captured.
    async_result: AsyncResult
    # Attribute name -> expected value pairs the event must match.
    attributes: Dict
class Holding(NamedTuple):
    # The intercepted event, withheld from dispatch until release().
    event: RaidenEvent
    # Chain state captured at interception time, replayed on release().
    chain_state: ChainState
    # RaidenEvent subclass that was matched.
    event_type: type
    # Async result that was fulfilled with the event when it was captured.
    async_result: AsyncResult
    # Attribute name -> expected value pairs that matched the event.
    attributes: Dict
class WaitForMessage(MessageHandler):
    """MessageHandler wrapper that lets a test block until a message with
    the given attributes has been handled, while still processing messages
    normally."""

    def __init__(self):
        # Maps a message type to the list of pending MessageWaiting entries.
        self.waiting: Dict[type, list] = defaultdict(list)

    def wait_for_message(self, message_type: type, attributes: dict) -> AsyncResult:
        """Return an AsyncResult that is set once a message of type
        `message_type` matching `attributes` has been handled."""
        assert not any(attributes == waiting.attributes for waiting in self.waiting[message_type])
        # Bug fix: record the requested type; previously the abstract
        # `Message` base class was stored here regardless of the argument.
        waiting = MessageWaiting(
            attributes=attributes, message_type=message_type, async_result=AsyncResult()
        )
        self.waiting[message_type].append(waiting)
        return waiting.async_result

    def on_messages(self, raiden: RaidenService, messages: List[Message]) -> None:
        # First handle the message, and then set the events, to ensure the
        # expected side-effects of the message are applied
        super().on_messages(raiden, messages)

        for message in messages:
            for waiting in self.waiting[type(message)]:
                if check_nested_attrs(message, waiting.attributes):
                    waiting.async_result.set(message)
class HoldRaidenEventHandler(EventHandler):
    """Use this handler to stop the node from processing an event.

    This is useful:
    - Simulate network communication problems, by delaying when protocol
      messages are sent.
    - Simulate blockchain congestion, by delaying transactions.
    - Wait for a given state of the protocol, by waiting for an event to be
      available.
    """

    def __init__(self, wrapped_handler: EventHandler):
        self.wrapped = wrapped_handler
        # Holds that have been requested but not yet matched by an event.
        self.eventtype_to_waitingholds: Dict[type, List[HoldWait]] = defaultdict(list)
        # Holds whose event has been captured and is withheld from dispatch.
        self.eventtype_to_holdings: Dict[type, List[Holding]] = defaultdict(list)
        # Callbacks invoked with every event before hold matching.
        self.pre_hooks: Set[Callable] = set()

    def on_raiden_events(
        self, raiden: RaidenService, chain_state: ChainState, events: List[RaidenEvent]
    ):
        """Intercept `events`: any event matching a waiting hold is captured
        instead of dispatched; the rest are forwarded to the wrapped
        handler."""
        events_to_dispatch = list()

        for event in events:
            for hook in self.pre_hooks:
                hook(event)

            event_type = type(event)
            # First check that there are no overlapping holds, otherwise the test
            # is likely flaky. It should either reuse the hold for the same event
            # or different holds must match a unique event.
            for hold in self.eventtype_to_holdings[event_type]:
                if check_nested_attrs(event, hold.attributes):
                    msg = (
                        f"Matching event of type {event.__class__.__name__} emitted "
                        f"twice, this should not happen. Either there is a bug in the "
                        f"state machine or the hold.attributes is too generic and "
                        f"multiple different events are matching. Event: {event} "
                        f"Attributes: {hold.attributes}"
                    )
                    raise RuntimeError(msg)

            waitingholds = self.eventtype_to_waitingholds[event_type]
            for pos, waiting_hold in enumerate(waitingholds):

                # If it is a match:
                # - Delete the waiting hold and add it to the holding
                # - Do not dispatch the event
                # - Notify the test by setting the async_result
                if check_nested_attrs(event, waiting_hold.attributes):
                    holding = Holding(
                        event=event,
                        chain_state=chain_state,
                        event_type=waiting_hold.event_type,
                        async_result=waiting_hold.async_result,
                        attributes=waiting_hold.attributes,
                    )
                    del self.eventtype_to_waitingholds[event_type][pos]
                    self.eventtype_to_holdings[event_type].append(holding)
                    waiting_hold.async_result.set(event)
                    break
            else:
                # Only dispatch the event if it didn't match any of the holds
                events_to_dispatch.append(event)

        if events_to_dispatch:
            self.wrapped.on_raiden_events(raiden, chain_state, events_to_dispatch)

    def hold(self, event_type: type, attributes: Dict) -> AsyncResult:
        """Request that the next event of `event_type` matching `attributes`
        be withheld; the returned AsyncResult is set when it is captured."""
        hold = HoldWait(event_type=event_type, async_result=AsyncResult(), attributes=attributes)
        self.eventtype_to_waitingholds[event_type].append(hold)
        log.debug(f"Hold for {event_type.__name__} with {attributes} created.")
        return hold.async_result

    def release(self, raiden: RaidenService, event: RaidenEvent):
        """Dispatch a previously captured `event` to the wrapped handler,
        using the chain state recorded at capture time."""
        holds = self.eventtype_to_holdings[type(event)]
        found = None

        for pos, hold in enumerate(holds):
            if hold.event == event:
                found = (pos, hold)
                break

        msg = (
            "Cannot release unknown event. "
            "Either it was never held, or the event was not emitted yet, "
            "or it was released twice."
        )
        assert found is not None, msg

        hold = holds.pop(found[0])
        self.wrapped.on_raiden_events(raiden, hold.chain_state, [event])
        log.debug(f"{event} released.", node=to_checksum_address(raiden.address))

    def hold_secretrequest_for(self, secrethash: SecretHash) -> AsyncResult:
        """Withhold the next SendSecretRequest for `secrethash`."""
        return self.hold(SendSecretRequest, {"secrethash": secrethash})

    def hold_unlock_for(self, secrethash: SecretHash):
        """Withhold the next SendUnlock for `secrethash`."""
        return self.hold(SendUnlock, {"secrethash": secrethash})

    def release_secretrequest_for(self, raiden: RaidenService, secrethash: SecretHash):
        """Release every held SendSecretRequest matching `secrethash`."""
        # Bug fix: iterate over a snapshot -- release() pops the hold from
        # eventtype_to_holdings, which previously mutated the list while it
        # was being iterated and could skip matching holds.
        for hold in list(self.eventtype_to_holdings[SendSecretRequest]):
            if hold.attributes["secrethash"] == secrethash:
                self.release(raiden, hold.event)

    def release_unlock_for(self, raiden: RaidenService, secrethash: SecretHash):
        """Release every held SendUnlock matching `secrethash`."""
        # Bug fix: iterate over a snapshot, see release_secretrequest_for().
        for hold in list(self.eventtype_to_holdings[SendUnlock]):
            if hold.attributes["secrethash"] == secrethash:
                self.release(raiden, hold.event)
def dont_handle_lock_expired_mock(app):
    """Return a patch context for a raiden app in which lock-expired
    messages are swallowed (the handler returns an empty list)."""

    def _swallow(raiden, message):  # pylint: disable=unused-argument
        return []

    return patch.object(
        app.message_handler, "handle_message_lockexpired", side_effect=_swallow
    )
|
What is a Coach? How Do I Become a Coach?
When your students sign up, they will be able to enter your email address as that of their coach. You will then receive an email asking you to confirm your role as coach for this student.
You can then log in as coach by clicking on the login button to your right. You'll be able to see how much time your student(s) are spending on the site, the progress they've made in the tutorials, how well they are doing on the practice questions and the number of words they've mastered in the Vocab Builder.
You can also enroll as a user yourself, so you can have full access to the tutorial materials on the site. We encourage you to browse around so that you know what your student(s) are working on. We understand that you might need to brush up on some of this material. That's why our site is always available.
We hope that you find this arrangement useful, and that you'll be able to help one or more students to get the maximum possible benefit from our site.
|
import lmfit
from .eqn_electronic import zharkov_pel
class ZharkovElecModel(lmfit.Model):
    """
    lmfit Model class for Zharkov electronic contribution fitting
    """
    def __init__(self, n, z, independent_vars=None,
                 param_names=None,
                 prefix='', missing=None, name=None, **kwargs):
        """
        :param n: number of elements in a chemical formula
        :param z: number of formula unit in a unit cell
        :param independent_vars: define independent variables for lmfit,
            unit-cell volume in A^3 and temperature in K;
            defaults to ['v', 'temp']
        :param param_names: define parameter names;
            defaults to ['v0', 'e0', 'g']
        :param prefix: see lmfit
        :param missing: see lmfit
        :param name: see lmfit
        :param kwargs: see lmfit
        """
        # Use None sentinels instead of mutable list defaults: a shared
        # default list could be mutated across instances (or by lmfit).
        if independent_vars is None:
            independent_vars = ['v', 'temp']
        if param_names is None:
            param_names = ['v0', 'e0', 'g']
        kwargs.update({'prefix': prefix, 'missing': missing,
                       'independent_vars': independent_vars,
                       'param_names': param_names})
        super(ZharkovElecModel, self).__init__(zharkov_pel, n=n, z=z, **kwargs)
        # v0 (unit-cell volume) must be physically positive.
        self.set_param_hint('v0', min=0.)
        self.set_param_hint('e0')
        self.set_param_hint('g')
|
We're a veteran and family-owned business for 50 years, and have been operating from the same location ever since! You can rely on us to haul away your old flooring. Speak to us now.
Call us for a FREE estimates and measurement.
Has your tiled flooring started chipping due to heavy footfall? Count on our experienced technicians at Fashion Floor Carpet Galleries in Kingston, PA to assist you efficiently. We have been helping families with exquisite tiling services for over 100 years and counting!
We are proudly serving Kingston, PA with exceptional flooring services for over 100 years!
Take a look at our exceptional selection of tiles. Our flooring will be radiant in your home.
We provide warranties on materials and labor.
|
# Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
from ovs.db import error
import ovs.db.parser
import ovs.db.data
import ovs.ovsuuid
class AtomicType(object):
    """One of the atomic (scalar) OVSDB types, e.g. "integer" or "string".

    Each instance carries the type's name, its default value, and the tuple
    of Python types that may represent values of that type.
    """

    def __init__(self, name, default, python_types):
        self.name = name
        self.default = default
        self.python_types = python_types

    @staticmethod
    def from_string(s):
        """Return the AtomicType named 's'; "void" is always rejected."""
        if s != "void":
            match = next((t for t in ATOMIC_TYPES if t.name == s), None)
            if match is not None:
                return match
        raise error.Error('"%s" is not an atomic-type' % s, s)

    @staticmethod
    def from_json(json):
        """Parse a JSON string as an atomic type name."""
        if type(json) in [str, unicode]:
            return AtomicType.from_string(json)
        raise error.Error("atomic-type expected", json)

    def __str__(self):
        return self.to_string()

    def to_string(self):
        return self.name

    def to_json(self):
        return self.name

    def default_atom(self):
        """Return a fresh Atom holding this type's default value."""
        return ovs.db.data.Atom(self, self.default)
# Singleton AtomicType instances for each OVSDB atomic type.  The third
# argument lists the Python types accepted for values of that atomic type
# (this is Python 2 code: int/long, str/unicode).
VoidType = AtomicType("void", None, ())
IntegerType = AtomicType("integer", 0, (int, long))
RealType = AtomicType("real", 0.0, (int, long, float))
BooleanType = AtomicType("boolean", False, (bool,))
StringType = AtomicType("string", "", (str, unicode))
UuidType = AtomicType("uuid", ovs.ovsuuid.zero(), (uuid.UUID,))

# All atomic types, in the order searched by AtomicType.from_string().
ATOMIC_TYPES = [VoidType, IntegerType, RealType, BooleanType, StringType,
                UuidType]
def escapeCString(src):
    """Return 'src' escaped for use inside a C string literal.

    Backslash and double quote are backslash-escaped; control characters
    (ord < 32) become their conventional C escapes or, failing that, a
    three-digit octal escape; every other character passes through
    unchanged."""
    # Named C escapes for the common control characters.
    escapes = {'\n': '\\n', '\r': '\\r', '\a': '\\a', '\b': '\\b',
               '\f': '\\f', '\t': '\\t', '\v': '\\v'}
    # Collect pieces and join once: the previous "dst += ..." loop is
    # potentially quadratic in the length of 'src'.
    parts = []
    for c in src:
        if c in '\\"':
            parts.append('\\' + c)
        elif ord(c) < 32:
            parts.append(escapes.get(c) or '\\%03o' % ord(c))
        else:
            parts.append(c)
    return ''.join(parts)
def commafy(x):
    """Format integer 'x' in decimal, with thousands set off by commas."""
    decimal = "%d" % x
    return _commafy(decimal)
def _commafy(s):
if s.startswith('-'):
return '-' + _commafy(s[1:])
elif len(s) <= 3:
return s
else:
return _commafy(s[:-3]) + ',' + _commafy(s[-3:])
def returnUnchanged(x):
    """Identity function, used below as the default escaping callback for
    the to-English helpers."""
    return x
class BaseType(object):
    """An OVSDB base type: an atomic type plus optional constraints.

    Constraints may be an enumeration of permitted values ('enum'), numeric
    bounds ('min'/'max'), string length bounds ('min_length'/'max_length'),
    or, for uuid types, the name of a referenced table ('ref_table_name')
    with a 'strong' or 'weak' reference type."""

    def __init__(self, type_, enum=None, min=None, max=None,
                 min_length=0, max_length=sys.maxint, ref_table_name=None):
        assert isinstance(type_, AtomicType)
        self.type = type_
        self.enum = enum
        self.min = min
        self.max = max
        self.min_length = min_length
        self.max_length = max_length
        self.ref_table_name = ref_table_name
        if ref_table_name:
            # from_json() may later override this with "weak".
            self.ref_type = 'strong'
        else:
            self.ref_type = None
        self.ref_table = None

    def default(self):
        """Return a default Atom of this base type."""
        return ovs.db.data.Atom.default(self.type)

    def __eq__(self, other):
        if not isinstance(other, BaseType):
            return NotImplemented
        return (self.type == other.type and self.enum == other.enum and
                self.min == other.min and self.max == other.max and
                self.min_length == other.min_length and
                self.max_length == other.max_length and
                self.ref_table_name == other.ref_table_name)

    def __ne__(self, other):
        if not isinstance(other, BaseType):
            return NotImplemented
        else:
            return not (self == other)

    @staticmethod
    def __parse_uint(parser, name, default):
        """Parse optional member 'name' as an unsigned 32-bit integer,
        returning 'default' if it is absent."""
        value = parser.get_optional(name, [int, long])
        if value is None:
            value = default
        else:
            max_value = 2 ** 32 - 1
            if not (0 <= value <= max_value):
                raise error.Error("%s out of valid range 0 to %d"
                                  % (name, max_value), value)
        return value

    @staticmethod
    def from_json(json):
        """Parse JSON 'json' as an ovsdb <base-type> and return a BaseType.

        A bare string is shorthand for an unconstrained atomic type;
        otherwise the constraints appropriate to the atomic type are parsed
        from the JSON object.  Raises error.Error on malformed input."""
        if type(json) in [str, unicode]:
            return BaseType(AtomicType.from_json(json))

        parser = ovs.db.parser.Parser(json, "ovsdb type")
        atomic_type = AtomicType.from_json(parser.get("type", [str, unicode]))

        base = BaseType(atomic_type)

        enum = parser.get_optional("enum", [])
        if enum is not None:
            base.enum = ovs.db.data.Datum.from_json(
                    BaseType.get_enum_type(base.type), enum)
        elif base.type == IntegerType:
            base.min = parser.get_optional("minInteger", [int, long])
            base.max = parser.get_optional("maxInteger", [int, long])
            if (base.min is not None and base.max is not None
                    and base.min > base.max):
                raise error.Error("minInteger exceeds maxInteger", json)
        elif base.type == RealType:
            base.min = parser.get_optional("minReal", [int, long, float])
            base.max = parser.get_optional("maxReal", [int, long, float])
            if (base.min is not None and base.max is not None
                    and base.min > base.max):
                raise error.Error("minReal exceeds maxReal", json)
        elif base.type == StringType:
            base.min_length = BaseType.__parse_uint(parser, "minLength", 0)
            base.max_length = BaseType.__parse_uint(parser, "maxLength",
                                                    sys.maxint)
            if base.min_length > base.max_length:
                raise error.Error("minLength exceeds maxLength", json)
        elif base.type == UuidType:
            base.ref_table_name = parser.get_optional("refTable", ['id'])
            if base.ref_table_name:
                base.ref_type = parser.get_optional("refType", [str, unicode],
                                                    "strong")
                if base.ref_type not in ['strong', 'weak']:
                    raise error.Error('refType must be "strong" or "weak" '
                                      '(not "%s")' % base.ref_type)
        parser.finish()

        return base

    def to_json(self):
        """Return the JSON representation of this base type; constraints
        that are still at their defaults are omitted."""
        if not self.has_constraints():
            return self.type.to_json()

        json = {'type': self.type.to_json()}

        if self.enum:
            json['enum'] = self.enum.to_json()

        if self.type == IntegerType:
            if self.min is not None:
                json['minInteger'] = self.min
            if self.max is not None:
                json['maxInteger'] = self.max
        elif self.type == RealType:
            if self.min is not None:
                json['minReal'] = self.min
            if self.max is not None:
                json['maxReal'] = self.max
        elif self.type == StringType:
            if self.min_length != 0:
                json['minLength'] = self.min_length
            if self.max_length != sys.maxint:
                json['maxLength'] = self.max_length
        elif self.type == UuidType:
            if self.ref_table_name:
                json['refTable'] = self.ref_table_name
                if self.ref_type != 'strong':
                    json['refType'] = self.ref_type

        return json

    def copy(self):
        """Return a copy of this BaseType.

        Bug fix: 'enum' is None for types without an enum constraint; the
        previous implementation called self.enum.copy() unconditionally and
        raised AttributeError for such types."""
        if self.enum is None:
            enum = None
        else:
            enum = self.enum.copy()
        base = BaseType(self.type, enum, self.min, self.max,
                        self.min_length, self.max_length, self.ref_table_name)
        base.ref_table = self.ref_table
        return base

    def is_valid(self):
        """Check internal consistency of the constraints for this type."""
        if self.type in (VoidType, BooleanType, UuidType):
            return True
        elif self.type in (IntegerType, RealType):
            return self.min is None or self.max is None or self.min <= self.max
        elif self.type == StringType:
            return self.min_length <= self.max_length
        else:
            return False

    def has_constraints(self):
        """True if any constraint differs from its default."""
        return (self.enum is not None or self.min is not None or
                self.max is not None or
                self.min_length != 0 or self.max_length != sys.maxint or
                self.ref_table_name is not None)

    def without_constraints(self):
        """Return a new, unconstrained BaseType of the same atomic type."""
        return BaseType(self.type)

    @staticmethod
    def get_enum_type(atomic_type):
        """Returns the type of the 'enum' member for a BaseType whose
        'type' is 'atomic_type'."""
        return Type(BaseType(atomic_type), None, 1, sys.maxint)

    def is_ref(self):
        """True if this is a uuid that references a table."""
        return self.type == UuidType and self.ref_table_name is not None

    def is_strong_ref(self):
        return self.is_ref() and self.ref_type == 'strong'

    def is_weak_ref(self):
        return self.is_ref() and self.ref_type == 'weak'

    def toEnglish(self, escapeLiteral=returnUnchanged):
        """Return an English name for this base type, e.g. "integer" or
        "weak reference to Bridge"."""
        if self.type == UuidType and self.ref_table_name:
            s = escapeLiteral(self.ref_table_name)
            if self.ref_type == 'weak':
                s = "weak reference to " + s
            return s
        else:
            return self.type.to_string()

    def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
                             escapeNumber=returnUnchanged):
        """Return an English description of this type's constraints, or ''
        if there are none worth describing."""
        if self.enum:
            literals = [value.toEnglish(escapeLiteral)
                        for value in self.enum.values]
            if len(literals) == 2:
                english = 'either %s or %s' % (literals[0], literals[1])
            else:
                english = 'one of %s, %s, or %s' % (literals[0],
                                                    ', '.join(literals[1:-1]),
                                                    literals[-1])
        elif self.min is not None and self.max is not None:
            if self.type == IntegerType:
                english = 'in range %s to %s' % (
                    escapeNumber(commafy(self.min)),
                    escapeNumber(commafy(self.max)))
            else:
                english = 'in range %s to %s' % (
                    escapeNumber("%g" % self.min),
                    escapeNumber("%g" % self.max))
        elif self.min is not None:
            if self.type == IntegerType:
                english = 'at least %s' % escapeNumber(commafy(self.min))
            else:
                english = 'at least %s' % escapeNumber("%g" % self.min)
        elif self.max is not None:
            if self.type == IntegerType:
                english = 'at most %s' % escapeNumber(commafy(self.max))
            else:
                english = 'at most %s' % escapeNumber("%g" % self.max)
        elif self.min_length != 0 and self.max_length != sys.maxint:
            if self.min_length == self.max_length:
                english = ('exactly %s characters long'
                           % commafy(self.min_length))
            else:
                english = ('between %s and %s characters long'
                           % (commafy(self.min_length),
                              commafy(self.max_length)))
        elif self.min_length != 0:
            # Consistency fix: assign 'english' like every other branch
            # (behavior unchanged; it previously returned directly).
            english = 'at least %s characters long' % commafy(self.min_length)
        elif self.max_length != sys.maxint:
            english = 'at most %s characters long' % commafy(self.max_length)
        else:
            english = ''

        return english

    def toCType(self, prefix):
        """Return the C type used to represent values of this base type."""
        if self.ref_table_name:
            return "struct %s%s *" % (prefix, self.ref_table_name.lower())
        else:
            return {IntegerType: 'int64_t ',
                    RealType: 'double ',
                    UuidType: 'struct uuid ',
                    BooleanType: 'bool ',
                    StringType: 'char *'}[self.type]

    def toAtomicType(self):
        """Return the C OVSDB_TYPE_* enumerator for this atomic type."""
        return "OVSDB_TYPE_%s" % self.type.to_string().upper()

    def copyCValue(self, dst, src):
        """Return a C statement that copies 'src' into 'dst'."""
        args = {'dst': dst, 'src': src}
        if self.ref_table_name:
            return ("%(dst)s = %(src)s->header_.uuid;") % args
        elif self.type == StringType:
            return "%(dst)s = xstrdup(%(src)s);" % args
        else:
            return "%(dst)s = %(src)s;" % args

    def initCDefault(self, var, is_optional):
        """Return a C statement that initializes 'var' to the default value
        for this base type."""
        if self.ref_table_name:
            return "%s = NULL;" % var
        elif self.type == StringType and not is_optional:
            return '%s = "";' % var
        else:
            pattern = {IntegerType: '%s = 0;',
                       RealType: '%s = 0.0;',
                       UuidType: 'uuid_zero(&%s);',
                       BooleanType: '%s = false;',
                       StringType: '%s = NULL;'}[self.type]
            return pattern % var

    def cInitBaseType(self, indent, var):
        """Return C statements (joined, each prefixed by 'indent') that
        initialize an ovsdb_base_type 'var' to match this BaseType."""
        stmts = []
        stmts.append('ovsdb_base_type_init(&%s, %s);' % (
                var, self.toAtomicType()))
        if self.enum:
            stmts.append("%s.enum_ = xmalloc(sizeof *%s.enum_);"
                         % (var, var))
            stmts += self.enum.cInitDatum("%s.enum_" % var)
        if self.type == IntegerType:
            if self.min is not None:
                stmts.append('%s.u.integer.min = INT64_C(%d);'
                             % (var, self.min))
            if self.max is not None:
                stmts.append('%s.u.integer.max = INT64_C(%d);'
                             % (var, self.max))
        elif self.type == RealType:
            if self.min is not None:
                stmts.append('%s.u.real.min = %d;' % (var, self.min))
            if self.max is not None:
                stmts.append('%s.u.real.max = %d;' % (var, self.max))
        elif self.type == StringType:
            # NOTE(review): min_length defaults to 0 and is never None, so
            # this condition always emits minLen (unlike to_json(), which
            # omits the default) -- confirm whether that is intended.
            if self.min_length is not None:
                stmts.append('%s.u.string.minLen = %d;'
                             % (var, self.min_length))
            if self.max_length != sys.maxint:
                stmts.append('%s.u.string.maxLen = %d;'
                             % (var, self.max_length))
        elif self.type == UuidType:
            if self.ref_table_name is not None:
                stmts.append('%s.u.uuid.refTableName = "%s";'
                             % (var, escapeCString(self.ref_table_name)))
                stmts.append('%s.u.uuid.refType = OVSDB_REF_%s;'
                             % (var, self.ref_type.upper()))
        return '\n'.join([indent + stmt for stmt in stmts])
class Type(object):
DEFAULT_MIN = 1
DEFAULT_MAX = 1
    def __init__(self, key, value=None, n_min=DEFAULT_MIN, n_max=DEFAULT_MAX):
        """Create an ovsdb type.

        :param key: BaseType of the key (the element type for scalars/sets).
        :param value: BaseType of the value for a map, or None.
        :param n_min: minimum number of elements.
        :param n_max: maximum number of elements ("unlimited" is
            represented as sys.maxint by from_json()).
        """
        self.key = key
        self.value = value
        self.n_min = n_min
        self.n_max = n_max
def copy(self):
if self.value is None:
value = None
else:
value = self.value.copy()
return Type(self.key.copy(), value, self.n_min, self.n_max)
def __eq__(self, other):
if not isinstance(other, Type):
return NotImplemented
return (self.key == other.key and self.value == other.value and
self.n_min == other.n_min and self.n_max == other.n_max)
def __ne__(self, other):
if not isinstance(other, Type):
return NotImplemented
else:
return not (self == other)
def is_valid(self):
return (self.key.type != VoidType and self.key.is_valid() and
(self.value is None or
(self.value.type != VoidType and self.value.is_valid())) and
self.n_min <= 1 <= self.n_max)
def is_scalar(self):
return self.n_min == 1 and self.n_max == 1 and not self.value
def is_optional(self):
return self.n_min == 0 and self.n_max == 1
def is_composite(self):
return self.n_max > 1
def is_set(self):
return self.value is None and (self.n_min != 1 or self.n_max != 1)
def is_map(self):
return self.value is not None
def is_smap(self):
return (self.is_map()
and self.key.type == StringType
and self.value.type == StringType)
def is_optional_pointer(self):
return (self.is_optional() and not self.value
and (self.key.type == StringType or self.key.ref_table_name))
@staticmethod
def __n_from_json(json, default):
if json is None:
return default
elif type(json) == int and 0 <= json <= sys.maxint:
return json
else:
raise error.Error("bad min or max value", json)
    @staticmethod
    def from_json(json):
        """Parse a JSON ovsdb <type> into a Type.

        A bare string is shorthand for a scalar of that atomic type;
        otherwise the object's "key", "value", "min" and "max" members are
        parsed, with "max" also accepting the special value "unlimited".
        Raises error.Error if the resulting Type fails constraint checks.
        """
        if type(json) in [str, unicode]:
            return Type(BaseType.from_json(json))

        parser = ovs.db.parser.Parser(json, "ovsdb type")
        key_json = parser.get("key", [dict, str, unicode])
        value_json = parser.get_optional("value", [dict, str, unicode])
        min_json = parser.get_optional("min", [int])
        max_json = parser.get_optional("max", [int, str, unicode])
        parser.finish()

        key = BaseType.from_json(key_json)
        if value_json:
            value = BaseType.from_json(value_json)
        else:
            value = None

        n_min = Type.__n_from_json(min_json, Type.DEFAULT_MIN)

        # "unlimited" is represented internally as sys.maxint.
        if max_json == 'unlimited':
            n_max = sys.maxint
        else:
            n_max = Type.__n_from_json(max_json, Type.DEFAULT_MAX)

        type_ = Type(key, value, n_min, n_max)
        if not type_.is_valid():
            raise error.Error("ovsdb type fails constraint checks", json)
        return type_
def to_json(self):
if self.is_scalar() and not self.key.has_constraints():
return self.key.to_json()
json = {"key": self.key.to_json()}
if self.value is not None:
json["value"] = self.value.to_json()
if self.n_min != Type.DEFAULT_MIN:
json["min"] = self.n_min
if self.n_max == sys.maxint:
json["max"] = "unlimited"
elif self.n_max != Type.DEFAULT_MAX:
json["max"] = self.n_max
return json
def toEnglish(self, escapeLiteral=returnUnchanged):
keyName = self.key.toEnglish(escapeLiteral)
if self.value:
valueName = self.value.toEnglish(escapeLiteral)
if self.is_scalar():
return keyName
elif self.is_optional():
if self.value:
return "optional %s-%s pair" % (keyName, valueName)
else:
return "optional %s" % keyName
else:
if self.n_max == sys.maxint:
if self.n_min:
quantity = "%s or more " % commafy(self.n_min)
else:
quantity = ""
elif self.n_min:
quantity = "%s to %s " % (commafy(self.n_min),
commafy(self.n_max))
else:
quantity = "up to %s " % commafy(self.n_max)
if self.value:
return "map of %s%s-%s pairs" % (quantity, keyName, valueName)
else:
if keyName.endswith('s'):
plural = keyName + "es"
else:
plural = keyName + "s"
return "set of %s%s" % (quantity, plural)
def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
escapeNumber=returnUnchanged):
constraints = []
keyConstraints = self.key.constraintsToEnglish(escapeLiteral,
escapeNumber)
if keyConstraints:
if self.value:
constraints.append('key %s' % keyConstraints)
else:
constraints.append(keyConstraints)
if self.value:
valueConstraints = self.value.constraintsToEnglish(escapeLiteral,
escapeNumber)
if valueConstraints:
constraints.append('value %s' % valueConstraints)
return ', '.join(constraints)
def cDeclComment(self):
if self.n_min == 1 and self.n_max == 1 and self.key.type == StringType:
return "\t/* Always nonnull. */"
else:
return ""
def cInitType(self, indent, var):
initKey = self.key.cInitBaseType(indent, "%s.key" % var)
if self.value:
initValue = self.value.cInitBaseType(indent, "%s.value" % var)
else:
initValue = ('%sovsdb_base_type_init(&%s.value, '
'OVSDB_TYPE_VOID);' % (indent, var))
initMin = "%s%s.n_min = %s;" % (indent, var, self.n_min)
if self.n_max == sys.maxint:
n_max = "UINT_MAX"
else:
n_max = self.n_max
initMax = "%s%s.n_max = %s;" % (indent, var, n_max)
return "\n".join((initKey, initValue, initMin, initMax))
|
Ofcom's sixth Communications Market Report has found that 32% of homes still lack a broadband connection, although broadband take-up — now 68% of homes — is up by 17% on a year ago, when only 58% of homes were broadband enabled.
Take up of broadband around the UK continues to vary. In Scotland, the number of households with broadband is up this year by 13% to 60%. In Northern Ireland, it is up by nearly 25% to 64% and, in Wales, by a third to 58%.
More than 1 in 10 households (12%) has a mobile broadband connection and, of these, 75% also have access to broadband through their landline. In May of this year alone there were more than a quarter of a million new mobile broadband connections, up from 139,000 new connections in May 2008. In addition, some eight million people in the UK had accessed the internet on their mobile phone in the first quarter of this year, an increase of more than 40% on the previous year.
The average consumer is now spending 25 minutes a day online at home — up from nine minutes in 2004.
The report also looked into how consumers view their communications services and found that they remain important, despite the recession. When asked which items consumers were likely to cut back on in the recession, 47% would choose to cut back on going out for dinner, 41% on DIY and 41% on holidays. This compares with only 19% who would cut back on mobile phone spend, 16% on TV subscriptions and 10% on their broadband services.
|
import plotly as py
import plotly.graph_objs as graph
def choose_color(entropy):
    """Map an entropy value in [0, 1] to an (R, G, B, alpha) colour tuple.

    High entropy pushes the colour toward red and raises the opacity;
    low entropy yields a mostly-blue, more transparent bar.
    """
    red_channel = round(255 * entropy ** 5)
    green_channel = round(255 * ((1 - (entropy - 0.5) ** 2) / 8) ** 0.6)
    blue_channel = round(255 * (1 - 0.8 * entropy ** 0.05))
    # Opacity ramps from 0.6 up to 0.9, rounded to two decimal places.
    opacity = 0.6 + round(0.3 * entropy * 100) / 100.0
    return (red_channel, green_channel, blue_channel, opacity)
def create_annotations(entropy_data, block_descriptions, file_parts):
    """Build one plotly annotation per file part, anchored at the part's
    first bar (first x position, first entropy value)."""
    annotations = []
    for index, block_values in enumerate(entropy_data):
        annotations.append(dict(
            x=block_descriptions[index][0],
            y=block_values[0],
            xref='x',
            yref='y',
            text=str(file_parts[index]),
            showarrow=True,
            arrowhead=4,
            ax=0,
            ay=-40,
        ))
    return annotations
def create_plot(entropy_data, block_descriptions):
    """Build a plotly Bar of all entropy values, one colour per bar."""
    # Flatten the per-part lists into a single sequence of bars.
    flat_entropy = sum(entropy_data, [])
    flat_positions = sum(block_descriptions, [])
    bar_colors = ['rgba' + str(choose_color(value)) for value in flat_entropy]
    return graph.Bar(
        y=flat_entropy,
        x=flat_positions,
        marker=dict(color=bar_colors),
    )
def plot_page(bar, title, name, annotations = []):
    """Render the bar as an offline '<name>.html' page (opens in browser)."""
    page_layout = graph.Layout(title=title, annotations=annotations)
    page_figure = graph.Figure(data=[bar], layout=page_layout)
    # plotly's offline mode writes the file and launches the browser.
    py.offline.plot(page_figure, filename=name + '.html')
def save_to_file(bar, title, name, width=1024, height=640):
    """Render the bar to a page that triggers a .png download.

    Usability is limited because this still opens a plotly page in the
    browser; a headless exporter would be a better long-term solution.
    ``name`` is accepted for symmetry with plot_page but is not used here.
    """
    image_layout = graph.Layout(title=title, width=width, height=height)
    image_figure = graph.Figure(data=[bar], layout=image_layout)
    py.offline.plot(image_figure, image='png')
if __name__ == "__main__":
    # Smoke test: build a three-part example entropy profile and render
    # it both as a .png download page and as an interactive .html page.
    example_data = [ [1 - float(i)/x for i in range(x)] for x in [30, 150, 40] ]
    example_desc = [ [i + (30 if x==150 else (180 if x==40 else 0)) for i in range(x)]
                      for x in [30, 150, 40] ]
    example_parts= ['Start', 'Mid', 'End']
    annotations = create_annotations(example_data, example_desc, example_parts)
    bar = create_plot(example_data, example_desc)
    save_to_file(bar, 'Example file title', 'examplename')
    print("Page downloading .png file created!")
    plot_page(bar, 'Example title', 'examplename', annotations)
    # Fixed typo in the user-facing message ("autamaticly").
    print(".html file created - should open in browser automatically.")
|
Since the offices at the apartment buildings wouldn’t be open on a Sunday we decided to spend the day at Sea World. With the humidity and the heat it was a bit warm at Sea World but nothing like the temperatures back home. I think we topped out at about 85 at Sea World. Back home it was closer to 110 or 113. Given that 85 with humidity isn’t all that bad.
We saw many of the shows starting with the dolphins. We only did one ride, the Sky Tower. I saw the roller coaster while waiting for the dolphin show to start. I can handle steep drops and upside down but the one thing I really don't like is going backwards. The roller coaster I saw went backwards so I wasn't about to ride that thing. I blame it all on a roller coaster called the Tidal Wave at Marriott's "Great America" in Santa Clara, California. At least that is what it was called back when I lived there and was going to amusement parks.
We saw two different versions of the Seymore and Clyde show as well as the pets show. We also did the shark experience and watched the penguins. We also saw the Orca show which has definitely changed emphasis to conservation and protection of wildlife since the “Blackfish” documentary.
After watching the Orca show, we had dinner, did a little shopping and then decided to head back to the hotel in El Cajon.
|
# encoding: utf-8
"""
I/O configurations
"""
from __future__ import absolute_import, print_function, unicode_literals
import importlib
from gozokia.i_o.exceptions import GozokiaIoError
from gozokia.conf import settings
class Io(object):
    """Selects and wraps Gozokia's input and output backends.

    A backend is chosen by name (see ``_INPUT_METHODS`` /
    ``_OUTPUT_METHODS``) and instantiated from the ``gozokia.i_o.input``
    and ``gozokia.i_o.output`` modules.
    """

    # Backend identifiers.
    _VALUE = 0
    _TXT = 1
    _VOICE = 2
    _TXT_VOICE = 3

    _METHOD_DEFAULT = "terminal_txt"

    # Input configuration: method name -> identifier.
    _INPUT_METHODS = {"value": _VALUE, "terminal_txt": _TXT, "terminal_voice": _VOICE}
    _INPUT_SELECTED = 0

    # Output configuration: method name -> identifier.
    _OUTPUT_METHODS = {"value": _VALUE, "terminal_txt": _TXT, "terminal_voice": _VOICE, "terminal_txtvoice": _TXT_VOICE}
    _OUTPUT_SELECTED = 0

    # System program to play sounds
    # _AUDIO_PLAYER = "mpg123"

    def __init__(self, *args, **kwargs):
        chosen_input = kwargs.get('input_type', settings.GOZOKIA_INPUT_TYPE)
        chosen_output = kwargs.get('output_type', settings.GOZOKIA_OUTPUT_TYPE)
        self.set_input_method(chosen_input)
        self.set_output_method(chosen_output)

    def set_input_method(self, input_type):
        """Select and instantiate the input backend named *input_type*."""
        try:
            self._INPUT_SELECTED = self._INPUT_METHODS[input_type]
        except KeyError:
            raise GozokiaIoError(self.__class__.__name__ + ": Input method {} not exist".format(input_type))

        # Initialize the input method
        input_module = importlib.import_module('gozokia.i_o.input')
        backend_names = {
            self._VALUE: 'InputValue',
            self._TXT: 'InputTerminalText',
            self._VOICE: 'InputTerminalVoice',
        }
        self.input = getattr(input_module, backend_names[self._INPUT_SELECTED])()

    def get_input_method(self):
        """Return the numeric identifier of the active input backend."""
        return self._INPUT_SELECTED

    def listen(self, *args, **kwargs):
        # Delegate straight to the active input backend.
        return self.input.listen(*args, **kwargs)

    def set_output_method(self, output_type):
        """Select and instantiate the output backend named *output_type*."""
        try:
            self._OUTPUT_SELECTED = self._OUTPUT_METHODS[output_type]
        except KeyError:
            raise GozokiaIoError(self.__class__.__name__ + ": Output method {} not exist".format(output_type))

        output_module = importlib.import_module('gozokia.i_o.output')
        backend_names = {
            self._VALUE: 'OutputValue',
            self._TXT: 'OutputTerminalText',
            self._VOICE: 'OutputTerminalVoice',
        }
        if self._OUTPUT_SELECTED not in backend_names:
            # "terminal_txtvoice" has no backend implemented yet.
            raise GozokiaIoError(self.__class__.__name__ + ": No Output method for [{}] {}".format(self._OUTPUT_SELECTED, output_type))
        self.output = getattr(output_module, backend_names[self._OUTPUT_SELECTED])()

    def get_output_method(self):
        """Return the numeric identifier of the active output backend."""
        return self._OUTPUT_SELECTED

    def response(self, text, *args, **kwargs):
        # Delegate straight to the active output backend.
        return self.output.response(response=text, *args, **kwargs)
|
We offer 25 Litre Oil Free Air Compressor.
Established in the year 1978, We, Talib Sons (Mumbai), are a highly distinguished firm engaged as a Manufacturer, Trader, and Wholesaler of Air Compressor, PEW Self Priming Monoset Pump, etc. These products are widely applauded for their exceptional performance, hassle-free working, durability, long functional life, and low maintenance. These products are precisely manufactured by our adroit professionals, who make use of optimum-quality components and the most modern technology, with the aid of contemporary techniques, in compliance with set quality values. Under the administration of our mentor Mr. Murtaza, we have earned wide appreciation from our valuable patrons. Owing to his innovative approach and capable guidance, we have been steadily gaining a comprehensive base of clients.
|
# Grid dimensions and number of generations to simulate.
x_len = 100
y_len = 100
steps = 100
def new_grid(width=None, height=None):
    """Return a width x height grid of 0/1 cells, all off except the corners.

    The four corner cells are forced on (they stay on for the whole
    simulation).  Both dimensions default to the module-level ``x_len`` /
    ``y_len``, so existing ``new_grid()`` callers are unchanged.
    """
    if width is None:
        width = x_len
    if height is None:
        height = y_len
    grid = [[0] * height for _ in range(width)]
    # Force corners to be on
    for (x, y) in [(0, 0), (0, height - 1), (width - 1, 0), (width - 1, height - 1)]:
        grid[x][y] = 1
    return grid
def main():
    # Run a stuck-corners Game of Life for `steps` generations over the
    # grid read from ../input.txt, then print how many cells are on.
    # (Python 2 script: uses the `print` statement.)
    g = new_grid()
    # Parse the initial grid: '#' = on, '.' = off.  Reading one character
    # at a time lets us skip newlines without tracking line boundaries.
    with open('../input.txt', 'r') as fp:
        for x in range(x_len):
            for y in range(y_len):
                c = None
                while c not in ['#','.']:
                    c = fp.read(1)
                    if c is None or c == '':
                        raise Exception("Not enough input")
                g[x][y] = (1 if c == '#' else 0)
    for i in range(steps):
        # Compute the next generation into a fresh grid (whose corners
        # are already forced on by new_grid).
        new_g = new_grid()
        for x in range(0, x_len):
            for y in range(0, y_len):
                #Ignore corners
                if (x,y) in [(0,0),(0,y_len-1),(x_len-1,0),(x_len-1,y_len-1)]:
                    continue
                # Count live neighbors in the 3x3 box around (x, y).
                count = 0
                for n_x in range(x-1,x+2):
                    for n_y in range(y-1,y+2):
                        #Skip ourselves
                        if n_x == x and n_y == y:
                            continue
                        #Skip out of bounds
                        elif n_x < 0 or n_x >= x_len or n_y < 0 or n_y >= y_len:
                            continue
                        count += g[n_x][n_y]
                # Conway rules: a live cell survives with 2-3 neighbors,
                # a dead cell is born with exactly 3.
                #If on
                if g[x][y] == 1:
                    value = 1 if count == 2 or count == 3 else 0
                #If off
                else:
                    value = 1 if count == 3 else 0
                new_g[x][y] = value
        g = new_g
    #Count on
    count = 0
    for x in range(x_len):
        for y in range(y_len):
            count += g[x][y]
    print count
# Run the simulation only when executed as a script.
if __name__ == "__main__":
    main()
|
John Shifflett (1955 - 2017) was a beloved bassist, well-known as THE sideman in many of the San Francisco Bay Area’s foremost ensembles and for traveling jazz dignitaries. Kept more as a guarded secret was his compositional skills and library of compelling and varied pieces of music. Saxophonist Kristen Strom, guitarist Scott Sorkin and drummer Jason Lewis played with John in their quartet and on countless other projects over the last 25 years. For this deeply heartfelt tribute to their friend, Kristen, Scott and Jason have arranged a collection of his compositions and brought together some of his esteemed Bay Area colleagues to perform, record, and celebrate his music. Without a doubt, a labor of love, and a testament to a life well lived in service to music, the creative process, and friends.
|
import sys
from django.conf.urls import patterns
from django.conf.urls import url as django_url
def url(*regexes, **kwargs):
    """Class decorator that registers a class-based view in the caller's
    ``urlpatterns``.

    The caller's module is located via the call-stack frame; a module
    matches when its ``__file__`` starts with the caller's source path
    (this also matches ``.pyc`` against ``.py`` -- presumably intentional,
    confirm).  One pattern per regex is appended, all pointing at
    ``cls.as_view()``; the pattern name defaults to the class name and
    can be overridden with ``name=...``.
    """
    caller_filename = sys._getframe(1).f_code.co_filename
    # BUG FIX: ``module`` was previously left unbound when no loaded
    # module matched, raising NameError inside _wrapper.  Initialize it
    # so registration is simply skipped in that case.
    module = None
    for m in sys.modules.values():
        if (m and '__file__' in m.__dict__ and
                m.__file__.startswith(caller_filename)):
            module = m
            break

    def _wrapper(cls):
        if module:
            if 'urlpatterns' not in module.__dict__:
                module.urlpatterns = []
            view = cls.as_view()
            view_name = kwargs.get('name') or cls.__name__
            url_kwargs = dict(kwargs)
            url_kwargs['name'] = view_name
            for regex in regexes:
                module.urlpatterns += patterns(
                    '', django_url(regex, view, **url_kwargs))
        # Always hand the class back unchanged so it can still be used
        # (and decorated) normally.
        return cls
    return _wrapper
def select(t, name, options=(), value=None):
    """Render an HTML <select> with *options* as (value, label) pairs.

    The option whose value equals *value* is marked selected.  NOTE: the
    original emitted a ``checked`` attribute, which is meaningless on
    <option> elements -- per the HTML spec the attribute is ``selected``.
    ``name`` is currently unused; presumably it should become the select
    element's name attribute (TODO confirm against the `t` DSL).
    """
    with t.select():
        for o in options:
            with t.option(selected=o[0] == value, value=o[0]):
                t(o[1])
def css_link(t, href):
    # Emit a <link rel="stylesheet"> tag pointing at *href*.
    t.link(rel='stylesheet', href=href)
def js_link(t, href):
    # Emit a <script type="text/javascript" src=...> tag; the trailing
    # call presumably closes the (empty-bodied) element in the t DSL.
    t.script(type='text/javascript', src=href)()
|
Plant 8, 9 West Tai Lake Avenue, Wujin Economic Development Zone, Changzhou, Jiangsu,China.
Excellent stability, Durability, deep scratch resistance.
Competitive price based on guaranty of quality.
Packing: blister tray packaged, foam box and carton to ensure safety in transportation.
1.We provide the express company like DHL,UPS,EMS,FEDEX and TNTservice and fast(3--5 days).
2. Clients can check the packing status online with the tracking number and satisfy the customers' requirements.
3. When finish the shipment, we will follow it. If you don't receive box on time, please contact us immediately, we will assist to get the box smoothly.
The Wuxi Graphene film co.,ltd provide professional QC team, meanwhile, all product is through strict test with inspection machine and inspection before shipping, so it is believe that the goods can be used in your country.
Multimeter, logic analyzer, decibel meter, photometer, waist supporter.
The Wuxi Graphene film co.,ltd was located in the 5th factory, Huishan economic development area, which specialize in CVD grapheme film research and development and sales in high technology industries. It is wholly owned subsidiary in the sixth element science and technology co., ltd (stock code: 831190). The company has core team included a large number of Doctors that independently patent for invention of 50 items, which is the one of world's largest CVD graphene production in the world. If you are interested in our products, please feel free to talk with us.
|
from foldkin.base.model_factory import ModelFactory
from foldkin.base.model import Model
from foldkin.zam_protein import create_zam_protein_from_pdb_id
class ContactOrderModelFactory(ModelFactory):
    """Factory that builds ContactOrderModel instances from PDB ids."""
    def __init__(self):
        super(ContactOrderModelFactory, self).__init__()

    def create_model(self, pdb_id, parameter_set):
        # Construct and hand back the model directly; nothing is cached.
        return ContactOrderModel(pdb_id, parameter_set)
class ContactOrderModel(Model):
    """Wraps a zam protein structure and a parameter set for a single
    PDB entry, exposing its residue-residue contacts."""
    def __init__(self, pdb_id, parameter_set):
        super(ContactOrderModel, self).__init__()
        self.pdb_id = pdb_id
        # Loads the structure; may be expensive / hit the network
        # depending on zam_protein's implementation -- confirm.
        self.zam_protein = create_zam_protein_from_pdb_id(pdb_id)
        self.parameter_set = parameter_set

    def get_id(self):
        # The PDB id doubles as this model's unique identifier.
        return self.pdb_id

    def get_parameter(self, parameter_name):
        """Look up one named parameter from the attached parameter set."""
        return self.parameter_set.get_parameter(parameter_name)

    def get_contact_list(self):
        """Return the structure's contacts as Contact objects.

        Residue numbers from the zam contact list are used directly as
        indices into the one-letter sequence -- assumes 0-based
        numbering; TODO confirm zam_protein's convention.
        """
        contact_list = self.zam_protein.get_contact_list()
        one_letter_sequence = self.zam_protein.get_sequence()
        new_contact_list = []
        for c in contact_list:
            residue1_number = c[0]
            residue2_number = c[1]
            residue1_name = one_letter_sequence[residue1_number]
            residue2_name = one_letter_sequence[residue2_number]
            new_contact = Contact(residue1_name, residue2_name, residue1_number,
                                  residue2_number)
            new_contact_list.append(new_contact)
        return new_contact_list
class Contact(object):
    """A single residue-residue contact within a protein structure."""
    def __init__(self, residue1_name, residue2_name, residue1_number,
                 residue2_number):
        super(Contact, self).__init__()
        # Store the two partners' one-letter codes and sequence positions.
        (self.residue1_name, self.residue2_name,
         self.residue1_number, self.residue2_number) = (
            residue1_name, residue2_name, residue1_number, residue2_number)

    def get_sequence_separation(self):
        """Distance in sequence between the two contact partners."""
        return self.residue2_number - self.residue1_number

    def get_residue_names_as_letters(self):
        """The partners' one-letter codes, first residue first."""
        return [self.residue1_name, self.residue2_name]
|
ATLANTA – Completing its recruiting efforts for the November signing period, Georgia Tech’s basketball program signed 6-10 center Michael Southall of West Salem, Wis., to a letter-of-intent Friday, November 10.
Head coach Paul Hewitt’s first Tech signing class includes five players, one at every position on the floor, all ranked in the top 100 high school prospects nationally.
Southall, who graduated from West Salem (Wis.) High School in 2000 but is attending Hargrave Military Academy in Chatham, Va., this year, is expected to give Tech a dominant presence inside following the graduation of Alvin Jones after the 2000-01 season. As a senior at West Salem, Southall averaged 23 points, 14 rebounds and 5 blocked shots per game while earning first-team all-state honors. He ranks No. 88 among the nation’s prep prospects in PrepStars. He participated in the 2000 Derby Classic, a post-season all-star game held in Louisville, Ky.
Also signing letters-of-intent with Tech during the early signing period Wednesday were 6-7 power forward Ed Nelson of Fort Lauderdale, Fla., 6-4 shooting guard Barry Elder of Madison, Ga., 6-6 small forward Isma’il Muhammad of Atlanta, and 6-6 Anthony Vasser of Birmingham, Ala.
Tech will graduate five seniors after this season, and have eight scholarships available. But the Jackets are limited to signing only five players this year under a new NCAA rule that limits Division I programs to committing a maximum of eight scholarships over a two-year period, five in any one year.
|
import sys
from os import listdir
import operator
import collections
from pprint import pprint
from itertools import count
import matplotlib.pyplot as plt
import networkx as nx
import multinetx as mx
import numpy as np
from t5 import collect_data
from pdb import set_trace
def collect_community(path):
    """Load community-detection result files found under *path*.

    Returns {method: {parameter-key: file contents}} for the three
    supported methods.  Parameter values are re-formatted to one decimal
    place so e.g. '2.00' and '2.0' collapse to the same key.
    """
    results = {
        'lart': {}, 'glouvain': {}, 'pmm': {},
    }
    for fname in listdir(path):
        if '_community' not in fname:
            continue
        pieces = fname.split('_')
        method = pieces[0]
        # pmm/glouvain encode two parameters in the name, lart just one.
        raw_params = pieces
        if method in ('pmm', 'glouvain'):
            raw_params = [pieces[1], pieces[2]]
        elif method == 'lart':
            raw_params = [pieces[1]]
        with open(path + fname) as handle:
            contents = handle.read()
        key = '_'.join('%.1f' % float(p) for p in raw_params)
        results[method][key] = contents
    return results
def main():
data = collect_data('results/t6/')
comm = collect_community('results/t6/')
def find_best(data, method):
key = max(data[method].iteritems(), key=operator.itemgetter(1))[0]
y = data[method][key]
return y, key, method
print 'LART', data['lart']['9.0_1.0_1.0']
print 'PMM', data['pmm']['30.0_140.0']
print 'Glouvain', data['glouvain']['1.0_1.0']
gl = find_best(data, 'glouvain')
ll = find_best(data, 'lart')
pl = find_best(data, 'pmm')
print 'LART', ll
print 'PMM', pl
print 'Glouvain', gl
best = max([gl, ll, pl], key=operator.itemgetter(0))
best_comm = {}
for b in comm[best[2]][best[1]].split('\n'):
if b:
a,l,c = b.split(',')
best_comm['%s-%s' % (a, l)] = int(c)
layers = {
'RT': nx.Graph(), 'ff': nx.Graph(), 'Re': nx.Graph()
}
ids = {}
counter = 0
groups = []
with open('data/t6/dk', 'r') as fd:
for l in fd.readlines():
a1,a2, layer = l.replace('\n', '').split(",")
if a1 not in ids:
ids[a1] = counter
counter = counter + 1
if a2 not in ids:
ids[a2] = counter
counter = counter + 1
groups.append(best_comm['%s-%s' % (a1, layer)])
groups.append(best_comm['%s-%s' % (a2, layer)])
for k,v in layers.iteritems():
v.add_node(ids[a1], label = best_comm['%s-%s' % (a1, layer)])
v.add_node(ids[a2], label = best_comm['%s-%s' % (a2, layer)])
layers[layer].add_edge(ids[a1], ids[a2])
truth = {}
with open('data/t6/dk_truth', 'r') as fd:
for l in fd.readlines():
actor, party = l.replace('\n', '').split(',')
truth[actor] = party
mapping = dict(zip(sorted(groups), count()))
N, L = len(layers['ff'].nodes()), len(layers.keys())
adj_block = mx.lil_matrix(np.zeros((N * L, N * L)))
for i in xrange(L):
for j in xrange(L):
if i < j:
adj_block[N * i: N * (i + 1), N * j: (j+1) * N] = np.identity(N)
adj_block += adj_block.T
mg = mx.MultilayerGraph(list_of_layers=[v for k, v in layers.items()])
#inter_adjacency_matrix = adj_block)
mg.set_edges_weights(intra_layer_edges_weight=1)
#inter_layer_edges_weight=2)
fig = plt.figure(figsize=(16, 16))
plt.title('Twitter data of the Danish 2015 election')
stats = collections.defaultdict(list)
for k, v in best_comm.items():
stats[v].append(k)
stats2 = collections.defaultdict(dict)
for k,v in stats.items():
for e in v:
actor,_ = e.split('-')
if truth[actor] in stats2[k]:
stats2[k][truth[actor]] += 1
else:
stats2[k][truth[actor]] = 1
left = [
'Dansk Folkeparti',
'Venstre',
'Liberal Alliance',
'Det Konservative Folkeparti',
'KristenDemokraterne',
]
right = [
'Socialistisk Folkeparti',
'Radikale Venstre',
'Socialdemokratiet',
'Alternativet',
'Enhedslisten'
]
out = 'Udenfor partierne'
for k,v in stats2.items():
total = 0
for k1,v1 in v.items():
total += v1
pscore = 0
for k1,v1 in v.items():
if k1 in left:
pscore += (stats2[k][k1] * 1)
if k1 in right:
pscore -= (stats2[k][k1] * 1)
stats2[k][k1] = round(float(v1) / float(total), 2)
stats2[k]['nodes'] = filter(lambda x, i = mg, k = k: i.node[x]['label'] == k, mg.node)
stats2[k]['pscore'] = pscore / float(total)
stats2 = dict(stats2)
if len(sys.argv) > 1 and sys.argv[1] == 'heat':
cmap = plt.get_cmap('RdBu_r')
colors = [stats2[mg.node[n]['label']]['pscore'] for n in mg.nodes()]
pos = mx.get_position(mg, nx.spring_layout(layers[layers.keys()[2]],
weight ='pscore'),
layer_vertical_shift=0.2,
layer_horizontal_shift=0.0,
proj_angle=47)
for key, val in stats2.items():
set_trace()
mx.draw_networkx_nodes(mg, pos=pos,node_size=100, with_labels=False,
nodelist = val['nodes'],
label = key,
node_color = [colors[n] for n in val['']],
cmap = cmap)
else:
val_map = {
0: 'k',
1: 'r',
2: 'g',
3: 'b',
4: 'c',
5: 'm',
6: 'y',
7: '0.75',
8: 'w',
}
colors = [val_map[mg.node[n]['label']] for n in mg.nodes()]
pos = mx.get_position(mg, nx.spring_layout(layers[layers.keys()[2]]),
layer_vertical_shift=0.2,
layer_horizontal_shift=0.0,
proj_angle=47)
for k, v in stats2.items():
mx.draw_networkx_nodes(mg, pos=pos,node_size=100, with_labels=False,
nodelist = v['nodes'],
label = k,
node_color = [colors[n] for n in v['nodes']],
cmap=plt.get_cmap('Set2'))
mx.draw_networkx_edges(mg, pos=pos, edge_color = 'b')
fig.tight_layout()
plt.legend(numpoints=1, loc=1)
plt.xticks([])
plt.yticks([])
plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
|
I live in Seattle, WA and I work as a software engineer during the day and I am a busy father of five kids. But at night, I work on the questions of science and have been doing so since 2003.
Is it possible that the only fundamental particles are the positron and the electron and that the only force is the electrostatic force?
Is gravity also just an electrostatic force?
|
from django_filters import CharFilter, NumberFilter
from django_filters import rest_framework as filters
from rest_framework.filters import OrderingFilter
from rest_framework.viewsets import ModelViewSet
from lily.notes.api.serializers import NoteSerializer
from lily.notes.models import Note
class NoteFilter(filters.FilterSet):
    """Filter set for Note endpoints.

    Adds filtering on the generic foreign key (by content-type model
    name and object id) on top of the declared ``is_pinned`` field.
    """
    # NOTE(review): 'name' was renamed 'field_name' in django-filter 2.0;
    # this presumably targets an older release -- confirm before upgrading.
    content_type = CharFilter(name='gfk_content_type__model')
    object_id = NumberFilter(name='gfk_object_id')

    class Meta:
        model = Note
        fields = ('is_pinned', )
class NoteViewSet(ModelViewSet):
    """
    This viewset contains all possible ways to manipulate a Note.
    """
    model = Note
    queryset = Note.objects  # Without .all() this filters on the tenant
    serializer_class = NoteSerializer

    # Set all filter backends that this viewset uses.
    filter_backends = (OrderingFilter, filters.DjangoFilterBackend)
    # OrderingFilter: set all possible fields to order by.
    ordering_fields = ('created',)
    # DjangoFilterBackend: set the possible fields to filter on.
    filter_class = NoteFilter

    def get_queryset(self, *args, **kwargs):
        # Soft-deleted notes are never exposed through the API.
        return super(NoteViewSet, self).get_queryset().filter(is_deleted=False)
|
“I wish the England football team good fortune in their match against Croatia. Our whole nation is urging them to win.
“I can actually remember when we last won the World Cup in 1966. I was a scout and, along with other members of our troop, I stood outside a shop window watching the game on TV. When we won that match, we all felt so proud of our victory in the game that we invented. I very much hope that I live to see the victory repeated.
|
# Copyright (c) 2015 Cisco.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import keystoneclient.v3.client as ksclient
from monascaclient import client as monasca_client
from oslo_log import log as logging
from oslo_utils import timeutils
from congress.datasources import constants
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
LOG = logging.getLogger(__name__)
# TODO(thinrichs): figure out how to move even more of this boilerplate
# into DataSourceDriver. E.g. change all the classes to Driver instead of
# NeutronDriver, CeilometerDriver, etc. and move the d6instantiate function
# to DataSourceDriver.
class MonascaDriver(datasource_driver.PollingDataSourceDriver,
                    datasource_driver.ExecutionDriver):
    """Congress datasource driver for OpenStack Monasca.

    Polls metric definitions and per-metric average statistics from the
    Monasca API and translates them into the Congress tables below.
    """
    # Names of the tables this driver publishes.
    METRICS = "metrics"
    DIMENSIONS = "dimensions"
    STATISTICS = "statistics"
    DATA = "statistics.data"
    # TODO(fabiog): add events and logs when fully supported in Monasca
    # EVENTS = "events"
    # LOGS = "logs"

    value_trans = {'translation-type': 'VALUE'}

    # One METRICS row per metric; the 'dimensions' sub-dict is exploded
    # into the DIMENSIONS table, linked back through the generated 'id'.
    metric_translator = {
        'translation-type': 'HDICT',
        'table-name': METRICS,
        'selector-type': 'DICT_SELECTOR',
        'field-translators':
            ({'fieldname': 'id', 'translator': value_trans},
             {'fieldname': 'name', 'translator': value_trans},
             {'fieldname': 'dimensions',
              'translator': {'translation-type': 'VDICT',
                             'table-name': DIMENSIONS,
                             'id-col': 'id',
                             'key-col': 'key', 'val-col': 'value',
                             'translator': value_trans}})
    }

    # One STATISTICS row per metric name; the datapoint list is flattened
    # into the DATA table keyed by metric name.
    statistics_translator = {
        'translation-type': 'HDICT',
        'table-name': STATISTICS,
        'selector-type': 'DICT_SELECTOR',
        'field-translators':
            ({'fieldname': 'name', 'translator': value_trans},
             {'fieldname': 'statistics',
              'translator': {'translation-type': 'LIST',
                             'table-name': DATA,
                             'id-col': 'name',
                             'val-col': 'value_col',
                             'translator': value_trans}})
    }

    TRANSLATORS = [metric_translator, statistics_translator]

    def __init__(self, name='', args=None):
        """Authenticate against Keystone and build the Monasca client.

        Expects OpenStack credentials in *args*; 'project_name' falls
        back to 'tenant_name' and 'poll_time' defaults to one hour.
        """
        super(MonascaDriver, self).__init__(name, args=args)
        datasource_driver.ExecutionDriver.__init__(self)
        self.creds = args
        if not self.creds.get('project_name'):
            self.creds['project_name'] = self.creds['tenant_name']
        if not self.creds.get('poll_time'):
            # set default polling time to 1hr
            self.creds['poll_time'] = 3600

        # Monasca uses Keystone V3
        self.creds['auth_url'] = self.creds['auth_url'].replace("v2.0", "v3")
        self.keystone = ksclient.Client(**self.creds)
        self.creds['token'] = self.keystone.auth_token

        if not self.creds.get('endpoint'):
            # if the endpoint not defined retrieved it from keystone catalog
            self.creds['endpoint'] = self.keystone.service_catalog.url_for(
                service_type='monitoring', endpoint_type='publicURL')

        self.monasca = monasca_client.Client('2_0', **self.creds)
        self.add_executable_client_methods(self.monasca, 'monascaclient.')
        self.initialize_update_methods()
        self._init_end_start_poll()

    @staticmethod
    def get_datasource_info():
        """Describe this driver for Congress' datasource registry."""
        result = {}
        result['id'] = 'monasca'
        result['description'] = ('Datasource driver that interfaces with '
                                 'monasca.')
        result['config'] = ds_utils.get_openstack_required_config()
        result['config']['lazy_tables'] = constants.OPTIONAL
        result['secret'] = ['password']
        return result

    def initialize_update_methods(self):
        """Register the per-table refresh callbacks with the poller."""
        metrics_method = lambda: self._translate_metric(
            self.monasca.metrics.list())
        self.add_update_method(metrics_method, self.metric_translator)

        statistics_method = self.update_statistics
        self.add_update_method(statistics_method, self.statistics_translator)

    def update_statistics(self):
        """Fetch the last 24h of average statistics for every metric."""
        today = datetime.datetime.now()
        yesterday = datetime.timedelta(hours=24)
        # NOTE(review): timeutils.isotime() is deprecated in newer
        # oslo.utils in favour of datetime.isoformat() -- confirm the
        # supported oslo.utils version before changing.
        start_from = timeutils.isotime(today-yesterday)

        for metric in self.monasca.metrics.list_names():
            LOG.debug("Monasca statistics for metric %s", metric['name'])
            _query_args = dict(
                start_time=start_from,
                name=metric['name'],
                statistics='avg',
                # One datapoint per polling interval.
                period=int(self.creds['poll_time']),
                merge_metrics='true')
            statistics = self.monasca.metrics.list_statistics(
                **_query_args)
            self._translate_statistics(statistics)

    @ds_utils.update_state_on_changed(METRICS)
    def _translate_metric(self, obj):
        """Translate the metrics represented by OBJ into tables."""
        LOG.debug("METRIC: %s", str(obj))

        row_data = MonascaDriver.convert_objs(obj,
                                              self.metric_translator)
        return row_data

    @ds_utils.update_state_on_changed(STATISTICS)
    def _translate_statistics(self, obj):
        """Translate the statistics represented by OBJ into tables."""
        LOG.debug("STATISTICS: %s", str(obj))

        row_data = MonascaDriver.convert_objs(obj,
                                              self.statistics_translator)
        return row_data

    def execute(self, action, action_args):
        """Overwrite ExecutionDriver.execute()."""
        # action can be written as a method or an API call.
        func = getattr(self, action, None)
        if func and self.is_executable(func):
            func(action_args)
        else:
            self._execute_api(self.monasca, action, action_args)
|
If you would like our newsletter please join our mailing list.
You can come and buy your cheese direct from the Connage Pantry at our shop at the dairy.
We have the full range of our own handmade cheeses and a superb hand-picked selection of guest cheeses from throughout Britain and the Continent. Our Pantry allows you to try before you buy while watching our cheese being made through the viewing window and if you have time, why not stay for a cappuccino?
All products are fully traceable and we employ excellent quality controls. Assurances can be given for the highest standard of animal welfare and care for our cows, continuing the attention to detail into our cheeses.
|
# -*- coding: utf-8 -*-
import datetime as dt
from flask_login import UserMixin
from sqlalchemy.orm import relationship, backref
from rank.api.models import UserRequest
from rank.core.models import (
DB as db,
SurrogatePK,
ReferenceCol,
CRUDMixin
)
from rank.extensions import bcrypt
class Role(SurrogatePK, db.Model):
    """A named role (e.g. an access level) optionally attached to a user."""

    __tablename__ = 'roles'
    name = db.Column(db.String(80), nullable=False)
    # Nullable so a role can exist without being assigned to anyone.
    user_id = ReferenceCol('users', nullable=True)
    user = db.relationship('User', backref='roles')

    def __init__(self, name, **kwargs):
        """Create a role with the given *name*."""
        db.Model.__init__(self, name=name, **kwargs)

    def __repr__(self):
        """Readable representation, e.g. ``<Role(admin)>``."""
        return '<Role({name})>'.format(name=self.name)
class User(UserMixin, CRUDMixin, SurrogatePK, db.Model):
    """An application user, optionally linked one-to-one with a Game.

    Admin users see request records across every game; regular users only
    see requests belonging to their own game.
    """

    __tablename__ = 'users'
    username = db.Column(db.String(80), unique=True, nullable=False)
    #: The hashed password
    password = db.Column(db.String(128), nullable=True)
    created_at = db.Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    active = db.Column(db.Boolean(), default=False)
    is_admin = db.Column(db.Boolean(), default=False, nullable=False)
    game_id = db.Column(db.Integer, db.ForeignKey("games.id"))
    game = relationship("Game", backref=backref("user", uselist=False), cascade="delete")

    def __init__(self, username, password=None, **kwargs):
        """Create a user; hash *password* when one is supplied."""
        db.Model.__init__(self, username=username, **kwargs)
        if password:
            self.set_password(password)
        else:
            self.password = None

    def set_password(self, password):
        """Store a bcrypt hash of *password*."""
        self.password = bcrypt.generate_password_hash(password)

    def check_password(self, value):
        """Return True when *value* matches the stored password hash."""
        return bcrypt.check_password_hash(self.password, value)

    @staticmethod
    def _midnight():
        """Start of the current day (naive local datetime)."""
        return dt.datetime.combine(dt.date.today(), dt.time())

    def _requests_since(self, cutoff):
        """Base query for UserRequests newer than *cutoff*.

        Admins get every request; non-admins are restricted to requests
        for their own game.
        """
        query = UserRequest.query.filter(UserRequest.time_requested > cutoff)
        if not self.is_admin:
            query = query.filter(UserRequest.game_id == self.game.id)
        return query

    def requests_today(self):
        """Requests made since midnight, newest first."""
        return self._requests_since(self._midnight()).order_by(
            UserRequest.time_requested.desc())

    def requests_this_week(self):
        """Requests made in the last seven days, newest first."""
        seven_days_ago = self._midnight() - dt.timedelta(days=7)
        return self._requests_since(seven_days_ago).order_by(
            UserRequest.time_requested.desc())

    def request_count_today(self):
        """Number of requests made since midnight."""
        return self._requests_since(self._midnight()).count()

    def __repr__(self):
        return '<User({username!r})>'.format(username=self.username)
|
Before sending product, make sure that the unit is getting power. On remote control models, make sure to test the unit with the side panel buttons and new batteries.
Make sure the product is packaged well, as these units are fragile. Send only by UPS or FedEx, fully insured. Do not send by mail as deliveries are not made inside our business complex.
Print and complete Bidet Seat Service Form, and include it with the product.
Do not include the remote control or any other parts with the repair, unless they specifically need attention. Parts can easily get lost or damaged in transit, so it’s better to keep these at home.
If in-warranty service is needed, product must be accompanied by a copy of the proof-of-purchase receipt.
If out-of-warranty service is needed, a free estimate will be provided before beginning repairs.
NOTE: Products must be fully insured & sent via UPS or FedEx Prepaid. Do not send by mail as deliveries are not made inside our business complex.
|
from scrapy.spider import BaseSpider
from scrapy.exceptions import DropItem
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from nytimesScraper.items import Article
import re
import json
class NYTSpider(CrawlSpider):
    """Crawl nytimes.com article pages listed in ``All_URLs.json`` and
    emit :class:`Article` items (url, title, company, text, date).
    """
    name = 'nytimes'
    allowed_domains = ['nytimes.com']

    # Map of article URL -> company name, loaded once at class-definition
    # time; the file handle is closed deterministically by the with-block.
    with open('All_URLs.json') as f:
        company = json.load(f)
    start_urls = company.keys()

    rules = [Rule(SgmlLinkExtractor(allow=r'pagewanted=\d+',
                                    tags='//a[@class="next"]'),
                  'parse_link')]

    # Pagination/tracking query parameters stripped by baseURL() so that
    # multi-page articles map back to the key used in ``company``.
    _PARAM_RE = re.compile(r'[?&]?(?:pagewanted=\d+|_r=\d)')

    def parse_link(self, response):
        """Parse one article page into an Article item.

        Drops the item when no company mapping or no article text could
        be extracted.
        """
        x = HtmlXPathSelector(response)
        article = Article()
        article['url'] = response.url
        article['title'] = x.select('//title/text()').extract()
        article['company'] = NYTSpider.company.get(self.baseURL(response.url), "")
        # Article text appears under different markup depending on page
        # vintage; concatenate all three candidate containers.
        article['text'] = self.extractText(x.select('//div[@class="articleBody"]//text()').extract()) \
            + self.extractText(x.select('//div[@id="articleBody"]//text()').extract()) \
            + self.extractText(x.select('string(//div[@class="entry-content"])').extract())
        article['date'] = self.extractDate(x.select('//meta[@name="pdate"]').extract())
        if len(article['company']) == 0 or len(article['text']) == 0:
            raise DropItem('Missing company and/or text: %s' % article)
        return article

    def parse_start_url(self, response):
        """Treat seed URLs exactly like followed pagination links."""
        return self.parse_link(response)

    def baseURL(self, url):
        """Strip pagination/tracking query params so URLs compare equal."""
        return NYTSpider._PARAM_RE.sub('', url)

    def extractText(self, body):
        """Join text nodes longer than 100 chars, skipping short
        boilerplate/navigation fragments."""
        return ' '.join(
            text.strip() for text in body if len(text.strip()) > 100)

    def extractDate(self, dateTags):
        """Pull an ISO date out of a ``<meta name="pdate" content="YYYYMMDD">``
        tag; fall back to a fixed sentinel date when none is present."""
        for dateTag in dateTags:
            if 'content=' in dateTag:
                # +9 skips past 'content=' (8 chars) plus the opening quote.
                spot = dateTag.find('content=') + 9
                date = dateTag[spot:spot + 8]
                return date[:4] + '-' + date[4:6] + '-' + date[6:]
        return '2013-01-01'
|
According to an article from USAToday.com, the government is going to try and impose a label on tires that shows how fuel efficient they will be.
The government believes that if consumers know how fuel efficient their tires are, they would be more prone to pick ones that have better gas mileage in the long run. According to the National Highway Traffic Safety Administration, Americans use about 135 billion gallons of gasoline each year, and these labels could help reduce that by at least 2 percent.
The label would be affixed to the tire and would be available online, because consumers rarely get the see the labels on their tires before they are put on their car.
This comes after California attempted a similar program. After that effort, people thought it would be better to let the federal government handle that kind of business, so the rules would be the same from state to state.
Because of increasing gas prices, and after last summer when gas topped four dollars a gallon, there has been more effort into making consumers more aware with how to be more energy efficient.
Being more environmental is being considered in every industry today, including the tire industry. But the question remains if consumers would really make the choice of the more fuel efficient tire, over perhaps a cheaper tire. If consumers can only look at the labels online, how effective would they really be?
|
# Copyright (c) 2021, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Decorators that check a User a verified email address.
"""
from __future__ import unicode_literals
from functools import wraps
from django.contrib import messages
from django.contrib.auth import (REDIRECT_FIELD_NAME, logout as auth_logout)
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from . import settings, signals
from .auth import validate_redirect
from .compat import available_attrs, is_authenticated, reverse, six
from .models import Contact
from .utils import has_invalid_password, get_accept_list
def _insert_url(request, redirect_field_name=REDIRECT_FIELD_NAME,
                inserted_url=None):
    '''Redirects to the *inserted_url* before going to the original
    request path.'''
    # This code is pretty much straightforward
    # from contrib.auth.user_passes_test
    path = request.build_absolute_uri()
    # If the login url is the same scheme and net location then just
    # use the path as the "next" url.
    login_scheme, login_netloc = six.moves.urllib.parse.urlparse(
        inserted_url)[:2]
    current_scheme, current_netloc = six.moves.urllib.parse.urlparse(path)[:2]
    if ((not login_scheme or login_scheme == current_scheme) and
        (not login_netloc or login_netloc == current_netloc)):
        # Same origin: a relative path is enough (and safer) as "next".
        path = request.get_full_path()
    return redirect_to_login(path, inserted_url, redirect_field_name)
def redirect_or_denied(request, inserted_url,
                       redirect_field_name=REDIRECT_FIELD_NAME, descr=None):
    """Redirect browser clients to *inserted_url*; for API clients (or
    when no redirect URL is available) raise ``PermissionDenied``."""
    accepts_html = 'text/html' in get_accept_list(request)
    if accepts_html and isinstance(inserted_url, six.string_types):
        return _insert_url(request, redirect_field_name=redirect_field_name,
                           inserted_url=inserted_url)
    raise PermissionDenied(descr if descr is not None else "")
def send_verification_email(contact, request,
                            next_url=None,
                            redirect_field_name=REDIRECT_FIELD_NAME):
    """Send an e-mail so the user can verify her e-mail address.

    The e-mail embeds a verification link plus a redirect back to the
    page the verification was requested from, so the user resumes her
    workflow once verification completes.
    """
    back_url = request.build_absolute_uri(
        reverse('registration_activate',
                args=(contact.email_verification_key,)))
    if next_url:
        back_url = '%s?%s=%s' % (back_url, redirect_field_name, next_url)
    signals.user_verification.send(
        sender=__name__, user=contact.user, request=request,
        back_url=back_url, expiration_days=settings.KEY_EXPIRATION)
def send_verification_phone(contact, request,
                            next_url=None,
                            redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Send a text message to the user to verify her phone number.

    NOTE(review): despite the name, this currently builds the same
    e-mail verification link and fires the same ``user_verification``
    signal as ``send_verification_email`` (see the XXX below); no SMS
    is sent yet.

    The message embeds a link to a verification url and a redirect to
    the page the verification was sent from so that the user stays on
    her workflow once verification is completed.
    """
    # XXX needs to send phone text message instead of e-mail!!!
    back_url = request.build_absolute_uri(reverse('registration_activate',
        args=(contact.email_verification_key,)))
    if next_url:
        back_url += '?%s=%s' % (redirect_field_name, next_url)
    signals.user_verification.send(
        sender=__name__, user=contact.user, request=request,
        back_url=back_url, expiration_days=settings.KEY_EXPIRATION)
# The user we are looking to activate might be different from
# the request.user (which can be Anonymous)
def check_has_credentials(request, user,
                          redirect_field_name=REDIRECT_FIELD_NAME,
                          next_url=None):
    """Return True when *user* already has usable login credentials.

    Otherwise (re-)send the verification e-mail so the user can set a
    password, and return False.
    """
    if not has_invalid_password(user):
        return True
    # No password set yet: send the verification e-mail again.
    #pylint:disable=unused-variable
    contact, created = Contact.objects.prepare_email_verification(
        user, user.email)
    if not next_url:
        next_url = validate_redirect(request)
    send_verification_email(
        contact, request, next_url=next_url,
        redirect_field_name=redirect_field_name)
    return False
def check_email_verified(request, user,
                         redirect_field_name=REDIRECT_FIELD_NAME,
                         next_url=None):
    """Return True when *user*'s e-mail address has been verified;
    otherwise re-send the verification e-mail and return False."""
    if Contact.objects.is_reachable_by_email(user):
        return True
    # Not verified yet: send the verification e-mail again.
    #pylint:disable=unused-variable
    contact, created = Contact.objects.prepare_email_verification(
        user, user.email)
    if not next_url:
        next_url = validate_redirect(request)
    send_verification_email(
        contact, request, next_url=next_url,
        redirect_field_name=redirect_field_name)
    return False
def check_phone_verified(request, user,
                         redirect_field_name=REDIRECT_FIELD_NAME,
                         next_url=None):
    """
    Checks that a *user*'s phone number has been verified; when it is
    not, (re-)send the verification message and return False.
    """
    #pylint:disable=unused-variable
    if Contact.objects.is_reachable_by_phone(user):
        return True
    contact, created = Contact.objects.prepare_phone_verification(
        user, user.phone) # XXX
    # Let's send e-mail again.
    # NOTE(review): send_verification_phone currently still delivers an
    # e-mail-based verification (see its XXX) — confirm once SMS lands.
    if not next_url:
        next_url = validate_redirect(request)
    send_verification_phone(
        contact, request, next_url=next_url,
        redirect_field_name=redirect_field_name)
    return False
def fail_authenticated(request):
    """
    Authenticated
    """
    if is_authenticated(request):
        return False
    return str(settings.LOGIN_URL)
def fail_registered(request):
    """
    Registered
    """
    if is_authenticated(request):
        return False
    return str(reverse('registration_register'))
def fail_active(request):
    """
    Active with valid credentials
    """
    if check_has_credentials(request, request.user):
        return False
    return str(settings.LOGIN_URL)
def fail_verified_email(request):
    """
    Active with a verified e-mail address
    """
    if check_email_verified(request, request.user):
        return False
    return str(settings.LOGIN_URL)
def fail_verified_phone(request):
    """
    Active with a verified phone number
    """
    if check_phone_verified(request, request.user):
        return False
    return str(settings.LOGIN_URL)
def active_required(function=None,
                    redirect_field_name=REDIRECT_FIELD_NAME,
                    login_url=None):
    """
    Decorator for views that checks that the user is active. We won't
    activate the account of a user until we checked the email address
    is valid.

    Usable both bare (``@active_required``) and parameterized
    (``@active_required(login_url=...)``).
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # Anonymous users are redirected to the login page by default.
            redirect_url = login_url or str(settings.LOGIN_URL)
            if is_authenticated(request):
                redirect_url = fail_active(request)
                if not redirect_url:
                    # Account is active: run the wrapped view.
                    return view_func(request, *args, **kwargs)
                # User is logged in but her email has not been verified yet.
                http_accepts = get_accept_list(request)
                if 'text/html' in http_accepts:
                    messages.info(request, _(
                        "You should now secure and activate your account following the instructions"\
                        " we just emailed you. Thank you."))
                # Log the user out until the account is activated.
                auth_logout(request)
            return redirect_or_denied(request, redirect_url,
                redirect_field_name=redirect_field_name)
        return _wrapped_view

    if function:
        return decorator(function)
    return decorator
def verified_email_required(function=None,
                            redirect_field_name=REDIRECT_FIELD_NAME,
                            login_url=None):
    """
    Decorator for views that checks that the user has a verified e-mail address.

    Usable both bare (``@verified_email_required``) and parameterized
    (``@verified_email_required(login_url=...)``).
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # Anonymous users are redirected to the login page by default.
            redirect_url = login_url or str(settings.LOGIN_URL)
            if is_authenticated(request):
                redirect_url = fail_verified_email(request)
                if not redirect_url:
                    # E-mail verified: run the wrapped view.
                    return view_func(request, *args, **kwargs)
                # User is logged in but her email has not been verified yet.
                http_accepts = get_accept_list(request)
                if 'text/html' in http_accepts:
                    messages.info(request, _(
                        "You should now secure and activate your account following the instructions"\
                        " we just emailed you. Thank you."))
                # Log the user out until the address is verified.
                auth_logout(request)
            return redirect_or_denied(request, redirect_url,
                redirect_field_name=redirect_field_name)
        return _wrapped_view

    if function:
        return decorator(function)
    return decorator
|
A GOOD FRIEND — USED TO RUN A golf community in Manatee County, and a couple career moves later, he’s running an exclusive luxury community off Hilton Head, South Carolina. Did I mention it’s only accessible by private ferry and there are no cars on the island?
Usually when I go someplace new, I study and research it to a fault. I feel like I’ve been there already before I even pack! I tried doing the same thing for Haig Point, but even after reading all the articles, I still wasn’t grasping what it was. That was both confusing and exciting.
After landing in Savannah, Erin and I drove an hour to a private entrance that revealed—a big parking lot. But something unusual was happening on the far end: People were loading their suitcases on a jeep’s trailer, and the bags were being loaded onto a ferry at the end of a dock.
We boarded a two-story ferry and tried to blend in with the regulars that come and go all the time. Twenty-five minutes later, we were received by our friends at the Haig Point dock. We loaded our bags on our designated golf cart and followed our hosts to lunch. Quiet paved roads with beautiful homes spread out on either side. No commerce, no noise, nothing. The only way to get around—golf carts. Peaceful. This was all surreal. A “gated” country club community in the middle of nowhere!
After lunch in the clubhouse grill next to the golf shop, our hosts took us to a restored mansion that had actually been shipped here from the 5-star resort a couple hours south called “Sea Island” where, coincidentally, my family used to vacation when I was growing up.
Beautiful hard-wood floors, a bar, sitting areas and four large rooms upstairs. Our suite faced the water and we were able to go out the porch door in our room and sit on a private deck.
We changed our clothes and went over to the tennis courts. Before playing mixed doubles, we watched the Haig Point ladies team compete against a club that’s team had taken the boat in with us from Hilton Head. Half a dozen soft courts, a fully-stocked pro shop and work-out area—next to a heated pool.
After a fun work-out, guys versus the girls, we cleaned up and met for dinner at another facility on the island– a restaurant with a great view of the water and Sea Pines, South Carolina. We had a wonderful dinner—Erin had sea bass and I had fillet. After dinner, we stopped by a wedding reception to have a little more fun. Yes, call us wedding crashers—but it was worth it.
I woke up early the next morning and watched the sun come up over the Atlantic Ocean. We went downstairs for a continental breakfast on the mansion’s first floor. Convenient.
We played tennis with another couple we met the day before and then had lunch in the grill again. Later in the day we took a water taxi across the river to Sea Pines where we had a great dinner of crab legs, french fries, cold beer, and ice cream for dessert. Decadent.
Next articleSarasota’s Florida Wine Fest!
|
from django.db import models
class Location(models.Model):
    """A venue where events take place, optionally linked to a CMS page."""
    name = models.CharField(max_length=128)
    # Optional CMS page describing this location.
    page = models.ForeignKey("simplecms.Page", blank=True, null=True)

    def __unicode__(self):
        return self.name
class EventDescription(models.Model):
    """Human-facing details (cost, instructor, venue, linked content) for
    one ``schedule.Event``."""
    cost = models.TextField(default="", blank=True)
    # Only German-locale, non-root CMS pages may be linked.
    page = models.ForeignKey(
        "simplecms.Page", blank=True, null=True,
        limit_choices_to={"parent__isnull": False,
                          'locale': 'de'})
    post = models.ForeignKey("zinnia.Entry", blank=True, null=True)
    location = models.ForeignKey(Location)
    instructor = models.CharField(max_length=128)
    event = models.OneToOneField("schedule.Event")

    def __unicode__(self):
        return self.event.title

    def get_absolute_url(self):
        """Prefer the blog post's URL, fall back to the CMS page's URL,
        and return None when neither is set.

        ``post`` and ``page`` are nullable; accessing
        ``None.get_absolute_url()`` raises AttributeError, which is the
        case these handlers exist for (the previous bare ``except:``
        also hid unrelated errors).
        """
        try:
            return self.post.get_absolute_url()
        except AttributeError:
            pass
        try:
            return self.page.get_absolute_url()
        except AttributeError:
            pass
"""
list(rrule(dateutil.rrule.MONTHLY, count=5,
byweekday=dateutil.rrule.FR,
dtstart=datetime.date(2015,4,3),
bysetpos=1))
=>
byweekday:4;bysetpos:1
"""
|
Sometimes seasonal events and seemingly unrelated industries can have an adverse effect on our profession. Case in point, watermelons and produce from southern states can have a drastic effect on the price and availability of lumber moving to the northern part of the country. Sounds a bit unbelievable but it’s true.
In the spring, when produce is ready to be shipped to market, growers do not have the luxury of finding the most economical rate for a truck. Due to extremely limited availability, they must pay what the market will bear or lose their crop. The result is that loads of lumber that do not pay what produce does, are bypassed by truckers trying to maximize their profits. Companies that have developed their business to operate on just in time delivery are often hard pressed to maintain adequate supplies of raw materials during this time of the year. The trucking industry unfortunately is not what it used to be. There are roughly 25,000 unfilled truck driving positions in the U.S. This number will increase over time as drivers retire and are not replaced. The result will be greater difficulty in finding trucks and when they are able to move a load, the prices will be substantially higher. This all affects what manufacturers are able to charge for their product.
One way to stay ahead of this trend is to make arrangements as soon as possible to ensure you have a firm commitment with your shipping partners to receive raw materials in a timely fashion. To further assist with the freight issue, we have a special program with CH Robinson Company, one of the largest movers of freight in the world. This program helps members with both LTL and flatbed shipments to arrive within a reasonable time frame and as economical as possible.
|
#!/usr/bin/python
"""Takes the TOC, this time the raw HTML, and produces an ebook xhtml TOC with
rewritten local links.
We're producing this directly from the html so that we can keep the extra
multi-level chapter structure without parsing the entire thing into some
hierarchical tree.
"""
from bs4 import BeautifulSoup
import common
from string import Template
import pkg_resources
# Template wrapping the extracted TOC entries into a complete xhtml page.
toc_template = Template(
    pkg_resources.resource_string(__name__, "toc_template.xhtml"))

if __name__ == "__main__":
    # Parse the cached TOC page; the with-block closes the file handle
    # the original code leaked.
    with open(
            "web_cache/edgeofyourseat.dreamwidth.org/2121.html") as toc_file:
        soup = BeautifulSoup(toc_file)
    the_toc_html = soup.select(".entry-content")[0]

    # Remove the "how to read" link.
    the_toc_html.find_all("center")[0].extract()

    # As for the others, parse them & replace them with the appropriate internal
    # links.
    common.replace_links_with_internal(the_toc_html)

    toc_string = the_toc_html.decode_contents(formatter="html")
    toc_html_string = toc_template.substitute(toc_entries=toc_string)

    with open("global_lists/toc.xhtml", mode="w") as f:
        f.write(toc_html_string.encode('utf-8'))
|
Painted in acrylics, at her home studio.
Measures 122cm wide by 91cm high, 4 cm deep gallery quality canvas frame.
Original Painting on Canvas "Adelaide Hills" Original Painting on Canvas "Protea on Grey"
|
# Copyright 1999-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.PollScheduler import PollScheduler
class MetadataRegen(PollScheduler):
    """Regenerate the ebuild metadata cache.

    Schedules one EbuildMetadataPhase subprocess per package whose cache
    entry is stale or missing, then prunes cache entries that no longer
    correspond to an existing ebuild.
    """

    def __init__(self, portdb, cp_iter=None, consumer=None,
        max_jobs=None, max_load=None):
        # portdb: portage tree database to regenerate.
        # cp_iter: optional iterator of category/package names; defaults
        #     to every cp in the tree.
        # consumer: optional callback consumer(cpv, ebuild_path,
        #     repo_path, metadata) invoked for every package (metadata is
        #     None on failure).
        PollScheduler.__init__(self)
        self._portdb = portdb
        self._global_cleanse = False
        if cp_iter is None:
            cp_iter = self._iter_every_cp()
            # We can globally cleanse stale cache only if we
            # iterate over every single cp.
            self._global_cleanse = True
        self._cp_iter = cp_iter
        self._consumer = consumer

        if max_jobs is None:
            max_jobs = 1

        self._max_jobs = max_jobs
        self._max_load = max_load

        self._valid_pkgs = set()
        self._cp_set = set()
        self._process_iter = self._iter_metadata_processes()
        self.returncode = os.EX_OK
        self._error_count = 0

    def _iter_every_cp(self):
        """Lazily yield every cp in the tree, in sorted order."""
        every_cp = self._portdb.cp_all()
        # Pop from a reverse-sorted list so memory is released as we go
        # while still yielding in ascending order.
        every_cp.sort(reverse=True)
        try:
            while True:
                yield every_cp.pop()
        except IndexError:
            pass

    def _iter_metadata_processes(self):
        """Yield an EbuildMetadataPhase for each cpv with a stale cache.

        Packages whose cache is still valid are handed to the consumer
        directly, without spawning a subprocess.
        """
        portdb = self._portdb
        valid_pkgs = self._valid_pkgs
        cp_set = self._cp_set
        consumer = self._consumer

        for cp in self._cp_iter:
            cp_set.add(cp)
            portage.writemsg_stdout("Processing %s\n" % cp)
            cpv_list = portdb.cp_list(cp)
            for cpv in cpv_list:
                valid_pkgs.add(cpv)
                ebuild_path, repo_path = portdb.findname2(cpv)
                if ebuild_path is None:
                    raise AssertionError("ebuild not found for '%s'" % cpv)
                metadata, st, emtime = portdb._pull_valid_cache(
                    cpv, ebuild_path, repo_path)
                if metadata is not None:
                    # Cache hit: report it and skip the subprocess.
                    if consumer is not None:
                        consumer(cpv, ebuild_path,
                            repo_path, metadata)
                    continue

                yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
                    ebuild_mtime=emtime,
                    metadata_callback=portdb._metadata_callback,
                    portdb=portdb, repo_path=repo_path,
                    settings=portdb.doebuild_settings)

    def run(self):
        """Run all metadata jobs to completion, then cleanse dead cache
        entries (entries without a corresponding ebuild)."""
        portdb = self._portdb
        from portage.cache.cache_errors import CacheError
        dead_nodes = {}

        # Drive the scheduler until every metadata job has finished.
        while self._schedule():
            self._poll_loop()

        while self._jobs:
            self._poll_loop()

        if self._global_cleanse:
            # Every cp was visited, so any cache entry may be stale.
            for mytree in portdb.porttrees:
                try:
                    dead_nodes[mytree] = set(portdb.auxdb[mytree])
                except CacheError as e:
                    portage.writemsg("Error listing cache entries for " + \
                        "'%s': %s, continuing...\n" % (mytree, e),
                        noiselevel=-1)
                    del e
                    # Listing failed: skip cleansing entirely.
                    dead_nodes = None
                    break
        else:
            # Only consider cache entries for the cps we processed.
            cp_set = self._cp_set
            cpv_getkey = portage.cpv_getkey
            for mytree in portdb.porttrees:
                try:
                    dead_nodes[mytree] = set(cpv for cpv in \
                        portdb.auxdb[mytree] \
                        if cpv_getkey(cpv) in cp_set)
                except CacheError as e:
                    portage.writemsg("Error listing cache entries for " + \
                        "'%s': %s, continuing...\n" % (mytree, e),
                        noiselevel=-1)
                    del e
                    dead_nodes = None
                    break

        if dead_nodes:
            # Keep entries for packages that still have an ebuild in
            # some tree.
            for y in self._valid_pkgs:
                for mytree in portdb.porttrees:
                    if portdb.findname2(y, mytree=mytree)[0]:
                        dead_nodes[mytree].discard(y)

            # Delete whatever remains from each tree's cache.
            for mytree, nodes in dead_nodes.items():
                auxdb = portdb.auxdb[mytree]
                for y in nodes:
                    try:
                        del auxdb[y]
                    except (KeyError, CacheError):
                        pass

    def _schedule_tasks(self):
        """
        @rtype: bool
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            try:
                metadata_process = next(self._process_iter)
            except StopIteration:
                return False

            self._jobs += 1
            metadata_process.scheduler = self.sched_iface
            metadata_process.addExitListener(self._metadata_exit)
            metadata_process.start()
        return True

    def _metadata_exit(self, metadata_process):
        """Exit listener: account for a finished job and keep scheduling."""
        self._jobs -= 1
        if metadata_process.returncode != os.EX_OK:
            self.returncode = 1
            self._error_count += 1
            self._valid_pkgs.discard(metadata_process.cpv)
            portage.writemsg("Error processing %s, continuing...\n" % \
                (metadata_process.cpv,), noiselevel=-1)

        if self._consumer is not None:
            # On failure, still notify the consumer (in this case the metadata
            # argument is None).
            self._consumer(metadata_process.cpv,
                metadata_process.ebuild_path,
                metadata_process.repo_path,
                metadata_process.metadata)

        self._schedule()
|
For four Saturdays, Power Mac Center will hold Sync Sessions 2017 – an audio-digital crossover experience that lets Filipino artists showcase their music style with the help of Apple devices and accessories.
Sync Sessions will turn Power Mac Center stores into a stage. The artists will be using iOS apps like GarageBand for iPad, allowing them to create and record music by giving various effects and instruments.
“We’ll get to listen to music that’s very modern and refreshing, all thanks to our performers, who are mostly millennials. With Sync Sessions, Power Mac Center is giving these artists an opportunity to redefine their music and share it. We also hope to inspire budding artists that they can harness technology to create beautiful music,” Power Mac Center Marketing Director Joey Alvarez said.
Powered by Beats by Dr. Dre, admission at Sync Sessions 2017 is free.
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
from multiprocessing import Process, Value, Condition, reduction
from TServer import TServer
from thrift.transport.TTransport import TTransportException
class TProcessPoolServer(TServer):
    """Server with a fixed size pool of worker subprocesses to service requests

    Note that if you need shared state between the handlers - it's up to you!
    Written by Dvir Volk, doat.com
    """
    def __init__(self, *args):
        TServer.__init__(self, *args)
        # Number of worker subprocesses forked by serve().
        self.numWorkers = 10
        self.workers = []
        # Shared boolean flag; workers poll it to know when to exit.
        self.isRunning = Value('b', False)
        # serve() blocks on this condition until stop() notifies it.
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        """Register a callable invoked in each worker right after fork."""
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker threads that should be created"""
        self.numWorkers = num

    def workerProcess(self):
        """Loop getting clients from the shared queue and process them"""
        if self.postForkCallback:
            self.postForkCallback()
        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception, x:
                logging.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransportException, tx:
            # Client disconnected: normal end of the service loop.
            pass
        except Exception, x:
            logging.exception(x)

        itrans.close()
        otrans.close()

    def serve(self):
        """Start workers and put into queue"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True

        # first bind and listen to the port
        self.serverTransport.listen()

        # fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception, x:
                logging.exception(x)

        # wait until the condition is set by stop()
        while True:
            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception, x:
                logging.exception(x)

        self.isRunning.value = False

    def stop(self):
        """Signal serve() to wake up and shut the pool down."""
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
|
"Reversible transformation in preferentially oriented sialon ceramics" by Andrew Carman, E V. Pereloma et al.
Carman, A., Pereloma, E., & Cheng, Y. (2006). Reversible transformation in preferentially oriented sialon ceramics. Journal of the European Ceramic Society, 26 (8), 1337-1349.
A preferential orientation was observed in a Nd-(α + β)-sialon ceramic, of the composition Nd0.4Si9.6Al2.4O1.2N14.8, after hot pressing at 1800 °C for 2 h. Post-sintering heat treatment was performed at 1450 °C for 72 h, resulting in the α′ → β′ transformation, accompanied by an increase in the M′ and 21R phases. Subsequent heat treatment at 1800 °C for 4 h resulted in a reverse transformation from β′ to α′, with a corresponding decrease in M′ and 21R. This indicates that the α′ + liquid ↔ β′ + M′ + 21R transformation reaction is thermodynamically reversible. The microstructure was also found to be reversible, except for a certain degree of grain growth. The mechanical properties were determined from indentation tests, and were found to cycle with the transformation cycling. In addition, the preferred orientation introduced during hot pressing was observed in both the forward and reverse transformations, indicating that there is a crystallographic relationship between the transformation phases.
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The central Model and Database constructs for DBCore.
"""
from __future__ import division, absolute_import, print_function
import time
import os
import re
from collections import defaultdict
import threading
import sqlite3
import contextlib
import beets
from beets.util import functemplate
from beets.util import py3_path
from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery
import six
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
class DBAccessError(Exception):
    """The SQLite database became inaccessible.

    This can happen when trying to read or write the database when, for
    example, the database file is deleted or otherwise disappears. There
    is probably no way to recover from this error.
    """
class FormattedMapping(Mapping):
    """A read-only, `dict`-like view of a model whose values are all
    rendered as human-readable unicode strings.

    Subscripting with ``mapping[key]`` yields the formatted version of
    ``model[key]``. When `for_path` is true, path separators (and the
    Windows drive separator) inside each formatted value are replaced,
    making the result safe to embed in a filesystem path.
    """

    def __init__(self, model, for_path=False):
        self.for_path = for_path
        self.model = model
        self.model_keys = model.keys(True)

    def __getitem__(self, key):
        if key not in self.model_keys:
            raise KeyError(key)
        return self._get_formatted(self.model, key)

    def __iter__(self):
        return iter(self.model_keys)

    def __len__(self):
        return len(self.model_keys)

    def get(self, key, default=None):
        if default is None:
            # Fall back to the formatted "null" value for this field's type.
            default = self.model._type(key).format(None)
        return super(FormattedMapping, self).get(key, default)

    def _get_formatted(self, model, key):
        formatted = model._type(key).format(model.get(key))
        if isinstance(formatted, bytes):
            formatted = formatted.decode('utf-8', 'ignore')

        if self.for_path:
            sep_repl = beets.config['path_sep_replace'].as_str()
            sep_drive = beets.config['drive_sep_replace'].as_str()

            # A leading "X:" looks like a drive prefix; replace its colon.
            if re.match(r'^\w:', formatted):
                formatted = re.sub(r'(?<=^\w):', sep_drive, formatted)

            for sep in (os.path.sep, os.path.altsep):
                if sep:
                    formatted = formatted.replace(sep, sep_repl)

        return formatted
class LazyConvertDict(object):
    """Lazily convert types for attributes fetched from the database.

    Raw SQL values live in `data`; they are converted to in-memory
    values (via the model's `Type.from_sql`) only on first access, and
    the converted result is cached in `_converted`.
    """

    def __init__(self, model_cls):
        """Initialize the object empty.

        `model_cls` is the Model subclass whose field types drive
        conversion.
        """
        self.data = {}
        self.model_cls = model_cls
        self._converted = {}

    def init(self, data):
        """Set the base data that should be lazily converted.
        """
        self.data = data

    def _convert(self, key, value):
        """Convert the attribute type according to the SQL type.
        """
        return self.model_cls._type(key).from_sql(value)

    def __setitem__(self, key, value):
        """Set an attribute value, assume it's already converted.
        """
        self._converted[key] = value

    def __getitem__(self, key):
        """Get an attribute value, converting the type on demand if
        needed. Raise KeyError for unknown keys.
        """
        if key in self._converted:
            return self._converted[key]
        elif key in self.data:
            value = self._convert(key, self.data[key])
            self._converted[key] = value
            return value
        # BUG FIX: previously this fell through and implicitly returned
        # None for missing keys; raise KeyError like a normal mapping.
        raise KeyError(key)

    def __delitem__(self, key):
        """Delete both converted and base data.
        """
        if key in self._converted:
            del self._converted[key]
        if key in self.data:
            del self.data[key]

    def keys(self):
        """Get a list of available field names for this object.

        Each key is reported exactly once, even after a lazy conversion
        has left it present in both `data` and the `_converted` cache.
        """
        # BUG FIX: the previous concatenation duplicated any key that had
        # already been converted (it stayed in `data` too).
        converted = list(self._converted.keys())
        seen = set(converted)
        return converted + [k for k in self.data.keys() if k not in seen]

    def copy(self):
        """Create a copy of the object.
        """
        new = self.__class__(self.model_cls)
        new.data = self.data.copy()
        new._converted = self._converted.copy()
        return new

    # Act like a dictionary.

    def update(self, values):
        """Assign all values in the given dict.
        """
        for key, value in values.items():
            self[key] = value

    def items(self):
        """Iterate over (key, value) pairs that this object contains.
        Computed fields are not included.
        """
        for key in self:
            yield key, self[key]

    def get(self, key, default=None):
        """Get the value for a given key or `default` if it does not
        exist.
        """
        if key in self:
            return self[key]
        else:
            return default

    def __contains__(self, key):
        """Determine whether `key` is an attribute on this object.
        """
        return key in self.keys()

    def __iter__(self):
        """Iterate over the available field names (excluding computed
        fields).
        """
        return iter(self.keys())
# Abstract base for model classes.
class Model(object):
    """An abstract object representing an object in the database. Model
    objects act like dictionaries (i.e., they allow subscript access like
    ``obj['field']``). The same field set is available via attribute
    access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
    available:

    * **Fixed attributes** come from a predetermined list of field
      names. These fields correspond to SQLite table columns and are
      thus fast to read, write, and query.
    * **Flexible attributes** are free-form and do not need to be listed
      ahead of time.
    * **Computed attributes** are read-only fields computed by a getter
      function provided by a plugin.

    Access to all three field types is uniform: ``obj.field`` works the
    same regardless of whether ``field`` is fixed, flexible, or
    computed.

    Model objects can optionally be associated with a `Library` object,
    in which case they can be loaded and stored from the database. Dirty
    flags are used to track which fields need to be stored.
    """

    # Abstract components (to be provided by subclasses).

    _table = None
    """The main SQLite table name.
    """

    _flex_table = None
    """The flex field SQLite table name.
    """

    _fields = {}
    """A mapping indicating available "fixed" fields on this type. The
    keys are field names and the values are `Type` objects.
    """

    _search_fields = ()
    """The fields that should be queried by default by unqualified query
    terms.
    """

    _types = {}
    """Optional Types for non-fixed (i.e., flexible and computed) fields.
    """

    _sorts = {}
    """Optional named sort criteria. The keys are strings and the values
    are subclasses of `Sort`.
    """

    _queries = {}
    """Named queries that use a field-like `name:value` syntax but which
    do not relate to any specific field.
    """

    _always_dirty = False
    """By default, fields only become "dirty" when their value actually
    changes. Enabling this flag marks fields as dirty even when the new
    value is the same as the old value (e.g., `o.f = o.f`).
    """

    @classmethod
    def _getters(cls):
        """Return a mapping from field names to getter functions.
        """
        # We could cache this if it becomes a performance problem to
        # gather the getter mapping every time.
        raise NotImplementedError()

    def _template_funcs(self):
        """Return a mapping from function names to text-transformer
        functions.
        """
        # As above: we could consider caching this result.
        raise NotImplementedError()

    # Basic operation.

    def __init__(self, db=None, **values):
        """Create a new object with an optional Database association and
        initial field values.
        """
        self._db = db
        self._dirty = set()
        self._values_fixed = LazyConvertDict(self)
        self._values_flex = LazyConvertDict(self)

        # Initial contents.
        self.update(values)
        self.clear_dirty()

    @classmethod
    def _awaken(cls, db=None, fixed_values=None, flex_values=None):
        """Create an object with values drawn from the database.

        This is a performance optimization: the checks involved with
        ordinary construction are bypassed.
        """
        # BUG FIX: these parameters previously defaulted to shared
        # mutable dicts ({}). Every object awakened without explicit
        # values then aliased the *same* dict, so a later __delitem__ on
        # one object silently mutated the state of all the others. Use
        # None sentinels and hand each object a fresh dict instead.
        obj = cls(db)
        obj._values_fixed.init({} if fixed_values is None else fixed_values)
        obj._values_flex.init({} if flex_values is None else flex_values)
        return obj

    def __repr__(self):
        return '{0}({1})'.format(
            type(self).__name__,
            ', '.join('{0}={1!r}'.format(k, v) for k, v in dict(self).items()),
        )

    def clear_dirty(self):
        """Mark all fields as *clean* (i.e., not needing to be stored to
        the database).
        """
        self._dirty = set()

    def _check_db(self, need_id=True):
        """Ensure that this object is associated with a database row: it
        has a reference to a database (`_db`) and an id. A ValueError
        exception is raised otherwise.
        """
        if not self._db:
            raise ValueError(
                u'{0} has no database'.format(type(self).__name__)
            )
        if need_id and not self.id:
            raise ValueError(u'{0} has no id'.format(type(self).__name__))

    def copy(self):
        """Create a copy of the model object.

        The field values and other state is duplicated, but the new copy
        remains associated with the same database as the old object.
        (A simple `copy.deepcopy` will not work because it would try to
        duplicate the SQLite connection.)
        """
        new = self.__class__()
        new._db = self._db
        new._values_fixed = self._values_fixed.copy()
        new._values_flex = self._values_flex.copy()
        new._dirty = self._dirty.copy()
        return new

    # Essential field accessors.

    @classmethod
    def _type(cls, key):
        """Get the type of a field, a `Type` instance.

        If the field has no explicit type, it is given the base `Type`,
        which does no conversion.
        """
        return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT

    def __getitem__(self, key):
        """Get the value for a field. Raise a KeyError if the field is
        not available.
        """
        getters = self._getters()
        if key in getters:  # Computed.
            return getters[key](self)
        elif key in self._fields:  # Fixed.
            if key in self._values_fixed:
                return self._values_fixed[key]
            else:
                return self._type(key).null
        elif key in self._values_flex:  # Flexible.
            return self._values_flex[key]
        else:
            raise KeyError(key)

    def _setitem(self, key, value):
        """Assign the value for a field, return whether new and old value
        differ.
        """
        # Choose where to place the value.
        if key in self._fields:
            source = self._values_fixed
        else:
            source = self._values_flex

        # If the field has a type, filter the value.
        value = self._type(key).normalize(value)

        # Assign value and possibly mark as dirty.
        old_value = source.get(key)
        source[key] = value
        changed = old_value != value
        if self._always_dirty or changed:
            self._dirty.add(key)

        return changed

    def __setitem__(self, key, value):
        """Assign the value for a field.
        """
        self._setitem(key, value)

    def __delitem__(self, key):
        """Remove a flexible attribute from the model.
        """
        if key in self._values_flex:  # Flexible.
            del self._values_flex[key]
            self._dirty.add(key)  # Mark for dropping on store.
        elif key in self._fields:  # Fixed
            setattr(self, key, self._type(key).null)
        elif key in self._getters():  # Computed.
            raise KeyError(u'computed field {0} cannot be deleted'.format(key))
        else:
            raise KeyError(u'no such field {0}'.format(key))

    def keys(self, computed=False):
        """Get a list of available field names for this object. The
        `computed` parameter controls whether computed (plugin-provided)
        fields are included in the key list.
        """
        base_keys = list(self._fields) + list(self._values_flex.keys())
        if computed:
            return base_keys + list(self._getters().keys())
        else:
            return base_keys

    @classmethod
    def all_keys(cls):
        """Get a list of available keys for objects of this type.
        Includes fixed and computed fields.
        """
        return list(cls._fields) + list(cls._getters().keys())

    # Act like a dictionary.

    def update(self, values):
        """Assign all values in the given dict.
        """
        for key, value in values.items():
            self[key] = value

    def items(self):
        """Iterate over (key, value) pairs that this object contains.
        Computed fields are not included.
        """
        for key in self:
            yield key, self[key]

    def get(self, key, default=None):
        """Get the value for a given key or `default` if it does not
        exist.
        """
        if key in self:
            return self[key]
        else:
            return default

    def __contains__(self, key):
        """Determine whether `key` is an attribute on this object.
        """
        return key in self.keys(True)

    def __iter__(self):
        """Iterate over the available field names (excluding computed
        fields).
        """
        return iter(self.keys())

    # Convenient attribute access.

    def __getattr__(self, key):
        if key.startswith('_'):
            raise AttributeError(u'model has no attribute {0!r}'.format(key))
        else:
            try:
                return self[key]
            except KeyError:
                raise AttributeError(u'no such field {0!r}'.format(key))

    def __setattr__(self, key, value):
        if key.startswith('_'):
            super(Model, self).__setattr__(key, value)
        else:
            self[key] = value

    def __delattr__(self, key):
        if key.startswith('_'):
            super(Model, self).__delattr__(key)
        else:
            del self[key]

    # Database interaction (CRUD methods).

    def store(self, fields=None):
        """Save the object's metadata into the library database.
        :param fields: the fields to be stored. If not specified, all fields
        will be.
        """
        if fields is None:
            fields = self._fields
        self._check_db()

        # Build assignments for query.
        assignments = []
        subvars = []
        for key in fields:
            if key != 'id' and key in self._dirty:
                self._dirty.remove(key)
                assignments.append(key + '=?')
                value = self._type(key).to_sql(self[key])
                subvars.append(value)
        assignments = ','.join(assignments)

        with self._db.transaction() as tx:
            # Main table update.
            if assignments:
                query = 'UPDATE {0} SET {1} WHERE id=?'.format(
                    self._table, assignments
                )
                subvars.append(self.id)
                tx.mutate(query, subvars)

            # Modified/added flexible attributes.
            for key, value in self._values_flex.items():
                if key in self._dirty:
                    self._dirty.remove(key)
                    tx.mutate(
                        'INSERT INTO {0} '
                        '(entity_id, key, value) '
                        'VALUES (?, ?, ?);'.format(self._flex_table),
                        (self.id, key, value),
                    )

            # Deleted flexible attributes.
            for key in self._dirty:
                tx.mutate(
                    'DELETE FROM {0} '
                    'WHERE entity_id=? AND key=?'.format(self._flex_table),
                    (self.id, key)
                )

        self.clear_dirty()

    def load(self):
        """Refresh the object's metadata from the library database.
        """
        self._check_db()
        stored_obj = self._db._get(type(self), self.id)
        assert stored_obj is not None, u"object {0} not in DB".format(self.id)
        self._values_fixed = LazyConvertDict(self)
        self._values_flex = LazyConvertDict(self)
        self.update(dict(stored_obj))
        self.clear_dirty()

    def remove(self):
        """Remove the object's associated rows from the database.
        """
        self._check_db()
        with self._db.transaction() as tx:
            tx.mutate(
                'DELETE FROM {0} WHERE id=?'.format(self._table),
                (self.id,)
            )
            tx.mutate(
                'DELETE FROM {0} WHERE entity_id=?'.format(self._flex_table),
                (self.id,)
            )

    def add(self, db=None):
        """Add the object to the library database. This object must be
        associated with a database; you can provide one via the `db`
        parameter or use the currently associated database.

        The object's `id` and `added` fields are set along with any
        current field values.
        """
        if db:
            self._db = db
        self._check_db(False)

        with self._db.transaction() as tx:
            new_id = tx.mutate(
                'INSERT INTO {0} DEFAULT VALUES'.format(self._table)
            )
            self.id = new_id
            self.added = time.time()

            # Mark every non-null field as dirty and store.
            for key in self:
                if self[key] is not None:
                    self._dirty.add(key)
            self.store()

    # Formatting and templating.

    _formatter = FormattedMapping

    def formatted(self, for_path=False):
        """Get a mapping containing all values on this object formatted
        as human-readable unicode strings.
        """
        return self._formatter(self, for_path)

    def evaluate_template(self, template, for_path=False):
        """Evaluate a template (a string or a `Template` object) using
        the object's fields. If `for_path` is true, then no new path
        separators will be added to the template.
        """
        # Perform substitution.
        if isinstance(template, six.string_types):
            template = functemplate.template(template)
        return template.substitute(self.formatted(for_path),
                                   self._template_funcs())

    # Parsing.

    @classmethod
    def _parse(cls, key, string):
        """Parse a string as a value for the given key.
        """
        if not isinstance(string, six.string_types):
            raise TypeError(u"_parse() argument must be a string")

        return cls._type(key).parse(string)

    def set_parse(self, key, string):
        """Set the object's key to a value represented by a string.
        """
        self[key] = self._parse(key, string)
# Database controller and supporting interfaces.
class Results(object):
    """An item query result set. Iterating over the collection lazily
    constructs LibModel objects that reflect database rows.
    """

    def __init__(self, model_class, rows, db, flex_rows,
                 query=None, sort=None):
        """Create a result set that will construct objects of type
        `model_class`.

        `model_class` is a subclass of `LibModel` that will be
        constructed. `rows` is a query result: a list of mappings. The
        new objects will be associated with the database `db`.

        If `query` is provided, it is used as a predicate to filter the
        results for a "slow query" that cannot be evaluated by the
        database directly. If `sort` is provided, it is used to sort the
        full list of results before returning. This means it is a "slow
        sort" and all objects must be built before returning the first
        one.
        """
        self.model_class = model_class
        self.rows = rows
        self.db = db
        self.query = query
        self.sort = sort
        self.flex_rows = flex_rows

        # We keep a queue of rows we haven't yet consumed for
        # materialization. We preserve the original total number of
        # rows.
        self._rows = rows
        self._row_count = len(rows)

        # The materialized objects corresponding to rows that have been
        # consumed.
        self._objects = []

    def _get_objects(self):
        """Construct and generate Model objects for the query. The
        objects are returned in the order emitted from the database; no
        slow sort is applied.

        For performance, this generator caches materialized objects to
        avoid constructing them more than once. This way, iterating over
        a `Results` object a second time should be much faster than the
        first.
        """
        # Index flexible attributes by the item ID, so we have easier access
        flex_attrs = self._get_indexed_flex_attrs()

        index = 0  # Position in the materialized objects.
        while index < len(self._objects) or self._rows:
            # Are there previously-materialized objects to produce?
            if index < len(self._objects):
                yield self._objects[index]
                index += 1

            # Otherwise, we consume another row, materialize its object
            # and produce it.
            else:
                while self._rows:
                    row = self._rows.pop(0)
                    obj = self._make_model(row, flex_attrs.get(row['id'], {}))
                    # If there is a slow-query predicate, ensure that the
                    # object passes it.
                    if not self.query or self.query.match(obj):
                        self._objects.append(obj)
                        index += 1
                        yield obj
                        break

    def __iter__(self):
        """Construct and generate Model objects for all matching
        objects, in sorted order.
        """
        if self.sort:
            # Slow sort. Must build the full list first.
            objects = self.sort.sort(list(self._get_objects()))
            return iter(objects)

        else:
            # Objects are pre-sorted (i.e., by the database).
            return self._get_objects()

    def _get_indexed_flex_attrs(self):
        """Index flexible attributes by the entity id they belong to.
        """
        flex_values = dict()
        for row in self.flex_rows:
            if row['entity_id'] not in flex_values:
                flex_values[row['entity_id']] = dict()

            flex_values[row['entity_id']][row['key']] = row['value']

        return flex_values

    def _make_model(self, row, flex_values=None):
        """Create a Model object for the given row.
        """
        # BUG FIX: `flex_values` previously defaulted to a shared mutable
        # dict; use a None sentinel and a fresh dict per call instead.
        if flex_values is None:
            flex_values = {}

        cols = dict(row)
        values = dict((k, v) for (k, v) in cols.items()
                      if not k[:4] == 'flex')

        # Construct the Python object
        obj = self.model_class._awaken(self.db, values, flex_values)
        return obj

    def __len__(self):
        """Get the number of matching objects.
        """
        if not self._rows:
            # Fully materialized. Just count the objects.
            return len(self._objects)

        elif self.query:
            # A slow query. Fall back to testing every object.
            count = 0
            for obj in self:
                count += 1
            return count

        else:
            # A fast query. Just count the rows.
            return self._row_count

    def __nonzero__(self):
        """Does this result contain any objects?
        """
        return self.__bool__()

    def __bool__(self):
        """Does this result contain any objects?
        """
        return bool(len(self))

    def __getitem__(self, n):
        """Get the nth item in this result set. This is inefficient: all
        items up to n are materialized and thrown away.
        """
        if not self._rows and not self.sort:
            # Fully materialized and already in order. Just look up the
            # object.
            return self._objects[n]

        it = iter(self)
        try:
            for i in range(n):
                next(it)
            return next(it)
        except StopIteration:
            raise IndexError(u'result index {0} out of range'.format(n))

    def get(self):
        """Return the first matching object, or None if no objects
        match.
        """
        it = iter(self)
        try:
            return next(it)
        except StopIteration:
            return None
class Transaction(object):
    """A context manager for safe, concurrent access to the database.
    All SQL commands should be executed through a transaction.
    """

    def __init__(self, db):
        self.db = db

    def __enter__(self):
        """Begin a transaction. This transaction may be created while
        another is active in a different thread.
        """
        with self.db._tx_stack() as stack:
            outermost = not stack
            stack.append(self)
        if outermost:
            # The first transaction on this thread's stack owns the real
            # SQLite transaction, so it takes the global database lock.
            self.db._db_lock.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Complete a transaction. This must be the most recently
        entered but not yet exited transaction. If it is the last active
        transaction, the database updates are committed.
        """
        with self.db._tx_stack() as stack:
            assert stack.pop() is self
            outermost = not stack
        if outermost:
            # Closing the root transaction: flush changes and let other
            # threads proceed.
            self.db._connection().commit()
            self.db._db_lock.release()

    def query(self, statement, subvals=()):
        """Execute an SQL statement with substitution values and return
        a list of rows from the database.
        """
        return self.db._connection().execute(statement, subvals).fetchall()

    def mutate(self, statement, subvals=()):
        """Execute an SQL statement with substitution values and return
        the row ID of the last affected row.
        """
        try:
            cursor = self.db._connection().execute(statement, subvals)
        except sqlite3.OperationalError as e:
            # In two specific cases, SQLite reports an error while
            # accessing the underlying database file. We surface these
            # exceptions as DBAccessError so the application can abort.
            message = e.args[0]
            if message in ("attempt to write a readonly database",
                           "unable to open database file"):
                raise DBAccessError(message)
            raise
        return cursor.lastrowid

    def script(self, statements):
        """Execute a string containing multiple SQL statements."""
        self.db._connection().executescript(statements)
class Database(object):
    """A container for Model objects that wraps an SQLite database as
    the backend.
    """

    _models = ()
    """The Model subclasses representing tables in this database.
    """

    supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension')
    """Whether or not the current version of SQLite supports extensions"""

    def __init__(self, path, timeout=5.0):
        # `path` is the filesystem location of the SQLite file;
        # `timeout` is how long (seconds) SQLite waits on a locked
        # database before giving up.
        self.path = path
        self.timeout = timeout

        # Per-thread private resources, both keyed by thread ID.
        self._connections = {}
        self._tx_stacks = defaultdict(list)

        # Paths of SQLite extensions loaded via load_extension().
        self._extensions = []

        # A lock to protect the _connections and _tx_stacks maps, which
        # both map thread IDs to private resources.
        self._shared_map_lock = threading.Lock()

        # A lock to protect access to the database itself. SQLite does
        # allow multiple threads to access the database at the same
        # time, but many users were experiencing crashes related to this
        # capability: where SQLite was compiled without HAVE_USLEEP, its
        # backoff algorithm in the case of contention was causing
        # whole-second sleeps (!) that would trigger its internal
        # timeout. Using this lock ensures only one SQLite transaction
        # is active at a time.
        self._db_lock = threading.Lock()

        # Set up database schema.
        for model_cls in self._models:
            self._make_table(model_cls._table, model_cls._fields)
            self._make_attribute_table(model_cls._flex_table)

    # Primitive access control: connections and transactions.

    def _connection(self):
        """Get a SQLite connection object to the underlying database.
        One connection object is created per thread.
        """
        thread_id = threading.current_thread().ident
        with self._shared_map_lock:
            if thread_id in self._connections:
                return self._connections[thread_id]
            else:
                conn = self._create_connection()
                self._connections[thread_id] = conn
                return conn

    def _create_connection(self):
        """Create a SQLite connection to the underlying database.

        Makes a new connection every time. If you need to configure the
        connection settings (e.g., add custom functions), override this
        method.
        """
        # Make a new connection. The `sqlite3` module can't use
        # bytestring paths here on Python 3, so we need to
        # provide a `str` using `py3_path`.
        conn = sqlite3.connect(
            py3_path(self.path), timeout=self.timeout
        )

        if self.supports_extensions:
            conn.enable_load_extension(True)

            # Load any extensions that are already loaded for other
            # connections.
            for path in self._extensions:
                conn.load_extension(path)

        # Access SELECT results like dictionaries.
        conn.row_factory = sqlite3.Row
        return conn

    def _close(self):
        """Close all connections to the underlying SQLite database
        from all threads. This does not render the database object
        unusable; new connections can still be opened on demand.
        """
        with self._shared_map_lock:
            self._connections.clear()

    @contextlib.contextmanager
    def _tx_stack(self):
        """A context manager providing access to the current thread's
        transaction stack. The context manager synchronizes access to
        the stack map. Transactions should never migrate across threads.
        """
        thread_id = threading.current_thread().ident
        with self._shared_map_lock:
            yield self._tx_stacks[thread_id]

    def transaction(self):
        """Get a :class:`Transaction` object for interacting directly
        with the underlying SQLite database.
        """
        return Transaction(self)

    def load_extension(self, path):
        """Load an SQLite extension into all open connections."""
        if not self.supports_extensions:
            raise ValueError(
                'this sqlite3 installation does not support extensions')

        self._extensions.append(path)

        # Load the extension into every open connection.
        for conn in self._connections.values():
            conn.load_extension(path)

    # Schema setup and migration.

    def _make_table(self, table, fields):
        """Set up the schema of the database. `fields` is a mapping
        from field names to `Type`s. Columns are added if necessary.
        """
        # Get current schema.
        with self.transaction() as tx:
            rows = tx.query('PRAGMA table_info(%s)' % table)
        # Column name is the second field of each table_info row.
        current_fields = set([row[1] for row in rows])

        field_names = set(fields.keys())
        if current_fields.issuperset(field_names):
            # Table exists and has all the required columns.
            return

        if not current_fields:
            # No table exists.
            columns = []
            for name, typ in fields.items():
                columns.append('{0} {1}'.format(name, typ.sql))
            setup_sql = 'CREATE TABLE {0} ({1});\n'.format(table,
                                                           ', '.join(columns))

        else:
            # Table exists does not match the field set.
            setup_sql = ''
            for name, typ in fields.items():
                if name in current_fields:
                    continue
                setup_sql += 'ALTER TABLE {0} ADD COLUMN {1} {2};\n'.format(
                    table, name, typ.sql
                )

        with self.transaction() as tx:
            tx.script(setup_sql)

    def _make_attribute_table(self, flex_table):
        """Create a table and associated index for flexible attributes
        for the given entity (if they don't exist).
        """
        with self.transaction() as tx:
            tx.script("""
                CREATE TABLE IF NOT EXISTS {0} (
                    id INTEGER PRIMARY KEY,
                    entity_id INTEGER,
                    key TEXT,
                    value TEXT,
                    UNIQUE(entity_id, key) ON CONFLICT REPLACE);
                CREATE INDEX IF NOT EXISTS {0}_by_entity
                    ON {0} (entity_id);
                """.format(flex_table))

    # Querying.

    def _fetch(self, model_cls, query=None, sort=None):
        """Fetch the objects of type `model_cls` matching the given
        query. The query may be given as a string, string sequence, a
        Query object, or None (to fetch everything). `sort` is an
        `Sort` object.
        """
        query = query or TrueQuery()  # A null query.
        sort = sort or NullSort()  # Unsorted.
        where, subvals = query.clause()
        order_by = sort.order_clause()

        sql = ("SELECT * FROM {0} WHERE {1} {2}").format(
            model_cls._table,
            where or '1',
            "ORDER BY {0}".format(order_by) if order_by else '',
        )

        # Fetch flexible attributes for items matching the main query.
        # Doing the per-item filtering in python is faster than issuing
        # one query per item to sqlite.
        flex_sql = ("""
            SELECT * FROM {0} WHERE entity_id IN
                (SELECT id FROM {1} WHERE {2});
            """.format(
            model_cls._flex_table,
            model_cls._table,
            where or '1',
        )
        )

        with self.transaction() as tx:
            rows = tx.query(sql, subvals)
            flex_rows = tx.query(flex_sql, subvals)

        return Results(
            model_cls, rows, self, flex_rows,
            None if where else query,  # Slow query component.
            sort if sort.is_slow() else None,  # Slow sort component.
        )

    def _get(self, model_cls, id):
        """Get a Model object by its id or None if the id does not
        exist.
        """
        return self._fetch(model_cls, MatchQuery('id', id)).get()
|
Home/Internet/5G Networks are Coming, but When?
A hot topic of discussion in tech circles, both on the consumer and business sides of the line, has been focused on the adoption and expansion of 5G networking. According to a recent study, as many as 15 million smartphone users are ready and willing to switch to 5G mobile networks. Part of the anticipation of the 5G revolution is the fact that the United States and South Korea have offered some 5G capabilities to consumers, including mobile hotspots and Fixed Wireless Access broadband services. Although the UK government has stated its commitment to bring 5G to the public, the rollout has been slower than some mobile users appreciate.
The move to 5G networks comes with a variety of potential benefits, including greater efficiencies in connectivity as well as ultra fast speeds. The implications for 5G expand beyond a better mobile experience for phone users; countless industries are also likely to benefit from the power behind the enhanced technology. However, 5G is not yet a full force in the UK, given the obstacles faced by both government organisations and private companies including mobile carriers.
In the simplest terms, 5G is the next step of wireless mobile connections, piggybacking off 3G and 4G networks already in place throughout the world. 5G, or the fifth generation of this type of technology, comprises certain radio frequencies broken up into bands that are higher than 4G networking. This increase in frequency allows more capacity across mobile networks, designed to optimise the abilities of mobile users as well as the growing market of connected devices. The technology is able to produce higher speeds and increased reliability of connections because of its shorter wavelength and greater bandwidth.
The promise of 5G for smartphone users is mostly focused on speed. Recent research, conducted in stabilised lab environments, highlights this promise, showing 5G has the ability to deliver a consistent download speed of 2.8Gbps. That is ten times faster than the current 4G mobile network. However, the true speed of 5G may be diluted due to widespread use and the distance between users. For mobile phone users, though, the increased speed and improved reliability show great promise.
Although the expansion of 5G in the UK is not yet fully here, smartphone users across all mobile carriers can anticipate some changes. A finance expert from Money Pug, a website used to compare mobile phone contracts, explains that the increased efficiencies offered by 5G networks are likely to lead to a premium paid by consumers. No mobile carriers have shared what that premium will be, but the cost required to make 5G a reality will be passed down, at least in part, to mobile phone users.
It is also unclear at this time which mobile phone carriers will offer 5G to their customers first. Currently, only a few thousand 5G enabled smartphones are available within the UK, despite the potential growth of the market. As this number increases, it is assumed that mobile phone carriers will partner with smartphone providers to offer the full 5G experience. It will be necessary, though, for consumers to take a close look at their wireless contract to ensure they are getting the best possible deal with 5G included.
While the hype of 5G is exciting from a consumer perspective, with greater speed and more reliable networks, businesses across the board have something to look forward to as well. Industries that are on the verge of technology breakthroughs, such as healthcare, construction, and manufacturing, can lean on the power of 5G networking to improve operations in several different facets. On the healthcare front, the ability to perform remote care and video examinations may be on the horizon. In manufacturing and construction, the ability to boost efficiencies in daily tasks can lead to cost savings and safer work environments for employees.
The government in the UK has shared its commitment to the 5G revolution, rolling out targeted testbeds in certain locations throughout the country and providing the capital needed to adopt new technologies. Private and public organisations have also jumped on board, offering investment in 5G innovations. The full adoption of 5G is not anticipated until 2025 in the UK, but the infrastructure, time, and capital needed to make it a reality is already up and running.
|
#!/usr/bin/python
# Filename: boutique.py
### REDESIGN of market.py
### (1) use web.py
### generator-based, yields web.storage from which you can extract whatever you need (url, sitekey)
'''
boutique
@author: Andrew Philpot
@version 0.6
WAT boutique module
Usage: python boutique.py
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
\t-s, --source:\tsource default backpage
\t-a, --application:\tapplication default escort (johnboard)
\t-c, --city:\tcity, must be quoted and include state, e.g., 'San Jose, CA', no default
\t-m, --market:\tFAA airport code used to designate market, default LAX
\t-t, --tier:\tsee wataux.markettiers, integer 1-99, no default
\t-r, --region:\t4-digit region code or 5-char region desig, see wataux.marketregions, no default
'''
import sys
import getopt
# import trbotdb
import watdb
import util
import re
import web
# web.py debug mode is noisy/side-effectful; keep it off for library use.
web.config.debug = False
# import logging
from watlog import watlog
logger = watlog("wat.boutique")
logger.info('wat.boutique initialized')
VERSION = '0.6'
REVISION = "$Revision: 22999 $"
# defaults for the command-line interface and constructors below
VERBOSE = True
SOURCE = 'backpage'
APPLICATION = 'escort'
# MARKET = 'LAX'
MARKET = None
CODE = MARKET
CITY = None
SITEKEY = None
TIER = None
REGION = None
# REGIONID = None
# Map each supported source to the name of the Boutique subclass that
# handles it; anything unknown falls back to the plain Boutique base.
boutiqueClassNames = {
    "backpage": "BackpageBoutique",
    "cityvibe": "CityvibeBoutique",
    "eros": "ErosBoutique",
    "humaniplex": "HumaniplexBoutique",
    "myredbook": "MyredbookBoutique",
    "sugardaddy": "SugardaddyBoutique",
}

def boutiqueClassName(source):
    '''Return the class name that handles `source` ("Boutique" if unknown).'''
    return boutiqueClassNames.get(source, "Boutique")

def boutiqueClass(source):
    '''Resolve the handler class object for `source` from module globals.'''
    return globals().get(boutiqueClassName(source))
# moved here from crawl.py
def interpretMarket(desig):
    '''market designator could be:
    AAA: three letters means faa airport code, use key "market"
    RGAAA: five letters means region designator, use key "region"
    1111: four digits means region code, use key "region"
    11: one or two digits means tier code, use key "tier"
    other string with space or comma in it: city name, use key "city"
    any other string: site key, use key "sitekey"
    '''
    # Numeric designators: 4 digits -> region code, 1-2 digits -> tier.
    try:
        number = int(desig)
    except ValueError:
        number = None
    if number is not None:
        if 1000 <= number <= 9999:
            return ("region", number)
        if 0 <= number <= 99:
            return ("tier", number)
    # Alphabetic designators: an "RG" prefix wins over a bare FAA code.
    if re.match(r'RG[A-Z]{3}', desig):
        return ("region", desig)
    if re.match(r'[A-Z]{3}', desig):
        return ("market", desig)
    # Spaces/commas suggest a "City, ST" name; anything else is a site key.
    if " " in desig or "," in desig:
        return ("city", desig)
    return ("sitekey", desig)
# Let's consider that the idea is to get tuples keyed to sitekeys including
# source (backpage, etc.)
# market anchor city/location (San Francisco, CA)
# application (escort, johnboard)
# code (airport code of the anchor city, SFO)
# regionid 4001/region (RGSFO), a grouping of markets
# tier (1=FBI focus cities, etc.)/tiername
class Boutique(object):
    '''Generate rows of wataux.sites_master (as web.storage) matching a
    set of market filters.  `application` and `source` are mandatory;
    at least one of code/city/sitekey/tier/region/regionid must also
    be supplied before genRows() will run a query.'''

    # Subclasses assign self.source in their __init__.  Defining a class
    # default here means a bare Boutique fails genRows() with the intended
    # ValueError("Must supply source") instead of an AttributeError.
    source = None

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        self.verbose = verbose
        self.application = application
        # Normalize falsy filter values ('' etc.) to None.
        self.code = code if code else None
        self.city = city if city else None
        self.sitekey = sitekey if sitekey else None
        self.tier = tier if tier else None
        # `region` may be an integer id (or numeric string) or a 5-char
        # designator such as 'RGSFO'; keep exactly one of the two fields.
        # Narrowed from a bare except: int() only raises TypeError (None)
        # or ValueError (non-numeric string) here.
        (self.region, self.regionid) = (None, None)
        try:
            self.regionid = int(region)
        except (TypeError, ValueError):
            self.region = region

    def genRows(self):
        '''Yield matching rows from wataux.sites_master.

        Raises ValueError if application/source or all optional filters
        are missing; warns (stderr + log) if the query matched nothing.'''
        db = watdb.Watdb(conf='wataux', engine=None)
        db.connect()
        required = []
        if self.application:
            required.append(wh('application', self.application))
        else:
            raise ValueError("Must supply application")
        if self.source:
            required.append(wh('source', self.source))
        else:
            raise ValueError("Must supply source")
        options = []
        if self.code:
            options.append(wh('code', self.code))
        if self.city:
            options.append(wh('city', self.city))
        if self.sitekey:
            options.append(wh('sitekey', self.sitekey))
        if self.tier:
            options.append(wh('tier', self.tier))
        if self.region:
            options.append(wh('region', self.region))
        if self.regionid:
            options.append(wh('regionid', self.regionid))
        # logger.info("options = %s", options)
        if not options:
            raise ValueError("Must supply at least one option: code,city,sitekey,tier,region,regionid")
        wheres = required
        wheres.extend(options)
        where = ' and '.join(wheres)
        # logger.info(where)
        empty = True
        # formerly db.select('sites_master', where=where):
        sql = 'select * from sites_master where %s' % where
        # logger.info("sql = %s", sql)
        for row in db.maybeFetch(sql):
            empty = False
            yield row
        # am trusting that this causes the db connection to be freed
        db = db.disconnect()
        if empty:
            if self.verbose:
                sys.stderr.write("No rows were generated for %s\n" % wheres)
            logger.warn("No rows were generated for %s" % wheres)

    def fetchBoutique(self, source, desig):
        """Stub: intended to resolve (source, desig) to a single market.
        Currently only logs what genRows() found and returns []."""
        rows = list(self.genRows())
        logger.info("There should be one row, in fact there are %s: %s", len(rows), rows)
        return []

    # Backward-compatible alias from the 0.5 "market" naming.
    fetchMarket = fetchBoutique
fetchMarket = fetchBoutique
def wh(column_name, value, rel='='):
    """Render one SQL WHERE term: (`column` rel quoted-value).

    Escaping is delegated to watdb.sqlquote -- is sqlquote good enough
    to prevent SQL injection?  Raises ValueError (with context) for
    empty/falsy values, which callers treat as "filter not supplied".
    """
    if value:
        return """(`%s` %s %s)""" % (column_name, rel, watdb.sqlquote(str(value)))
    else:
        raise ValueError("wh(): empty value for column %r" % column_name)
class BackpageBoutique(Boutique):
    '''Boutique specialized for the backpage source.'''

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        super(BackpageBoutique, self).__init__(
            verbose=verbose, application=application,
            code=code, city=city, sitekey=sitekey,
            tier=tier, region=region)
        self.source = 'backpage'
class CityvibeBoutique(Boutique):
    '''Boutique specialized for the cityvibe source.'''

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        super(CityvibeBoutique, self).__init__(
            verbose=verbose, application=application,
            code=code, city=city, sitekey=sitekey,
            tier=tier, region=region)
        self.source = 'cityvibe'
class MyredbookBoutique(Boutique):
    '''Boutique specialized for the myredbook source.'''

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        super(MyredbookBoutique, self).__init__(
            verbose=verbose, application=application,
            code=code, city=city, sitekey=sitekey,
            tier=tier, region=region)
        self.source = 'myredbook'
class HumaniplexBoutique(Boutique):
    '''Boutique specialized for the humaniplex source.'''

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        super(HumaniplexBoutique, self).__init__(
            verbose=verbose, application=application,
            code=code, city=city, sitekey=sitekey,
            tier=tier, region=region)
        self.source = 'humaniplex'
class ErosBoutique(Boutique):
    '''Boutique specialized for the eros source.'''

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        super(ErosBoutique, self).__init__(
            verbose=verbose, application=application,
            code=code, city=city, sitekey=sitekey,
            tier=tier, region=region)
        self.source = 'eros'
class SugardaddyBoutique(Boutique):
    '''Boutique specialized for the sugardaddy source.'''

    def __init__(self, verbose=VERBOSE, application=APPLICATION,
                 code=CODE, city=CITY, sitekey=SITEKEY,
                 tier=TIER,
                 region=REGION):
        super(SugardaddyBoutique, self).__init__(
            verbose=verbose, application=application,
            code=code, city=city, sitekey=sitekey,
            tier=tier, region=region)
        self.source = 'sugardaddy'
# 0.5 functional interface
def genSiteKeys(source=SOURCE,
                verbose=VERBOSE, application=APPLICATION,
                market=MARKET, city=CITY, sitekey=SITEKEY,
                tier=TIER,
                region=REGION):
    '''Functional facade (0.5 API): build the Boutique subclass for
    `source` and return its genRows() row generator.'''
    cls = boutiqueClass(source)
    boutique = cls(verbose=verbose,
                   application=application,
                   code=market,
                   city=city,
                   sitekey=sitekey,
                   tier=tier,
                   region=region)
    return boutique.genRows()
def main(argv=None):
    '''this is called if run from command line'''
    # process command line arguments
    if argv is None:
        argv = sys.argv
    # NOTE: "except getopt.error, msg" below is Python 2-only syntax;
    # this module as a whole targets Python 2.
    try:
        opts, args = getopt.getopt(argv[1:], "hvs:a:c:m:t:r:",
                                   ["echo=", "help",
                                    "source=", "application=", "city=", "market=", "tier=", "region="])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # default options
    my_verbose = VERBOSE
    my_source = SOURCE
    my_application = APPLICATION
    my_city = CITY
    my_market = MARKET
    my_tier = TIER
    my_region = REGION
    # process options
    for o,a in opts:
        if o in ("-h","--help"):
            print __doc__
            sys.exit(0)
        if o in ("--echo", ):
            print a
        if o in ("-v", "--verbose", ):
            my_verbose = True
        if o in ("-s", "--source", ):
            my_source = a
        if o in ("-a", "--application", ):
            my_application = a
        if o in ("-c", "--city", ):
            my_city = a
        if o in ("-m", "--market", ):
            my_market = a
        if o in ("-t", "--tier", ):
            my_tier = a
        if o in ("-r", "--region", ):
            my_region = a
    # resolve the handler class for the chosen source and dump its rows
    mktClass = boutiqueClass(my_source)
    print mktClass
    mkt = mktClass(verbose=my_verbose,
                   application=my_application, city=my_city, code=my_market,
                   tier=my_tier, region=my_region)
    for row in mkt.genRows():
        print row.source, row.application, row.tier, row.region, row.code, row.sitekey, row.url
# call main() if this is run as standalone
if __name__ == "__main__":
    sys.exit(main())
# End of boutique.py
|
Last month, when I was visiting “Three Sisters” in Katoomba, I decided to do a little experiment, shooting some footage with my iPhone 7 Plus to put together as a 4K film. Three Sisters in Katoomba is a formation of three unique rock outcrops off a cliff in Katoomba. This is situated in the Blue Mountain range which attracts thousands of tourists on a daily basis.
It is situated about 2 hours from Sydney’s CBD so making a trip requires some level of commitment but as its a well known tourist spot many people flock over daily to see the amazing sights.
My experiment involved using my DJI Osmo Mobile to house the iPhone 7 Plus and shooting some footage of this amazing location in 4K resolution. I was utilising an app called FiLMiC Pro which gives you all the manual controls you desire for shooting good quality film. My objective was also to try out both the 28mm and 56mm lens of the iPhone 7 Plus.
I have to admit that the powerful camera on board this little device is amazing. I was super impressed with the video quality that it captured, so continued the experiment by putting a 4K video together showcasing the “Three Sisters”.
PreviousIs Manual Mode on DSLR overrated?
|
#!/usr/bin/env python
"""
pymagpos -- MagnaCarta POS protocol (minimal robust implementation)
"""
import serial
import codes
import logging
import time
class MagposError(Exception):
""" Base Exception Class for this Module """
pass
class ResponseError(MagposError):
"""
ResponseError occur when the response to a command does not match the command's OK signal.
"""
def __init__(self, function, code):
self.code = code
self.function = function
self.response = [code]
def store_rawdata(self, response):
""" Stores the raw response for evaluation purpose """
self.raw = response
def read_rawdata(self):
"""
Returns the raw response data
:return: Raw response data
:rtype: list of ints
"""
return self.raw
def __str__(self):
return ("[{0}] Unintended response received:{1}".format(self.function,
codes.desc.setdefault(self.code, self.code)))
class TransactionError(MagposError):
"""
TransactionError occur when the amount that has been decreased does not match the given amount
"""
def __init__(self, card, old, new, amount):
self.card = card
self.old = float(old)/100
self.new = float(new)/100
self.amount = float(amount)/100
def __str__(self):
return "Difference in balance does not match the amount that should have been decreased.\
\nCard:{0}\t Amount:{1:.2f}\nOld:{2:.2f}\tNew:{3:.2f}"\
.format(self.card, self.amount, self.old, self.new)
class ConnectionTimeoutError(MagposError):
"""
ConnectionTimeoutError occur, when the connection between the USB/RS232 reader and the MagnaBox is broken
and/or the MagnaBox does not send a response message.
"""
def __init__(self):
pass
def __str__(self):
return ("Serial connection to MagnaBox timed out (did not send command?)")
class MagPOS:
    """
    MagPos Class implements functions to access payment features of the
    MagnaCarta-Security and Payment-System.

    NOTE(review): every method below is an unimplemented stub -- this
    class currently only defines the intended interface.
    """
    def __init__(self, device):
        """
        Initializes the serial port communication on the given device port
        :param device: serial port name
        :type device: str

        NOTE(review): stub -- `device` is currently ignored and no serial
        port is opened yet.
        """
        pass
    def start_connection(self, retries=5):
        """
        Initializes the connection
        :return: True if connection successful, False otherwise
        :rtype: bool
        :param retries: Max. Attempts to accomplish connection, Default value is 5
        :type retries: int
        """
        raise NotImplementedError()
    def card_on_reader(self):
        """
        Checks if there is a card on the reader
        :return: True if card on reader, False if not
        :rtype: bool
        """
        raise NotImplementedError()
    def set_display_mode(self, mode = 0, amount=0):
        """
        Sets the display configuration
        :return: True on success, False otherwise
        :rtype: bool
        :param mode: Config
        :type mode: int
        :param amount: (Optional) Amount that is asked for on display
        :type amount: int
        """
        raise NotImplementedError()
    def get_last_transaction_result(self):
        """
        Retrieves the details of last unacknowledged transaction
        :return: Returns List of relevant data: status code, card number and amount
        :rtype: list[int,int,int]
        """
        raise NotImplementedError()
    def response_ack(self):
        """
        Sends an acknowledge-signal to the MagnaBox
        """
        raise NotImplementedError()
    def decrease_card_balance_and_token(self, amount, card_number=0, token_index=0):
        """
        Gives command to decrease balance by amount
        :return: Returns list of retrieved data: card number, old balance, new balance, token id
        :rtype: list[int,int,int,int]
        :param amount: Amount in Cents the card balance shall be decreased
        :type amount: int
        :param card_number: (Optional) sets the card number from which balance should be decreased
        :type card_number: int
        :param token_index: (Optional) sets token id which should be decreased by 1
        :type token_index: int
        """
        raise NotImplementedError()
    def get_long_card_number_and_balance(self):
        """
        Retrieves the card number and balance of the card on card reader
        :return: Returns list containing the response data from MagnaBox: card number and balance
        :rtype: list[int]
        """
        raise NotImplementedError()
    def close(self):
        """ Closes serial connection to MagnaBox. Needed to release the serial port for further transactions."""
        raise NotImplementedError()
# Manual smoke test: opens the MagnaBox on the first USB serial port and
# performs the handshake (raises NotImplementedError while the methods
# above are still stubs).
if __name__ == '__main__':
    pos = MagPOS(device='/dev/ttyUSB0')
    pos.start_connection()
|
What do you get when you combine an unusual natural rock formation and a population looking for a practical place to settle down in? The unique Setenil de las Bodegas in northern Cadiz, that's what you get.
Incredibly enough, people chose to build their homes under these rocks for practical reasons. They figured that with the rocks protecting them from the hot Spanish sun as well as the cold in winter, all they had to build was the facades. In fact, it is calculated that people have lived under these rocks from as far back as pre-historic times!
So remember: any time you think your outdated parents are living underneath a rock, don't forget it might actually not be that bad. Have a great week everyone!
|
from __future__ import absolute_import
import functools
import os
import re
import sys
import tempfile
import traceback
import subprocess
import datetime
from PyQt4 import QtCore, QtGui
Qt = QtCore.Qt
from sgfs import SGFS
from sgactions.ticketui import ticket_ui_context
from sgpublish.uiutils import ComboBox, hbox, vbox, icon
class PublishSafetyError(RuntimeError):
    """Raised when the pre-publish safety checks reject an export."""
    pass
class TimeSpinner(QtGui.QSpinBox):
    """Spin box that displays a minute count as "H:MM" and parses several
    user-typed formats.

    Accepted input: "H:MM", a bare integer number of minutes, or a
    fractional number of hours (e.g. "1.5" -> 90 minutes).
    """

    def __init__(self):
        super(TimeSpinner, self).__init__(
            singleStep=15,
            maximum=60*8*5,  # one working week: 8h/day, 5 days
        )

    def textFromValue(self, value):
        # Floor division so the display stays correct under true division.
        return '%d:%02d' % (value // 60, value % 60)

    def valueFromText(self, text, strict=False):
        """Parse `text` into minutes.

        Returns None for unparseable text when `strict`, else 0.
        """
        m = re.match(r'(\d+):(\d{,2})', text)
        if m:
            return 60 * int(m.group(1)) + int(m.group(2) or 0)
        try:
            return int(text)
        except ValueError:
            pass
        try:
            return int(60 * float(text))
        except ValueError:
            pass
        if strict:
            return None
        else:
            return 0

    def validate(self, text, pos):
        # Must parse in strict mode: non-strict parsing maps invalid text
        # to 0, which made this validator accept any input.
        if self.valueFromText(text, strict=True) is not None:
            return QtGui.QValidator.Acceptable, pos
        else:
            return QtGui.QValidator.Invalid, pos
class Widget(QtGui.QWidget):
    """Publish dialog widget for sgpublish exporters (Python 2 / PyQt4).

    Collects everything needed for a publish: the Task and publish
    stream, name and version, a description, a thumbnail screenshot,
    an optional movie/frame sequence, a time log, and whether to
    promote the publish to a reviewable Version.
    """

    # Windows should hide on these.
    beforeScreenshot = QtCore.pyqtSignal()
    afterScreenshot = QtCore.pyqtSignal()

    # Need a signal to communicate across threads.
    loaded_publishes = QtCore.pyqtSignal(object, object)

    def __init__(self, exporter):
        super(Widget, self).__init__()
        self._exporter = exporter
        # (task_id, publish_name) pairs already in the backend; consulted
        # by safety_check() to forbid duplicate stream names.
        self._existing_streams = set()
        # Default stream name: the exporter's filename hint minus its
        # extension, with non-word runs collapsed to '_' and any trailing
        # _r#/_v# revision suffix stripped.
        basename = os.path.basename(exporter.filename_hint)
        basename = os.path.splitext(basename)[0]
        basename = re.sub(r'[^\w-]+', '_', basename)
        self._basename = re.sub(r'_*[rv]\d+', '', basename)
        self._setup_ui()
        # First screenshot.
        self.take_full_screenshot()

    def _setup_ui(self):
        """Build all child widgets; publish data is fetched on a thread."""
        self.setLayout(QtGui.QVBoxLayout())
        self._task_combo = ComboBox()
        self._task_combo.addItem('Loading...', {'loading': True})
        self._task_combo.currentIndexChanged.connect(self._task_changed)
        self._name_combo = ComboBox()
        self._name_combo.addItem('Loading...', {'loading': True})
        self._name_combo.addItem('Create new stream...', {'new': True})
        self._name_combo.currentIndexChanged.connect(self._name_changed)
        self._tasksLabel = QtGui.QLabel("Task")
        self.layout().addLayout(hbox(
            vbox(self._tasksLabel, self._task_combo),
            vbox("Publish Stream", self._name_combo),
            spacing=4
        ))
        # The name field is only editable for "Create new stream...".
        self._name_field = QtGui.QLineEdit(self._basename)
        self._name_field.setEnabled(False)
        self._name_field.editingFinished.connect(self._on_name_edited)
        self._version_spinbox = QtGui.QSpinBox()
        self._version_spinbox.setMinimum(1)
        self._version_spinbox.setMaximum(9999)
        self._version_spinbox.valueChanged.connect(self._on_version_changed)
        # Only warn the user once about manually overriding the version.
        self._version_warning_issued = False
        self.layout().addLayout(hbox(
            vbox("Name", self._name_field),
            vbox("Version", self._version_spinbox),
            spacing=4
        ))
        # Get publish data in the background.
        self.loaded_publishes.connect(self._populate_existing_data)
        self._thread = QtCore.QThread()
        self._thread.run = self._fetch_existing_data
        self._thread.start()
        self._description = QtGui.QTextEdit('')
        self._description.setMaximumHeight(100)
        self._thumbnail_path = None
        self._thumbnail_canvas = QtGui.QLabel()
        self._thumbnail_canvas.setFrameShadow(QtGui.QFrame.Sunken)
        self._thumbnail_canvas.setFrameShape(QtGui.QFrame.Panel)
        self._thumbnail_canvas.setToolTip("Click to specify part of screen.")
        self._thumbnail_canvas.mouseReleaseEvent = self.take_partial_screenshot
        self.layout().addLayout(hbox(
            vbox("Describe Your Changes", self._description),
            vbox("Thumbnail", self._thumbnail_canvas),
        ))
        self._movie_path = QtGui.QLineEdit()
        self._movie_browse = QtGui.QPushButton(icon('silk/folder', size=12, as_icon=True), "Browse")
        self._movie_browse.clicked.connect(self._on_movie_browse)
        self._movie_layout = hbox(self._movie_path, self._movie_browse)
        self.layout().addLayout(vbox("Path to Movie or Frames (to be copied to publish)", self._movie_layout, spacing=4))
        self._movie_browse.setFixedHeight(self._movie_path.sizeHint().height())
        self._movie_browse.setFixedWidth(self._movie_browse.sizeHint().width() + 2)
        self._promote_checkbox = QtGui.QCheckBox("Promote to 'Version' for review")
        # self.layout().addWidget(self._promote_checkbox)
        self._timelog_spinbox = TimeSpinner()
        add_hour = QtGui.QPushButton("+1 Hour")
        add_hour.setFixedHeight(self._timelog_spinbox.sizeHint().height())
        @add_hour.clicked.connect
        def on_add_hour():
            self._timelog_spinbox.setValue(self._timelog_spinbox.value() + 60)
        add_day = QtGui.QPushButton("+1 Day")
        add_day.setFixedHeight(self._timelog_spinbox.sizeHint().height())
        @add_day.clicked.connect
        def on_add_day():
            self._timelog_spinbox.setValue(self._timelog_spinbox.value() + 60 * 8)
        self.layout().addLayout(hbox(
            vbox("Time to Log", hbox(self._timelog_spinbox, "hrs:mins", add_hour, add_day)),
            vbox("Review", self._promote_checkbox),
        ))

    def _fetch_existing_data(self):
        """Worker (runs on self._thread): load the workspace's tasks and
        publishes, then emit loaded_publishes so the UI is populated on
        the main thread."""
        try:
            sgfs = SGFS()
            tasks = sgfs.entities_from_path(self._exporter.workspace)
            if not tasks:
                raise ValueError('No entities in workspace %r' % self._exporter.workspace)
            if any(x['type'] != 'Task' for x in tasks):
                raise ValueError('Non-Task entity in workspace %r' % self._exporter.workspace)
            publishes = sgfs.session.find(
                'PublishEvent',
                [
                    ('sg_link.Task.id', 'in') + tuple(x['id'] for x in tasks),
                    ('sg_type', 'is', self._exporter.publish_type),
                    ('sg_version', 'greater_than', 0), # Skipped failures.
                ], [
                    'code',
                    'sg_version'
                ]
            )
        except Exception as e:
            # Surface the failure in the task combo rather than silently.
            self._task_combo.clear()
            self._task_combo.addItem('Loading Error! %s' % e, {})
            raise
        else:
            self.loaded_publishes.emit(tasks, publishes)

    def _populate_existing_data(self, tasks, publishes):
        """Fill both combos from the fetched data, and re-select the
        stream this workspace last published into, if any."""
        if tasks:
            entity = tasks[0].fetch('entity')
            name = entity.get('code') or entity.get('name')
            if name:
                self._tasksLabel.setText('Task on %s %s' % (entity['type'], name))
        history = self._exporter.get_previous_publish_ids()
        select = None
        publishes.sort(key=lambda p: p['sg_version'])
        for t_i, task in enumerate(tasks):
            name_to_publish = {}
            for publish in publishes:
                # NOTE(review): identity ("is not") comparison between
                # entities; presumably the session caches/merges entity
                # objects so identity holds -- confirm.
                if publish['sg_link'] is not task:
                    continue
                self._existing_streams.add((task['id'], publish['code']))
                name = publish['code']
                name_to_publish[name] = publish
                if publish['id'] in history:
                    select = t_i, name
            self._task_combo.addItem('%s - %s' % task.fetch(('step.Step.short_name', 'content')), {
                'task': task,
                'publishes': name_to_publish,
            })
        # Drop the "Loading..." placeholder once real items exist.
        if 'loading' in self._task_combo.itemData(0):
            if self._task_combo.currentIndex() == 0:
                self._task_combo.setCurrentIndex(1)
            self._task_combo.removeItem(0)
        if select:
            self._task_combo.setCurrentIndex(select[0])
            for i in xrange(self._name_combo.count()):
                data = self._name_combo.itemData(i)
                if data and data.get('name') == select[1]:
                    self._name_combo.setCurrentIndex(i)
                    break

    def _task_changed(self, index):
        """Rebuild the stream list when the selected task changes."""
        data = self._name_combo.currentData()
        if not data:
            return
        # Remember whether "new stream" (or the loading placeholder) was
        # selected so the choice survives the rebuild.
        was_new = 'new' in data or 'loading' in data
        self._name_combo.clear()
        data = self._task_combo.currentData() or {}
        for name, publish in sorted(data.get('publishes', {}).iteritems()):
            self._name_combo.addItem('%s (v%04d)' % (name, publish['sg_version']), {'name': name, 'publish': publish})
        self._name_combo.addItem('Create New Stream...', {'new': True})
        if was_new:
            self._name_combo.setCurrentIndex(self._name_combo.count() - 1)
        else:
            self._name_combo.setCurrentIndex(0)

    def _name_changed(self, index):
        """Sync the editable name field and the next version number with
        the newly selected stream."""
        data = self._name_combo.itemData(index)
        if not data:
            return
        self._name_field.setEnabled('new' in data)
        self._name_field.setText(data.get('name', self._basename))
        self._version_spinbox.setValue(data.get('publish', {}).get('sg_version', 0) + 1)

    def _on_name_edited(self):
        # Sanitize the user-typed stream name down to word characters.
        name = str(self._name_field.text())
        name = re.sub(r'\W+', '_', name).strip('_')
        self._name_field.setText(name)

    def _on_version_changed(self, new_value):
        """Confirm (once) when the user overrides the auto-picked version."""
        data = self._name_combo.itemData(self._name_combo.currentIndex())
        if data.get('publish') and new_value != data['publish']['sg_version'] + 1 and not self._version_warning_issued:
            res = QtGui.QMessageBox.warning(None,
                "Manual Versions?",
                "Are you sure you want to change the version?\n"
                "The next one has already been selected for you...",
                QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel,
                QtGui.QMessageBox.Cancel
            )
            if res & QtGui.QMessageBox.Cancel:
                # Revert to the expected next version.
                self._version_spinbox.setValue(data['publish']['sg_version'] + 1)
                return
            self._version_warning_issued = True

    def _on_movie_browse(self):
        """Let the user pick a movie file or a sequence's first frame."""
        existing = str(self._movie_path.text())
        dialog = QtGui.QFileDialog(None, "Select Movie or First Frame")
        dialog.setFilter('Movie or Frame (*.mov *.exr *.tif *.tiff *.jpg *.jpeg)')
        dialog.setFileMode(dialog.ExistingFile)
        dialog.setDirectory(os.path.dirname(existing) if existing else os.getcwd())
        if existing:
            dialog.selectFile(existing)
        if not dialog.exec_():
            return
        files = dialog.selectedFiles()
        # NOTE(review): QStringList has no First() method in PyQt4; this
        # looks like it should be files[0] -- confirm this path is
        # actually exercised.
        path = str(files.First())
        self.setFrames(path)

    def setFrames(self, path):
        """Set the movie/frames path; auto-check "promote" when present."""
        self._movie_path.setText(path)
        if path:
            self._promote_checkbox.setCheckState(Qt.Checked)

    def take_full_screenshot(self):
        # Intentionally a no-op here; hosts/subclasses may override.
        pass

    def take_partial_screenshot(self, *args):
        """Interactively grab a region of the screen for the thumbnail."""
        path = tempfile.NamedTemporaryFile(suffix=".png", prefix="screenshot", delete=False).name
        self.beforeScreenshot.emit()
        if sys.platform.startswith('darwin'):
            # use built-in screenshot command on the mac
            proc = subprocess.Popen(['screencapture', '-mis', path])
        else:
            # ImageMagick's interactive "import" elsewhere.
            proc = subprocess.Popen(['import', path])
        proc.wait()
        self.afterScreenshot.emit()
        # A zero-byte file means the user cancelled the grab.
        if os.stat(path).st_size:
            self.setThumbnail(path)

    def setThumbnail(self, path):
        """Display `path` scaled to fit and remember it for the publish."""
        self._thumbnail_path = path
        pixmap = QtGui.QPixmap(path).scaled(200, 100, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        self._thumbnail_canvas.setPixmap(pixmap)
        self._thumbnail_canvas.setFixedSize(pixmap.size())

    def name(self):
        # The existing stream's name if one is selected, else the field.
        data = self._name_combo.currentData()
        return data.get('name', str(self._name_field.text()))

    def description(self):
        return str(self._description.toPlainText())

    def version(self):
        return self._version_spinbox.value()

    def thumbnail_path(self):
        return self._thumbnail_path

    def _path_is_image(self, path):
        # Returns the path itself (truthy) for known still-image
        # extensions, None otherwise.
        if os.path.splitext(path)[1][1:].lower() in (
            'jpg', 'jpeg', 'tif', 'tiff', 'exr',
        ):
            return path

    def frames_path(self):
        """The browsed path if it points at an image sequence, else None."""
        path = str(self._movie_path.text())
        if path and self._path_is_image(path):
            return path
        return None

    def movie_path(self):
        """The browsed path if it points at a movie file, else None."""
        path = str(self._movie_path.text())
        if path and not self._path_is_image(path):
            return path
        return None

    def safety_check(self, **kwargs):
        """Pre-publish validation; returns False to abort the publish."""
        # Check that the name is unique for publishes on this task.
        task = self._task_combo.currentData().get('task')
        existing_name = self._name_combo.currentData().get('name')
        new_name = str(self._name_field.text())
        if existing_name is None and (task['id'], new_name) in self._existing_streams:
            print 'XXX', task['id'], repr(existing_name), repr(new_name)
            print self._existing_streams
            QtGui.QMessageBox.critical(self,
                "Name Collision",
                "You cannot create a new stream with the same name as an"
                " existing one. Please select the existing stream or enter a"
                " unique name.",
            )
            # Fatal.
            return False
        # Promoting to version without a movie.
        if self._promote_checkbox.isChecked() and not (self.frames_path() or self.movie_path()):
            QtGui.QMessageBox.critical(self,
                "Review Version Without Movie",
                "You cannot promote a publish for review without frames or a"
                " movie.",
            )
            # Fatal.
            return False
        # Promoting to version without a timelog.
        if self._promote_checkbox.isChecked() and not self._timelog_spinbox.value():
            res = QtGui.QMessageBox.warning(self,
                "Version without Time Log",
                "Are you sure that this version did not take you any time?",
                QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                QtGui.QMessageBox.No,
            )
            if res & QtGui.QMessageBox.No:
                return False
        return True

    def export(self, **kwargs):
        """Run the publish inside the ticket UI context (error reporting)."""
        with ticket_ui_context(pass_through=PublishSafetyError):
            return self._export(kwargs)

    def _export(self, kwargs):
        """Perform the publish: resolve the task, call the exporter's
        publish(), then create a TimeLog if any time was entered."""
        if not self.safety_check(**kwargs):
            raise PublishSafetyError()
        task_data = self._task_combo.currentData()
        task = task_data.get('task')
        if not task:
            # The background load may not have finished; fall back to the
            # workspace's tagged Task entities.
            sgfs = SGFS()
            tasks = sgfs.entities_from_path(self._exporter.workspace, 'Task')
            if not tasks:
                raise ValueError('Could not find SGFS tagged entities')
            task = tasks[0]
        stream_data = self._name_combo.currentData()
        parent = stream_data.get('publish')
        # Do the promotion.
        if self._promote_checkbox.isChecked():
            review_version_fields = self._exporter.fields_for_review_version(**kwargs)
        else:
            review_version_fields = None
        publisher = self._exporter.publish(task,
            name=self.name(),
            description=self.description(),
            version=self.version(),
            parent=parent,
            thumbnail_path=self.thumbnail_path(),
            frames_path=self.frames_path(),
            movie_path=self.movie_path(),
            review_version_fields=review_version_fields,
            export_kwargs=kwargs,
        )
        # Create the timelog.
        minutes = self._timelog_spinbox.value()
        if minutes:
            publisher.sgfs.session.create('TimeLog', {
                'project': publisher.entity.project(),
                'entity': publisher.link,
                'user': publisher.sgfs.session.guess_user(),
                'duration': minutes,
                'description': '%s_v%04d' % (publisher.name, publisher.version),
                'date': datetime.datetime.utcnow().date(),
            })
        return publisher
|
Poppy seed tea is a popular herbal tea infusion using the seeds from the poppy plant. There are several different types of Poppy Seed Tea, depending on the species of the plant and seeds used. For instance, the tea can be made utilizing seeds from the California Poppy Plant (Eschscholzia californica). A more dangerous form of the tea is made with the seeds of Papaver somniferum, which is also known as the opium poppy plant. Individuals choose the type of seeds based on personal preference and the benefits that they would like to experience. This post is dedicated to explaining the main facts of this complex tea in detail, which is why it is considered the Ultimate Guide. Depending on the type of seeds used, you can usually start feeling the positive effects of the tea within an hour. I have detailed the entire process of making California Poppy Seed Tea on my post, How to Make Poppy Tea. The basic steps are the same for all types of Poppy Seed Tea, and the only real difference is the type of seeds used.
I always recommend speaking with your doctor or healthcare professional prior to consuming any form of herbal tea. This is especially important for poppy seed tea. Several species of seeds exist, and each one has a unique profile and provides different side effects. Poppy tea may interact with prescription medications, and your doctor can help minimize any risks. This post is for informational purposes only, and it does not include medical advice.
This is the Ultimate Guide for Poppy Seed Tea. I have spent years studying this form of herbal tea, and I have compiled the main facts to create this post. Since this is a complex issue, I have decided to separate it into several different categories. I will be creating a table of contents in the near future to help navigate this sizeable article.
Poppy Seed Tea is a form of herbal tea that utilizes the seeds from the poppy flower. These poppy flowers are all members of the Papaveraceae family, which is also known as the poppy family. There are ~ 85 species that produce poppy seeds, and any of these can be used to make poppy seed tea. Don’t worry, I am not going to discuss every species of poppy on this guide. Instead, I will focus on the most relevant plants. Each type of poppy seeds is unique, but they do share certain characteristics.
Poppy seed tea is a very popular form of herbal tea, and it has been used for centuries. Ancient cultures used poppy seed tea for both religious and medicinal purposes. A majority of the historical preparations utilized the P. somniferum form of the tea. As stated above, this form of the tea may contain opiate compounds. The Papaver somniferum plant was first cultivated in Ancient Mesopotamia ~ 3400 BC. The Sumerians lovingly referred to the beautiful flower as the “joy plant.” Cultivation and harvesting knowledge spread rapidly, and techniques continuously improved. The Assyrians and Babylonians shared their harvesting and tea preparation techniques with the Egyptians. Around 1300 BC, the practice of consuming poppy tea quickly increased during the reign of the Pharaoh Tutankhamun. Tutankhamun ordered his massive number of slaves to plant large poppy fields so they could harvest seeds in addition to raw opiate compounds. The Egyptians then traded the excess poppy products to Greece and other parts of Europe, which spread the reach of poppy tea even further.
Poppy seed tea recipes have been perfected for centuries, and there are several great methods to prepare a delicious cup of tea. The poppy seed tea preparation techniques are similar to other forms of herbal tea, but they have one big difference. Herbal tea infusions primarily utilize warm or hot water, and poppy seed tea can be made using either cold or warm water. I actually suggest using cold water, but it is a matter of personal preference.
If you would like to learn more about making poppy seed tea, check out my Poppy Seed Herbal Tea Recipe.
This section will discuss two of the primary forms of poppy seed tea. It includes information pertaining to the specific poppy plants that produce the individual species of seeds.
California Poppy Tea is made with the plant Eschscholzia californica. The California poppy plant is also known as the golden poppy thanks to its beautiful golden flower. It is primarily grown in gardens and blooms in the summer. The tea is made with all parts of the plant except for the roots. The roots are actually used to make certain types of tinctures. Many people consume the tea in order to feel more relaxed. It is commonly made prior to bed time to help induce sleep. You can read more about this plant in my detailed post, California Poppy Plant Guide.
Another form of poppy tea utilizes seeds produced by the poppy plant, P. somniferum. The opium poppy plant, Papaver somniferum, grows up to 5 feet tall. The plant thrives with fertile soil and full sunlight. Papaver somniferum is unique even among other poppy plants because it produces opiate alkaloids as a natural defense against predators. Although a majority of the active opiate compounds are found in the pod walls, the seeds also contain trace amounts of these same alkaloids. During the harvesting process, the seeds come into contact with the other parts of the flower. The amount of the opiate compounds that the seeds contain varies widely from plant to plant. The most common alkaloid found in poppy seeds is morphine, which still comprises a small percentage (~ 8-10%) of total alkaloids. The other alkaloids contained in the seeds include codeine and thebaine. The drug manufacturers obtain the compounds from the pod of the plant to help create prescription drugs. Most poppy seeds are washed and the compounds are removed from the seeds.
Poppy seeds are seeds derived from a poppy plant. Poppy plants are members of the Papaveraceae family, and several species of poppy plant exist. As a result, several types of poppy seeds exist, and each one has different characteristics. Only one of the species of poppy plants (Papaver somniferum) naturally contains opiate compounds like morphine. Many of the poppy seeds sold online and in grocery stores are derived from the Papaver somniferum plant, and they have been washed to remove the trace amounts of opiate compounds. The coloring of unwashed poppy seeds are darker than washed seeds. Most unwashed seeds are dark blue or black in color. The package or item description will usually list whether or not the seeds are washed. It is important for you to discover the particular type of seeds that you have purchased and whether they are washed or unwashed.
I always recommend purchasing poppy seeds (and many other consumer goods) online. Despite my suggestion, some individuals still attempt to purchase the seeds in bulk from a local grocery store. There are various reasons that people choose local stores, and often it is because they cannot wait for shipping. Other people prefer supporting local businesses, or they want to see the seeds in person before they purchase.
The best chance for finding a variety of delicious poppy seeds is to search local health food stores, or natural food markets. Many of the national chain stores do not sell fresh or bulk poppy seeds. Many never have, and others were forced to stop selling bulk seeds because they can’t compete with the health food stores. Some individuals also run into another common problem: Local stores may carry the seeds, but they don’t have the item in stock. Chances are that there are several people in the average town/city looking for fresh bulk poppy seeds. There may be hundreds in larger cities. In summary, it can be difficult to find quality bulk poppy seeds locally, but the best chance is to check health food stores. These stores also may not sell quality seeds, but they are your best chance. Someone new to the poppy seed scene can spend hours searching for quality seeds, which is why many people purchase online. It may take a little longer, but shipping times are getting faster and cheaper. You can learn more about the benefits of purchasing seeds online by reading the next section.
I suggest purchasing poppy seeds online for several reasons. First, it is much easier to find bulk poppy seeds online. Second, the quality is much better online than anything you will be able to find locally. Thirdly, prices are usually lower than what you find in the store. The final reason is that the item descriptions are usually much better. One primary drawback also exists, and it was briefly mentioned above. Purchasing seeds online may result in you having to wait for delivery; however, next day shipping is usually available, and Same Day shipping is becoming increasingly popular. You have better chances of seeing the Same Day option if you live in a larger city. I have completed a List of the Best Seeds for Tea that ranks each brand. I keep that list updated on a regular basis to reflect changes in quality.
I currently have one brand of poppy seeds that is my favorite. Blue Bird Poppy Seeds are delicious and always fresh. I have included a link to learn more about these poppy seeds above. Clicking on the link or picture above will take you to the Lone Goose Bakery website, where you can learn more about these seeds or purchase, if desired.
Poppy seed tea has become more popular over the past ten years as other forms of the tea have declined. The other form of tea, Poppy Pod Tea, has decreased in popularity as it has become increasingly difficult to buy/sell poppy pods. Poppy pod possession (Papaver somniferum) is actually illegal in the United States and many other countries. Many companies historically sold the pods for decoration purposes, but this was still illegal. About 10 years ago, the government began enforcing the laws more strictly, resulting in poppy pods becoming harder to find. Poppy seeds are still perfectly legal to buy and sell in the United States and most other countries. Many analysts are anticipating a further increase in poppy seed tea use as it becomes even harder for people to obtain other pain relief medications.
Poppy seed tea has a wide range of effects, both positive and negative. The effects can vary from individual to individual, and the species of poppy seeds play a large part too. You are unique, and the way that any form of tea affects you is also unique. This section contains some general info related to the main effects of this form of tea, and it is separated into positive and negative categories.
Poppy seed tea is primarily consumed for its calming and relaxing effects. Different types of seeds produce different benefits. Some forms even produce euphoric effects. Use of poppy seed tea originated in parts of Central and Eastern Europe and Central and South Central Asia thousands of years ago.
Tea drinkers consume this beverage to obtain the following possible benefits: pain relief, anxiety reduction, lower blood pressure, mood improvement, anti-diarrheal, and sleep improvement. Some people consume poppy seed tea to help with the withdrawals from other opiates.
Poppy seeds contain several vitamins & minerals such as calcium, copper, fiber, iron, magnesium, manganese, potassium, and zinc.
I suggest reading my detailed guide of the Benefits of Poppy Tea to learn more about this complex subject.
There are also possible negative effects associated with poppy seed tea. The negative effects are very dependent on the type of poppy seeds that are consumed. For California Poppy Seed Tea the negative side effects include: feeling drowsy or sleepy, lower blood pressure, slow breathing, and possible nausea. For the more dangerous form of the tea (P. somniferum), negative effects include: lethargy, constipation, urinary retention, decreased blood pressure, slower breathing, and nausea. The nausea could possibly be due to noscapine, and this negative effect is usually more common with first time uses.
The P. somniferum form of this tea also has possible long term negative effects. People that consume large amounts of unwashed seeds regularly may become physically and psychologically addicted. Some individuals have chosen to consume poppy seed tea to help them with the withdrawal symptoms from other opiates. These individuals are at higher risk for becoming addicted to the tea. In the end, they are risking replacing one addiction for another. These people may experience withdrawal symptoms if they suddenly stop consuming P. somniferum poppy seed tea.
Consuming extremely large amounts of P. somniferum seeds can lead to severe respiratory depression that may result in death. It is also extremely dangerous to mix consumption of this form of poppy seed tea with other prescription medications. Mixing the tea with other pain or anxiety medications can lead to an even greater impact on breathing and blood pressure, and it may also ultimately lead to death. Use common sense and contact your doctor prior to consuming any forms of herbal tea.
Poppy seed tea is following the trend of other forms of herbal tea and becoming increasingly popular. Many people make the mistake of trying to make this tea without learning the complex nature and risks. Individuals that take the time to read articles like this and familiarize themselves with the risks can minimize the potential for unpleasant side effects. It is possible to experience several health benefits from this tea if the correct species is used. If you have any questions, please contact me or leave a comment below. I really do enjoy my discussions with the members of the PSTR Community!
The species Papaver somniferum produces seeds that may contain trace amounts of opiate alkaloids. Consuming tea made with these unwashed seeds may be dangerous, especially if they are consumed in high amounts.
You can help support this community without spending an extra penny. If you’re planning on buying something (literally anything) on Amazon, click one of the Amazon links contained on this page before making your purchase. This website will make a small percentage of your total purchase price for any items purchased within 24 hours of clicking the link. You can purchase anything on Amazon, it doesn’t have to be something advertised on this website. I sincerely appreciate your support, it helps me dedicate a sufficient amount of my time to enhance this free resource!
This is currently my favorite brand of poppy seeds sold on Amazon. It is usually in stock, and it is being sold at a reasonable price. You can click the link or picture above to be taken to Amazon to learn more about Food to Live Poppy Seeds!
Do you know of other information about poppy seed tea? Please share your knowledge with this community by posting a comment below!
Poppy Seed Tea has been used for thousands of years for various purposes. Learn all of the fun facts pertaining to this potent beverage.
I was totally against you doing this at first. I have totally changed my mind tho. Ty for all your work and information. Truly appreciated. Sorry for some of the bad things I said in the past.
Fabulous… YR doin’ wonderful work!
I thought I had signed up for your newsletter in the past, but I never received any, so I will try again.
Let me know if you have any issues and I will look into it. We are revamping the newsletter and our contact page.
I ordered as you recommended last week, BB. The 5lb and got a 7% discount.
I used your recipe and WOW!!! Way better than TNT by a long shot! Well worth the price.
I wanted to know about using them the 2nd time. Can you give me instructions on how you do that?
I too have spent many hours/days researching poppy seeds and the brands that are good for use. My research always includes finding companies that sell bulk seeds and the very first thing I do is email them. I ask the companies whether their seeds are steamed, irradiated, or treated with ethylene oxide. We all know that we’re trying to avoid the steamed seeds, as well as the irradiated, which is a process that uses harmful (debatable by some) radiation to clean the seeds of fungi and other contaminates as well as rendering them non viable. It’s the Ethylene Oxide (ETO) that concerned me here. I found out about ETO while asking whether or not someone’s seeds where washed or irradiated to which they said no but they where treated with ETO.
ETO is a liquid chemical that turns to gas at a certain temp and it’s also used to treat the seeds for contaminates. In the medical field, it’s used to sterilize equipment. So I researched ETO and found that companies cannot call their product organic if they are treated with it but they also don’t have to disclose its use. Many studies have been done on its use and it turns out that it is known to cause cancer at high exposure levels. Mainly for the people who work with it.
https://www.cdc.gov/niosh/pgms/worknotify/ethyleneoxide.html <—-article about a study.
I’ll wrap it up. I advise that everyone does their research because we are possibly exposed to high levels of ETO. I also recommend that everyone gives companies their opinion on this treatment in hopes we can influence them to return to selling raw seeds. After all what’s worse, a chemical known to cause cancer or maybe a little mold on a few seeds that is unlikely if they’re handled correctly.
Brandon, I’m writing this in hopes that you post it for everyone’s info, you can edit it if you like. I just feel it’s important to know.
Thanks for the informative post! No editing necessary!
Thanks for the heads up brochacho.
|
#! /usr/bin/env python
import click
import re
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
# Usage something like:
# makeShortContigs.py -l 200000 -c 1,2,3,X -r human_g1k_v37_decoy.fasta
@click.command(context_settings = dict( help_option_names = ['-h', '--help'] ))
@click.option('--length', '-l', type=int, help='length of the contigs to export', required=True)
@click.option('--contigs', '-c', type=str, help='ids of contigs in the fasta file', required=True)
@click.option('--reference', '-r', type=str, help='source reference file', required=True)
def exportShortContigs(length, contigs, reference):
    """Write the first `length` bases of each requested contig to stdout as FASTA.

    :param length: number of leading bases to keep from each contig
    :param contigs: comma-separated contig IDs, e.g. "1,2,3,X"
    :param reference: path to the source reference FASTA file
    """
    # Parse the option into a set for exact-match membership tests.
    # BUGFIX: the original tested `shortID in contigs`, a *substring* test on
    # the raw option string, so contig "1" would wrongly match "12,X" etc.
    contigsToPrint = set(contigs.split(","))
    for seq_record in SeqIO.parse(reference, "fasta"):
        # seq_record.id is the first whitespace-delimited token of the header,
        # e.g. "chr1" from ">chr1 dna:chromosome ...".  The split()/replace()
        # is defensive in case a full raw header line is stored as the id.
        shortID = seq_record.id.split()[0].replace(">", "")
        if shortID in contigsToPrint:
            newSeq = seq_record.seq[0:length]
            sys.stdout.write(
                SeqRecord(newSeq, id=seq_record.id,
                          description=seq_record.description).format("fasta"))
if __name__ == "__main__":
    # click parses the command-line options and invokes the command.
    exportShortContigs()
|
To embellish the Concordia ideology of BREW, faculty, administration and students have been devising a potential curriculum change that would push students outside the classroom several times throughout their academic career.
At the Faculty Senate meeting on Sept. 21, faculty voted in favor of requiring two integrative learning experiences and removing the core capstone requirement.
Intensive Integrative Learning Experiences is a proposed change to the curriculum for students to gain experience “that would provide…opportunities to apply knowledge, skills and attitudes in situations involving complexity and uncertainty,” according to the Curriculum Committee. These experiences can happen within a class, study abroad, internships, research and credit bearing or non-credit bearing experiences.
The Calendar Change Committee formulated the idea of implementing IILE into the curriculum in the summer of 2014, according to Dr. Jean Bokinskie, associate professor of nursing and chair of the Curriculum Committee. In Sept. 2014, her committee had been given the task to develop two integrative learning experiences.
Dr. Kirsten Theye, associate professor of communication studies and theatre art, joined the Core Committee two years ago. After attending a conference and workshop about integrated learning, she agrees with the idea of IILE.
“It’s a bold move, but IILE is in alignment with students’ needs and wants,” Theye said.
If enacted, an IILE would have to adhere to five criteria, according to Bokinskie. Bokinskie outlined the tentative five criteria to be venturing outside the classroom, working with others while addressing complex situations, creating interdisciplinary responses to these encounters while ruminating on diverse perspectives, experiencing frustration and ambiguity that lead to the discovery of issues outside what is stated in the syllabus and sharpening skills to prepare students for future employment. If IILE is implemented, these standards would need to be passed by the faculty senate.
Kiersten McMahon, student representative for the Core Committee and faculty senate and academic affairs officer for SGA, supports the concept of the new curriculum.
A student would be required to have an IILE during their freshman or sophomore year, then take the second one during their junior or senior year. A student may have more than two experiences during their academic career.
IILE would be implemented no earlier than the fall of 2017. Whether students will be immersed into IILE completely or gradually brought into it is still up for discussion, according to Bokinskie.
“I don’t know what the future holds,” Bokinskie said.
While the new curriculum idea has been extolled, some questions did arise from other faculty during the meeting. Dr. Dawn Duncan, professor of English and global studies, had several concerns regarding the removal of the Core Capstone.
The Core Capstones had been implemented into the curriculum in the 2010-2011 school year. The purpose of the Capstone is to “provide transitional learning experiences as students move forward from a period of study and preparation, and…apply their knowledge through real-world encounters,” according to the application guidelines for the Core Capstone Course. For a Core Capstone to be approved, it has to be writing intensive, have a noteworthy experiential component, address an issue that has a global impact, allow students to reflect upon the process of BREW and to be taken during senior year, according to the Core Capstone application guidelines.
Some of the issues of the capstone that were voiced during the meeting include the limitation of having to take a capstone course during senior year, the unavailability of a capstone course in one’s major and lack of intensity in some capstone courses due to a dearth of prerequisites.
Since the vote was cast at the end of the meeting, allowing no further discussion, Duncan sent a document titled “Concerning the Removal of the Core Capstone Courses,” to the president, the dean and faculty senate representatives for each department. In this document, she noted three problems: the concern of availability of resources to provide these experiences to all students, the quick vote at the faculty meeting and the lack of intensity in some capstone courses. To amend these problems, she asks to vote on keeping the Core Capstone as one of the IILE, void the vote at the faculty meeting, change the name of the capstone to fit the new curriculum, provide support for professors and aid the Core Committee with turning down capstone courses that do not meet the standards.
Six departments replied to her stating they agree with her call for desired action and three people had responded stating their dislike to Duncan calling some capstone courses “watered-down.” Two days after Duncan sent her original proposal, she sent another email acknowledging the displeasure noted on her usage of “watered-down” with the capstone courses, but reiterated that it was her interpretation of what was said about the issues with capstone during the faculty senate meeting.
“That is my choice of wording, and I stand by it for the reasons given,” Duncan said in the email.
As for students, the worry of having a heavier load comes into question. Bokinskie, Theye and McMahon all said that a heavier load should not be a complication since many students are partaking in an experience similar to the idea of integrated learning — having internships, studying abroad or participating in research.
“To me, it wouldn’t be a huge stress to roll out [IILE] for every student,” Theye said.
Other concerns about IILE that arose during the meeting were about the delegation of professors’ time between their classes and the supervision of students who are participating in a non-credit experience, the availability of resources when developing an IILE class and about how it will be implemented. These questions cannot be addressed until an Integrative Learning Committee is selected, according to Bokinskie.
|
"""
#: 27
Title: Remove Element
Description:
------
Given an array and a value, remove all instances of that value in place and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length.
Example:
Given input array nums = `[3,2,2,3]`, val = `3`
Your function should return length = 2, with the first two elements of nums being 2.
------
Time: O(n)
Space: O(1)
Difficulty: Easy
"""
class Solution(object):
    def removeElement(self, nums, val):
        """Remove all occurrences of `val` from `nums` in place.

        Two-pointer approach: when the head element equals `val`, overwrite
        it with the tail element and shrink the tail; otherwise advance the
        head.  O(n) time, O(1) extra space.  Element order is not preserved,
        which the problem statement explicitly allows.

        :type nums: List[int]
        :type val: int
        :rtype: int -- the new logical length of `nums`
        """
        h = 0
        t = len(nums) - 1  # len() instead of the unidiomatic nums.__len__()
        while h <= t:
            if nums[h] == val:
                # Copy the tail value in; do not advance h -- the swapped-in
                # value still needs to be checked on the next iteration.
                nums[h] = nums[t]
                t -= 1
            else:
                h += 1
        return t + 1

    def removeElement2(self, nums, val):
        """Alternative method, but not faster than the first one.

        Repeatedly deletes the first occurrence of `val`; O(n^2) worst case
        because each deletion shifts the remaining elements left.

        :type nums: List[int]
        :type val: int
        :rtype: int
        """
        # list.remove() deletes the first occurrence by value in one scan,
        # replacing the original double scan nums.pop(nums.index(val)).
        while val in nums:
            nums.remove(val)
        return len(nums)
if __name__ == '__main__':
    # Tiny smoke test: remove 4 from the sample list and show what remains.
    solver = Solution()
    values = [4, 1, 2, 3, 5]
    target = 4
    new_len = solver.removeElement(values, target)
    print(values[:new_len])
|
0%67°57°Night - Clear. Winds variable at 9 to 14 mph (14.5 to 22.5 kph). The overnight low will be 61 °F (16.1 °C).Partly cloudy with a high of 64 °F (17.8 °C). Winds variable at 4 to 9 mph (6.4 to 14.5 kph).
20%64°50°Mostly sunny today with a high of 64 °F (17.8 °C) and a low of 50 °F (10.0 °C).
0%65°59°Mostly cloudy today with a high of 65 °F (18.3 °C) and a low of 59 °F (15.0 °C).
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-08 19:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename reviews auth groups to embed the provider type and numeric pk.

    Forward: 'reviews_<provider._id>_<perm>' becomes
    'reviews_<type>_<pk>_<perm>' where <type> is one of
    preprint/collection/registration, derived from the provider row's type.
    Reverse: restores the original 'reviews_<provider._id>_<perm>' names.
    """

    dependencies = [
        ('osf', '0125_auto_20180808_1942'),
    ]

    operations = [
        migrations.RunSQL(
            # Forward SQL: look up each provider by its string _id (the 2nd
            # underscore-separated token of the group name) and rebuild the
            # name from the provider type, numeric id and permission suffix.
            [
                """
                UPDATE auth_group AG0
                SET name = (
                    SELECT 'reviews_' ||
                        CASE
                            WHEN P.type = 'osf.preprintprovider'
                                THEN 'preprint'
                            WHEN P.type = 'osf.collectionprovider'
                                THEN 'collection'
                            WHEN P.type = 'osf.registrationprovider'
                                THEN 'registration'
                        END || '_' || id || '_' || split_part(AG0.name, '_', 3)
                    FROM osf_abstractprovider P
                    WHERE _id = split_part(AG0.name, '_', 2)
                )
                WHERE AG0.name LIKE 'reviews_%';
                """
            ], [
                # Reverse SQL: look up the provider by numeric pk (now the 3rd
                # token) and rebuild the old name from its string _id.
                """
                UPDATE auth_group AG0
                SET name = (
                    SELECT 'reviews_' || P._id || '_' || split_part(AG0.name, '_', 4)
                    FROM osf_abstractprovider P
                    WHERE id = split_part(AG0.name, '_', 3)::INT
                )
                WHERE AG0.name LIKE 'reviews_%';
                """
            ]
        )
    ]
|
: unless you had a lease w/ the prior owners which has to be honored when the new owners take over, you are out of luck. You dont have to be informed when the house is for sale. As a new owner, he is entitled to make renovations as well as ask you to leave since you are a month to month tenant. Although, it doesnt sound as if he gave you a proper 30 day notice to move. As a owner, he is entitled to make repairs and as many as he sees fit. Why didnt you offer to pay the $900 rent and continue living there? Your mistake was believing you were 'entitled' to the same renovations due to the increase. He has the right to increase your rent, without your approval, with or without renovations. Now, you're on his bad side. You do have rights as a tenant and should contact the electrical company if you dont have any service. This is illegal and can fall under harrassment. You should also contact local authorities if you have no heat in the winter. But ultimately, you will have to move.
|
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
from topaz.modules.ffi.type import type_object, ffi_types, W_TypeObject, VOID
from topaz.modules.ffi.dynamic_library import coerce_dl_symbol
from topaz.modules.ffi.function_type import W_FunctionTypeObject
from topaz.modules.ffi.function import W_FFIFunctionObject
from rpython.rlib import clibffi
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import lltype, rffi
class W_VariadicInvokerObject(W_Object):
    """Ruby-level FFI::VariadicInvoker: calls a C function with a variable
    argument list by building a fresh FFI::Function per invocation.
    """
    classdef = ClassDef('VariadicInvoker', W_Object.classdef)

    def __init__(self, space):
        W_Object.__init__(self, space)
        # Filled in by method_initialize; None until then.
        self.w_info = None
        self.w_handle = None

    @classdef.singleton_method('allocate')
    def singleton_method_allocate(self, space, args_w):
        # Ruby `VariadicInvoker.allocate` -> a bare, uninitialized instance.
        return W_VariadicInvokerObject(space)

    @classdef.method('initialize')
    def method_initialize(self, space, w_handle, w_arg_types,
                          w_ret_type, w_options=None):
        # Stash return type, options hash and the dl symbol handle for later
        # use by method_invoke.
        self.w_ret_type = w_ret_type
        self.w_options = w_options
        self.w_handle = w_handle
        if w_options is None:
            # No options given: fall back to an empty type map.
            w_type_map = space.newhash()
        else:
            # Extract options[:type_map] via the Ruby [] protocol.
            w_key = space.newsymbol('type_map')
            w_type_map = space.send(w_options, '[]', [w_key])
        # Delegate the rest of the setup to the Ruby-level #init method.
        space.send(self, 'init', [w_arg_types, w_type_map])

    @classdef.method('invoke', arg_values_w='array')
    def method_invoke(self, space, w_arg_types, arg_values_w):
        # Each invocation gets its own FFI::Function, because the concrete
        # argument types can differ from call to call for variadics.
        w_func_cls = space.getclassfor(W_FFIFunctionObject)
        w_func = space.send(w_func_cls, 'new',
                            [self.w_ret_type, w_arg_types,
                             self.w_handle, self.w_options])
        return self._dli_call(space, w_func, arg_values_w)

    @jit.dont_look_inside
    def _dli_call(self, space, w_func, arg_values_w):
        # XXX we are missing argument promotion for the variadic arguments here
        # see
        # http://stackoverflow.com/questions/1255775/default-argument-promotions-in-c-function-calls
        return space.send(w_func, 'call', arg_values_w)
|
Very elegant. i love the spray of flowers. So colorful. hugs Mrs A.
Oh wow, I love this card, the white and red are just so striking.
|
from neo.core.baseneo import BaseNeo
class Block(BaseNeo):
    """
    Top-level Neo container for one recording session.

    Gathers all the data of the session, whether discrete or continuous.
    Unlike :py:class:`Segment`, a Block is not necessarily temporally
    homogeneous.

    *Usage*:
    TODO

    *Required attributes/properties*:
        None

    *Recommended attributes/properties*:
        :name: a label for the dataset
        :description: text description
        :file_origin: filesystem path or URL of the original data file
        :file_datetime: creation date and time of the original data file
        :rec_datetime: date and time of the original recording
        :index: integer you may use to order your Blocks (ignored by Neo)

    *Container of*:
        :py:class:`Segment`
        :py:class:`RecordingChannelGroup`

    *Properties*:
        list_units: all :py:class:`Unit` objects found anywhere in the
            block -- handy when analyzing every neuron of a session.
        list_recordingchannels: all :py:class:`RecordingChannel` objects
            found anywhere in the block.
    """

    def __init__(self, name=None, description=None, file_origin=None,
                 file_datetime=None, rec_datetime=None, index=None,
                 **annotations):
        """Initialize a new Block."""
        BaseNeo.__init__(self, name=name, file_origin=file_origin,
                         description=description, **annotations)

        self.file_datetime = file_datetime
        self.rec_datetime = rec_datetime
        self.index = index

        # Child containers start out empty.
        self.segments = []
        self.recordingchannelgroups = []

    @property
    def list_units(self):
        """
        Return all distinct :py:class:`Unit` objects in this block.
        """
        found = []
        for group in self.recordingchannelgroups:
            for channel in group.recordingchannel:
                for unit in channel.units:
                    if unit in found:
                        continue
                    found.append(unit)
        return found

    @property
    def list_recordingchannels(self):
        """
        Return all distinct :py:class:`RecordingChannel` objects in this block.
        """
        channels = []
        for group in self.recordingchannelgroups:
            for channel in group.recordingchannel:
                if channel in channels:
                    continue
                channels.append(channel)
        return channels
|
Spyware are destructive software programes, that can develop mayhem if they get activated on your computer system as they can track your personal data as well as sensitive information. They can make your computer systems go slow eating up your processing resource without your permission. This area includes the information for Spyware elimination to stop your PC to Spywares.
The majority of spywares obtain installed without any user understanding. Some time they comes with shareware software application or other uncodable software program. Normally user download and install totally free software program yet they do not understand that spyware could also obtain downloaded with it. Spyware programs are planned to show some type of advertisement. and also present advertisement popup continuously.
Typically advertisement business fund and enhance this kind of spywares to get a gas mileage or popularity by them.
If by some methods you have downloaded and install the one as well as clicked it, it gets activated as well as set up immediately. Some cost-free internet toolbars might be spywares, they may add some new menu option, switches, new taskbar icones, new things in faves, excessive hyper links can added in you internet web browser as well as it can also change your default web page or you search engine.
As for spyware affect is considered it can make your computer handling slow making your computer take longer time to processing the task or start-up time.
To secure your PC, use antivirus software that comes integrated with spyware protection.
Always utilize great antispyware as well as check your computer system on day-to-day basis. Now-a-days some antvirus include incorporated antispyware facility you can pick them as a precaution measure. If you are utilizing Windows Panorama there is an alternative to prevent spywares, you can use Windows Protector security. It find spywares as well as unwanted advertisement too. If you are using Windows XP then likewise you can download Windows Defender from Microsoft website. Just browse around this site for more details about antiviruses.
for better prevention of spyware maintain your Internet explorer setups set on high level.
There are different kind of security areas with the assistance of which you can restrict those internet site which create your computer security loose.
Throughout the Internet searching many pop-up comes if they provide any type of link to click do not click them if you discover your self suspicious of where you are going to land.
Always download and install whatever you want however just from trusted website. It may be possible that freeware software application contain spywares. Do not install any type of questionable toolbar or search engine.
|
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
# sample from /status?json
# {
# "accepted conn": 350,
# "active processes": 1,
# "idle processes": 2,
# "listen queue": 0,
# "listen queue len": 0,
# "max active processes": 2,
# "max children reached": 0,
# "max listen queue": 0,
# "pool": "www",
# "process manager": "dynamic",
# "slow requests": 0,
# "start since": 4758,
# "start time": 1426601833,
# "total processes": 3
# }
@attr(requires='phpfpm')
class PHPFPMCheckTest(AgentCheckTest):
    """Integration tests for the php_fpm check (status page and ping)."""

    CHECK_NAME = 'php_fpm'

    def test_bad_status(self):
        # Nothing listens on port 9001, so the check run itself must raise.
        broken_config = {
            'status_url': 'http://localhost:9001/status',
            'tags': ['expectedbroken']
        }
        self.assertRaises(Exception, self.run_check,
                          {'instances': [broken_config]})

    def test_bad_ping(self):
        # Pinging an unreachable URL yields a CRITICAL service check.
        broken_config = {
            'ping_url': 'http://localhost:9001/status',
            'tags': ['expectedbroken']
        }
        self.run_check({'instances': [broken_config]})

        self.assertServiceCheck(
            'php_fpm.can_ping',
            status=AgentCheck.CRITICAL,
            tags=['ping_url:http://localhost:9001/status'],
            count=1
        )
        self.coverage_report()

    def test_bad_ping_reply(self):
        # The endpoint answers, but not with the configured reply body.
        broken_config = {
            'ping_url': 'http://localhost:42424/ping',
            'ping_reply': 'blah',
            'tags': ['expectedbroken']
        }
        self.run_check({'instances': [broken_config]})

        self.assertServiceCheck(
            'php_fpm.can_ping',
            status=AgentCheck.CRITICAL,
            tags=['ping_url:http://localhost:42424/ping'],
            count=1
        )
        self.coverage_report()

    def test_status(self):
        # Happy path: both the status page and the ping endpoint respond.
        good_config = {
            'status_url': 'http://localhost:42424/status',
            'ping_url': 'http://localhost:42424/ping',
            'tags': ['cluster:forums']
        }
        self.run_check_twice({'instances': [good_config]})

        expected_tags = ['cluster:forums', 'pool:www']
        for metric_name in (
            'php_fpm.listen_queue.size',
            'php_fpm.processes.idle',
            'php_fpm.processes.active',
            'php_fpm.processes.total',
            'php_fpm.requests.slow',
            'php_fpm.requests.accepted',
        ):
            self.assertMetric(metric_name, count=1, tags=expected_tags)

        self.assertMetric('php_fpm.processes.idle', count=1, value=1)
        self.assertMetric('php_fpm.processes.total', count=1, value=2)

        self.assertServiceCheck('php_fpm.can_ping', status=AgentCheck.OK,
                                count=1,
                                tags=['ping_url:http://localhost:42424/ping'])

        self.assertMetric('php_fpm.processes.max_reached', count=1)
|
The classic toaster gets a modern update with the Compact Plastic Toaster by Cuisinart. It saves space, placed sideways or facing forward, to fit any kitchen counter.
The classic toaster gets a modern update with the Compact Plastic Toaster by Cuisinart. It saves space, placed sideways or facing forward, to fit any kitchen counter. Whether making thick bagel halves or thin sliced breads, the wide slots, high-lift carriage and custom controls ensure even, precise and convenient toasting, every time.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 27 Mar 2017
@author: Éric Piel
Acquires a CL spectrum at different e-beam voltages.
If the e-beam is not in spot mode, it will be set to spot mode at the center
of the SEM field-of-view.
The spectrometer settings are used untouched.
Warning: the optical path should be properly configured already (ie, the spectrum
stream should be the last one playing in the GUI).
run as:
./spectrum_volt.py --volt 5 7.5 10 15 --spectrometer spectrometer-integrated --output spectra.h5
"""
from __future__ import division
import argparse
import logging
from odemis import model, dataio, util
import os
import sys
def save_hw_settings(ebeam):
    """
    Snapshot the e-beam settings that the acquisition modifies.

    ebeam (Component): the e-beam scanner
    returns (tuple): opaque state, to be passed to resume_hw_settings()
    """
    return (
        ebeam.resolution.value,
        ebeam.scale.value,
        ebeam.translation.value,
        ebeam.dwellTime.value,
        ebeam.accelVoltage.value,
    )
def resume_hw_settings(ebeam, hw_settings):
    """
    Restore the e-beam settings saved by save_hw_settings().

    ebeam (Component): the e-beam scanner
    hw_settings (tuple): state previously returned by save_hw_settings()
    """
    res, scale, trans, dt, volt = hw_settings
    # order matters: scale must be restored before resolution
    restore_order = (
        (ebeam.scale, scale),
        (ebeam.resolution, res),
        (ebeam.translation, trans),
        (ebeam.dwellTime, dt),
        (ebeam.accelVoltage, volt),
    )
    for va, v in restore_order:
        va.value = v
def discard_data(df, da):
    """
    Dataflow callback for the SE detector: the data itself is unused,
    the subscription only serves to keep the e-beam active.
    """
    logging.debug("Received one ebeam data")
def acquire_volts(volts, detector):
    """
    Acquires one CL spectrum per requested e-beam voltage.

    volts (list of str or floats > 0): voltages in kV
    detector (str): role of the spectrometer to use
    returns (list of DataArray): all the spectra, in order
    """
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    spmt = model.getComponent(role=detector)
    # Snapshot the current settings so they can be restored at the end
    hw_settings = save_hw_settings(ebeam)
    # Go to spot mode (ie, res = 1x1)
    if ebeam.resolution.value != (1, 1):
        ebeam.resolution.value = (1, 1)
        ebeam.translation.value = (0, 0)  # at the center of the FoV
    else:
        logging.info("Leaving the e-beam in spot mode at %s", ebeam.translation.value)
    # dwell time (in s) used while sitting on the spot
    ebeam.dwellTime.value = 0.1
    try:
        # Activate the e-beam
        sed.data.subscribe(discard_data)
        das = []
        for vstr in volts:
            v = float(vstr) * 1000  # kV (user input) -> V (hardware unit)
            ebeam.accelVoltage.value = v
            # The hardware may clamp/round the requested voltage: warn if so
            if not util.almost_equal(ebeam.accelVoltage.value, v):
                logging.warning("Voltage requested at %g kV, but e-beam set at %g kV",
                                v / 1000, ebeam.accelVoltage.value / 1000)
            else:
                logging.info("Acquiring at %g kV", v / 1000)
            # Acquire one spectrum
            spec = spmt.data.get()
            # Add dimensions to make it a spectrum (X, first dim -> C, 5th dim)
            spec.shape = (spec.shape[-1], 1, 1, 1, 1)
            # Add some useful metadata
            spec.metadata[model.MD_DESCRIPTION] = "Spectrum at %g kV" % (v / 1000)
            spec.metadata[model.MD_EBEAM_VOLTAGE] = v
            # TODO: store the spot position in MD_POS
            das.append(spec)
    finally:
        sed.data.unsubscribe(discard_data)  # Just to be sure
        resume_hw_settings(ebeam, hw_settings)
    return das
def save_data(das, filename):
    """
    Saves a series of spectra into a single file.

    das (list of DataArray): data to save
    filename (str): the exporter is picked based on the file extension
    """
    exporter = dataio.find_fittest_converter(filename)

    if os.path.exists(filename):
        # The export below replaces whatever was in the file before
        logging.warning("Overwriting file '%s'.", filename)
    else:
        # Fixed: the message previously had an unterminated quote
        # ("Saving file '%s") in the output.
        logging.info("Saving file '%s'.", filename)

    exporter.export(filename, das)
def main(args):
    """
    Handles the command line arguments.

    args (list of str): the arguments passed to the program, including its name
    return (int): value to return to the OS as program exit code
    """
    # arguments handling
    parser = argparse.ArgumentParser(description="Acquires a CL spectrum at different e-beam voltages")

    # required=True: without it, omitting --volt used to crash much later
    # with an obscure TypeError when iterating None in acquire_volts()
    parser.add_argument("--volt", "-v", dest="volts", nargs="+", required=True,
                        help="Voltages (in kV) for which a spectrum should be acquired"
                        )
    parser.add_argument("--spectrometer", "-s", dest="spectrometer", default="spectrometer",
                        help="Role of the detector to use to acquire a spectrum (default: spectrometer)"
                        )
    parser.add_argument("--output", "-o", dest="output", required=True,
                        help="Name where to save the spectra. "
                             "The file format is derived from the extension "
                             "(TIFF and HDF5 are supported).")
    parser.add_argument("--log-level", dest="loglev", metavar="<level>", type=int,
                        default=1, help="set verbosity level (0-2, default = 1)")

    options = parser.parse_args(args[1:])

    # Set up logging before everything else
    if options.loglev < 0:
        logging.error("Log-level must be positive.")
        return 127
    # Cap the requested verbosity at DEBUG
    loglev_names = [logging.WARNING, logging.INFO, logging.DEBUG]
    loglev = loglev_names[min(len(loglev_names) - 1, options.loglev)]
    logging.getLogger().setLevel(loglev)

    try:
        das = acquire_volts(options.volts, options.spectrometer)
        save_data(das, options.output)
    except KeyboardInterrupt:
        logging.info("Interrupted before the end of the execution")
        return 1
    except ValueError as exp:
        # e.g. a voltage that cannot be parsed as a float
        logging.error("%s", exp)
        return 127
    except Exception:
        logging.exception("Unexpected error while performing action.")
        return 127

    return 0
# Script entry point: run the acquisition and exit with main()'s return code
if __name__ == '__main__':
    ret = main(sys.argv)
    logging.shutdown()  # flush and close all log handlers before exiting
    exit(ret)
|
Just when the dust began to settle on the DIVA Totem Pole, the 2002 SF Opera in the Park rolled around. Knowing the bar was high, and wanting to make it even more fun for friends, I took a different strategy: interactivity!
I sourced the most iconic and expressive diva portraits I could get my hands on to create quickie cut-out masks. They had to be high-resolution enough to print out cleanly at actual size. This was during my heyday of designing the SF Ballet Nutcracker collateral. I had just used a similar mask idea for their direct mail brochure, with many of the characters from their production (but with eye holes, rather than mouth holes).
I dug up hot shots of Birgit Nilsson’s icy Met Turandot (complete with one ton bejeweled crown), Regina Resnik’s tragic Klytemnestra, and June Anderson mugging in a photoshoot. But, I branched out a bit too, including Wagnerian basso Hagen (with imposing helmet, singer unknown), as well as two popular divas: 1960’s “Color Me…” Barbra, and AI Kelly “Moments Like This” Clarkson.
The pictures make me laugh out loud every time I rediscover them. The lips look positively labial. I reigned over the proceedings here as Turandot. A Streisand fanatic friend immediately got his paws on Babs (and even did her mannered hands, see above). My friends brought these TO LIFE!
As I sign off for now, I share with you Nilsson singing Turandot’s ruthless aria “In Questa Reggia” (and lookin’ like buttah) at Arena Macerata in ’71, for some bonus dementia!
|
# baleen.opml
# Reads opml files and gives back outline data
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sat Sep 20 23:12:07 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: opml.py [b2f890b] [email protected] $
"""
Reads opml files and gives back outline data
"""
##########################################################################
## Imports
##########################################################################
import baleen.models as db
from bs4 import BeautifulSoup
from collections import Counter
from mongoengine.errors import *
##########################################################################
## Load Database function
##########################################################################
def load_opml(path):
    """
    Loads an OPML file into the Mongo database; returns the count of the
    number of documents added to the database.
    """
    opml = OPML(path)
    added = 0

    for attrs in opml:
        # Drop the presentation-only OPML attributes
        attrs.pop('type')
        attrs.pop('text')

        # The XML URL becomes the canonical link, and both URLs are kept
        # in the nested 'urls' document.
        xml_url = attrs.pop('xmlUrl')
        attrs['link'] = xml_url
        attrs['urls'] = {
            'xmlUrl': xml_url,
            'htmlUrl': attrs.pop('htmlUrl'),
        }

        try:
            db.Feed(**attrs).save()  # Construct without an ObjectId
        except NotUniqueError:
            # Feed already stored: skip it without counting
            continue
        added += 1

    return added
##########################################################################
## OPMLReader
##########################################################################
class OPML(object):
    """
    Reader for OPML XML files.
    """

    def __init__(self, path):
        self.path = path

    def _soup(self):
        # Parse the OPML file from disk; re-parsed on every call so the
        # reader always reflects the current file contents.
        with open(self.path, 'r') as fobj:
            return BeautifulSoup(fobj, 'xml')

    def categories(self):
        """
        Yields the title of every top-level outline (category) in the file.
        """
        for topic in self._soup().select('body > outline'):
            yield topic['title']

    def counts(self):
        """
        Returns a Counter mapping each category to its number of feeds.
        """
        tally = Counter()
        for feed in self:
            tally[feed['category']] += 1
        return tally

    def __iter__(self):
        """
        Yields a dictionary of attributes per RSS feed in the OPML file,
        with the parent outline's title added under the 'category' key.
        """
        for topic in self._soup().select('body > outline'):
            for feed in topic.find_all('outline'):
                attrs = dict(feed.attrs)
                attrs['category'] = topic['title']
                yield attrs

    def __len__(self):
        # Number of feeds; requires a full pass over the file.
        return sum(1 for _ in self)

    def __str__(self):
        counts = self.counts()
        return "OPML with {} categories and {} feeds".format(
            len(counts), sum(counts.values())
        )

    def __repr__(self):
        return "<{} at {}>".format(self.__class__.__name__, self.path)
|
A leash in order that you may take your pet and yourself on walks. This is a significant building opportunity along with your pet as well as gives you each exercise.
You'll be able to even add a little bit of food colour if the recipe permits.
A leash to ensure that you may take your dog as well as your own self on strolls. This is actually a significant connecting opportunity along with your pet and offers you each exercise.
We are specialize in supplying Quality wing mirrors at low cost, High Quality land vagabond driver/near side Wing Mirror Glass, land rover passenger/off side Wing Mirrors.
The granny would get a Actor if there was one for ad films.
She is really the pivot on which the entire film revolves. Nowhere is the name Fortune mentioned nor the pack shot shown till the entire narrative is done with.
That's why testing is so important if both partners have had comprehensive STI testing with no positive results, it's less likely that one will transfer an infection to the other.
jasa pembuatan web jakarta barat - jasa pembuatan website murah jakarta barat - jasa.
Our mission is to help you get useful information about creativity, inspiration, entertainment, politics, social culture, lifestyle, photos, videos and more.
Supported by a solid team, we are ready to provide the best and most useful articles for you.
|
def carry_around_add(a, b):
    """
    calculate the carry around sum for the 16 bit binary values
    """
    # NOTE(review): the docstring says 16-bit, but the sum is masked with
    # 0xff (8 bits) while the carry is taken from bit 16 (c >> 16) — these
    # are inconsistent with each other.  A 16-bit carry-around sum would
    # mask with 0xffff; an 8-bit one would shift by 8.  checksum() below
    # depends on this exact behavior, so confirm the intended width against
    # the Argos message specification before changing it.
    c = a + b
    return (c & 0xff) + (c >> 16)
def checksum(msg):
    """
    Compute the 8-bit checksum of a message.

    The documents say to sum all the previous bytes and take the one's
    complement, but that does not match real messages: a carry-around
    (end-around carry) sum is needed instead.

    msg (iterable of str): the message bytes, each as a hex string
    returns (int): checksum in [0, 255]
    """
    total = 0
    for hex_byte in msg:
        # carry-around addition (same operation as carry_around_add above)
        total += int(hex_byte, 16)
        total = (total & 0xff) + (total >> 16)
    # two's complement of the running sum, truncated to 8 bits
    return ~total + 1 & 0xff
def twos_comp(hexstr, nbits):
    """
    Interpret a hex string as an nbits-wide two's-complement integer.

    hexstr (str): hexadecimal representation of the value
    nbits (int): width of the field in bits
    returns (int): the signed value
    """
    value = int(hexstr, 16)
    sign_bit = 1 << (nbits - 1)
    if value >= sign_bit:
        # The sign bit is set: wrap down into the negative range
        value -= sign_bit << 1
    return value
def decode(msg):
    """
    Decode a raw Argos message string into a dict of fields.

    Currently an unimplemented stub: it always returns None, which means
    print_decode() can only be used with an already-decoded dict.
    """
    pass
def print_decode(msg):
    """
    Pretty-print a decoded Argos message.

    msg (str or dict): either a raw message string (passed through
        decode() first) or an already-decoded dict of fields.
    raises TypeError: if msg is neither a str nor a dict.
    """
    if isinstance(msg, str):
        # NOTE(review): decode() is currently a stub that returns None,
        # so this path will fail until it is implemented.
        msg_dict = decode(msg)
    elif isinstance(msg, dict):
        msg_dict = msg
    else:
        # Previously msg_dict was left unbound here, causing a confusing
        # UnboundLocalError; fail explicitly instead.
        raise TypeError("msg must be a str or a dict, not %s" % type(msg).__name__)
    # Fixed: the lon_iso field used to read {lon_iso::.2f} (double colon),
    # which made .format() raise ValueError on every call.
    print("""Decoded Argos message:
Transmitted checksum: {trans_chk:d}
Calculated checksum: {calcd_chk:d}
{chk_msg}
{timestamp:s}
Valid Fix {lat_iso:.2f} {lon_iso:.2f} ({lat_deg:.3f} {lon_deg:.3f})
Age: {age:d} minutes
Invalid Fix {in_lat_iso:.2f} {in_lon_iso:.2f} {in_age:d} minutes
Too-Far Fix {tf_lat_iso:.2f} {tf_lon_iso:.2f} {tf_age:d} minutes
Water currents: {vx:.2f} {vy:.2f}
{sp:.2f} {dir:.2f}
""".format(**msg_dict))
|
Tag Archives: 비상등을 이용하여 차가 속도를 줄이는 것을 미리 경고해요.
Easy to Learn Korean 1645 – Car hazard button.
Posted in Series | Tagged Car hazard button, funeral car, hazard lights, hazard lights are also used for other purposes., hazard lights are used to indicate emergency situations., In Korea, Internationally, merge, Press the hazard button to thank other motorists in front of your car., reception, tinted, Use hazard lights to warn in advance of slowing cars., wedding hall, 국제적으로 비상등은 응급 상황을 나타내는 데 사용되요., 비상등, 비상등을 눌러 제차가 앞에 끼어드는 것을 다른 운전자에게 감사표시를 해요., 비상등을 이용하여 차가 속도를 줄이는 것을 미리 경고해요., 웨딩홀, 한국에서는 비상등이 다른 목적으로도 사용되요.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.