#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import itertools
import calendar
import sys
import gc
import time
import geopy
from peewee import SqliteDatabase, InsertQuery, \
IntegerField, CharField, DoubleField, BooleanField, \
DateTimeField, fn, DeleteQuery, CompositeKey, FloatField, SQL, TextField
from playhouse.flask_utils import FlaskDB
from playhouse.pool import PooledMySQLDatabase
from playhouse.shortcuts import RetryOperationalError
from playhouse.migrate import migrate, MySQLMigrator, SqliteMigrator
from datetime import datetime, timedelta
from base64 import b64encode
from cachetools import TTLCache
from cachetools import cached
from . import config
from .utils import get_pokemon_name, get_pokemon_rarity, get_pokemon_types, get_args
from .transform import transform_from_wgs_to_gcj, get_new_coords
from .customLog import printPokemon
log = logging.getLogger(__name__)
args = get_args()
flaskDb = FlaskDB()
cache = TTLCache(maxsize=100, ttl=60 * 5)
db_schema_version = 7
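# Pooled MySQL connection class; the RetryOperationalError mixin from
# playhouse transparently retries a query when the server has dropped
# the connection.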
class MyRetryDB(RetryOperationalError, PooledMySQLDatabase):
pass
def init_database(app):
if args.db_type == 'mysql':
log.info('Connecting to MySQL database on %s:%i', args.db_host, args.db_port)
connections = args.db_max_connections
if hasattr(args, 'accounts'):
connections *= len(args.accounts)
db = MyRetryDB(
args.db_name,
user=args.db_user,
password=args.db_pass,
host=args.db_host,
port=args.db_port,
max_connections=connections,
stale_timeout=300)
else:
log.info('Connecting to local SQLite database')
db = SqliteDatabase(args.db)
app.config['DATABASE'] = db
flaskDb.init_app(app)
return db
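# Hypothetical usage sketch (not part of the original module): the Flask app
# is expected to be created elsewhere and passed in once at startup, e.g.
#
#   app = Flask(__name__)
#   db = init_database(app)
#   create_tables(db)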
class BaseModel(flaskDb.Model):
@classmethod
def get_all(cls):
results = [m for m in cls.select().dicts()]
if args.china:
for result in results:
result['latitude'], result['longitude'] = \
transform_from_wgs_to_gcj(
result['latitude'], result['longitude'])
return results
class Pokemon(BaseModel):
# We are base64 encoding the ids delivered by the api
# because they are too big for sqlite to handle
encounter_id = CharField(primary_key=True, max_length=50)
spawnpoint_id = CharField(index=True)
pokemon_id = IntegerField(index=True)
latitude = DoubleField()
longitude = DoubleField()
disappear_time = DateTimeField(index=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_active(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where(Pokemon.disappear_time > datetime.utcnow())
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.disappear_time > datetime.utcnow()) &
(((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))))
.dicts())
        # Performance: Disable the garbage collector prior to building a (potentially) large list with append().
gc.disable()
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
# Re-enable the GC.
gc.enable()
return pokemons
@staticmethod
def get_active_by_id(ids, swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()))
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
        # Performance: Disable the garbage collector prior to building a (potentially) large list with append().
gc.disable()
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
# Re-enable the GC.
gc.enable()
return pokemons
@classmethod
@cached(cache)
def get_seen(cls, timediff):
if timediff:
timediff = datetime.utcnow() - timediff
pokemon_count_query = (Pokemon
.select(Pokemon.pokemon_id,
fn.COUNT(Pokemon.pokemon_id).alias('count'),
fn.MAX(Pokemon.disappear_time).alias('lastappeared')
)
.where(Pokemon.disappear_time > timediff)
.group_by(Pokemon.pokemon_id)
.alias('counttable')
)
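        # Greatest-per-group pattern: join each pokemon row against the
        # per-id subquery and keep only the row whose disappear_time equals
        # that id's last appearance, yielding one latest row per pokemon_id
        # together with its seen count.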
query = (Pokemon
.select(Pokemon.pokemon_id,
Pokemon.disappear_time,
Pokemon.latitude,
Pokemon.longitude,
pokemon_count_query.c.count)
.join(pokemon_count_query, on=(Pokemon.pokemon_id == pokemon_count_query.c.pokemon_id))
.distinct()
.where(Pokemon.disappear_time == pokemon_count_query.c.lastappeared)
.dicts()
)
        # Performance: Disable the garbage collector prior to building a (potentially) large list with append().
gc.disable()
pokemons = []
total = 0
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
pokemons.append(p)
total += p['count']
# Re-enable the GC.
gc.enable()
return {'pokemon': pokemons, 'total': total}
@classmethod
def get_appearances(cls, pokemon_id, timediff):
'''
:param pokemon_id: id of pokemon that we need appearances for
:param timediff: limiting period of the selection
:return: list of pokemon appearances over a selected period
'''
if timediff:
timediff = datetime.utcnow() - timediff
query = (Pokemon
.select(Pokemon.latitude, Pokemon.longitude, Pokemon.pokemon_id, fn.Count(Pokemon.spawnpoint_id).alias('count'), Pokemon.spawnpoint_id)
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.disappear_time > timediff)
)
.group_by(Pokemon.latitude, Pokemon.longitude, Pokemon.pokemon_id, Pokemon.spawnpoint_id)
.dicts()
)
return list(query)
@classmethod
def get_appearances_times_by_spawnpoint(cls, pokemon_id, spawnpoint_id, timediff):
'''
        :param pokemon_id: id of the pokemon we need appearance times for
        :param spawnpoint_id: spawnpoint id we need appearance times for
        :param timediff: limiting period of the selection
        :return: list of appearance times over the selected period
'''
if timediff:
timediff = datetime.utcnow() - timediff
query = (Pokemon
.select(Pokemon.disappear_time)
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.spawnpoint_id == spawnpoint_id) &
(Pokemon.disappear_time > timediff)
)
.order_by(Pokemon.disappear_time.asc())
.tuples()
)
return list(itertools.chain(*query))
@classmethod
def get_spawn_time(cls, disappear_time):
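        # Spawns are assumed to last 15 minutes (900s), so the appearance
        # time within the hour is disappear_time - 900s; adding 2700 and
        # taking mod 3600 is equivalent, e.g. 1800 -> (1800 + 2700) % 3600 = 900.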
return (disappear_time + 2700) % 3600
@classmethod
def get_spawnpoints(cls, southBoundary, westBoundary, northBoundary, eastBoundary):
query = Pokemon.select(Pokemon.latitude, Pokemon.longitude, Pokemon.spawnpoint_id, ((Pokemon.disappear_time.minute * 60) + Pokemon.disappear_time.second).alias('time'), fn.Count(Pokemon.spawnpoint_id).alias('count'))
if None not in (northBoundary, southBoundary, westBoundary, eastBoundary):
query = (query
.where((Pokemon.latitude <= northBoundary) &
(Pokemon.latitude >= southBoundary) &
(Pokemon.longitude >= westBoundary) &
(Pokemon.longitude <= eastBoundary)
))
query = query.group_by(Pokemon.latitude, Pokemon.longitude, Pokemon.spawnpoint_id, SQL('time'))
queryDict = query.dicts()
spawnpoints = {}
for sp in queryDict:
key = sp['spawnpoint_id']
disappear_time = cls.get_spawn_time(sp.pop('time'))
count = int(sp['count'])
if key not in spawnpoints:
spawnpoints[key] = sp
else:
spawnpoints[key]['special'] = True
if 'time' not in spawnpoints[key] or count >= spawnpoints[key]['count']:
spawnpoints[key]['time'] = disappear_time
spawnpoints[key]['count'] = count
for sp in spawnpoints.values():
del sp['count']
return list(spawnpoints.values())
@classmethod
def get_spawnpoints_in_hex(cls, center, steps):
log.info('Finding spawn points {} steps away'.format(steps))
n, e, s, w = hex_bounds(center, steps)
query = (Pokemon
.select(Pokemon.latitude.alias('lat'),
Pokemon.longitude.alias('lng'),
((Pokemon.disappear_time.minute * 60) + Pokemon.disappear_time.second).alias('time'),
Pokemon.spawnpoint_id
))
query = (query.where((Pokemon.latitude <= n) &
(Pokemon.latitude >= s) &
(Pokemon.longitude >= w) &
(Pokemon.longitude <= e)
))
# Sqlite doesn't support distinct on columns
if args.db_type == 'mysql':
query = query.distinct(Pokemon.spawnpoint_id)
else:
query = query.group_by(Pokemon.spawnpoint_id)
s = list(query.dicts())
# The distance between scan circles of radius 70 in a hex is 121.2436
# steps - 1 to account for the center circle then add 70 for the edge
step_distance = ((steps - 1) * 121.2436) + 70
        # Keep only spawnpoints within step_distance of the center,
        # using the direct geopy distance between the two points.
filtered = []
for idx, sp in enumerate(s):
if geopy.distance.distance(center, (sp['lat'], sp['lng'])).meters <= step_distance:
filtered.append(s[idx])
# at this point, 'time' is DISAPPEARANCE time, we're going to morph it to APPEARANCE time
for location in filtered:
# examples: time shifted
# 0 ( 0 + 2700) = 2700 % 3600 = 2700 (0th minute to 45th minute, 15 minutes prior to appearance as time wraps around the hour)
# 1800 (1800 + 2700) = 4500 % 3600 = 900 (30th minute, moved to arrive at 15th minute)
# todo: this DOES NOT ACCOUNT for pokemons that appear sooner and live longer, but you'll _always_ have at least 15 minutes, so it works well enough
location['time'] = cls.get_spawn_time(location['time'])
return filtered
class Pokestop(BaseModel):
pokestop_id = CharField(primary_key=True, max_length=50)
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
lure_expiration = DateTimeField(null=True, index=True)
active_fort_modifier = CharField(max_length=50, null=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_stops(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokestop
.select()
.dicts())
else:
query = (Pokestop
.select()
.where((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng))
.dicts())
        # Performance: Disable the garbage collector prior to building a (potentially) large list with append().
gc.disable()
pokestops = []
for p in query:
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokestops.append(p)
# Re-enable the GC.
gc.enable()
return pokestops
class Gym(BaseModel):
UNCONTESTED = 0
TEAM_MYSTIC = 1
TEAM_VALOR = 2
TEAM_INSTINCT = 3
gym_id = CharField(primary_key=True, max_length=50)
team_id = IntegerField()
guard_pokemon_id = IntegerField()
gym_points = IntegerField()
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
last_scanned = DateTimeField(default=datetime.utcnow)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_gyms(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
results = (Gym
.select()
.dicts())
else:
results = (Gym
.select()
.where((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng))
.dicts())
        # Performance: Disable the garbage collector prior to building a (potentially) large list with append().
gc.disable()
gyms = {}
gym_ids = []
for g in results:
g['name'] = None
g['pokemon'] = []
gyms[g['gym_id']] = g
gym_ids.append(g['gym_id'])
if len(gym_ids) > 0:
pokemon = (GymMember
.select(
GymMember.gym_id,
GymPokemon.cp.alias('pokemon_cp'),
GymPokemon.pokemon_id,
Trainer.name.alias('trainer_name'),
Trainer.level.alias('trainer_level'))
.join(Gym, on=(GymMember.gym_id == Gym.gym_id))
.join(GymPokemon, on=(GymMember.pokemon_uid == GymPokemon.pokemon_uid))
.join(Trainer, on=(GymPokemon.trainer_name == Trainer.name))
.where(GymMember.gym_id << gym_ids)
.where(GymMember.last_scanned > Gym.last_modified)
.order_by(GymMember.gym_id, GymPokemon.cp)
.dicts())
for p in pokemon:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
gyms[p['gym_id']]['pokemon'].append(p)
details = (GymDetails
.select(
GymDetails.gym_id,
GymDetails.name)
.where(GymDetails.gym_id << gym_ids)
.dicts())
for d in details:
gyms[d['gym_id']]['name'] = d['name']
# Re-enable the GC.
gc.enable()
return gyms
class ScannedLocation(BaseModel):
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
class Meta:
primary_key = CompositeKey('latitude', 'longitude')
@staticmethod
def get_recent(swLat, swLng, neLat, neLng):
query = (ScannedLocation
.select()
.where((ScannedLocation.last_modified >=
(datetime.utcnow() - timedelta(minutes=15))) &
(ScannedLocation.latitude >= swLat) &
(ScannedLocation.longitude >= swLng) &
(ScannedLocation.latitude <= neLat) &
(ScannedLocation.longitude <= neLng))
.order_by(ScannedLocation.last_modified.asc())
.dicts())
return list(query)
class MainWorker(BaseModel):
worker_name = CharField(primary_key=True, max_length=50)
message = CharField()
method = CharField(max_length=50)
last_modified = DateTimeField(index=True)
class WorkerStatus(BaseModel):
username = CharField(primary_key=True, max_length=50)
worker_name = CharField()
success = IntegerField()
fail = IntegerField()
no_items = IntegerField()
skip = IntegerField()
last_modified = DateTimeField(index=True)
message = CharField(max_length=255)
@staticmethod
def get_recent():
query = (WorkerStatus
.select()
.where((WorkerStatus.last_modified >=
(datetime.utcnow() - timedelta(minutes=5))))
.order_by(WorkerStatus.username)
.dicts())
status = []
for s in query:
status.append(s)
return status
class Versions(flaskDb.Model):
key = CharField()
val = IntegerField()
class Meta:
primary_key = False
class GymMember(BaseModel):
gym_id = CharField(index=True)
pokemon_uid = CharField()
last_scanned = DateTimeField(default=datetime.utcnow)
class Meta:
primary_key = False
class GymPokemon(BaseModel):
pokemon_uid = CharField(primary_key=True, max_length=50)
pokemon_id = IntegerField()
cp = IntegerField()
trainer_name = CharField()
num_upgrades = IntegerField(null=True)
move_1 = IntegerField(null=True)
move_2 = IntegerField(null=True)
height = FloatField(null=True)
weight = FloatField(null=True)
stamina = IntegerField(null=True)
stamina_max = IntegerField(null=True)
cp_multiplier = FloatField(null=True)
additional_cp_multiplier = FloatField(null=True)
iv_defense = IntegerField(null=True)
iv_stamina = IntegerField(null=True)
iv_attack = IntegerField(null=True)
last_seen = DateTimeField(default=datetime.utcnow)
class Trainer(BaseModel):
name = CharField(primary_key=True, max_length=50)
team = IntegerField()
level = IntegerField()
last_seen = DateTimeField(default=datetime.utcnow)
class GymDetails(BaseModel):
gym_id = CharField(primary_key=True, max_length=50)
name = CharField()
description = TextField(null=True, default="")
url = CharField()
last_scanned = DateTimeField(default=datetime.utcnow)
def hex_bounds(center, steps):
    # Make a box that is (70m * steps * 2) away from the center point in each
    # cardinal direction; the rationale is that the box must cover the
    # outermost scan circles of the hex.
sp_dist = 0.07 * 2 * steps
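    # e.g. steps = 5 gives sp_dist = 0.7 (km, matching the 70m scan radius),
    # so the box extends 700m from the center in each cardinal direction.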
n = get_new_coords(center, sp_dist, 0)[0]
e = get_new_coords(center, sp_dist, 90)[1]
s = get_new_coords(center, sp_dist, 180)[0]
w = get_new_coords(center, sp_dist, 270)[1]
return (n, e, s, w)
# todo: this probably shouldn't _really_ be in "models" anymore, but w/e
def parse_map(args, map_dict, step_location, db_update_queue, wh_update_queue):
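    # Walk every map cell returned by GET_MAP_OBJECTS, collecting wild
    # pokemon, pokestops (forts of type 1) and gyms (forts without a type),
    # queueing the rows for upsert and optionally pushing webhook events.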
pokemons = {}
pokestops = {}
gyms = {}
cells = map_dict['responses']['GET_MAP_OBJECTS']['map_cells']
for cell in cells:
if config['parse_pokemon']:
for p in cell.get('wild_pokemons', []):
# time_till_hidden_ms was overflowing causing a negative integer.
# It was also returning a value above 3.6M ms.
if 0 < p['time_till_hidden_ms'] < 3600000:
d_t = datetime.utcfromtimestamp(
(p['last_modified_timestamp_ms'] +
p['time_till_hidden_ms']) / 1000.0)
else:
                    # Set a value of 15 minutes because currently it's unknown, but larger than 15.
d_t = datetime.utcfromtimestamp((p['last_modified_timestamp_ms'] + 900000) / 1000.0)
printPokemon(p['pokemon_data']['pokemon_id'], p['latitude'],
p['longitude'], d_t)
pokemons[p['encounter_id']] = {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': d_t
}
if args.webhooks:
wh_update_queue.put(('pokemon', {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': calendar.timegm(d_t.timetuple()),
'last_modified_time': p['last_modified_timestamp_ms'],
'time_until_hidden_ms': p['time_till_hidden_ms']
}))
for f in cell.get('forts', []):
if config['parse_pokestops'] and f.get('type') == 1: # Pokestops
if 'active_fort_modifier' in f:
lure_expiration = datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0) + timedelta(minutes=30)
active_fort_modifier = f['active_fort_modifier']
if args.webhooks and args.webhook_updates_only:
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified_time': f['last_modified_timestamp_ms'],
'lure_expiration': calendar.timegm(lure_expiration.timetuple()),
'active_fort_modifier': active_fort_modifier
}))
else:
lure_expiration, active_fort_modifier = None, None
pokestops[f['id']] = {
'pokestop_id': f['id'],
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
'lure_expiration': lure_expiration,
'active_fort_modifier': active_fort_modifier
}
# Send all pokéstops to webhooks
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change the information pushed to webhooks,
# similar to above and previous commits.
l_e = None
if lure_expiration is not None:
l_e = calendar.timegm(lure_expiration.timetuple())
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': calendar.timegm(pokestops[f['id']]['last_modified'].timetuple()),
'lure_expiration': l_e,
'active_fort_modifier': active_fort_modifier
}))
elif config['parse_gyms'] and f.get('type') is None: # Currently, there are only stops and gyms
gyms[f['id']] = {
'gym_id': f['id'],
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
}
# Send gyms to webhooks
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change the information pushed to webhooks,
# similar to above and previous commits.
wh_update_queue.put(('gym', {
'gym_id': b64encode(str(f['id'])),
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': calendar.timegm(gyms[f['id']]['last_modified'].timetuple())
}))
if len(pokemons):
db_update_queue.put((Pokemon, pokemons))
if len(pokestops):
db_update_queue.put((Pokestop, pokestops))
if len(gyms):
db_update_queue.put((Gym, gyms))
log.info('Parsing found %d pokemons, %d pokestops, and %d gyms',
len(pokemons),
len(pokestops),
len(gyms))
db_update_queue.put((ScannedLocation, {0: {
'latitude': step_location[0],
'longitude': step_location[1],
'last_modified': datetime.utcnow()
}}))
return {
'count': len(pokemons) + len(pokestops) + len(gyms),
'gyms': gyms,
}
def parse_gyms(args, gym_responses, wh_update_queue):
gym_details = {}
gym_members = {}
gym_pokemon = {}
trainers = {}
i = 0
for g in gym_responses.values():
gym_state = g['gym_state']
gym_id = gym_state['fort_data']['id']
gym_details[gym_id] = {
'gym_id': gym_id,
'name': g['name'],
'description': g.get('description'),
'url': g['urls'][0],
}
if args.webhooks:
webhook_data = {
'id': gym_id,
'latitude': gym_state['fort_data']['latitude'],
'longitude': gym_state['fort_data']['longitude'],
'team': gym_state['fort_data'].get('owned_by_team', 0),
'name': g['name'],
'description': g.get('description'),
'url': g['urls'][0],
'pokemon': [],
}
for member in gym_state.get('memberships', []):
gym_members[i] = {
'gym_id': gym_id,
'pokemon_uid': member['pokemon_data']['id'],
}
gym_pokemon[i] = {
'pokemon_uid': member['pokemon_data']['id'],
'pokemon_id': member['pokemon_data']['pokemon_id'],
'cp': member['pokemon_data']['cp'],
'trainer_name': member['trainer_public_profile']['name'],
'num_upgrades': member['pokemon_data'].get('num_upgrades', 0),
'move_1': member['pokemon_data'].get('move_1'),
'move_2': member['pokemon_data'].get('move_2'),
'height': member['pokemon_data'].get('height_m'),
'weight': member['pokemon_data'].get('weight_kg'),
'stamina': member['pokemon_data'].get('stamina'),
'stamina_max': member['pokemon_data'].get('stamina_max'),
'cp_multiplier': member['pokemon_data'].get('cp_multiplier'),
'additional_cp_multiplier': member['pokemon_data'].get('additional_cp_multiplier', 0),
'iv_defense': member['pokemon_data'].get('individual_defense', 0),
'iv_stamina': member['pokemon_data'].get('individual_stamina', 0),
'iv_attack': member['pokemon_data'].get('individual_attack', 0),
'last_seen': datetime.utcnow(),
}
trainers[i] = {
'name': member['trainer_public_profile']['name'],
'team': gym_state['fort_data']['owned_by_team'],
'level': member['trainer_public_profile']['level'],
'last_seen': datetime.utcnow(),
}
if args.webhooks:
webhook_data['pokemon'].append({
'pokemon_uid': member['pokemon_data']['id'],
'pokemon_id': member['pokemon_data']['pokemon_id'],
'cp': member['pokemon_data']['cp'],
'num_upgrades': member['pokemon_data'].get('num_upgrades', 0),
'move_1': member['pokemon_data'].get('move_1'),
'move_2': member['pokemon_data'].get('move_2'),
'height': member['pokemon_data'].get('height_m'),
'weight': member['pokemon_data'].get('weight_kg'),
'stamina': member['pokemon_data'].get('stamina'),
'stamina_max': member['pokemon_data'].get('stamina_max'),
'cp_multiplier': member['pokemon_data'].get('cp_multiplier'),
'additional_cp_multiplier': member['pokemon_data'].get('additional_cp_multiplier', 0),
'iv_defense': member['pokemon_data'].get('individual_defense', 0),
'iv_stamina': member['pokemon_data'].get('individual_stamina', 0),
'iv_attack': member['pokemon_data'].get('individual_attack', 0),
'trainer_name': member['trainer_public_profile']['name'],
'trainer_level': member['trainer_public_profile']['level'],
})
i += 1
if args.webhooks:
wh_update_queue.put(('gym_details', webhook_data))
# All this database stuff is synchronous (not using the upsert queue) on purpose.
# Since the search workers load the GymDetails model from the database to determine if a gym
    # needs to be rescanned, we need to be sure the GymDetails get fully committed to the database before moving on.
#
# We _could_ synchronously upsert GymDetails, then queue the other tables for
# upsert, but that would put that Gym's overall information in a weird non-atomic state.
# upsert all the models
if len(gym_details):
bulk_upsert(GymDetails, gym_details)
if len(gym_pokemon):
bulk_upsert(GymPokemon, gym_pokemon)
if len(trainers):
bulk_upsert(Trainer, trainers)
    # This needs to be completed in a transaction, because we don't want any other thread or process
# to mess with the GymMembers for the gyms we're updating while we're updating the bridge table.
with flaskDb.database.transaction():
# get rid of all the gym members, we're going to insert new records
if len(gym_details):
DeleteQuery(GymMember).where(GymMember.gym_id << gym_details.keys()).execute()
# insert new gym members
if len(gym_members):
bulk_upsert(GymMember, gym_members)
log.info('Upserted %d gyms and %d gym members',
len(gym_details),
len(gym_members))
def db_updater(args, q):
# The forever loop
while True:
try:
while True:
try:
flaskDb.connect_db()
break
except Exception as e:
log.warning('%s... Retrying', e)
# Loop the queue
while True:
model, data = q.get()
bulk_upsert(model, data)
q.task_done()
log.debug('Upserted to %s, %d records (upsert queue remaining: %d)',
model.__name__,
len(data),
q.qsize())
if q.qsize() > 50:
log.warning("DB queue is > 50 (@%d); try increasing --db-threads", q.qsize())
except Exception as e:
log.exception('Exception in db_updater: %s', e)
def clean_db_loop(args):
while True:
try:
# Clean out old scanned locations
query = (ScannedLocation
.delete()
.where((ScannedLocation.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
query = (MainWorker
.delete()
                     .where((MainWorker.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
query = (WorkerStatus
.delete()
                     .where((WorkerStatus.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
# Remove active modifier from expired lured pokestops
query = (Pokestop
.update(lure_expiration=None)
.where(Pokestop.lure_expiration < datetime.utcnow()))
query.execute()
# If desired, clear old pokemon spawns
if args.purge_data > 0:
query = (Pokemon
.delete()
.where((Pokemon.disappear_time <
(datetime.utcnow() - timedelta(hours=args.purge_data)))))
                query.execute()
            log.info('Regular database cleaning complete')
time.sleep(60)
except Exception as e:
log.exception('Exception in clean_db_loop: %s', e)
def bulk_upsert(cls, data):
num_rows = len(data.values())
i = 0
if args.db_type == 'mysql':
step = 120
else:
# SQLite has a default max number of parameters of 999,
# so we need to limit how many rows we insert for it.
step = 50
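        # e.g. a Pokemon row binds 6 values, so 50 rows is 300 bound
        # parameters; even the wider GymPokemon rows (17 columns) stay
        # under the 999 limit at 850.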
while i < num_rows:
log.debug('Inserting items %d to %d', i, min(i + step, num_rows))
try:
InsertQuery(cls, rows=data.values()[i:min(i + step, num_rows)]).upsert().execute()
except Exception as e:
log.warning('%s... Retrying', e)
continue
i += step
def create_tables(db):
db.connect()
verify_database_schema(db)
db.create_tables([Pokemon, Pokestop, Gym, ScannedLocation, GymDetails, GymMember, GymPokemon, Trainer, MainWorker, WorkerStatus], safe=True)
db.close()
def drop_tables(db):
db.connect()
db.drop_tables([Pokemon, Pokestop, Gym, ScannedLocation, Versions, GymDetails, GymMember, GymPokemon, Trainer, MainWorker, WorkerStatus, Versions], safe=True)
db.close()
def verify_database_schema(db):
if not Versions.table_exists():
db.create_tables([Versions])
if ScannedLocation.table_exists():
# Versions table didn't exist, but there were tables. This must mean the user
# is coming from a database that existed before we started tracking the schema
# version. Perform a full upgrade.
InsertQuery(Versions, {Versions.key: 'schema_version', Versions.val: 0}).execute()
database_migrate(db, 0)
else:
InsertQuery(Versions, {Versions.key: 'schema_version', Versions.val: db_schema_version}).execute()
else:
db_ver = Versions.get(Versions.key == 'schema_version').val
if db_ver < db_schema_version:
database_migrate(db, db_ver)
elif db_ver > db_schema_version:
log.error("Your database version (%i) appears to be newer than the code supports (%i).",
db_ver, db_schema_version)
log.error("Please upgrade your code base or drop all tables in your database.")
sys.exit(1)
def database_migrate(db, old_ver):
# Update database schema version
Versions.update(val=db_schema_version).where(Versions.key == 'schema_version').execute()
log.info("Detected database version %i, updating to %i", old_ver, db_schema_version)
# Perform migrations here
migrator = None
if args.db_type == 'mysql':
migrator = MySQLMigrator(db)
else:
migrator = SqliteMigrator(db)
# No longer necessary, we're doing this at schema 4 as well
# if old_ver < 1:
# db.drop_tables([ScannedLocation])
if old_ver < 2:
migrate(migrator.add_column('pokestop', 'encounter_id', CharField(max_length=50, null=True)))
if old_ver < 3:
migrate(
migrator.add_column('pokestop', 'active_fort_modifier', CharField(max_length=50, null=True)),
migrator.drop_column('pokestop', 'encounter_id'),
migrator.drop_column('pokestop', 'active_pokemon_id')
)
if old_ver < 4:
db.drop_tables([ScannedLocation])
if old_ver < 5:
# Some pokemon were added before the 595 bug was "fixed"
# Clean those up for a better UX
query = (Pokemon
.delete()
.where(Pokemon.disappear_time >
(datetime.utcnow() - timedelta(hours=24))))
query.execute()
if old_ver < 6:
migrate(
migrator.add_column('gym', 'last_scanned', DateTimeField(null=True)),
)
if old_ver < 7:
migrate(
migrator.drop_column('gymdetails', 'description'),
migrator.add_column('gymdetails', 'description', TextField(null=True, default=""))
)
|
King bed and queen sofa sleeper in the same room, full bath with jetted tub and partial kitchen*.
King bed, queen sofa sleeper, full bath with jetted tub and partial kitchen*.
*Kitchen: All units are equipped with a partial kitchen which includes a fridge, microwave, dishwasher, blender, toaster and coffee pot.
Studios are equipped with a 2 burner stove and NO OVEN.
One bedrooms are equipped with a 4 burner stove and NO OVEN.
The Villas at Poco Diablo feature amenities to suit everyone, including a seasonal outdoor swimming pool, an outdoor hot tub, and a creekside patio with BBQ grills. We are also next door to a restaurant, golf course, and fitness center.
|
# -*- coding: utf-8 -*-
"""
Default Controllers
"""
module = "default"
# -----------------------------------------------------------------------------
def call():
"Call an XMLRPC, JSONRPC or RSS service"
# If webservices don't use sessions, avoid cluttering up the storage
#session.forget()
return service()
# -----------------------------------------------------------------------------
def download():
""" Download a file """
# Load the Model
tablename = request.args[0].split(".", 1)[0]
s3mgr.load(tablename)
return response.download(request, db)
# =============================================================================
def register_validation(form):
""" Validate the fields in registration form """
# Mobile Phone
if "mobile" in form.vars and form.vars.mobile:
regex = re.compile(single_phone_number_pattern)
if not regex.match(form.vars.mobile):
form.errors.mobile = T("Invalid phone number")
elif deployment_settings.get_auth_registration_mobile_phone_mandatory():
form.errors.mobile = T("Phone number is required")
org = deployment_settings.get_auth_registration_organisation_id_default()
if org:
# Add to default organisation
form.vars.organisation_id = org
return
# -----------------------------------------------------------------------------
def register_onaccept(form):
""" Tasks to be performed after a new user registers """
# Add newly-registered users to Person Registry, add 'Authenticated' role
# If Organisation is provided, then: add HRM record & add to 'Org_X_Access' role
person_id = auth.s3_register(form)
if form.vars.organisation_id and not deployment_settings.get_hrm_show_staff():
# Convert HRM record to a volunteer
htable = s3db.hrm_human_resource
query = (htable.person_id == person_id)
db(query).update(type=2)
# Add to required roles:
roles = deployment_settings.get_auth_registration_roles()
if roles or deployment_settings.has_module("delphi"):
utable = auth.settings.table_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (ptable.id == person_id) & \
(ptable.pe_id == ltable.pe_id) & \
(ltable.user_id == utable.id)
user = db(query).select(utable.id,
ltable.user_id,
limitby=(0, 1)).first()
if roles:
gtable = auth.settings.table_group
mtable = auth.settings.table_membership
query = (gtable.uuid.belongs(roles))
rows = db(query).select(gtable.id)
for role in rows:
mtable.insert(user_id=user[ltable._tablename].user_id,
group_id=role.id)
if deployment_settings.has_module("delphi"):
# Add user as a participant of the default problem group
table = s3db.delphi_group
query = (table.uuid == "DEFAULT")
group = db(query).select(table.id,
limitby=(0, 1)).first()
if group:
table = s3db.delphi_membership
table.insert(group_id=group.id,
user_id=user[utable._tablename].id,
status=3)
# -----------------------------------------------------------------------------
auth.settings.register_onvalidation = register_validation
auth.settings.register_onaccept = register_onaccept
_table_user = auth.settings.table_user
_table_user.first_name.label = T("First Name")
_table_user.first_name.comment = SPAN("*", _class="req")
_table_user.last_name.label = T("Last Name")
if deployment_settings.get_L10n_mandatory_lastname():
_table_user.last_name.comment = SPAN("*", _class="req")
_table_user.email.label = T("E-mail")
_table_user.email.comment = SPAN("*", _class="req")
_table_user.password.comment = SPAN("*", _class="req")
_table_user.language.label = T("Language")
_table_user.language.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Language"),
T("The language you wish the site to be displayed in.")))
_table_user.language.represent = lambda opt: s3_languages.get(opt, UNKNOWN_OPT)
# Organisation widget for use in Registration Screen
# NB User Profile is only editable by Admin - using User Management
organisation_represent = s3db.org_organisation_represent
org_widget = IS_ONE_OF(db, "org_organisation.id",
organisation_represent,
orderby="org_organisation.name",
sort=True)
if deployment_settings.get_auth_registration_organisation_mandatory():
_table_user.organisation_id.requires = org_widget
else:
_table_user.organisation_id.requires = IS_NULL_OR(org_widget)
# For the User Profile:
_table_user.utc_offset.comment = DIV(_class="tooltip",
_title="%s|%s" % (auth.messages.label_utc_offset,
auth.messages.help_utc_offset))
_table_user.organisation_id.represent = organisation_represent
_table_user.organisation_id.comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Organization"),
T("The default Organization for whom you are acting."),
T("This setting can only be controlled by the Administrator.")))
org_site_represent = s3db.org_site_represent
_table_user.site_id.represent = org_site_represent
_table_user.site_id.comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Facility"),
T("The default Facility for which you are acting."),
T("This setting can only be controlled by the Administrator.")))
# =============================================================================
def index():
""" Main Home Page """
title = deployment_settings.get_system_name()
response.title = title
item = ""
if deployment_settings.has_module("cms"):
table = s3db.cms_post
item = db(table.module == module).select(table.body,
limitby=(0, 1)).first()
if item:
item = DIV(XML(item.body))
else:
item = ""
if deployment_settings.has_module("cr"):
s3mgr.load("cr_shelter")
SHELTERS = s3.crud_strings["cr_shelter"].subtitle_list
else:
SHELTERS = ""
# Menu Boxes
menu_btns = [#div, label, app, function
["facility", SHELTERS, "cr", "shelter"],
["facility", T("Warehouses"), "inv", "warehouse"],
["facility", T("Hospitals"), "hms", "hospital"],
["facility", T("Offices"), "org", "office"],
["sit", T("Incidents"), "irs", "ireport"],
["sit", T("Assessments"), "survey", "series"],
["sit", T("Assets"), "asset", "asset"],
["sit", T("Inventory Items"), "inv", "inv_item"],
#["dec", T("Gap Map"), "project", "gap_map"],
#["dec", T("Gap Report"), "project", "gap_report"],
["dec", T("Requests"), "req", "req"],
["res", T("Projects"), "project", "project"],
["res", T("Activities"), "project", "activity"],
["res", T("Commitments"), "req", "commit"],
["res", T("Sent Shipments"), "inv", "send"],
["res", T("Received Shipments"), "inv", "recv"]
]
# Change to (Mitigation)/Preparedness/Response/Recovery?
menu_divs = {"facility": DIV( H3(T("Facilities")),
_id = "facility_box", _class = "menu_box"),
"sit": DIV( H3(T("Situation")),
_id = "menu_div_sit", _class = "menu_div"),
"dec": DIV( H3(T("Decision")),
_id = "menu_div_dec", _class = "menu_div"),
"res": DIV( H3(T("Response")),
_id = "menu_div_res", _class = "menu_div"),
}
for div, label, app, function in menu_btns:
if deployment_settings.has_module(app):
# @ToDo: Also check permissions (e.g. for anonymous users)
menu_divs[div].append(A( DIV(label,
_class = "menu-btn-r"),
_class = "menu-btn-l",
_href = URL(app,function)
)
)
div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
request.application),
_class = "div_arrow")
sit_dec_res_box = DIV(menu_divs["sit"],
div_arrow,
menu_divs["dec"],
div_arrow,
menu_divs["res"],
_id = "sit_dec_res_box",
_class = "menu_box fleft swidth"
#div_additional,
)
facility_box = menu_divs["facility"]
facility_box.append( A( IMG(_src = "/%s/static/img/map_icon_128.png" % \
request.application),
_href = URL(c="gis", f="index"),
_title = T("Map")
)
)
datatable_ajax_source = ""
# Check logged in AND permissions
if AUTHENTICATED in session.s3.roles and \
auth.s3_has_permission("read", db.org_organisation):
org_items = organisation()
datatable_ajax_source = "/%s/default/organisation.aaData" % \
request.application
response.s3.actions = None
response.view = "default/index.html"
auth.permission.controller = "org"
auth.permission.function = "site"
permitted_facilities = auth.permission.permitted_facilities(redirect_on_error=False)
manage_facility_box = ""
if permitted_facilities:
facility_list = s3_represent_facilities(db, permitted_facilities,
link=False)
facility_opts = [OPTION(opt[1], _value = opt[0])
for opt in facility_list]
if facility_list:
manage_facility_box = DIV(H3(T("Manage Your Facilities")),
SELECT(_id = "manage_facility_select",
_style = "max-width:400px;",
*facility_opts
),
A(T("Go"),
_href = URL(c="default", f="site",
args=[facility_list[0][0]]),
#_disabled = "disabled",
_id = "manage_facility_btn",
_class = "action-btn"
),
_id = "manage_facility_box",
_class = "menu_box fleft")
response.s3.jquery_ready.append( """
$('#manage_facility_select').change(function() {
$('#manage_facility_btn').attr('href', S3.Ap.concat('/default/site/', $('#manage_facility_select').val()));
})""" )
else:
manage_facility_box = DIV()
org_box = DIV( H3(T("Organizations")),
A(T("Add Organization"),
_href = URL(c="org", f="organisation",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right: 10px;"),
org_items["items"],
_id = "org_box",
_class = "menu_box fleft"
)
else:
manage_facility_box = ""
org_box = ""
# @ToDo: Replace this with an easily-customisable section on the homepage
#settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first()
#if settings:
# admin_name = settings.admin_name
# admin_email = settings.admin_email
# admin_tel = settings.admin_tel
#else:
# # db empty and prepopulate is false
# admin_name = T("Sahana Administrator").xml(),
# admin_email = "support@Not Set",
# admin_tel = T("Not Set").xml(),
# Login/Registration forms
self_registration = deployment_settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in session.s3.roles:
# This user isn't yet logged-in
        if "registered" in request.cookies:
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
request.args = ["register"]
if deployment_settings.get_terms_of_service():
auth.messages.submit_button = T("I accept. Create my account.")
else:
auth.messages.submit_button = T("Register")
register_form = auth()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
# Add client-side validation
s3_register_validation()
if session.s3.debug:
response.s3.scripts.append( "%s/jquery.validate.js" % s3_script_dir )
else:
response.s3.scripts.append( "%s/jquery.validate.min.js" % s3_script_dir )
if request.env.request_method == "POST":
post_script = """// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');"""
else:
post_script = ""
register_script = """
// Change register/login links to avoid page reload, make back button work.
$('#register-btn').attr('href', '#register');
$('#login-btn').attr('href', '#login');
%s
// Redirect Register Button to unhide
$('#register-btn').click(function() {
// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');
});
// Redirect Login Button to unhide
$('#login-btn').click(function() {
// Hide register form
$('#register_form').addClass('hide');
// Unhide login form
$('#login_form').removeClass('hide');
});""" % post_script
response.s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system" % \
dict(login=B(T("login")))))))
if deployment_settings.frontpage.rss:
response.s3.external_stylesheets.append( "http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css" )
response.s3.scripts.append( "http://www.google.com/jsapi?key=notsupplied-wizard" )
response.s3.scripts.append( "http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js" )
counter = 0
feeds = ""
for feed in deployment_settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title: '%s',\n" % feed["title"],
"url: '%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(deployment_settings.frontpage.rss):
feeds += ",\n"
feed_control = "".join(("""
function LoadDynamicFeedControl() {
var feeds = [
""", feeds, """
];
var options = {
// milliseconds before feed is reloaded (5 minutes)
feedCycleTime : 300000,
numResults : 5,
stacked : true,
horizontal : false,
title : '""", str(T("News")), """'
};
new GFdynamicFeedControl(feeds, 'feed-control', options);
}
// Load the feeds API and set the onload callback.
google.load('feeds', '1');
google.setOnLoadCallback(LoadDynamicFeedControl);"""))
response.s3.js_global.append( feed_control )
return dict(title = title,
item = item,
sit_dec_res_box = sit_dec_res_box,
facility_box = facility_box,
manage_facility_box = manage_facility_box,
org_box = org_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
#admin_name=admin_name,
#admin_email=admin_email,
#admin_tel=admin_tel,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# -----------------------------------------------------------------------------
def organisation():
"""
Function to handle pagination for the org list on the homepage
"""
table = db.org_organisation
table.id.label = T("Organization")
table.id.represent = organisation_represent
response.s3.dataTable_sPaginationType = "two_button"
response.s3.dataTable_sDom = "rtip" #"frtip" - filter broken
response.s3.dataTable_iDisplayLength = 25
s3mgr.configure("org_organisation",
listadd = False,
addbtn = True,
super_entity = db.pr_pentity,
linkto = "/%s/org/organisation/%s" % (request.application,
"%s"),
list_fields = ["id",])
return s3_rest_controller("org", "organisation")
# -----------------------------------------------------------------------------
def site():
"""
@todo: Avoid redirect
"""
s3mgr.load("org_site")
if len(request.args):
site_id = request.args[0]
site_r = db.org_site[site_id]
tablename = site_r.instance_type
table = s3db.table(tablename)
if table:
query = (table.site_id == site_id)
id = db(query).select(db[tablename].id,
limitby = (0, 1)).first().id
cf = tablename.split("_", 1)
redirect(URL(c = cf[0],
f = cf[1],
args = [id]))
raise HTTP(404)
# -----------------------------------------------------------------------------
def message():
#if "verify_email_sent" in request.args:
title = T("Account Registered - Please Check Your Email")
    message = T("%(system_name)s has sent an email to %(email)s to verify your email address.\nPlease check your email to verify this address. If you do not receive this email please check your junk email or spam filters.")\
% {"system_name": deployment_settings.get_system_name(),
"email": request.vars.email}
image = "email_icon.png"
return dict(title = title,
message = message,
image_src = "/%s/static/img/%s" % (request.application, image)
)
# -----------------------------------------------------------------------------
def rapid():
""" Set/remove rapid data entry flag """
val = request.vars.get("val", True)
if val == "0":
val = False
else:
val = True
session.s3.rapid_data_entry = val
response.view = "xml.html"
return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user_profile_onaccept(form):
""" Update the UI locale from user profile """
if form.vars.language:
session.s3.language = form.vars.language
return
# -----------------------------------------------------------------------------
def user():
""" Auth functions based on arg. See gluon/tools.py """
auth.settings.on_failed_authorization = URL(f="error")
_table_user = auth.settings.table_user
if request.args and request.args(0) == "profile":
#_table_user.organisation.writable = False
_table_user.utc_offset.readable = True
_table_user.utc_offset.writable = True
# If we have an opt_in and some post_vars then update the opt_in value
if deployment_settings.get_auth_opt_in_to_email() and request.post_vars:
opt_list = deployment_settings.get_auth_opt_in_team_list()
removed = []
selected = []
for opt_in in opt_list:
if opt_in in request.post_vars:
selected.append(opt_in)
else:
removed.append(opt_in)
ptable = s3db.pr_person
putable = s3db.pr_person_user
query = (putable.user_id == request.post_vars.id) & \
(putable.pe_id == ptable.pe_id)
person_id = db(query).select(ptable.id, limitby=(0, 1)).first().id
db(ptable.id == person_id).update(opt_in = selected)
g_table = s3db["pr_group"]
gm_table = s3db["pr_group_membership"]
# Remove them from any team they are a member of in the removed list
for team in removed:
query = (g_table.name == team) & \
(gm_table.group_id == g_table.id) & \
(gm_table.person_id == person_id)
gm_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
if gm_rec:
db(gm_table.id == gm_rec.id).delete()
# Add them to the team (if they are not already a team member)
for team in selected:
query = (g_table.name == team) & \
(gm_table.group_id == g_table.id) & \
(gm_table.person_id == person_id)
gm_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
if not gm_rec:
query = (g_table.name == team)
team_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
# if the team doesn't exist then add it
                    if team_rec is None:
team_id = g_table.insert(name = team, group_type = 5)
else:
team_id = team_rec.id
gm_table.insert(group_id = team_id,
person_id = person_id)
auth.settings.profile_onaccept = user_profile_onaccept
self_registration = deployment_settings.get_security_self_registration()
login_form = register_form = None
if request.args and request.args(0) == "login":
auth.messages.submit_button = T("Login")
form = auth()
login_form = form
if s3.crud.submit_style:
form[0][-1][1][0]["_class"] = s3.crud.submit_style
elif request.args and request.args(0) == "register":
if not self_registration:
session.error = T("Registration not permitted")
redirect(URL(f="index"))
if deployment_settings.get_terms_of_service():
auth.messages.submit_button = T("I accept. Create my account.")
else:
auth.messages.submit_button = T("Register")
# Default the profile language to the one currently active
_table_user.language.default = T.accepted_language
form = auth()
register_form = form
# Add client-side validation
s3_register_validation()
elif request.args and request.args(0) == "change_password":
form = auth()
elif request.args and request.args(0) == "profile":
if deployment_settings.get_auth_openid():
form = DIV(form, openid_login_form.list_user_openids())
else:
form = auth()
# add an opt in clause to receive emails depending on the deployment settings
if deployment_settings.get_auth_opt_in_to_email():
ptable = s3db.pr_person
ltable = s3db.pr_person_user
opt_list = deployment_settings.get_auth_opt_in_team_list()
query = (ltable.user_id == form.record.id) & \
(ltable.pe_id == ptable.pe_id)
db_opt_in_list = db(query).select(ptable.opt_in, limitby=(0, 1)).first().opt_in
for opt_in in opt_list:
            field_id = "%s_opt_in_%s" % (_table_user, opt_in)
if opt_in in db_opt_in_list:
checked = "selected"
else:
checked = None
form[0].insert(-1,
TR(TD(LABEL("Receive %s updates:" % opt_in,
_for="opt_in",
_id=field_id + SQLFORM.ID_LABEL_SUFFIX),
_class="w2p_fl"),
INPUT(_name=opt_in, _id=field_id, _type="checkbox", _checked=checked),
_id=field_id + SQLFORM.ID_ROW_SUFFIX))
else:
# Retrieve Password
form = auth()
# Use Custom Ext views
# Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
#if request.args(0) == "login":
# response.title = T("Login")
# response.view = "auth/login.html"
return dict(form=form,
login_form=login_form,
register_form=register_form,
self_registration=self_registration)
# -----------------------------------------------------------------------------
def facebook():
""" Login using Facebook """
if not auth.settings.facebook:
redirect(URL(f="user", args=request.args, vars=request.vars))
auth.settings.login_form = s3base.FaceBookAccount()
form = auth()
return dict(form=form)
# -----------------------------------------------------------------------------
def google():
""" Login using Google """
if not auth.settings.google:
redirect(URL(f="user", args=request.args, vars=request.vars))
auth.settings.login_form = s3base.GooglePlusAccount()
form = auth()
return dict(form=form)
# -----------------------------------------------------------------------------
def source():
""" RESTful CRUD controller """
return s3_rest_controller("s3", "source")
# -----------------------------------------------------------------------------
# About Sahana
def apath(path=""):
""" Application path """
import os
from gluon.fileutils import up
opath = up(request.folder)
#TODO: This path manipulation is very OS specific.
while path[:3] == "../": opath, path=up(opath), path[3:]
return os.path.join(opath,path).replace("\\", "/")
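# For example, apath("../VERSION") climbs one level above the applications
# folder, resolving to the VERSION file in the web2py root.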
def about():
"""
The About page provides details on the software dependencies and
versions available to this instance of Sahana Eden.
@ToDo: Avoid relying on Command Line tools which may not be in path
- pull back info from Python modules instead?
"""
    import os
    import string
    import subprocess
    import sys
python_version = sys.version
web2py_version = open(apath("../VERSION"), "r").read()[8:]
sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
# Database
sqlite_version = None
mysql_version = None
mysqldb_version = None
pgsql_version = None
psycopg_version = None
if db_string[0].find("sqlite") != -1:
try:
import sqlite3
#sqlite_version = (subprocess.Popen(["sqlite3", "-version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()
sqlite_version = sqlite3.version
except:
sqlite_version = T("Unknown")
elif db_string[0].find("mysql") != -1:
try:
mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
except:
mysql_version = T("Unknown")
try:
import MySQLdb
mysqldb_version = MySQLdb.__revision__
except:
mysqldb_version = T("Not installed or incorrectly configured.")
else:
# Postgres
try:
pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
pgsql_version = string.split(pgsql_reply)[2]
except:
pgsql_version = T("Unknown")
try:
import psycopg2
psycopg_version = psycopg2.__version__
except:
psycopg_version = T("Not installed or incorrectly configured.")
# Libraries
try:
import reportlab
reportlab_version = reportlab.Version
except:
reportlab_version = T("Not installed or incorrectly configured.")
try:
import xlwt
xlwt_version = xlwt.__VERSION__
except:
xlwt_version = T("Not installed or incorrectly configured.")
return dict(
python_version=python_version,
sahana_version=sahana_version,
web2py_version=web2py_version,
sqlite_version=sqlite_version,
mysql_version=mysql_version,
mysqldb_version=mysqldb_version,
pgsql_version=pgsql_version,
psycopg_version=psycopg_version,
reportlab_version=reportlab_version,
xlwt_version=xlwt_version
)
# -----------------------------------------------------------------------------
def help():
""" Custom View """
response.title = T("Help")
return dict()
# -----------------------------------------------------------------------------
def contact():
"""
Give the user options to contact the site admins.
Either:
An internal Support Requests database
or:
Custom View
"""
if auth.is_logged_in() and deployment_settings.has_module("support"):
# Provide an internal Support Requests ticketing system.
prefix = "support"
resourcename = "req"
tablename = "%s_%s" % (prefix, resourcename)
table = s3db[tablename]
# Pre-processor
def prep(r):
if r.interactive:
# Only Admins should be able to update ticket status
status = table.status
actions = table.actions
if not auth.s3_has_role(ADMIN):
status.writable = False
actions.writable = False
if r.method != "update":
status.readable = False
status.writable = False
actions.readable = False
actions.writable = False
return True
response.s3.prep = prep
output = s3_rest_controller(prefix, resourcename)
return output
else:
# Default: Simple Custom View
response.title = T("Contact us")
return dict()
# END =========================================================================
|
Scenario: You’ve created a new tool, language, or framework and want to share it – either online, with as many people as possible, or in a live demo or workshop. You need people to start working with it and learning it as fast as possible, while spending as little time as possible on setup.
One of the biggest challenges when creating a new development tool is precisely getting it into the hands of users. Chances are your target audience will have to set up the necessary environment and dependencies.
Languages like Golang, NodeJS, and Ruby offer simplified installers and, in the case of Golang and Ruby, in-browser code sandboxes that let people get a taste of the language and possibly follow a tutorial online.
But to get a real sense of the tool, they need it on their machines. So you either sacrifice the flexibility of working locally, or the immediacy of starting to code right away. That is, unless you take the hassle out of setting up an environment altogether – that’s where Sandbox comes in.
Let’s try a Ruby application. No, you don’t need Ruby installed; it will run inside a container. No, you don’t need Docker installed; it comes with Sandbox, which includes everything you need to get going, right there in the Git repo. Automatically, no hassle.
And your app is running! Run ./sbox status to find out the IP Sandbox is running at, and your app will be at [Sandbox IP]:31001. The app updates as you change the code in /src, so feel free to experiment with it.
You can read more about how Sandbox does this here.
As a user, you don’t need to go through the hassle of installing a tool to know if it’s right for you – it just works. And because the workflow files are clear and simple to read, you can get a sense of what needed to happen to make the application run just by glancing at them.
As a developer, your tool can be that easy to share, and that easy to get running on someone else’s machine, with no issues and very little time spent. That means more time and user patience left to try out your creation, and a lower barrier of entry overall!
|
#!/usr/bin/python
# -*- coding: iso8859-1 -*-
## (c)2004 Timo Reunanen <parker _et_ wolfenstein _dit_ org>
import time
import re
_exact=r'''
^
(?P<hour> \d{1,2}) ## hour
[:.]
(?P<min> \d{2}) ## minutes
(?:
[:.]
(?P<sec> \d{2}) ## seconds (optional)
)?
$
'''
_add=r'''
^
[+]
(?: ## hour
(?P<hour> \d+)h ## syntax: 1234h
)? ## optional
\s*
(?: ## minutes
(?P<min> \d+)m ## syntax: 1234m
)? ## optional
\s*
(?: ## seconds
(?P<sec> \d+)s? ## syntax: 1234s or 1234
)? ## optional
$
'''
exactRe=re.compile(_exact, re.VERBOSE | re.MULTILINE | re.I)
addRe=re.compile(_add, re.VERBOSE | re.MULTILINE | re.I)
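## Examples the two patterns accept:
##   exactRe: '8:30', '8.30', '23:59:59'  (a wall-clock time, today or tomorrow)
##   addRe:   '+1h 30m', '+90s', '+45'    (an offset from now)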
class TimeException(Exception): pass
def convert(s):
s=s.strip()
m=exactRe.match(s)
if m:
tm=time.time()
year, mon, mday, hour, min, sec, wday, yday, isdst = time.localtime(tm)
hour=int(m.group('hour'))
min=int(m.group('min'))
sec=int(m.group('sec') or '00')
ret=time.mktime( (year, mon, mday, hour, min, sec, wday, yday, isdst) )
while ret < tm:
ret += 86400
return ret
m=addRe.match(s)
if m:
hour=int(m.group('hour') or '0')
min=int(m.group('min') or '0')
sec=int(m.group('sec') or '0')
addSecs=hour*3600 + min*60 + sec
return time.time()+addSecs
raise TimeException('Invalid syntax')
if __name__=='__main__':
year, mon, mday, hour, min, sec, wday, yday, isdst = time.localtime()
print (hour, min, sec)
print time.time()-time.mktime(time.localtime())
print convert('11.23')-time.time()
|
View Coupon Codes for About A Burning Fire from Any Online Store at ShopUpUnion Store. Buy Online Fabulous About A Burning Fire with Quick Shipping to United States, Canada, United Kingdom, Australia, and Worldwide at ShopUpUnion Store.
ShopUpUnion.com Recommends Doberman Pinscher Bone, 1008 Wipers and Embossed Weave - Shop Online for Sale Prices at Amazon, Walmart, Ebay. Enjoy shopping for discount prices? Want to compare prices on gift ideas online before making a purchase? Looking for ways to save money? All of this and more is possible at ShopUpUnion.com - your premier source for online comparison shopping: compare prices on 300,000+ products from leading stores Bronners, WayFair, Target, OverStock, Etsy, Macys, Lowes, HomeDepot, BalsamHill, TreeClassics, BestBuy, and others.
ShopUpUnion is the leading price comparison service for discount shopping: start searching & find the best About A Burning Fire at cheapest prices. Bring your gift ideas to life with our fun tools and exclusive styles.
|
#!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
from Sire.IO import *
from Sire.Mol import *
from Sire.CAS import *
from Sire.System import *
from Sire.Move import *
from Sire.MM import *
from Sire.FF import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Maths import *
from Sire.Base import *
from Sire.Qt import *
from Sire.ID import *
from Sire.Config import *
import Sire.Stream
from Sire.Tools import Parameter, resolveParameters
from Sire.Tools.WaterChanger import convertTip3PtoTip4P
###################################
# Parameters used by this module #
###################################
dobonds = Parameter("move bonds", True, """Whether or not to move the ligand's bonds""")
doangles = Parameter("move angles", True, """Whether or not to move the ligand's angles""")
dodihedrals = Parameter("move dihedrals", True, """Whether or not to move the ligand's dihedrals""")
water_model = Parameter("water model", None,
"""The water model to use. Note, by default the water model is read from
the protein and water crd/top files. If you want to force a change
in water model, then set it here, e.g. if you are loading a TIP3P box
but want to use TIP4P, then set this parameter to "tip4p".""")
BASE_DIHEDRALH_FLEX = Parameter("h dihedral flex", 30*degrees, "Base dihedral rotation for H")
BASE_DIHEDRAL_FLEX = Parameter("dihedral flex", 20*degrees, "Base dihedral rotation")
BASE_ANGLE_FLEX = Parameter("angle flex", 0.25*degrees, "Base angle rotation")
BASE_BOND_FLEX = Parameter("bond flex", 0.025*angstroms, "Base bond stretch amount")
BASE_TRANSLATION = Parameter("translation", 0.75*angstroms, "Base translation delta amount")
BASE_ROTATION = Parameter("rotation", 30*degrees, "Base rigid body rotation")
BASE_MAXVAR = Parameter("maxvar", 10, "Maximum number of degrees of freedom to move at once")
BASE_MAXVAR_B = Parameter("maxvar bonds", 2, "Maximum number of bonds to move at once")
BASE_MAXVAR_A = Parameter("maxvar angles", 4, "Maximum number of angles to move at once")
BASE_MAXVAR_D = Parameter("maxvar dihedrals", 4, "Maximum number of dihedrals to move at once")
###################################
def getResidueNames(molecule):
nres = molecule.nResidues()
resnams = []
for i in range(0, nres):
resnams.append( str( molecule.residue(ResIdx(i)).name().value()).upper() )
return resnams
class NamingScheme:
def __init__(self):
self._protein_names = ["GLH", "ILE", "GLN", "GLY", "GLU",
"CYS", "HIS", "HID", "SER", "LYS",
"LYN", "PRO", "CYX", "HIE", "ASH",
"ASN", "HIP", "VAL", "THR", "ASP",
"TRP", "PHE", "ALA", "MET", "LEU",
"ARG", "TYR", "NME", "ACE"]
self._water_names = [ "WAT", "T3P", "T4P", "HOH" ]
self._ion_names = [ "NA+", "Na+", "CA+", "Ca+", "CAL", "CL-", "Cl-" ]
self._solute_names = [ "LIG" ]
def proteinsGroupName(self):
return MGName("protein")
def solutesGroupName(self):
return MGName("solute")
def solventsGroupName(self):
return MGName("solvent")
def watersGroupName(self):
return MGName("water")
def ionsGroupName(self):
return MGName("ions")
def allMoleculesGroupName(self):
return MGName("all")
def fixedMoleculesGroupName(self):
return MGName("fixed_molecules")
def boundaryMoleculesGroupName(self):
return MGName("boundary_molecules")
def mobileProteinSidechainsGroupName(self):
return MGName("protein_sidechains")
def mobileProteinBackbonesGroupName(self):
return MGName("protein_backbones")
def mobileSolutesGroupName(self):
return MGName("mobile_solutes")
def mobileSolventsGroupName(self):
return MGName("mobile_solvents")
def addProteinResidueName(self, name):
self._protein_names.append( name.upper() )
def addWaterResidueName(self, name):
self._water_names.append( name.upper() )
def addSoluteResidueName(self, name):
self._solute_names.append( name.upper() )
def addIonResidueName(self, name):
self._ion_names.append( name.upper() )
def proteinResidueNames(self):
return self._protein_names
def waterResidueNames(self):
return self._water_names
def soluteResidueNames(self):
return self._solute_names
def ionResidueNames(self):
return self._ion_names
def setProteinResidueNames(self, names):
self._protein_names = []
for name in names:
self.addProteinResidueName(name)
def setWaterResidueNames(self, names):
self._water_names = []
for name in names:
self.addWaterResidueName(name)
    def setSoluteResidueNames(self, names):
self._solute_names = []
for name in names:
self.addSoluteResidueName(name)
    def setIonResidueNames(self, names):
self._ion_names = []
for name in names:
self.addIonResidueName(name)
def _isType(self, molecule, names, max_residues = None):
try:
resnams = getResidueNames(molecule)
except:
resnams = molecule
if max_residues:
if len(resnams) > max_residues:
return False
for resnam in resnams:
if resnam in names:
return True
try:
if str(molecule.name().value()).upper() in names:
return True
else:
return False
except:
return False
def isProtein(self, molecule):
return self._isType(molecule, self._protein_names)
def isWater(self, molecule):
return self._isType(molecule, self._water_names, 1)
def isIon(self, molecule):
return self._isType(molecule, self._ion_names, 1)
def isSolute(self, molecule):
return self._isType(molecule, self._solute_names)
def findMolecule(system, molname):
molecules = system.molecules()
molname = molname.upper()
for molnum in molecules.molNums():
molecule = molecules[molnum][0].molecule()
if str(molecule.name().value()).upper() == molname:
return molecule
resnams = getResidueNames(molecule)
for resnam in resnams:
if resnam == molname:
return molecule
return None
def addMoleculeToSystem(molecule, system, naming_scheme = NamingScheme()):
"""This function adds the passed molecule to the passed system
using the passed naming scheme to assign the molecule to the
correct molecule group"""
resnams = getResidueNames(molecule)
system.add(molecule, MGName(naming_scheme.allMoleculesGroupName().value()))
if naming_scheme.isSolute(resnams):
system.add(molecule, MGName(naming_scheme.solutesGroupName().value()))
elif naming_scheme.isProtein(resnams):
system.add(molecule, MGName(naming_scheme.proteinsGroupName().value()))
elif naming_scheme.isWater(resnams):
system.add(molecule, MGName(naming_scheme.watersGroupName().value()))
system.add(molecule, MGName(naming_scheme.solventsGroupName().value()))
elif naming_scheme.isIon(resnams):
system.add(molecule, MGName(naming_scheme.ionsGroupName().value()))
system.add(molecule, MGName(naming_scheme.solventsGroupName().value()))
elif molecule.nResidues() == 1:
system.add(molecule, MGName(naming_scheme.solventsGroupName().value()))
else:
system.add(molecule, MGName(naming_scheme.solutesGroupName().value()))
def createSystemFrom(molecules, space, system_name, naming_scheme = NamingScheme()):
"""Create a new System from the passed molecules and space,
sorting the molecules into different molecule groups based on the
passed naming scheme"""
system = System(system_name)
# If requested, change the water model for all water molecules
if water_model.val == "tip4p":
molnums = molecules.molNums()
new_molecules = Molecules()
print("Forcing all water molecules to use the %s water model..." % water_model.val)
print("Converting %d molecules..." % len(molnums))
i = 0
for molnum in molnums:
molecule = molecules[molnum].molecule()
if i % 100 == 0:
print("%d" % i)
sys.stdout.flush()
elif i % 10 == 0:
print(".", end=' ')
sys.stdout.flush()
i += 1
if molecule.nAtoms() == 3:
# this could be a TIP3P water
                resname = str(molecule.residue().name().value()).lower()
if resname == "wat" or resname == "t3p":
new_molecule = convertTip3PtoTip4P(molecule)
if new_molecule:
molecule = new_molecule
new_molecules.add(molecule)
print("%d" % i)
molecules = new_molecules
nmols = molecules.nMolecules()
print("Number of molecules == %s" % nmols)
print("System space == %s" % space)
if nmols == 0:
return system
print("Assigning molecules to molecule groups...")
solute_group = MoleculeGroup(naming_scheme.solutesGroupName().value())
protein_group = MoleculeGroup(naming_scheme.proteinsGroupName().value())
solvent_group = MoleculeGroup(naming_scheme.solventsGroupName().value())
water_group = MoleculeGroup(naming_scheme.watersGroupName().value())
ion_group = MoleculeGroup(naming_scheme.ionsGroupName().value())
all_group = MoleculeGroup(naming_scheme.allMoleculesGroupName().value())
# The all molecules group has all of the molecules
all_group.add(molecules)
system.add(all_group)
# Run through each molecule and decide what type it is...
molnums = molecules.molNums()
molnums.sort()
central_molecule = None
solutes = []
proteins = []
solvents = []
waters = []
ions = []
for molnum in molnums:
molecule = molecules[molnum].molecule()
resnams = getResidueNames(molecule)
if naming_scheme.isSolute(resnams):
solutes.append(molecule)
elif naming_scheme.isProtein(resnams):
proteins.append(molecule)
elif naming_scheme.isWater(resnams):
waters.append(molecule)
elif naming_scheme.isIon(resnams):
ions.append(molecule)
elif molecule.nResidues() == 1:
solvents.append(molecule)
else:
solutes.append(molecule)
# Ok - we have now divided everything up into groups
for solute in solutes:
solute_group.add(solute)
for protein in proteins:
protein_group.add(protein)
for water in waters:
solvent_group.add(water)
water_group.add(water)
for solvent in solvents:
solvent_group.add(solvent)
for ion in ions:
solvent_group.add(ion)
ion_group.add(ion)
if solute_group.nMolecules() > 0:
system.add(solute_group)
if protein_group.nMolecules() > 0:
system.add(protein_group)
if solvent_group.nMolecules() > 0:
system.add(solvent_group)
if water_group.nMolecules() > 0:
system.add(water_group)
if ion_group.nMolecules() > 0:
system.add(ion_group)
print("Number of solute molecules == %s" % solute_group.nMolecules())
print("Number of protein molecules == %s" % protein_group.nMolecules())
print("Number of ions == %s" % ion_group.nMolecules())
print("Number of water molecules == %s" % water_group.nMolecules())
print("Number of solvent molecules == %s" % solvent_group.nMolecules())
print("(solvent group is waters + ions + unidentified single-residue molecules)")
system.setProperty("space", space)
system.add( SpaceWrapper( Vector(0), all_group ) )
system.applyConstraints()
print("Returning the constructed system")
return system
def createSystem(top_file, crd_file, naming_scheme = NamingScheme()):
"""Create a new System from the molecules read in from the passed amber
topology and coordinate files. This sorts the molecules into different
molecule groups based on the passed naming scheme"""
system = MoleculeParser.read(top_file,crd_file)
# Load all of the molecules and their parameters from
# the topology and coordinate files
print("Loading the molecules from the files \"%s\" and \"%s\"..." % \
(crd_file, top_file))
return createSystemFrom(system[MGIdx(0)], system.property("space"), top_file, naming_scheme)
def centerSystem(system, molecule):
print("Setting the origin of the system to the center of molecule %s (%s)..." % (molecule, molecule.number()))
center = molecule.evaluate().centerOfMass()
print("This requires translating everything by %s..." % (-center))
moved_mols = Molecules()
for molnum in system.molNums():
molecule = system[molnum][0].molecule()
molecule = molecule.move().translate(-center).commit()
moved_mols.add(molecule)
system.update(moved_mols)
return system
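# Heuristic move sizes: the more atoms a solute has, the smaller its rigid-body
# translation, and the larger its bounding sphere, the smaller its rotation,
# so that moves of large molecules stay acceptably small.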
def guessTranslation( solute ):
natoms = solute.nAtoms()
return (BASE_TRANSLATION.val) / ( natoms / 5 + 1)
def guessRotation( solute ):
natoms = solute.nAtoms()
sphere_radius = solute.evaluate().boundingSphere().radius()
return (BASE_ROTATION.val) / ( sphere_radius ** 2)
def generateFlexibility(solute):
connectivity = solute.property('connectivity')
all_bonds = connectivity.getBonds()
all_angles = connectivity.getAngles()
all_dihedrals = connectivity.getDihedrals()
flexibility = Flexibility(solute)
flexibility.setRotation( guessRotation(solute) )
flexibility.setTranslation( guessTranslation(solute) )
try:
flexibility.setMaximumVar( BASE_MAXVAR.val )
except:
flexibility.setMaximumBondVar( BASE_MAXVAR_B.val )
flexibility.setMaximumAngleVar( BASE_MAXVAR_A.val )
flexibility.setMaximumDihedralVar( BASE_MAXVAR_D.val )
# Redundant torsions are discarded according to the following algorithm
# 1) Do not sample a torsion at0-at1-at2-at3 if a variable torsion has
# already been defined around at1-at2 or at2-at1.
# 2) Do not sample a torsion if it would break a ring
#
if dodihedrals.val:
var_dihedrals = []
for dihedral in all_dihedrals:
#print dihedral
tomove = True
# print dihedral
at0 = dihedral.atom0()
at1 = dihedral.atom1()
at2 = dihedral.atom2()
at3 = dihedral.atom3()
            # See if one of the variable dihedrals
            # already rotates around the same torsion
for vardih in var_dihedrals:
if ( ( at1 == vardih.atom1() and at2 == vardih.atom2() ) or
( at2 == vardih.atom1() and at1 == vardih.atom2() ) ):
# Yes so will not move this torsion
tomove = False
break
            # If still a candidate, see if a rotation around this dihedral would break a ring
if tomove:
try:
dihbond = BondID(at1, at2)
#print dihbond
solute.move().change(dihbond,1*degrees)
except UserWarning as error:
                    # extract the type of the error
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
# print "This dof would move a ring and is therefore skipped"
tomove = False
else:
# re-throw the exception
raise error
if tomove:
# Find out how many atoms would move
#print dihedral
gr0, gr1 = connectivity.split(at1, at2)
ngr0 = gr0.nSelected()
ngr1 = gr1.nSelected()
if (ngr0 <= ngr1):
smallgroup = gr0
else:
smallgroup = gr1
smallgroup = smallgroup.subtract(at1)
smallgroup = smallgroup.subtract(at2)
factor = smallgroup.nSelected()
flexibility.add(dihedral, BASE_DIHEDRAL_FLEX.val/factor)
var_dihedrals.append(dihedral)
# And the angles ....
if doangles.val:
moved_atoms = []
for angle in all_angles:
# print angle
at0 = angle.atom0()
at2 = angle.atom2()
# Do not sample that dof if an existing dof would already move this atom
if ( ( at0 in moved_atoms) and (at2 in moved_atoms) ):
continue
# Test if the angle breaks a ring, if so do not sample it
try:
solute.move().change(angle,1*degrees)
except UserWarning as error:
                # extract the type of the error
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
# print "This dof would move a ring and is therefore skipped"
continue
else:
# re-throw the exception
raise error
gr0, gr1 = connectivity.split(at0, angle.atom1(), at2)
ngr0 = gr0.nSelected()
ngr1 = gr1.nSelected()
if (ngr0 <= ngr1):
smallgroup = gr0
else:
smallgroup = gr1
factor = smallgroup.nSelected()
flexibility.add(angle, BASE_ANGLE_FLEX.val/factor)
if at0 not in moved_atoms:
moved_atoms.append(at0)
if at2 not in moved_atoms:
moved_atoms.append(at2)
# And the bonds...
if dobonds.val:
for bond in all_bonds:
try:
solute.move().change(bond,1*angstrom)
except UserWarning as error:
                # extract the type of the error
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
# print "This dof would move a ring and is therefore skipped"
continue
else:
# re-throw the exception
raise error
gr0, gr1 = connectivity.split(bond.atom0(), bond.atom1() )
ngr0 = gr0.nSelected()
ngr1 = gr1.nSelected()
if (ngr0 <= ngr1):
smallgroup = gr0
else:
smallgroup = gr1
factor = smallgroup.nSelected()
flexibility.add(bond, BASE_BOND_FLEX.val/factor)
return flexibility
def getCoordGroup(atoms, coords_property="coordinates"):
coords = []
for i in range(0, atoms.count()):
atom = atoms[i]
coords.append(atom.property(coords_property))
return CoordGroup(coords)
def getAtomNearCOG( molecule ):
mol_centre = molecule.evaluate().center()
mindist = 99999.0
for x in range(0, molecule.nAtoms()):
atom = molecule.atoms()[x]
at_coords = atom.property('coordinates')
dist = Vector().distance2(at_coords, mol_centre)
if dist < mindist:
mindist = dist
nearest_atom = atom
return nearest_atom
def addFlexibility(system, reflection_center=None, reflection_radius=None, \
naming_scheme=NamingScheme()):
print("Adding flexibility to the system...")
# create a group for all of the fixed molecules and residues
fixed_group = MoleculeGroup( naming_scheme.fixedMoleculesGroupName().value() )
# create a group for the fixed residues that are bonded to the mobile residues
boundary_group = MoleculeGroup( naming_scheme.boundaryMoleculesGroupName().value() )
if reflection_center is None or reflection_radius is None:
print ("No reflection radius or reflection molecule specified, so moving all "
"molecules and residues in the system.")
reflection_radius = None
reflection_center = None
else:
print(("Only moving molecules/residues that are within a distance %s A "
"of the point %s.") % (reflection_radius.value(), reflection_center))
system.setProperty("reflection center", AtomCoords(CoordGroup(1,reflection_center)))
system.setProperty("reflection sphere radius", VariantProperty(reflection_radius.to(angstroms)))
# fit the protein z-matrix templates to all of the protein molecules and add the mobile
# residues to the mobile_sc_group and mobile_bb_group for mobile sidechains and backbones
if naming_scheme.proteinsGroupName() in system.mgNames():
protein_group = system[naming_scheme.proteinsGroupName()]
# create a zmatrix maker that will be used to build the z-matrices for each protein molecule
zmat_maker = ZmatrixMaker()
zmat_maker.loadTemplates( os.path.join(parameter_directory, "amber.zmatrices") )
# now create the molecule groups that hold the flexible side chains and flexible backbone groups
mobile_sc_group = MoleculeGroup(naming_scheme.mobileProteinSidechainsGroupName().value())
mobile_bb_group = MoleculeGroup(naming_scheme.mobileProteinBackbonesGroupName().value())
# the extra atoms moved as part of a backbone move
hn_atoms = AtomName("N", CaseInsensitive) * AtomName("H", CaseInsensitive) * \
AtomName("HN", CaseInsensitive) * AtomName("HN1", CaseInsensitive) * \
AtomName("HN2", CaseInsensitive) * AtomName("HN3", CaseInsensitive)
# loop over each protein molecule
for molnum in protein_group.molNums():
protein_mol = protein_group[molnum].molecule()
print("Applying residue templates for protein %s" % molnum)
protein_mol = zmat_maker.applyTemplates(protein_mol)
system.update(protein_mol)
if reflection_radius:
space = Cartesian()
mobile_resnums = []
# only move side chains within "sc_radius" and backbones within "bb_radius" of the ligand molecule
print("Looking for which residues are within the reflection sphere...")
for i in range(0, protein_mol.nResidues()):
res = protein_mol.residue( ResIdx(i) )
distance = space.minimumDistance(CoordGroup(1,reflection_center), getCoordGroup(res.atoms()))
if distance < reflection_radius.value():
# add the residue to the mobile sidechains group
mobile_sc_group.add(res)
mobile_resnums.append( res.number() )
# now add the atoms needed from the residue to the mobile backbones group
atoms = protein_mol.select(ResIdx(i)).selection()
# for the backbone move to work, the residue must contain
# AtomName("CA", CaseInsensitive) and AtomName("N", CaseInsensitive) )
has_backbone = False
try:
if atoms.selected( AtomName("CA", CaseInsensitive) ) and \
atoms.selected( AtomName("N", CaseInsensitive) ):
has_backbone = True
except:
pass
if has_backbone:
if i < (protein_mol.nResidues()-1):
try:
atoms.deselect( hn_atoms + ResIdx(i) )
except:
pass
if i > 0:
try:
atoms.select( hn_atoms + ResIdx(i+1) )
except:
pass
mobile_bb_group.add( PartialMolecule(protein_mol, atoms) )
else:
print("Not moving backbone of %s as it doesn't contain atoms N or CA" % protein_mol.residue(ResIdx(i)))
# now loop over all of the residues and work out which ones are fixed, and which ones
# are bonded to fixed residues
connectivity = protein_mol.property("connectivity")
for i in range(0, protein_mol.nResidues()):
res = protein_mol.residue( ResIdx(i) )
if not res.number() in mobile_resnums:
# is this residue bonded to any of the mobile residues? If so, then it is a boundary residue
is_boundary = False
for bonded_res in connectivity.connectionsTo( res.number() ):
bonded_resnum = protein_mol.residue(bonded_res).number()
if bonded_resnum in mobile_resnums:
is_boundary = True
break
if is_boundary:
boundary_group.add(res)
else:
fixed_group.add(res)
else:
# assume that the backbone and side chains of all residues are flexible
for i in range(0, protein_mol.nResidues()):
res = protein_mol.residue( ResIdx(i) )
mobile_sc_group.add(res)
atoms = protein_mol.select(ResIdx(i)).selection()
if i < (protein_mol.nResidues()-1):
try:
atoms.deselect( hn_atoms + ResIdx(i) )
except:
pass
if i > 0:
try:
atoms.select( hn_atoms + ResIdx(i+1) )
except:
pass
mobile_bb_group.add( PartialMolecule(protein_mol, atoms) )
if mobile_sc_group.nMolecules() > 0:
system.add(mobile_sc_group)
if mobile_bb_group.nMolecules() > 0:
system.add(mobile_bb_group)
print("The number of residues with flexible sidechains equals %s" % mobile_sc_group.nViews())
print("The number of residues with flexible backbones equals %s" % mobile_bb_group.nViews())
print("The number of boundary residues equals %s" % boundary_group.nViews())
print("The number of fixed residues equals %s" % fixed_group.nViews())
    # add all of the mobile solute molecules to the mobile_solute_group and auto-generate
    # the z-matrices of all of the mobile solutes
if naming_scheme.solutesGroupName() in system.mgNames():
solute_group = system[naming_scheme.solutesGroupName()]
mobile_solute_group = MoleculeGroup( naming_scheme.mobileSolutesGroupName().value() )
# store the average solute translation and rotation deltas
avg_trans_delta = 0
avg_rot_delta = 0
for molnum in solute_group.molNums():
solute_mol = solute_group[molnum].molecule()
move_solute = True
# Only move the solute if it is within the sphere cutoff of the ligand (if a ligand and solvent
# radius have been specified...)
if reflection_radius:
move_solute = (Vector.distance(reflection_center, \
solute_mol.evaluate().centerOfMass()) < reflection_radius.value())
if move_solute:
print("\nAuto-detecting the flexible degrees of freedom for solute %s" % molnum)
# auto-generate the flexibility - bonds, angles and dihedrals
flexibility = generateFlexibility(solute_mol)
solute_mol = solute_mol.edit().setProperty("flexibility", flexibility).commit()
print("\nFlexibility of solute %s equals:" % molnum)
flex = solute_mol.property("flexibility")
print(flex)
avg_trans_delta += flex.translation().to(angstrom)
avg_rot_delta += flex.rotation().to(degrees)
system.update(solute_mol)
mobile_solute_group.add(solute_mol)
else:
print("Not moving solute %s as it is outside the spherical solvent cutoff of the ligand." % solute_mol)
fixed_group.add(solute_mol)
if mobile_solute_group.nMolecules() > 0:
system.add(mobile_solute_group)
system.setProperty("average solute translation delta", \
VariantProperty(avg_trans_delta / mobile_solute_group.nMolecules()))
system.setProperty("average solute rotation delta", \
VariantProperty(avg_rot_delta / mobile_solute_group.nMolecules()))
print("\nNumber of mobile solute molecules equals %s" % mobile_solute_group.nMolecules())
# add all of the mobile solvent molecules to the mobile_solvent_group
if naming_scheme.solventsGroupName() in system.mgNames():
solvent_group = system[ naming_scheme.solventsGroupName() ]
mobile_solvent_group = MoleculeGroup( naming_scheme.mobileSolventsGroupName().value() )
print("Adding flexibility to the solvent...")
if reflection_radius:
for molnum in solvent_group.molNums():
solvent_mol = solvent_group[molnum]
if Vector.distance(reflection_center, solvent_mol.evaluate().centerOfMass()) < reflection_radius.value():
mobile_solvent_group.add(solvent_mol)
else:
fixed_group.add(solvent_mol)
else:
mobile_solvent_group.add( solvent_group.molecules() )
if mobile_solvent_group.nMolecules() > 0:
system.add(mobile_solvent_group)
print("\nNumber of mobile solvent molecules equals %s" % mobile_solvent_group.nMolecules())
# All finished - just need to add in the fixed and boundary groups
if fixed_group.nMolecules() > 0:
system.add(fixed_group)
if boundary_group.nMolecules() > 0:
system.add(boundary_group)
print("\nNumber of fixed (or partially fixed) molecules equals %s" % fixed_group.nMolecules())
return system
def printGroupInfo(system, group_name):
try:
group = system[MGName(group_name)]
print("%s : nMolecules() == %d" % (str(group), group.nMolecules()))
except:
print("There is no group called \"%s\"" % group_name)
|
VSCO Film presets are designed to replicate “analogue” looks while fitting into your existing digital workflow. Are they any good?
A new software utility makes it easier than ever for photographers and educators to share their studio lighting setups.
Elixxier Software have announced a cheaper version of their set.a.light 3D studio simulation program.
Enter your photographs in our competition for a chance to win a copy of set.a.light 3D STUDIO worth €140! This round’s theme is ‘behind the scenes’.
|
import re
import numpy as np
import tensorflow as tf
from model import *
class headGAN(object):
def __init__(self, d_net, g_net, wordvec, article, title, wd_list):
print 'GAN headline'
self.wordvec = wordvec
self.article = article
self.title = title
self.d_net = d_net
self.g_net = g_net
self.wd_list = wd_list
self.sess = tf.InteractiveSession()
self.build_model()
self.train_model()
def build_model(self):
art_len = self.article.shape[1]
ttl_len = self.title.shape[1]
wd_dim = self.wordvec.shape[1]
self.in_art = tf.placeholder(tf.int32,[None, art_len])
self.in_ttl = tf.placeholder(tf.int32,[None, ttl_len])
r_art = tf.nn.embedding_lookup(self.wordvec, self.in_art)
r_art = tf.expand_dims(r_art, -1)
r_art = tf.transpose(r_art, [0,2,1,3], name='r_art')
self.r_art = r_art
self.r_ttl = tf.nn.embedding_lookup(self.wordvec, self.in_ttl)
self.r_ttl = tf.expand_dims(self.r_ttl, -1)
self.r_ttl = tf.transpose(self.r_ttl, [0,2,1,3], name='r_ttl')
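        # After the embedding lookup, expand_dims and transpose, both the
        # article and title tensors have shape (batch, wd_dim, seq_len, 1),
        # i.e. the word-vector dimension runs along axis 1.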
self.f_ttl = self.g_net(input_data=r_art)
r_logits = self.d_net(input_data=self.r_ttl, reuse=False)
f_logits = self.d_net(input_data=self.f_ttl, reuse=True)
r_ent = tf.nn.sigmoid(r_logits)
f_ent = tf.nn.sigmoid(f_logits)
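        # Adversarial objectives built from the mean sigmoid scores: the
        # discriminator minimises mean(real) - mean(fake), while the
        # generator minimises the mean score of its generated titles.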
self.d_loss = tf.reduce_mean(r_ent) - tf.reduce_mean(f_ent)
self.g_loss = tf.reduce_mean(f_ent, name='g_loss')
self.g_var = self.g_net.vars
self.d_var = self.d_net.vars
self.opt_method = 'rmsprop'
if self.opt_method == 'rmsprop':
self.d_opt = tf.train.RMSPropOptimizer(0.01,decay=0.9).minimize(self.d_loss,var_list=self.d_var)
self.g_opt = tf.train.RMSPropOptimizer(0.01,decay=0.9).minimize(self.g_loss,var_list=self.g_var)
else:
self.d_opt = tf.train.AdamOptimizer().minimize(self.d_loss,var_list=self.d_var)
self.g_opt = tf.train.AdamOptimizer().minimize(self.g_loss,var_list=self.g_var)
def train_model(self):
self.sess.run(tf.global_variables_initializer())
k = 0
while k < 20000:
feed_ = {self.in_art: [self.article[k]], self.in_ttl: [self.title[k]]}
self.sess.run(self.g_opt, feed_dict=feed_)
self.sess.run(self.d_opt, feed_dict=feed_)
if k%1000 == 0:
print k, self.sess.run([self.d_loss, self.g_loss], feed_dict=feed_)
tt = self.sess.run(self.f_ttl, feed_dict=feed_)
xx = self.sess.run(self.r_art[0,:,:,0], feed_dict=feed_)
                dd = self.sess.run(tf.matmul(tt[0,0], xx))
idx = np.argmax(dd, axis=1).tolist()
for kk in idx:
print self.wd_list[self.article[k,kk]],
print
for kk in self.title[k]:
print self.wd_list[kk],
print
for kk in self.article[k]:
print self.wd_list[kk],
print
print
k += 1
if k == 19999:
k = 0
|
Vacation Bible School The Metropolitan United Methodist Church, located at 657 Best St., invites children, young adults and adults to join us for our Vacation Bible School, which will be held the week of August 13 – August 17, 2018, from 5:30 pm – 8 pm. A light dinner will also be served, and applications are now being taken. There is no charge, so please come and join us. For an application or more information please contact the church office at (716) 891-5652. Pastor Angela R. Stewart is Pastor.
Church & Community Cook Out The Metropolitan UM Church Community Cook Out will be held Saturday, August 18 from 12 Noon – 6 p.m. (Herman Street will be blocked off from Best to North Streets). All are invited to an afternoon of FREE food, games, Zippedy the Clown, face painting and lots of fun. Don’t forget your chairs! Questions? Please contact the church office at (716) 891-5652. Pastor Angela R. Stewart is Pastor.
Starting on Monday, August 13 through Friday, August 17, Vacation Bible School will be held at Mt. Olive Baptist Church, located at 701 East Delavan Avenue, where the Rev. Dr. William Gillison is Pastor. Breakfast and lunch will be served each day. The program will begin at 8:00 am and end at 1:00 pm. Children as young as 2 years old and up to teenagers may attend. Registration is available at the church office Monday through Friday, 9:00 am – 5:00 pm (895-7494). NOTE: On Monday and Tuesday, August 13th & 14th, Pastor Gillison will be teaching at 6:30 pm. Come out and be blessed.
Zion Missionary Baptist Church announces: **Prayer Line every Monday at 8 a.m. **Watch us on Spectrum Channel 20 every Saturday at 3 p.m. & Sunday at 9:30 p.m. **Women's Ministry Line Dance Fellowship – FREE **Social Media Paint Night $35. Zion Missionary Baptist Church is located at 179 E. Ferry St. For more information contact the church office at 886-1362. Rev. C.M. Jenkins II is Host Pastor.
Buffalo NY Community Prepares to HONOR A KING with City Wide Celebrations!
|
'''
Created on Aug 17, 2010
@author: joel
'''
import numpy
from pypyr.mesh import Basis, ElementFinder, ElementQuadrature, BoundaryQuadrature
import itertools as it
from pypyr.timing import *
def processIndices(basis, boundarytags):
""" Given a basis (a collection of elements) and a set of boundaries, extract the internal and external degrees of freedom
returns:
    I: a sparse matrix that maps the local degrees of freedom for each element to their global indices
boundaries: a map of tag->DegreeSet, which can be used to evaluate all the degrees on each boundary
internalidx: ids of the internal degrees of freedom
"""
import scipy.sparse as ss
indices = basis.getIndices()
n = basis.elementfactory.index # = max(indices)+1
I = ss.csr_matrix((numpy.ones_like(indices), indices, range(0,len(indices)+1)))
idxflag = numpy.ones(n, dtype=bool)
boundaries = {}
for tag in boundarytags:
bdy = basis.getBoundary(tag)
boundaries[tag] = bdy
if bdy: idxflag[bdy.indices] = False
internalidx = numpy.nonzero(idxflag)[0]
return I, boundaries, internalidx
def blockInnerProducts(quadweights, leftvalsiter, rightvalsiter, leftI, rightI):
""" Evaluate the inner product matrix
returns a sparse matrix equal to leftI.transpose * L.transpose * quadweights * R * rightI
where L and R are block diagonal matrices whose blocks are given by the iterables, leftvalsiter and rightvalsiter
    If the left or right vals have more than 2 dimensions, the extra dimensions are multiplied and summed (tensor-contracted),
    with broadcasting as necessary, i.e. this is an inner product - it can't be used for a more general multiplication
"""
import scipy.sparse as ss
data = []
idx = []
ip = [0]
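    # Build the block-sparse structure one element at a time: 'data' holds the
    # dense inner-product block for element e, 'idx' its block-column index,
    # and 'ip' the running block-row pointer consumed by bsr_matrix below.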
for e, (leftvals, rightvals, weights) in enumerate(it.izip(leftvalsiter, rightvalsiter, quadweights)):
if len(weights):
lvs = len(leftvals.shape)
rvs = len(rightvals.shape)
vs = max(lvs,rvs)
leftvals = leftvals.reshape(leftvals.shape + (1,)*(vs - lvs))
rightvals = rightvals.reshape(rightvals.shape + (1,)*(vs - rvs))
lvw = leftvals * weights.reshape((-1,) + (1,)*(vs-1))
# print lvw.shape, rightvals.shape
data.append(numpy.tensordot(lvw, rightvals, ([0]+range(2,vs), [0]+range(2,vs))))
idx.append(e)
ip.append(len(idx))
# print e, idx, ip
V = ss.bsr_matrix((data, idx, ip),dtype=float, shape=(leftI.shape[0],rightI.shape[0]))
return leftI.transpose() * V * rightI
class System(object):
""" A System contains everything that's need to construct stiffness matrices and load vectors.
This is an abstract-ish class see SymmetricSystem and AsymmetricSystem for concrete implementations.
Parameters:
quadrule: a tuple of quadrature points and weights on the reference pyramid
meshevents: A function that produces mesh events
leftbasis, rightbasis: see pypyr.mesh.Basis
leftindexinfo, rightindexinfo: see processIndices
"""
def __init__(self, quadrule, meshevents, leftbasis, rightbasis, leftindexinfo, rightindexinfo):
self.elementfinder = meshevents(ElementFinder())
self.elementinfo = meshevents(ElementQuadrature())
self.boundaryquad = meshevents(BoundaryQuadrature())
self.refquadpoints, refweights = quadrule
self.quadweights = list(self.elementinfo.getWeights(self.refquadpoints, refweights))
self.leftbasis = leftbasis
self.rightbasis = rightbasis
self.leftI, self.leftbdys, self.leftintidx = leftindexinfo
self.rightI, self.rightbdys, self.rightintidx = rightindexinfo
def _transposeinplace(self):
""" Transpose this object """
self.leftbasis, self.rightbasis = self.rightbasis, self.leftbasis
self.leftI, self.rightI = self.rightI, self.leftI
self.leftbdys, self.rightbdys = self.rightbdys, self.leftbdys
self.leftintidx, self.rightintidx = self.rightintidx, self.leftintidx
return self
def processSystem(self, leftvalsiter, rightvalsiter):
""" Construct the (non-boundary aware) stiffness matrix """
return blockInnerProducts(self.quadweights, leftvalsiter, rightvalsiter, self.leftI, self.rightI)
def processBoundary(self, sysmat, tagtog):
""" Split the stiffness matrix into the internal and external parts. Evaluate boundary data
sysmat: system matrix (which will come from processSystem()).
tagtog: dictionary of functions to evaluate on the boundar(y|ies)
returns:
internalSystem: S[I,I] where I is the internal degrees
tagtoBoundarySystem: tag->S[I,E[tag]] where E[tag] gives the indices of the external degrees
tagtogvals: g[tag] evaluated at the degrees of freedom associated with boundary "tag".
Somewhat inefficient if there's a significant proportion of dofs on the boundary """
SI = sysmat[self.leftintidx, :]
internalSystem = SI[:,self.rightintidx]
tagtogvals = {}
tagtoBoundarySystem = {}
for tag, bdy in self.rightbdys.iteritems():
tagtogvals[tag] = bdy.evaluatedofs(tagtog[tag])
tagtoBoundarySystem[tag] = SI[:,bdy.indices]
return internalSystem, tagtoBoundarySystem, tagtogvals
def loadVector(self, f, deriv=False):
""" Calculate the load vector for the internal shape functions """
testvalsiter = self.leftbasis.getElementValues(self.refquadpoints, deriv)
fvalsiter = it.imap(f, self.elementinfo.getQuadPoints(self.refquadpoints))
return blockInnerProducts(self.quadweights, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
def boundaryLoad(self, tagtog, squarequad, trianglequad, deriv=False):
""" Calculate the load vector based on a boundary integral, e.g. for Dirichlet data in the dual formulation of the mixed laplacian"""
tagtogsys = {}
for tag, g in tagtog.iteritems():
x,w,n = zip(*self.boundaryquad.getQuadratures(tag, squarequad, trianglequad))
# print map(g,x,n)
# print map(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
fvalsiter = it.imap(g, x, n)
            testvalsiter = it.imap(lambda e,p: 0 if len(p) == 0 else e.values(p), self.leftbasis.elements, x)
tagtogsys[tag] = blockInnerProducts(w, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
return tagtogsys
def evaluate(self, points, U, tagtoG = {}, deriv=False):
""" Evaluate a solution given by the coefficients of the internal degrees, U, at specified points.
tagtoG should be the coefficients for the external degrees"""
UG = numpy.zeros(self.rightbasis.elementfactory.index)
UG[self.rightintidx] = U
for tag, G in tagtoG.iteritems():
UG[self.rightbdys[tag].indices] = G
etop = self.elementfinder.elementPointMap(points)
UGvals = numpy.zeros((len(points), self.rightbasis.elements[0].ncpts))
for e, pids in zip(self.rightbasis.elements, etop):
if len(pids):
evals = e.derivs(points[pids]) if deriv else e.values(points[pids])
UGvals[pids] += numpy.tensordot(evals, UG[e.indices], ([1],[0]))
return UGvals
class SymmetricSystem(System):
""" A symmetric system"""
def __init__(self, elements, quadrule, meshevents, boundarytags):
self.basis = Basis(elements)
meshevents(self.basis)
indexinfo = processIndices(self.basis, boundarytags)
System.__init__(self, quadrule, meshevents, self.basis, self.basis, indexinfo, indexinfo)
self.elements = elements
def systemMatrix(self, deriv):
return super(SymmetricSystem, self).processSystem(*it.tee(self.basis.getElementValues(self.refquadpoints,deriv), 2))
class AsymmetricSystem(System):
""" An Asymmetric system"""
def __init__(self, leftelements, rightelements, quadrule, meshevents, leftboundarytags, rightboundarytags):
leftbasis = Basis(leftelements)
rightbasis = Basis(rightelements)
meshevents(leftbasis)
meshevents(rightbasis)
super(AsymmetricSystem, self).__init__(quadrule, meshevents, leftbasis, rightbasis, processIndices(leftbasis, leftboundarytags), processIndices(rightbasis, rightboundarytags))
def systemMatrix(self, leftderiv, rightderiv):
leftvals = self.leftbasis.getElementValues(self.refquadpoints, leftderiv)
rightvals = self.rightbasis.getElementValues(self.refquadpoints, rightderiv)
return super(AsymmetricSystem, self).processSystem(leftvals, rightvals)
def transpose(self):
import copy
return copy.copy(self)._transposeinplace()
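# Minimal usage sketch (hypothetical element/mesh objects, for orientation only):
#   system = SymmetricSystem(elements, quadrule, meshevents, {"dirichlet"})
#   S = system.systemMatrix(deriv=True)
#   SI, Sbdy, gvals = system.processBoundary(S, {"dirichlet": g})
#   F = system.loadVector(f)
# then solve SI * U = F - Sbdy["dirichlet"] * gvals["dirichlet"] and call
# system.evaluate(points, U, gvals) to inspect the solution.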
|
This is a very pretty instrument, with a somewhat simpler architecture than we've seen from DSI. For example, only a single LFO, and far fewer modulation destinations than we've become used to.
Was sort of surprised that they only added one oscillator and reduced the modulation capabilities, but I suppose that was to get it closer to "knob per function" and reduce the menu diving.
Also, a happy surprise to see Dave using VCO's again. Was worried that he had moved on completely. This fits nicely among his other synths.
Glad to see that even when doing sort of a revisit, DSI still manages to make something new instead of just a carbon copy of something old.
Great job Dave and company!
if you read this and have a gearslutz account, please tell the guys there that the stepping in the filter is most likely a feature to get the frequencies in harmony with the scales via keytrack. This implementation is the best there is, because cutoff-knob-tweakeritis should be done via the modwheel, and then there are no steps.
While it's not something that bothers me particularly, it isn't possible to sweep the filter with the mod wheel on the P6. Mod wheel is hardwired to LFO amount I believe. I think it can be done by aftertouch though.
That somehow just seems strange, that it's not possible to sweep the filter with the mod wheel... that's as basic a routing as there is for ANY analog synth, and I'm pretty sure I could do that on my SCI Prophet 600 – no way to test this currently, as it's about to go to Wine Country for a stint at "synth rehab" and a new GIGI Processor, et al.
Maybe the P6 does not step if the LFO sweeps the filter? I am also thinking that, with the P6 being a no-menu basic analog poly synth, once it's shipping one would think it would be pretty much "feature complete". I do not recall ever having to apply a new update – performed using cassette tape – to the SCI synths of earlier days once they shipped.
However, with the digital effects section, I can totally see where perhaps some tweaks could be needed. But again, I'm thinking that once the P6 ships, DSI/Sequential will be working on the needed updates/bug fixes for the P2/P12... but what do I know? All speculation until the P6 is actually truly out in the wild.
Anyone else notice a pattern in last few years in that *every time* DSI is starting to ship a new instrument, the "other forum" ALWAYS goes down for a significant amount of time as it's been now? Same for P12 and P2. I am also seeing people trying to sell their P12's on Ebay USA for the price of a new P2...a significant loss, more than likely to fund the P6. Seeing some incredibly low P2 prices used, of course.
namnibor wrote: Anyone else notice a pattern in last few years in that *every time* DSI is starting to ship a new instrument, the "other forum" ALWAYS goes down for a significant amount of time as it's been now? Same for P12 and P2. I am also seeing people trying to sell their P12's on Ebay USA for the price of a new P2...a significant loss, more than likely to fund the P6. Seeing some incredibly low P2 prices used, of course.
• A hosting that is not severely limited on disk space for the database, so that it does not crash every so often.
• Somewhere with an administrator that can be reached and who is on top of things. Not that this forum has seen a lot of traffic yet but so far chysn has done an excellent job.
• A place with a moderator that does not work for DSI, so that there is no yelling of "conspiracy" as soon as a thread is moderated – and also someone (or rather more than one) who has the time and dedication to keep the forum moderated. Unfortunately, discussions at the other forum now turn very ugly quickly, and as soon as Chris does any moderation he gets attacked by people accusing him of bias, which leads to basically no moderation at all.
Fuseball wrote: While it's not something that bothers me particularly, it isn't possible to sweep the filter with the mod wheel on the P6. Mod wheel is hardwired to LFO amount I believe. I think it can be done by aftertouch though.
It's amazing what a kerfuffle this filter cutoff quantization "issue" is causing on the forums. As Carson pointed out and should be obvious to owners of previous DSI instruments, the cutoff frequency snaps to musically relevant steps when fine-tuning the frequency knob. If you use the potentiometer as a performance control, that is sweep it around while playing, the software smoothly interpolates the values and provides a response free from stepping.
There will be a point where the line is drawn between "sweeping" and "adjusting", so if you turn it too slowly you may notice stepping. I don't have a Prophet-6 at hand and I'm currently away from my Prophet '08, so I can't verify the precise point at which this happens, but I don't remember this ever being an issue for me on my P'08.
If too many people keep complaining I'm sure that a different interpolation scheme could easily be implemented. Though I'm certain that DSI did think this whole thing through and arrived at the current state for good reason.
Strange Quark Star wrote: It's amazing what a kerfuffle this filter cutoff quantization "issue" is causing on the forums. As Carson pointed out and should be obvious to owners of previous DSI instruments, the cutoff frequency snaps to musically relevant steps when fine-tuning the frequency knob. If you use the potentiometer as a performance control, that is sweep it around while playing, the software smoothly interpolates the values and provides a response free from stepping.
Agreed. On all the DSI-synths I own (Mopho x4, Tempest and Pro 2) I can get very smooth filter sweeps thanks to how it is smoothened despite being 7-bit values. The closest thing to stepping I hear is when it sweeps through harmonics but that also happens on my Sub Phatty (that uses 14-bit values) and the MS20 Mini that is not digitalized at all so for me it works really well. I think many confuse the harmonics with filter stepping.
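Out of curiosity I sketched what that kind of smoothing could look like in principle – a toy Python model, purely illustrative and nothing to do with DSI's actual firmware: small "fine-tuning" moves snap to coarse steps, while fast sweeps get interpolated so you hear a smooth ramp.
def smooth_knob(readings, step=4, fast_delta=2):
    # readings: successive raw 7-bit (0-127) knob values
    out = []
    prev = readings[0]
    for raw in readings[1:]:
        if abs(raw - prev) < fast_delta:
            # fine-tuning: snap to the nearest coarse step
            out.append(round(raw / step) * step)
        else:
            # sweeping: insert a midpoint so the ramp sounds smooth
            out.append((prev + raw) / 2.0)
            out.append(raw)
        prev = raw
    return out
print(smooth_knob([60, 61, 75, 90]))  # slow nudge snaps; fast sweep ramps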
|
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
"""
A custom centrality script for the C. elegans network.
"""
import networkx
# Load the neurons and their interconnections if needed.
if not any(network.objects):
execfile('Connectivity.py')
def progressCallback(fraction_complete = None):
return updateProgress('Calculating centrality...', fraction_complete)
# Compute the centrality of each node in the graph. (uncomment one of the following)
#centralities = networkx.degree_centrality(network.simplifiedGraph())
#centralities = networkx.closeness_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
centralities = networkx.betweenness_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
#centralities = networkx.load_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
if any(centralities):
# Compute the maximum centrality so we can normalize.
maxCentrality = max(centralities.itervalues())
# Alter the visualization of each node based on its centrality.
objectCentralities = {}
for node, centrality in centralities.iteritems():
object = network.objectWithId(node)
objectCentralities[object] = centrality / maxCentrality
diameter = 0.001 + objectCentralities[object] * 0.029
display.setVisibleSize(object, [diameter] * 3)
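    # Fade each connection according to the mean centrality of the cells it links.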
for synapse in network.synapses():
centrality = objectCentralities[synapse.preSynapticNeurite.neuron()]
for partner in synapse.postSynapticPartners:
centrality += objectCentralities[partner if isinstance(partner, Neuron) else partner.neuron()]
centrality /= 1 + len(synapse.postSynapticPartners)
display.setVisibleOpacity(synapse, centrality)
for gapJunction in network.gapJunctions():
centrality = 0.0
for neurite in gapJunction.neurites():
centrality += objectCentralities[neurite.neuron()]
centrality /= 2.0
display.setVisibleOpacity(gapJunction, centrality)
for innervation in network.innervations():
centrality = (objectCentralities[innervation.neurite.neuron()] + objectCentralities[innervation.muscle]) / 2.0
display.setVisibleOpacity(innervation, centrality)
|
The Tampa Bay Advanced Manufacturing & Robotics Center (AMRoC) Fab Lab, a program of the nonprofit Foundation for Community Driven Innovation (FCDI), is a public Fab Lab dedicated to building community capacity, empowering individuals through creative self-expression and bridging the manufacturing and industry talent gap through robust project-based engineering education and training.
At AMRoC, located in University Mall in Tampa, people of all ages will explore science, technology, engineering, manufacturing and related education and career training and certification opportunities, with access to tools like CNC machines, laser cutters, 3D printers, hand and power tools, and more. Youth in FIRST robotics teams and other STEM competition programs can design, build, practice and meet at AMRoC Fab Lab and local entrepreneurs can access rapid prototyping and micro-manufacturing.
AMRoC Fab Lab is grateful to University Mall, Hillsborough County Public Library Cooperative and our many fine community partners.
Member ARM Institute, Urban Manufacturing Alliance, DigiFab Alliance, Fab Lab Network.
Students involved in FIRST, VEX, and other competitive STEM programs usually have so much fun that they don’t realize how much they are learning – which is the whole point. AMRoC Fab Lab will provide the resources needed by local FIRST teams and others, ages 6 through 18 to design, build, test and practice with their own competitive robots on regulation competition fields. They not only prepare for competition day, but they learn hands-on basic engineering skill sets along the way.
AMRoC Fab Lab will provide our local business community with the “best-of-the-best” talent, skilled in today’s advanced manufacturing and design processes. Working with a variety of local government, community agencies and corporate partners, AMRoC Fab Lab programming will include a variety of educational, and workforce training and certification opportunities across technical, industrial and manufacturing fields utilizing hands on, project based and competency focused teaching and learning programs.
AMRoC Fab Lab's "fun while learning" atmosphere has a wonderful additional benefit to the community: real manufacturing provides the skills training environment that propels learners past the world of academic knowledge and into real-world skills, where they learn that sometimes what looks good on paper, or on a computer screen, may not actually be the best choice in the physical world of production. AMRoC's design includes onsite high-quality, limited-volume manufacturing capabilities.
|
import io
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
version = '1.1.0'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
# Connection.set_tlsext_host_name (>=0.13)
'PyOpenSSL>=0.13',
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
'six>=1.9.0', # needed for python_2_unicode_compatible
]
testing_requires = [
'coverage>=4.0',
'pytest-cache>=1.0',
'pytest-cov',
'flake8',
'pytest-flake8>=0.5',
'pytest>=2.8.0',
'mock',
]
# env markers cause problems with older pip and setuptools
if sys.version_info < (2, 7):
install_requires.extend([
'argparse',
'ordereddict',
])
dev_extras = [
'pytest',
'tox',
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
with io.open('README.rst', encoding='UTF-8') as f:
long_description = f.read()
class PyTest(TestCommand):
user_options = []
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
setup(
name='josepy',
version=version,
description='JOSE protocol implementation in Python',
long_description=long_description,
url='https://github.com/certbot/josepy',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(where='src'),
package_dir={'': 'src'},
include_package_data=True,
install_requires=install_requires,
extras_require={
'dev': dev_extras,
'docs': docs_extras,
'tests': testing_requires,
},
entry_points={
'console_scripts': [
'jws = josepy.jws:CLI.run',
],
},
tests_require=testing_requires,
cmdclass={
'test': PyTest,
},
)
|
Bears 2' cones of pink florets all summer. Vigor and longevity improve if deadheaded. Space 12" apart. Prefers sandy soil with sharp drainage. Very drought tolerant.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on April 10, 2016
@author: chensi
'''
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import sessionmaker
from one_analyse import one_engine
from threadpool import ThreadPool, makeRequests
from one_analyse.lib.db.ormtables import OneORM
import urllib.request
import json
import codecs
from one_analyse.lib.db.ormtables import PeriodRecord
DBScopedSession = scoped_session(
sessionmaker(
autoflush=False,
autocommit=False,
bind=one_engine
)
)
code_url_format = "http://1.163.com/code/get.do?gid=424&period=%s&cid=%s"
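# Fetch the code list for one (period, user) pair from 1.163.com and store it,
# comma-joined, on the matching PeriodRecord row.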
def request_code(period_id, user_id, rid, num):
url = code_url_format % (period_id, user_id)
response = urllib.request.urlopen(url)
result = json.load(codecs.getreader("utf-8")(response))
codes = result['result']['list'][0]['code']
session = DBScopedSession()
session.query(PeriodRecord).\
filter(PeriodRecord.rid==rid).\
filter(PeriodRecord.period_id==period_id).\
filter(PeriodRecord.user_id==user_id).\
update({'codes':','.join(codes)})
session.commit()
DBScopedSession.close()
if __name__ == '__main__':
db2 = OneORM()
db2.InitDB()
# Initialize thread pool
tp = ThreadPool(50)
# Get all period records from database
session = DBScopedSession()
period_records = db2.get_period_records(session)
DBScopedSession.remove()
data = []
for r in period_records:
param_list = [r.period_id, r.user_id, r.rid, r.num]
data.append((param_list, []))
requests = makeRequests(request_code, data)
[tp.putRequest(req) for req in requests]
tp.wait()
|
Answer Center Telephone Answering Service can Easily Assist Personal Injury Law Professionals with their Customers' Service Requests.
Without a doubt, things happen that involve law enforcement at all hours of the day and night. Take advantage of Answer Center Answering Service for Personal Injury Offices' 24/7 coverage to give your firm access to potential new clients when your office personnel are busy helping current clients, or after regular office hours.
|
import sys, os
############################################
def barPlot(xdata,ydata,xtitle,ytitle,maintitle,outname,barcol='rgb(0,102,51)',plotcol='#e1e1ea',bgcol='#fffae6',w=1000,h=1000,bmar=150):
data = [go.Bar(x=xdata,y=ydata,marker=dict(color=barcol),opacity=0.6)];
layout = go.Layout(
autosize=False,
width=w,
height=h,
paper_bgcolor=bgcol,
plot_bgcolor=plotcol,
title=maintitle,
titlefont=dict(
family="Arial, sans-serif",
size=30,
),
xaxis=dict(
title=xtitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373"
),
),
yaxis=dict(
title=ytitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373"
)
)
);
fig = go.Figure(data=data, layout=layout);
plot(fig, filename=outname);
############################################
def scatterPlot(xdata,ydata,xtitle,ytitle,maintitle,outname,barcol='rgb(0,102,51)',plotcol='#e1e1ea',bgcol='#fffae6',w=1000,h=500,bmar=150):
data = [go.Scatter(x=xdata,y=ydata,mode='markers',opacity=0.6)];
layout = go.Layout(
autosize=False,
width=w,
height=h,
margin=go.Margin(
l=70,
r=20,
            b=bmar,
t=70,
pad=0
),
paper_bgcolor=bgcol,
plot_bgcolor=plotcol,
title=maintitle,
titlefont=dict(
family="Arial, sans-serif",
size=30,
),
xaxis=dict(
title=xtitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373",
),
tickangle=90
),
yaxis=dict(
title=ytitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373"
)
)
);
fig = go.Figure(data=data, layout=layout);
plot(fig, filename=outname);
############################################
if len(sys.argv) != 3 or "-h" in sys.argv:
print("\n# This is a beta version of this script and may be buggy.")
print("# Usage: grampa_plot.py [input file] [output file]");
print("# ---> [input file] must be a grampa output (_out.txt) file.")
print("# ---> [output file] will be an html file with your plot.\n")
sys.exit();
infilename = sys.argv[1];
outfilename = sys.argv[2];
if outfilename[len(outfilename)-5:] != ".html":
outfilename += ".html";
try:
from plotly.offline import plot
import plotly.graph_objs as go
import plotly.plotly as py
except:
sys.exit("Missing some of the required modules (plotly)")
# Option parsing and import of plot libraries if no errors.
score_dict = {};
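# Parse the grampa output: the species-tree row ("ST") keeps its score in the
# 4th column; every other row is keyed on its H1-H2 node pair (columns 2-3)
# with the score in the 5th column.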
for line in open(infilename):
if line[0] == "#" or "The" in line or "Score" in line:
continue;
line = line.strip().split("\t");
if line[0] == "ST":
score_dict[line[0]] = int(line[3]);
else:
score_dict[line[1] + "-" + line[2]] = int(line[4]);
sorted_keys = sorted(score_dict, key=score_dict.get)
sorted_vals = [];
max_len = -999;
for key in sorted_keys:
sorted_vals.append(score_dict[key]);
if len(key) > max_len:
max_len = len(key);
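# Scale the bottom margin with the longest node label so the rotated x-axis labels fit.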
bot_margin = max_len * 15;
scatterPlot(sorted_keys,sorted_vals,"H1-H2 Node", "Score", "GRAMPA Results: " + infilename, outfilename, bmar=bot_margin);
|
Ember’s Journey takes place deep within a dark territory, where the only light is the flame from Ember. Using only that light, you make your way through the levels by solving puzzles.
No in-app purchases. Pay once and play.
On our site you can easily download the Ember’s Journey .apk! All without registration or sending SMS!
|
#!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
from database import Database
from householdmember_manager import HouseholdMemberManager
from householdasset_manager import HouseholdAssetManager
from householdincome_crop_manager import HouseholdCropIncomeManager
from householdincome_livestock_manager import HouseholdLivestockIncomeManager
from householdincome_wildfoods_manager import HouseholdWildfoodsIncomeManager
from householdincome_transfers_manager import HouseholdTransfersIncomeManager
from householdincome_employment_manager import HouseholdEmploymentIncomeManager
from householdcharacteristicmanager import HouseholdCharacteristicManager
class Household(HouseholdMemberManager, HouseholdCharacteristicManager, HouseholdAssetManager, HouseholdCropIncomeManager, HouseholdLivestockIncomeManager, HouseholdWildfoodsIncomeManager, HouseholdTransfersIncomeManager, HouseholdEmploymentIncomeManager):
def __init__(self, pid, hhid=0, householdname="", dateofcollection=""):
self.pid = pid
self.hhid = hhid
        if ( householdname == "" and dateofcollection == "" ):
            if ( not self.getHouseholdDetails() ):
                self.householdname = ""
                self.dateofcollection = ""
else:
self.setData(householdname, dateofcollection)
def getHouseholdDetails(self):
database = Database()
database.open()
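        # Note: building SQL with string interpolation is vulnerable to SQL
        # injection; if the Database wrapper supports it, parameterized
        # queries would be the safer choice.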
query = "SELECT householdname, dateofcollection FROM households WHERE pid=%s AND hhid=%s " % ( self.pid, self.hhid )
rows = database.execSelectQuery( query )
num = len(rows)
if (num != 0):
exists = True
for row in rows:
self.householdname = row[0]
self.dateofcollection = row[1]
else:
exists = False
database.close()
return exists
def setData(self, householdname, dateofcollection):
database = Database()
database.open()
query = '''INSERT INTO households(hhid,pid,dateofcollection,householdname)
VALUES(%s,%s, '%s', '%s')''' % (self.hhid, self.pid, dateofcollection, householdname)
# execute query
database.execUpdateQuery( query )
database.close()
# update household attributes
self.householdname = householdname
self.dateofcollection = dateofcollection
def editData(self, hhid, householdname, dateofcollection):
database = Database()
database.open()
query = '''UPDATE households SET hhid=%s, dateofcollection='%s', householdname='%s'
WHERE hhid=%s AND pid=%s''' % (hhid, dateofcollection, householdname, self.hhid, self.pid)
# execute query
database.execUpdateQuery( query )
database.close()
# update household attributes
self.hhid = hhid
self.householdname = householdname
self.dateofcollection = dateofcollection
def getProjectID(self):
return self.pid
def getHouseholdID(self):
return self.hhid
def getHouseholdName(self):
return self.householdname
def getDateOfCollection(self):
return self.dateofcollection
|
"""whsales URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from whsales import views
urlpatterns = [
url(r'^$', views.listings_panel, name='listings_panel'),
url(r'^admin/', admin.site.urls),
url(r'^list$', views.listings_list, name='listings_list'),
url(r'^list/me$', views.my_listings, name='user_listings'),
url(r'^listing/(\d*)$', views.listing_view, name='listing_view'),
url(r'^listing/(\d*)/sell$', views.mark_sold, name='mark_sold'),
url(r'^listing/(\d*)/delete$', views.delete_listing, name='delete_listing'),
url(r'^sold$', views.listings_sold, name='listings_sold'),
url(r'^tokens$', views.select_token, name='select_token'),
url(r'^tokens/add$', views.add_token, name='add_token'),
url(r'^tokens/(\d*)/post$', views.post_listing, name='add_listing'),
url(r'^search$', views.search, name='search'),
url(r'^about$', views.about, name='about'),
url(r'^wanted$', views.wanted_panel, name='wanted_panel'),
url(r'^wanted/add$', views.add_wanted, name='add_wanted'),
url(r'^wanted/list$', views.wanted_list, name='wanted_list'),
url(r'^wanted/list/me$', views.my_wanted, name='user_wanted'),
url(r'^wanted/(\d*)$', views.wanted_view, name='wanted_view'),
url(r'^wanted/(\d*)/fulfill$', views.fulfill_wanted, name='mark_fulfilled'),
url(r'^wanted/(\d*)/delete$', views.delete_wanted, name='delete_wanted'),
url(r'^core/', include('singlecharcore.urls')),
]
|
Madeira Classic Rayon #40 - 5500YD / 5000M Large Cone - Color 1440 - Cobblestone. Madeira's Classic #40 is the standard size embroidery thread and is ideal for most designs. Made from 100% viscose rayon, it is perfectly suited for embroidery on high speed multi-head machines. It is known worldwide for its high tensile strength and glossy luster.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import sys
import wx
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, 'CanvasFrame', size=(550, 350))
        self.SetBackgroundColour(wx.WHITE)
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizerAndFit(self.sizer)
self.add_toolbar()
def add_toolbar(self):
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
self.SetToolBar(self.toolbar)
else:
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.toolbar.update()
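    # Note: OnPaint is never bound to wx.EVT_PAINT here; FigureCanvasWxAgg
    # handles its own paint events, so this handler is effectively unused.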
def OnPaint(self, event):
self.canvas.draw()
class App(wx.App):
def OnInit(self):
'''Create the main window and insert the custom frame'''
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
|
Give the gift of choice with a Pea in a Pod eGift Card.
Simply select the card amount and you will receive an automatic email upon payment that includes a unique voucher code and value.
Note: you must forward the email to your loved one, as we currently cannot send the voucher directly to the recipient's email address.
eGift Cards have no expiry date and can be used both online and in our Collingwood store.
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'UserLog'
db.delete_table(u'phonelog_userlog')
# Deleting model 'Log'
db.delete_table(u'phonelog_log')
# Adding model 'UserEntry'
db.create_table(u'phonelog_userentry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('i', self.gf('django.db.models.fields.IntegerField')()),
('user_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
('sync_token', self.gf('django.db.models.fields.CharField')(max_length=50)),
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
))
db.send_create_signal(u'phonelog', ['UserEntry'])
# Adding unique constraint on 'UserEntry', fields ['xform_id', 'i']
db.create_unique(u'phonelog_userentry', ['xform_id', 'i'])
# Adding model 'DeviceReportEntry'
db.create_table(u'phonelog_devicereportentry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
('i', self.gf('django.db.models.fields.IntegerField')()),
('msg', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('domain', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('device_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('app_version', self.gf('django.db.models.fields.TextField')()),
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
))
db.send_create_signal(u'phonelog', ['DeviceReportEntry'])
# Adding unique constraint on 'DeviceReportEntry', fields ['xform_id', 'i']
db.create_unique(u'phonelog_devicereportentry', ['xform_id', 'i'])
def backwards(self, orm):
# Removing unique constraint on 'DeviceReportEntry', fields ['xform_id', 'i']
db.delete_unique(u'phonelog_devicereportentry', ['xform_id', 'i'])
# Removing unique constraint on 'UserEntry', fields ['xform_id', 'i']
db.delete_unique(u'phonelog_userentry', ['xform_id', 'i'])
# Adding model 'UserLog'
db.create_table(u'phonelog_userlog', (
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('user_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
('sync_token', self.gf('django.db.models.fields.CharField')(max_length=50)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'phonelog', ['UserLog'])
# Adding model 'Log'
db.create_table(u'phonelog_log', (
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('msg', self.gf('django.db.models.fields.TextField')()),
('domain', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('app_version', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('id', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
('device_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
))
db.send_create_signal(u'phonelog', ['Log'])
# Deleting model 'UserEntry'
db.delete_table(u'phonelog_userentry')
# Deleting model 'DeviceReportEntry'
db.delete_table(u'phonelog_devicereportentry')
models = {
u'phonelog.devicereportentry': {
'Meta': {'unique_together': "[('xform_id', 'i')]", 'object_name': 'DeviceReportEntry'},
'app_version': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'device_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'i': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'xform_id': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'phonelog.userentry': {
'Meta': {'unique_together': "[('xform_id', 'i')]", 'object_name': 'UserEntry'},
'i': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sync_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'xform_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['phonelog']
|
Excellent dental care from the very beginning!
by Nicole Valencia | Jul 15, 2016 | General Dentistry
Dr. Visger and his staff at Trail Ridge Dental tell us that pediatric oral care happens in two places: 1) in our office, and 2) with you at home. Sugary juices and snacks are the most common cause of cavities in toddlers, so parents need to counteract these munchies with good oral hygiene and preventive care.
Starting at twelve months – or generally with the emergence of the first tooth – the American Dental Association (ADA) recommends that children visit the dentist for “well baby” checkups. This not only ensures that your baby’s dental health is monitored from the beginning, but starts a lifelong familiarity with going to the dentist.
At home, make sure your child gently brushes their teeth twice a day. Pacifiers and thumb sucking should be discouraged, as these can lead to misaligned teeth that will require braces to correct at an older age.
Together we can make sure your child starts off with excellent dental care and on the right toddling foot! Call us today to make your baby’s first appointment.
|
import sys, os, itertools, operator
import PIL.Image
def grouper(iterable, n):
args = [iter(iterable)] * n
return itertools.izip(*args)
def from_file(filepath):
def check_world_file(filepath):
dir, filename = os.path.split(filepath)
filename, filetype = os.path.splitext(filename)
        # find world file extension based on filetype
        # (os.path.splitext returns the extension with a leading dot)
        filetype = filetype.lstrip(".").lower()
        if filetype in ("tif","tiff","geotiff"):
            ext = ".tfw"
        elif filetype in ("jpg","jpeg"):
            ext = ".jgw"
        elif filetype == "png":
            ext = ".pgw"
        elif filetype == "bmp":
            ext = ".bpw"
        elif filetype == "gif":
            ext = ".gfw"
else:
return None
        worldfilepath = os.path.join(dir, filename + ext)
        if os.path.lexists(worldfilepath):
            worldfile = open(worldfilepath, "r")
            # note that the params are arranged slightly differently
            # ...in the world file from the usual affine a,b,c,d,e,f
            # ...so we have to rearrange their sequence later
            # check out http://en.wikipedia.org/wiki/World_file
            # ...very useful here and for affine transforms in general
            xscale,yskew,xskew,yscale,xoff,yoff = (float(param) for param in worldfile.read().split())
            worldfile.close()
            return [xscale,yskew,xskew,yscale,xoff,yoff]
if filepath.lower().endswith((".asc",".ascii")):
tempfile = open(filepath,"r")
### Step 1: check header for file info
info = dict()
def _nextheader(headername=None, force2length=True):
"returns a two-list of headername and headervalue"
nextline = False
while not nextline:
nextline = tempfile.readline().strip()
nextline = nextline.split()
if force2length:
if len(nextline) != 2:
raise Exception("Each header line must contain exactly two elements")
if headername:
if nextline[0].lower() != headername:
raise Exception("The required headername was not found: %s instead of %s"%(nextline[0].lower(),headername))
return nextline
# dimensions
cols = int(_nextheader(headername="ncols")[1])
rows = int(_nextheader(headername="nrows")[1])
# x/y_orig
_next = _nextheader()
if _next[0].lower() in ("xllcenter","xllcorner"):
xorig = float(_next[1])
xorigtype = _next[0].lower()
_next = _nextheader()
if _next[0].lower() in ("yllcenter","yllcorner"):
yorig = float(_next[1])
yorigtype = _next[0].lower()
info["xy_cell"] = (0, rows)
info["xy_geo"] = (xorig, yorig)
if "corner" in xorigtype and "corner" in yorigtype:
info["cell_anchor"] = "sw"
elif "corner" in xorigtype:
info["cell_anchor"] = "w"
elif "corner" in yorigtype:
info["cell_anchor"] = "s"
else:
info["cell_anchor"] = "center"
# cellsize
cellsize = float(_nextheader(headername="cellsize")[1])
info["cellwidth"] = cellsize
info["cellheight"] = cellsize
# nodata
prevline = tempfile.tell()
_next = _nextheader(force2length=False)
if _next[0].lower() == "nodata_value":
nodata = float(_next[1])
else:
# nd header missing, so set to default and go back to previous header line
nodata = -9999.0
tempfile.seek(prevline)
info["nodata_value"] = nodata
### Step 2: read data into lists
# make sure filereading is set to first data row (in case there are spaces or gaps in bw header and data)
nextline = False
while not nextline:
prevline = tempfile.tell()
nextline = tempfile.readline().strip()
tempfile.seek(prevline)
# collect flat list of cells instead of rows (bc data isn't necessarily organized into lines)
data = []
for line in tempfile.readlines():
data.extend(float(cell) for cell in line.split())
# reshape to correspond with columns-rows and flatten again
reshaped = itertools.izip(*grouper(data, cols))
data = [cell for row in reshaped for cell in row]
# load the data as an image
tempfile.close()
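        # note: the data was transposed above, so the image is created with
        # width=rows and height=cols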
img = PIL.Image.new("F", (rows, cols))
img.putdata(data=data)
# create the cell access object
cells = img.load()
# make a single-grid tuple
grids = [(img,cells)]
### Step 3: Read coordinate ref system
# ascii doesnt have any crs so assume default
crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
return info, grids, crs
elif filepath.lower().endswith((".tif",".tiff",".geotiff")):
# for more info:
# http://gis.stackexchange.com/questions/16839/why-does-a-tif-file-lose-projection-information-when-a-pixel-value-is-changed
# https://mail.python.org/pipermail/image-sig/2001-March/001380.html
main_img = PIL.Image.open(filepath)
raw_info = dict(main_img.tag.items())
def process_info(raw_info):
# check tag definitions here
# http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
# http://duff.ess.washington.edu/data/raster/drg/docs/geotiff.txt
info = dict()
if raw_info.has_key(1025):
# GTRasterTypeGeoKey, aka midpoint pixels vs topleft area pixels
if raw_info.get(1025) == (1,):
# is area
info["cell_anchor"] = "center"
elif raw_info.get(1025) == (2,):
# is point
info["cell_anchor"] = "nw"
else:
# TODO: what would be default value?
pass
if raw_info.has_key(34264):
# ModelTransformationTag, aka 4x4 transform coeffs...
                (a,b,c,d,
                 e,f,g,h,
                 i,j,k,l,
                 m,n,o,p) = raw_info.get(34264)
# But we don't want to meddle with 3-D transforms,
# ...so for now only get the 2-D affine parameters
xscale,xskew,xoff = a,b,d
yskew,yscale,yoff = e,f,h
info["transform_coeffs"] = xscale,xskew,xoff,yskew,yscale,yoff
else:
if raw_info.has_key(33922):
# ModelTiepointTag
x, y, z, geo_x, geo_y, geo_z = raw_info.get(33922)
info["xy_cell"] = x,y
info["xy_geo"] = geo_x,geo_y
if raw_info.has_key(33550):
# ModelPixelScaleTag
scalex,scaley,scalez = raw_info.get(33550)
info["cellwidth"] = scalex
info["cellheight"] = -scaley # note: cellheight must be inversed because geotiff has a reversed y-axis (ie 0,0 is in upperleft corner)
if raw_info.get(42113):
info["nodata_value"] = eval(raw_info.get(42113)) # eval from string to nr
return info
def read_crs(raw_info):
crs = dict()
if raw_info.get(34735):
# GeoKeyDirectoryTag
crs["proj_params"] = raw_info.get(34735)
if raw_info.get(34737):
# GeoAsciiParamsTag
crs["proj_name"] = raw_info.get(34737)
return crs
# read geotiff tags
info = process_info(raw_info)
# if no geotiff tag info look for world file transform coefficients
if len(info) <= 1 and not info.get("transform_coeffs"):
transform_coeffs = check_world_file(filepath)
if transform_coeffs:
# rearrange the param sequence to match affine transform
[xscale,yskew,xskew,yscale,xoff,yoff] = transform_coeffs
info["transform_coeffs"] = [xscale,xskew,xoff,yskew,yscale,yoff]
else:
raise Exception("Couldn't find any geotiff tags or world file needed to position the image in space")
# group image bands and pixel access into grid tuples
grids = []
for img in main_img.split():
cells = img.load()
grids.append((img,cells))
# read coordinate ref system
crs = read_crs(raw_info)
return info, grids, crs
    elif filepath.lower().endswith((".jpg",".jpeg",".png",".bmp",".gif")):
        # pure image, so only read if has a world file
        main_img = PIL.Image.open(filepath)
        info = dict()
        transform_coeffs = check_world_file(filepath)
        if transform_coeffs:
            # rearrange the param sequence to match affine transform
            [xscale,yskew,xskew,yscale,xoff,yoff] = transform_coeffs
            info["transform_coeffs"] = [xscale,xskew,xoff,yskew,yscale,yoff]
# group image bands and pixel access into grid tuples
grids = []
for img in main_img.split():
cells = img.load()
grids.append((img,cells))
# read crs
# normal images have no crs, so just assume default crs
crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
return info, grids, crs
else:
raise Exception("Couldn't find the world file needed to position the image in space")
else:
raise Exception("Could not create a raster from the given filepath: the filetype extension is either missing or not supported")
def from_lists(data, nodata_value=-9999.0, cell_anchor="center", **geoargs):
pass
def from_image(image, nodata_value=-9999.0, cell_anchor="center", **geoargs):
size = image.size
info = dict([(key,val) for key,val in geoargs.iteritems()
if key in ("xy_cell","xy_geo","cellwidth",
"cellheight","transform_coeffs") ])
if len(info) <= 3 and not info.get("transform_coeffs"):
raise Exception("To make a new raster from scratch, you must specify either all of xy_cell, xy_geo, cellwidth, cellheight, or the transform coefficients")
info["nodata_value"] = nodata_value
info["cell_anchor"] = cell_anchor
crs = geoargs.get("crs", "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
grids = []
cells = image.load()
grids.append((image, cells))
return info, grids, crs
def new(width, height, nodata_value=-9999.0, bands=1, cell_anchor="center", **geoargs):
size = (width, height)
info = dict([(key,val) for key,val in geoargs.iteritems()
if key in ("xy_cell","xy_geo","cellwidth",
"cellheight","transform_coeffs") ])
if len(info) <= 3 and not info.get("transform_coeffs"):
raise Exception("To make a new raster from scratch, you must specify either all of xy_cell, xy_geo, cellwidth, cellheight, or the transform coefficients")
info["nodata_value"] = nodata_value
info["cell_anchor"] = cell_anchor
crs = geoargs.get("crs", "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
grids = []
for _ in range(bands):
img = PIL.Image.new("F", size, float(nodata_value))
cells = img.load()
grids.append((img, cells))
return info, grids, crs
|
Finding a taxi service that suits all your needs isn’t easy. With us, you can book a taxi in 3 minutes. Travel worry-free, because our private taxi service from Nuremberg to Spindleruv Mlyn is offered at a fixed price. The all-inclusive rate confirmed before you book includes all taxes, tolls and fees. On most routes you can pay in cash or by card to the driver, or you can prepay your transfer during the booking process.
Free cancellation or modification can be made up to 12 hours before the planned journey. We will take you from any address, any hotel in Nuremberg to any address, any hotel in Spindleruv Mlyn.
What is the cheapest way to get from Nuremberg to Spindleruv Mlyn?
The cheapest option to get from Nuremberg to Spindleruv Mlyn is to book a standard car or shuttle shared car.
How far is it from Nuremberg to Spindleruv Mlyn?
The drive from Nuremberg to Spindleruv Mlyn is 445 km.
How long does it take to get from Nuremberg to Spindleruv Mlyn?
It takes approximately 4 hours 50 minutes to get from Nuremberg to Spindleruv Mlyn.
It is better to book your car and driver from Nuremberg to Spindleruv Mlyn as soon as possible, as prices may increase closer to the departure date.
|
"""
Utility classes/functions for the XSS Linter.
"""
import re
def is_skip_dir(skip_dirs, directory):
"""
Determines whether a directory should be skipped or linted.
Arguments:
skip_dirs: The configured directories to be skipped.
directory: The current directory to be tested.
Returns:
True if the directory should be skipped, and False otherwise.
"""
for skip_dir in skip_dirs:
skip_dir_regex = re.compile(
"(.*/)*{}(/.*)*".format(re.escape(skip_dir)))
if skip_dir_regex.match(directory) is not None:
return True
return False
class StringLines(object):
"""
StringLines provides utility methods to work with a string in terms of
lines. As an example, it can convert an index into a line number or column
number (i.e. index into the line).
"""
def __init__(self, string):
"""
Init method.
Arguments:
string: The string to work with.
"""
self._string = string
self._line_start_indexes = self._process_line_breaks(string)
# this is an exclusive index used in the case that the template doesn't
# end with a new line
self.eof_index = len(string)
def _process_line_breaks(self, string):
"""
        Creates a list, where each entry represents the index into the string
        at which a line starts.
Arguments:
string: The string in which to find line breaks.
Returns:
A list of indices into the string at which each line begins.
"""
line_start_indexes = [0]
index = 0
while True:
index = string.find('\n', index)
if index < 0:
break
index += 1
line_start_indexes.append(index)
return line_start_indexes
def get_string(self):
"""
Get the original string.
"""
return self._string
def index_to_line_number(self, index):
"""
Given an index, determines the line of the index.
Arguments:
index: The index into the original string for which we want to know
the line number
Returns:
The line number of the provided index.
"""
current_line_number = 0
for line_break_index in self._line_start_indexes:
if line_break_index <= index:
current_line_number += 1
else:
break
return current_line_number
def index_to_column_number(self, index):
"""
Gets the column (i.e. index into the line) for the given index into the
original string.
Arguments:
index: The index into the original string.
Returns:
The column (i.e. index into the line) for the given index into the
original string.
"""
start_index = self.index_to_line_start_index(index)
column = index - start_index + 1
return column
def index_to_line_start_index(self, index):
"""
Gets the index of the start of the line of the given index.
Arguments:
index: The index into the original string.
Returns:
The index of the start of the line of the given index.
"""
line_number = self.index_to_line_number(index)
return self.line_number_to_start_index(line_number)
def index_to_line_end_index(self, index):
"""
Gets the index of the end of the line of the given index.
Arguments:
index: The index into the original string.
Returns:
The index of the end of the line of the given index.
"""
line_number = self.index_to_line_number(index)
return self.line_number_to_end_index(line_number)
def line_number_to_start_index(self, line_number):
"""
Gets the starting index for the provided line number.
Arguments:
line_number: The line number of the line for which we want to find
the start index.
Returns:
The starting index for the provided line number.
"""
return self._line_start_indexes[line_number - 1]
def line_number_to_end_index(self, line_number):
"""
Gets the ending index for the provided line number.
Arguments:
line_number: The line number of the line for which we want to find
the end index.
Returns:
The ending index for the provided line number.
"""
if line_number < len(self._line_start_indexes):
return self._line_start_indexes[line_number]
else:
# an exclusive index in the case that the file didn't end with a
# newline.
return self.eof_index
def line_number_to_line(self, line_number):
"""
Gets the line of text designated by the provided line number.
Arguments:
line_number: The line number of the line we want to find.
Returns:
The line of text designated by the provided line number.
"""
start_index = self._line_start_indexes[line_number - 1]
if len(self._line_start_indexes) == line_number:
line = self._string[start_index:]
else:
end_index = self._line_start_indexes[line_number]
line = self._string[start_index:end_index - 1]
return line
def line_count(self):
"""
Gets the number of lines in the string.
"""
return len(self._line_start_indexes)
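# Illustrative usage of StringLines (not part of the linter itself):
#
#   lines = StringLines("one\ntwo\nthree")
#   lines.index_to_line_number(5)    # -> 2 (the "two" line)
#   lines.index_to_column_number(5)  # -> 2
#   lines.line_number_to_line(3)     # -> "three"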
class ParseString(object):
"""
ParseString is the result of parsing a string out of a template.
A ParseString has the following attributes:
start_index: The index of the first quote, or None if none found
end_index: The index following the closing quote, or None if
unparseable
quote_length: The length of the quote. Could be 3 for a Python
triple quote. Or None if none found.
string: the text of the parsed string, or None if none found.
string_inner: the text inside the quotes of the parsed string, or None
if none found.
"""
def __init__(self, template, start_index, end_index):
"""
Init method.
Arguments:
template: The template to be searched.
start_index: The start index to search.
end_index: The end index to search before.
"""
self.end_index = None
self.quote_length = None
self.string = None
self.string_inner = None
self.start_index = self._find_string_start(template, start_index, end_index)
if self.start_index is not None:
result = self._parse_string(template, self.start_index)
if result is not None:
self.end_index = result['end_index']
self.quote_length = result['quote_length']
self.string = result['string']
self.string_inner = result['string_inner']
def _find_string_start(self, template, start_index, end_index):
"""
        Finds the start index of a string; in other words, the index of the
        first single or double quote.
Arguments:
template: The template to be searched.
start_index: The start index to search.
end_index: The end index to search before.
Returns:
The start index of the first single or double quote, or None if no
quote was found.
"""
quote_regex = re.compile(r"""['"]""")
start_match = quote_regex.search(template, start_index, end_index)
if start_match is None:
return None
else:
return start_match.start()
def _parse_string(self, template, start_index):
"""
Finds the indices of a string inside a template.
Arguments:
template: The template to be searched.
start_index: The start index of the open quote.
Returns:
A dict containing the following, or None if not parseable:
end_index: The index following the closing quote
quote_length: The length of the quote. Could be 3 for a Python
triple quote.
string: the text of the parsed string
string_inner: the text inside the quotes of the parsed string
"""
quote = template[start_index]
if quote not in ["'", '"']:
raise ValueError("start_index must refer to a single or double quote.")
triple_quote = quote * 3
if template.startswith(triple_quote, start_index):
quote = triple_quote
next_start_index = start_index + len(quote)
while True:
quote_end_index = template.find(quote, next_start_index)
backslash_index = template.find("\\", next_start_index)
if quote_end_index < 0:
return None
if 0 <= backslash_index < quote_end_index:
next_start_index = backslash_index + 2
else:
end_index = quote_end_index + len(quote)
quote_length = len(quote)
string = template[start_index:end_index]
return {
'end_index': end_index,
'quote_length': quote_length,
'string': string,
'string_inner': string[quote_length:-quote_length],
}
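# Illustrative usage of ParseString (not part of the linter itself):
#
#   ps = ParseString("call('a b c') + 1", 0, 17)
#   ps.string        # -> "'a b c'"
#   ps.string_inner  # -> "a b c"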
class Expression(object):
"""
Represents an arbitrary expression.
An expression can be any type of code snippet. It will sometimes have a
starting and ending delimiter, but not always.
Here are some example expressions::
${x | n, decode.utf8}
<%= x %>
function(x)
"<p>" + message + "</p>"
Other details of note:
- Only a start_index is required for a valid expression.
- If end_index is None, it means we couldn't parse the rest of the
expression.
- All other details of the expression are optional, and are only added if
and when supplied and needed for additional checks. They are not necessary
for the final results output.
"""
def __init__(self, start_index, end_index=None, template=None, start_delim="", end_delim="", strings=None):
"""
Init method.
Arguments:
start_index: the starting index of the expression
end_index: the index immediately following the expression, or None
if the expression was unparseable
template: optional template code in which the expression was found
start_delim: optional starting delimiter of the expression
            end_delim: optional ending delimiter of the expression
strings: optional list of ParseStrings
"""
self.start_index = start_index
self.end_index = end_index
self.start_delim = start_delim
self.end_delim = end_delim
self.strings = strings
        if template is not None and self.end_index is not None:
            self.expression = template[start_index:end_index]
            # use None (not -0) for the slice end when end_delim is empty
            inner_end = -len(end_delim) if end_delim else None
            self.expression_inner = self.expression[len(start_delim):inner_end].strip()
else:
self.expression = None
self.expression_inner = None
|
There is probably more controversy about the CE than any other aspect of stereology. This controversy has its origins in claims in early papers (for example, Gundersen, 1986; Gundersen, 1988) that it was necessary to count as few as 100 cells to accurately estimate a cell population. However, numerous papers and comprehensive computer simulations (for example, Glaser, 1998; Schmitz, 2000) point to the need for caution in performing a minimum of work based on the optimistic predictions of some theoretical CE methods. As mentioned earlier, all of the CE estimation methods are based on models, and each method has conditions where the model is not met. The different CE methods and their associated formulas have been developed based upon different assumptions and with different considerations taken into account, such as the shape of the region of interest, the distribution of objects within the region of interest, and the sampling criteria applied to the examination.
So, in light of the theoretical controversy over the CE, the question for the biologist is what to do. When counting cells using the Optical Fractionator or quantifying volume using the Cavalieri estimator, it is recommended to use either the ‘oversampling-subsampling’ approach or the ‘oversampling’ approach described on the Study Design page. The only arguments against these approaches are that they may not be the most efficient and may result in more work than necessary. Yet many researchers believe that performing a little more work to ensure the accuracy of their experimental findings is a sacrifice well worth making. The additional effort required is small compared with the time and energy needed to perform a complete experiment. Researchers who consider the validity of their experimental findings paramount have little difficulty justifying such thorough means.
For counting cells, it is recommended to use the Optical Fractionator method rather than the Nv Vref method whenever possible, since computer simulations have also shown that the methods to predict the CE of total cell numbers estimated with the Nv Vref method do not result in adequate predictions of the CE.
For quantifying volume with the Cavalieri estimator, various methods have been developed to predict the CE. In a pilot computer simulation, Schmitz and colleagues obtained CE values with the method given in Equation 4 in Roberts et al. (Roberts 1994), which were precise predictions of the actual coefficient of variation when investigating regular, “quasi-ellipsoidal” objects (C. Schmitz, personal communication). In addition, the method described in García-Fiñana et al. is a refinement of Equation 4 of Roberts et al., taking irregular objects into consideration. Therefore, this method is likely a good choice for predicting the CV of any estimates of volumes obtained with the Cavalieri estimator, provided the area estimates are based on point counting.
Gundersen HJ, J Microsc 1986;143:3.
Gundersen HJ, Bagger P, Bendtsen TF, Evans SM, Korbo L, Marcussen N, Moller A, Nielsen K, Nyengaard JR, Pakkenberg B, et al., Acta Pathol Microbiol Immunol Scand 1988;96:857.
Glaser EM, Wilson PD., J Microsc 1998;192:163.
Schmitz C, Hof PR., J Chem Neuroanat 2000;20:93.
Roberts N, Garden AS, Cruz-Orive LM, Whitehouse GH, Edwards RH., Br J Radiol 1994;67:1067.
Garcia-Finana M, Cruz-Orive LM, Mackay CE, Pakkenberg B, Roberts N., Neuroimage 2003;18:505.
|
# This happens when user clicks on the "Launch JLigand" button.
# It starts a jligand and puts it in the background.
#
def launch_jligand_function():
global jligand_jar
global jligand_home_env
global java_command
start_jligand_listener()
# maybe this should rather check PATH or similar!? FIXME
if not os.path.isfile(jligand_jar):
# Boo. Give us a warning dialog
#
s = "jligand java jar file: " + jligand_jar + " not found"
# make an extra message telling us that JLIGAND_HOME is
# not set if it is not set.
env_message = "Environment variable JLIGAND_HOME not set\n\n" \
if not jligand_home_env else ""
info_dialog(env_message + s)
else:
# OK, it does exist - run it!
#
java_exe = find_exe(java_command)
if not java_exe:
print "BL INFO:: no java found"
else:
# first check if we can run it with coot, i.e. is '-version'
# a valid command line arg
jligand_version = ["-jar", jligand_jar, "-version"]
cmd = java_exe + " " + \
string_append_with_spaces(jligand_version)
res = shell_command_to_string(cmd)
if (not res):
message = "Sorry, your JLigand:\n\n " + jligand_jar + "\n\n" + \
"is not new enough to work with Coot!\n" + \
"Please download a new one!"
info_dialog(message)
else:
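                # run JLigand in the background (jligand_args is presumably
                # defined at module level elsewhere in this file)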
run_concurrently(java_exe, jligand_args)
# beam in a new menu to the menu bar:
if (have_coot_python):
if coot_python.main_menubar():
jligand_menu = coot_menubar_menu("JLigand")
add_simple_coot_menu_menuitem(
jligand_menu, "Send Link to JLigand (click 2 monomers)",
lambda func: click_select_residues_for_jligand()
)
# This happens when user clicks on the "Select Residues for JLigand"
# (or some such) button. It expects the user to click on atoms of
# the two residues involved in the link.
#
def click_select_residues_for_jligand():
global imol_jligand_link
def link_em(*args):
print "we received these clicks", args
if (len(args) == 2):
click_1 = args[0]
click_2 = args[1]
print "click_1:", click_1
print "click_2:", click_2
if ((len(click_1) == 7)
and (len(click_2) ==7)):
resname_1 = residue_name(click_1[1],
click_1[2],
click_1[3],
click_1[4])
resname_2 = residue_name(click_2[1],
click_2[2],
click_2[3],
click_2[4])
imol_click_1 = click_1[1]
imol_click_2 = click_2[1]
chain_click_1 = click_1[2]
chain_click_2 = click_2[2]
resno_click_1 = click_1[3]
resno_click_2 = click_2[3]
if not (isinstance(resname_1, str) and
isinstance(resname_2, str)):
print "Bad resnames: %s and %s" %(resname_1, resname_2)
else:
if not (imol_click_1 == imol_click_2):
msg = "Two different molecules %s and %s selected.\n" \
%(imol_click_1, imol_click_2) + \
"Make sure to select residues in the same molecule."
info_dialog(msg)
imol_jligand_link = False
elif (chain_click_1 == chain_click_2 and
resno_click_1 == resno_click_2):
msg = "Same residue %s %s selected.\n" \
%(chain_click_1, resno_click_1) + \
"Make sure to select different residues."
info_dialog(msg)
imol_jligand_link = False
else:
# happy path
imol_jligand_link = imol_click_1
write_file_for_jligand(click2res_spec(click_1), resname_1,
click2res_spec(click_2), resname_2)
user_defined_click(2, link_em)
|
Discover Barcelona’s beautiful Borne, Raval, and harbor districts.
Part tuition, part tour: learn some expert Barcelona photo tips from a local guide.
Build up your Barcelona photo album with shots of modern and old buildings, as well as cool street scenes and urban art.
Local English speaking guide; Photography advice.
Available: Friday and Saturday; Start time at 10:30am.
Meeting Point: Plaça Comercial in front of El Born Centre Cultural.
Ending Point: Mercat de La Boquería.
Discover Barcelona life through a lens! On this truly unique Barcelona photography tour, you’ll learn how to creatively use your camera to capture the essence of this mind-blowing (and ever so photogenic) Spanish city, and communicate its spirit through your photos. Your expert local guide will take you through Barcelona’s gorgeous neighborhoods, giving you photo lessons along the way to snap the very best photos of the city’s magical streets and panoramas. Wanna learn how to photograph a city like a pro? This is the tour for you!
Your Barcelona photo tour will kick off in the ever-trendy Borne district, famed among locals for its cool bars and terraces. Here we’ll take photos of all kinds of buildings, from modernist masterpieces at the Borne Cultural Centre to one of Barcelona’s most popular basilicas. As we continue to tour Barcelona’s Borne area, your local guide will also lead you to some of the district’s best urban street art, something you’ll definitely want to photograph as a memento of your time in Barcelona.
Next on this Barcelona tour, we’ll head through Paseo de Colom of Port Vell, or the Old Port. Here we’ll climb up to higher ground so that we can take some epic photos of the harbor and city as a whole. You’ll have plenty of time up here to look out over the fishing pier and all the parts of the city you can see, and your expert guide will fill you in on some of the best angles to check out at this viewpoint.
We’ll continue on from here along the pier and past one of Barcelona’s most beautiful squares, where Barcelona’s famous Colom is located. Barcelona is the kind of place where there is something gorgeous to photograph around every corner, so as we wander, your local guide will be pointing out plenty of cool shots to take. With an expert by your side you’re sure to never miss that awesome piece of street art, flawless Gothic building, or snapshot of local life – the kinds of things you’ll definitely want in your BCN photo album!
We’ll head into the Raval district, an alternative neighborhood where all kinds of cultures are mixed, from student life to Barcelona’s buzzing immigrant population. We’ll walk by some legendary Gaudi architecture (because what Barcelona tour would be complete without it...), the city’s Film Library, and stroll down some of Raval’s most iconic streets, taking photos of the classic Barcelona buildings, tall and compact, and dotted with balconies.
Our next stop on this Barcelona photography tour shifts from the cozy streets into market life at La Boquería. Here your senses will come alive as you’re surrounded by all kinds of smells, shapes, and colors. La Boquería is one of the best places in Barcelona to take photos, as it connects you to everyday local life in the city and will add a dash of color to your photo album. If you haven’t yet broken out the #nofilter, don’t be surprised if now’s the time.
This tour will leave you with so many memories of your time in Barcelona, and plenty of sharp photo skills to take with you on your future travels around the world!
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.addons.sale.tests.test_sale_common import TestSale
class TestSaleStock(TestSale):
def test_00_sale_stock_invoice(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on delivery" products
"""
inv_obj = self.env['account.invoice']
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice on order
self.so.action_invoice_create()
# deliver partially, check the so's invoice_status and delivered quantities
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice" after invoicing')
pick = self.so.picking_ids
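        # force_assign reserves the moves even without stock on hand, so the
        # delivery can be processed in this test.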
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 1})
wiz_act = pick.do_new_transfer()
wiz = self.env[wiz_act['res_model']].browse(wiz_act['res_id'])
wiz.process()
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after partial delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [1.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: only stockable products
inv_id = self.so.action_invoice_create()
inv_1 = inv_obj.browse(inv_id)
self.assertTrue(all([il.product_id.invoice_policy == 'delivery' for il in inv_1.invoice_line_ids]),
'Sale Stock: invoice should only contain "invoice on delivery" products')
# complete the delivery and check invoice_status again
self.assertEqual(self.so.invoice_status, 'no',
'Sale Stock: so invoice_status should be "nothing to invoice" after partial delivery and invoicing')
self.assertEqual(len(self.so.picking_ids), 2, 'Sale Stock: number of pickings should be 2')
pick_2 = self.so.picking_ids[0]
pick_2.force_assign()
pick_2.pack_operation_product_ids.write({'qty_done': 1})
self.assertIsNone(pick_2.do_new_transfer(), 'Sale Stock: second picking should be final without need for a backorder')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after complete delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after complete delivery')
# invoice on delivery
inv_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced',
'Sale Stock: so invoice_status should be "fully invoiced" after complete delivery and invoicing')
def test_01_sale_stock_order(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on order" products
"""
# let's cheat and put all our products to "invoice on order"
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
for sol in self.so.order_line:
sol.product_id.invoice_policy = 'order'
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on order" stockable products')
# let's do an invoice for a deposit of 5%
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'percentage',
'amount': 5.0,
'product_id': self.env.ref('sale.advance_product_0').id,
})
act = adv_wiz.with_context(open_invoices=True).create_invoices()
inv = self.env['account.invoice'].browse(act['res_id'])
self.assertEqual(inv.amount_untaxed, self.so.amount_untaxed * 5.0 / 100.0, 'Sale Stock: deposit invoice is wrong')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so should be to invoice after invoicing deposit')
# invoice on order: everything should be invoiced
self.so.action_invoice_create(final=True)
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so should be fully invoiced after second invoice')
# deliver, check the delivered quantities
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 2})
self.assertIsNone(pick.do_new_transfer(), 'Sale Stock: complete delivery should not need a backorder')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: nothing to invoice
self.assertFalse(self.so.action_invoice_create(), 'Sale Stock: there should be nothing to invoice')
def test_02_sale_stock_return(self):
"""
Test a SO with a product invoiced on delivery. Deliver and invoice the SO, then do a return
of the picking. Check that a refund invoice is well generated.
"""
        # initial so
self.partner = self.env.ref('base.res_partner_1')
self.product = self.env.ref('product.product_product_47')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 5.0,
'product_uom': self.product.uom_id.id,
'price_unit': self.product.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
self.so = self.env['sale.order'].create(so_vals)
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
        # invoicing is on delivery, so nothing should be invoiced yet
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice"')
# deliver completely
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 5})
pick.do_new_transfer()
# Check quantity delivered
del_qty = sum(sol.qty_delivered for sol in self.so.order_line)
self.assertEqual(del_qty, 5.0, 'Sale Stock: delivered quantity should be 5.0 after complete delivery')
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
inv_1_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" after invoicing')
self.assertEqual(len(inv_1_id), 1, 'Sale Stock: only one invoice should be created')
self.inv_1 = self.env['account.invoice'].browse(inv_1_id)
        self.assertEqual(self.inv_1.amount_untaxed, self.so.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
# Create return picking
StockReturnPicking = self.env['stock.return.picking']
default_data = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).default_get(['move_dest_exists', 'original_location_id', 'product_return_moves', 'parent_location_id', 'location_id'])
return_wiz = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).create(default_data)
res = return_wiz.create_returns()
return_pick = self.env['stock.picking'].browse(res['res_id'])
# Validate picking
return_pick.force_assign()
return_pick.pack_operation_product_ids.write({'qty_done': 5})
return_pick.do_new_transfer()
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
# let's do an invoice with refunds
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'all',
})
adv_wiz.with_context(open_invoices=True).create_invoices()
self.inv_2 = self.so.invoice_ids[1]
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "no" after invoicing the return')
        self.assertEqual(self.inv_2.amount_untaxed, self.so.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
|
Family Group Sheet Project: forms are available for submission onsite, and the project covers all of Michigan. Sheets are coming in; search them or browse them by county or surname.
This site is dedicated to genealogy and features sites to help you with your research - especially Michigan records.
We have added some new features and removed some of the old ones that were not up to standard. The site will feature my "special" internet projects in one place.
MFHN is a state site for doing genealogy in Michigan. It features free databases of records that have been put online for your use. It presently has over 180,000 Michigan records, thanks to transcribers and individual visitors. Use the top menu bar to navigate and find your areas of interest.
MIGenWeb County pages. Gratiot Co., Houghton Co., Keweenaw Co., The Ross' Cornish to Michigan and the Downward Bound sites are on this server.
Also on this site from time to time I will be testing various programming and databases. Please take a look.
FAMILY - My personal family files. Many Michigan names are in these files. The various lines started coming into the state back in the mid-1800's. From Poland, Germany, Cornwall, Scotland, and eastern US.
This site is now hosted on Crystal Tech WebHosting, Inc. as of March 19, 2001.
|
from re import compile
from json import dumps
from flask import request, jsonify
from functools import wraps
from models import Service
from shared import zmq_relay_socket
uuid = compile(r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$')
service = compile(r'^[a-zA-Z0-9]{4}-[a-zA-Z0-9]{6}-[a-zA-Z0-9]{12}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{9}$')
secret = compile(r'^[a-zA-Z0-9]{32}$')
is_uuid = lambda s: uuid.match(s) is not None
is_service = lambda s: service.match(s) is not None
is_secret = lambda s: secret.match(s) is not None
QUERY_ACTION_NEW_MESSAGE = 0
QUERY_UPDATE_LISTEN = 1
class Error(object):
@staticmethod
def _e(message, error_code, http_status):
return (dumps({'error': {'message': message, 'id': error_code}}), http_status)
NONE = (dumps({'status': 'ok'}), 200) # OK
INVALID_CLIENT = _e.__func__('Invalid client uuid', 1, 400) # Bad request
INVALID_SERVICE = _e.__func__('Invalid service', 2, 400) # - || -
INVALID_SECRET = _e.__func__('Invalid secret', 3, 400) # - || -
DUPLICATE_LISTEN = _e.__func__('Already subscribed to that service', 4, 409) # Conflict
RATE_TOOFAST = _e.__func__('Whoaw there cowboy, slow down!', 5, 429) # Too many requests
SERVICE_NOTFOUND = _e.__func__('Service not found', 6, 404)
INVALID_PUBKEY = _e.__func__('Invalid public key supplied. Please send a DER formatted base64 encoded key.', 8, 400) # Bad request
CONNECTION_CLOSING = _e.__func__('Connection closing', 9, 499) # Client closed request
NO_CHANGES = _e.__func__('No changes were made', 10, 400) # Bad request
NOT_SUBSCRIBED = _e.__func__('Not subscribed to that service', 11, 409) # Conflict
@staticmethod
def ARGUMENT_MISSING(arg):
return Error._e('Missing argument {}'.format(arg), 7, 400) # Bad request
def has_uuid(f):
@wraps(f)
def df(*args, **kwargs):
client = request.form.get('uuid', '') or request.args.get('uuid', '')
if not client:
return Error.ARGUMENT_MISSING('uuid')
if not is_uuid(client):
return Error.INVALID_CLIENT
return f(*args, client=client, **kwargs)
return df
def has_service(f):
@wraps(f)
def df(*args, **kwargs):
service = request.form.get('service', '') or request.args.get('service', '')
if not service:
return Error.ARGUMENT_MISSING('service')
if not is_service(service):
return Error.INVALID_SERVICE
srv = Service.query.filter_by(public=service).first()
if not srv:
return Error.SERVICE_NOTFOUND
return f(*args, service=srv, **kwargs)
return df
def has_secret(f):
@wraps(f)
def df(*args, **kwargs):
secret = request.form.get('secret', '') or request.args.get('secret', '')
if not secret:
return Error.ARGUMENT_MISSING('secret')
if not is_secret(secret):
return Error.INVALID_SECRET
srv = Service.query.filter_by(secret=secret).first()
if not srv:
return Error.SERVICE_NOTFOUND
return f(*args, service=srv, **kwargs)
return df
def queue_zmq_message(message):
zmq_relay_socket.send_string(message)
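# Usage sketch -- assumes a Flask app defined elsewhere in the project; the
# "app" object and the /message route below are illustrative, not part of
# this module. The decorators stack, each injecting its validated value as a
# keyword argument:
#
#   @app.route('/message', methods=['POST'])
#   @has_uuid
#   @has_secret
#   def new_message(client, service):
#       queue_zmq_message(dumps({'action': QUERY_ACTION_NEW_MESSAGE,
#                                'client': client,
#                                'service': service.public}))
#       return Error.NONE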
|
I came across this article about what is purported to be Britain’s oldest living tree. The tree is about 4 000 years old. I was going to read the article and then probably forget it. But I noted that it was a yew tree.
Yew is noted as one of the best woods for making a bow. From the title I gave this post, you are probably one step ahead of me and have already figured out that this tree would have been around in the time of Robin Hood.
Now Sherwood Forest (Robin Hood’s stomping grounds) and Somerset (where this tree is found) are quite far from each other by British standards. But I’ve heard rumours that the real, historical Robin Hood may have been well traveled, perhaps even journeying deep into continental Europe. So it is more than possible that Robin Hood or one of his merry men could have crafted a bow from this very tree. Indeed, the act may even have contributed to the tree’s huge age.
I live in southern Ontario and the oldest trees here are hardscrabble ones along the rocky Niagara Escarpment (yes the escarpment is what makes the famous falls). They, too, have lived for thousands of years. Indeed, the harsher living conditions are believed to have contributed to their longevity.
So would it be any surprise to a gardener that pruning a plant or tree might add to its longevity? Removing one branch for a bow from this old yew tree might have been exactly like gardening and allowed the tree to live until our time. So making a bow from this tree could be like giving to the future.
How would Robin Hood or the merry men have stolen from the rich then?
Well, back in Robin’s time many of the forests were protected as royal forests. Commoners weren’t allowed to hunt in royal forests. Indeed, they weren’t allowed to fell trees or carry weapons. So it is likely that making a bow out of part of a tree would have been called stealing by the aristocrats.
So Robin Hood and his merry men might have stolen from the rich, via this Somerset tree, and helped it live to our time.
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes.wintypes
import hashlib
import json
import os
import subprocess
import sys
BASEDIR = os.path.dirname(os.path.abspath(__file__))
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
FILE_ATTRIBUTE_HIDDEN = 0x2
FILE_ATTRIBUTE_SYSTEM = 0x4
def IsHidden(file_path):
"""Returns whether the given |file_path| has the 'system' or 'hidden'
attribute set."""
p = GetFileAttributes(file_path)
assert p != 0xffffffff
return bool(p & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM))
def GetFileList(root):
"""Gets a normalized list of files under |root|."""
assert not os.path.isabs(root)
assert os.path.normpath(root) == root
file_list = []
for base, _, files in os.walk(root):
paths = [os.path.join(base, f) for f in files]
file_list.extend(x.lower() for x in paths if not IsHidden(x))
return sorted(file_list)
def MakeTimestampsFileName(root):
return os.path.join(root, '..', '.timestamps')
def CalculateHash(root):
"""Calculates the sha1 of the paths to all files in the given |root| and the
contents of those files, and returns as a hex string."""
file_list = GetFileList(root)
# Check whether we previously saved timestamps in $root/../.timestamps. If
# we didn't, or they don't match, then do the full calculation, otherwise
# return the saved value.
timestamps_file = MakeTimestampsFileName(root)
timestamps_data = {'files': [], 'sha1': ''}
if os.path.exists(timestamps_file):
with open(timestamps_file, 'rb') as f:
try:
timestamps_data = json.load(f)
except ValueError:
# json couldn't be loaded, empty data will force a re-hash.
pass
matches = len(file_list) == len(timestamps_data['files'])
if matches:
for disk, cached in zip(file_list, timestamps_data['files']):
if disk != cached[0] or os.stat(disk).st_mtime != cached[1]:
matches = False
break
if matches:
return timestamps_data['sha1']
digest = hashlib.sha1()
for path in file_list:
digest.update(path)
with open(path, 'rb') as f:
digest.update(f.read())
return digest.hexdigest()
def SaveTimestampsAndHash(root, sha1):
"""Save timestamps and the final hash to be able to early-out more quickly
next time."""
file_list = GetFileList(root)
timestamps_data = {
'files': [[f, os.stat(f).st_mtime] for f in file_list],
'sha1': sha1,
}
with open(MakeTimestampsFileName(root), 'wb') as f:
json.dump(timestamps_data, f)
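# For reference, the .timestamps file written above is plain JSON; the path
# and values below are illustrative only, not real data:
#
#   {"files": [["src\\third_party\\win_toolchain\\files\\bin\\cl.exe",
#               1388534400.0]],
#    "sha1": "2c2d6a3e0b0e4f19bc1f..."}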
def main():
if sys.platform not in ('win32', 'cygwin'):
return 0
if len(sys.argv) != 1:
print >> sys.stderr, 'Unexpected arguments.'
return 1
# Move to same location as .gclient. This is a no-op when run via gclient.
os.chdir(os.path.normpath(os.path.join(BASEDIR, '..\\..\\..\\..')))
toolchain_dir = 'src\\third_party\\win_toolchain'
target_dir = os.path.join(toolchain_dir, 'files')
sha1path = os.path.join(toolchain_dir, 'toolchain.sha1')
desired_hash = ''
if os.path.isfile(sha1path):
with open(sha1path, 'rb') as f:
desired_hash = f.read().strip()
# If the current hash doesn't match what we want in the file, nuke and pave.
  # Typically this script is only run when the .sha1 file is updated, but
# directly calling "gclient runhooks" will also run it, so we cache
# based on timestamps to make that case fast.
current_hash = CalculateHash(target_dir)
if current_hash != desired_hash:
print 'Windows toolchain out of date or doesn\'t exist, updating...'
if os.path.isdir(target_dir):
subprocess.check_call('rmdir /s/q "%s"' % target_dir, shell=True)
subprocess.check_call([
sys.executable,
'src\\tools\\win\\toolchain\\toolchain2013.py',
'--targetdir', target_dir])
current_hash = CalculateHash(target_dir)
if current_hash != desired_hash:
print >> sys.stderr, (
'Got wrong hash after pulling a new toolchain. '
'Wanted \'%s\', got \'%s\'.' % (
desired_hash, current_hash))
return 1
SaveTimestampsAndHash(target_dir, current_hash)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Get a FREE sub from Subway with this Subway coupon! This coupon is actually quite unique in the sense that you can only get a FREE regular 6″ sub from Subway, with the purchase of a 30 oz drink and a bag of chips, the day after any home game in which the Washington Redskins score 20 points or more. Interesting, right? I don’t really see these types of “conditional” coupons often, but let’s hope the Redskins win a bunch of games, or at least score 3 touchdowns or a handful of field goals. Subway (“eat fresh”) is the official training restaurant of the Washington Redskins. The coupon must be presented at any Subway restaurant in the greater Washington DC metro area to redeem the offer. One coupon offer per person.

It’s quite a coincidence that this coupon would be released shortly after the controversy over the Washington Redskins being a racially insensitive name for a football team, considering its derogatory reference to Native Americans by their skin color and America’s history with Native Americans. It’s a topic that really hits home with many people and makes us reconsider a lot of norms that could be offensive to people without our even considering how they feel. The owner of the team has stated recently that the name will remain the same. I just hope that they respect Native American culture and celebrate its traditions rather than focus on (what many consider) the racial slur that defines the football team’s mascot. All in all, it’s important to remember that we are all united; let’s show that strength by scoring well at football games and treating Washington residents to a nice FREE sub coupon this football season!
|
"""
All Seeing Eye
Oracle Client Install Helper!
Elliott Saille
12/3/13
"""
#Include only specific functions
from subprocess import call
from os import name
from os import system
from os import access
from os import R_OK
from os import W_OK
from os import makedirs
from os import path
from os import environ
from os import walk
from shutil import rmtree
from shutil import copy2
from sys import exit
from time import sleep
#Variables
tempDir = environ["TEMP"] + "/allSeeingEye"
tnsnamesTemp = tempDir + "/tnsnames.ora"
tnsnames = "C:/oracle/product/10.2.0/client/NETWORK/ADMIN/tnsnames.ora"
oraInstaller = "M:/INSTALL/Voyager8/10203_client_vista-win7"
installTemp = tempDir + "/oracle"
setup = installTemp + "/setup.exe"
setupOpts = "\"FROM_LOCATION=%CD%\stage\products.xml\" -responseFile \"%CD%\response\ExLibrisOracle.rsp\""
compatMode = "VISTASP2"
def compatabilityChange(path, mode="WINXPSP3", runasadmin=True, verbose=False):
"""
Borrowed from http://techdiary-viki.blogspot.com/2011/03/script-to-set-compatibility-mode-of.html
Change the compatibility mode of a windows EXE
Valid Compatibility modes are:
WIN95: Windows 95
WIN98: Windows 98 / Windows ME
WINXPSP2: Windows XP (Service Pack 2)
WINXPSP3: Windows XP (Service Pack 3)
VISTARTM: Windows Vista
VISTASP1: Windows Vista (Service Pack 1)
VISTASP2: Windows Vista (Service Pack 2)
WIN7RTM: Windows 7
WIN8RTM: Windows 8
"""
#Display path to file that will be changed
print("Processing path %s" % path)
    # Collect full paths so nested executables get a correct path and so that
    # len() works under Python 3 (filter() would return a lazy iterator).
    exec_files = []
    for dirpath, _, filenames in walk(path):
        exec_files.extend(dirpath + '\\' + f for f in filenames if f.endswith('.exe'))
    if verbose:
        print("%d files to process" % len(exec_files))
        print("Setting mode to %s" % mode)
    if runasadmin:
        # The Layers value data is a space-separated flag list, so RUNASADMIN
        # is appended to the compatibility mode.
        mode += " RUNASADMIN"
        print("Program will run as Administrator")
    for ef in exec_files:
        if verbose:
            print("Processing file %s" % ef)
        # The value name under the Layers key must be the full path to the exe.
        system(r'REG.EXE ADD "HKEY_CURRENT_USER\Software\Microsoft\Windows NT\CurrentVersion\AppCompatFlags\Layers" /v "%s" /t REG_SZ /d "%s" /f' % (ef, mode))
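# Effect sketch (paths and values illustrative only): after calling
# compatabilityChange(installTemp, "VISTASP2", runasadmin=True), each .exe
# found gets a Layers value such as
#   "C:\Users\me\AppData\Local\Temp\allSeeingEye\oracle\setup.exe" = "VISTASP2 RUNASADMIN"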
def confirm(prompt=None, resp=False):
"""
Prompts for yes or no response from the user. Returns True for yes and
False for no.
"resp" should be set to the default value assumed by the caller when
user simply types ENTER.
"""
#set default prompt if none set
if prompt is None:
prompt = "Confirm"
#Change the default response
if resp:
prompt = "%s [%s]|%s: " % (prompt, "y", "n")
else:
prompt = "%s [%s]|%s: " % (prompt, "n", "y")
#Check for user input
while True:
ans = input(prompt)
if not ans:
return resp
if ans not in ["y", "Y", "n", "N"]:
print("please enter y or n.")
continue
if ans == "y" or ans == "Y":
return True
if ans == "n" or ans == "N":
return False
def clear():
"""
Clears the screen
"""
system("cls")
def backup():
"""
Backs up current tnsnames if it exists
"""
clear()
print("Backing up current tnsnames.ora from:")
print(tnsnames)
#make sure we can access the file
if access(tnsnames, R_OK) == True:
try:
#Copy it to the Temp Dir
copy2(tnsnames, tnsnamesTemp)
#or throw error
except IOError as e:
print("\n")
print("({})".format(e))
print("\n")
confirm("Backup Failed!\nReturn to main menu?", True)
mainMenu()
#be happy
else:
print("\nBackup Complete!\n")
else:
clear()
print("Unable to access tnsnames.ora at:")
print(tnsnames)
confirm("Return To main Menu?", True)
mainMenu()
def download():
"""
Copies oracle installer from network share
"""
#Check if installer exists on share
if path.exists(oraInstaller):
try:
#Copy it local
system("xcopy" +" /I /S \""+ oraInstaller +"\" \""+ installTemp +"\"")
#Throw a useful error
except IOError as e:
print("\n")
print("({})".format(e))
print("\n")
confirm("Installation Failed!\nReturn to main menu?", True)
mainMenu()
#If no errors print happy message!
else:
print("\nInstaller Copied Successfully!\n")
#No installer :(
else:
confirm("\nInstaller does not exist on share!\nReturn to main menu?", True)
mainMenu()
#Check if installer has been downloaded
if path.exists(setup):
#Change compatibility mode
compatabilityChange(setup, compatMode, True, False)
#Or Fail!
else:
clear()
print("Could not find installer,\nnothing to set compatibility for!\n")
confirm("Return to main menu?", True)
mainMenu()
def install():
"""
Sets environment up to run the oracle installer
"""
clear()
print("Installing Oracle database client\n")
    #Are you sure this is what you want to do?
if confirm("Continue Installation?", True) == False:
clear()
print("Installation aborted")
sleep(2)
mainMenu()
#Check if installer has already been downloaded this session
if path.exists(setup):
#Ask if you want to reuse downloaded installer and if not re-download
if confirm("Installer exists!\nUse downloaded installer?", True) == False:
clear()
print("Will re-download installer")
rmtree(installTemp)
download()
#If not download the installer
else:
download()
#Write some initial configuration stuff to the Registry
system("reg add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\MSDTC\MTxOCI /v OracleOciLib /t REG_SZ /d oci.dll /f")
system("reg add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\MSDTC\MTxOCI /v OracleSqlLib /t REG_SZ /d orasql10.dll /f")
system("reg add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\MSDTC\MTxOCI /v OracleXaLib /t REG_SZ /d oraclient10.dll /f")
#Call the installer
call("%s" % setup + " " + setupOpts, shell=True)
confirm("Return To main Menu?", True)
mainMenu()
def create_tnsnames():
    """
    Copy preconfigured tnsnames.ora to the Oracle install location.
    Will eventually include an option to add custom entries to tnsnames.
    """
    print("Not implemented yet!")
    confirm("Return To main Menu?", True)
    mainMenu()
def mainMenu():
"""
Display the Main Menu
"""
clear()
print("Oracle Installation and Configuration Helper")
print("\n")
print("1. Backup current tnsnames.ora")
print("2. Install Oracle 10g Client")
print("3. Create tnsnames.ora")
print("4. Add ODBC Configuration")
print("Q. Exit")
    choice = input("Please Make a Selection: ")
    if choice == "1":
        backup()
    elif choice == "2":
        install()
    elif choice == "3":
        create_tnsnames()
    elif choice == "4":
        print("Not implemented yet!")
    elif choice == "Q" or choice == "q":
        clear()
        quit()
clear()
print("Please make a selection!")
confirm("Return To main Menu?", True)
mainMenu()
#Clean up and Create Temp Dir for session
if path.exists(tempDir):
print ("Old temp directory found at %s" % tempDir)
if confirm("Remove Temp Directory?", True) == True:
try:
rmtree(tempDir)
except IOError as e:
print("({})".format(e))
try:
makedirs(tempDir)
except IOError as e:
print("({})".format(e))
else:
exit("Will not remove Temp Directory! Please Manually delete directory %s!" % tempDir)
else:
try:
makedirs(tempDir)
except IOError as e:
print("({})".format(e))
#Do Stuff!
mainMenu()
|
Bayliner Buccaneer 250 Manual is a good choice if you are looking for a nice reading experience. We hope you are glad to visit our website. Please read our description and our privacy and policy page.
Finally I got this ebook; thanks to all of this, I can get the Bayliner Buccaneer 250 Manual now!
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import numpy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes()
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
@test_util.run_in_graph_and_eager_modes()
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
  def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
@test_util.run_in_graph_and_eager_modes()
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
if context.in_graph_mode():
with self.assertRaises(TypeError):
variable_scope.get_variable("x4", initializer={})
else:
with self.assertRaises(ValueError):
variable_scope.get_variable("x4", initializer={})
@test_util.run_in_graph_and_eager_modes()
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(alive): support variable partitioning/caching in eager mode.
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(v2_not_cached.value().device.startswith(
caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(v2_identity_device.value().device.startswith(
caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
@test_util.run_in_graph_and_eager_modes()
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
      # Next 3 variables are not regularized, to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if context.in_graph_mode():
v = variable_scope.get_variable("v",
[]) # "v" is alredy there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
@test_util.run_in_graph_and_eager_modes()
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
def testControlDeps(self):
with self.test_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testControlFlow(self):
with self.test_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
@test_util.run_in_graph_and_eager_modes()
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if context.in_graph_mode():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes()
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes()
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if context.in_graph_mode():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if context.in_graph_mode():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
def testVarScopeOriginalNameScope(self):
with self.test_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
with self.test_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
def testVarScopeGetOrCreateReuse(self):
with self.test_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope("testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope("testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, x.eval())
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
def testVarOpScope(self):
with self.test_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.test_session():
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
def testVarOpScopeUniqueNamesWithJump(self):
with self.test_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
def testVarOpScopeReuse(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
def testVarScopeOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
with self.test_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
def testVarOpScopeOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.test_session():
with variable_scope.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "scope/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "scope/w1:0")
self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("scope"):
self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "outer/c:0")
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.test_session():
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("default"):
self.assertEqual(constant_op.constant([], name="c").name, "default/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "outer/c:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/default/c:0")
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.test_session():
root_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
def testAuxiliaryNameScopeIsInvalid(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with variable_scope.variable_scope("scope") as scope:
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.test_session():
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope("inner") as inner:
pass
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with ops.name_scope(scope.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
with ops.name_scope("inner"):
self.assertEqual(constant_op.constant([], name="c").name, "inner/c:0")
with variable_scope.variable_scope("another"):
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with ops.name_scope(scope1.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/inner/c1:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
@test_util.run_in_graph_and_eager_modes()
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
# Since variable is local, it should be in the local variable collection
# but not the trainable collection.
if context.in_graph_mode():
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
if context.in_graph_mode():
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
def testGetVarWithDevice(self):
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
def testGetCollection(self):
with self.test_session():
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
def testGetTrainableVariables(self):
with self.test_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable("testGetTrainableVariables_c", [],
trainable=False)
self.assertEqual([v.name
for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/"
"testGetTrainableVariables_b:0"])
def testGetGlobalVariables(self):
with self.test_session():
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual([v.name
for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
def testGetLocalVariables(self):
with self.test_session():
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable(
"c", [])
self.assertEqual([v.name
for v in scope.local_variables()], ["foo/b:0"])
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
class VariableScopeWithPartitioningTest(test.TestCase):
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope(
"prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope(
"sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
# take products of sums of products
self.assertAllClose(
np_v,
(((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3]))
+ ((np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
class PartitionInfoTest(test.TestCase):
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
if __name__ == "__main__":
test.main()
|
Looking for Speed Queen at Brown's TV & Appliance in Northboro, MA?
Brown's TV & Appliance in Northboro, MA is an authorized dealer of Speed Queen products. The history of Speed Queen, from its humble beginnings in 1908 as Barlow and Seelig Manufacturing to the present, is a fascinating chain of events. And today, as in 1908, Speed Queen continually strives to provide you with quality products backed by superior customer service. With over 100 years of commercial quality and expertise, they bring you a line of washers and dryers built with a no-nonsense design and performance-based construction for your home. These products will get your clothing cleaner. After all, it's what you expect from a company that only does laundry!
So if you are looking for Speed Queen products in Northboro, Shrewsbury, Westboro, Marlboro, Worcester, Berlin, Southboro, Hudson, Auburn, Millbury and Clinton, or if you have any questions about Speed Queen products, please feel free to call us at (508) 393-8713 or simply stop by Brown's TV & Appliance at any time and we would be glad to help you.
|
#!/usr/bin/env python
from HTMLParser import HTMLParser
import json
import shutil
from StringIO import StringIO
import sys
import traceback
import urllib2
from bs4 import BeautifulSoup
from mutagen.id3 import ID3, APIC, TIT2, TPE1
unescape_html = HTMLParser().unescape
def main():
try:
if len(sys.argv) != 2:
raise ValueError('Expecting one argument, the URL of a song on SoundCloud.')
sound_cloud_page = SoundCloudPage(sys.argv[1])
sound_cloud_page.download_song()
except:
traceback.print_exception(*sys.exc_info())
        print ('\nSorry, you just experienced an error :(\nPlease report it '
               'to me here: https://github.com/jmckib/soundcurl/issues/new')
class SoundCloudPage(object):
def __init__(self, page_url):
# Http GET parameters screw up the expected format of the page
# sometimes. Example: `?fb_action_ids` from soundcloud links on
# facebook.
self._page_url = page_url.split('?')[0]
# Use StringIO so we can consume the response multiple times.
self._http_response = StringIO(urllib2.urlopen(self._page_url).read())
def download_song(self):
"""Download song from given SoundCloud URL and write to disk as mp3.
The URL must be for a single song, not a set or an artist's page.
Title, artist, and cover art metadata are included in the mp3.
"""
stream_url_line = self._get_stream_url_line()
if not stream_url_line:
raise ValueError(
"Can't find stream URL. Are you sure '%s' is the url of a "
"song on SoundCloud?" % self._page_url)
stream_data = self._get_stream_data(stream_url_line)
# A file-like object containing the song data.
song = urllib2.urlopen(stream_data['streamUrl'])
# Write the song to disk.
song_title, artist = self._get_title_and_artist(stream_data)
        # Believe it or not, there are songs with forward slashes in their
        # titles, but we can't use a slash in a file name.
song_filename = '%s.mp3' % song_title.replace('/', '|')
print "Writing '%s'" % song_filename
        # Use a with-block so the file is flushed and closed before mutagen
        # reopens it to write the ID3 tags below.
        with open(song_filename, 'wb') as song_file:
            shutil.copyfileobj(song, song_file)
print 'Writing song metadata...'
tags = ID3()
tags.add(TIT2(encoding=3, text=song_title)) # Song title
print "\ttitle: '%s'" % song_title
tags.add(TPE1(encoding=3, text=artist)) # Artist
print "\tartist: '%s'" % artist
# Add track artwork.
# First, get a URL for the artwork as a jpeg.
soup = BeautifulSoup(self._get_fresh_http_response())
artwork_img = soup.find('img', alt="Track artwork")
artwork_url = artwork_img.get('src') if artwork_img else None
if not artwork_url:
print 'Failed to find artwork URL.'
else:
print 'Writing cover art...'
artwork = urllib2.urlopen(artwork_url)
tags.add(APIC(
encoding=3, mime='image/jpeg', desc=u'',
type=3, # indicates that this is the front cover art
data=artwork.read())
)
tags.save(song_filename)
def _get_fresh_http_response(self):
self._http_response.seek(0)
return self._http_response
def _get_stream_url_lines(self):
"""Return an iterator of the stream url lines in the http response.
A "stream url line" is a line of javascript code in the page's html
that contains the url of an mp3. The stream url lines are in the same
order as the songs on the page.
"""
return (line for line in self._get_fresh_http_response()
if 'http://media.soundcloud.com/stream/' in line)
def _get_stream_url_line(self):
"""Return the first line in the http response with a stream url in it.
If there are no stream urls, return None. See `_get_stream_url_lines`
for more explanation.
"""
return next(self._get_stream_url_lines(), None)
def _get_stream_data(self, stream_url_line):
"""Return dictionary of stream data from a stream url line."""
# stream_url_line looks something like this
# window.SC.bufferTracks.push(<BIG_JAVASCRIPT_DICT>);\n
# Since a javascript dict is really a json dict, we decode it as one.
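        # Illustrative check of the slice bounds (hypothetical line, assumed
        # format): len('window.SC.bufferTracks.push(') == 28 and the trailing
        # ');\n' is 3 characters, so [28:-3] leaves only the JSON object, e.g.
        #   'window.SC.bufferTracks.push({"a": 1});\n'[28:-3]  ->  '{"a": 1}'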
return json.loads(stream_url_line[28:-3])
def _get_all_stream_data(self):
return (self._get_stream_data(stream_url_line) for stream_url_line
in self._get_stream_url_lines())
def _get_title_and_artist(self, stream_data):
try:
artist, title = stream_data['title'].split(' - ')
except ValueError:
artist = stream_data['user']['username']
title = stream_data['title']
return unescape_html(title).strip(), unescape_html(artist).strip()
if __name__ == '__main__':
main()
|
【Introduction】: There are two international passenger routes, sailing year-round. Ships depart for Incheon, South Korea at 16:00 every Monday, Tuesday, Thursday and Friday, and for Shimonoseki, Japan at 16:00 every Tuesday and Friday.
There are ferries and boats available from the Qingdao development zone to the old city of Qingdao. The three main routes run from Huangdao ferry terminal, Xuejia Island Anzi ferry terminal and Qingdao Fushan Bay ferry terminal to Qingdao ferry terminal. The crossing takes 30 minutes by ferry, or 12 minutes by fast boat. Normal ship: the first departs at 06:30, the last at 21:00, every half hour; the fare is 6 yuan. High-speed ship: the first departs at 07:10, the last at 18:30, every 20 minutes; the fare is 8 yuan. Qingdao to Xuejia Island (Anzi wharf) high-speed ship: the first departs at 07:00, the last at 18:10, every half hour. Qingdao ferry company: 82619279; Huangdao ferry company: 86856949; Xuejia Island ferry company: 86705247. The ferry route between the old city of Qingdao and Huangdao has strengthened the connection between the two sides of Jiaozhou Bay: by road along the Jiaozhou Bay highway the two places are 84 km apart, while the ferry takes only 20 to 30 minutes, though service is suspended on foggy days. Normal ship: the first departs at 06:30, the last at 21:00, every half hour; the fare is 7 yuan. High-speed ship: the first departs at 07:20, the last at 17:00, every 20 minutes; the fare is 9.5 yuan. Qingdao to Xuejia Island (Anzi wharf) high-speed ship: the first departs at 07:15, the last at 17:15, every half hour.
|
# -*- coding: utf-8 -*-
#
# This file is a part of Machette.
#
# Copyright (C) 2010 Vincent Batoufflet <[email protected]>
#
# This software is released under the terms of the GNU General Public License
# version 3 or any later version. See LICENSE file for further details.
#
# $Id$
import gtk
import pygtk
import os
import re
from machette.module import MachetteModule
from machette.path import DATA_DIR
pygtk.require('2.0')
# Set module class name
classname = 'MachetteModuleSplit'
# Set module information
mandatory = True
# Set configuration options list
options = {
'window.split-delimiter': (int, 0),
}
class MachetteModuleSplit(MachetteModule):
def register(self):
"""
Register MachetteModuleSplit module
void register(void)
"""
# Load module UI file
self.parent.wtree.add_from_file(os.path.join(DATA_DIR, 'ui', 'module',
'split.ui'))
# Initialize split delimiter GtkComboBox
for delim in ['|', '#', '@', unichr(0xb6), unichr(0x25a0)]:
self.parent.wtree.get_object('combobox-split-delimiter').\
append_text(delim)
# Restore last state
self.parent.wtree.get_object('combobox-split-delimiter').set_active(
self.parent.config.get('window.split-delimiter'))
# Attach UI to the parent window
self.parent.wtree.get_object('notebook-extension').append_page(
self.parent.wtree.get_object('vbox-split'), gtk.Label(_('Split')))
# Connect signals
self.parent.rbuffer.connect('changed', self.update_tab)
self.parent.tbuffer.connect('changed', self.update_tab)
self.parent.wtree.get_object('combobox-split-delimiter').\
connect('changed', self.update_tab)
self.parent.wtree.get_object('vbox-split').\
connect('map', self.update_tab)
def unregister(self):
"""
Unregister MachetteModuleSplit module
void unregister(void)
"""
# Save state
if self.parent.config.get('window.save-state'):
self.parent.config.set('window.split-delimiter', self.parent.\
wtree.get_object('combobox-split-delimiter').get_active())
def update_tab(self, source=None, event=None):
"""
Update split GtkNotebook tab
void update_tab(event source: gtk.Object, event: gtk.gdk.Event)
"""
# Reset buffer text
self.parent.wtree.get_object('textview-split-result').get_buffer().\
set_text('')
# Stop if updating is active or regex not available
if self.parent.updating or not self.parent.regex:
return
try:
delimiter = self.parent.wtree.\
get_object('combobox-split-delimiter').get_active_text()
# Get split chunks
regex = re.compile(self.parent.rbuffer.get_text(
self.parent.rbuffer.get_start_iter(),
self.parent.rbuffer.get_end_iter()), self.parent.flags)
chunks = regex.split(self.parent.target, self.parent.limit)
            # re.split() yields None for groups that didn't match; normalize them to ''
            chunks = [a if a else '' for a in chunks]
self.parent.wtree.get_object('textview-split-result').\
get_buffer().set_text(delimiter.join(chunks))
        except (IndexError, re.error):
            pass
|
Natural Choice Crunchy Treats are made with wholesome ingredients like real carrots, whole brown rice, and oatmeal, so you can feel great about treating your dog. These Crunchy Treats are perfect for training any dog, or just for when you want to give a special reward. They're delicious and nutritious, made with good stuff for good dogs!
Whole Brown Rice, Oatmeal, Dried Carrots, Poultry Fat (preserved with mixed Tocopherols, a source of Vitamin E), Chicken Meal, Molasses, Natural Flavor, Cinnamon, Beta-Carotene, L-Lysine Monohydrochloride, Choline Chloride, Zinc Sulfate, Ferrous Sulfate, Vitamin E Supplement, Copper Sulfate, Manganese Sulfate, Niacin, Calcium Pantothenate, Sodium Selenite, Vitamin A Supplement, Biotin, Ascorbic Acid, Vitamin B12 Supplement, Riboflavin, Menadione Sodium Bisulfite Complex (source of Vitamin K activity), Calcium Iodate, Pyridoxine Hydrochloride, Thiamine Mononitrate, Vitamin D3 Supplement, Folic Acid.
|
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
 - Unlike the original LeNet5, the second-layer convolutions are fully
   connected (no sparse connection table)
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
"""
Aaron Berndsen:
A Convolutional Neural Network using Theano for computation and structure,
but built to obey sklearn's basic 'fit' 'predict' functionality
*code largely motivated from deeplearning.net examples
and Graham Taylor's "Vanilla RNN" (https://github.com/gwtaylor/theano-rnn/blob/master/rnn.py)
You'll require theano and libblas-dev
tips/tricks/notes:
* if training set is large (>O(100)) and redundant, use stochastic gradient descent (batch_size=1), otherwise use conjugate gradient (batch_size > 1)
*
Basic usage:
import nnetwork as NN
n = NN.NeuralNetwork(design=[8,8]) # a NN with two hidden layers of 8 neurons each
n.fit(Xtrain, ytrain)
pred = n.predict(Xtest)
"""
import cPickle as pickle
import logging
import numpy as np
import timeit
from sklearn.base import BaseEstimator
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import os
import sys
from logistic_sgd_test import LogisticRegression, load_data
_logger = logging.getLogger("theano.gof.compilelock")
_logger.setLevel(logging.WARN)
logger = logging.getLogger(__name__)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class CNN(object):
"""
Convolutional Neural Network (CNN),
backend by Theano, but compliant with sklearn interface.
This class defines all the layers in the network. At present the CNN has 7 layers: 3 LeNetConvPoolLayer,
3 MLP HiddenLayers and 1 LogisticRegression. This architecture is for classifying 128x128 grayscale images.
The class MetaCNN has more lower level routines such as initialization, prediction and save.
You should init with MetaCNN.
"""
def __init__(self, input, im_width=128, im_height=128, n_out=2, activation=T.tanh,
nkerns=[48,128,256],
filters=[13,5,4],
poolsize=[(2,2),(2,2),(2,2)],
n_hidden=[200,50,2],
output_type='softmax', batch_size=128,
use_symbolic_softmax=False,verbose = True):
"""
im_width : width of input image
im_height : height of input image
n_out : number of class labels
:type nkerns: list of integers
:param nkerns: number of kernels on each layer
:type filters: list of integers
:param filters: width of convolution
:type poolsize: list of 2-tuples
:param poolsize: maxpooling in convolution layer (index-0),
and direction x or y (index-1)
:type n_hidden: list of integers
:param n_hidden: number of hidden neurons
:type output_type: string
:param output_type: type of decision 'softmax', 'binary', 'real'
:type batch_size: integers
        :param batch_size: number of samples in each training batch. Default 128.
"""
self.activation = activation
self.output_type = output_type
self.verbose = verbose
if verbose:
logger.info("\n Input image with:{} height:{} ".format(im_width,im_height))
# if use_symbolic_softmax:
# def symbolic_softmax(x):
# e = T.exp(x)
# return e / T.sum(e, axis=1).dimshuffle(0, 'x')
# self.softmax = symbolic_softmax
# else:
# self.softmax = T.nnet.softmax
rng = np.random.RandomState(23455)
# Reshape matrix of rasterized images of shape (batch_size, nx*ny)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = input.reshape((batch_size, 1, im_width, im_height))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (im_width - filters[0]+1, im_height-filters[0] + 1 )=(x,x)
# maxpooling reduces this further to (x/2,x/2) = (y,y)
# 4D output tensor is thus of shape (batch_size,nkerns[0],y,y)
self.layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, im_width, im_height),
filter_shape=(nkerns[0], 1, filters[0], filters[0]),
poolsize=poolsize[0]
)
if self.verbose:
logger.info('\n Layer {} \n image_shape: ({},{},{},{}) \n filter_shape: ({},{},{},{}) \n poolsize:{}'.format(0,
batch_size, 1, im_width, im_height,
nkerns[0], 1, filters[0], filters[0],
poolsize[0])
)
        # Construct the second convolutional pooling layer
        # filtering reduces the image size to (im_width_l1-filters[1]+1, im_height_l1-filters[1]+1) = (x,x)
        # maxpooling reduces this further to (x/2,x/2) = (y,y)
        # 4D output tensor is thus of shape (batch_size,nkerns[1],y,y)
im_width_l1 = (im_width - filters[0] + 1)/poolsize[0][0]
im_height_l1 = (im_height - filters[0] + 1)/poolsize[0][1]
self.layer1 = LeNetConvPoolLayer(
rng,
input=self.layer0.output,
image_shape=(batch_size, nkerns[0], im_width_l1, im_height_l1),
filter_shape=(nkerns[1], nkerns[0], filters[1], filters[1]),
poolsize=poolsize[1]
)
if self.verbose:
logger.info('\n Layer {} \n image_shape: ({},{},{},{}) \n filter_shape: ({},{},{},{}) \n poolsize:{}'.format(1
,batch_size, nkerns[0], im_width_l1, im_height_l1,
nkerns[1], nkerns[0], filters[1], filters[1],
poolsize[1])
)
        # Construct the third convolutional pooling layer
        # filtering reduces the image size to (im_width_l2-filters[2]+1, im_height_l2-filters[2]+1) = (x,x)
        # maxpooling reduces this further to (x/2,x/2) = (y,y)
        # 4D output tensor is thus of shape (batch_size,nkerns[2],y,y)
im_width_l2 = (im_width_l1 - filters[1] + 1)/poolsize[1][0]
im_height_l2 = (im_height_l1 - filters[1] + 1)/poolsize[1][1]
self.layer2 = LeNetConvPoolLayer(
rng,
input=self.layer1.output,
image_shape=(batch_size, nkerns[1], im_width_l2, im_height_l2),
filter_shape=(nkerns[2], nkerns[1], filters[2], filters[2]),
poolsize=poolsize[2]
)
if self.verbose:
logger.info('\n Layer {} \n image_shape: ({},{},{},{}) \n filter_shape: ({},{},{},{}) \n poolsize:{}'.format(2,
batch_size, nkerns[1], im_width_l2, im_height_l2,
nkerns[2], nkerns[1], filters[2], filters[2],
poolsize[2])
)
        # the HiddenLayer (tanh) being fully-connected, it operates on 2D matrices
        # of shape (batch_size,num_pixels) (i.e. a matrix of rasterized images).
        # This will generate a matrix of shape (batch_size, nkerns[2]*im_width_l3*im_height_l3)
layer3_input = self.layer2.output.flatten(2)
# construct a fully-connected sigmoidal layer
im_width_l3 = (im_width_l2-filters[2]+1)/poolsize[2][0]
im_height_l3 = (im_height_l2-filters[2]+1)/poolsize[2][1]
self.layer3 = HiddenLayer(
rng,
input=layer3_input,
n_in=nkerns[2] * im_width_l3 * im_height_l3,
n_out=n_hidden[0],
activation=T.tanh
)
if self.verbose:
logger.info("\n Layer {} input: ({},{})".format(3,batch_size,nkerns[2] * im_width_l3 * im_height_l3))
# construct a fully-connected sigmoidal layer
self.layer4 = HiddenLayer(
rng,
input=self.layer3.output,
n_in=n_hidden[0],
n_out=n_hidden[1],
activation=T.tanh
)
if self.verbose:
logger.info("\n Layer {} input: {}".format(4,n_hidden[1]))
# construct a fully-connected sigmoidal layer
self.layer5 = HiddenLayer(
rng,
input=self.layer4.output,
n_in=n_hidden[1],
n_out=n_hidden[2],
activation=T.tanh
)
if self.verbose:
logger.info("\n Layer {} input: {}".format(5,n_hidden[2]))
# classify the values of the fully-connected sigmoidal layer
self.layer6 = LogisticRegression(
input=self.layer5.output,
n_in=n_hidden[2],
n_out=n_out
)
if self.verbose:
logger.info("\n Layer {} input: {}".format(6,n_hidden[2]))
# CNN regularization
self.L1 = self.layer6.L1
self.L2_sqr = self.layer6.L2_sqr
# create a list of all model parameters to be fit by gradient descent
self.params = self.layer6.params + self.layer5.params + self.layer4.params + self.layer3.params + self.layer2.params + self.layer1.params + self.layer0.params
self.y_pred = self.layer6.y_pred
self.p_y_given_x = self.layer6.p_y_given_x
#self.layer3_output = self.layer5.input
self.layer5_output = self.layer5.input
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
#same as negative-log-likelikhood
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
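        # A concrete example of the indexing above (illustrative numbers, not
        # from the model): if p_y_given_x were [[0.1, 0.9], [0.8, 0.2]] and
        # y were [1, 0], then LP[T.arange(2), y] picks log(0.9) and log(0.8),
        # so the loss is -mean(log(0.9), log(0.8)) ~= 0.164.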
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the sequence
over the total number of examples in the sequence ; zero one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class MetaCNN(BaseEstimator):
"""
the actual CNN is not init-ed until .fit is called.
We determine the image input size (assumed square images) and
the number of outputs in .fit from the training data
"""
    # NOTE: these defaults must describe three conv layers and three hidden
    # layers, since the CNN class above always builds that architecture.
    def __init__(
        self, learning_rate=0.05, n_epochs=60, batch_size=128, activation='tanh',
        nkerns=[48,128,256], n_hidden=[200,50,2], filters=[13,5,4],
        poolsize=[(2,2),(2,2),(2,2)], output_type='softmax', L1_reg=0.00,
        L2_reg=0.00, use_symbolic_softmax=False, im_width=128, im_height=128,
        n_out=2, verbose=True):
self.learning_rate = float(learning_rate)
self.nkerns = nkerns
self.n_hidden = n_hidden
self.filters = filters
self.poolsize = poolsize
self.n_epochs = int(n_epochs)
self.batch_size = int(batch_size)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.use_symbolic_softmax = use_symbolic_softmax
self.im_width = im_width
self.im_height = im_height
self.n_out = n_out
self.verbose = verbose
def ready(self):
"""
this routine is called from "fit" since we determine the
image size (assumed square) and output labels from the training data.
"""
#input
self.x = T.matrix('x')
#output (a label)
self.y = T.ivector('y')
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.cnn = CNN(
input=self.x,
n_out=self.n_out,
activation=activation,
nkerns=self.nkerns,
filters=self.filters,
n_hidden=self.n_hidden,
poolsize=self.poolsize,
output_type=self.output_type,
batch_size=self.batch_size,
use_symbolic_softmax=self.use_symbolic_softmax,
verbose=self.verbose
)
#self.cnn.predict expects batch_size number of inputs.
#we wrap those functions and pad as necessary in 'def predict' and 'def predict_proba'
self.predict_wrap = theano.function(inputs=[self.x],
outputs=self.cnn.y_pred,
mode=mode)
# self.predict_vector = theano.function(inputs=[self.x],
# outputs=self.cnn.layer5.output,
# mode=mode)
self.predict_vector = theano.function(inputs=[self.x],
outputs=self.cnn.layer5_output,
mode=mode)
self.predict_proba_wrap = theano.function(inputs=[self.x],
outputs=self.cnn.p_y_given_x,
mode=mode)
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training set.
y : array-like, shape = [n_samples]
Labels for X.
Returns
-------
z : float
"""
return np.mean(self.predict(X) == y)
def fit(self, train_set_x, train_set_y, valid_set_x=None, valid_set_y=None,test_set_x = None,test_set_y = None,
n_epochs=None):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (T x n_in)
Y_train : ndarray (T x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
n_epochs : None (used to override self.n_epochs from init.
"""
self.ready()
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= self.batch_size
n_valid_batches /= self.batch_size
n_test_batches /= self.batch_size
######################
# BUILD ACTUAL MODEL #
######################
if self.verbose:
logger.info('\n ... building the model')
index = T.lscalar('index') # index to a [mini]batch
# cost = self.cnn.loss(self.y)\
# + self.L1_reg * self.cnn.L1\
# + self.L2_reg * self.cnn.L2_sqr
#cost = self.cnn.loss(self.y)
cost = self.cnn.layer6.negative_log_likelihood(self.y)
#self.cnn.loss(self.y),
test_model = theano.function(
[index],
self.cnn.layer6.errors(self.y),
givens={
self.x: test_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: test_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
#self.cnn.loss(self.y),
validate_model = theano.function(
[index],
self.cnn.layer6.errors(self.y),
givens={
self.x: valid_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: valid_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
self.params = self.cnn.params
# create a list of gradients for all model parameters
self.grads = T.grad(cost, self.params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates dictionary by automatically looping over all
# (params[i],grads[i]) pairs.
# self.updates = {}
# for param_i, grad_i in zip(self.params, self.grads):
# self.updates[param_i] = param_i - self.learning_rate * grad_i
self.updates = [
(param_i, param_i - self.learning_rate * grad_i)
for param_i, grad_i in zip(self.params, self.grads)
]
train_model = theano.function(
[index],
cost,
updates=self.updates,
givens={
self.x: train_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: train_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
###############
# TRAIN MODEL #
###############
if self.verbose:
logger.info('\n... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = np.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
logger.info('... training @ iter = {}'.format(iter))
cost_ij = train_model(minibatch_index)
print cost_ij
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
logger.info('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = np.mean(test_losses)
logger.info((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
self.save(fpath=base_path + '/data/')
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
logger.info('Optimization complete.')
logger.info('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
def predict(self, data):
"""
the CNN expects inputs with Nsamples = self.batch_size.
In order to run 'predict' on an arbitrary number of samples we
pad as necessary.
"""
if isinstance(data, list):
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
nsamples = data.shape[0]
n_batches = nsamples//self.batch_size
n_rem = nsamples%self.batch_size
if n_batches > 0:
preds = [list(self.predict_wrap(data[i*self.batch_size:(i+1)*self.batch_size]))\
for i in range(n_batches)]
else:
preds = []
if n_rem > 0:
z = np.zeros((self.batch_size, self.im_width * self.im_height))
z[0:n_rem] = data[n_batches*self.batch_size:n_batches*self.batch_size+n_rem]
preds.append(self.predict_wrap(z)[0:n_rem])
return np.hstack(preds).flatten()
def predict_proba(self, data):
"""
the CNN expects inputs with Nsamples = self.batch_size.
In order to run 'predict_proba' on an arbitrary number of samples we
pad as necessary.
"""
if isinstance(data, list):
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
nsamples = data.shape[0]
n_batches = nsamples//self.batch_size
n_rem = nsamples%self.batch_size
if n_batches > 0:
preds = [list(self.predict_proba_wrap(data[i*self.batch_size:(i+1)*self.batch_size]))\
for i in range(n_batches)]
else:
preds = []
if n_rem > 0:
            z = np.zeros((self.batch_size, self.im_width * self.im_height))
z[0:n_rem] = data[n_batches*self.batch_size:n_batches*self.batch_size+n_rem]
preds.append(self.predict_proba_wrap(z)[0:n_rem])
return np.vstack(preds)
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
#check if we're using ubc_AI.classifier wrapper,
#adding it's params to the state
if hasattr(self, 'orig_class'):
superparams = self.get_params()
#now switch to orig. class (MetaCNN)
oc = self.orig_class
cc = self.__class__
self.__class__ = oc
params = self.get_params()
for k, v in superparams.iteritems():
params[k] = v
self.__class__ = cc
else:
params = self.get_params() #sklearn.BaseEstimator
if hasattr(self, 'cnn'):
weights = [p.get_value() for p in self.cnn.params]
else:
weights = []
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
if hasattr(self, 'cnn'):
for param in self.cnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
#we may have several classes or superclasses
for k in ['n_comp', 'use_pca', 'feature']:
if k in params:
self.set_params(**{k:params[k]})
params.pop(k)
#now switch to MetaCNN if necessary
if hasattr(self,'orig_class'):
cc = self.__class__
oc = self.orig_class
self.__class__ = oc
self.set_params(**params)
self.ready()
if len(weights) > 0:
self._set_weights(weights)
self.__class__ = cc
else:
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
import datetime
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
#fname = '%s.%s.pkl' % (class_name, date_str)
fname = 'best_model.pkl'
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=np.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=np.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = np.asarray(rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height,filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows,#cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
self.input = input
def cosine_distance(a, b):
    """Return the cosine similarity of vectors a and b.

    Despite the name, larger values mean the vectors are more alike,
    which is why the nearest-image search below keeps the maximum.
    """
    dot_product = np.dot(a, b.T)
    return dot_product / (np.linalg.norm(a) * np.linalg.norm(b))
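# Example values (illustrative, not from the dataset): for a = [1, 0] and
# b = [1, 1], cosine_distance(a, b) = 1/sqrt(2) ~= 0.707, while identical
# vectors give 1.0; the loop below therefore treats higher as more similar.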
if __name__ == '__main__':
base_path = '/Applications/MAMP/htdocs/DeepLearningTutorials'
#base_path = '/home/ubuntu/DeepLearningTutorials'
from fetex_image import FetexImage
from PIL import Image
import random
datasets = load_data('mnist.pkl.gz')
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
cnn = MetaCNN(learning_rate=0.05,nkerns=[48,128,256], filters=[13,5,4], batch_size=64,poolsize=[(2,2),(2,2),(2,2)], n_hidden=[200,50,2] , n_out=2, im_width=128,im_height=128)
# cnn.fit(train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y, n_epochs=5)
# cnn.save(fpath=base_path + '/data/')
#folder = base_path + '/data/cnn-furniture/'
# Predictions after training
cnn.load(base_path + '/data/best_model.pkl')
#cnn.load('/home/ubuntu/DeepLearningTutorials/data/MetaCNN.2015-10-19-13:59:18.pkl')
#sample = np.asarray(X_train, dtype=theano.config.floatX)
#print sample[0].reshape((64,64)).shape
#Image.fromarray(sample[2].reshape((64,64)),mode="L").show()
pkl_file = open( '../data/train_set.pkl', 'rb')
train_set = pickle.load(pkl_file)
X_train, Y_train = train_set
pkl_file = open( '../data/lb.pkl', 'rb')
lb = pickle.load(pkl_file)
# arr = np.array(np.round((X_train[0] * 256).reshape((128,128))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
# arr = np.array(np.round((X_train[1] * 256).reshape((128,128))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
# arr = np.array(np.round((X_train[2] * 256).reshape((128,128))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
#print Y_train[0:3]
# arr = np.array(np.round((X_train[1300] * 256).reshape((64,64))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
#print sample[0]
# #print sample.shape
#sample = X_train[0:25]
#print lb.classes_
#sample = X_train[0]
#print Y_train[4000:4100]
#print cnn.predict(X_train[0:3])
# sample = X_train[4400]
# print Y_train[4400]
# print cnn.predict(sample)
# pkl_file = open( '../data/X_original.pkl', 'rb')
# X_original = cPickle.load(pkl_file)
# a = X_original[0:25]
# a = np.asarray(a, dtype=theano.config.floatX)
# #fe.reconstructImage(a[2]).show()
def flaten_aux(V):
return V.flatten(order='F')
#print X_train[0].shape
# cnn_output_vectors = np.array([])
# for i in xrange(1,8):
# #a = map(flaten_aux, X_train[128 * (i - 1): 128 * i ])
# a = X_train[64 * (i - 1): 64 * i ]
# # #print cnn.predict(a)
# a = cnn.predict_vector(a)
# #print a
# print len(cnn_output_vectors)
# #cnn_output_vectors.append(a)
# if len(cnn_output_vectors) == 0:
# cnn_output_vectors = a
# else:
# cnn_output_vectors = np.concatenate((cnn_output_vectors, a), axis=0)
# #cnn_output_vectors = cnn_output_vectors + a
# print len(cnn_output_vectors)
# file = open('../data/cnn_output_vectors.pkl', 'wb')
# pickle.dump(cnn_output_vectors, file, protocol=pickle.HIGHEST_PROTOCOL)
# file.close()
file = open('../data/cnn_output_vectors.pkl', 'rb')
cnn_output_vectors = pickle.load(file)
file.close()
print len(cnn_output_vectors)
#print len(cnn_output_vectors)
#print len(X_train)
#print cnn.predict(sample)
#print cnn.predict_wrap(a)
#rn_im_index = random.randint(0, len(X_train))
#base_image_index = 1
base_image_index = random.randint(0, 448)
max_similarity = 0
max_similarity_pos = -1
#max_similarity_pos = []
#for i in xrange(1,len(train_set_x)):
a = cnn_output_vectors[base_image_index]
#a = X_train[base_image_index]
#print a.shape
for i in xrange(0,64 * 7):
if i != base_image_index:
b = cnn_output_vectors[i]
#b = X_train[i]
d = cosine_distance(a, b)
print d
#if d > max_similarity:
if d > max_similarity:
max_similarity = d
max_similarity_pos = i
#max_similarity_pos.append(i)
print 'max_similarity: {}'.format(max_similarity)
fe = FetexImage(mode='L')
fe.reconstructImage(X_train[base_image_index]).show()
fe.reconstructImage(X_train[max_similarity_pos]).show()
# fe.reconstructImage(X_train[max_similarity_pos[0]]).show()
# fe.reconstructImage(X_train[max_similarity_pos[1]]).show()
# fe.reconstructImage(X_train[max_similarity_pos[2]]).show()
# fe.reconstructImage(X_train[max_similarity_pos[3]]).show()
# print a.shape
# print b.shape
# print cosine_distance(a, b)
|
A 21-year-old blonde girl meets a large, powerfully built bodybuilder at a bar.
After a number of drinks, they agree to go back to his place.
The blonde goes running out of the apartment, screaming in fear.
The bodybuilder puts his clothes back on and chases after her. He catches up to her and asks why she ran out of the apartment like that.
|
import random
from .. import irc, var
# Fill command dictionary.
def ins_command ():
var.commands["decide"] = type("command", (object,), {})()
var.commands["decide"].method = decide
var.commands["decide"].tags = ["other"]
var.commands["decide"].aliases = [".decide", ".choose"]
var.commands["decide"].usage = [
"{} a|b|c|d|... - Decide between a, b, c, ...",
"{} a or b or c or ... - Decide between a, b, c, ...",
"{} a,b,c,... - Decide between a, b, c, ...",
"{} a - Decide between Yes and No.",
"That is the order of preference. You can do {} a or b | c, which will decide between \"a or b\" and c."
]
# Command method.
def decide (user, channel, word):
if len(word) == 1:
irc.msg(channel, "{}: You have to give me some choices.".format(user))
else:
string = " ".join(word[1:])
if "|" in string:
choices = [choice.strip() for choice in string.split("|") if choice]
elif " or " in string:
choices = [choice.strip() for choice in string.split(" or ") if choice]
elif "," in string:
choices = [choice.strip() for choice in string.split(",") if choice]
else:
choices = ["Yes.", "No."]
        # Guard against an empty choice list (e.g. the input was only delimiters).
if not choices:
irc.msg(channel, "{}: Give me some choices, man, come on.".format(user))
return
if random.random() < 0.05:
if choices == ["Yes.", "No."]:
irc.msg(channel, "{}: Maybe.".format(user))
else:
irc.msg(channel, "{}: Neither.".format(user))
else:
irc.msg(channel, "{}: {}".format(user, random.choice(choices)))
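# Parsing walkthrough (hypothetical inputs): ".decide a or b | c" is split on
# "|" first, giving ["a or b", "c"], so "|" can group alternatives that
# themselves contain " or "; ".decide tea, coffee" falls through to the comma
# splitter; and ".decide pizza" uses the default ["Yes.", "No."] choices.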
|
Cobra and Phases Group Play Voltage in the Milky Night is Stereolab’s sixth studio album and a return to the poppier tendencies the band had left behind on their previous full-length, Dots and Loops. Where that album was an amalgam of often chilled, abstract electronica, prog and jazz, this work’s exotic pop features warm Moog textures, bursting horns and lush, layered vocals.
Co-producers John McEntire (Tortoise, The Sea and Cake) and Jim O’Rourke (Sonic Youth, Gastr del Sol) add new wrinkles to Stereolab’s sound and deliver rich and expansive mixes. Their contributions to the album’s sonic deconstructions and analog keyboard textures present a radical model of pop-rock song architecture. Harmony and cacophony coexist throughout, wrapped up in the group’s familiar, mellow ’60s bossa-groove.
The arrangements incorporate Chicago post-rock, Sun Ra-styled free-form jazz and classic British acid-psych styles, while the songs return to the core melodic instincts of the band’s classic albums. Cobra and Phases Group Play Voltage in the Milky Night ended a decade in which Stereolab constructed a singular vision of popular music.
It completed a journey from the lo-fi fuzz of their early ’90s singles to the sophisticated and complex soundscapes of their more mature second phase. The result is an alluring collage of ’60s jazz, French pop and shimmery, ambient guitars, and an underrated piece of Stereolab’s oeuvre.
|
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ColumnEditWdg', 'ColumnEditCbk']
from pyasm.biz import Pipeline, Project
from pyasm.command import Command, CommandException
from pyasm.search import Search, SearchType
from pyasm.web import DivWdg, Table
from pyasm.widget import TextWdg, IconWdg, SelectWdg, HiddenWdg, WidgetConfigView
from pyasm.common import TacticException
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.widget import SingleButtonWdg, ActionButtonWdg, IconButtonWdg
class ColumnEditWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
top.add_color("background", "background")
top.add_class("spt_columns_top")
my.set_as_panel(top)
top.add_style("padding: 10px")
search_type = my.kwargs.get("search_type")
search_type_obj = SearchType.get(search_type)
inner = DivWdg()
top.add(inner)
inner.add_style("width: 500px")
#text = TextWdg("search_type")
text = HiddenWdg("search_type")
inner.add(text)
text.set_value(search_type)
title_wdg = DivWdg()
inner.add(title_wdg)
title_wdg.add( search_type_obj.get_title() )
title_wdg.add(" <i style='font-size: 9px;opacity: 0.5'>(%s)</i>" % search_type)
title_wdg.add_style("padding: 5px")
title_wdg.add_color("background", "background3")
title_wdg.add_color("color", "color3")
title_wdg.add_style("margin: -10px -10px 10px -10px")
title_wdg.add_style("font-weight: bold")
shelf_wdg = DivWdg()
inner.add(shelf_wdg)
shelf_wdg.add_style("height: 30px")
button = ActionButtonWdg(title='Create', icon=IconWdg.SAVE)
shelf_wdg.add(button)
shelf_wdg.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'search_type': search_type,
'cbjs_action': '''
var class_name = 'tactic.ui.startup.ColumnEditCbk';
var top = bvr.src_el.getParent(".spt_columns_top");
var elements = top.getElements(".spt_columns_element");
var values = [];
for (var i = 0; i < elements.length; i++ ) {
var data = spt.api.Utility.get_input_values(elements[i], null, false);
values.push(data)
}
var kwargs = {
search_type: bvr.search_type,
values: values
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
var names = [];
for (var i = 0; i < values.length; i++) {
var name = values[i].name;
name = name.strip();
if (name == '') { continue; }
names.push(name);
}
spt.table.add_columns(names)
// prevent grabbing all values, pass in a dummy one
spt.panel.refresh(top, {'refresh': true});
} catch(e) {
spt.alert(spt.exception.handler(e));
}
'''
} )
# add the headers
table = Table()
inner.add(table)
table.add_style("width: 100%")
tr = table.add_row()
tr.add_gradient("background", "background3")
tr.add_style("padding", "3px")
th = table.add_header("Column Name")
th.add_style("width: 170px")
th.add_style("text-align: left")
th = table.add_header("Format")
th.add_style("text-align: left")
from tactic.ui.container import DynamicListWdg
dyn_list = DynamicListWdg()
inner.add(dyn_list)
from tactic.ui.manager import FormatDefinitionEditWdg
for i in range(0, 4):
column_div = DivWdg()
column_div.add_class("spt_columns_element")
if i == 0:
dyn_list.add_template(column_div)
else:
dyn_list.add_item(column_div)
column_div.add_style("padding: 3px")
column_div.add_style("float: left")
table = Table()
column_div.add(table)
table.add_row()
text_wdg = NewTextWdg("name")
td = table.add_cell(text_wdg)
text_wdg.add_behavior( {
'type': 'blur',
'cbjs_action': '''
var value = bvr.src_el.value;
var code = spt.convert_to_alpha_numeric(value);
bvr.src_el.value = code;
'''
} )
option = {
'name': 'xxx',
'values': 'integer|float|percent|currency|date|time|scientific|boolean|text|timecode',
}
format_wdg = FormatDefinitionEditWdg(option=option)
td = table.add_cell(format_wdg)
td.add_style("width: 260px")
td.add_style("padding-left: 40px")
# show the current columns
title_wdg = DivWdg()
inner.add(title_wdg)
title_wdg.add_style("margin-top: 20px")
title_wdg.add("<b>Existing Columns</b>")
title_wdg.add_color("background", "background3")
title_wdg.add_style("padding: 5px")
title_wdg.add_style("margin: 20px -10px 10px -10px")
config = WidgetConfigView.get_by_search_type(search_type, "definition")
element_names = config.get_element_names()
table = Table()
inner.add(table)
table.add_style("width: 100%")
tr = table.add_row()
tr.add_gradient("background", "background3")
th = table.add_header("Column")
th.add_style("text-align: left")
th = table.add_header("Data Type")
th.add_style("text-align: left")
th = table.add_header("Format")
th.add_style("text-align: left")
th = table.add_header("Edit")
th.add_style("text-align: left")
count = 0
for element_name in element_names:
display_class = config.get_display_handler(element_name)
if display_class != 'tactic.ui.table.FormatElementWdg':
continue
table.add_row()
display_options = config.get_display_options(element_name)
format = display_options.get("format")
if not format:
format = '<i>text</i>'
data_type = display_options.get("type")
table.add_cell(element_name)
table.add_cell(data_type)
table.add_cell(format)
td = table.add_cell()
button = IconButtonWdg(title="Edit Definition", icon=IconWdg.EDIT)
td.add(button)
button.add_behavior( {
'type': 'click_up',
'search_type': search_type,
'element_name': element_name,
'cbjs_action': '''
var class_name = 'tactic.ui.manager.ElementDefinitionWdg';
var kwargs = {
search_type: bvr.search_type,
view: 'definition',
element_name: bvr.element_name
};
spt.panel.load_popup("Element Definition", class_name, kwargs);
'''
} )
count += 1
if not count:
table.add_row()
td = table.add_cell()
td.add_style("height: 50px")
td.add("No existing columns found")
td.add_style("text-align: center")
td.add_border()
td.add_color("background", "background", -5)
if my.kwargs.get("is_refresh"):
return inner
else:
return top
class ColumnEditCbk(Command):
def execute(my):
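        # For each submitted row: validate the column name, add the column to
        # the search type via ColumnAddCmd, then register a FormatElementWdg
        # entry for it in the "definition" widget config.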
search_type = my.kwargs.get("search_type")
column_info = SearchType.get_column_info(search_type)
values = my.kwargs.get("values")
# get the definition config for this search_type
from pyasm.search import WidgetDbConfig
config = WidgetDbConfig.get_by_search_type(search_type, "definition")
if not config:
config = SearchType.create("config/widget_config")
config.set_value("search_type", search_type)
config.set_value("view", "definition")
config.commit()
config._init()
for data in values:
name = data.get("name")
name = name.strip()
if name == '':
continue
try:
name.encode('ascii')
except UnicodeEncodeError:
raise TacticException('Column name needs to be in English. Non-English characters can be used in Title when performing [Edit Column Definition] afterwards.')
if column_info.get(name):
raise CommandException("Column [%s] is already defined" % name)
format = data.get("format")
fps = data.get("fps")
data_type = data.get("data_type")
from pyasm.command import ColumnAddCmd
cmd = ColumnAddCmd(search_type, name, data_type)
cmd.execute()
#(my, search_type, attr_name, attr_type, nullable=True):
class_name = 'tactic.ui.table.FormatElementWdg'
options = {
'format': format,
'type': data_type,
'fps': fps
}
# add a new widget to the definition
config.append_display_element(name, class_name, options=options)
config.commit_config()
class NewTextWdg(TextWdg):
def init(my):
#color = my.get_color("border", -20)
color2 = my.get_color("border")
color = my.get_color("border", -20)
my.add_event("onfocus", "this.focused=true")
my.add_event("onblur", "this.focused=false;$(this).setStyle('border-color','%s')" % color2)
my.add_behavior( {
'type': 'mouseover',
'color': color,
'cbjs_action': '''
bvr.src_el.setStyle("border-color", bvr.color);
'''
} )
my.add_behavior( {
'type': 'mouseout',
'color': color2,
'cbjs_action': '''
if (!bvr.src_el.focused) {
bvr.src_el.setStyle("border-color", bvr.color);
}
'''
} )
super(NewTextWdg,my).init()
|
For the girls exploring their stylish sides or just looking for the world's cutest way to hold their hair back, this all-inclusive craft set is the perfect way to customize two cute headbands! Over 50 shimmering gel stickers and sparkling glitter stickers are included to make each one unique. Kids simply choose a headband cover, add stickers, and slip the cover over one of the included plastic headbands. The result is a shimmering fashion statement they'll be proud to wear!
|
# -*- encoding: utf-8 -*-
from __future__ import division
import sys
import scipy
from FrameshiftSite import *
class FrameshiftTranscript( object ):
def __init__( self, name, length ):
self.name = name
self.length = length
self.frameshift_sites = dict()
def add_frameshift_site( self, position, signal, radians_vector=( 2*scipy.pi/3, 2*scipy.pi/3, 2*scipy.pi/3 ), desig=None ):
def frameshift_position_score( x, L ):
"""
triangular function
P( frameshift ) is maximum in the middle and decreases to the edges
"""
if x < L/2:
return x/(L/2)
else:
return ( L - x )/(L/2)
position_score = frameshift_position_score( position, self.length )
self.frameshift_sites[position] = FrameshiftSite( ( 0, position ), \
( 0, 0 ), signal, self.length, position_score, radians_vector, desig )
def __repr__( self ):
output_str = "Transcript: %s of length %s\n" % ( self.name, self.length )
i = 1
for pos,FS in self.frameshift_sites.iteritems():
output_str += "Frameshift #%s: %s (desig: %s) at %s (pos-score = %s).\n" % ( i, \
FS.signal, FS.designation, FS.position, FS.position_score )
i += 1
return output_str
def filtered_print( self, p0=0, p1=1, theta0=scipy.pi ):
output_str = "Transcript: %s of length %s\n" % ( self.name, self.length )
i = 1
for pos,FS in self.frameshift_sites.iteritems():
if p0 <= FS.posscore2proportion( self.length ) <= p1 and FS.radians_vector_f[0] <= theta0:
output_str += "Frameshift #%s: %s (desig: %s) at %s (pos-score = %s).\n" % ( i, \
FS.signal, FS.designation, FS.position, FS.position_score )
i += 1
return output_str
def frameshifts( self, p0=0, p1=1, theta0=scipy.pi ):
for fss_i,fss in self.frameshift_sites.iteritems():
if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0:
yield self.name, fss
def has_frameshift( self, p0=0, p1=1, theta0=scipy.pi ):
"""
beware!
"""
frameshift_count = 0
for fss_i,fss in self.frameshift_sites.iteritems():
if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0:
frameshift_count += 1
if frameshift_count > 0:
return True
else:
return False
def has_exact_frameshift( self, other, p0=0, p1=1, theta0=scipy.pi, tol=3 ):
"""
beware!
"""
self_fsss = self.frameshift_sites.values()
other_fsss = other.frameshift_sites.values()
present = False
for fss in self_fsss:
for oss in other_fsss:
if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0 and -tol <= fss.distance_from_5prime - oss.distance_from_5prime <= tol and fss.signal == oss.signal and fss.designation == oss.designation:
present = True
return present
def rough_equality( self, other ):
if len( self.frameshift_sites ) > 0 and len( other.frameshift_sites ) > 0:
return True
else:
return False
def is_equal( self, other, p0, p1, theta0 ):
# each FSTObject has one or more FSSObjects
# we look for equality on FSSObjects by comparing positions and signals
equal = False
number_equal = 0
		frameshift_sites_self = [ fss for fss in self.frameshift_sites.values() if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0 ]
frameshift_sites_other = other.frameshift_sites.values()
for fsss in frameshift_sites_self:
for fsso in frameshift_sites_other:
if fsss == fsso:
equal = True
number_equal += 1
return equal, number_equal
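# A minimal usage sketch (hypothetical transcript name and signal; FrameshiftSite
# comes from the import above):
#
#	ft = FrameshiftTranscript( "transcript_1", 300 )
#	ft.add_frameshift_site( 150, "CCCTTT" )	# mid-transcript => position score 1.0
#	ft.add_frameshift_site( 75, "CCCTTT" )	# quarter-way => position score 0.5
#	print ft
#
# frameshift_position_score is triangular: x/(L/2) rises to 1.0 at L/2,
# then (L-x)/(L/2) falls back towards 0 at L.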
|
Does "Can't Help Falling In Love With You" sound bad on your harmonica? Nail your technique with easy video lessons ->click here.
BUT, I CAN'T HELP, FALL-ING IN LOVE WITH YOU.
SHALL I STAY, WOULD IT BE A SIN?
CAUSE I CAN'T HELP, FALL-ING IN LOVE WITH YOU.
DAR-LING SO IT GOES, SOME THINGS ARE MEANT TO BE.
TAKE MY HAND, TAKE MY WHOLE LIFE TOO.
FOR I CAN'T HELP, FALL-ING IN LOVE WITH YOU.
DAR-LING SO IT GOES, SOME THINGS ARE MEANT TO BE.
FOR I CAN'T HELP, FALL-ING IN LOVE WITH YOU.
FOR I CAN'T HELP FALL-ING IN LOVE WITH YOU.
|
# Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for users
"""
import hashlib
import random
import string
from backend.serializers.user import PasswordSerializer
from backend.serializers.serializers import FullSerializer
from backend.decorators import required_roles, load, return_object, return_list, log
from backend.toolbox import Toolbox
from rest_framework import status, viewsets
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from ovs.dal.hybrids.user import User
from ovs.dal.hybrids.client import Client
from ovs.dal.hybrids.j_roleclient import RoleClient
from ovs.dal.lists.userlist import UserList
class UserViewSet(viewsets.ViewSet):
"""
Information about Users
"""
permission_classes = (IsAuthenticated,)
prefix = r'users'
base_name = 'users'
@log()
@required_roles(['read'])
@return_list(User)
@load()
def list(self, request):
"""
        Lists all available Users that the logged in user has access to
"""
if Toolbox.is_client_in_roles(request.client, ['manage']):
return UserList.get_users()
else:
return [request.client.user]
@log()
@required_roles(['read'])
@return_object(User)
@load(User)
def retrieve(self, request, user):
"""
Load information about a given User
Only the currently logged in User is accessible, or all if the logged in User has a
system role
"""
if user.guid == request.client.user_guid or Toolbox.is_client_in_roles(request.client, ['manage']):
return user
raise PermissionDenied('Fetching user information not allowed')
@log()
@required_roles(['read', 'write', 'manage'])
@load()
def create(self, request):
"""
Creates a User
"""
serializer = FullSerializer(User, instance=User(), data=request.DATA, allow_passwords=True)
if serializer.is_valid():
user = serializer.object
if UserList.get_user_by_username(user.username) is not None:
return Response('User already exists', status=status.HTTP_303_SEE_OTHER)
user.save()
pw_client = Client()
pw_client.ovs_type = 'INTERNAL'
pw_client.grant_type = 'PASSWORD'
pw_client.user = user
pw_client.save()
cc_client = Client()
cc_client.ovs_type = 'INTERNAL'
cc_client.grant_type = 'CLIENT_CREDENTIALS'
cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
string.digits +
'|_=+*#@!/-[]{}<>.?,\'";:~')
for _ in range(128))
cc_client.user = user
cc_client.save()
for junction in user.group.roles:
for client in [cc_client, pw_client]:
roleclient = RoleClient()
roleclient.client = client
roleclient.role = junction.role
roleclient.save()
serializer = FullSerializer(User, instance=user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@log()
@required_roles(['read', 'write', 'manage'])
@load(User)
def destroy(self, request, user):
"""
Deletes a user
"""
if request.client.user_guid == user.guid:
raise PermissionDenied('A user cannot delete itself')
for client in user.clients:
for token in client.tokens:
for junction in token.roles.itersafe():
junction.delete()
token.delete()
for junction in client.roles.itersafe():
junction.delete()
client.delete()
user.delete(abandon=['logs']) # Detach from the log entries
return Response(status=status.HTTP_204_NO_CONTENT)
@log()
@required_roles(['read', 'write', 'manage'])
@load(User)
def partial_update(self, contents, user, request):
"""
Update a User
"""
contents = None if contents is None else contents.split(',')
serializer = FullSerializer(User, contents=contents, instance=user, data=request.DATA)
if serializer.is_valid():
if user.guid == request.client.user_guid:
raise PermissionDenied('A user cannot update itself')
serializer.save()
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@action()
@log()
@required_roles(['read', 'write'])
@load(User)
def set_password(self, request, user):
"""
        Sets the password of a given User. A logged in User can only change its own password,
        or all passwords if the logged in User has a system role
"""
if user.guid == request.client.user_guid or Toolbox.is_client_in_roles(request.client, ['manage']):
serializer = PasswordSerializer(data=request.DATA)
if serializer.is_valid():
user.password = hashlib.sha256(str(serializer.data['new_password'])).hexdigest()
user.save()
# Now, invalidate all access tokens granted
for client in user.clients:
for token in client.tokens:
for junction in token.roles:
junction.delete()
token.delete()
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
raise PermissionDenied('Updating password not allowed')
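# A minimal client-side sketch (hypothetical host, guid and token; the exact
# routes depend on how this ViewSet is registered under the 'users' prefix):
#
#   import requests
#   resp = requests.post('https://ovs.example/api/users/<guid>/set_password/',
#                        headers={'Authorization': 'Bearer <token>'},
#                        data={'new_password': 's3cr3t'})
#   # expect 202 ACCEPTED; all of the user's access tokens are then invalidated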
|
When Mel Brooks was honored by the Ellis Island Family Heritage Awards last week, he was introduced by a video clip showing his father's trip from Austria to New York, where his family had a herring business.
"That photo was not of my father," Brooks said after taking the stage. "That was my great-uncle." He went on to say that while his uncle was a good-looking man who "wore a nice hat," as seen in the snapshot, his father, Max Kaminsky, was better looking.
The awards honor immigrants or their descendants who have made a major contribution to the American experience. In addition to Brooks, author Mary Higgins Clark, University of Miami President Donna Shalala, and the Forbes family received the 2008 awards.
|
from read_embed import read_embed
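# Overview: keep only tokens that appear in the count files AND have an
# embedding, then write tidied train files plus length histograms. The field
# naming is inferred from the variable names: tc/tw = title chars/words,
# cc/cw = content chars/words.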
char_raw = open('sorted_char_count.txt').readlines()
word_raw = open('sorted_word_count.txt').readlines()
char_raw = [v.strip().split(',') for v in char_raw]
word_raw = [v.strip().split(',') for v in word_raw]
char = [v[0] for v in char_raw]
char = {k:0 for k in char}
word = [v[0] for v in word_raw]
word = {k:0 for k in word}
char_embed_path='./char_embedding.txt'
word_embed_path='./word_embedding.txt'
word_dict,_,_ = read_embed(word_embed_path)
char_dict,_,_ = read_embed(char_embed_path)
word = {k:0 for k in word if word_dict.has_key(k)}
char = {k:0 for k in char if char_dict.has_key(k)}
f = open('question_topic_train_set.txt')
question_topic = f.readlines()
f = open('question_train_set.txt')
raw_questions = f.readlines()
f_tidy_question = open('tidy_question_train_set.txt','w')
f_tidy_topic = open('tidy_question_topic_train_set.txt','w')
tc_length = {i:0 for i in range(10000)}
cc_length = {i:0 for i in range(30000)}
tw_length = {i:0 for i in range(1000)}
cw_length = {i:0 for i in range(4000)}
for raw_value, raw_label in zip(raw_questions, question_topic):
value = raw_value.split()
if len(value) < 3:
continue
#f_tidy_question.write(value[0])
tc = value[1].split(',')
tc = [v for v in tc if char.has_key(v)]
tc_length[len(tc)] +=1
tc = ','.join(tc)
#f_tidy_question.write('\t'+tc)
tw = value[2].split(',')
tw = [v for v in tw if word.has_key(v)]
tw_length[len(tw)] +=1
tw = ','.join(tw)
#f_tidy_question.write('\t'+tw)
if len(tc)==0 or len(tw) ==0:
continue
write_line = '\t'.join([value[0], tc, tw])
if len(value)>3:
cc = value[3].split(',')
cc = [v for v in cc if char.has_key(v)]
cc_length[len(cc)] +=1
cc = ','.join(cc)
write_line += '\t'+cc
if len(value)>4:
cw = value[4].split(',')
cw = [v for v in cw if word.has_key(v)]
cw_length[len(cw)] +=1
cw = ','.join(cw)
write_line += '\t'+cw
write_line += '\n'
f_tidy_question.write(write_line)
f_tidy_topic.write(raw_label)
f_tidy_question.close()
f_tidy_topic.close()
with open('tc_length.txt','w') as f:
for k,v in tc_length.items():
f.write(str(k)+','+str(v)+'\n')
with open('cc_length.txt','w') as f:
for k,v in cc_length.items():
f.write(str(k)+','+str(v)+'\n')
with open('tw_length.txt','w') as f:
for k,v in tw_length.items():
f.write(str(k)+','+str(v)+'\n')
with open('cw_length.txt','w') as f:
for k,v in cw_length.items():
f.write(str(k)+','+str(v)+'\n')
|
The 15th cumulative update release for SQL Server 2014 SP2 is available for download at the Microsoft Downloads site.
Registration is no longer required to download Cumulative updates.
On July 9, 2019, Microsoft will end Extended Support for SQL Server 2008 and 2008 R2, which means no more updates or support of any kind, potentially leaving you vulnerable to security and compliance issues.
That is only a year away. So time to start planning and to get it into your 2019 budget.
What applications are affected? With what new SQL version are they compatible?
Will you need to rebuy licenses? SQL licensing is now core based and the cost might prove a lot higher than last time, so take the time to consider all options.
Should any of your applications move to the cloud?
Should you also look at upgrades to Hardware? Windows, Office, Exchange, or Business finance/erp systems in conjunction with SQL?
Is now the time to review your security solutions?
Are you going to expand, or implement heavy new processes like consolidation, budgeting, or BI in the next 2-3 years?
Is your mobile network growing?
There are major enhancements in SQL 2016 SP1, so we recommend you do not consider any version lower than that. By next year SQL 2017 will also have settled down.
SQL Server was for many years on a two-year release cycle. SQL Server 2017 arrived less than 18 months after SQL Server 2016 became available.
Since 2005, each release of SQL Server has brought exciting new features and improvements to existing capabilities. Many organizations are running instances that are several versions of SQL Server behind.
Keeping up with the latest SQL Server versions is a challenge, but falling behind risks losing mainstream support and missing out on beneficial features. Often database administrators must support multiple versions at once, and consultants face an even greater range of versions across their customers.
Microsoft has not committed to any specific release cadence for future versions of SQL Server. Many clients, it seems, are still running SQL Server 2008 R2. One reason why companies are hesitant to make the move off 2008 R2 is the change to per-core licensing. The effort to test and to upgrade is discouraging, but it is better to do this on a planned basis than as a reaction to a crisis.
It was a painful experience to upgrade from SQL Server 2000, but the compatibility gap between versions is much narrower once past 2005. To make upgrading easier, Microsoft provides a tool called the Upgrade Advisor for each new version that will spot issues and provide a chance to resolve them before starting the upgrade process. Virtualization also makes setting up testing environments much simpler and quicker.
With each new version there are enhancements to T-SQL, improved availability and disaster recovery functionality, more security options, and additional ways to get better performance. SQL Server 2016 Service Pack 1 was a game changer – many previously Enterprise-only features were ported down to more affordable editions.
Another consideration is support. It doesn’t take long to reach the end of mainstream support. SQL Server 2008 R2, for example, has been out of mainstream support since 2014. While it’s still in extended support, which will ensure security hotfixes, other support features are available only on a paid basis.
When you look at erp upgrades it makes sense to also review your SQL upgrade plans.
SQL Server 2012 Service Pack 4 (SP4) has also been released. This SQL 2012 Service Pack has 20+ improvements centered around performance, scalability and diagnostics to enable SQL Server 2012 to perform faster and scale out of the box on modern hardware designs.
Project failure? Why wait to find out – try a pre-mortem.
A recent survey by PwC showed that over 85% of Dynamics AX projects failed to achieve their core objectives.
The same is true of most erp systems, and indeed of most projects. There is always over-optimism, and over-confidence, and an assumption of perfection, despite the copious evidence that other good companies made the same errors of judgement.
So before you start a project, rather than wait to see how it pans out and then do a post-mortem, try holding a pre-mortem. Assume it did not go so well. Ask your project team/stakeholders to write down why.
In the pre-project phase, once a senior executive gives a green light to go ahead, dissenting voices tend to go quiet. Costs are negotiated down, rather than risk management, contingency and quality built in.
A pre-mortem can help you find out what people really think and inject a touch of realism about the challenges and the realistic scope, time and resources needed. With concerns identified they can be addressed, and the team who has to deliver will be much more committed with their concerns out in the open and a less rosy-tinted outlook.
Gartner has recently identified Microsoft Azure as a leader in the analyst firm’s Magic Quadrant for Cloud Infrastructure as a Service (IaaS) for the third year in a row, based on both completeness of vision and the ability to execute.
Microsoft’s Azure cloud platform enables the creation of virtual networks, servers and machines, and supports multitenant storage, object storage and a robust content delivery network for both Microsoft and other vendor solutions. Azure also provides advanced services such as machine learning and Internet of things.
The Azure infrastructure has security integrated from the ground up, and all data, whether at rest or in transit, is strongly encrypted. All of these offerings are supported by a leading-edge Cyber Defense Operations Centre that monitors customer infrastructure around the clock.
Gartner’s announcement comes at a time when the Gulf region is taking strident steps towards cloud infrastructure adoption. Saudi Arabia plans to invest $2 trillion in IT projects in the coming years, with a significant portion to be invested in cloud. Meanwhile, the United Arab Emirates will see a gradual growth in IT spend from now until 2020, according to a report from BMI Research. A compound annual growth rate (CAGR) of 3.4 per cent is expected.
An accompanying decline in hardware sales, together with BMI’s prediction that SaaS will take an increasing share of software sales, strongly indicates a decisive shift to cloud for the GCC.
When Microsoft announced the G series of virtual machines, back in Q1 of 2015, it represented the most memory, highest processing power and the largest local SSD capacity of any VMs then available in the public cloud. The G series allowed Azure to lead the market with continued innovation, also supporting SAP HANA workloads up to 32 TB. Azure has industry-wide recognition too for its support of Linux and other open-source technologies, having nearly one third of all Azure VMs on Linux boxes.
Gartner’s report singled out Microsoft’s “rapid rollout” of these new features and many others, signaling that the company’s brand and history, both with its customers and with its delivery of enterprise-class solutions and services, combine to allow the company to ‘rapidly attain the status of strategic cloud IaaS provider’.
“Microsoft Azure encompasses integrated IaaS and PaaS components that operate and feel like a unified whole,” Gartner analysts wrote.
Gartner’s analysts also cited Microsoft’s “deep investments” in engineering and “innovative roadmap” as crucial factors in the company’s current IaaS market standing. The report further recommends Microsoft Azure for General business applications and development environments that use Microsoft technologies; migration of virtualized workloads for Microsoft-centric organizations; cloud-native applications (including Internet of Things applications); and batch computing.
Microsoft is the leading platform and productivity company for the mobile-first, cloud-first world, and its mission is to empower every person and every organisation on the planet to achieve more.
Microsoft Gulf opened its Dubai-based headquarters in 1991 the same year as Synergy Software Systems.
For cloud hosting, or to back up to the cloud, or for applications like Dynamics 365 or Ax RTW (7) or Synergy MMS, or our xRM HIS, or Imaging storage, and Document management, or for cloud based monitoring of your clouds and on premise networks, find out how we can help with your move to the cloud.
Windows Server 2016 – Synergy Software Systems, preview.
Microsoft will launch Windows Server 2016 at the upcoming Ignite event on September 26-30. (The event is sold out, but sessions will be available online).
Security is the main focus area for this release. Windows Server 2016 includes layers of security that help prevent attacks and detect suspicious activity, with new features to control privileged access, protect virtual machines and harden the platform against emerging threats.
- Manage privileged identity using ‘Just Enough’ administration.
Buy or rent? On-premise or SaaS? The answer to these questions, for enterprise computing, goes in cycles. When mainframe computing was at its peak, many organizations did not own such expensive machines outright and many companies rented processing time on these machines when needed, an arrangement known as time-sharing.
The terms “cloud” and “data center” may sound like interchangeable technical jargon or trendy buzz words. A data centre is ideal for those companies that need a customized, dedicated system that gives them full control over their data and equipment. Typically those with many integrations, and uncertain internet connections, and an internal IT team will consider this route. Since only the one company will be using the infrastructure’s power, a data centre is suitable for organizations that run many different types of applications and complex workloads.
A data centre, however, has limited capacity: once you build a data centre, you will not be able to instantly change the amount of storage or processing power to accommodate, for example, significant changes in workload and data processing. On the other hand, a cloud system is scalable to your business needs. It has potentially unlimited capacity, based on your vendor’s offerings and service plans. When you are looking at big data processing for predictive analytics, or have high day-end or seasonal workloads, then the ability to ramp up and down is important to avoid oversizing. For project-based companies both the number of user licences required and the processing power may vary from year to year. For a rapidly expanding company, hardware and server-room expansion and management is a challenge on premise.
In a recent IDC (International Data Corporation) Multi-Client Study (CloudView 2016), respondents to the survey said that they expect to increase their cloud spending by approximately 44% over the next two years, and 70% of heavy cloud users are thinking in terms of a “hybrid” cloud strategy.
The idea of a hybrid cloud is to get the best of on-premise deployment by leveraging cloud services. Some work is done on premise, some on the cloud e.g. BI or payment gateway. A combination of both public and private platforms, a hybrid cloud is meant to provide organizations with greater IT and infrastructure flexibility, as well as visibility and control over their cloud usage. The result should be that a, hybrid cloud enables business agility, including streamlined operations and improved cost management.
Sounds good but what does it all mean and what are the challenges? First let’s review some of the basics concepts.
A public cloud is one in which the services and infrastructure are provided off-site, over the Internet. Data centre hardware is not owned by clients and so you face no capital expenses. Instead, providers sell hosting as a ‘utility’ or rental service. Providers offer maintenance, disaster recovery and backup, however basic this may be. This is typically a multi-tenant software solution. Individual company data sits in separate blocks in a common clustered hardware. Data for individual organisations is kept separate and protected with robust security. Breaches of data with a reliable provider are rare. However, some security standards are not suitable for very sensitive data, rigorous audit trails or industry-specific compliance.
A public cloud is typically used to host web servers or develop applications. It is attractive to small and mid-sized enterprises (SMEs) when they are happy to use out-of-the-box menu specifications. Virtual machines are configured quickly – often within hours. Some SaaS (Software as a Service) services are placed within a public cloud when they have high levels of built-in security.
A private cloud is one in which the services and infrastructure are maintained on a private network. It operates on an isolated network and is extremely secure. It keeps data behind a firewall and is built either on-premise or in a ring-fenced section of a data centre. A Private cloud is a single tenant solution, with the hardware accessed by one, or multiple businesses. It’s an ideal solution for enterprise organisations or specialist firms with high levels of security and compliance. Clients generally maintain their own cloud system and own their hardware.
Security and compliance on private cloud is configured to meet compliance standards. Private cloud systems cost much more than public cloud and re-configuring is more complex and lengthy.
Hybrid cloud uses public and private cloud for different elements of computing. Only some elements will require high security and customisation but others will not. Hybrid cloud offers private cloud for sensitive data but keeps non-sensitive, generic data (e.g. customer literature) in a cheaper public cloud environment. Hybrid cloud is usually hosted by different cloud providers – one for public and one for private. Hybrid cloud benefits companies who experience seasonal spikes so extra computing power is deployed quickly and cheaply in public cloud while keeping sensitive information in its private cloud.
A Hybrid cloud is the biggest growth area in cloud computing for enterprise businesses. As servers become ‘smarter’, hybrid cloud is estimated to represent 75% of future enterprise cloud computing.
A hybrid cloud does not mean failover to onsite: for that, a failover solution or a clustered install is needed, and the failover can be to any other site, whether local, remote or on cloud. Nor does hybrid mean an offline, work-on-premise option.
IBM’s Institute for Business Value (IBV) polled more than 1,000 C-level executives to reveal that 78% of respondents deploy a cloud initiative that is fully integrated or coordinated — an increase from 34% in 2012. Enterprises may be embracing the cloud, but they are not yet fully invested in a cloud-only strategy. Across 18 industries, 45% of workloads are expected to remain on-premise in the near future.
A hybrid cloud deployment is a collaboration of public cloud, private cloud and traditional IT platforms that allow enterprises to customize a cloud solution that meets the particular needs of their company. The top motivating factors for adopting hybrid cloud solutions, according to the IBM study, include lowering the total cost of ownership, facilitating innovation, improving efficiency and meeting customer expectations.
Among the companies that embrace cloud computing, 76% responded that they were able to expand into new industries, 71% created new revenue sources and 69% supported new business models.
Security remains a concern, however, and has become a hurdle for companies and a deterrent from fully investing in the cloud. Nearly half of respondents expressed that security and compliance risks are a challenge in IBM’s study, while 41% of respondents expressed that the cost of the cloud was a deterrent and 38% feared a disruption to company operations by introducing a new cloud solution.
When survey respondents are segmented by performance, IBM concludes that twice as many high performers have fully integrated their cloud initiatives compared to low performers.
That’s why solutions like the Azure Stack, which are also geared towards multi-cloud scenarios in the context of app migration to the cloud from traditional data centers, especially while taking all of the enterprise-grade considerations involved in such a transition into account, are critical.
Azure users now have a set of building blocks for managing the entire application stack and its lifecycle, across clouds, stacks and technologies. And with Microsoft now having the most open source developers on GitHub, yup – ahead of Facebook, Angular, and even Docker – Azure is uniquely positioned to achieve this level of openness and interoperability.
This will also ultimately provide a higher degree of flexibility that allows users to define their own level of abstraction per use case or application. In this manner, cloud portability is achievable without the need to change the underlying code, enabling true hybrid cloud.
Fifty-five percent of CIOs surveyed by Gartner indicated that by 2020 they will structure more than half of their applications as SaaS or manage them in a public cloud infrastructure. To manage and govern public, private and hybrid cloud services requires a focus on cloud management. This, in turn, requires new roles, processes and technologies.
Database professionals need to filter out business-critical data from the data overload we have today. A Big Data Foundation professional will be familiar with Hadoop and MongoDB.
Software developers no longer just push code; they are pivotal to the user experience and thus the user adoption of cloud solutions.
Information security managers must appreciate the risks involved with business data and discuss this across the organization (at all levels) to align key stakeholders in positions to invest in and implement security measures.
Enterprise architects. Today’s solution architects need the skills to adapt to cloud computing and hybrid cloud environments. Companies want to avoid working with ad hoc systems implementations, and architects who understand cloud computing and all its service models are in high demand to design a scalable and sustainable cloud infrastructure which optimizes the use of private and public cloud.
Business managers working in the cloud need to understand how the technical infrastructure supports the business strategy, to get the benefits of cloud computing and drive their objectives.
If you are considering how the cloud can benefit your business then contact us to explore the many options.
Find out about the new integrated Dynamics 365 offerings, e.g.:
Host your applications in a secure managed cloud – with billing either fixed price or based on use.
Monitor your on site global networks with cloud based monitoring systems.
Use Cortana Analytics and Power BI to turn data into information.
Back up to the cloud.
Dynamics Ax – Post Implementation reviews, audits, health checks, performance tuning.
Synergy regularly undertakes reviews of Dynamics Ax projects, often to turn around failing projects.
We’ve of course seen a wide variety of issues and situations, but there are many common issues we can quickly pinpoint, which means we can usually deliver quick wins.
As a consultant with more than 12 years of AX experience, and 30-plus years of erp experience across more than 40 countries, I’ve come across many operational and technical challenges that other people haven’t had the time or the opportunity to see. I spent my first year in consultancy devising a suite of audit methodologies, tools and checklists, and for several years was kept fully occupied with reviews of global company erp systems. So there are things that I would know to check in your system that may not be obvious, and often a minor configuration policy change has a significant impact on how a system performs in support of a business process.
How do you know whether your system is running well, or as well as it should do? Who or what are you comparing against?
◾ Are your batches running appropriately?
◾ Have you allocated enough resources to the batches you have?
◾ Have you looked at your security within AX?
◾ Have you looked at your event logs to see what is happening?
◾ How are you deploying code?
◾ What maintenance routines are in place?
◾ Are initial default settings no longer appropriate as the numbers of users, transactions and history data increase?
◾ Are there unused system features?
◾ Is there a timely month-end close?
◾ Does financial data reconcile to the sub-ledgers?
There are many different areas that are reviewed during your Dynamics AX Health Check; it is meant to be an overall system-wide analysis, not just "how is SQL doing?" or "what are your queries doing?"
All companies change over time, they open new branches, their staff changes, their customer base and products evolve. The regulatory and statutory requirements change, and technology offers new ways of working.
With a Health Check, another set of eyes will review and recommend. Sometimes you are so close to a problem that you can live with it and not notice how it is deteriorating.
Identify performance and operational risks, and downtime. The true value of a Dynamics AX Health Check by an experienced resource is the potential for increased productivity from your team and a system that operates at maximum performance.
Learn more about a Dynamics AX Health Check or Performance review for your company.
In the last 12 months more than 30 companies in Dubai and Abu Dhabi have implemented the Synergy Software Systems localised HR and Payroll solution built inside Dynamics Ax 2012.
The solution has added more reports, more workflows, and Power BI analysis.
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
from ansible.module_utils.facts.utils import get_file_content
class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
    This is an OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def get_virtual_facts(self):
virtual_facts = {}
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
virtual_product_facts = self.detect_virt_product('hw.product')
virtual_facts.update(virtual_product_facts)
if virtual_facts['virtualization_type'] == '':
virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
virtual_facts.update(virtual_vendor_facts)
# Check the dmesg if vmm(4) attached, indicating the host is
# capable of virtualization.
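        # Example of a dmesg.boot line the regex below matches:
        #   vmm0 at mainbus0: VMX/EPT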
dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
for line in dmesg_boot.splitlines():
match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
if match:
virtual_facts['virtualization_type'] = 'vmm'
virtual_facts['virtualization_role'] = 'host'
return virtual_facts
class OpenBSDVirtualCollector(VirtualCollector):
_fact_class = OpenBSDVirtual
_platform = 'OpenBSD'
|
Simple, yet powerful, strategies that make getting Straight A's easy.
Straight A+ students study smarter, not harder. With this DVD and Strategy Blueprint, you will learn their secrets for doing well in school and enjoying it too. So whether you are a top student who wants to create more free time, a struggling student who wants better grades, a stressed out student looking for an easier way or someone who simply wants to make school more fun, learning these simple strategies from these top students will help you achieve more in less time.
Receive a private tutorial from three top students.
Simple, yet powerful, strategies that make school easier.
This is a must-have product for anyone in grade school, middle school, high school, college, business school, law school, medical school or graduate school. Discover real-world strategies for scoring top grades while studying less.
|
# execly.py
#
# Example of generating code and executing it with exec()
# in the context of descriptors/metaclasses
from inspect import Parameter, Signature
import re
from collections import OrderedDict
# Utility functions
def _make_init(fields):
'''
    Given a list of field names, make an __init__ method
'''
code = 'def __init__(self, %s):\n' % \
','.join(fields)
for name in fields:
code += ' self.%s = %s\n' % (name, name)
return code
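# For example, _make_init(['name', 'shares']) returns the source text:
#
#   def __init__(self, name,shares):
#       self.name = name
#       self.shares = shares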
def _make_setter(dcls):
code = 'def __set__(self, instance, value):\n'
for d in dcls.__mro__:
if 'set_code' in d.__dict__:
for line in d.set_code():
code += ' ' + line + '\n'
return code
class DescriptorMeta(type):
def __init__(self, clsname, bases, clsdict):
if '__set__' not in clsdict:
code = _make_setter(self)
exec(code, globals(), clsdict)
setattr(self, '__set__', clsdict['__set__'])
else:
raise TypeError('Define set_code(), not __set__()')
class Descriptor(metaclass=DescriptorMeta):
def __init__(self, name=None):
self.name = name
@staticmethod
def set_code():
return [
'instance.__dict__[self.name] = value'
]
def __delete__(self, instance):
raise AttributeError("Can't delete")
class Typed(Descriptor):
ty = object
@staticmethod
def set_code():
return [
'if not isinstance(value, self.ty):',
' raise TypeError("Expected %s" % self.ty)'
]
# Specialized types
class Integer(Typed):
ty = int
class Float(Typed):
ty = float
class String(Typed):
ty = str
# Value checking
class Positive(Descriptor):
@staticmethod
def set_code():
return [
'if value < 0:',
' raise ValueError("Expected >= 0")',
]
# More specialized types
class PosInteger(Integer, Positive):
pass
class PosFloat(Float, Positive):
pass
# Length checking
class Sized(Descriptor):
def __init__(self, *args, maxlen, **kwargs):
self.maxlen = maxlen
super().__init__(*args, **kwargs)
@staticmethod
def set_code():
return [
'if len(value) > self.maxlen:',
' raise ValueError("Too big")',
]
class SizedString(String, Sized):
pass
# Pattern matching
class Regex(Descriptor):
def __init__(self, *args, pat, **kwargs):
self.pat = re.compile(pat)
super().__init__(*args, **kwargs)
@staticmethod
def set_code():
return [
'if not self.pat.match(value):',
' raise ValueError("Invalid string")',
]
class SizedRegexString(SizedString, Regex):
pass
# Structure definition code
class StructMeta(type):
@classmethod
def __prepare__(cls, name, bases):
return OrderedDict()
def __new__(cls, clsname, bases, clsdict):
fields = [key for key, val in clsdict.items()
if isinstance(val, Descriptor) ]
for name in fields:
clsdict[name].name = name
# Make the init function
if fields:
exec(_make_init(fields), globals(), clsdict)
clsobj = super().__new__(cls, clsname, bases, dict(clsdict))
setattr(clsobj, '_fields', fields)
return clsobj
class Structure(metaclass=StructMeta):
pass
if __name__ == '__main__':
class Stock(Structure):
name = SizedRegexString(maxlen=8, pat='[A-Z]+$')
shares = PosInteger()
price = PosFloat()
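    # A minimal usage sketch: valid values pass through the generated setters,
    # invalid ones raise from the exec'd validation code.
    s = Stock('ACME', 50, 91.1)
    print(s.name, s.shares, s.price)
    try:
        s.shares = -10            # Positive: value < 0
    except ValueError as e:
        print('rejected:', e)
    try:
        s.price = '90'            # Typed: not a float
    except TypeError as e:
        print('rejected:', e)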
|
Please note that the results reported below are provisional. The final results, with remarks and errors corrected, will be published early in 2019. We expect the final results to be even better than those reported below.
|
# -*- coding: utf-8 -*-
from starlord.ocr.api import *
import requests
import selenium
from selenium import webdriver
import json, urllib,urllib2
import hashlib
from urllib import urlencode
from selenium.webdriver.common import keys as KEYS
import bs4
import sys
import time
from selenium.webdriver.common.action_chains import ActionChains
from PIL import Image as PILImage
import cv2
from PIL import Image
import random
def extractEdges(image_file):
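	# Locate the slider-captcha gap: apply a Laplacian edge filter, scan for
	# pixel columns that stay white for ~22 rows (candidate gap edges), merge
	# adjacent columns, and return the horizontal distance between the edges.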
edges = []
img = cv2.imread(image_file, 0)
gray_lap = cv2.Laplacian(img,cv2.CV_16S,ksize = 3)
dst = cv2.convertScaleAbs(gray_lap)
cv2.imwrite("verify2.png",dst)
#cv2.imshow("showimage", dst)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
image = Image.open("verify2.png")
image_rgb = image.convert("RGB")
for x in xrange(2, image_rgb.size[0] - 1):
for y in xrange(2, image_rgb.size[1] - 1):
color1 = image_rgb.getpixel((x,y))
			# white pixel
if color1==(255,255,255):
k = min(y+22,image.size[1] - 1)
allwhite = False
for j in xrange(y+1,k):
					# the rest of the vertical strip below is also white
color2= image_rgb.getpixel((x,j))
if color2==color1:
allwhite = True
continue
else:
allwhite=False
break
if allwhite:
if edges.count(x)==0:
edges.append(x)
	for i in xrange(0,len(edges)-1):
		if edges[i]+1==edges[i+1]:
			edges[i]=0
	# filter into a new list; calling edges.remove() while iterating skips items
	edges = [x for x in edges if x != 0]
for z in edges:
print str(z)
	distance1 = 0 # default if fewer than two edges were found
	if len(edges)==2:
		distance1 = edges[1]-edges[0]
	elif len(edges)>2:
		distance1 = edges[2]-edges[0]
	return distance1
headers0 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Cache-Control': 'max-age=0'
}
driver = webdriver.Chrome()
driver.maximize_window()
page = driver.get("http://www.qixin.com/login")
#elem = driver.find_element_by_xpath("//script[6]")
time.sleep(2)
#elem = driver.find_element_by_xpath("//div[@class='behavior_verify_content']")
elem = driver.find_element_by_css_selector('.gt_slider_knob.gt_show')
ActionChains(driver).click_and_hold(elem).perform()
time.sleep(1)
driver.get_screenshot_as_file('web.png')
#print elem.location.values()
elem2 = driver.find_element_by_css_selector('.gt_cut_fullbg.gt_show')
#ActionChains(driver).move_to_element(elem).perform()
#driver.get_screenshot_as_file('2.png')
#print elem2.location.values()
#print elem2.size.values()
topx = elem2.location['x']
topy = elem2.location['y']
botx = topx + elem2.size['width']
boty = topy + elem2.size['height']
box=(topx, topy, botx, boty)
image1 = PILImage.open('web.png')
image1.crop(box).save('verify.png')
image1.close()
distance = extractEdges("verify.png")
ActionChains(driver).move_to_element(elem)
#ActionChains(driver).drag_and_drop_by_offset(elem,distance,0).perform()
road = 0
for seconds in xrange(0,20):
if seconds==19:
bias = distance-road
ActionChains(driver).move_by_offset(bias, 0).perform()
else:
ActionChains(driver).move_by_offset(0.05 * distance, 0).perform()
road = road + 0.05*distance
time.sleep(1*random.random())
#ActionChains(driver).move_to_element_with_offset(elem,distance, 0).perform()
driver.get_screenshot_as_file('web2.png')
ActionChains(driver).release(elem).perform()
time.sleep(10)
|
Rajani Radhakrishnan is from Bangalore, India. Finding time and renewed enthusiasm for poetry after a long career in Financial Applications, she blogs at thotpurge.wordpress.com . Her poems have recently appeared in The Lake, Quiet Letter, Under the Basho and The Cherita.
The contrasts (regular seasonal vs forced; biological evolution vs natural or human-caused disasters ...) between the two types of migration (bird vs human) portrayed in the tanka with a pivot are sociopolitically significant and emotionally poignant, and on second reading, the "shrinking desert" successfully carries geopolitical and environmental significance.
Arzu walks out of the tent to meet her friends, waiting in line with hundreds of others for water distribution. A wisp of cloud drifts by. It reminds her of the camp teacher's departing words, "Those puffy, sheep-like clouds you're looking at come from Syria. You will all return home one day, I promise."
Amidst unceasing news of the overwhelming numbers of people fleeing the conflict-torn regions of the Middle East, Chen-ou Liu’s Honorable Mention haibun is both timely and compassionate in its presentation of a young girl’s plight. The poet’s imagery of a ‘wisp of cloud’ is laden with significance. It evokes poignantly the fragility of Arzu’s hope for a safe return to her native land and also works as a ‘beacon’ of light in the otherwise drab and desperate tent-filled camp. What I find particularly powerful is how Chen-ou turns on its head the largely negative media representation of how refugees threaten the civilizations of the host countries in which they seek asylum. There is quiet dignity in both Arzu and her teacher, who holds out the promise of a return to their homeland.
|
import struct
import traceback
import itertools
import envi
import envi.bits as e_bits
import envi.memory as e_mem
import envi.registers as e_reg
import visgraph.pathcore as vg_path
from vivisect.const import *
# Pre-initialize stack memory bytes
init_stack_map = ''
for i in xrange(8192/4):
init_stack_map += struct.pack("<I", 0xfefe0000+(i*4))
def imphook(impname):
def imptemp(f):
f.__imphook__ = impname
return f
return imptemp
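# Usage sketch: a subclass marks a method with @imphook and WorkspaceEmulator's
# __init__ (below) collects it into self.hooks; checkCall then invokes it for a
# matching import call. Hypothetical example:
#
#   @imphook('kernel32.GetVersion')
#   def hook_getversion(self, emu, callconv, api, argv):
#       callconv.execCallReturn(emu, 0, len(argv))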
class WorkspaceEmulator:
taintregs = []
def __init__(self, vw, logwrite=False, logread=False):
self.vw = vw
self.funcva = None # Set if using runFunction
self.emustop = False
self.hooks = {}
self.taints = {}
self.taintva = itertools.count(0x41560000, 8192)
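        # taint 'addresses' begin at 0x41560000, spaced 8192 bytes apart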
self.uninit_use = {}
self.logwrite = logwrite
self.logread = logread
self.path = self.newCodePathNode()
self.curpath = self.path
self.op = None
self.opcache = {}
self.emumon = None
self.psize = self.getPointerSize()
# Possibly need an "options" API?
self._safe_mem = True # Should we be forgiving about memory accesses?
self._func_only = True # is this emulator meant to stay in one function scope?
        self.strictops = True # should we bail on emulation if an unsupported instruction is encountered
# Map in all the memory associated with the workspace
for va, size, perms, fname in vw.getMemoryMaps():
offset, bytes = vw.getByteDef(va)
self.addMemoryMap(va, perms, fname, bytes)
for regidx in self.taintregs:
rname = self.getRegisterName(regidx)
regval = self.setVivTaint( 'uninitreg', regidx )
self.setRegister(regidx, regval)
for name in dir(self):
val = getattr(self, name, None)
if val == None:
continue
impname = getattr(val, '__imphook__',None)
if impname == None:
continue
self.hooks[impname] = val
self.stack_map_mask = None
self.stack_map_base = None
self.stack_map_top = None
self.stack_pointer = None
self.initStackMemory()
def initStackMemory(self, stacksize=4096):
'''
Setup and initialize stack memory.
You may call this prior to emulating instructions.
'''
if self.stack_map_base is None:
self.stack_map_mask = e_bits.sign_extend(0xfff00000, 4, self.vw.psize)
self.stack_map_base = e_bits.sign_extend(0xbfb00000, 4, self.vw.psize)
self.stack_map_top = self.stack_map_base + stacksize
self.stack_pointer = self.stack_map_top
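            # With the defaults on a 32-bit workspace: base = 0xbfb00000 and
            # top = SP = 0xbfb01000 for the initial 4096-byte map.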
# Map in a memory map for the stack
stack_map = init_stack_map
if stacksize != 4096:
stack_map = ''.join([struct.pack('<I', self.stack_map_base+(i*4))
for i in xrange(stacksize)])
self.addMemoryMap(self.stack_map_base, 6, "[stack]", stack_map)
self.setStackCounter(self.stack_pointer)
# Create some pre-made taints for positive stack indexes
# NOTE: This is *ugly* for speed....
taints = [ self.setVivTaint('funcstack', i * self.psize) for i in xrange(20) ]
taintbytes = ''.join([ e_bits.buildbytes(taint,self.psize) for taint in taints ])
self.writeMemory(self.stack_pointer, taintbytes)
else:
existing_map_size = self.stack_map_top - self.stack_map_base
new_map_size = stacksize - existing_map_size
if new_map_size < 0:
raise RuntimeError('cannot shrink stack')
new_map_top = self.stack_map_base
new_map_base = new_map_top - new_map_size
stack_map = ''.join([struct.pack('<I', new_map_base+(i*4))
for i in xrange(new_map_size)])
self.addMemoryMap(new_map_base, 6, "[stack]", stack_map)
self.stack_map_base = new_map_base
# no need to do tainting here, since SP will always be in the
# first map
def stopEmu(self):
'''
This is called by monitor to stop emulation
'''
self.emustop = True
def getPathProp(self, key):
'''
Retrieve a named value from the current code path context.
'''
return vg_path.getNodeProp(self.curpath, key)
def setPathProp(self, key, value):
"""
Set a named value which is only relevant for the current code path.
"""
return vg_path.setNodeProp(self.curpath, key, value)
def setEmulationMonitor(self, emumon):
"""
Snap in an emulation monitor. (see EmulationMonitor doc from vivisect.impemu)
"""
self.emumon = emumon
def parseOpcode(self, pc):
# We can make an opcode *faster* with the workspace because of
# getByteDef etc... use it.
op = self.opcache.get(pc)
if op == None:
op = envi.Emulator.parseOpcode(self, pc)
self.opcache[pc] = op
return op
def checkCall(self, starteip, endeip, op):
"""
Check if this was a call, and if so, do the required
import emulation and such...
"""
iscall = bool(op.iflags & envi.IF_CALL)
if iscall:
api = self.getCallApi(endeip)
rtype,rname,convname,callname,funcargs = api
callconv = self.getCallingConvention(convname)
argv = callconv.getCallArgs(self, len(funcargs))
ret = None
if self.emumon != None:
try:
ret = self.emumon.apicall(self, op, endeip, api, argv)
except Exception, e:
self.emumon.logAnomaly(self, endeip, "%s.apicall failed: %s" % (self.emumon.__class__.__name__, e))
hook = self.hooks.get(callname)
if ret == None and hook:
hook( self, callconv, api, argv )
else:
if ret == None:
ret = self.setVivTaint('apicall', (op,endeip,api,argv))
callconv.execCallReturn( self, ret, len(funcargs) )
# Either way, if it's a call PC goes to next instruction
if self._func_only:
self.setProgramCounter(starteip+len(op))
return iscall
def newCodePathNode(self, parent=None, bva=None):
'''
NOTE: Right now, this is only called from the actual branch state which
        needs it. It must stay that way for now (register context is being copied
        for the symbolic emulator...)
'''
props = {
'bva':bva, # the entry virtual address for this branch
'valist':[], # the virtual addresses in this node in order
'calllog':[], # FIXME is this even used?
'readlog':[], # a log of all memory reads from this block
'writelog':[],# a log of all memory writes from this block
}
ret = vg_path.newPathNode(parent=parent, **props)
return ret
def getBranchNode(self, node, bva):
'''
If a node exists already for the specified branch, return it. Otherwise,
create a new one and return that...
'''
for knode in vg_path.getNodeKids(node):
if vg_path.getNodeProp(knode, 'bva') == bva:
return knode
return self.newCodePathNode(node, bva)
def checkBranches(self, starteip, endeip, op):
"""
This routine gets the current branch list for this opcode, adds branch
entries to the current path, and updates current path as needed
        (returns a list of (va, CodePath) tuples).
"""
ret = []
# Add all the known branches to the list
blist = op.getBranches(emu=self)
# FIXME this should actually check for conditional...
# If there is more than one branch target, we need a new code block
if len(blist) > 1:
for bva,bflags in blist:
if bva == None:
print "Unresolved branch even WITH an emulator?"
continue
bpath = self.getBranchNode(self.curpath, bva)
ret.append((bva, bpath))
return ret
def stepi(self):
# NOTE: when we step, we *always* want to be stepping over calls
# (and possibly import emulate them)
starteip = self.getProgramCounter()
# parse out an opcode
op = self.parseOpcode(starteip)
if self.emumon:
self.emumon.prehook(self, op, starteip)
# Execute the opcode
self.executeOpcode(op)
vg_path.getNodeProp(self.curpath, 'valist').append(starteip)
endeip = self.getProgramCounter()
if self.emumon:
self.emumon.posthook(self, op, endeip)
if not self.checkCall(starteip, endeip, op):
self.checkBranches(starteip, endeip, op)
def runFunction(self, funcva, stopva=None, maxhit=None, maxloop=None):
"""
This is a utility function specific to WorkspaceEmulation (and impemu) that
will emulate, but only inside the given function. You may specify a stopva
to return once that location is hit.
"""
self.funcva = funcva
# Let the current (should be base also) path know where we are starting
vg_path.setNodeProp(self.curpath, 'bva', funcva)
hits = {}
todo = [(funcva,self.getEmuSnap(),self.path),]
vw = self.vw # Save a dereference many many times
while len(todo):
va,esnap,self.curpath = todo.pop()
self.setEmuSnap(esnap)
self.setProgramCounter(va)
# Check if we are beyond our loop max...
if maxloop != None:
lcount = vg_path.getPathLoopCount(self.curpath, 'bva', va)
if lcount > maxloop:
continue
while True:
starteip = self.getProgramCounter()
if not vw.isValidPointer(starteip):
break
if starteip == stopva:
return
# Check straight hit count...
if maxhit != None:
h = hits.get(starteip, 0)
h += 1
if h > maxhit:
break
hits[starteip] = h
                # If we ran out of path (branches that went
                # somewhere we couldn't follow?)
if self.curpath == None:
break
try:
# FIXME unify with stepi code...
op = self.parseOpcode(starteip)
self.op = op
if self.emumon:
self.emumon.prehook(self, op, starteip)
if self.emustop:
return
# Execute the opcode
self.executeOpcode(op)
vg_path.getNodeProp(self.curpath, 'valist').append(starteip)
endeip = self.getProgramCounter()
if self.emumon:
self.emumon.posthook(self, op, endeip)
if self.emustop:
return
iscall = self.checkCall(starteip, endeip, op)
if self.emustop:
return
# If it wasn't a call, check for branches, if so, add them to
# the todo list and go around again...
if not iscall:
blist = self.checkBranches(starteip, endeip, op)
if len(blist):
# pc in the snap will be wrong, but over-ridden at restore
esnap = self.getEmuSnap()
for bva,bpath in blist:
todo.append((bva, esnap, bpath))
break
                    # If we encounter a procedure exit, it doesn't
                    # matter what EIP is, we're done here.
if op.iflags & envi.IF_RET:
vg_path.setNodeProp(self.curpath, 'cleanret', True)
break
except envi.UnsupportedInstruction, e:
if self.strictops:
break
else:
print 'runFunction continuing after unsupported instruction: 0x%08x %s' % (e.op.va, e.op.mnem)
self.setProgramCounter(e.op.va+ e.op.size)
except Exception, e:
#traceback.print_exc()
if self.emumon != None:
self.emumon.logAnomaly(self, starteip, str(e))
break # If we exc during execution, this branch is dead.
def getCallApi(self, va):
'''
Retrieve an API definition from either the vivisect workspace
( if the call target is a function within the workspace ) or
the impapi definition subsystem ( if the call target is a known
import definition )
'''
vw = self.vw
ret = None
if vw.isFunction(va):
ret = vw.getFunctionApi(va)
if ret != None:
return ret
else:
taint = self.getVivTaint(va)
if taint:
tva,ttype,tinfo = taint
if ttype == 'import':
lva,lsize,ltype,linfo = tinfo
ret = vw.getImpApi( linfo )
elif ttype == 'dynfunc':
libname,funcname = tinfo
ret = vw.getImpApi('%s.%s' % (libname,funcname))
if ret:
return ret
defcall = vw.getMeta("DefaultCall")
return ('int', None, defcall, 'UnknownApi', () )
def nextVivTaint(self):
# One page into the new taint range
return self.taintva.next() + 4096
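    # Taint values are fake addresses handed out in 8192-byte strides starting
    # at 0x41560000 (see self.taintva above). nextVivTaint returns an address
    # one page into the stride, so pointer math that stays "near" a taint still
    # masks back (via va & 0xffffe000) to the same slot in set/getVivTaint.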
def setVivTaint(self, typename, taint):
'''
Set a taint in the emulator. Returns the new value for
the created taint.
'''
va = self.nextVivTaint()
self.taints[ va & 0xffffe000 ] = (va,typename,taint)
return va
def getVivTaint(self, va):
'''
Retrieve a previously registered taint ( this will automagically
mask values down and allow you to retrieve "near taint" values.)
'''
return self.taints.get( va & 0xffffe000 )
def reprVivTaint(self, taint):
'''
        For the base "known" taint types, return a human readable string
        to represent the value of the taint.
'''
va,ttype,tinfo = taint
if ttype == 'uninitreg':
return self.getRegisterName(tinfo)
if ttype == 'import':
lva,lsize,ltype,linfo = tinfo
return linfo
if ttype == 'dynlib':
libname = tinfo
return libname
if ttype == 'dynfunc':
libname,funcname = tinfo
return '%s.%s' % (libname,funcname)
if ttype == 'funcstack':
stackoff = tinfo
if self.funcva:
flocal = self.vw.getFunctionLocal(self.funcva, stackoff)
if flocal != None:
typename,argname = flocal
return argname
o = '+'
if stackoff < 0:
o = '-'
return 'sp%s%d' % (o, abs(stackoff))
if ttype == 'apicall':
op,pc,api,argv = tinfo
rettype,retname,callconv,callname,callargs = api
callstr = self.reprVivValue( pc )
argsstr = ','.join([ self.reprVivValue( x ) for x in argv])
return '%s(%s)' % (callstr,argsstr)
return 'taint: 0x%.8x %s %r' % (va, ttype, tinfo)
def reprVivValue(self, val):
'''
        Return a human readable string which is the best description for
the given value ( given knowledge of the workspace, emu,
and taint subsystems ).
'''
if self.vw.isFunction(val):
thunk = self.vw.getFunctionMeta(val,'Thunk')
if thunk:
return thunk
vivname = self.vw.getName(val)
if vivname:
return vivname
taint = self.getVivTaint(val)
if taint:
# NOTE we need to prevent infinite recursion due to args being
# tainted and then referencing the same api call
va,ttype,tinfo = taint
if ttype == 'apicall':
op,pc,api,argv = tinfo
rettype,retname,callconv,callname,callargs = api
if val not in argv:
return self.reprVivTaint(taint)
stackoff = self.getStackOffset(val)
if stackoff != None:
funclocal = self.vw.getFunctionLocal(self.funcva, stackoff)
if funclocal != None:
typename,varname = funclocal
return varname
if val < 4096:
return str(val)
return '0x%.8x' % val
def _useVirtAddr(self, va):
taint = self.getVivTaint(va)
if taint == None:
return
tva,ttype,tinfo = taint
if ttype == 'uninitreg':
self.logUninitRegUse(tinfo)
def writeMemory(self, va, bytes):
"""
        Try to write the bytes to the memory object; otherwise, don't
        complain...
"""
if self.logwrite:
wlog = vg_path.getNodeProp(self.curpath, 'writelog')
wlog.append((self.getProgramCounter(),va,bytes))
self._useVirtAddr( va )
# It's totally ok to write to invalid memory during the
# emulation pass (as long as safe_mem is true...)
probeok = self.probeMemory(va, len(bytes), e_mem.MM_WRITE)
if self._safe_mem and not probeok:
return
return e_mem.MemoryObject.writeMemory(self, va, bytes)
def logUninitRegUse(self, regid):
self.uninit_use[regid] = True
def getUninitRegUse(self):
return self.uninit_use.keys()
def readMemory(self, va, size):
if self.logread:
rlog = vg_path.getNodeProp(self.curpath, 'readlog')
rlog.append((self.getProgramCounter(),va,size))
# If they read an import entry, start a taint...
loc = self.vw.getLocation(va)
if loc != None:
lva, lsize, ltype, ltinfo = loc
if ltype == LOC_IMPORT and lsize == size: # They just read an import.
ret = self.setVivTaint('import', loc)
return e_bits.buildbytes(ret, lsize)
self._useVirtAddr(va)
        # Read from the emulator's pages if we haven't resolved it yet
probeok = self.probeMemory(va, size, e_mem.MM_READ)
if self._safe_mem and not probeok:
return 'A' * size
return e_mem.MemoryObject.readMemory(self, va, size)
# Some APIs for telling if pointers are in runtime memory regions
def isUninitStack(self, val):
"""
If val is a numerical value in the same memory page
as the un-initialized stack values return True
"""
#NOTE: If uninit_stack_byte changes, so must this!
if (val & 0xfffff000) == 0xfefef000:
return True
return False
def isStackPointer(self, va):
return (va & self.stack_map_mask) == self.stack_map_base
def getStackOffset(self, va):
if (va & self.stack_map_mask) == self.stack_map_base:
return va - self.stack_pointer
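# Minimal usage sketch (assumes a loaded VivWorkspace; vw.getEmulator() is the
# usual construction path and returns an arch-specific WorkspaceEmulator):
#
#   emu = vw.getEmulator()
#   emu.runFunction(funcva, maxhit=1)
#   for regid in emu.getUninitRegUse():
#       print 'uninitialized register used:', emu.getRegisterName(regid)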
|
The developers of an ill-fated Call of Duty spinoff reveal a third-person action/adventure title full of squad-based combat and exploration.
Starting a flame war on the Internet is only slightly more difficult than tying a shoelace, but if you're starved for ideas, try this: Go to your favorite gaming site and start a thread asking people if they like Call of Duty. You're almost guaranteed to get a fight before the end of the first page. However, much of the vitriol directed at the series came after the release of Call of Duty: Modern Warfare, the 2007 entry that took the series out of World War II and into the modern day. That very same year, however, the series nearly branched off in a very different direction: a squad-based, third-person shooter with adventure elements, based on the true story of an American-Canadian unit in WWII Italy. While the game quietly went MIA in 2008, the former development team has brought some details to light about the spinoff that almost was.
Jason VandenBerghe, a former EA man, led a small team of accomplished developers under the Activision banner to develop Call of Duty: Devil's Brigade. Activision had previously expressed interest in expanding the iconic military shooter into action/adventure territory, and saw VandenBerghe's team of veteran artists and designers as a logical fit for the project. "I don't know that I've been with a group of people that I felt more comfortable with than I was with that group," VandenBerghe recalls. "We had an enormous amount of trust and experience." The team aimed to keep the World War II setting and squad-based combat of Call of Duty, but leave behind the first-person viewpoint and extravagant action cues. "I don't think we had any over the top sequences a la the current Modern Warfare where you're blowing up entire submarine vessels," says VandenBerghe. Other departures from series norms included the ability to issue squad commands, a variety of acrobatic moves (appropriate for Special Forces operatives), and stealth sections for destroying bridges or offing enemies.
The inspiration for the game's story (and title) came from the real-life Devil's Brigade, a joint group of American and Canadian soldiers who fought in Italy towards the end of WWII. VandenBerghe's team took creative liberties, but still tried to stay close to the source material. "[The Devil's Brigade's] first action [in the game] was to scale a 1,500 foot cliff at night in the rain, and flank the Germans from behind in their mountain retreat," VandenBerghe describes. "You're up on ropes. And there's lightning. You look down, and the entire brigade - a thousand men, they pulled a thousand men up that cliff." The true story is not too far off.
Devil's Brigade ended up as one of the casualties of Activision's merger with Vivendi/Blizzard, but VandenBerghe does not appear to harbor much ill will. "It was no one entity or one person's fault. There was no drama." Still, Call of Duty fans and detractors alike can only wonder what might have happened to the series if Devil's Brigade had been as tenacious as the squad for which it was named.
|
from setuptools import setup
version = "1.5.1"
setup(name='MAVProxy',
version=version,
zip_safe=True,
description='MAVProxy MAVLink ground station',
long_description='''A MAVLink protocol proxy and ground station. MAVProxy
is oriented towards command line operation, and is suitable for embedding in
small autonomous vehicles or for using on ground control stations. It also
features a number of graphical tools such as a slipmap for satellite mapping
view of the vehicle's location, a status console and several useful vehicle
control modules. MAVProxy is extensible via a modules system - see the modules
subdirectory for some example modules. MAVProxy was developed by CanberraUAV
for use in the 2012 Outback Challenge, and includes a module for the
CanberraUAV search and rescue system. See
http://Dronecode.github.io/MAVProxy/ for more information
on how to use MAVProxy.''',
url='https://github.com/Dronecode/MAVProxy',
author='Andrew Tridgell',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'],
license='GPLv3',
packages=['MAVProxy',
'MAVProxy.modules',
'MAVProxy.modules.mavproxy_map',
'MAVProxy.modules.mavproxy_misseditor',
'MAVProxy.modules.mavproxy_smartcamera',
'MAVProxy.modules.lib',
'MAVProxy.modules.lib.ANUGA',
'MAVProxy.modules.lib.optparse_gui'],
# note that we do not include all the real dependencies here (like matplotlib etc)
# as that breaks the pip install. It seems that pip is not smart enough to
# use the system versions of these dependencies, so it tries to download and install
# large numbers of modules like numpy etc which may be already installed
install_requires=['pymavlink>=1.1.73',
'pyserial>=3.0'],
scripts=['MAVProxy/mavproxy.py',
'MAVProxy/tools/mavflightview.py',
'MAVProxy/tools/MAVExplorer.py',
'MAVProxy/modules/mavproxy_map/mp_slipmap.py',
'MAVProxy/modules/mavproxy_map/mp_tile.py'],
package_data={'MAVProxy':
['modules/mavproxy_map/data/*.jpg',
'modules/mavproxy_map/data/*.png',
'tools/graphs/*.xml']}
)
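# Example install from a source checkout (standard setuptools/pip usage; note
# the deliberately slim install_requires above, so heavy dependencies such as
# matplotlib must already be present on the system):
#
#   $ pip install .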
|
Individual members of the campus community are encouraged to develop their own personal emergency plans to enable them to quickly take protective actions in the event of an emergency.
During or immediately following an emergency, you may be on your own for hours or days before emergency responders can reach you. Stashing away a few key items in an emergency supply kit can help you survive during this period of time.
It is important to learn about the hazards you are exposed to and the recommended actions to take during emergencies, such as fires, severe weather, and hazardous material spills. In addition to the Baylor Emergency Planning and Preparedness site, visit www.hotready.com to learn more.
Receiving training that will be useful for protecting yourself and those around you during emergencies is also important. Baylor University offers a wide variety of training opportunities for students, faculty, and staff in areas such as emergency preparedness and crime prevention. All members of the University community are encouraged to participate in and receive training.
|
# -*- coding: utf-8 -*-
'''
logger.py: Simple logger for sendit. Note that levels info and log are the
only two considered stdout, the rest are sent to stderr.
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import sys
ABRT = -4
ERROR = -3
WARNING = -2
LOG = -1
INFO = 1
QUIET = 0
VERBOSE = VERBOSE1 = 2
VERBOSE2 = 3
VERBOSE3 = 4
DEBUG = 5
class SenditMessage:
    def __init__(self, MESSAGELEVEL=None):
        self.level = MESSAGELEVEL if MESSAGELEVEL is not None else get_logging_level()
self.history = []
self.errorStream = sys.stderr
self.outputStream = sys.stdout
self.colorize = self.useColor()
self.colors = {ABRT:"\033[31m", # dark red
ERROR: "\033[91m", # red
WARNING:"\033[93m", # dark yellow
LOG:"\033[95m", # purple
DEBUG:"\033[36m", # cyan
'OFF':"\033[0m"} # end sequence
# Colors --------------------------------------------
def useColor(self):
'''useColor will determine if color should be added
to a print. Will check if being run in a terminal, and
        if it has support for ascii'''
COLORIZE = get_user_color_preference()
if COLORIZE is not None:
return COLORIZE
streams = [self.errorStream,self.outputStream]
for stream in streams:
if not hasattr(stream, 'isatty'):
return False
if not stream.isatty():
return False
return True
def addColor(self,level,text):
'''addColor to the prompt (usually prefix) if terminal
supports, and specified to do so'''
if self.colorize:
if level in self.colors:
text = "%s%s%s" %(self.colors[level],
text,
self.colors["OFF"])
return text
def emitError(self,level):
        '''determine if a level should print to
        stderr; includes all levels but LOG, INFO and QUIET'''
if level in [ABRT,
ERROR,
WARNING,
VERBOSE,
VERBOSE1,
VERBOSE2,
VERBOSE3,
DEBUG ]:
return True
return False
def emitOutput(self,level):
'''determine if a level should print to stdout
only includes INFO'''
if level in [LOG,
INFO]:
return True
return False
def isEnabledFor(self,messageLevel):
'''check if a messageLevel is enabled to emit a level
'''
if messageLevel <= self.level:
return True
return False
def emit(self,level,message,prefix=None):
'''emit is the main function to print the message
optionally with a prefix
:param level: the level of the message
:param message: the message to print
:param prefix: a prefix for the message
'''
if prefix is not None:
prefix = self.addColor(level,"%s " %(prefix))
else:
prefix = ""
message = self.addColor(level,message)
# Add the prefix
message = "%s%s" %(prefix,message)
if not message.endswith('\n'):
message = "%s\n" %message
        # If the level is quiet, print nothing at all
if self.level == QUIET:
pass
# Otherwise if in range print to stdout and stderr
elif self.isEnabledFor(level):
if self.emitError(level):
self.write(self.errorStream,message)
else:
self.write(self.outputStream,message)
# Add all log messages to history
self.history.append(message)
def write(self,stream,message):
'''write will write a message to a stream,
first checking the encoding
'''
if isinstance(message,bytes):
message = message.decode('utf-8')
stream.write(message)
def get_logs(self,join_newline=True):
        '''get_logs will return the complete history, joined by newline
(default) or as is.
'''
if join_newline:
return '\n'.join(self.history)
return self.history
def show_progress(self,iteration,total,length=40,min_level=0,prefix=None,
carriage_return=True,suffix=None,symbol=None):
'''create a terminal progress bar, default bar shows for verbose+
:param iteration: current iteration (Int)
:param total: total iterations (Int)
:param length: character length of bar (Int)
'''
percent = 100 * (iteration / float(total))
progress = int(length * iteration // total)
if suffix is None:
suffix = ''
if prefix is None:
prefix = 'Progress'
# Download sizes can be imperfect, setting carriage_return to False
# and writing newline with caller cleans up the UI
if percent >= 100:
percent = 100
progress = length
if symbol is None:
symbol = "="
if progress < length:
bar = symbol * progress + '|' + '-' * (length - progress - 1)
else:
bar = symbol * progress + '-' * (length - progress)
# Only show progress bar for level > min_level
if self.level > min_level:
percent = "%5s" %("{0:.1f}").format(percent)
output = '\r' + prefix + " |%s| %s%s %s" % (bar, percent, '%', suffix)
sys.stdout.write(output),
if iteration == total and carriage_return:
sys.stdout.write('\n')
sys.stdout.flush()
def abort(self,message):
self.emit(ABRT,message,'ABRT')
def error(self,message):
self.emit(ERROR,message,'ERROR')
def warning(self,message):
self.emit(WARNING,message,'WARNING')
def log(self,message):
self.emit(LOG,message,'LOG')
def info(self,message):
self.emit(INFO,message)
def verbose(self,message):
self.emit(VERBOSE,message,"VERBOSE")
def verbose1(self,message):
self.emit(VERBOSE,message,"VERBOSE1")
def verbose2(self,message):
self.emit(VERBOSE2,message,'VERBOSE2')
def verbose3(self,message):
self.emit(VERBOSE3,message,'VERBOSE3')
def debug(self,message):
self.emit(DEBUG,message,'DEBUG')
def is_quiet(self):
'''is_quiet returns true if the level is under 1
'''
        return self.level < 1
def get_logging_level():
'''get_logging_level will configure a logging to standard out based on the user's
selected level, which should be in an environment variable called
SENDIT_MESSAGELEVEL. if SENDIT_MESSAGELEVEL is not set, the maximum level
(5) is assumed (all messages).
'''
return int(os.environ.get("SENDIT_MESSAGELEVEL",5))
def get_user_color_preference():
COLORIZE = os.environ.get('SENDIT_COLORIZE',None)
if COLORIZE is not None:
COLORIZE = convert2boolean(COLORIZE)
return COLORIZE
def convert2boolean(arg):
'''convert2boolean is used for environmental variables that must be
returned as boolean'''
if not isinstance(arg,bool):
return arg.lower() in ("yes", "true", "t", "1","y")
return arg
bot = SenditMessage()
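# Example usage (environment variable names as defined above; the import path
# is an assumption about how this module is packaged):
#
#   $ SENDIT_MESSAGELEVEL=3 SENDIT_COLORIZE=yes python yourscript.py
#
#   from sendit.logger import bot
#   bot.info("goes to stdout")
#   bot.warning("goes to stderr")
#   bot.debug("suppressed unless SENDIT_MESSAGELEVEL >= 5")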
|
This item is selling for $74.88 on eBay.
-Notes: Nice Tops! Minimal Sole wear! Brand New Replacement Gel Orthotic Insoles as pictured. (These are very comfortable insoles that we have used on hundreds of shoes).
|
from heltour import settings
from heltour.tournament.models import *
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from heltour.tournament.tasks import pairings_published
import reversion
import time
import logging
logger = logging.getLogger(__name__)
@receiver(post_save, sender=ModRequest, dispatch_uid='heltour.tournament.automod')
def mod_request_saved(instance, created, **kwargs):
if created:
signals.mod_request_created.send(sender=MOD_REQUEST_SENDER[instance.type],
instance=instance)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['appeal_late_response'],
dispatch_uid='heltour.tournament.automod')
def appeal_late_response_created(instance, **kwargs):
# Figure out which round to use
if not instance.round or instance.round.publish_pairings:
instance.round = instance.season.round_set.order_by('number').filter(publish_pairings=True,
is_completed=False).first()
instance.save()
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['request_continuation'],
dispatch_uid='heltour.tournament.automod')
def request_continuation_created(instance, **kwargs):
# Figure out which round to use
if not instance.round or instance.round.publish_pairings:
instance.round = instance.season.round_set.order_by('number').filter(
publish_pairings=False).first()
instance.save()
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['withdraw'],
dispatch_uid='heltour.tournament.automod')
def withdraw_created(instance, **kwargs):
# Figure out which round to add the withdrawal on
if not instance.round or instance.round.publish_pairings:
instance.round = instance.season.round_set.order_by('number').filter(
publish_pairings=False).first()
instance.save()
# Check that the requester is part of the season
sp = SeasonPlayer.objects.filter(player=instance.requester, season=instance.season).first()
if sp is None:
instance.reject(response='You aren\'t currently a participant in %s.' % instance.season)
return
if not instance.round:
instance.reject(response='You can\'t withdraw from the season at this time.')
return
instance.approve(response='You\'ve been withdrawn for round %d.' % instance.round.number)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['withdraw'],
dispatch_uid='heltour.tournament.automod')
def withdraw_approved(instance, **kwargs):
if not instance.round:
return
# Add the withdrawal if it doesn't already exist
with reversion.create_revision():
reversion.set_comment('Withdraw request approved by %s' % instance.status_changed_by)
PlayerWithdrawal.objects.get_or_create(player=instance.requester, round=instance.round)
@receiver(signals.automod_unresponsive, dispatch_uid='heltour.tournament.automod')
def automod_unresponsive(round_, **kwargs):
groups = {'warning': [], 'yellow': [], 'red': []}
for p in round_.pairings.filter(game_link='', result='', scheduled_time=None).exclude(
white=None).exclude(black=None):
# verify that neither player is previously marked unavailable
if round_.season.league.competitor_type == 'team':
white_unavail = PlayerAvailability.objects.filter(round=round_, player=p.white,
is_available=False).exists()
black_unavail = PlayerAvailability.objects.filter(round=round_, player=p.black,
is_available=False).exists()
if white_unavail or black_unavail:
continue
# check who is not present
white_present = p.get_player_presence(p.white).first_msg_time is not None
black_present = p.get_player_presence(p.black).first_msg_time is not None
if not white_present:
player_unresponsive(round_, p, p.white, groups)
if black_present:
signals.notify_opponent_unresponsive.send(sender=automod_unresponsive,
round_=round_, player=p.black,
opponent=p.white, pairing=p)
time.sleep(1)
if not black_present:
player_unresponsive(round_, p, p.black, groups)
if white_present:
signals.notify_opponent_unresponsive.send(sender=automod_unresponsive,
round_=round_, player=p.white,
opponent=p.black, pairing=p)
time.sleep(1)
signals.notify_mods_unresponsive.send(sender=automod_unresponsive, round_=round_,
warnings=groups['warning'], yellows=groups['yellow'],
reds=groups['red'])
def player_unresponsive(round_, pairing, player, groups):
season = round_.season
league = season.league
has_warning = PlayerWarning.objects.filter(player=player, round__season=season,
type='unresponsive').exists()
if not has_warning and league.get_leaguesetting().warning_for_late_response:
with reversion.create_revision():
reversion.set_comment('Automatic warning for unresponsiveness')
PlayerWarning.objects.get_or_create(player=player, round=round_, type='unresponsive')
punishment = 'You may receive a yellow card.'
allow_continue = league.competitor_type != 'team'
groups['warning'].append(player)
else:
card_color = give_card(round_, player, 'card_unresponsive')
if not card_color:
return
punishment = 'You have been given a %s card.' % card_color
allow_continue = card_color != 'red' and league.competitor_type != 'team'
groups[card_color].append(player)
if league.competitor_type == 'team':
avail, _ = PlayerAvailability.objects.get_or_create(round=round_, player=player)
avail.is_available = False
avail.save()
signals.notify_unresponsive.send(sender=automod_unresponsive, round_=round_, player=player,
punishment=punishment, allow_continue=allow_continue,
pairing=pairing)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['appeal_late_response'],
dispatch_uid='heltour.tournament.automod')
def appeal_late_response_approved(instance, **kwargs):
if not instance.pairing:
return
with reversion.create_revision():
reversion.set_comment('Late response appeal approved by %s' % instance.status_changed_by)
warning = PlayerWarning.objects.filter(player=instance.requester, round=instance.round,
type='unresponsive').first()
if warning:
warning.delete()
else:
revoke_card(instance.round, instance.requester, 'card_unresponsive')
@receiver(signals.automod_noshow, dispatch_uid='heltour.tournament.automod')
def automod_noshow(pairing, **kwargs):
if pairing.game_link:
# Game started, no action necessary
return
white_online = pairing.get_player_presence(pairing.white).online_for_game
black_online = pairing.get_player_presence(pairing.black).online_for_game
if white_online and not black_online:
player_noshow(pairing, pairing.white, pairing.black)
if black_online and not white_online:
player_noshow(pairing, pairing.black, pairing.white)
def player_noshow(pairing, player, opponent):
round_ = pairing.get_round()
signals.notify_noshow.send(sender=automod_unresponsive, round_=round_, player=player,
opponent=opponent)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['claim_win_noshow'],
dispatch_uid='heltour.tournament.automod')
def claim_win_noshow_created(instance, **kwargs):
# Figure out which round to add the claim on
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(is_completed=False,
publish_pairings=True).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
# Check that the requester is part of the season
sp = SeasonPlayer.objects.filter(player=instance.requester, season=instance.season).first()
if sp is None:
instance.reject(response='You aren\'t currently a participant in %s.' % instance.season)
return
if not instance.round:
instance.reject(response='You can\'t claim a win at this time.')
return
if not instance.pairing:
instance.reject(response='You don\'t currently have a pairing you can claim a win for.')
return
p = instance.pairing
opponent = p.white if p.white != instance.requester else p.black
if p.get_player_presence(instance.requester).online_for_game \
and not p.get_player_presence(opponent).online_for_game \
and timezone.now() > p.scheduled_time + timedelta(minutes=21):
instance.approve(response='You\'ve been given a win by forfeit.')
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['claim_win_noshow'],
dispatch_uid='heltour.tournament.automod')
def claim_win_noshow_approved(instance, **kwargs):
if not instance.pairing:
return
p = instance.pairing
opponent = p.white if p.white != instance.requester else p.black
with reversion.create_revision():
reversion.set_comment('Auto forfeit for no-show')
if p.white == instance.requester:
p.result = '1X-0F'
if p.black == instance.requester:
p.result = '0F-1X'
p.save()
add_system_comment(p, '%s no-show' % opponent.lichess_username)
sp = SeasonPlayer.objects.filter(player=opponent, season=instance.season).first()
add_system_comment(sp, 'Round %d no-show' % instance.round.number)
card_color = give_card(instance.round, opponent, 'card_noshow')
if not card_color:
return
punishment = 'You have been given a %s card.' % card_color
allow_continue = card_color != 'red' and instance.season.league.competitor_type != 'team'
signals.notify_noshow_claim.send(sender=claim_win_noshow_approved, round_=instance.round,
player=opponent, punishment=punishment,
allow_continue=allow_continue)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['appeal_noshow'],
dispatch_uid='heltour.tournament.automod')
def appeal_noshow_created(instance, **kwargs):
# Figure out which round to use
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(publish_pairings=True,
is_completed=False).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['appeal_noshow'],
dispatch_uid='heltour.tournament.automod')
def appeal_noshow_approved(instance, **kwargs):
if not instance.pairing:
return
with reversion.create_revision():
reversion.set_comment('No-show appeal approved by %s' % instance.status_changed_by)
revoke_card(instance.round, instance.requester, 'card_noshow')
with reversion.create_revision():
reversion.set_comment('No-show appeal approved by %s' % instance.status_changed_by)
instance.pairing.result = ''
instance.pairing.save()
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['claim_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def claim_draw_scheduling_created(instance, **kwargs):
# Figure out which round to add the claim on
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(is_completed=False,
publish_pairings=True).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
# Check that the requester is part of the season
sp = SeasonPlayer.objects.filter(player=instance.requester, season=instance.season).first()
if sp is None:
instance.reject(response='You aren\'t currently a participant in %s.' % instance.season)
return
if not instance.round:
instance.reject(response='You can\'t claim a scheduling draw at this time.')
return
if not instance.pairing:
instance.reject(
response='You don\'t currently have a pairing you can claim a scheduling draw for.')
return
if instance.pairing.result:
instance.reject(
response='You can\'t claim a scheduling draw for a game which already has a set result.')
return
add_system_comment(instance.pairing, 'Scheduling draw claim made by %s' % instance.requester)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['claim_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def claim_scheduling_draw_approved(instance, **kwargs):
if not instance.pairing:
return
p = instance.pairing
opponent = p.white if p.white != instance.requester else p.black
comment_ = 'Scheduling draw claim approved by %s' % instance.status_changed_by
with reversion.create_revision():
reversion.set_comment(comment_)
p.result = '1/2Z-1/2Z'
p.save()
add_system_comment(p, comment_)
signals.notify_scheduling_draw_claim.send(sender=claim_scheduling_draw_approved,
round_=instance.round, player=opponent)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['appeal_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def appeal_scheduling_draw_created(instance, **kwargs):
# Figure out which round to use
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(publish_pairings=True,
is_completed=False).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
add_system_comment(instance.pairing, 'Scheduling draw appeal by %s' % instance.requester)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['appeal_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def appeal_scheduling_draw_approved(instance, **kwargs):
if not instance.pairing:
return
comment_ = 'Scheduling draw appeal approved by %s' % instance.status_changed_by
with reversion.create_revision():
reversion.set_comment(comment_)
instance.pairing.result = ''
instance.pairing.save()
add_system_comment(instance.pairing, comment_)
def give_card(round_, player, type_):
# TODO: Unit tests?
with transaction.atomic():
sp = SeasonPlayer.objects.filter(season=round_.season, player=player).first()
if not sp:
logger.error('Season player did not exist for %s %s' % (round_.season, player))
return None
already_has_card = PlayerWarning.objects.filter(player=player, round=round_,
type__startswith='card').exists()
card, _ = PlayerWarning.objects.get_or_create(player=player, round=round_, type=type_)
if not already_has_card:
sp.games_missed += 1
with reversion.create_revision():
reversion.set_comment('Automatic %s %s' % (sp.card_color, card.get_type_display()))
sp.save()
return sp.card_color
def revoke_card(round_, player, type_):
with transaction.atomic():
sp = SeasonPlayer.objects.filter(season=round_.season, player=player).first()
if not sp:
logger.error('Season player did not exist for %s %s' % (round_.season, player))
return
card = PlayerWarning.objects.filter(player=player, round=round_, type=type_).first()
if not card:
return
card.delete()
has_other_card = PlayerWarning.objects.filter(player=player, round=round_,
type__startswith='card').exists()
if not has_other_card and sp.games_missed > 0:
sp.games_missed -= 1
with reversion.create_revision():
reversion.set_comment('Card revocation')
sp.save()
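# Sketch of the unit test the TODO in give_card asks for (fixtures here are
# hypothetical; a real test needs the Django test runner plus Season, Round
# and Player data):
#
#   class GiveCardTestCase(TestCase):
#       def test_first_card_increments_games_missed(self):
#           color = give_card(self.round1, self.player, 'card_unresponsive')
#           sp = SeasonPlayer.objects.get(season=self.round1.season,
#                                         player=self.player)
#           self.assertEqual(sp.games_missed, 1)
#           self.assertEqual(color, sp.card_color)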
|
Store up for yourselves treasures in heaven, where moth and rust do not destroy, and where thieves do not break in and steal. For where your treasure is, there your heart will be also.
A few weeks ago, my husband cleaned the garage. I filled plastic bags with items, and we gathered them all to donate to our local charity.
Christmas season soon came and my husband was fulfilling his own mission to help decorate. He pulled the Christmas tree out of the box. It came all equipped with lights. A very cool thing.
Grandkids flung open the front door with shouts of excitement, anticipating hanging tree ornaments.
I handed them the tin with a huge smile at their giggles as we were about to follow our regular tradition. But to our disappointment, the tin only contained a few broken ornaments from years past. We searched everywhere for the rest of the ornaments, which we had stored away last year, but they were gone. No doubt, we had packed them with the items given away to our local charity.
So, there we were, staring at a bare tree lit up but waiting to be dressed.
I tried to comfort the children, and wondered how to explain that ornaments are just that—an addition to decorate what is already there.
I truly believe that. Physical decorations aren't what is important. Our grandkids, healthy and full of life, are God-created ornaments that decorate our life.
Then another nice thing happened. God smiled. I held up two ornaments—a recent gift from a dear friend. They’re small picture frames. In each, we placed the photo of our grandkids, and our tree never looked lovelier.
We’re not missing anything this year. We have the main ingredient for the recipe of true happiness. Just enough ornaments to add meaning. Just enough lights to remind us of Christ. And plenty of green to speak of the evergreen nature of Jesus’ love.
Lord, I pray that this Christmas season you are the reason for my happiness.
Do you want Jesus to be the source of happiness? Talk to a caring Christian friend!
|
#!/usr/bin/env python
"""
bioBakery Workflows: strainphlan
Copyright (c) 2018 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os, fnmatch
# import the workflow class from anadama2
from anadama2 import Workflow
# import the library of biobakery_workflow tasks for shotgun sequences
from biobakery_workflows.tasks import shotgun, general
# import the utilities functions and config settings from biobakery_workflows
from biobakery_workflows import utilities, config
# create a workflow instance, providing the version number and description
# the version number will appear when running this script with the "--version" option
# the description will appear when running this script with the "--help" option
workflow = Workflow(version="0.1", description="A workflow to run strainphlan")
# add the custom arguments to the workflow
workflow_config = config.ShotGun()
workflow.add_argument("input-extension", desc="the input file extension", default="fastq.gz", choices=["fastq.gz","fastq","fq.gz","fq","fasta","fasta.gz"])
workflow.add_argument("threads", desc="number of threads/cores for each task to use", default=1)
workflow.add_argument("bypass-taxonomic-profiling", desc="do not run the taxonomic profiling tasks (a tsv profile for each sequence file must be included in the input folder using the same sample name)", action="store_true")
workflow.add_argument("strain-profiling-options", desc="additional options when running the strain profiling step", default="")
workflow.add_argument("max-strains", desc="the max number of strains to profile", default=20, type=int)
# get the arguments from the command line
args = workflow.parse_args()
# get all input files with the input extension provided on the command line
# return an error if no files are found
input_files = utilities.find_files(args.input, extension=args.input_extension, exit_if_not_found=True)
### STEP #1: Run taxonomic profiling on all of the filtered files ###
if not args.bypass_taxonomic_profiling:
merged_taxonomic_profile, taxonomy_tsv_files, taxonomy_sam_files = shotgun.taxonomic_profile(workflow,
input_files,args.output,args.threads,args.input_extension)
else:
sample_names = utilities.sample_names(input_files,args.input_extension)
    tsv_profiles = utilities.name_files(sample_names, args.input, tag="taxonomic_profile", extension="tsv")
# check all of the expected profiles are found
if len(tsv_profiles) != len(list(filter(os.path.isfile,tsv_profiles))):
sys.exit("ERROR: Bypassing taxonomic profiling but all of the tsv taxonomy profile files are not found in the input folder. Expecting the following input files:\n"+"\n".join(tsv_profiles))
# run taxonomic profile steps bypassing metaphlan2
merged_taxonomic_profile, taxonomy_tsv_files, taxonomy_sam_files = shotgun.taxonomic_profile(workflow,
tsv_profiles,args.output,args.threads,"tsv",already_profiled=True)
# look for the sam profiles
    taxonomy_sam_files = utilities.name_files(sample_names, args.input, tag="bowtie2", extension="sam")
# if they do not all exist, then bypass strain profiling if not already set
if len(taxonomy_sam_files) != len(list(filter(os.path.isfile,taxonomy_sam_files))):
print("Warning: Bypassing taxonomic profiling but not all taxonomy sam files are present in the input folder. Strain profiling will be bypassed. Expecting the following input files:\n"+"\n".join(taxonomy_sam_files))
args.bypass_strain_profiling = True
### STEP #2: Run strain profiling
# Provide taxonomic profiling output so top strains by abundance will be selected
if not args.bypass_strain_profiling:
shotgun.strain_profile(workflow,taxonomy_sam_files,args.output,args.threads,
workflow_config.strainphlan_db_reference,workflow_config.strainphlan_db_markers,merged_taxonomic_profile,
args.strain_profiling_options,args.max_strains)
# start the workflow
workflow.go()
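# Example invocation (paths are placeholders; --input and --output are the
# standard anadama2 workflow options consumed via args.input/args.output above):
#
#   $ python strainphlan.py --input /path/to/fastq/dir --output /path/to/out \
#       --input-extension fastq.gz --threads 8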
|
Fred sent me a couple of scans from an interview Zappa had with BAM Magazine, October ’79 (thanks Fred!). I typed it out (read: don’t shoot me if you find any typo’s), and so here it is for your reading pleasure.
Nice Concert, Nice People: Théâtre de Verdure, Nice, France – October 7 1984.
From the makers of the late great Fils de l’Invention: Le Fredunzel Site!
With all my recent moving about of pages you may be interested to know that Gilles’ Friday Boot can from now on be found right here.
It’s the blimp, Frank! it’s the blimp!!
Just bought a copy of Railroadism by Captain Beefheart and his Magic Bands live in the USA 1972-1981. Each line up contains Zappa alumni and I think it’s possibly the best cd I’ve heard, so far, this year.
What is it with Liverpool? Home to Beefheart and Zappa freaks.
By the way, if any MT-users reading this know of a good way to build photo galleries from within MT, please drop a comment, or mail me.
That is all. Now why do I feel like this has been the most boring post ever to appear in Hot Poop?
Should artists be allowed to use copyrighted materials? Where do the First Amendment and “intellectual property” law collide? What is art’s future if the current laws are allowed to stand?
Do you know your Dada from your Moma?
I didn’t post this. My twin in a galaxy about 10 to the 1028 meters from here did.
“World’s Greatest Sinner Timothy Carey Film Fest! A festival of rare and previously-unreleased films directed by the late character actor Timothy Carey will screen Sat. June 28 at Maestri Gallery, 2024 Chester Ave, Bakersfield, CA, 661-335-0278. Films shown on June 28 will be “The World’s Greatest Sinner” (1963) at 7 pm and “Tweet’s, Ladies of Pasadena” (unreleased) at 8:30 pm. Also shown will be a documentary of the actor created by his son.” More info, contact this person.
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask import Blueprint
from flask import jsonify
from webargs import Arg
from webargs import ValidationError
from webargs.flaskparser import use_args
from mishapp_api.database import Disaster
disaster_api = Blueprint("disaster", __name__)
def radius_gte_zero(val):
if val < 0:
        raise ValidationError("radius must be greater than or equal to 0")
@disaster_api.errorhandler(400)
def handle_bad_request(err):
    data = getattr(err, "data", None)
if data:
err_message = data["message"]
else:
err_message = "Bad request"
return jsonify({"message": err_message}), 400
@disaster_api.errorhandler(404)
def handle_not_found(err):
return jsonify({"message": "Not found"}), 404
@disaster_api.route("/disasters")
@use_args({
"page": Arg(int, default=1),
"per_page": Arg(int, default=20),
"category": Arg(str),
})
def index(args):
q = Disaster.objects
if args["category"]:
q = q(properties__type=args["category"])
docs = q.order_by("-modified_at").paginate(
args["page"],
min(args["per_page"], 20),
)
return jsonify({
"meta": {
"total": docs.total,
"page": docs.page,
"per_page": docs.per_page,
},
"items": [doc.asdict() for doc in docs.items]
})
@disaster_api.route("/disasters/nearby")
@use_args({
"lat": Arg(float, required=True),
"lon": Arg(float, required=True),
"radius": Arg(float, validate=radius_gte_zero, required=True),
"page": Arg(int, default=1),
"per_page": Arg(int, default=20),
"category": Arg(str),
})
def nearby(args):
q = Disaster.objects(
geometry__near={
"$geometry": {
"type": "Point",
"coordinates": [args["lon"], args["lat"]],
},
"$maxDistance": args["radius"],
},
)
if args["category"]:
q = q(properties__type=args["category"])
docs = q.order_by("-modified_at").paginate(
args["page"],
min(args["per_page"], 20),
)
return jsonify({
"meta": {
"total": docs.total,
"page": docs.page,
"per_page": docs.per_page,
},
"items": [doc.asdict() for doc in docs.items]
})
@disaster_api.route("/disasters/verify")
@use_args({
"lat": Arg(float, required=True),
"lon": Arg(float, required=True),
"radius": Arg(float, validate=radius_gte_zero, required=True),
"category": Arg(str),
})
def verify(args):
q = Disaster.objects(
geometry__near={
"$geometry": {
"type": "Point",
"coordinates": [args["lon"], args["lat"]],
},
"$maxDistance": args["radius"],
},
)
if args["category"]:
q = q(properties__type=args["category"])
counter = q.count()
if counter > 0:
return jsonify({"message": "OK"})
return jsonify({"message": "Not found"}), 404
@disaster_api.route("/disasters/<id>")
def get(id):
disaster = Disaster.objects.get_or_404(id=id)
return jsonify(disaster.asdict())
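# Example requests against this blueprint (host and mount point are
# assumptions; the blueprint must first be registered on a Flask app):
#
#   GET /disasters?category=earthquake&per_page=10
#   GET /disasters/nearby?lat=-6.2&lon=106.8&radius=5000
#   GET /disasters/verify?lat=-6.2&lon=106.8&radius=1000&category=flood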
|
Up until now I’ve chosen not to run for President because I felt that the job was too restrictive. Things like laws, the Constitution and having those two other pesky branches of government severely cramp what you can do on the job as Chief Executive.
Apparently, according to the leading candidates for the 2016 election, things have changed. You can ban an entire religion from coming to America. I thought that might be unconstitutional. You can carpet bomb a region. Guess Protocol 1 of the Geneva Convention can be ignored. A President can also mandate how much corporations can pay in compensation to their CEOs. So, that’s not a move that would wind up in front of the Supreme Court in about a week?
Unfettered by such restraints, I’d gladly throw my hat into the political ring. Sounds as though we will now have a Potentate President. Here are a few of the actions I’d take from my throne in the Oval Office.
I’d immediately fire Roger Goodell because I want to tackle the most important issues first.
Political campaigns would be limited to six months in duration because we all want our lives back.
We would annex the Cayman Islands, Bermuda, Ireland and all those other places where U.S. companies go to escape paying taxes.
I would declare each American citizen to also be classified as a corporation because turnabout is fair play!
I would cap political contributions from anyone, or anything, at $500. The Johnson brothers who run a little plumbing business would instantly be as influential as the Koch brothers.
I would ban any television ad from airing at meal times if it contained any of the following words: “diarrhea”, “constipation” or “an erection lasting more than four hours”.
There would be no more political affiliations. Folks would have to really pay attention to a candidate’s ideas not just the single letter listed next to their name on a ballot.
Gun owners could only purchase enough ammunition to fit in Barney Fife’s shirt pocket, where Sheriff Andy made him keep his bullet. This stockpiling of arsenals and ammunition has to end.
Cell phone manufacturers will have to modify their products so they don’t work if the phone is traveling more than three miles per hour. Safer roads, more enjoyable train and bus rides and, maybe, more actual conversations, anyone?
The Food and Drug Administration will only be allowed to ban, or even speak badly about, foods or beverages which don’t taste good. Even snide comments about chocolate or wine will be cause for dismissal.
Eligible voters who do not vote will not be allowed to walk on sidewalks, or clear their throats, until the next election. They only have themselves to blame.
|
# Copyright (c) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__all__ = [
'Result', 'VibrationType', 'StreamEmg', 'Pose', 'EventType',
'VersionComponent', 'OrientationIndex', 'HandlerResult', 'LockingPolicy',
'Arm', 'XDirection',
# Backwards compatibility
'result_t', 'vibration_type_t', 'stream_emg', 'pose_t', 'event_type_t',
'version_component_t', 'orientation_index_t', 'handler_result_t',
'locking_policy_t', 'arm_t', 'x_direction_t']
from ..utils.enum import Enumeration
class Result(Enumeration):
success = 0
error = 1
error_invalid_argument = 2
error_runtime = 3
__fallback__ = -1
class VibrationType(Enumeration):
short = 0
medium = 1
long = 2
__fallback__ = -1
class StreamEmg(Enumeration):
disabled = 0
enabled = 1
__fallback__ = -1
class Pose(Enumeration):
rest = 0
fist = 1
wave_in = 2
wave_out = 3
fingers_spread = 4
double_tap = 5
__fallback__ = -1
num_poses = Enumeration.Data(6)
class EventType(Enumeration):
paired = 0
unpaired = 1
connected = 2
disconnected = 3
arm_synced = 4
arm_unsynced = 5
orientation = 6
pose = 7
rssi = 8
unlocked = 9
locked = 10
emg = 11
__fallback__ = -1
class VersionComponent(Enumeration):
major = 0
minor = 1
patch = 2
__fallback__ = -1
class OrientationIndex(Enumeration):
x = 0
y = 1
z = 2
w = 3
__fallback__ = -1
class HandlerResult(Enumeration):
continue_ = 0
stop = 1
__fallback__ = -1
class LockingPolicy(Enumeration):
none = 0 # Pose events are always sent.
standard = 1 # (default) Pose events are not sent while a Myo is locked.
__fallback__ = -1
class Arm(Enumeration):
right = 0
left = 1
unknown = 2
__fallback__ = -1
class XDirection(Enumeration):
toward_wrist = 0
toward_elbow = 1
unknown = 2
__fallback__ = -1
# Backwards compatibility
result_t = Result
vibration_type_t = VibrationType
stream_emg = StreamEmg
pose_t = Pose
event_type_t = EventType
version_component_t = VersionComponent
orientation_index_t = OrientationIndex
handler_result_t = HandlerResult
locking_policy_t = LockingPolicy
arm_t = Arm
x_direction_t = XDirection
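# Hedged usage sketch (assumes the Enumeration base class maps raw ints to
# members and applies __fallback__ to unknown values; that behavior is
# inferred from the declarations above, not documented here):
#
#   pose = Pose(2)   # -> Pose.wave_in
#   arm = Arm(99)    # -> falls back to -1 for out-of-range input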
|
Adoption of artificial intelligence is crucial to staying ahead of retail disruption.
With leading retailers shifting their focus onto AI and analytics, it's clear that data is becoming a driving force in retail transformation. So much so that Gartner believes "by 2018, retailers engaged in IoT partnerships with major manufacturers will take significant market share from competitors due to direct connections with consumer lives1."
Armed with this information, you'll have everything you need to get ahead, and stay ahead, of disruption as you prepare for the future of retail.
All statements in this report attributable to Gartner represent Intel's interpretation of data, research opinion or viewpoints published as part of a syndicated subscription service by Gartner, Inc., and have not been reviewed by Gartner. Each Gartner publication speaks as of its original publication date (and not as of the date of this report, Artificial Intelligence: Accelerating Retail Transformation). The opinions expressed in Gartner publications are not representations of fact, and are subject to change without notice.
|
"""Curve classes that apply to xarray Datasets.
Curves are mathematical functions on one or more independent
variables. The basic form of the curve classes is in
`models/curve.py`. The curve classes defined here, derived from
`SmartCurve`, take Datasets as arguments.
Smart Curves fall back on Curve logic, but take xarray DataSets and
know which variables they want.
"""
import numpy as np
from . import juliatools, latextools, formatting, diagnostic, formattools
from statsmodels.distributions.empirical_distribution import StepFunction
from openest.models import curve as curve_module
class SmartCurve(object):
def __init__(self):
self.xx = [-np.inf, np.inf] # Backwards compatibility to functions expecting curves
self.deltamethod = False
def __call__(self, ds):
raise NotImplementedError("call not implemented")
@property
def univariate(self):
raise NotImplementedError("univariate not implemented")
def format(self, lang):
raise NotImplementedError()
@staticmethod
def format_call(lang, curve, *args):
if isinstance(curve, SmartCurve):
return curve.format(lang)
if lang == 'latex':
return latextools.call(curve, None, *args)
elif lang == 'julia':
return juliatools.call(curve, None, *args)
class CurveCurve(SmartCurve):
def __init__(self, curve, variable):
super(CurveCurve, self).__init__()
self.curve = curve
self.variable = variable
def __call__(self, ds):
return self.curve(ds[self.variable])
def format(self, lang):
        return SmartCurve.format_call(lang, self.curve, self.variable)
class ConstantCurve(SmartCurve):
def __init__(self, constant, dimension):
super(ConstantCurve, self).__init__()
self.constant = constant
self.dimension = dimension
def __call__(self, ds):
return np.repeat(self.constant, len(ds[self.dimension]))
def format(self, lang):
return {'main': formatting.FormatElement(str(self.constant))}
class LinearCurve(CurveCurve):
def __init__(self, slope, variable):
super(LinearCurve, self).__init__(lambda x: slope * x, variable)
class StepCurve(CurveCurve):
def __init__(self, xxlimits, levels, variable):
step_function = StepFunction(xxlimits[1:-1], levels[1:], ival=levels[0])
super(StepCurve, self).__init__(step_function, variable)
self.xxlimits = xxlimits
self.levels = levels
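# A minimal usage sketch (hedged; 'tas' is a hypothetical variable name in an
# xarray Dataset):
#
#   import xarray as xr
#   ds = xr.Dataset({'tas': ('time', [10., 20., 30.])})
#   LinearCurve(2.0, 'tas')(ds)   # -> DataArray with values [20., 40., 60.]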
class CoefficientsCurve(SmartCurve):
def __init__(self, coeffs, variables):
super(CoefficientsCurve, self).__init__()
self.coeffs = coeffs
self.variables = variables
assert isinstance(variables, list) and len(variables) == len(coeffs), "Variables do not match coefficients: %s <> %s" % (variables, coeffs)
def __call__(self, ds):
result = np.zeros(ds[self.variables[0]].shape)
for ii in range(len(self.variables)):
#result += self.coeffs[ii] * ds[self.variables[ii]].values # TOO SLOW
result += self.coeffs[ii] * ds._variables[self.variables[ii]]._data
return result
def format(self, lang):
coeffvar = formatting.get_variable()
if lang == 'latex':
return {'main': formatting.FormatElement(r"(%s) \cdot \vec{%s}" % (', '.join([varname for varname in self.variables]), coeffvar))}
elif lang == 'julia':
return {'main': formatting.FormatElement(' + '.join(["%s * %s_%d" % (self.variables[ii], coeffvar, ii + 1) for ii in range(len(self.variables))]))}
class ZeroInterceptPolynomialCurve(CoefficientsCurve):
def __init__(self, coeffs, variables, allow_raising=False, descriptions=None):
super(ZeroInterceptPolynomialCurve, self).__init__(coeffs, variables)
if descriptions is None:
descriptions = {}
self.allow_raising = allow_raising
self.descriptions = descriptions
self.getters = [((lambda ds, var=variable: ds._variables[var]) if isinstance(variable, str) else variable)
for variable in self.variables]
def __call__(self, ds):
result = self.coeffs[0] * self.getters[0](ds)._data
for ii in range(1, len(self.variables)):
if not self.allow_raising or self.variables[ii] in ds._variables:
#result += self.coeffs[ii] * ds[self.variables[ii]].values # TOO SLOW
result += self.coeffs[ii] * self.getters[ii](ds)._data
else:
result += self.coeffs[ii] * (self.getters[0](ds)._data ** (ii + 1))
return result
@property
def univariate(self):
return curve_module.ZeroInterceptPolynomialCurve([-np.inf, np.inf], self.coeffs)
def format(self, lang):
coeffvar = formatting.get_variable()
variable = formatting.get_variable()
funcvars = {}
repterms = []
if lang == 'latex':
if isinstance(self.variables[0], str):
repterms.append(r"%s_1 %s" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"%s_1 %s(%s)" % (coeffvar, funcvar, variable))
elif lang == 'julia':
if isinstance(self.variables[0], str):
repterms.append(r"%s[1] * %s" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"%s[1] * %s(%s)" % (coeffvar, funcvar, variable))
        for ii in range(1, len(self.variables)):
            if lang == 'latex':
                if isinstance(self.variables[ii], str):
                    repterms.append(r"%s_%d %s^%d" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"%s_%d %s(%s)^%d" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
            elif lang == 'julia':
                if isinstance(self.variables[ii], str):
                    repterms.append(r"%s[%d] * %s^%d" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"%s[%d] * %s(%s)^%d" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
result = {'main': formatting.FormatElement(' + '.join(repterms))}
for variable in funcvars:
result[funcvars[variable]] = formatting.FormatElement(self.descriptions.get(variable, "Unknown"))
return result
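# A minimal usage sketch (hedged; 'tas' and 'tas2' are hypothetical Dataset
# variables holding the linear term and its precomputed square):
#
#   poly = ZeroInterceptPolynomialCurve([0.5, 0.01], ['tas', 'tas2'])
#   poly(ds)   # -> 0.5 * ds['tas'] + 0.01 * ds['tas2']
#
# With allow_raising=True and no 'tas2' in ds, the second term falls back to
# 0.01 * ds['tas'] ** 2.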
class SumByTimePolynomialCurve(SmartCurve):
"""Equivalent to `ZeroInterceptPolynomialCurve`, but with a different coefficient per timestep.
Parameters
----------
coeffmat : array_like
Matrix of K (order) x T (timesteps)
variables : list of str or function
Name of variable in DataSet or getter function for each exponent term
allow_raising : bool, optional
        Can we just raise the linear term to an exponent, or should each term be in the ds (default)?
descriptions : dict of str => str
Description of each getter function
"""
def __init__(self, coeffmat, variables, allow_raising=False, descriptions=None):
super(SumByTimePolynomialCurve, self).__init__()
self.coeffmat = coeffmat # K x T
assert len(self.coeffmat.shape) == 2
self.variables = variables
self.allow_raising = allow_raising
if descriptions is None:
descriptions = {}
self.descriptions = descriptions
        self.getters = [((lambda ds, var=variable: ds._variables[var]) if isinstance(variable, str) else variable)
                        for variable in self.variables]  # functions return a vector of length T
def __call__(self, ds):
maxtime = self.coeffmat.shape[1]
lindata = self.getters[0](ds)._data[:maxtime]
result = np.sum(self.coeffmat[0, :len(lindata)] * lindata)
for ii in range(1, len(self.variables)):
if not self.allow_raising or self.variables[ii] in ds._variables:
termdata = self.getters[ii](ds)._data[:maxtime]
result += np.sum(self.coeffmat[ii, :len(lindata)] * termdata) # throws error if length mismatch
else:
result += np.sum(self.coeffmat[ii, :len(lindata)] * (lindata ** (ii + 1)))
return result
@property
def univariate(self):
raise NotImplementedError("Probably want to define a matrix-taking curve before this.")
def format(self, lang):
coeffvar = formatting.get_variable()
variable = formatting.get_variable()
funcvars = {}
repterms = []
if lang == 'latex':
if isinstance(self.variables[0], str):
repterms.append(r"%s_1 \cdot %s" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"%s_1 \cdot %s(%s)" % (coeffvar, funcvar, variable))
elif lang == 'julia':
if isinstance(self.variables[0], str):
repterms.append(r"sum(%s[1,:] * %s)" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"sum(%s[1,:] * %s(%s))" % (coeffvar, funcvar, variable))
        for ii in range(1, len(self.variables)):
            if lang == 'latex':
                if isinstance(self.variables[ii], str):
                    repterms.append(r"%s_%d \cdot %s^%d" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"%s_%d \cdot %s(%s)^%d" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
            elif lang == 'julia':
                if isinstance(self.variables[ii], str):
                    repterms.append(r"sum(%s[%d,:] * %s^%d)" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"sum(%s[%d,:] * %s(%s)^%d)" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
result = {'main': formatting.FormatElement(' + '.join(repterms))}
for variable in funcvars:
result[funcvars[variable]] = formatting.FormatElement(self.descriptions.get(variable, "Unknown"))
return result
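# A minimal usage sketch (hedged; hypothetical names — a 2 x T coefficient
# matrix paired with length-T 'tas' and 'tas2' vectors in the Dataset):
#
#   curve = SumByTimePolynomialCurve(coeffmat, ['tas', 'tas2'])
#   curve(ds)   # -> sum_t coeffmat[0, t] * tas[t] + coeffmat[1, t] * tas2[t]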
class SumByTimeCoefficientsCurve(SmartCurve):
"""Equivalent to `TransformCoefficientsCurve`, but with a different coefficient per timestep.
Parameters
----------
coeffmat : array_like
Matrix of K (#predictors) x T (timesteps)
transforms : list of functions
Functions of DataSet to return each predictor
descriptions : list of str
Descriptions of each transformation/predictor
diagnames : list of str
Keys to be used for each predictor in the diagnostic files, or None for no-recording
"""
def __init__(self, coeffmat, transforms, descriptions, diagnames=None):
super(SumByTimeCoefficientsCurve, self).__init__()
self.coeffmat = coeffmat # K x T
assert len(coeffmat.shape) == 2 or np.all(coeffmat == 0)
self.transforms = transforms
self.descriptions = descriptions
self.diagnames = diagnames
assert isinstance(transforms, list) and len(transforms) == coeffmat.shape[0], "Transforms do not match coefficients: %s <> %s" % (transforms, coeffmat.shape)
assert diagnames is None or isinstance(diagnames, list) and len(diagnames) == len(transforms)
def __call__(self, ds):
if np.all(self.coeffmat == 0):
# Happens with edge case of conditional suffixes
return 0
maxtime = self.coeffmat.shape[1]
result = None
for ii in range(len(self.transforms)):
predictor = self.transforms[ii](ds)._data.ravel()[:maxtime]
if self.diagnames:
diagnostic.record(ds.region, ds.year, self.diagnames[ii], np.sum(predictor))
if result is None:
result = np.sum(self.coeffmat[ii, :] * predictor)
else:
result += np.sum(self.coeffmat[ii, :] * predictor)
return result
@property
def univariate(self):
raise NotImplementedError("Probably want to define a matrix-taking curve before this.")
def format(self, lang):
raise NotImplementedError()
class CubicSplineCurve(CoefficientsCurve):
def __init__(self, coeffs, knots, variables, allow_raising=False):
super(CubicSplineCurve, self).__init__(coeffs, variables)
self.allow_raising = allow_raising
self.knots = knots
def __call__(self, ds):
result = np.zeros(ds[self.variables[0]].shape)
try:
for ii in range(len(self.variables)):
result += self.coeffs[ii] * ds._variables[self.variables[ii]]._data
return result
except KeyError as ex:
            # This should only catch KeyErrors coming from
# ds._variables[x].
if self.allow_raising:
return curve_module.CubicSplineCurve(self.knots, self.coeffs)(ds._variables[self.variables[0]]._data)
raise ex
@property
def univariate(self):
return curve_module.CubicSplineCurve(self.knots, self.coeffs)
class TransformCoefficientsCurve(SmartCurve):
"""Use a transformation of ds to produce each predictor.
Parameters
----------
coeffs : array_like
Vector of coefficients on each [transformed] predictor
transforms : list of functions
Functions of DataSet to return each predictor
descriptions : list of str
Descriptions of each transformation/predictor
diagnames : list of str (optional)
Keys to be used for each predictor in the diagnostic files, or None for no-recording
univariate_curve : UnivariateCurve (optional)
If a univariate function is requested, can we produce one?
"""
def __init__(self, coeffs, transforms, descriptions, diagnames=None, univariate_curve=None):
super(TransformCoefficientsCurve, self).__init__()
self.coeffs = coeffs
self.transforms = transforms
self.descriptions = descriptions
self.diagnames = diagnames
self._univariate_curve = univariate_curve
assert isinstance(transforms, list) and len(transforms) == len(coeffs), "Transforms do not match coefficients: %s <> %s" % (transforms, coeffs)
assert diagnames is None or isinstance(diagnames, list) and len(diagnames) == len(transforms)
def __call__(self, ds):
result = None
for ii in range(len(self.transforms)):
predictor = self.transforms[ii](ds)
if self.diagnames:
diagnostic.record(ds.region, ds.year, self.diagnames[ii], np.sum(predictor._data))
if result is None:
result = self.coeffs[ii] * predictor._data
else:
result += self.coeffs[ii] * predictor._data
return result
def format(self, lang):
coeffvar = formatting.get_variable()
funcvars = [formatting.get_function() for transform in self.transforms]
if lang == 'latex':
result = {'main': formatting.FormatElement(r"(%s) \cdot \vec{%s}" % (', '.join(["%s" % funcvars[ii] for ii in range(len(funcvars))]), coeffvar))}
elif lang == 'julia':
result = {'main': formatting.FormatElement(' + '.join(["%s() * %s_%d" % (funcvars[ii], coeffvar, ii + 1) for ii in range(len(funcvars))]))}
for ii in range(len(funcvars)):
result[funcvars[ii]] = formatting.FormatElement(self.descriptions[ii])
return result
@property
def univariate(self):
if self._univariate_curve is not None:
return self._univariate_curve
raise NotImplementedError("univariate transform not specified")
class SelectiveInputCurve(SmartCurve):
"""Assumes input is a matrix, and only pass selected input columns to child curve."""
def __init__(self, curve, variable):
super(SelectiveInputCurve, self).__init__()
self.curve = curve
self.variable = variable
def __call__(self, ds):
return self.curve(ds[self.variable]._data)
    def format(self, lang):
        return SmartCurve.format_call(lang, self.curve, self.variable)
class SumCurve(SmartCurve):
def __init__(self, curves):
        super(SumCurve, self).__init__()
self.curves = curves
def __call__(self, ds):
total = 0
for curve in self.curves:
total += curve(ds)
return total
def format(self, lang):
        formatteds = [SmartCurve.format_call(lang, self.curves[ii]) for ii in range(len(self.curves))]
return formattools.join(' + ', formatteds)
class ProductCurve(SmartCurve):
def __init__(self, curve1, curve2):
super(ProductCurve, self).__init__()
self.curve1 = curve1
self.curve2 = curve2
def __call__(self, ds):
return self.curve1(ds) * self.curve2(ds)
def format(self, lang):
return formatting.build_recursive({'latex': r"(%s) (%s)",
'julia': r"(%s) .* (%s)"}, lang,
self.curve1, self.curve2)
class ShiftedCurve(SmartCurve):
def __init__(self, curve, offset):
super(ShiftedCurve, self).__init__()
self.curve = curve
self.offset = offset
def __call__(self, ds):
return self.curve(ds) + self.offset
@property
def univariate(self):
return curve_module.ShiftedCurve(self.curve.univariate, self.offset)
def format(self, lang):
return formatting.build_recursive({'latex': r"(%s + " + str(self.offset) + ")",
'julia': r"(%s + " + str(self.offset) + ")"},
lang, self.curve)
class ClippedCurve(curve_module.ClippedCurve, SmartCurve):
@property
def univariate(self):
return curve_module.ClippedCurve(self.curve.univariate, self.cliplow)
class OtherClippedCurve(curve_module.OtherClippedCurve, SmartCurve):
@property
def univariate(self):
return curve_module.OtherClippedCurve(self.clipping_curve.univariate, self.curve.univariate, self.clipy)
class MinimumCurve(curve_module.MinimumCurve, SmartCurve):
@property
def univariate(self):
return curve_module.MinimumCurve(self.curve1.univariate, self.curve2.univariate)
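# A minimal composition sketch (hedged; constructor signatures taken from the
# classes above, variable names hypothetical):
#
#   poly = ZeroInterceptPolynomialCurve([0.5, 0.01], ['tas', 'tas2'])
#   shifted = ShiftedCurve(poly, -3.0)               # subtract a baseline
#   total = SumCurve([shifted, ConstantCurve(1.0, 'time')])
#   total(ds)                                        # evaluate and sum both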
|
Lillian Justine Hall McKinney, 94, passed away on January 12, 2019, at her residence in Elizabethton. She was born in Wilder, TN to the late Wilburn and Elsie Shubert Hall. She was also preceded in death by her sisters, Evelyn Joines, Mable Dunlap and Edith Hambrick, and one brother, Wilburn “Gib” Hall. Mrs. McKinney was a member of Harvest Baptist Church. She was an employee of Leon Ferenbach and retired after 43 years of employment.
Those left to cherish her memory are her daughter, Deborah Franklin and husband Roger, of Elizabethton; and son, Michael “Fuji” McKinney and wife Vickie, of Kingsport. Grandchildren: Michelle Johnson and husband Chris, Roger Franklin, Jr. and wife Laura, Gregory McKinney, and Jason McKinney. Great-grandchildren: Keely, Caleb, Nick and Jayden Johnson, Hunter and Connor Franklin. Several nieces and nephews also survive.
Funeral Services will be conducted at 7 p.m. Tuesday in Memorial Funeral Chapel with Pastor Dale Greenwell officiating. Graveside Service and Interment will be at 2 p.m. Wednesday, January 16, 2019 in Happy Valley Memorial Park. Active Pallbearers, who are requested to assemble at the funeral home at 1:20 p.m. Wednesday, will be: Roger Franklin, Jr., Caleb Johnson, Chris Johnson, Frank Johnson, Eric Melton and Jack Hyatt. The family would like to express a special thank you to the staff of the 4th floor at Holston Valley Medical Center, Abby Harris, John Wagner, John Brumit, friends at Harvest Baptist Church, and the staff at Avalon Hospice for such loving care during this time. The family will receive friends from 5 to 7 p.m. Tuesday in the funeral home. Friends may also visit with the family at the residence of Deborah Franklin. Family and friends will assemble at the funeral home at 1:20 p.m. Wednesday to go to the cemetery. Condolences may be sent to the family at our website, www.memorialfcelizabethton.com.
|
"""
Handler for Cisco Nexus device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from ncclient.xml_ import BASE_NS_1_0
from ncclient.operations.third_party.nexus.rpc import ExecCommand
from .default import DefaultDeviceHandler
class NexusDeviceHandler(DefaultDeviceHandler):
"""
Cisco Nexus handler for device specific information.
In the device_params dictionary, which is passed to __init__, you can specify
the parameter "ssh_subsystem_name". That allows you to configure the preferred
SSH subsystem name that should be tried on your Nexus switch. If connecting with
that name fails, or you didn't specify that name, the other known subsystem names
will be tried. However, if you specify it then this name will be tried first.
"""
_EXEMPT_ERRORS = [
"*VLAN with the same name exists*", # returned even if VLAN was created, but
# name was already in use (switch will
# automatically choose different, unique
# name for VLAN)
]
def __init__(self, device_params):
super(NexusDeviceHandler, self).__init__(device_params)
    def add_additional_operations(self):
        return {'exec_command': ExecCommand}
def get_capabilities(self):
# Just need to replace a single value in the default capabilities
c = super(NexusDeviceHandler, self).get_capabilities()
c[0] = "urn:ietf:params:xml:ns:netconf:base:1.0"
return c
def get_xml_base_namespace_dict(self):
"""
Base namespace needs a None key.
See 'nsmap' argument for lxml's Element().
"""
return { None : BASE_NS_1_0 }
def get_xml_extra_prefix_kwargs(self):
"""
Return keyword arguments per request, which are applied to Element().
Mostly, this is a dictionary containing the "nsmap" key.
See 'nsmap' argument for lxml's Element().
"""
d = {
"nxos":"http://www.cisco.com/nxos:1.0",
"if":"http://www.cisco.com/nxos:1.0:if_manager",
"nfcli": "http://www.cisco.com/nxos:1.0:nfcli",
"vlan_mgr_cli": "http://www.cisco.com/nxos:1.0:vlan_mgr_cli"
}
d.update(self.get_xml_base_namespace_dict())
return { "nsmap" : d }
    def get_ssh_subsystem_names(self):
        """
        Return a list of possible SSH subsystem names.
        Different NXOS versions use different SSH subsystem names for netconf.
        Therefore, we return a list so that several can be tried, if necessary.
        The Nexus device handler also accepts the "ssh_subsystem_name" device
        parameter; if specified, that name is tried first.
        """
preferred_ssh_subsystem = self.device_params.get("ssh_subsystem_name")
name_list = [ "netconf", "xmlagent" ]
if preferred_ssh_subsystem:
return [ preferred_ssh_subsystem ] + \
[ n for n in name_list if n != preferred_ssh_subsystem ]
else:
return name_list
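# A minimal connection sketch (hedged; standard ncclient usage — host and
# credentials are placeholders):
#
#   from ncclient import manager
#   with manager.connect(host='192.0.2.1', username='admin', password='secret',
#                        hostkey_verify=False,
#                        device_params={'name': 'nexus',
#                                       'ssh_subsystem_name': 'netconf'}) as m:
#       print(m.server_capabilities)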
|
Micro-residencies involve emerging international artists who will explore issues of development/destruction, identity/borders, the new world/nature and games/social engagement. These residencies will culminate in talks and presentations.
‘Mind part, Body part, Land part’. Exploring the intimacy that emerges from the exchange between mind, body and the surrounding environment within the practice of art and the experience of travel.
and imagining new ones, the tension between instructions and suggestions to apprehend a landscape, building by undoing, the compression and paring down of language (e.g. with ‘hash tags’ and ‘sticky language’) and the construction of an authentic experience.
‘Dragging Anchor’. “This is a nautical term for a boat whose anchor does not catch on the bottom of the ocean so the boat drags along. Many times this happens while the people onboard are asleep so they wake up in the morning in a different place, on the rock … in a place they did not expect.” The artist uses this metaphor to explore the notion of Folkestone slowly and maybe reluctantly being dragged to a new place through regeneration.
Join us for drinks, snacks and a performance about getting together.
SATURDAY 13TH AUGUST 4PM – TRANSIT\ION – the residency culminating presentation and a social event with Gillian White introducing the latest version of her Folkestone Regenerated Monopoly Board. Join us for games and refreshments.
‘Arrival, Departure, Return’. Operating from a disused fish shop, previously called ‘Shoreline’, TRANSIT\ION will work with local fishermen and lorry drivers, collecting images, film and audio that record their experiences of leaving and returning home. Considering Folkestone as a point from which to look out to the sea, and back into land, the artists will explore the ways these two groups relate to place; as edge, terminus or frontier.
Location LOW&HIGH 15 Tontine Street, Folkestone, CT20 1JT.
|
import asyncio
import logging
from aiohttp import ClientSession
from async_timeout import timeout
from em2 import Settings
from em2.exceptions import StartupException
logger = logging.getLogger('em2.utils')
async def _wait_port_open(host, port, delay, loop):
step_size = 0.05
steps = int(delay / step_size)
start = loop.time()
for i in range(steps):
try:
with timeout(step_size, loop=loop):
transport, proto = await loop.create_connection(lambda: asyncio.Protocol(), host=host, port=port)
except asyncio.TimeoutError:
pass
except OSError:
await asyncio.sleep(step_size, loop=loop)
else:
transport.close()
logger.debug('Connected successfully to %s:%s after %0.2fs', host, port, loop.time() - start)
return
raise StartupException(f'Unable to connect to {host}:{port} after {loop.time() - start:0.2f}s')
def wait_for_services(settings, *, delay=5):
"""
Wait for up to `delay` seconds for postgres and redis ports to be open
"""
loop = asyncio.get_event_loop()
coros = [
_wait_port_open(settings.pg_host, settings.pg_port, delay, loop),
_wait_port_open(settings.R_HOST, settings.R_PORT, delay, loop),
]
logger.debug('waiting for postgres and redis to come up...')
loop.run_until_complete(asyncio.gather(*coros, loop=loop))
async def check_server(settings: Settings, path='/', expected_status=200):
url = f'http://127.0.0.1:{settings.web_port}' + path
try:
async with ClientSession() as session:
async with session.get(url) as r:
assert r.status == expected_status, f'response error {r.status} != {expected_status}'
except (ValueError, AssertionError, OSError) as e:
logger.error('web check error: %s: %s, url: "%s"', e.__class__.__name__, e, url)
return 1
else:
logger.info('web check successful "%s", response %d', url, expected_status)
return 0
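# A minimal usage sketch (hedged; assumes a Settings object exposing the
# pg_host/pg_port, R_HOST/R_PORT and web_port attributes used above):
#
#   settings = Settings()
#   wait_for_services(settings, delay=10)
#   loop = asyncio.get_event_loop()
#   exit_code = loop.run_until_complete(check_server(settings, path='/'))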
|
Totter welcomes church groups, daycares, scout groups, preschools, and kindergarten through second grade. Reservations are necessary for all groups. Please call (859) 491-1441.
Include: chips, a child size fountain drink or milk, and choice of pizza (cheese or pepperoni).
|
# RiverID Site Class
# ==================
#
# This file is part of RiverID.
#
# RiverID is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RiverID is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with RiverID. If not, see <http://www.gnu.org/licenses/>.
from riverexception import RiverException
class RiverSite(object):
def __init__(self, db):
self.db = db
def add_site(self, url):
self.db.site.insert({'url': url})
def add_user(self, url, user_id):
self.db.site.update({'url': url}, {'$push': {'user_id': user_id}})
def exists(self, url):
        return self.db.site.find_one({'url': url}) is not None
def get_user_urls(self, user_id):
urls = []
for site in self.db.site.find({'user_id': user_id}):
urls.append(site['url'])
return urls
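# A minimal usage sketch (hedged; assumes a pymongo database handle and the
# legacy insert/update API used above):
#
#   from pymongo import MongoClient
#   site = RiverSite(MongoClient().riverid)
#   site.add_site('https://example.com')
#   site.add_user('https://example.com', 'user-123')
#   site.get_user_urls('user-123')   # -> ['https://example.com']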
|
def find(db, user):
"""
find the notelist
:param db:
:param user:
:return:
"""
document = db.notelist.find_one({"_id": user})
return document
def find_all_lists(db, user):
"""
It finds all lists
:param db:
:param user:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists": 1})
return document.get("lists", [])
def find_list(db, user, list_name):
"""
It finds the list
:param db:
:param user:
:param list_name:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists.{}".format(list_name): 1})
if not document:
return []
return document["lists"].get(list_name, [])
def find_all_lists_names(db, user):
"""
It finds all the lists names
:param db:
:param user:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists": 1})
return [name for name in document["lists"].keys()]
def find_notes(db, user, list_name):
"""
It returns all the notes of a list
:param db:
:param user:
:param list_name:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists": 1})
return document["lists"][list_name]
def insert_new_notelist(db, user):
"""
It inserts a new notelist
:param db:
:param user:
:return:
"""
db.notelist.insert({"_id": user, "lists": {}})
def add_new_list(db, user, list_name):
"""
It adds a new list
:param db:
:param user:
:param list_name:
:return:
"""
notelist = find(db, user)
if not notelist:
insert_new_notelist(db, user)
db.notelist.update({"_id": user}, {"$set": {"lists.{}".format(list_name): []}})
def remove_list(db, user, list_name):
"""
It removes the given list
:param db:
:param user:
:param list_name:
:return:
"""
db.notelist.update({"_id": user}, {"$unset": {"lists.{}".format(list_name): 1}})
def add_note(db, user, list_name, note):
"""
It adds a note
:param db:
:param user:
:param list_name:
:param note:
:return:
"""
the_list = find_list(db, user, list_name)
if not the_list:
add_new_list(db, user, list_name)
db.notelist.update({"_id": user}, {"$addToSet": {"lists.{}".format(list_name): note}})
return True
def remove_note(db, user, list_name, note):
"""
It removes a note
:param db:
:param user:
:param list_name:
:param note:
:return:
"""
result = False
the_list = find_list(db, user, list_name)
if the_list:
        try:
            index = int(note) - 1
            db.notelist.update({"_id": user}, {"$unset": {"lists.{}.{}".format(list_name, index): 1}})
            db.notelist.update({"_id": user}, {"$pull": {"lists.{}".format(list_name): None}})
        except (TypeError, ValueError):
            # note is not a numeric index; remove it by value instead
            db.notelist.update({"_id": user}, {"$pull": {"lists.{}".format(list_name): note}})
result = True
return result
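# A minimal usage sketch (hedged; assumes a pymongo database handle and the
# legacy insert/update API used above):
#
#   from pymongo import MongoClient
#   db = MongoClient().notes
#   add_note(db, 'alice', 'groceries', 'milk')   # creates the list on demand
#   find_notes(db, 'alice', 'groceries')         # -> ['milk']
#   remove_note(db, 'alice', 'groceries', '1')   # removes by 1-based index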
|
253 ft of walk-out beach north of Powell River. Uninterrupted views, including stunning sunsets over Savary Island. Custom-built, 12-year-old, 3-bedroom, 2,130 sq. ft. rancher. Meticulously maintained yard with mature landscaping.
|
import gzip
import os
import pprint
import threading
import time
import traceback
import libarchive
import pyinotify
import rarfile
from lib.FileManager.FM import REQUEST_DELAY
from lib.FileManager.LibArchiveEntry import Entry
from lib.FileManager.SevenZFile import SevenZFile
from lib.FileManager.ZipFile import ZipFile, is_zipfile
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class ExtractArchive(BaseWorkerCustomer):
def __init__(self, params, *args, **kwargs):
super(ExtractArchive, self).__init__(*args, **kwargs)
self.file = params.get('file')
self.extract_path = params.get('extract_path')
self.params = params
self.NUM_WORKING_THREADS = 48
self.extracted_files = {
"count": 0,
"done": False
}
def run(self):
try:
self.preload()
abs_extract_path = self.get_abs_path(self.extract_path)
if not os.path.exists(abs_extract_path):
try:
os.makedirs(abs_extract_path)
except Exception as e:
self.logger.error("Cannot create extract path %s. %s" % (str(e), traceback.format_exc()))
raise Exception("Cannot create extract path")
elif os.path.isfile(abs_extract_path):
raise Exception("Extract path incorrect - file exists")
abs_archive_path = self.get_abs_path(self.file.get("path"))
if not os.path.exists(abs_archive_path):
raise Exception("Archive file is not exist")
self.on_running(self.status_id, pid=self.pid, pname=self.name)
self.logger.debug("Start extracting %s", abs_archive_path)
# for rar and zip same algorithm
if is_zipfile(abs_archive_path) or rarfile.is_rarfile(abs_archive_path) or SevenZFile.is_7zfile(
abs_archive_path):
if is_zipfile(abs_archive_path):
self.logger.info("Archive ZIP type, using zipfile (beget)")
a = ZipFile(abs_archive_path)
elif rarfile.is_rarfile(abs_archive_path):
self.logger.info("Archive RAR type, using rarfile")
a = rarfile.RarFile(abs_archive_path)
else:
self.logger.info("Archive 7Zip type, using py7zlib")
a = SevenZFile(abs_archive_path)
                # extract empty files first (only the py7zlib-backed SevenZFile
                # wrapper exposes this header structure; zip/rar archives skip it)
                if isinstance(a, SevenZFile):
                    for fileinfo in a.archive.header.files.files:
                        if not fileinfo['emptystream']:
                            continue
                        name = fileinfo['filename']
                        try:
                            unicode_name = name.encode('UTF-8').decode('UTF-8')
                        except UnicodeDecodeError:
                            unicode_name = name.encode('cp866').decode('UTF-8')
                        unicode_name = unicode_name.replace('\\', '/')  # for Windows names in rar etc.
                        file_name = os.path.join(abs_extract_path, unicode_name)
                        dir_name = os.path.dirname(file_name)
                        if not os.path.exists(dir_name):
                            os.makedirs(dir_name)
                        if os.path.exists(dir_name) and not os.path.isdir(dir_name):
                            os.remove(dir_name)
                            os.makedirs(dir_name)
                        if os.path.isdir(file_name):
                            continue
                        f = open(file_name, 'w')
                        f.close()
infolist = a.infolist()
not_ascii = False
# checking ascii names
try:
abs_extract_path.encode('utf-8').decode('ascii')
for name in a.namelist():
name.encode('utf-8').decode('ascii')
except UnicodeDecodeError:
not_ascii = True
except UnicodeEncodeError:
not_ascii = True
t = threading.Thread(target=self.progress, args=(infolist, self.extracted_files, abs_extract_path))
t.daemon = True
t.start()
try:
if not_ascii:
for name in a.namelist():
try:
unicode_name = name.encode('UTF-8').decode('UTF-8')
except UnicodeDecodeError:
unicode_name = name.encode('cp866').decode('UTF-8')
unicode_name = unicode_name.replace('\\', '/') # For windows name in rar etc.
file_name = os.path.join(abs_extract_path, unicode_name)
dir_name = os.path.dirname(file_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if os.path.exists(dir_name) and not os.path.isdir(dir_name):
os.remove(dir_name)
os.makedirs(dir_name)
if os.path.isdir(file_name):
continue
f = open(file_name, 'wb')
try:
data = a.read(name)
f.write(data)
f.close()
except TypeError:
# pass for directories its make recursively for files
f.close()
os.remove(file_name)
else:
self.logger.info("EXTRACT ALL to %s , encoded = %s" % (
pprint.pformat(abs_extract_path), pprint.pformat(abs_extract_path)))
a.extractall(abs_extract_path) # Not working with non-ascii windows folders
except Exception as e:
self.logger.error("Error extract path %s. %s" % (str(e), traceback.format_exc()))
raise e
finally:
self.extracted_files["done"] = True
t.join()
elif libarchive.is_archive(abs_archive_path):
self.logger.info("Archive other type, using libarchive")
next_tick = time.time() + REQUEST_DELAY
print(pprint.pformat("Clock = %s , tick = %s" % (str(time.time()), str(next_tick))))
infolist = []
with libarchive.Archive(abs_archive_path, entry_class=Entry) as a:
for entry in a:
infolist.append(entry)
with libarchive.Archive(abs_archive_path, entry_class=Entry) as a:
for entry in a:
entry_path = os.path.join(abs_extract_path, entry.pathname)
self.logger.debug("Entry pathname %s - %s", entry.pathname, entry.size)
if time.time() > next_tick:
progress = {
'percent': round(float(self.extracted_files["count"]) / float(len(infolist)), 2),
'text': str(int(
round(float(self.extracted_files["count"]) / float(len(infolist)), 2) * 100)) + '%'
}
self.on_running(self.status_id, progress=progress, pid=self.pid, pname=self.name)
next_tick = time.time() + REQUEST_DELAY
self.extracted_files["count"] += 1
dir_name = os.path.dirname(entry_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if os.path.exists(dir_name) and not os.path.isdir(dir_name):
os.remove(dir_name)
os.makedirs(dir_name)
if os.path.isdir(entry_path):
continue
f = open(entry_path, 'w')
a.readpath(f)
elif abs_archive_path[-3:] == ".gz":
self.logger.info("gz file type, using gzip")
try:
# if its just a gz file
a = gzip.open(abs_archive_path)
file_content = a.read()
a.close()
file_name = os.path.splitext(os.path.basename(abs_archive_path))[0]
file_path = os.path.join(abs_extract_path, file_name)
infolist = [file_name]
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
extracted = open(file_path, 'wb')
extracted.write(file_content)
extracted.close()
except Exception as e:
raise e
finally:
self.extracted_files["done"] = True
else:
raise Exception("Archive file has unkown format")
progress = {
'percent': round(float(self.extracted_files["count"]) / float(len(infolist)), 2),
'text': str(int(round(float(self.extracted_files["count"]) / float(len(infolist)), 2) * 100)) + '%'
}
result = {}
time.sleep(REQUEST_DELAY)
self.on_success(self.status_id, progress=progress, data=result, pid=self.pid, pname=self.name)
except Exception as e:
self.extracted_files["done"] = True
result = {
"error": True,
"message": str(e),
"traceback": traceback.format_exc()
}
self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
def progress(self, infolist, progress, extract_path):
self.logger.debug("extract thread progress() start")
next_tick = time.time() + REQUEST_DELAY
# print pprint.pformat("Clock = %s , tick = %s" % (str(time.time()), str(next_tick)))
progress["count"] = 0
class Identity(pyinotify.ProcessEvent):
def process_default(self, event):
progress["count"] += 1
# print("Has event %s progress %s" % (repr(event), pprint.pformat(progress)))
wm1 = pyinotify.WatchManager()
wm1.add_watch(extract_path, pyinotify.IN_CREATE, rec=True, auto_add=True)
s1 = pyinotify.Stats() # Stats is a subclass of ProcessEvent
notifier1 = pyinotify.ThreadedNotifier(wm1, default_proc_fun=Identity(s1))
notifier1.start()
total = float(len(infolist))
while not progress["done"]:
if time.time() > next_tick:
# print("Tick progress %s / %s" % (pprint.pformat(progress), str(total)))
count = float(progress["count"]) * 1.5
if count <= total:
op_progress = {
'percent': round(count / total, 2),
'text': str(int(round(count / total, 2) * 100)) + '%'
}
else:
op_progress = {
'percent': round(99, 2),
'text': '99%'
}
self.on_running(self.status_id, progress=op_progress, pid=self.pid, pname=self.name)
next_tick = time.time() + REQUEST_DELAY
time.sleep(REQUEST_DELAY)
        # otherwise users may think the extraction did not finish completely
op_progress = {
'percent': round(99, 2),
'text': '99%'
}
self.on_running(self.status_id, progress=op_progress, pid=self.pid, pname=self.name)
time.sleep(REQUEST_DELAY)
notifier1.stop()
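# A minimal usage sketch (hedged; the params dict is hypothetical and any
# positional arguments required by BaseWorkerCustomer are omitted):
#
#   worker = ExtractArchive({'file': {'path': 'uploads/backup.zip'},
#                            'extract_path': 'uploads/backup'})
#   worker.run()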
|
I worry that United Airlines, which has already wrung everything it could out of bankruptcy, will fail in the fourth and the first quarters—which are the toughest to get through. It's not a coincidence that most airlines go under in March, because they just can't get enough money to make it to summer.
I am not sure what other cuts United can make. So, if you have any miles accumulated on airlines like United, I suggest you figure out a way to redeem those miles on their partner airlines.
You can do that or you can go to an art supply store, get a picture frame, and hang your miles on the wall.
They are being devalued by the hour and if the airline goes out of business, so do the miles.
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import nao, scf as scf_nao
#from pyscf.nao.hf import RHF
mol = gto.M( verbose = 1,
atom = '''
H 0 0 0
H 0 0.757 0.587''', basis = 'cc-pvdz',)
class KnowValues(unittest.TestCase):
def test_scf_gto_vs_nao(self):
""" Test computation of overlaps between NAOs against overlaps computed between GTOs"""
gto_hf = scf.RHF(mol)
gto_hf.kernel()
nao_hf = scf_nao(mf=gto_hf, gto=mol)
nao_hf.dump_chkfile=False
e_tot = nao_hf.kernel_scf()
self.assertAlmostEqual(gto_hf.e_tot, e_tot, 4)
for e1,e2 in zip(nao_hf.mo_energy[0,0],gto_hf.mo_energy): self.assertAlmostEqual(e1, e2, 3)
for o1,o2 in zip(nao_hf.mo_occ[0,0],gto_hf.mo_occ): self.assertAlmostEqual(o1, o2)
if __name__ == "__main__": unittest.main()
|
Please note: name, email address, postal code, and telephone number are mandatory fields. Depending on the nature of your inquiry, it may be possible to answer you by phone. The personal information you provide will not be shared or used for any purpose other than your inquiry.
© 2002-2019 OHBA Corporation all rights reserved.
|
import sys
import models
import model_utils
import math
import numpy as np
import video_level_models
import tensorflow as tf
import utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS
class CnnLstmMemoryModel(models.BaseModel):
def cnn(self,
model_input,
l2_penalty=1e-8,
num_filters = [1024, 1024, 1024],
filter_sizes = [1,2,3],
**unused_params):
max_frames = model_input.get_shape().as_list()[1]
num_features = model_input.get_shape().as_list()[2]
shift_inputs = []
for i in xrange(max(filter_sizes)):
if i == 0:
shift_inputs.append(model_input)
else:
shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])
cnn_outputs = []
for nf, fs in zip(num_filters, filter_sizes):
sub_input = tf.concat(shift_inputs[:fs], axis=2)
sub_filter = tf.get_variable("cnn-filter-len%d"%fs, shape=[num_features*fs, nf], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter))
cnn_output = tf.concat(cnn_outputs, axis=2)
return cnn_output
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
lstm_size = int(FLAGS.lstm_cells)
number_of_layers = FLAGS.lstm_layers
cnn_output = self.cnn(model_input, num_filters=[1024,1024,1024], filter_sizes=[1,2,3])
normalized_cnn_output = tf.nn.l2_normalize(cnn_output, dim=2)
    # The CNN output is L2-normalized above and feeds the stacked LSTM below.
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0, state_is_tuple=True)
for _ in range(number_of_layers)
],
state_is_tuple=True)
loss = 0.0
with tf.variable_scope("RNN"):
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, normalized_cnn_output,
sequence_length=num_frames,
swap_memory=FLAGS.rnn_swap_memory,
dtype=tf.float32)
final_state = tf.concat(map(lambda x: x.c, state), axis = 1)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=final_state,
original_input=model_input,
vocab_size=vocab_size,
**unused_params)
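# A minimal usage sketch (hedged; assumes the YouTube-8M starter-code flags
# --lstm_cells, --lstm_layers, --rnn_swap_memory and
# --video_level_classifier_model are defined elsewhere; 4716 is just an
# example vocabulary size):
#
#   model = CnnLstmMemoryModel()
#   outputs = model.create_model(model_input, vocab_size=4716,
#                                num_frames=num_frames)
#   predictions = outputs['predictions']   # batch_size x vocab_size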
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculatorUi.ui'
#
# Created: Mon Jan 6 00:27:51 2014
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_hesap(object):
def setupUi(self, hesap):
hesap.setObjectName("hesap")
hesap.resize(157, 135)
self.label = QtGui.QLabel(hesap)
self.label.setGeometry(QtCore.QRect(10, 10, 21, 16))
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(hesap)
self.label_2.setGeometry(QtCore.QRect(10, 40, 21, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtGui.QLabel(hesap)
self.label_3.setGeometry(QtCore.QRect(10, 100, 21, 16))
self.label_3.setObjectName("label_3")
self.sum = QtGui.QPushButton(hesap)
self.sum.setGeometry(QtCore.QRect(30, 70, 111, 24))
self.sum.setObjectName("sum")
self.a = QtGui.QLineEdit(hesap)
self.a.setGeometry(QtCore.QRect(30, 10, 113, 23))
self.a.setObjectName("a")
self.b = QtGui.QLineEdit(hesap)
self.b.setGeometry(QtCore.QRect(30, 40, 113, 23))
self.b.setObjectName("b")
self.c = QtGui.QLineEdit(hesap)
self.c.setGeometry(QtCore.QRect(30, 100, 113, 23))
self.c.setObjectName("c")
self.retranslateUi(hesap)
QtCore.QMetaObject.connectSlotsByName(hesap)
def retranslateUi(self, hesap):
hesap.setWindowTitle(QtGui.QApplication.translate("hesap", "addition", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("hesap", "a", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("hesap", "b", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("hesap", "c", None, QtGui.QApplication.UnicodeUTF8))
self.sum.setText(QtGui.QApplication.translate("hesap", "calculate", None, QtGui.QApplication.UnicodeUTF8))
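# A minimal usage sketch (standard boilerplate for pyside-uic generated
# classes):
#
#   import sys
#   app = QtGui.QApplication(sys.argv)
#   window = QtGui.QWidget()
#   ui = Ui_hesap()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())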
|
If you do try it, definitely let us know how it turns out!
Hi Mike, I just found your recipe. I did a picnic shoulder ham for Easter very similar to yours, but trimmed the skin and most of the fat off. I was going to smoke 12 for Christmas gifts this year and didn't write anything down in the spring... thanks! What are your thoughts- fat on/fat off? I appreciate it!
Hi Jeff - we haven't tried skin off, but we've been really happy with how skin-on has turned out. The fat under the skin is super tasty.
Just finished one with skin off, 1/2 inch fat left intact. Smoked at 225 for first 3-4 hours, then heated it up to 250 for another 3-4 hours. Worked beautifully! I learned from a book I got for Christmas - The Best of America's Test Kitchen 2015. Instead of a liquid brine, I used a salt/brown sugar/thyme/rosemary rub and applied outside and in a 2" deep cut thru the front side. Then wrapped tightly in plastic and then into fridge for 48 hours.
|
from engine.service import (
AbstractService, GraphicsService,
ServiceManager
)
import pyglet
import kytten
class GuiService(AbstractService):
""" Service to manage GUI screens """
def __init__(self, window, group_index = 1):
self.guis = {}
self.window = window
self.batch = ServiceManager.instance[GraphicsService].batch
self.group = pyglet.graphics.OrderedGroup(group_index)
def add_gui(self, gui):
"""Add a gui to the manager."""
assert(isinstance(gui, AbstractGui))
self.guis[gui.name] = gui
def show_gui(self, name):
self.guis[name].show(self.window, self.batch,
self.group)
def hide_gui(self, name):
self.guis[name].hide()
def on_draw(self):
self.batch.draw()
class AbstractGui(object):
def __init__(self, name):
self.name = name
self.root = None
import os.path
pth = os.path.abspath(os.path.join('graphics', 'theme'))
self.theme = kytten.Theme(pth,
override={
"gui_color": [64, 128, 255, 255],
"font_size": 14
})
self.visible = False
def _build_gui(self, window, batch, group):
return kytten.Dialog(
kytten.TitleFrame("AbstractGui",
width=200, height=150
),
window=window, batch=batch,
group=group, theme=self.theme
)
def show(self, window, batch, group):
if not self.visible:
self.root = self._build_gui(window, batch, group)
self.visible = True
def hide(self):
if self.visible:
self.root.teardown()
self.visible = False
self.root = None
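# A minimal usage sketch (hedged; assumes the ServiceManager and
# GraphicsService wiring from engine.service is already initialized):
#
#   window = pyglet.window.Window()
#   gui_service = GuiService(window)
#   gui_service.add_gui(AbstractGui('main'))
#   gui_service.show_gui('main')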
|
-doesnt understand whats going on- XD Woah. Joni's real easy on the eyes, hmm? *wink wink* LMAO.
If Pocky breaks up with me, I'm out!!
I never recalled us being "together"
|