# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# *StochasticHH.py : The stochastic Hodgkin-Huxley model, used in the
# above study.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python StochasticHH.py *mesh* *root* *iter_n*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (intended to be an integer) is an identifier number for each
# simulation iteration.
#
# E.g:
# $ python StochasticHH.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochHHsims/ 1
#
#
# OUTPUT
#
# In (root)/data/StochasticHH/(mesh)/(iter_n+time) directory
# 2 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), Na current, K current, leak current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
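#
# For example (illustrative only, not part of the original script), the
# plain-text output could later be loaded with numpy:
#
#   import numpy as np
#   t_ms, I_Na, I_K, I_L = np.loadtxt('currents.dat', unpack=True)
#   t_ms, V_mV = np.loadtxt('voltage.dat', unpack=True)
#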
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import steps.interface
import math
import time
from random import *
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from steps.saving import *
from extra.constants_hh import *
import sys
import os
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
_, meshfile_ab, root, iter_n = sys.argv
if meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp':
cyl160=True
else:
cyl160=False
########################### BIOCHEMICAL MODEL ###############################
mdl = Model()
r = ReactionManager()
with mdl:
ssys = SurfaceSystem.Create()
# Potassium channel
n0, n1, n2, n3, n4 = SubUnitState.Create()
Kn = SubUnit.Create([n0, n1, n2, n3, n4])
Kchan = Channel.Create([Kn])
_a_n = VDepRate(lambda V: 1.0e3 * a_n(V*1.0e3)* Qt, vrange=Vrange)
_b_n = VDepRate(lambda V: 1.0e3 * b_n(V*1.0e3)* Qt, vrange=Vrange)
# Sodium channel
m0, m1, m2, m3, h0, h1 = SubUnitState.Create()
Nam, Nah = SubUnit.Create([m0, m1, m2, m3], [h0, h1])
Nachan = Channel.Create([Nam, Nah])
_a_m = VDepRate(lambda V:1.0e3*a_m(V*1.0e3)* Qt, vrange=Vrange)
_b_m = VDepRate(lambda V:1.0e3*b_m(V*1.0e3)* Qt, vrange=Vrange)
_a_h = VDepRate(lambda V:1.0e3*a_h(V*1.0e3)* Qt, vrange=Vrange)
_b_h = VDepRate(lambda V:1.0e3*b_h(V*1.0e3)* Qt, vrange=Vrange)
# Leak channel
Leak = SubUnitState.Create()
L = Channel.Create([Leak])
with ssys:
with Kchan[...]:
n0.s <r[1]> n1.s <r[2]> n2.s <r[3]> n3.s <r[4]> n4.s
r[1].K = 4 * _a_n, 1 * _b_n
r[2].K = 3 * _a_n, 2 * _b_n
r[3].K = 2 * _a_n, 3 * _b_n
r[4].K = 1 * _a_n, 4 * _b_n
with Nachan[...]:
h0.s <r[1]> h1.s
r[1].K = _a_h, _b_h
m0.s <r[1]> m1.s <r[2]> m2.s <r[3]> m3.s
r[1].K = 3*_a_m, _b_m
r[2].K = 2*_a_m, 2*_b_m
r[3].K = _a_m, 3*_b_m
OC_K = OhmicCurr.Create(Kchan[n4], K_G, K_rev)
OC_Na = OhmicCurr.Create(Nachan[m3, h1], Na_G, Na_rev)
OC_L = OhmicCurr.Create(L[Leak], L_G, leak_rev)
##################################
########### MESH & COMPARTMENTALIZATION #################
##########Import Mesh
mesh = TetMesh.Load('./meshes/'+meshfile_ab)
with mesh:
rad, zmin, zmax = 1e-6, -200e-6, 200e-6
inner_tets, outer_tets = TetList(), TetList()
for t in mesh.tets:
c = t.center
if zmin <= c.z <= zmax and c.x**2 + c.y**2 <= rad**2:
inner_tets.append(t)
else:
outer_tets.append(t)
print(len(outer_tets), " tets in outer compartment")
print(len(inner_tets), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh.tets[0.0, 0.0, 0.0]
########## Create an intracellular compartment i.e. cytosolic compartment
cyto = Compartment.Create(inner_tets)
if cyl160:
# Ensure that we use points a small distance inside the boundary:
minz, maxz = mesh.bbox.min.z, mesh.bbox.max.z
memb_tris = TriList(tri for tri in mesh.surface if minz < tri.center.z < maxz)
else:
print('Finding connecting triangles...')
memb_tris = inner_tets.surface & outer_tets.surface
########## Create a membrane as a surface mesh
memb = Patch.Create(memb_tris, cyto, None, ssys)
# For EField calculation
print("Creating membrane..")
membrane = Membrane.Create([memb])
print("Membrane created.")
###### TRANSLATION TOKEN
# # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
rng = RNG('mt19937', 512, 7)
sim = Simulation('Tetexact', mdl, mesh, rng, calcMembPot=True)
#### Recording #####
dc = time.strftime('%b%d_%H_%M_%S_%Y')
runPath = os.path.join(root, 'data/StochasticHH/', meshfile_ab, f'{iter_n}__{dc}')
os.makedirs(runPath, exist_ok=True)
rs = ResultSelector(sim)
rs1 = rs.SUM(rs.TRIS(memb_tris).OC_Na.I) <<\
rs.SUM(rs.TRIS(memb_tris).OC_K.I) <<\
rs.SUM(rs.TRIS(memb_tris).OC_L.I)
rs2 = rs.TET(cent_tet).V
rs1.toFile(os.path.join(runPath, 'currents.dat.bin'))
rs2.toFile(os.path.join(runPath, 'voltage.dat.bin'))
sim.toSave(rs1, rs2, dt=TIMECONVERTER)
print("Resetting simulation object..")
sim.newRun()
print("Injecting molecules..")
sim.Temp = TEMPERATURE+273.15
surfarea = sim.memb.Area
sim.memb.L[Leak].Count = round(L_ro * surfarea)
for h, hsu in enumerate(Nah):
for m, msu in enumerate(Nam):
sim.memb.Nachan[msu, hsu].Count = round(Na_ro*surfarea*Na_facs[h*4 + m])
for n, ksu in enumerate(Kn):
sim.memb.Kchan[ksu].Count = round(K_ro*surfarea*K_facs[n])
print('Leak', round(L_ro * surfarea))
print('Na_m0h0', round(Na_ro*surfarea*Na_facs[0]))
print('Na_m1h0', round(Na_ro*surfarea*Na_facs[1]))
print('Na_m2h0', round(Na_ro*surfarea*Na_facs[2]))
print('Na_m3h0', round(Na_ro*surfarea*Na_facs[3]))
print('Na_m0h1', round(Na_ro*surfarea*Na_facs[4]))
print('Na_m1h1', round(Na_ro*surfarea*Na_facs[5]))
print('Na_m2h1', round(Na_ro*surfarea*Na_facs[6]))
print('Na_m3h1', round(Na_ro*surfarea*Na_facs[7]))
print('K_n0', round(K_ro*surfarea*K_facs[0]))
print('K_n1', round(K_ro*surfarea*K_facs[1]))
print('K_n2', round(K_ro*surfarea*K_facs[2]))
print('K_n3', round(K_ro*surfarea*K_facs[3]))
print('K_n4', round(K_ro*surfarea*K_facs[4]))
print("Targeted Injection: ", round(Na_ro*surfarea), "Na channels")
print("Targeted Injection: ", round(K_ro*surfarea), "K channels")
print("Targeted Injection: ", round(L_ro*surfarea), "Leak channels")
sim.EfieldDT = EF_DT
sim.membrane.Potential = init_pot
sim.membrane.VolRes = Ra
sim.membrane.Capac = memb_capac
rng.initialize(100*int(iter_n))
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
sim.run(TIMECONVERTER*l)
# This last part is only present for backwards compatibility with the scripts created with API_1.
# We need to save to text files, like in the original script.
with open(os.path.join(runPath, 'currents.dat'), 'w') as f:
for t, row in zip(rs1.time[0], rs1.data[0]):
f.write('%.6g' % (t * 1e3) + ' ')
for val in row:
f.write('%.6g' % (val * 0.1 / surfarea) + ' ')
f.write('\n')
with open(os.path.join(runPath, 'voltage.dat'), 'w') as f:
for t, row in zip(rs2.time[0], rs2.data[0]):
f.write('%.6g' % (t * 1e3) + ' ')
for val in row:
f.write('%.6g' % (val * 1e3) + ' ')
f.write('\n')
|
On 4/4/2016, Lilia Shevtsova wrote in The American Interest about the powerful effect of US-European sanctions against Russia and Syria. Frozen assets in the U.S. of the oligarch Yuri Kovalchuk, a close ally of Putin, have totaled $572 million. In Italy, the Rottenberg brothers have also sustained losses worth nearly $40 million.
Ms. Shevtsova emphasizes that these “sanctions have exacerbated the economic recession in Russia: by accelerating capital flight and shrinking internal financial resources; by restricting Russia’s access to international financial markets and triggering a financial crunch; and by creating crisis of confidence in international business circles regarding Russia.” As a result, Russia could find no buyers or investors for its government-owned oil companies.
#FollowTheMoney- It’s all about the Easing and Lifting Sanctions.
Vladimir Putin has proven to be nothing more than another brutal dictator. He will do anything—including murder—to maintain enormous power, thwart any and all perceived threats to him and Russia, while personally enriching himself with billions of rubles.
Just a few weeks before the Election, Franklin Foer broke this incredible story on Russia’s Alpha Bank server, which is installed in—yes, it’s true—Trump Tower!
Funny how Corporate Media buried that story tout de suite. To this day, Trump, every member of his campaign, plus way too many Republicans have challenged the now insurmountable evidence that Russia, and Putin himself, directed multiple major hacks of US agencies. Why do these so-called Americans now in power trust Russia’s Intel more than our own?
5. The US Election Assistance Commission, whose sole purpose is to certify the security of American voting machines.
Why did Russia hack American voting and email systems to benefit Trump?
Hillary Clinton not only supported the sanctions, but made it crystal clear that as President she would continue a hard line against Syria and Russia’s brutality. Not only would the current economic sanctions remain in place, but she promised more actions. Hillary was Putin’s # 1 threat to his Power and to his country’s economic stability.
We know that Donald Trump was an obvious and very easy choice, because he owes enormous financial debt to Russian Banks. How much? We would know the exact amounts if mainstream media had done their job instead of focusing most of their time on Trump’s sophomoric antics.
What if Corporate Media (MSM) focused as much attention on Trump’s utter defiant refusal to release his tax returns as they did on Clinton’s e-mails?
The abject failure of MSM continues to give Putin a free hand to put the very core of US democracy and “free & fair elections” at great peril!
Hillary Clinton has almost 3 Million more votes. She is the clear winner of the popular vote. Will the obsolete Electoral College do the moral and right thing to protect our democratic republic and vote for her? Extremely doubtful.
Make no mistake about it, Russia has committed an act of war. CyberWar is no less a direct attack on the safety and sovereignty of “We The People” than Pearl Harbor!
Therefore it is essential for President Obama & all Democrats to invalidate this election, to demand a full investigation, and to call for a full #Revote. Today.
|
#from whoosh.reading import iter_docs
import threading, sys, time, os, csv, re, codecs, shutil
from collections import defaultdict
def reindex_all(reader, writer, analyzer):
for i in xrange(reader.maxDoc()):
if reader.isDeleted(i): continue
doc = reader.document(i)
p = doc.get("path")
pkid = doc.get('txtorg_id')
if p is None:
# No filepath specified, just use original document
writer.updateDocument(Term("txtorg_id",pkid),doc,analyzer)
else:
# if a path field is found, try to read the file it points to and add a contents field
edited_doc = Document()
for f in doc.getFields():
edited_doc.add(Field.cast_(f))
try:
inf = open(p)
contents = unicode(inf.read(), 'UTF-8')
inf.close()
if len(contents) > 0:
edited_doc.add(Field("contents", contents,
Field.Store.NO,
Field.Index.ANALYZED,
Field.TermVector.YES))
else:
print "warning: no content in %s" % filename
except:
print "Could not read file; skipping"
writer.updateDocument(Term("txtorg_id",pkid),edited_doc,analyzer)
def delete_index(index_path):
shutil.rmtree(index_path)
def get_fields_and_values(reader, max_vals = 30):
all_fields = defaultdict(set)
for doc in reader.iter_docs():
print "get fields?"
for field_name in reader.indexed_field_names():
all_fields[field_name] = reader.field_terms(field_name)
return(dict(all_fields))
# for i in xrange(reader.maxDoc()):
# if reader.isDeleted(i): continue
# doc = reader.document(i)
# for f in doc.getFields():
# field = Field.cast_(f)
# if len(all_fields[field.name()]) < max_vals: all_fields[field.name()].add(field.stringValue())
# return dict(all_fields)
|
Know Olive Branch High Class of 1961 graduates that are NOT on this List? Help us Update the 1961 Class List by adding missing names.
More 1961 alumni from Olive Branch HS have posted profiles on Classmates.com®. Click here to register for free at Classmates.com® and view other 1961 alumni.
The Olive Branch High class of '61 alumni are listed below. These are former students from Olive Branch High in Olive Branch, MS who graduated in 1961. Alumni listings below have either been searched for or they registered as members of this directory.
|
"""
Django settings for test_django project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '**6vq2nfe-!0r+wy$wgw9woam3#3$c!23ol-2+ax(l-=oeluhd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'music',
'login',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'test_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
# connect to sqlite3 database
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
### connect to postgres database
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'music',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 8000
|
Our amazement still plays like a favorite song on the radio.
beeps, alarms, no more rotary dial sounds for us.
blue dress highlighting your beauty.
& a river we ride.
listening for the door to open & slam shut.
with us & without us.
in our love: loving love.
|
from upseto import gitwrapper
from upseto import run
import subprocess
class RootfsLabel:
def __init__(self, rootfs, product="rootfs"):
self._rootfs = rootfs
self._product = product
if rootfs == "THIS":
self._label = run.run([
"solvent", "printlabel", "--thisProject", "--product=%s" % (self._product,)]).strip()
wrapper = gitwrapper.GitWrapper(".")
self._hint = wrapper.originURLBasename()
elif self._labelExists(self._rootfs):
self._label = self._rootfs
self._hint = self._rootfs
elif "__" in self._rootfs:
repository, product = self._rootfs.split("__")
self._label = run.run([
"solvent", "printlabel", "--repositoryBasename", repository, "--product", product]).strip()
self._hint = repository
else:
self._label = run.run([
"solvent", "printlabel", "--repositoryBasename", rootfs,
"--product=%s" % (self._product,)]).strip()
self._hint = rootfs
def label(self):
return self._label
def imageHint(self):
return self._hint
def _labelExists(self, label):
with open("/dev/null", "w") as out:
return subprocess.call(["solvent", "labelexists", "--label", label], stdout=out, stderr=out) == 0
|
Get an overview of the entrepreneurship methods that will enable you to create, identify, shape, and act on opportunities in a variety of contexts and organizations.
Come learn how to properly protect your business from intellectual theft and secure your ideas for long-term success. Our licensed attorney will guide your business to the finish line.
Come learn more about new trends in business technology to better assist you in managing your business, website, apps, and software, and in reaching your audience.
Come learn new ways to raise seed and growth capital from venture capitalists, grants, business angels, investment banking, and commercial banking sources, and gain access to capital resources.
APRIL 29 – Marketing for Entrepreneurs Come Learn how to use social media and current marketing trends to help in your business.
All classes are on Mondays at 6:30 pm in the ASU Center for Entrepreneurship and Innovation at 1951 SkySong. Ready, Set, Go Foundation is led by the dynamic, award-winning, entrepreneur, OD Harris. For close to 20 years, Mr. Harris has assisted and guided entrepreneurs to success with strategic methods, and collaborative, interactive courses. Ready, Set, Go Foundation members learn best practices, and ethical policies to participate with an ever-expanding client base while becoming engaged within their local communities. Speakers are held to the highest standards in their industry, and professional practices.
|
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.dashboards.newtouch.models import Server,Service
def get_available_services():
services = Service.objects.all()
return ((service.name, service.name) for service in services)
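# NOTE: get_available_services() returns a generator. Used as
# `choices=get_available_services()` below, it is evaluated once at import
# time and a generator can only be iterated once; returning a list (or, on
# Django versions that support callable choices, passing the callable itself)
# is a safer pattern.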
class ServerEditServicesForm(forms.SelfHandlingForm):
services_available = forms.MultipleChoiceField(label=_('services_available'),
widget=forms.CheckboxSelectMultiple,
choices=get_available_services())
def __init__(self, request, *args, **kwargs):
super(ServerEditServicesForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
server = Server.objects.get(pk=self.initial['pk'])
server.services.clear()
print server.services.all()
for service in data['services_available']:
server.services.add(Service.objects.get(name=service).id)
server.save()
message = _('Successfully Add Services %s') % (self.initial['pk'])
messages.success(request, message)
except Exception:
exceptions.handle(request, _('Unable to Add Services.'))
return True
class EditServerForm(forms.SelfHandlingForm):
snmp_version = forms.CharField(label=_("SNMP Version"),
max_length=255)
snmp_commit = forms.CharField(label=_("SNMP Commit"),
max_length=255)
ssh_name = forms.CharField(label=_("SSH Name"),
max_length=255,
required=False)
ssh_key = forms.CharField(label=_("SSH Key"),
max_length=255,
required=False)
def __init__(self, request, *args, **kwargs):
super(EditServerForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
pk = self.initial['pk']
snmp_version = data['snmp_version']
snmp_commit = data['snmp_commit']
ssh_name = data['ssh_name']
ssh_key = data['ssh_key']
try:
Server.objects.filter(pk=pk).update(snmp_version=snmp_version,
snmp_commit=snmp_commit,
ssh_name=ssh_name,
ssh_key=ssh_key)
server_name = Server.objects.get(pk = pk).name
message = _('Successfully update Server %s') % (server_name)
messages.success(request, message)
except Exception:
exceptions.handle(request, _('Unable to update the Server.'))
return True
|
J & K Home Furnishings in Myrtle Beach, SC is an authorized dealer of Sealy Products. Sealy continues their quest for the world to sleep better for decades to come. Milestones include the introduction of the Sealy Posturepedic mattress. Being the first mattress company to display and advertise a king-sized bed. Creating an Orthopedic Advisory Board. Reinventing the innerspring coil and the box spring. Patenting a variety of groundbreaking technologies, including Posture Channels and Pressure Relief Inlays designed to relieve pressure points that cause tossing and turning. And the list continues to grow to this day. In late 1983, Sealy purchased the Stearns & Foster Company. True to the Sealy heritage, these mattresses redefined luxury and expressed a dedication to craftsmanship and intricate detail. Making it a perfect and sound addition to the Sealy family of beds.
So if you are looking for Sealy products in Myrtle Beach, North Myrtle Beach, Murrells Inlet, Litte River, Shallotte, Garden City, Sunset Beach, Holden Beach, Oak Island, Florence and Wilmington, or if you have any questions about Sealy products, please feel free to call us at 843-361-1616 (N. Myrtle Beach) or 843-249-1882 (Little River) or simply stop by J & K Home Furnishings at any time and we would be glad to help you.
|
"""
Tests for branding page
"""
import datetime
from pytz import UTC
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.utils import override_settings
from django.test.client import RequestFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import editable_modulestore
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
import student.views
FEATURES_WITH_STARTDATE = settings.FEATURES.copy()
FEATURES_WITH_STARTDATE['DISABLE_START_DATES'] = False
FEATURES_WO_STARTDATE = settings.FEATURES.copy()
FEATURES_WO_STARTDATE['DISABLE_START_DATES'] = True
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class AnonymousIndexPageTest(ModuleStoreTestCase):
"""
Tests that anonymous users can access the '/' page. Requires courses with a start date.
"""
def setUp(self):
self.store = editable_modulestore()
self.factory = RequestFactory()
self.course = CourseFactory.create()
self.course.days_early_for_beta = 5
self.course.enrollment_start = datetime.datetime.now(UTC) + datetime.timedelta(days=3)
self.store.update_item(self.course)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_none_user_index_access_with_startdate_fails(self):
"""
This is a regression test for a bug where the incoming user is
anonymous and start dates are being checked. It replaces a previous
test as it solves the issue in a different way
"""
request = self.factory.get('/')
request.user = AnonymousUser()
student.views.index(request)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_anon_user_with_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
@override_settings(FEATURES=FEATURES_WO_STARTDATE)
def test_anon_user_no_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_allow_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the default setting is to ALLOW iframing
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'ALLOW')
@override_settings(X_FRAME_OPTIONS='DENY')
def test_deny_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the override value is honored
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'DENY')
|
April 15, 2019 - Fucked Asians While Hogtied!
April 15, 2019 - Hogtied Asians In Uniforms.
April 15, 2019 - Hogtied And Tortured Asians!
April 15, 2019 - Fucked While Hogtied!
April 15, 2019 - Asian Slaves Hogtied!
April 15, 2019 - Hogtied Asians With Ball GAgs!
|
#!/usr/bin/env python3
# this should be run at NIF-Ontology 9ef5b9e63d60f92cd01733be9d480ac3e5aee31c
# TODO need to retrieve the FMA hierarchy...
import os
from collections import defaultdict, namedtuple
import rdflib
from rdflib import URIRef, RDFS, RDF, OWL
from rdflib.namespace import SKOS
import requests
from pyontutils.scigraph import Vocabulary, Graph
from pyontutils.utils import TODAY, async_getter, TermColors as tc
from pyontutils.scig import scigPrint
from pyontutils.hierarchies import creatTree, flatten
from pyontutils.core import devconfig, OntMeta, makePrefixes, makeGraph
from pyontutils.core import NIFRID, oboInOwl
from IPython import embed
sgg = Graph(cache=True)
sgv = Vocabulary(cache=True)
Query = namedtuple('Query', ['root','relationshipType','direction','depth'])
CON = oboInOwl.consider
DBX = oboInOwl.hasDbXref # FIXME also behaves as objectProperty :/
AID = oboInOwl.hasAlternativeId
IRBC = NIFRID.isReplacedByClass
PREFIXES = makePrefixes('UBERON',
'ro',
'owl',
'skos',
)
NIFPREFIXES = makePrefixes('NIFGA',
'oboInOwl',
'replacedBy',
)
NIFPREFIXES.update(PREFIXES)
nifga_path = devconfig.ontology_local_repo + '/ttl/NIF-GrossAnatomy.ttl'
uberon_path = devconfig.ontology_local_repo + '/ttl/external/uberon.owl'
uberon_bridge_path = 'http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-nifstd.owl'
#bridge_path = os.path.expanduser('~/git/NIF-Ontology/ttl/uberon-bridge-to-nifstd.ttl') # scigraph's got us
#uberon_obsolete = {'UBERON:0022988', # obsolete regional part of thalamaus
#'UBERON:0014606', # replaced by UBERON:0002434
#}
# TODO need to unpack all the oboInOwl:hasAlternativeId entries for the purposes of resolution... (madness)
manual = {'NIFGA:nlx_144456':'UBERON:0034918', # prefer over UBERON:0002565, see note on UBERON:0034918
'NIFGA:birnlex_1248':'UBERON:0002434', # fix for what is surely an outdated bridge
'NIFGA:nlx_anat_20081242':'UBERON:0004073', # as of the latest version of uberon, 'UBERON:0004073' replaces 'UBERON:0019281'
'NIFGA:nlx_59721':'UBERON:0001944', # (equivalentClass NIFGA:nlx_59721 NIFGA:birnlex_703) pollutes
'NIFGA:birnlex_703':'UBERON:0001944', # insurance
#'NIFGA:birnlex_1663':'UBERON:0002265', # FIXME this is in hasDbXref ... AND equivalentClass... wat
'NIFGA:birnlex_1191':'UBERON:0001885', # this was already replaced by NIFGA:birnlex_1178, the existing equiv assertion to UBERON:0035560 is also obsolete, so we are overriding so we don't have to chase it all down again
'NIFGA:birnlex_2598':'UBERON:0000044', # UBERON:0026602 is the alternative and is a bug from the old version of the uberon to nif bridge :/ this has been fixed in the nifgad branch of the ontology but has not been propagated to scigraph
'NIFGA:nlx_anat_20090702':'UBERON:0022327', # UBERON:0032288 is an alternate id for UBERON:0022327
'NIFGA:birnlex_864':'UBERON:0014450', # UBERON:0002994 is an alternate id for UBERON:0014450
'NIFGA:birnlex_2524':'UBERON:0006725', # UBERON:0028186 is an alternate id for UBERON:0006725
'NIFGA:nlx_anat_20081245':'UBERON:0002613', # was previously deprecated without a replaced by
'NIFGA:birnlex_726':'UBERON:0001954', # 726 was already deprecated; this is a good match detected by moar
'NIFGA:nlx_anat_20081252':'UBERON:0014473', # already deprecated, found via moar
'NIFGA:nifext_15':'NOREP', # does not exist
'NIFGA:birnlex_9':'NOREP', # unused biophysical immaterial entity
# afferent roles that were never well developed
'NIFGA:nlx_anat_1010':'NOREP',
'NIFGA:nlx_anat_1011003':'NOREP',
'NIFGA:nlx_anat_1011004':'NOREP',
'NIFGA:nlx_anat_1011005':'NOREP',
'NIFGA:nlx_anat_1011006':'NOREP',
'NIFGA:nlx_anat_1011007':'NOREP',
'NIFGA:nlx_anat_1011008':'NOREP',
'NIFGA:nlx_anat_1011009':'NOREP',
'NIFGA:nlx_anat_1011010':'NOREP',
'NIFGA:nlx_anat_1011011':'NOREP',
}
preflabs = ( # pulled from conflated
'NIFGA:birnlex_2596',
'NIFGA:birnlex_4101',
'NIFGA:birnlex_1184',
'NIFGA:birnlex_703',
'NIFGA:birnlex_1117',
'NIFGA:nlx_143552',
'NIFGA:birnlex_1341',
'NIFGA:birnlex_1335',
'NIFGA:birnlex_1400',
'NIFGA:birnlex_1519', # NOTE: nerve root and nerve fiber bundle are being conflated...
'NIFGA:birnlex_1277',
'NIFGA:birnlex_2523',
'NIFGA:birnlex_2528', # a real exact duple with 2529 apparently
'NIFGA:birnlex_2651', # a real exact duple with 2654 apparently
'NIFGA:nlx_anat_20081224', # other option is 'NIFGA:birnlex_932' -> Lingula
'NIFGA:nlx_anat_20081235', # other option is 'NIFGA:birnlex_1165' -> Nodulus
'NIFGA:birnlex_1588',
'NIFGA:birnlex_1106',
'NIFGA:birnlex_1582',
'NIFGA:birnlex_1589',
'NIFGA:birnlex_1414',
'NIFGA:birnlex_4081',
)
cross_over_issues = 'NIFSUB:nlx_subcell_100205'
wat = 'NIFGA:nlx_144456'
anns_to_port = [] # (SKOS.prefLabel, ) # skipping this for now :/
def invert(dict_):
output = defaultdict(list)
for k,v in dict_.items():
output[v].append(k)
return dict(output)
def review_reps(dict_):
for k,v in invert(dict_).items():
if k is None:
continue
if len(v) > 1:
kn = sgv.findById(k)
print(k, kn['labels'][0])
for s in kn['synonyms']:
print(' ' * 4, s)
for v_ in v:
n = sgv.findById(v_)
print(' ' * 8, v_, n['labels'][0])
for s in n['synonyms']:
print(' ' * 12, s)
def review_norep(list_):
print('List of norep (aka already deprecated) to review')
for curie in list_:
n = sgg.getNode(curie)
scigPrint.pprint_node(n)
def do_deprecation(replaced_by, g, additional_edges, conflated):
bmeta = OntMeta('http://ontology.neuinfo.org/NIF/ttl/bridge/',
'uberon-bridge',
'NIFSTD Uberon Bridge',
'UBERON Bridge',
('This is the bridge file that holds local NIFSTD additions to uberon. '
'This is also staging for any changes that we want to push upstream.'),
TODAY())
ontid = bmeta.path + bmeta.filename + '.ttl'
bridge = makeGraph('uberon-bridge', PREFIXES)
bridge.add_ont(ontid, *bmeta[2:])
graph = makeGraph('NIF-GrossAnatomy', NIFPREFIXES, graph=g)
#graph.g.namespace_manager._NamespaceManager__cache = {}
#g.namespace_manager.bind('UBERON','http://purl.obolibrary.org/obo/UBERON_') # this has to go in again because we reset g FIXME
udone = set('NOREP')
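# NOTE: set('NOREP') builds a set of single characters {'N','O','R','E','P'};
# a set containing the whole string, e.g. {'NOREP'}, was probably intended.
# udone is only consulted in the disabled `if False` branch below, so this has
# no effect as written.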
uedges = defaultdict(lambda:defaultdict(set))
def inner(nifga, uberon):
# check neuronames id TODO
udepr = sgv.findById(uberon)['deprecated'] if uberon != 'NOREP' else False
if udepr:
# add xref to the now deprecated uberon term
graph.add_trip(nifga, 'oboInOwl:hasDbXref', uberon)
#print('Replacement is deprecated, not replacing:', uberon)
graph.add_trip(nifga, RDFS.comment, 'xref %s is deprecated, so not using replacedBy:' % uberon)
else:
# add replaced by -> uberon
graph.add_trip(nifga, 'replacedBy:', uberon)
# add deprecated true (ok to do twice...)
graph.add_trip(nifga, OWL.deprecated, True)
# review nifga relations, specifically has_proper_part, proper_part_of
# put those relations on the uberon term in the
# if there is no uberon term raise an error so we can look into it
#if uberon not in uedges:
#uedges[uberon] = defaultdict(set)
resp = sgg.getNeighbors(nifga)
edges = resp['edges']
if nifga in additional_edges:
edges.append(additional_edges[nifga])
include = False # set this to True when running anns
for edge in edges: # FIXME TODO hierarchy extraction and porting
#print(edge)
if udepr: # skip everything if uberon is deprecated
include = False
hier = False
break
sub = edge['sub']
obj = edge['obj']
pred = edge['pred']
hier = False
if pred == 'subClassOf':
pred = RDFS.subClassOf
continue
elif pred == 'equivalentClass':
pred = OWL.equivalentClass
continue
elif pred == 'isDefinedBy':
pred = RDFS.isDefinedBy
continue
elif pred == 'http://www.obofoundry.org/ro/ro.owl#has_proper_part':
hier = True
include = True
elif pred == 'http://www.obofoundry.org/ro/ro.owl#proper_part_of':
hier = True
include = True
elif pred == 'ilx:partOf':
hier = True
include = True
if sub == nifga:
try:
obj = replaced_by[obj]
if obj == 'NOREP':
hier = False
except KeyError:
print('not in replaced_by', obj)
if type(obj) == tuple: continue # TODO
if hier:
if uberon not in uedges[obj][pred]:
uedges[obj][pred].add(uberon)
bridge.add_hierarchy(obj, pred, uberon)
else:
#bridge.add_trip(uberon, pred, obj)
pass
elif obj == nifga:
try:
sub = replaced_by[sub]
if sub == 'NOREP':
hier = False
except KeyError:
print('not in replaced_by', sub)
if type(sub) == tuple: continue # TODO
if hier:
if sub not in uedges[uberon][pred]:
uedges[uberon][pred].add(sub)
bridge.add_hierarchy(uberon, pred, sub)
else:
#bridge.add_trip(sub, pred, uberon)
pass
if False and uberon not in udone and include: # skip porting annotations and labels for now
#udone.add(uberon)
try:
label = sgv.findById(uberon)['labels'][0]
except IndexError:
WAT = sgv.findById(uberon)
embed()
bridge.add_class(uberon, label=label)
# annotations to port
for p in anns_to_port:
os_ = list(graph.g.objects(graph.expand(nifga), p))
for o in os_:
if label.lower() != o.lower(): # we can simply capitalize labels
print(label.lower())
print(o.lower())
print()
bridge.add_trip(uberon, p, o)
if p == SKOS.prefLabel and not os_:
if uberon not in conflated or (uberon in conflated and nifga in preflabs):
l = list(graph.g.objects(graph.expand(nifga), RDFS.label))[0]
bridge.add_trip(uberon, SKOS.prefLabel, l) # port label to prefLabel if no prefLabel
for nifga, uberon in replaced_by.items():
if type(uberon) == tuple:
print(uberon)
for ub in uberon:
print(ub)
inner(nifga, ub)
elif uberon == 'NOREP':
graph.add_trip(nifga, OWL.deprecated, True) # TODO check for missing edges?
elif uberon is None:
continue # BUT TODAY IS NOT THAT DAY!
else:
inner(nifga, uberon)
return graph, bridge, uedges
def print_report(report, fetch=False):
for eid, r in report.items():
out = ('**************** Report for {} ****************'
'\n\tNRID: {NRID}\n\tURID: {URID} {UDEP}\n\tMATCH: {MATCH}\n')
#if not r['MATCH']:
print(out.format(eid, **r))
if fetch:
scigPrint.pprint_node(sgg.getNode('NIFGA:' + eid))
if r['NRID']: scigPrint.pprint_node(sgg.getNode(r['NRID']))
if r['URID']: scigPrint.pprint_node(sgg.getNode(r['URID']))
def print_trees(graph, bridge):
PPO = 'ro:proper_part_of'
HPP = 'ro:has_proper_part'
hpp = HPP.replace('ro:', graph.namespaces['ro'])
ppo = PPO.replace('ro:', graph.namespaces['ro'])
a, b = creatTree(*Query(tc.red('birnlex_796'), HPP, 'OUTGOING', 10), # FIXME seems to be a last one wins bug here with birnlex_796 vs NIFGA:birnlex_796 depending on the hash seed...
json=graph.make_scigraph_json(HPP))
c, d = creatTree(*Query('NIFGA:birnlex_796', hpp, 'OUTGOING', 10), graph=sgg)
j = bridge.make_scigraph_json(HPP) # issue https://github.com/RDFLib/rdflib/pull/661
e, f = creatTree(*Query('UBERON:0000955', HPP, 'OUTGOING', 10), json=j)
k_, l_ = creatTree(*Query('NIFGA:nlx_anat_101177', ppo, 'INCOMING', 10), graph=sgg)
merge = dict(d[-1]) # full tree with ppo converted to hpp
merge['nodes'].extend(l_[-1]['nodes'])
merge['edges'].extend([{'sub':e['obj'], 'pred':hpp, 'obj':e['sub']} for e in l_[-1]['edges']])
m_, n_ = creatTree(*Query('NIFGA:birnlex_796', hpp, 'OUTGOING', 10), json=merge)
print('nifga dep')
print(a)
print('nifga live')
print(c)
print('new bridge')
print(e)
print('nifga total (both directions)')
print(m_)
print('nifga white matter')
print(k_)
return a, b, c, d, e, f, k_, l_, m_, n_
def new_replaced_by(ids, existing):
out = {}
for k in ids:
if k in existing:
out[k] = existing[k]
else:
out[k] = None
return out
def make_uberon_graph():
#ub = rdflib.Graph()
#ub.parse(uberon_path) # LOL rdflib your parser is slow
SANITY = rdflib.Graph()
ont = requests.get(uberon_bridge_path).text
split_on = 263
prefs = ('xmlns:NIFSTD="http://uri.neuinfo.org/nif/nifstd/"\n'
'xmlns:UBERON="http://purl.obolibrary.org/obo/UBERON_"\n')
ont = ont[:split_on] + prefs + ont[split_on:]
SANITY.parse(data=ont)
u_replaced_by = {}
for s, o in SANITY.subject_objects(OWL.equivalentClass):
nif = SANITY.namespace_manager.qname(o)
uberon = SANITY.namespace_manager.qname(s)
if nif in u_replaced_by:
one = u_replaced_by[nif]
u_replaced_by[nif] = one, uberon
print('WE GOT DUPES', nif, one, uberon) # TODO
u_replaced_by[nif] = uberon
#print(s, o)
#print(nif, uberon)
return u_replaced_by
def make_neurolex_graph():
# neurolex test stuff
nlxpref = {'ilx':'http://uri.interlex.org/base/'}
nlxpref.update(NIFPREFIXES)
neurolex = makeGraph('neurolex-temp', nlxpref)
neurolex.g.parse('/tmp/neurolex_basic.ttl', format='turtle')
ILXPO = 'ilx:partOf'
nj = neurolex.make_scigraph_json(ILXPO)
g_, h = creatTree(*Query('NIFGA:birnlex_796', ILXPO, 'INCOMING', 10), json=nj)
i_, j_ = creatTree(*Query('NIFGA:nlx_412', ILXPO, 'INCOMING', 10), json=nj)
brht = sorted(set(flatten(h[0],[])))
wmht = sorted(set(flatten(j_[0],[])))
ufixedrb = {'NIFGA:' + k.split(':')[1]:v for k, v in u_replaced_by.items()}
b_nlx_replaced_by = new_replaced_by(brht, ufixedrb)
w_nlx_replaced_by = new_replaced_by(wmht, ufixedrb)
additional_edges = defaultdict(list) # TODO this could be fun for the future but is a nightmare atm
for edge in h[-1]['edges'] + j_[-1]['edges']:
additional_edges[edge['sub']] = edge
additional_edges[edge['obj']] = edge
#filter out bad edges because we are lazy
additional_edges = {k:v for k, v in additional_edges.items()
if k in b_nlx_replaced_by or k in w_nlx_replaced_by}
print('neurolex tree') # computed above
print(g_)
print(i_)
return additional_edges
def do_report(nif_bridge, ub_bridge, irbcs):
report = {}
for existing_id, nif_uberon_id in nif_bridge.items():
cr = {}
cr['UDEP'] = ''
if nif_uberon_id == 'NOREP':
cr['NRID'] = ''
else:
cr['NRID'] = nif_uberon_id
if 'NIFGA:' + existing_id in manual:
cr['URID'] = ''
if nif_uberon_id == 'NOREP':
match = False
else:
match = 'MANUAL'
elif existing_id in ub_bridge:
ub_uberon_id = ub_bridge[existing_id]
cr['URID'] = ub_uberon_id
if type(nif_uberon_id) == tuple:
if ub_uberon_id in nif_uberon_id:
match = True
else:
match = False
elif ub_uberon_id != nif_uberon_id:
match = False
else:
match = True
elif 'NIFGA:' + existing_id in irbcs:
er, ub = irbcs['NIFGA:' + existing_id]
cr['NRID'] = er
cr['URID'] = ub
match = 'EXISTING REPLACED BY (%s -> %s -> %s)' % (existing_id, er, ub)
else:
match = False
cr['URID'] = ''
if cr['NRID']:
meta = sgg.getNode(nif_uberon_id)['nodes'][0]['meta']
if 'http://www.w3.org/2002/07/owl#deprecated' in meta and meta['http://www.w3.org/2002/07/owl#deprecated']:
cr['UDEP'] = 'Deprecated'
cr['MATCH'] = match
report[existing_id] = cr
return report
def make_nifga_graph(_doprint=False):
# use equivalent class mappings to build a replacement mapping
g = rdflib.Graph()
g.parse(nifga_path, format='turtle')
getQname = g.namespace_manager.qname
classes = sorted([getQname(_) for _ in g.subjects(RDF.type, OWL.Class) if type(_) is URIRef])
curies = ['NIFGA:' + n for n in classes if ':' not in n]
matches = async_getter(sgv.findById, [(c,) for c in curies])
replaced_by = {}
exact = {}
internal_equivs = {}
irbcs = {}
def equiv(curie, label):
if curie in manual:
replaced_by[curie] = manual[curie]
return manual[curie]
ec = sgg.getNeighbors(curie, relationshipType='equivalentClass')
nodes = [n for n in ec['nodes'] if n['id'] != curie]
if len(nodes) > 1:
#print('wtf node', [n['id'] for n in nodes], curie)
for node in nodes:
id_ = node['id']
label_ = node['lbl']
if id_.startswith('UBERON'):
if curie in replaced_by:
one = replaced_by[curie]
replaced_by[curie] = one, id_
print('WE GOT DUPES', curie, label, one, id_) # TODO
else:
replaced_by[curie] = id_
else:
internal_equivs[curie] = id_
elif not nodes:
node = sgg.getNode(curie)['nodes'][0]
if OWL.deprecated.toPython() in node['meta']:
print('THIS CLASS IS DEPRECATED', curie)
lbl = node['lbl']
if lbl.startswith('Predominantly white regional') or lbl.startswith('Predominantly gray regional'):
print('\tHE\'S DEAD JIM!', lbl, node['id'])
replaced_by[curie] = 'NOREP'
if IRBC in node['meta']:
existing_replaced = node['meta'][IRBC][0]
ec2 = sgg.getNeighbors(existing_replaced, relationshipType='equivalentClass')
print('\tFOUND ONE', existing_replaced)
#scigPrint.pprint_node(sgg.getNode(existing_replaced))
if ec2['edges']: # pass the buck if we can
print('\t',end='')
scigPrint.pprint_edge(ec2['edges'][0])
rb = ec2['edges'][0]['obj']
print('\tPASSING BUCK : (%s -> %s -> %s)' % (curie, existing_replaced, rb))
irbcs[curie] = (existing_replaced, rb)
replaced_by[curie] = rb
return nodes
else:
er_node = sgv.findById(existing_replaced)
if not er_node['deprecated']:
if not er_node['curie'].startswith('NIFGA:'):
print('\tPASSING BUCK : (%s -> %s)' % (curie, er_node['curie']))
return nodes
print('\tERROR: could not pass buck, we are at a dead end at', er_node) # TODO
print()
moar = [t for t in sgv.findByTerm(label) if t['curie'].startswith('UBERON')]
if moar:
#print(moar)
#replaced_by[curie] = moar[0]['curie']
if len(moar) > 1:
print('WARNING', curie, label, [(m['curie'], m['labels'][0]) for m in moar])
for node in moar:
#if node['curie'] in uberon_obsolete: # node['deprecated']?
#continue
ns = sgg.getNode(node['curie'])
assert len(ns['nodes']) == 1, "WTF IS GOING ON %s" % node['curie']
ns = ns['nodes'][0]
if _doprint:
print('Found putative replacement in moar: (%s -> %s)' % (curie, ns['id']))
if DBX in ns['meta']:
print(' ' * 8, node['curie'], ns['meta'][DBX],
node['labels'][0], node['synonyms'])
if AID in ns['meta']:
print(' ' * 8, node['curie'], ns['meta'][AID],
node['labels'][0], node['synonyms'])
if CON in ns['meta']:
print(' ' * 8, node['curie'], ns['meta'][CON],
node['labels'][0], node['synonyms'])
replaced_by[curie] = ns['id']
else:
replaced_by[curie] = None
if False: # review
print('NO FORWARD EQUIV', tc.red(curie), label) # TODO
for k,v in sorted(sgg.getNode(curie)['nodes'][0]['meta'].items()):
if type(v) == iter:
print(' ' * 4, k)
for _ in v:
print(' ' * 8, _)
else:
print(' ' * 4, k, v)
else:
node = nodes[0]
replaced_by[curie] = node['id']
exact[curie] = node['id']
return nodes
equivs = [equiv(c['curie'], c['labels'][0]) for c in matches] # async causes print issues :/
return g, matches, exact, internal_equivs, irbcs, replaced_by
def main():
u_replaced_by = make_uberon_graph()
additional_edges = make_uberon_graph()
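# NOTE: this second call repeats make_uberon_graph(); make_neurolex_graph() is
# the function that actually returns additional_edges. As written the value is
# unused, since do_deprecation is called with {} below.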
g, matches, exact, internal_equivs, irbcs, replaced_by = make_nifga_graph()
#review_norep([m['curie'] for m in matches if m['deprecated']])
#review_reps(exact) # these all look good
#review_reps(replaced_by) # as do these
#rpob = [_['id'] for _ in sgg.getNeighbors('NIFGA:birnlex_1167', relationshipType='subClassOf')['nodes'] if 'UBERON:' not in _['id']] # these hit pretty much everything because of how the subclassing worked out, so can't use this
regional_no_replace = {k:v for k,v in replaced_by.items() if not v and sgv.findById(k)['labels'][0].startswith('Regional')}
for k in regional_no_replace:
replaced_by[k] = 'NOREP' # yes, deprecate these
#or sgv.findById(k)['labels'][0].startswith('Predominantly white regional')
#or sgv.findById(k)['labels'][0].startswith('Predominantly gray regional')
# TODO predominately gray region -> just deprecate completely these cause pretty much all of the no_match problems
# predominantly white regional part
# TODO add comments in do_deprecation
asdf = {}
for n, u in replaced_by.items():
if u in asdf:
asdf[u].add(n)
else:
asdf[u] = {n}
deprecated = [_ for _ in replaced_by if sgv.findById(_)['deprecated']]
multi = {k:v for k, v in asdf.items() if len(v) > 1}
conflated = {k:[_ for _ in v if _ not in deprecated] for k, v in multi.items() if len([_ for _ in v if _ not in deprecated]) > 1 and k != 'NOREP'}
#_ = [print(k, sgv.findById(k)['labels'][0], '\n\t', [(_, sgv.findById(_)['labels'][0]) for _ in v]) for k, v in sorted(conflated.items())]
graph, bridge, uedges = do_deprecation(replaced_by, g, {}, conflated) # additional_edges) # TODO
bridge.write()
graph.write()
#trees = print_trees(graph, bridge)
# we do this because each of these have different prefixes :(
nif_bridge = {k.split(':')[1]:v for k, v in replaced_by.items()} # some are still None
ub_bridge = {k.split(':')[1]:v for k, v in u_replaced_by.items()}
report = do_report(nif_bridge, ub_bridge, irbcs)
double_checked = {i:r for i, r in report.items() if r['MATCH']} # aka exact from above
dc_erb = {k:v for k, v in double_checked.items() if v['NRID'] != v['URID']}
no_match = {i:r for i, r in report.items() if not r['MATCH']}
no_match_udep = {i:r for i, r in no_match.items() if r['UDEP']}
no_match_not_udep = {i:r for i, r in no_match.items() if not r['UDEP']}
no_match_not_udep_region = {i:r for i, r in no_match.items()
if not r['UDEP'] and (
sgv.findById('NIFGA:' + i)['labels'][0].startswith('Regional') or
sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly gray regional') or
sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly white regional')
)}
no_match_not_udep_not_region = {i:r for i, r in no_match.items()
if not r['UDEP'] and (
not sgv.findById('NIFGA:' + i)['labels'][0].startswith('Regional') and
not sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly gray regional') and
not sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly white regional')
)}
no_replacement = {i:r for i, r in report.items() if not r['NRID']}
very_bad = {i:r for i, r in report.items() if not r['MATCH'] and r['URID'] and not r['UDEP']}
fetch = True
#print('\n>>>>>>>>>>>>>>>>>>>>>> No match uberon dep reports\n')
#print_report(no_match_udep, fetch) # These are all dealt with correctly in do_deprecation
print('\n>>>>>>>>>>>>>>>>>>>>>> Existing Replaced by\n')
#print_report(dc_erb)
#print('\n>>>>>>>>>>>>>>>>>>>>>> No match not dep reports\n')
#print_report(no_match_not_udep, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No match not dep +region reports\n')
#print_report(no_match_not_udep_region, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No match not dep -region reports\n')
#print_report(no_match_not_udep_not_region, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No replace reports\n')
#print_report(no_replacement, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No match and not deprecated reports\n')
#print_report(very_bad, fetch)
print('Total count', len(nif_bridge))
print('Match count', len(double_checked))
print('No Match count', len(no_match))
print('No Match +udep count', len(no_match_udep))
print('No Match -udep count', len(no_match_not_udep))
print('No Match -udep +region count', len(no_match_not_udep_region))
print('No Match -udep -region count', len(no_match_not_udep_not_region))
print('No replace count', len(no_replacement)) # there are none with a URID and no NRID
print('No match not deprecated count', len(very_bad))
print('Mismatch between No match and No replace', set(no_match_not_udep) ^ set(no_replacement))
assert len(nif_bridge) == len(double_checked) + len(no_match)
assert len(no_match) == len(no_match_udep) + len(no_match_not_udep)
assert len(no_match_not_udep) == len(no_match_not_udep_region) + len(no_match_not_udep_not_region)
#[scigPrint.pprint_node(sgg.getNode('NIFGA:' + _)) for _ in no_match_not_udep_not_region]
#print('>>>>>>>>>>>>> Deprecated')
#[scigPrint.pprint_node(sgg.getNode('NIFGA:' + _))
#for _ in no_match_not_udep_not_region if sgv.findById('NIFGA:' + _)['deprecated']]
#print('>>>>>>>>>>>>> Not deprecated')
#[scigPrint.pprint_node(sgg.getNode('NIFGA:' + _))
#for _ in sorted(no_match_not_udep_not_region) if not sgv.findById('NIFGA:' + _)['deprecated']]
embed()
if __name__ == '__main__':
main()
|
The Records Division of the El Paso Police Department consists of skilled civilians and supervisors who collect, process and maintain crime records, other information and provide photographic service to the Department and external customers.
Our customers include the public, the courts, and other government agencies in partnerships for providing services to our community. We are implementing a new generation of optical imaging for storing documents, and improved electronic systems for processing police data/information.
Records can be reached at 915-212-4267.
File non-emergency reports on the telephone. If you don’t need an officer dispatched to your location, please call the Telephone Reporting Unit at (915) 832-4436 during the hours of 9:00 a.m. to 6:00 p.m., Monday thru Friday to make a Non-emergency incident or accident report.
Call 911 for Emergencies. If you need to have a police officer dispatched to your location immediately, if someone is injured, or if you have any emergency situation, call “911”. This includes any “crime still in progress” or any report that involves “evidence” to be collected.
File non-emergency reports using the Internet. You may file a police report on-line for selected offenses (listed below) via the Crime Reporting Form, or on the homepage click on Online Reports, then click Crime Reporting. You will need a valid return e-mail address to make an on-line report. A police officer will NOT be dispatched to your location. Reports that include “evidence” will not be processed on-line.
Once an On-Line Report is received in our designated e-mail box, a Records Specialist will notify the reporting party by e-mail and give the case number assigned for the report within 24 to 72 hours, depending on the volume of requests received.
All requests for reports and records must be in writing; however, you may submit your request in person, or by US mail.
If you have the case number, please provide that as well.
If you have questions, please contact Records at 915-212-4267 for assistance.
For other releasable information (other than the public information copy of police reports) you may submit a written request to our Open Records Desk. For some of these requests, ultimately the Texas Attorney General’s office may be asked to decide whether the information can be released under the Texas Public Information Act (Govt. Code, Chapter 552). Please be specific in the written description of the document or information you are requesting.
Do not send payment with your request. You will be charged the appropriate fees when the documents are delivered or picked up.
We will NOT email or fax the responsive documents. You must provide a mailing address on all requests.
Please do not include attachments with your request.
Clearance letters are prepared and issued, after sufficient research, to persons requesting their own background check.
You must present proper identification (providing a fingerprint may speed and ensure the accuracy of the research) and pay the required fee to obtain the clearance letter.
To request a background check on another person requires a signed and notarized release form from that person who is the subject of the check.
PLEASE NOTE: Background Checks performed by the EPPD are limited to information in local EPPD records. They do NOT include information from other law enforcement agencies or locations in Texas or elsewhere in the United States of America.
ARE CLOSED FOR TRAINING FROM 2:00P.M. TO 5:00P.M.
NOTE: There is a fee of $0.10 per page payable at the time you pick up your report. Please have your case number available/ready at the time you pick up your report.
Send to El Paso Police Dept. Records Division, 911 N. Raynor St., El Paso, TX 79903. Include your postal mailing (return) address. Allow 10 business days for a response. The fee for reports requested by mail is $1.50.
OFFENSE REPORTS and INCIDENT REPORTS: 10 cents per page for the first 50 pages; Additional charges for overhead, materials, and labor if more than 50 pages.
CERTIFIED ACCIDENT REPORTS: $6.00 plus $2.00 extra for a certified copy.
OPEN RECORDS PROCEDURE REQUESTS: 10 cents per page for the first 50 pages; There may be additional charges for overhead (including computer research time) materials, and labor if more than 50 pages.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RInteractivedisplaybase(RPackage):
"""Base package for enabling powerful shiny web displays of Bioconductor
objects.
The interactiveDisplayBase package contains the basic methods needed
to generate interactive Shiny based display methods for Bioconductor
objects."""
homepage = "https://bioconductor.org/packages/interactiveDisplayBase"
git = "https://git.bioconductor.org/packages/interactiveDisplayBase.git"
version('1.22.0', commit='4ce3cde1dabc01375c153ad614d77a5e28b96916')
version('1.20.0', commit='f40912c8af7afbaaf68c003a6e148d81cbe84df6')
version('1.18.0', commit='d07ea72a595877f27bf054f664f23e8f0304def8')
version('1.16.0', commit='a86aa586b589497f5449d36c2ce67a6b6055026d')
version('1.14.0', commit='e2ccc7eefdd904e3b1032dc6b3f4a28d08c1cd40')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-shiny', type=('build', 'run'))
|
YouTube has revealed it is receiving 3 billion views per day in a week that it celebrates its 6th anniversary.
In a blog post the video-sharing website, owned by Google, also revealed that it has now surpassed the 3 billion mark for views per day. This is a fifty per cent increase on last year. A little over a year ago we reported that it was receiving 2 billion views a day, and in its 6th year YouTube has continued to see staggering increases.
In addition to the high volumes of viewing traffic, this content driven site that encouraged the development of web 2.0 now sees 48 hours of footage uploaded every minute. Again, this is a huge increase from last year. Back in November, after reporting 35 hours a minute of uploads, YouTube set the challenge to its users to increase this to 48 hours, and they succeeded. March 2010 saw the site increasing in content by 24 hours each minute and since then this figure has doubled to 48 hours.
In 48 hours you could watch a whole series of 24 twice, fly to Australia and back, or watch one minute's worth of YouTube uploads. The sheer volume of content currently shared on the site, and the ever-increasing rate of uploads, mean it would be impossible to view all the footage in a lifetime.
YouTube also proves an exciting platform for advertisers with the volume of viewers it is obtaining. Although increased uploads are good in their own right, it is views that increase revenue for both YouTube and Google through advertising. In its 6 years YouTube has advanced its services to offer users whole TV series and YouTube Movies. The increased availability of smartphones and tablets has also helped YouTube to witness a healthy rise in viewers.
The first ever YouTube video was uploaded on 23 April 2005, titled Me at the Zoo, and was a 19 second clip of YouTube's founder at San Diego Zoo.
|
import logging
from pkg_resources import get_distribution
APP_NAME = __package__.split('.')[0]
_DIST = get_distribution(APP_NAME)
PROJECTDIR = _DIST.location
__version__ = _DIST.version
log = logging.getLogger(__name__)
RESERVED_PARAMS = [
'_start',
'_limit',
'_page',
'_fields',
'_count',
'_sort',
'_search_fields',
'_refresh_index',
]
def includeme(config):
from nefertari.resource import get_root_resource, get_resource_map
from nefertari.renderers import (
JsonRendererFactory, NefertariJsonRendererFactory)
from nefertari.utils import dictset
from nefertari.events import (
ModelClassIs, FieldIsChanged, subscribe_to_events,
add_field_processors)
log.info("%s %s" % (APP_NAME, __version__))
config.add_directive('get_root_resource', get_root_resource)
config.add_directive('subscribe_to_events', subscribe_to_events)
config.add_directive('add_field_processors', add_field_processors)
config.add_renderer('json', JsonRendererFactory)
config.add_renderer('nefertari_json', NefertariJsonRendererFactory)
if not hasattr(config.registry, '_root_resources'):
config.registry._root_resources = {}
if not hasattr(config.registry, '_resources_map'):
config.registry._resources_map = {}
# Map of {ModelName: model_collection_resource}
if not hasattr(config.registry, '_model_collections'):
config.registry._model_collections = {}
config.add_request_method(get_resource_map, 'resource_map', reify=True)
config.add_tween('nefertari.tweens.cache_control')
config.add_subscriber_predicate('model', ModelClassIs)
config.add_subscriber_predicate('field', FieldIsChanged)
Settings = dictset(config.registry.settings)
root = config.get_root_resource()
root.auth = Settings.asbool('auth')
|
Hello forum! Where can I get started with some live leads?
Which sites can I find leads for a crypto currency trading service?
[POC] Why VT scans shouldn't ever be trusted for detecting malicious web scripts.
Some BlackHaters are going to NDH ?
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': 'ADHOC SA',
'auto_install': False,
'installable': True,
'category': 'Tools',
'demo_xml': [
],
'depends': [
'base',
'mail'
],
'description': """
Partners User
=============
Adds partner-user related fields to the partner model and shows them in the partner view. Also adds an action that allows quick creation of a user.
To use the quick creation you must set a "template user" for the partner; you can do this through the context or by making the field visible.
""",
'license': 'AGPL-3',
'name': u'Partner User',
'test': [],
'data': [
'partner_view.xml',
'security/ir.model.access.csv',
],
'version': '8.0.1.1.0',
'website': 'www.adhoc.com.ar',
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Bristol City Council has recently announced the introduction of resident parking restrictions in Clifton, thereby reducing the number of car parking spaces available to local businesses.
Clifton High School and other local educational establishments have been provided with Faxi journey sharing technology by Bristol Council in an effort to encourage sustainable transport alternatives.
Now Clifton High School and other educational institutions (comprising over 1,000 staff) have adopted a policy under which car parking permits are issued as a priority to those engaged in car sharing. Only members of the Faxi group will be able to obtain a parking permit, a measure intended to give staff alternative transport options and to enforce localised sustainable travel.
Guy Cowper, Director of Facilities at Clifton High School, is liaising with Bristol City Council and Faxi to explore the future possibility for parking permits to be automatically allocated to those engaged in car sharing.
As Faxi develops its patented technology, which relies on GPS and geo-tracking to confirm when car sharing has taken place, prioritised parking spaces and permits give companies a way to adopt enforceable sustainable transport initiatives.
Faxi is working with numerous other nationwide organisations, local authorities and institutions to provide a similar solution over the next six months.
Start your own free Faxi journey sharing group now.
|
class check_privilege_revoke_all_context():
"""
check_privilege_revoke_all_context
Ensure 'ALL' Is Revoked from Unauthorized 'GRANTEE' on CONTEXT$
The CONTEXT$ table contains columns for the schema and name for a PL/SQL
package that will execute for a given application context.
"""
# References:
# http://www.davidlitchfield.com/AddendumtotheOracle12cCISGuidelines.pdf
# http://www.davidlitchfield.com/oracle_backdoors.pdf
TITLE = 'Revoke ALL from CONTEXT$'
CATEGORY = 'Privilege'
TYPE = 'sql'
SQL = "SELECT GRANTEE, PRIVILEGE FROM DBA_TAB_PRIVS WHERE TABLE_NAME = 'CONTEXT$'"
verbose = False
skip = False
result = {}
def do_check(self, *results):
self.result['level'] = 'GREEN'
output = ''
for rows in results:
for row in rows:
self.result['level'] = 'RED'
                output += row[0] + ' with ' + row[1] + ' on CONTEXT$\n'
if 'GREEN' == self.result['level']:
output = 'No user with grants to CONTEXT$.'
self.result['output'] = output
return self.result
def __init__(self, parent):
print('Performing check: ' + self.TITLE)
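# Illustrative usage sketch (an assumption, not part of the original scanner harness).
# Each positional argument to do_check() is one result set, i.e. an iterable of
# (GRANTEE, PRIVILEGE) rows returned by the SQL above.
if __name__ == '__main__':
    check = check_privilege_revoke_all_context(parent=None)
    report = check.do_check([('PUBLIC', 'SELECT')])
    print(report)  # roughly {'level': 'RED', 'output': 'PUBLIC with SELECT on CONTEXT$\n'}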
|
Worked perfectly on my Lucid Lynx (Acer Aspire laptop) box.
Unless there's a specific reason that you need to use weplab, I'd recommend that you try the aircrack-ng suite instead.
Distros from now on are going to adopt 'upstart', which is going to replace the /sbin/init daemon that manages services and tasks during boot (source: http://docs.kali.org/installation/tr...-driver-issues).
On the host system, iwconfig shows: wlan0 IEEE 802.11abgn ESSID:"some ssid" Mode:Managed Frequency:2.412 GHz Access Point: Bit Rate=78 Mb/s Tx-Power=15 dBm Retry long limit:7 RTS thr:off Fragment thr:off Power Management:on
Thanks a lot. Thanks everyone for the help.
One reader reports: Error for wireless request "Set Mode" (8B06): SET failed on device eth1; Invalid argument.
Airodump-ng is one of the favourite tools for working with wireless networks. Very informative post; I am new to Kali and Linux in general, so I'm still learning a lot of this.
|
"""SelfStoreDict for Python.
Author: markus schulte <[email protected]>
The module provides a subclassed dictionary that saves itself to a JSON file or redis-key whenever changed or when used
within a context.
"""
import json
from os.path import getmtime
from datetime import datetime, timedelta
from pathlib import Path
def adapt(parent, elem=None):
"""
    Called whenever a dict or list is added. Needed in order to let SelfStoreDict know about changes happening to its
    children.
    :param parent: the parent object of the one to be constructed. parent should always be of type SelfStoreDict and
    should always be the root object.
    :param elem: the element added to SelfStoreDict or one of its children
    :return: the elem, converted to a subclass of dict or list that notifies its parent
"""
if isinstance(elem, list):
return ChildList(parent, elem)
if isinstance(elem, dict):
return ChildDict(parent, elem)
return elem
class ChildList(list):
"""
a subclass of list that notifies self.parent about any change to its members
"""
def __init__(self, parent, li=None):
super(ChildList, self).__init__()
if li is None:
li = list()
self.parent = parent
for v in li:
self.append(v)
if not li:
self.parent.save()
def append(self, v):
v = adapt(self.parent, v)
super(ChildList, self).append(v)
self.parent.save()
def extend(self, v):
v = adapt(self.parent, v)
super(ChildList, self).extend(v)
self.parent.save()
def insert(self, i, v):
v = adapt(self.parent, v)
super(ChildList, self).insert(i, v)
self.parent.save()
def remove(self, v):
v = adapt(self.parent, v)
super(ChildList, self).remove(v)
self.parent.save()
    def pop(self, i=-1):
r = super(ChildList, self).pop(i)
self.parent.save()
return r
def clear(self):
super(ChildList, self).clear()
self.parent.save()
def __setitem__(self, k, v):
v = adapt(self.parent, v)
super(ChildList, self).__setitem__(k, v)
self.parent.save()
class ChildDict(dict):
"""
a subclass of dict that notifies self.parent about any change to its members
"""
def __init__(self, parent, d=None):
super(ChildDict, self).__init__()
if d is None:
d = dict()
self.parent = parent
for k, v in d.items():
self[k] = v
if d != {}:
self.parent.save()
def __setitem__(self, k, v):
v = adapt(self.parent, v)
super(ChildDict, self).__setitem__(k, v)
self.parent.save()
def __delitem__(self, k):
super(ChildDict, self).__delitem__(k)
self.parent.save()
def setdefault(self, k, v=None):
v = adapt(self.parent, v)
v = super(ChildDict, self).setdefault(k, v)
self.parent.save()
return v
def clear(self):
super(ChildDict, self).clear()
self.parent.save()
class FileContainer(object):
def __init__(self, path):
self.path = path
def save(self, data):
with open(self.path, "w") as fp:
json.dump(data.copy(), fp)
def load(self):
try:
with open(self.path) as fp:
for k, v in json.load(fp).items():
yield [k, v]
        except FileNotFoundError:
            raise
def touch(self):
Path(self.path).touch()
@property
def modified(self):
return int(getmtime(self.path))
class RedisContainer(object):
def __init__(self, key, redis):
self.key = key
self.redis = redis
self.f = 9223370527000000
def save(self, data):
self.redis.set(self.key, json.dumps(data.copy()))
self.redis.expire(self.key, self.f)
def load(self):
data = self.redis.get(self.key)
try:
jdata = json.loads(data)
except TypeError:
return
try:
for k, v in jdata.items():
yield [k, v]
except FileNotFoundError:
raise FileNotFoundError
def touch(self):
self.redis.expire(self.key, self.f)
@property
def modified(self):
ttl = self.redis.ttl(self.key)
if ttl is None:
return
delta = timedelta(seconds=self.f - ttl)
return int((datetime.now() - delta).timestamp())
class SelfStoreDict(ChildDict):
"""
This class acts like a dict but constructs all attributes from JSON. please note: it is a subclass of 'ChildDict'
but always the parent.
call the constructor with a path or a redis connection
you may add an optional initial value as a dict
"""
def __init__(self, path, data=None, redis=None):
self._saves_ = 0
self._context_ = False
self._inactive_ = True
self.parent = self
# check if there is a redis object
if redis is not None:
self.sc = RedisContainer(path, redis=redis)
else:
self.sc = FileContainer(path)
self._path_ = path
super(SelfStoreDict, self).__init__(self, data)
if data is not None:
self._inactive_ = False
self.save()
else:
self._load()
self._inactive_ = False
def _inc_saves(self):
self._saves_ += 1
def _savenow(self):
if self._inactive_:
return False
if self._context_:
return False
return True
def save(self):
if self._savenow():
self.sc.save(self.copy())
self._inc_saves()
return
@property
def saves(self):
return self._saves_
@property
def modified(self):
return self.sc.modified
def touch(self):
self.sc.touch()
def __enter__(self):
self._context_ = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._context_ = False
self._inactive_ = False
self.save()
def _load(self):
"""
called by '@path.setter' to load dict.
:return: None
"""
try:
for k, v in self.sc.load():
self[k] = v
except FileNotFoundError:
pass
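# Illustrative usage sketch (the path and keys below are examples only, not part of the module).
if __name__ == "__main__":
    # Inside a context, changes are collected and written once on __exit__.
    with SelfStoreDict("/tmp/selfstoredict_demo.json") as cfg:
        cfg["theme"] = "dark"
        cfg["recent"] = ["a.txt", "b.txt"]
    # Outside a context, every mutation triggers an immediate save.
    cfg["theme"] = "light"
    print(cfg.saves)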
|
prove that Homoeopathy was the source of her improvement.
toxic products from the paralyzed intestinal tract.
it may require five or six segments for the crossing of pain impulses.
|
#!/usr/bin/env python
"""
add_metadata_to_delly_manta_vcf.py
##INFO=<ID=SOMATIC is automatically added after i) the delly somatic filtering step ii) manta somatic calling
"""
import sys
import argparse
import re
import vcf
def add_meta2vcf(vcf_file):
try:
f = open(vcf_file, 'r')
except IOError:
sys.exit("Error: Can't open vcf file: {0}".format(vcf_file))
else:
with f:
vcf_header = []
vcf_chrom = []
vcf_variants = []
countline = 0
version = ""
for line in f:
countline = countline + 1
line = line.strip('\n')
if line.startswith('##'):
##Print original vcf meta-information lines##
vcf_header.append(line)
if line.startswith('##cmdline'):
                        find_manta = re.findall('(manta_\w+\.\w+\.\w+)', line)
                        if find_manta:
                            version = find_manta[0]
                ## Print header lines and add meta-information lines with caller info to the VCF
elif line.startswith("#CHROM"):
vcf_chrom.append(line)
countline = 0
else:
variant = line.split('\t')
if countline == 1:
find_delly = re.findall('(EMBL.DELLYv\w+\.\w+.\w+)', variant[7])
if not version:
version = find_delly[0]
                    if "DELLY" not in variant[7]:
                        variant[7] = variant[7] + ";SVMETHOD={0}".format(version)
vcf_variants.append("\t".join(variant))
            print("\n".join(vcf_header))
            print("##INFO=<ID=caller={0}".format(version))
            print("\n".join(vcf_chrom))
            print("\n".join(vcf_variants))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'add metadata to a SV VCF file')
required_named = parser.add_argument_group('required named arguments')
required_named.add_argument('-v', '--vcf_file', help='path/to/file.vcf', required=True)
args = parser.parse_args()
add_meta2vcf(args.vcf_file)
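# Illustrative invocation (file names are examples only). The script writes the
# annotated VCF to stdout, so redirect it to a file, e.g.:
#   python add_metadata_to_delly_manta_vcf.py -v sample_somatic.vcf > sample_somatic.annotated.vcf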
|
The sixth installment of a new series on the blog that we will be posting throughout the month of December. As some of you are aware, Music for All will be taking an Honor Band to the 2017 Rose Parade. Our members were selected by audition for the ensemble and come from all across the United States. We are looking forward to coming together for the first time as a band at the end of December and we hope this series will allow us all to get to know some (hopefully most!) of the members of this awesome ensemble!
I love marching band and I love meeting new people. When I first heard about the honor band, it was like a dream come true. To march in a parade with other strong players and marchers, and the Rose Parade no less, I jumped at the chance to be a part of it.
Definitely marching at Disneyland! Disney World has always been an important part of my life and I can't wait to be part of the parades I loved as a child.
Never regret trying something new.
Take advantage of the opportunities around you.
For my senior year show (fall 2016), we ended our show "Happy" with "Make Me Smile" by Chicago. The last few charts were loud and strong. When performing it at Ford Field for MCBA state finals, we cut off our final note and you could hear it ring throughout the stadium. It was my last show and the best one my band, my section, and myself had ever performed. We ended up winning 3rd place and getting a score of 90.025, both records for my high school.
I'm part of the varsity quiz bowl and robotics team at my school.
After I graduate high school, I plan to go to college to get a degree to teach high schoolers biology or music.
My sister and I, with the help of our band director, resurrected my high school's wind ensemble in order to provide our band members with another opportunity to play and challenge themselves.
The second installment of a new series on the blog that we will be posting throughout the month of December. As some of you are aware, Music for All will be taking an Honor Band to the 2017 Rose Parade. Our members were selected by audition for the ensemble and come from all across the United States. We are looking forward to coming together for the first time as a band at the end of December and we hope this series will allow us all to get to know some (hopefully most!) of the members of this awesome ensemble!
My brothers marched in the Tournament of Roses Parade in 2011 as a part of Lindbergh's Spirit of St. Louis Marching Band. At that time, I participated in my middle school's winter guard. I knew it was my dream to someday apply to march in the parade; my brothers inspired me to work for this goal!
I am excited to learn color guard skills and routines in a different environment than my high school. It will be interesting to work within a group of extremely dedicated and skilled people!
I have learned many valuable leadership skills through my role as the color guard captain for the past two years. Marching band has taught me about hard work and the importance of maintaining a positive mindset regardless of the inevitable frustration that accompanies large group activities.
My most memorable moment in band was my sophomore year at our summer getaway in Potosi, Missouri: Camp Lakewood. During our bonfire night, the sky was incredibly clear. We all took a moment of silence to remember our dear band director, Mr. Spiegelman. As we all fondly thought to ourselves, we stared at the beautiful night sky, and amazingly the Milky Way was visible! It was the most tranquil, calming moment in my life.
I am a student coach for the Lindbergh Middle School Winter Guard. I am involved in Link Crew, the Spanish Honor Society, and the National Society of High School Honors. I also coordinate, produce, and host the student-run Annual Lindbergh Film Festival.
I plan to attend Southeast Missouri University to graduate with a Bachelor of Science in Nursing. I hope to then study for a Doctor of Nursing Practice in order to work as a pediatric nurse practitioner in the future. I want to help improve children's lives around the world.
My favorite things to do in my free time include watching The Office and solving jigsaw puzzles!
|
from __future__ import absolute_import
from django.utils.functional import cached_property
from parsimonious.exceptions import IncompleteParseError
from sentry.api.event_search import (
event_search_grammar,
InvalidSearchQuery,
SearchFilter,
SearchKey,
SearchValue,
SearchVisitor,
)
from sentry.constants import STATUS_CHOICES
from sentry.search.utils import (
parse_actor_value,
parse_user_value,
parse_release,
parse_status_value,
)
class IssueSearchVisitor(SearchVisitor):
key_mappings = {
"assigned_to": ["assigned"],
"bookmarked_by": ["bookmarks"],
"subscribed_by": ["subscribed"],
"first_release": ["first-release", "firstRelease"],
"first_seen": ["age", "firstSeen"],
"last_seen": ["lastSeen"],
"active_at": ["activeSince"],
# TODO: Special case this in the backends, since they currently rely
# on date_from and date_to explicitly
"date": ["event.timestamp"],
"times_seen": ["timesSeen"],
"sentry:dist": ["dist"],
}
numeric_keys = SearchVisitor.numeric_keys.union(["times_seen"])
date_keys = SearchVisitor.date_keys.union(["active_at", "date"])
@cached_property
def is_filter_translators(self):
is_filter_translators = {
"assigned": (SearchKey("unassigned"), SearchValue(False)),
"unassigned": (SearchKey("unassigned"), SearchValue(True)),
}
for status_key, status_value in STATUS_CHOICES.items():
is_filter_translators[status_key] = (SearchKey("status"), SearchValue(status_value))
return is_filter_translators
def visit_is_filter(self, node, children):
# the key is "is" here, which we don't need
negation, _, _, search_value = children
if search_value.raw_value not in self.is_filter_translators:
raise InvalidSearchQuery(
'Invalid value for "is" search, valid values are {}'.format(
sorted(self.is_filter_translators.keys())
)
)
search_key, search_value = self.is_filter_translators[search_value.raw_value]
operator = "!=" if self.is_negated(negation) else "="
return SearchFilter(search_key, operator, search_value)
def visit_boolean_operator(self, node, children):
raise InvalidSearchQuery(
'Boolean statements containing "OR" or "AND" are not supported in this search'
)
def parse_search_query(query):
try:
tree = event_search_grammar.parse(query)
except IncompleteParseError as e:
        raise InvalidSearchQuery(
            "%s %s"
            % (
                u"Parse error: %r (column %d)." % (e.expr.name, e.column()),
                "This is commonly caused by unmatched parentheses. Enclose any text in double quotes.",
            )
        )
return IssueSearchVisitor().visit(tree)
def convert_actor_value(value, projects, user, environments):
return parse_actor_value(projects, value, user)
def convert_user_value(value, projects, user, environments):
return parse_user_value(value, user)
def convert_release_value(value, projects, user, environments):
return parse_release(value, projects, environments)
def convert_status_value(value, projects, user, environments):
try:
return parse_status_value(value)
except ValueError:
raise InvalidSearchQuery(u"invalid status value of '{}'".format(value))
value_converters = {
"assigned_to": convert_actor_value,
"bookmarked_by": convert_user_value,
"subscribed_by": convert_user_value,
"first_release": convert_release_value,
"release": convert_release_value,
"status": convert_status_value,
}
def convert_query_values(search_filters, projects, user, environments):
"""
Accepts a collection of SearchFilter objects and converts their values into
a specific format, based on converters specified in `value_converters`.
    :param search_filters: Collection of `SearchFilter` objects.
    :param projects: List of projects being searched across
    :param user: The user making the search
    :param environments: List of environments being searched across
    :return: New collection of `SearchFilters`, which may have converted values.
"""
def convert_search_filter(search_filter):
if search_filter.key.name in value_converters:
converter = value_converters[search_filter.key.name]
new_value = converter(search_filter.value.raw_value, projects, user, environments)
search_filter = search_filter._replace(value=SearchValue(new_value))
return search_filter
return map(convert_search_filter, search_filters)
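# Illustrative sketch (example query and values only, not part of the module).
# parse_search_query() turns a raw query string into SearchFilter tuples, roughly:
#   parse_search_query('is:unresolved times_seen:>10')
#   -> [SearchFilter(SearchKey('status'), '=', SearchValue(<unresolved status id>)),
#       SearchFilter(SearchKey('times_seen'), '>', SearchValue(10))]
# convert_query_values() can then rewrite values (e.g. release or assignee names)
# using the converters registered in value_converters above.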
|
A "Northwestern Region" of NLA existed at least as early as 1952. Activity and interest died down periodically, and attempts to revive the district occurred in 1968 and 1972. In 1973, the district gained representation on the NLA board of trustees with the transition to the new bylaws.
Northwest District of the Nevada Library Association.
The purpose of the Northwest District of the Nevada Library Association shall be to promote the highest quality library service and librarianship, particularly in the Northwest District.
All paid members of the Nevada Library Association living/or employed in the area encompassed by the Northwest District (Churchill, Douglas, Lyon, Mineral, Pershing, Storey, and Washoe Counties and Carson City) are eligible as members of the Northwest District.
A majority of the DISTRICT's members vote to approve such affiliation.
The BOARD OF TRUSTEES of NLA approve such affiliation.
Disaffiliation will follow procedures 1 and 2 of this section.
ARTICLE V. OFFICERS AND GOVERNMENT.
Section 1. The officers who shall comprise the executive board of the Northwest District shall consist of an elected Chairperson, and a Secretary-Treasurer appointed by the Chairperson.
Section 2. The District shall elect one member to serve on the Nevada Library Association Nominating Committee.
Appoint such committees as are necessary to carry on the functions of the District.
Prepare an annual report to the district membership by January 1, of each year. Said report will be published by the Nevada Library Association in the spring of the same year.
To submit a budget request and program justification to the NLA Finance Committee by December 31st of each year preceding the year for which the budget is requested.
Section 5. The Chairperson shall attend all meetings of the Board of Trustees of the Nevada Library Association and shall act as liaison between the Northwest District of the Nevada Library Association and the Board of Trustees of the Nevada Library Association.
In the event that the Chairperson is unable to attend the required meeting or meetings the chairperson of the Northwest District will appoint a substitute subject to the Nevada Library Association Board's approval.
Send one copy of the minutes to the Executive Secretary of the Nevada Library Association.
Send notices of information and activities to the membership.
Maintain financial records using the generally accepted accounting procedure.
Pay all bills incurred by the Northwest District.
a. Expenditures must be approved by the chairperson prior to encumbrance.
b. Reimbursement claims must be submitted to the Secretary-treasurer of the Northwest District.
c. Expenditures budgeted or approved by the chairperson at the beginning of the calendar year may be met by the Secretary-treasurer as they occur.
Elections will take place at a designated meeting fulfilling the requirement of the above.
Election shall be by a voice vote unless a secret ballot is requested by any member.
Election to any office shall be by majority vote of those members casting ballots at the meeting.
a. Shall serve one year commencing January 1st and ending December 31st.
Nevada Library Association Nominating Committee Representative.
Officers, other elected members, and committee chairpersons, shall hold only the one office to which they were elected or appointed.
Committee chairperson shall serve a term of one year and may be reappointed.
Unless their previous term was served in the filling of a vacancy, no officers or other elected members of the District shall succeed themselves in the same office. This restriction may be removed by a 2/3 vote of the members of the District attending the election meeting.
Section 9. In the event of a vacancy in elected offices, an election to fill said office for the duration of the term shall be held at the next meeting of the membership. Removal from office shall follow the procedures established in the NLA By-laws. Within thirty days of the election, the chairperson shall notify the Executive Secretary of the Nevada Library Association of the results of the election.
Section 1. Meetings shall be held at least four times a year, at such times and places as are designated at the previous meeting, or at the call of the Chairperson.
Section 2. All meetings must be open to members of Nevada Library Association and to the public.
Section 3. The number of meetings may be changed by the vote of the membership at any meeting.
The District may make recommendations through the Chairperson to the Board of Trustees of the Nevada Library Association, provided a majority of the members present are in favor of the recommendation; a minority report may also be submitted.
Section 1. These rules of procedure may be amended by a two-thirds vote of the regular members present at the meeting at which voting takes place. Notice of proposed amendments, other than a yearly review, shall have been sent by the Secretary-treasurer to the membership at least 14 days prior to the meeting at which voting takes place.
Section 2. The Chairperson shall designate a member to annually review these procedures and report to the membership.
Section 3. Approved changes in rules of procedure shall be sent to the Board of Trustees of Nevada Library Association for approval.
|
# -*- encoding: utf-8 -*-
"""Test class for User Group CLI
:Requirement: Usergroup
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UsersRoles
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import random
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import make_ldap_auth_source
from robottelo.cli.factory import make_role
from robottelo.cli.factory import make_user
from robottelo.cli.factory import make_usergroup
from robottelo.cli.factory import make_usergroup_external
from robottelo.cli.ldapauthsource import LDAPAuthSource
from robottelo.cli.task import Task
from robottelo.cli.user import User
from robottelo.cli.usergroup import UserGroup
from robottelo.cli.usergroup import UserGroupExternal
from robottelo.config import settings
from robottelo.constants import LDAP_ATTR
from robottelo.constants import LDAP_SERVER_TYPE
from robottelo.datafactory import gen_string
from robottelo.datafactory import valid_data_list
from robottelo.decorators import run_in_one_thread
from robottelo.decorators import skip_if_not_set
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import upgrade
from robottelo.test import CLITestCase
class UserGroupTestCase(CLITestCase):
"""User group CLI related tests."""
@tier1
    def test_positive_CRUD(self):
        """Create a new user group with valid elements attached to the group.
        List the user group, then update and delete it.
:id: bacef0e3-31dd-4991-93f7-f54fbe64d0f0
:expectedresults: User group is created, listed, updated and
deleted successfully.
:CaseImportance: Critical
"""
user = make_user()
ug_name = random.choice(valid_data_list())
role_name = random.choice(valid_data_list())
role = make_role({'name': role_name})
sub_user_group = make_usergroup()
# Create
user_group = make_usergroup(
{
'user-ids': user['id'],
'name': ug_name,
'role-ids': role['id'],
'user-group-ids': sub_user_group['id'],
}
)
self.assertEqual(user_group['name'], ug_name)
self.assertEqual(user_group['users'][0], user['login'])
self.assertEqual(len(user_group['roles']), 1)
self.assertEqual(user_group['roles'][0], role_name)
self.assertEqual(user_group['user-groups'][0]['usergroup'], sub_user_group['name'])
# List
result_list = UserGroup.list({'search': 'name={0}'.format(user_group['name'])})
self.assertTrue(len(result_list) > 0)
self.assertTrue(UserGroup.exists(search=('name', user_group['name'])))
# Update
new_name = random.choice(valid_data_list())
UserGroup.update({'id': user_group['id'], 'new-name': new_name})
user_group = UserGroup.info({'id': user_group['id']})
self.assertEqual(user_group['name'], new_name)
# Delete
UserGroup.delete({'name': user_group['name']})
with self.assertRaises(CLIReturnCodeError):
UserGroup.info({'name': user_group['name']})
@tier1
def test_positive_create_with_multiple_elements(self):
"""Create new user group using multiple users, roles and user
groups attached to that group.
:id: 3b0a3c3c-aab2-4e8a-b043-7462621c7333
:expectedresults: User group is created successfully and contains all
expected elements.
:CaseImportance: Critical
"""
count = 2
users = [make_user()['login'] for _ in range(count)]
roles = [make_role()['name'] for _ in range(count)]
sub_user_groups = [make_usergroup()['name'] for _ in range(count)]
user_group = make_usergroup(
{'users': users, 'roles': roles, 'user-groups': sub_user_groups}
)
self.assertEqual(sorted(users), sorted(user_group['users']))
self.assertEqual(sorted(roles), sorted(user_group['roles']))
self.assertEqual(
sorted(sub_user_groups), sorted([ug['usergroup'] for ug in user_group['user-groups']]),
)
@tier2
    def test_positive_add_and_remove_elements(self):
        """Create a new user group. Add and remove several elements from the group.
:id: a4ce8724-d3c8-4c00-9421-aaa40394134d
:BZ: 1395229
:expectedresults: Elements are added to user group and then removed
successfully.
:CaseLevel: Integration
"""
role = make_role()
user_group = make_usergroup()
user = make_user()
sub_user_group = make_usergroup()
# Add elements by id
UserGroup.add_role({'id': user_group['id'], 'role-id': role['id']})
UserGroup.add_user({'id': user_group['id'], 'user-id': user['id']})
UserGroup.add_user_group({'id': user_group['id'], 'user-group-id': sub_user_group['id']})
user_group = UserGroup.info({'id': user_group['id']})
self.assertEqual(len(user_group['roles']), 1)
self.assertEqual(user_group['roles'][0], role['name'])
self.assertEqual(len(user_group['users']), 1)
self.assertEqual(user_group['users'][0], user['login'])
self.assertEqual(len(user_group['user-groups']), 1)
self.assertEqual(user_group['user-groups'][0]['usergroup'], sub_user_group['name'])
# Remove elements by name
UserGroup.remove_role({'id': user_group['id'], 'role': role['name']})
UserGroup.remove_user({'id': user_group['id'], 'user': user['login']})
UserGroup.remove_user_group({'id': user_group['id'], 'user-group': sub_user_group['name']})
user_group = UserGroup.info({'id': user_group['id']})
self.assertEqual(len(user_group['roles']), 0)
self.assertEqual(len(user_group['users']), 0)
self.assertEqual(len(user_group['user-groups']), 0)
@tier2
@upgrade
def test_positive_remove_user_assigned_to_usergroup(self):
"""Create new user and assign it to user group. Then remove that user.
:id: 2a2623ce-4723-4402-aae7-8675473fd8bd
:expectedresults: User should delete successfully.
:CaseLevel: Integration
:BZ: 1667704
"""
user = make_user()
user_group = make_usergroup()
UserGroup.add_user({'id': user_group['id'], 'user-id': user['id']})
with self.assertNotRaises(CLIReturnCodeError):
User.delete({'id': user['id']})
@run_in_one_thread
class ActiveDirectoryUserGroupTestCase(CLITestCase):
"""Implements Active Directory feature tests for user groups in CLI."""
@classmethod
@skip_if_not_set('ldap')
def setUpClass(cls):
"""Read settings and create LDAP auth source that can be re-used in
tests."""
super(ActiveDirectoryUserGroupTestCase, cls).setUpClass()
cls.ldap_user_name = settings.ldap.username
cls.ldap_user_passwd = settings.ldap.password
cls.base_dn = settings.ldap.basedn
cls.group_base_dn = settings.ldap.grpbasedn
cls.ldap_hostname = settings.ldap.hostname
cls.auth = make_ldap_auth_source(
{
'name': gen_string('alpha'),
'onthefly-register': 'true',
'host': cls.ldap_hostname,
'server-type': LDAP_SERVER_TYPE['CLI']['ad'],
'attr-login': LDAP_ATTR['login_ad'],
'attr-firstname': LDAP_ATTR['firstname'],
'attr-lastname': LDAP_ATTR['surname'],
'attr-mail': LDAP_ATTR['mail'],
'account': cls.ldap_user_name,
'account-password': cls.ldap_user_passwd,
'base-dn': cls.base_dn,
'groups-base': cls.group_base_dn,
}
)
def setUp(self):
"""Create new usergroup per each test"""
super(ActiveDirectoryUserGroupTestCase, self).setUp()
self.user_group = make_usergroup()
def tearDown(self):
"""Delete usergroup per each test"""
        for group in UserGroup.list():
            if UserGroup.info({'id': group['id']})['external-user-groups']:
                UserGroup.delete({'id': group['id']})
super(ActiveDirectoryUserGroupTestCase, self).tearDown()
@classmethod
@skip_if_not_set('ldap')
def tearDownClass(cls):
"""Delete the AD auth-source afterwards"""
LDAPAuthSource.delete({'id': cls.auth['server']['id']})
super(ActiveDirectoryUserGroupTestCase, cls).tearDownClass()
@tier2
@upgrade
def test_positive_create_and_refresh_external_usergroup_with_local_user(self):
"""Create and refresh external user group with AD LDAP. Verify Local user
association from user-group with external group with AD LDAP
:id: 7431979c-aea8-4984-bb7d-185f5b7c3109
:expectedresults: User group is created and refreshed successfully.
Local user is associated from user-group with external group.
:CaseLevel: Integration
:BZ: 1412209
"""
ext_user_group = make_usergroup_external(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'foobargroup',
}
)
self.assertEqual(ext_user_group['auth-source'], self.auth['server']['name'])
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
user = make_user()
UserGroup.add_user({'user': user['login'], 'id': self.user_group['id']})
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
@tier2
def test_positive_automate_bz1426957(self):
"""Verify role is properly reflected on AD user.
:id: 1c1209a6-5bb8-489c-a151-bb2fce4dbbfc
:expectedresults: Roles from usergroup is applied on AD user successfully.
:CaseLevel: Integration
:BZ: 1426957, 1667704
"""
ext_user_group = make_usergroup_external(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'foobargroup',
}
)
self.assertEqual(ext_user_group['auth-source'], self.auth['server']['name'])
role = make_role()
UserGroup.add_role({'id': self.user_group['id'], 'role-id': role['id']})
with self.assertNotRaises(CLIReturnCodeError):
Task.with_user(username=self.ldap_user_name, password=self.ldap_user_passwd).list()
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
self.assertEqual(User.info({'login': self.ldap_user_name})['user-groups'][1], role['name'])
User.delete({'login': self.ldap_user_name})
@tier2
def test_negative_automate_bz1437578(self):
"""Verify error message on usergroup create with 'Domain Users' on AD user.
:id: d4caf33e-b9eb-4281-9e04-fbe1d5b035dc
:expectedresults: Error message as Domain Users is a special group in AD.
:CaseLevel: Integration
:BZ: 1437578
"""
with self.assertRaises(CLIReturnCodeError):
result = UserGroupExternal.create(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'Domain Users',
}
)
self.assertEqual(
'Could not create external user group: '
'Name is not found in the authentication source'
'Name Domain Users is a special group in AD.'
' Unfortunately, we cannot obtain membership information'
' from a LDAP search and therefore sync it.',
result,
)
@run_in_one_thread
class FreeIPAUserGroupTestCase(CLITestCase):
"""Implements FreeIPA LDAP feature tests for user groups in CLI."""
@classmethod
@skip_if_not_set('ipa')
def setUpClass(cls):
"""Read settings and create LDAP auth source that can be re-used in
tests."""
super(FreeIPAUserGroupTestCase, cls).setUpClass()
cls.ldap_user_name = settings.ipa.username_ipa
cls.ldap_user_passwd = settings.ipa.password_ipa
cls.base_dn = settings.ipa.basedn_ipa
cls.group_base_dn = settings.ipa.grpbasedn_ipa
cls.ldap_hostname = settings.ipa.hostname_ipa
cls.auth = make_ldap_auth_source(
{
'name': gen_string('alpha'),
'onthefly-register': 'true',
'host': cls.ldap_hostname,
'server-type': LDAP_SERVER_TYPE['CLI']['ipa'],
'attr-login': LDAP_ATTR['login'],
'attr-firstname': LDAP_ATTR['firstname'],
'attr-lastname': LDAP_ATTR['surname'],
'attr-mail': LDAP_ATTR['mail'],
'account': cls.ldap_user_name,
'account-password': cls.ldap_user_passwd,
'base-dn': cls.base_dn,
'groups-base': cls.group_base_dn,
}
)
def setUp(self):
"""Create new usergroup per each test"""
super(FreeIPAUserGroupTestCase, self).setUp()
self.user_group = make_usergroup()
def tearDown(self):
"""Delete usergroup per each test"""
        for group in UserGroup.list():
            if UserGroup.info({'id': group['id']})['external-user-groups']:
                UserGroup.delete({'id': group['id']})
super(FreeIPAUserGroupTestCase, self).tearDown()
@classmethod
@skip_if_not_set('ipa')
def tearDownClass(cls):
"""Delete the IPA auth-source afterwards"""
LDAPAuthSource.delete({'id': cls.auth['server']['id']})
super(FreeIPAUserGroupTestCase, cls).tearDownClass()
@tier2
@upgrade
def test_positive_create_and_refresh_external_usergroup_with_local_user(self):
"""Create and Refresh external user group with FreeIPA LDAP. Verify Local user
association from user-group with external group with FreeIPA LDAP
:id: bd6152e3-51ac-4e84-b084-8bab1c4eb583
:expectedresults: User group is created successfully and assigned to correct auth
source. User group is refreshed successfully. Local user is associated from
user group with external group.
:CaseLevel: Integration
:BZ: 1412209
"""
ext_user_group = make_usergroup_external(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'foobargroup',
}
)
self.assertEqual(ext_user_group['auth-source'], self.auth['server']['name'])
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
user = make_user()
UserGroup.add_user({'user': user['login'], 'id': self.user_group['id']})
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
print(User.info({'login': user['login']}))
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
|
Contact us to know more about Torre Mayor, Mexico City.
Mexico sits in one of the world’s worst seismic zones, and when we started working on the structural design of the Torre Mayor in Mexico City, it was less than a decade after the 1985 earthquake. Measuring 8.1 on the Richter scale, it wreaked devastation throughout the region and caused the deaths of at least 10,000 people.
|
# Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
register = template.Library()
@register.inclusion_tag("color_value.html")
def color_value(collection):
    """ {% color_value collection %}
    Render collection["value"] with the assigned color, optionally wrapped in a link.
    collection["value"]: the value to be rendered
    collection["textcolor"]: the color to be used; if callable, it is invoked as
        textcolor(value, collection) and must return (text, color)
    collection["link"]: if present, a URL the rendered value links to
    """
if not collection:
return {"valid":False}
    if not isinstance(collection, dict):
return {"valid":True, "text":collection, "color":None, "debug":collection}
if not collection or "value" not in collection:
return {"valid":False}
value = collection["value"]
color = collection.get("textcolor", None)
if callable(color):
(value, color) = color(value, collection)
args = {
"valid":True,
"text":value,
"color":color,
"debug":(value, color, collection.get("color", None), collection.get("values",None))
}
if "link" in collection:
        args["link"] = collection["link"]
return args
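# Illustrative sketch (example values only, not part of the module). A view might hand
# the tag a collection such as:
#   {"value": "PASS", "textcolor": "green", "link": "/builds/1234/"}
# or compute the text and colour lazily via a callable:
#   {"value": 3, "textcolor": lambda v, c: ("%d failures" % v, "red" if v else "green")}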
|
Primary atrophic rhinitis is a form of rhinitis found in elderly individuals due to reduced blood flow to the nasal membranes. Symptoms include nasal congestion and a constant foul odor in the nose caused by thick dry crusts. The condition is characterized by progressive atrophy (deterioration) of the lining of the nose from reduced blood flow to the nasal membranes. You may suffer from associated headaches and chronic sinusitis.
A secondary form of atrophic rhinitis results from chronic nasal infections, chronic sinusitis, nasal surgery, trauma and radiation.
Salt water irrigation and emollient ointments may help this condition.
|
# Author: Alexander Decurnou
# Team: iDev
from math import ceil
from moviepy.editor import VideoFileClip, AudioFileClip
from os.path import join
import subprocess
DEFAULT_AUDIOFILE_CODEC = 'libvorbis' # used to create webms
DEFAULT_AUDIOFILE_BITRATE = None
DEFAULT_ZEROES_PADDING = 5
DEFAULT_AUDIO_SEGMENT_DURATION_SEC = 180
def audio_extraction(path, audio_dest, audio_codec=DEFAULT_AUDIOFILE_CODEC,
audio_bitrate=DEFAULT_AUDIOFILE_BITRATE):
try:
print("Extracting audio...")
#video = VideoFileClip(vid_src)
#audio = video.audio
#audio.write_audiofile(audio_dest, codec=audio_codec,
# bitrate=audio_bitrate, verbose=False,
# progress_bar=False)
command = "ffmpeg -i {} -vn -acodec {} -y {}".format(path, audio_codec, audio_dest)
subprocess.call(command, shell=True)
print("Audio file extracted.")
except:
print("Unexpected error!")
raise
# Really hacky way of making audio-only files into audio-only webms. Yes,
# transcoding from lossy to lossy is bad, but since this will be used on mostly
# voice-only stuff, I'm not terribly worried about a loss of fidelity.
def audio_conversion(audio_src, audio_dest, audio_codec=DEFAULT_AUDIOFILE_CODEC,
audio_bitrate=DEFAULT_AUDIOFILE_BITRATE):
try:
print("Extracting audio...")
audio = AudioFileClip(audio_src)
audio.write_audiofile(audio_dest, codec=audio_codec,
bitrate=audio_bitrate, verbose=False,
progress_bar=False)
print("Audio file extracted.")
except:
print("Unexpected error!")
raise
def audio_segmentation(audio_src, audio_seg_dir,
seg_dur=DEFAULT_AUDIO_SEGMENT_DURATION_SEC,
pad_zeroes=DEFAULT_ZEROES_PADDING):
src_ext = ".webm"
audio = AudioFileClip(audio_src)
total_sec = audio.duration
start_sec = 0
print("Segmenting audio...")
while start_sec < total_sec:
end_sec = start_sec + seg_dur
if end_sec > total_sec:
end_sec = ceil(total_sec)
segment = audio.subclip(start_sec)
else:
segment = audio.subclip(start_sec, end_sec)
seg_name = "%s-%s%s" % (
str(start_sec).rjust(pad_zeroes, "0"),
str(end_sec).rjust(pad_zeroes, "0"), src_ext)
start_sec = end_sec
seg_full_path = join(audio_seg_dir, seg_name)
segment.write_audiofile(seg_full_path, codec=DEFAULT_AUDIOFILE_CODEC,
bitrate=DEFAULT_AUDIOFILE_BITRATE,
verbose=False, progress_bar=False)
print("Audio segmentation complete.")
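# Illustrative usage sketch (the paths are examples only; assumes ffmpeg and moviepy
# are available and that the output directory already exists).
if __name__ == "__main__":
    audio_extraction("talk.mp4", "talk.webm")       # shells out to ffmpeg
    audio_segmentation("talk.webm", "segments")     # 180-second webm chunks by default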
|
Beautifully thrown ceramic stoneware tray.
Stone gray with hints of greenish blue and brown.
Cutouts on side for cigarettes, but also a functional tray for other purposes.
Good vintage condition with no major flaws or cracks.
Signed underneath but can't make out what it says.
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class procurement_order(osv.osv):
_inherit = "procurement.order"
def run(self, cr, uid, ids, autocommit=False, context=None):
context = dict(context or {}, procurement_autorun_defer=True)
res = super(procurement_order, self).run(cr, uid, ids, autocommit=autocommit, context=context)
procurement_ids = self.search(cr, uid, [('move_dest_id.procurement_id', 'in', ids), ('state', 'not in', ['exception', 'cancel'])], order='id', context=context)
if procurement_ids:
return self.run(cr, uid, procurement_ids, autocommit=autocommit, context=context)
return res
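# Clarifying note (an interpretation of the overrides in this file, not original
# documentation): run() sets 'procurement_autorun_defer' in the context so chained
# procurements are not triggered implicitly, then looks up the child procurements of
# its destination moves and runs them explicitly, level by level. _create_procurements()
# below passes the same flag so that newly created procurements are run in one
# controlled pass.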
class stock_move(osv.osv):
_inherit = "stock.move"
def _create_procurements(self, cr, uid, moves, context=None):
res = super(stock_move, self)._create_procurements(cr, uid, moves, context=dict(context or {}, procurement_autorun_defer=True))
self.pool['procurement.order'].run(cr, uid, res, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Information with any value will always be at risk. Even institutions with world-class security systems know that a breach is still possible, even likely. The best strategy? Make like the Boy Scouts and be prepared.
In the previous two blogs, I reviewed the importance of first understanding the threats and then assessing your institutional risk. The next step is to plan. Plan for the short term, as well as the long. Plan for how you’ll reduce risk … and how you’ll address the inevitable breach. And continually revise your plan to keep pace as threats, legal requirements, and technology evolve.
1. Do we know our security requirements?
In order to plan, budget for, and implement an effective security programme, you must first understand (1) what needs to be protected and (2) what it will take to do the job right: audit both your information assets and the tools and capabilities required to safeguard them.
Even systems and data that are not under the direct control of your central IT team should be included in this audit. The institution is responsible for protecting all of its information assets, regardless of ownership, so your plan must be comprehensive.
2. Are we staying up to date on legal and regulatory requirements?
In addition to your own internal requirements, there are a range of legal and regulatory requirements governing data privacy and protection. Requirements may vary by state, country, or type of institution, but failure to comply can impact your funding and reputation.
Practical tip: Make sure your legal, technical, and administrative teams are working together to prepare for the EU’s new General Data Protection Regulations (GDPR)—which goes into effect in May 2018.
3. Do we have adequate policies and standards?
The SANS Institute—a non-profit organisation serving security professionals across multiple industries—offers templates for creating and implementing a range of information security policies.
All employees should be educated and expected to follow institutional policies and standards. Ensure other users are informed of their responsibilities as well. Some organisations have a formal certification process for new employees—or all employees at regular intervals—to ensure compliance.
4. Are we tightly managing identity and access?
Identity and access management is about giving the right people access to the right information at the right time. If you don’t keep a tight rein on who’s accessing what, you leave multiple entry points for hackers.
Two common gaps illustrate the risk. First, there are dozens of systems across campus that aren't linked and require separate IDs and passwords. To make life easy, staff tend to use the same password for a low-security system (like a survey app) as they do for a high-security system (like payroll). A hacker then only needs to break into the survey app to gain entry into payroll.
Second, there often isn't a clear process for changing or removing credentials when employees switch roles or leave the institution. Perhaps a disgruntled former employee can still access financial information, or a staff member who has moved to a new department can still access information that's no longer relevant to her job.
Ultimately, institutions need a unified, centralised system for managing identity and access—one that is well integrated into daily business processes.
5. Have we engaged senior management and the board?
Security is not just a technology challenge, it’s a business imperative. Given the large potential impact of a data breach, senior management and board members must be highly engaged in information security planning. IT alone cannot weigh risk vs. cost or ensure a culture of compliance at every level of the institution.
When faced with decisions such as whether to fund scholarships or network security, senior leaders need to understand exactly what’s at stake and accept responsibility for their decisions.
6. Are we providing adequate ongoing education?
As I discussed in the first blog in this series, lack of awareness and education about security threats is one of the biggest risks for most institutions.
You must have a well-documented and adequately resourced plan for ongoing information security training. This could include everything from mandatory courses on phishing and malware to regular e-blasts on the latest threats to annual certification programmes.
Educause offers a number of free resources institutions can use to educate faculty, staff, and students about cybersecurity.
7. Are we careful when choosing partners?
When retail giant Target experienced a massive data breach in 2013, it was not their own network that hackers broke into but rather the network of a heating and air conditioning sub-contractor that had worked at a number of Target stores.
No one remembers the name of that HVAC company, but they surely remember Target as a company that loses personal data. Target also paid a heavy financial price to rectify the situation and appease customers. The key takeaway? You are ultimately responsible for your students’ and employees’ personal data, even when—especially when—it’s being shared with third party vendors. So choose partners wisely.
Ask potential partners the same hard questions about the security of their information systems as you do about your own. Discuss auditing and compliance up front. Put processes in place to hold them accountable. If they can’t meet your standards, look for someone who can.
8. Are we using appropriate technology?
Technology can greatly enhance information security. The key is to modernise and simplify.
Sometimes moving forward means first looking backward. Take time to identify and retire legacy systems and business processes that are needlessly cumbersome. Complexity only makes it harder to monitor and control who is accessing what. Streamline the steps you use to grant system and data access, as well as those used to close the loop once an employee moves on. Retire out of date systems that no longer receive security patches.
As you build out your information security programme, pick the right tools for each job. For example, if you have a highly secure environment, using non-standard laptops or allowing contractors network access might not be wise. On the other hand, if you’re securing a simple web site with limited connection to other systems, don’t overcomplicate the security solution. Using resources wisely is key to winning the security battle on multiple fronts.
9. Do we aggressively follow up on incidents?
We live in a world where data breaches are ‘when’ not ‘if.’ That’s why responding appropriately to security incidents is as important as preventing them.
Gather data on incidents that will help you reduce recurring issues or prevent more damaging impact. Establish routines and best practices, so that you can mobilise quickly in the event of a breach. Review and analyse your trends. Data can also help you make the case for spending more money on things like firewalls or network intrusion detection.
10. Are we making continuous investments?
Information security is an ongoing practice, not a one-time implementation. You will never be fully protected, because there will always be new threats. But with careful planning—and a sustained investment of resources—you can effectively mitigate risk.
Make sure that your annual and long-term budget for information security reflects its level of importance to your business. Help decision makers understand the link between data protection—or lack thereof—and successful recruiting, advising, fundraising, and other key functions. Stay actively engaged with industry forums and workgroups to understand evolving threats and security best practices.
Planning for something to go wrong, in a world where what can go wrong is constantly changing, is, in a word, uncomfortable. But if you can get comfortable with discomfort—becoming agile, alert, responsive, and realistic—you can create the level of security that faculty, staff, and students need to thrive.
Read the complete infosec blog series.
Lee Congdon is responsible for Ellucian’s information technology, including enabling the business through technology services, information technology strategy, delivering next generation solutions, process improvement and advanced data and analytics.
Improve efficiencies, stay informed, and deliver a great constituent experience.
No one’s immune to a data breach. But just how vulnerable are you?
|
import uuid
import ddt
from openstackinabox.tests.base import TestBase, DbFailure
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.db.roles import KeystoneDbRoles
@ddt.ddt
class TestKeystoneDbRoles(TestBase):
def setUp(self):
super(TestKeystoneDbRoles, self).setUp()
self.model = KeystoneDbRoles
self.master = 'Venus'
self.db = self.get_testing_database()
self.role_info = {
'name': 'role_{0}'.format(
str(uuid.uuid4())
)
}
def tearDown(self):
super(TestKeystoneDbRoles, self).tearDown()
def test_initialization(self):
instance = self.model(
self.master,
self.db
)
self.assertEqual(self.master, instance.master)
self.assertEqual(self.db, instance.database)
self.assertIsNone(instance.admin_role_id)
self.assertIsNone(instance.viewer_role_id)
instance.initialize()
self.assertIsNotNone(instance.admin_role_id)
self.assertIsNotNone(instance.viewer_role_id)
def test_add_failure(self):
instance = self.model(
self.master,
DbFailure(),
)
with self.assertRaises(exceptions.KeystoneRoleError):
instance.add('br34k1ng4llth1ng$')
def test_add_user_role_by_id_failure(self):
instance = self.model(
self.master,
DbFailure(),
)
with self.assertRaises(exceptions.KeystoneRoleError):
instance.add_user_role_by_id(
tenant_id=0,
user_id=0,
role_id=1
)
def test_add_and_get(self):
instance = self.model(
self.master,
self.db
)
instance.initialize()
with self.assertRaises(exceptions.KeystoneRoleError):
instance.get(
self.role_info['name']
)
role_id = instance.add(
self.role_info['name']
)
role_data = instance.get(
self.role_info['name']
)
self.assertEqual(
role_id,
role_data['id']
)
self.assertEqual(
self.role_info['name'],
role_data['name']
)
@ddt.data(
'tenant',
'user',
'role',
None
)
def test_add_user_role_by_id(self, invalid_value):
role_name = 'phearB0t'
tenant = {
'name': 'megaTokyo',
'description': 'US Manga'
}
user = {
'name': 'largo',
'email': '[email protected]',
'password': '3l1t30n3$rul3',
'apikey': 'p4$$w0rd$suck'
}
tenant_id = self.tenants.add(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=True
)
user_id = self.users.add(
tenant_id=tenant_id,
username=user['name'],
email=user['email'],
password=user['password'],
apikey=user['apikey'],
enabled=True
)
role_id = self.roles.add(
role_name
)
if invalid_value is None:
self.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=role_id,
)
user_roles = self.roles.get_user_roles(
tenant_id=tenant_id,
user_id=user_id,
)
self.assertEqual(1, len(user_roles))
for user_role in user_roles:
self.assertEqual(role_id, user_role['id'])
self.assertEqual(role_name, user_role['name'])
else:
with self.assertRaises(exceptions.KeystoneRoleError):
self.roles.add_user_role_by_id(
tenant_id=tenant_id if invalid_value != 'tenant' else None,
user_id=user_id if invalid_value != 'user' else None,
role_id=role_id if invalid_value != 'role' else None
)
def test_add_user_role_by_name(self):
role_name = 'phearB0t'
tenant = {
'name': 'megaTokyo',
'description': 'US Manga'
}
user = {
'name': 'largo',
'email': '[email protected]',
'password': '3l1t30n3$rul3',
'apikey': 'p4$$w0rd$suck'
}
tenant_id = self.tenants.add(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=True
)
user_id = self.users.add(
tenant_id=tenant_id,
username=user['name'],
email=user['email'],
password=user['password'],
apikey=user['apikey'],
enabled=True
)
role_id = self.roles.add(
role_name
)
self.roles.add_user_role_by_role_name(
tenant_id=tenant_id,
user_id=user_id,
role_name=role_name
)
user_roles = self.roles.get_user_roles(
tenant_id=tenant_id,
user_id=user_id,
)
self.assertEqual(1, len(user_roles))
for user_role in user_roles:
self.assertEqual(role_id, user_role['id'])
self.assertEqual(role_name, user_role['name'])
@ddt.data(
0,
1,
20
)
def test_get_user_roles(self, role_count):
tenant = {
'name': 'megaTokyo',
'description': 'US Manga'
}
user = {
'name': 'largo',
'email': '[email protected]',
'password': '3l1t30n3$rul3',
'apikey': 'p4$$w0rd$suck'
}
tenant_id = self.tenants.add(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=True
)
user_id = self.users.add(
tenant_id=tenant_id,
username=user['name'],
email=user['email'],
password=user['password'],
apikey=user['apikey'],
enabled=True
)
role_names = [
'ph34rb0t_{0}'.format(x)
for x in range(role_count)
]
roles = [
{
'name': name,
'id': self.roles.add(name)
}
for name in role_names
]
for role in roles:
self.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=role['id']
)
user_roles = self.roles.get_user_roles(
tenant_id=tenant_id,
user_id=user_id,
)
self.assertEqual(role_count, len(user_roles))
def find_index(rolename):
for x in range(len(roles)):
if roles[x]['name'] == rolename:
return x
return None
for user_role in user_roles:
role_index = find_index(user_role['name'])
self.assertIsNotNone(role_index)
role_info = roles[role_index]
self.assertEqual(role_info['id'], user_role['id'])
self.assertEqual(role_info['name'], user_role['name'])
|
The people responsible for the crash of a Bulava intercontinental ballistic missile at the start of September will be held accountable and possibly fired, a Russian military official told reporters Friday.
The response to the Sept. 6 launch failure of a Bulava submarine-launched ballistic missile (SLBM) will be "implemented very harshly," said Oleg Bochkarev, deputy chairman of the Military-Industrial Commission, adding that personnel would not be exempt from the consequences of the failure.
"Of course they'll get to the bottom of it, don't doubt it for a second," he said.
Bochkarev said that the commission is still investigating what caused the missile to malfunction in the second minute of its flight during state trials of the Alexander Nevsky nuclear-powered submarine in the White Sea.
He told RIA Novosti earlier this week that all Bulava missiles from the same batch as the one that failed on Sept. 6 will undergo additional tests by their manufacturer.
Russia's Defense Minister Sergei Shoigu also ordered that trials of two nuclear submarines be halted as a result of the crash.
Including this latest failure, 8 out of 20 test launches of the troubled Bulava have been officially declared unsuccessful.
Despite repeated problems with the missile, the Russian military maintains that there is no alternative to the three-stage Bulava, which carries up to 10 MIRV warheads, has a range of over 8,000 kilometers (5,000 miles) and is designed for deployment on Borey-class nuclear submarines.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
===============================
Shimehari.app
~~~~~~~~~~~~~
Modeled loosely on Flask and friends.
The aim is a design that keeps working
even when the routing becomes somewhat complex.
===============================
"""
import os
import sys
from threading import Lock
from functools import update_wrapper
from datetime import timedelta
from werkzeug.exceptions import HTTPException, InternalServerError, BadRequest
from werkzeug.routing import RequestRedirect, Rule
from .helpers import _Kouzi, findPackage, getHandlerAction, getModulesFromPyFile, getEnviron, \
lockedCachedProperty, getTemplater
from .contexts import RequestContext, AppContext
from .routing import Router
from core.config import RESTFUL_ACTIONS
from .wrappers import Request, Response
from shimehari.configuration import Config, ConfigManager
from shimehari.session import SessionStore
from shimehari.shared import _requestContextStack, _SharedRequestClass, request
from shimehari.template import _defaultTemplateCtxProcessor
from shimehari.core.exceptions import ShimehariSetupError
from shimehari.core.signals import appContextTearingDown, requestContextTearingDown, requestStarted, requestFinished, gotRequestException
_loggerLock = Lock()
defaultHost = '127.0.0.1'
defaultPort = 5959
def setupMethod(f):
def wrapperFunc(self, *args, **kwargs):
if self.debug and self._gotFirstRequest:
raise AssertionError('Setup seems to have already completed ...')
return f(self, *args, **kwargs)
return update_wrapper(wrapperFunc, f)
class Shimehari(_Kouzi):
u"""Shimehari Object は WSGI アプリケーションとして必要な機能を実装しており、
アプリケーションの中心となるオブジェクトです。
メインモジュール、または __init__.py ファイルの中で以下のように書くことで
Shimehari インスタンスを生成することができます。
.. code-block:: python
from shimehari import Shimehari
app = Shimehari(__name__)
ただし、Shimehari では通常コマンドラインでアプリケーションを生成することを推奨しているので
あなたが直接インスタンス生成のコードを書くことはそうそうないと思われます。
:param importName: アプリケーションのパッケージ名
:param staticURL: サイト内共通、静的ファイルの URL
:param staticFolder: 静的ファイルが格納されているディレクトリ
:param appFolder: アプリケーション全体が格納されているディレクトリ
:param controllerFolder: コントローラーが格納されているディレクトリ
:param viewFolder: ビューが格納されているディレクトリ
:param assetsFolder: アセットファイルが格納されているディレクトリ
:param instancePath: アプリケーションのための代替インスタンスパス
"""
currentEnv = getEnviron()
debug = None
testing = None
requestClass = Request
responseClass = Response
testClientCls = None
teardownAppContextFuncs = []
defaultConfig = {
'DEBUG': False,
'TEST': False,
'APP_DIRECTORY': 'app',
'CONTROLLER_DIRECTORY': 'controllers',
'VIEW_DIRECTORY': 'views',
#for daiginjou
'MODEL_DIRECTORY': 'models',
'PREFERRED_URL_SCHEME': 'http',
'AUTO_SETUP': True,
'TEMPLATE_ENGINE': 'jinja2',
'TRAP_HTTP_EXCEPTIONS': False,
'SERVER_NAME': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'SECRET_KEY': '_secret_shimehari'
}
templateOptions = {}
sessionStore = SessionStore()
sharedRequestClass = _SharedRequestClass
allowedMethods = set(['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH'])
bodylessMethods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE'])
def __init__(self, importName,
staticURL=None, staticFolder='static',
appFolder='app', controllerFolder='controllers',
viewFolder='views', assetsFolder='assets',
instancePath=None, isRelativeConfig=False, templateOptions={}):
_Kouzi.__init__(self, importName, appFolder=appFolder,
controllerFolder=controllerFolder, viewFolder=viewFolder)
if instancePath is None:
self._instancePath = self.getInstancePath()
self._logger = None
self.loggerName = self.importName
self.config = self.getConfig()
self.controllers = {}
self.urlValuePreprocesors = {}
self.beforeRequestFuncs = {}
self.beforeFirstRequestFuncs = []
self.urlDefaultFuncs = {}
self.afterRequestFuncs = {}
self._errorHandlers = {}
self.errorHandlerSpec = {None: self._errorHandlers}
self.buildErrorHandlers = None
self.teardownRequestContextFuncs = {}
self.templateContextProcessors = {
None: [_defaultTemplateCtxProcessor]
}
#CSRF
from shimehari.crypt import CSRF
self.csrf = CSRF(self)
self._router = Router()
self._gotFirstRequest = False
self._beforeRequestLock = Lock()
self.debug = self.config['DEBUG']
self.test = self.config['TEST']
self.sessionKey = self.config['SESSION_COOKIE_NAME']
self.useXSendFile = self.config['USE_X_SENDFILE']
self.templateOptions = templateOptions
if self.config['AUTO_SETUP']:
self.setup()
@lockedCachedProperty
def name(self):
if self.importName == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.importName
@property
def gotFirstRequest(self):
return self._gotFirstRequest
@property
def propagateExceptions(self):
return self.testing or self.debug
@property
def preserveContextOnException(self):
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
if self._logger and self._logger.name == self.loggerName:
return self._logger
with _loggerLock:
if self._logger and self._logger.name == self.loggerName:
return self._logger
from shimehari.logging import createLogger
self._logger = rv = createLogger(self.loggerName)
return rv
def router():
u"""アプリケーションのルーティングを管理するルーターを設定します。"""
def fget(self):
return self._router
def fset(self, value):
self.setControllerFromRouter(value)
self._router = value
def fdel(self):
self.controllers = {}
del self._router
return locals()
router = property(**router())
def getInstancePath(self):
u"""インスタンスパスを返します。"""
prefix, pkgPath = findPackage(self.importName)
if prefix is None:
return os.path.join(pkgPath, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def getConfig(self):
u"""現在アプリケーションに適用されているコンフィグを返します。"""
configs = ConfigManager.getConfigs()
try:
# from .config import config
configs = ConfigManager.getConfigs()
except ImportError:
pass
if not configs:
cfg = Config(self.currentEnv, self.defaultConfig)
ConfigManager.addConfig(cfg)
return cfg
else:
return configs[self.currentEnv]
def saveSession(self, session, response):
u"""セッションを保存します。
:param session: 保存したいセッション
:param response: レスポンス
"""
if session.should_save:
self.sessionStore.save(session, response)
response.set_cookie(self.sessionKey, session.sid)
return response
def openSession(self, request):
sid = request.cookies.get(self.sessionKey, None) or request.values.get(self.sessionKey, None)
if sid is None:
return self.sessionStore.new()
else:
return self.sessionStore.get(sid)
def setControllerFromRouter(self, router):
u"""設定されたルーターからコントローラーをバインディングします。
:param router: ルーター
"""
if not self.controllers:
self.controllers = {}
for rule in router._rules:
self.controllers[rule.endpoint] = rule.endpoint
def addController(self, controller):
u"""アプリケーションにコントローラーを追加します。
:param controller: 追加したいコントローラー。
追加されたコントローラーはアプリケーションの管理下に置かれ、
ルーティングが自動生成されます。
"""
for action in RESTFUL_ACTIONS:
handler = getHandlerAction(controller, action)
if handler is not None:
self.controllers[handler] = handler
def addRoute(self, url, func, methods=None, **options):
rule = Rule(url, endpoint=func.__name__, methods=methods)
self.controllers[func.__name__] = func
self.router.add(rule)
def logException(self, excInfo):
self.logger.error('excepts on %s [%s]' % (request.path, request.method), exc_info=excInfo)
def injectURLDefaults(self, endpoint, values):
funcs = self.urlDefaultFuncs.get(None, ())
for func in funcs:
func(endpoint, values)
@lockedCachedProperty
def templateLoader(self):
return self.templater.templateLoader
@lockedCachedProperty
def templateEnv(self):
rv = self.templater.templateEnv()
return rv
def createTemplateEnvironment(self):
rv = self.templater.createTemplateEnvironment()
return rv
def createGlobalTemplateLoader(self):
return self.templater.dispatchLoader(self)
def updateTemplateContext(self, context):
self.templater.updateTemplateContext(context)
def setup(self):
u"""アプリケーションをセットアップします。
指定された app ディレクトリ配下にあるコントローラー、ルーターを探し出しバインドします。
"""
self.appPath = os.path.join(self.rootPath, self.appFolder)
if not os.path.isdir(self.appPath):
raise ShimehariSetupError('Application directory is not found\n%s' % self.rootPath)
sys.exit(0)
try:
__import__(self.appFolder)
self.setupTemplater()
self.setupBindController()
self.setupBindRouter()
except (ImportError, AttributeError):
raise ShimehariSetupError('Application directory is invalid')
def setupTemplater(self):
try:
self.templater = getTemplater(self, self.config['TEMPLATE_ENGINE'], templateOptions=self.templateOptions)
except Exception, e:
raise ShimehariSetupError('setup template engine was failed... \n%s' % e)
def setupBindController(self):
u"""コントローラーをバインドします"""
self.controllerPath = os.path.join(self.appPath, self.controllerFolder)
if not os.path.isdir(self.controllerPath):
raise ShimehariSetupError('Controller in the specified directory does not exist. %s' % self.controllerPath)
try:
ctrlDir = self.appFolder + '.' + self.controllerFolder
__import__(ctrlDir)
getModulesFromPyFile(self.controllerPath, self.rootPath)
except (ImportError, AttributeError), error:
raise ShimehariSetupError('setup controller was failed... \n%s' % error)
def setupBindRouter(self):
u"""ルーターをバインドします。"""
try:
routerFile = self.appFolder + '.' + 'router'
routerMod = __import__(routerFile, fromlist=['router'])
if hasattr(routerMod, 'appRoutes'):
self.router = routerMod.appRoutes
if self.hasStaticFolder:
for url in self.getStaticURLs():
self.addRoute(url + '/<path:filename>', self.sendStaticFile)
except (ImportError, AttributeError), e:
raise ShimehariSetupError('Failed to setup the router ...\n details::\n%s' % e)
@setupMethod
def beforeRequest(self, f):
u"""リクエストを処理する前に実行したいメソッドを登録します。
:param f: リクエスト処理前に実行させたい処理
"""
self.beforeRequestFuncs.setdefault(None, []).append(f)
return f
@setupMethod
def beforeFirstRequest(self, f):
u"""アプリケーションに対し初めてリクエストがあったときのみ、
リクエストを処理する前に実行したいメソッドを登録します。
:param f: 初めてのリクエスト処理前に実行したい処理
"""
self.beforeFirstRequestFuncs.append(f)
@setupMethod
def afterRequest(self, f):
u"""リクエスト処理が終わった後に実行したいメソッドを登録します。
:param f: リクエスト処理後に実行したい処理
"""
self.afterRequestFuncs.setdefault(None, []).append(f)
return f
@setupMethod
def urlValuePreprocessor(self, f):
u"""リクエストの前に実行したい処理を登録します。
:param f: 実行したい処理
"""
self.urlValuePreprocesors.setdefault(None, []).append(f)
return f
@setupMethod
def tearDownAppContext(self, f):
self.teardownAppContextFuncs.append(f)
return f
@setupMethod
def errorHandler(self, codeOrException):
def decorator(f):
self._registerErrorHandler(None, codeOrException, f)
return f
return decorator
@setupMethod
def _registerErrorHandler(self, key, codeOrException, f):
if isinstance(codeOrException, HTTPException):
codeOrException = codeOrException.code
if isinstance(codeOrException, (int, long)):
assert codeOrException != 500 or key is None
self.errorHandlerSpec.setdefault(key, {})[codeOrException] = f
else:
self.errorHandlerSpec.setdefault(key, {}).setdefault(None, []).append((codeOrException, f))
def tryTriggerBeforeFirstRequest(self):
u"""アプリケーションに対し最初のリクエストがあった時のみに行う処理を
実際に実行します。
"""
if self._gotFirstRequest:
return
with self._beforeRequestLock:
if self._gotFirstRequest:
return
self._gotFirstRequest = True
[f() for f in self.beforeFirstRequestFuncs]
def createAdapter(self, request):
u"""url adapter を生成します
:param request: 元となるリクエストオブジェクト
"""
if request is not None:
if request.environ['REQUEST_METHOD'] == 'POST':
method = request.form.get('_method', '').upper() or request.environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper()
if method in self.allowedMethods:
method = method.encode('ascii', 'replace')
request.environ['REQUEST_METHOD'] = method
if method in self.bodylessMethods:
request.environ['CONTENT_LENGTH'] = 0
return self.router.bind_to_environ(request.environ)
        # Not quite sure what this branch is for
if self.config['SERVER_NAME'] is not None:
return self.router.bind(
self.config['SERVER_NAME'],
script_name=self.config['APP_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def appContext(self):
u"""アプリケーションコンテキストを返します。"""
return AppContext(self)
def requestContext(self, environ):
u"""リクエストコンテキストを返します。
:param environ: リクエスト環境変数
"""
return RequestContext(self, environ)
def doAppContextTearDonw(self, exc=None):
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardownAppContextFuncs):
func(exc)
        appContextTearingDown.send(self, exc=exc)
def doRequestContextTearDown(self, exc=None):
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardownRequestContextFuncs.get(None, ())):
func(exc)
requestContextTearingDown.send(self, exc=exc)
def preprocessRequest(self):
u"""リクエスト前に処理したい登録済みのメソッドを実行します。
:param rv: 登録した処理
"""
for func in self.urlValuePreprocesors.get(None, ()):
func(request.endpoint, request.viewArgs)
self.csrf.checkCSRFExempt()
self.csrf.csrfProtect()
for func in self.beforeRequestFuncs.get(None, ()):
rv = func()
if rv is not None:
return rv
def processResponse(self, response):
u"""レスポンスを返す前に処理したい登録済みのメソッドをを実行します。
:param response: レスポンス
"""
context = _requestContextStack.top
funcs = ()
if None in self.afterRequestFuncs:
funcs = self.afterRequestFuncs[None]
for handler in funcs:
response = handler(response)
if not self.sessionStore.isNullSession(context.session):
self.saveSession(context.session, response)
return response
def dispatchRequest(self):
u"""リクエストをもとに、レスポンスを発行します。
:param request: リクエスト
"""
self.tryTriggerBeforeFirstRequest()
try:
requestStarted.send(self)
rv = self.preprocessRequest()
if rv is None:
req = _requestContextStack.top.request
if req.routingException is not None:
self.raiseRoutingException(req)
rule = req.urlRule
rv = self.controllers[rule.endpoint](**req.viewArgs)
except Exception, e:
rv = self.makeResponse(self.handleUserException(e))
response = self.makeResponse(rv)
response = self.processResponse(response)
requestFinished.send(self, response=response)
return response
def makeResponse(self, rv):
u"""レスポンスを生成して返します。
:param rv: リクエスト
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('view function does not return a response.')
if not isinstance(rv, self.responseClass):
if isinstance(rv, basestring):
rv = self.responseClass(rv, headers=headers, status=status)
headers = status = None
else:
rv = self.responseClass.force_type(rv, request.environ)
if status is not None:
if isinstance(status, basestring):
rv.status = status
else:
rv.status_code = status
if headers:
rv.headers.extend(headers)
return rv
def handleException(self, e):
u"""エラーをハンドリングします。
:param e: エラー内容
"""
excType, excValue, excTb = sys.exc_info()
gotRequestException.send(self, exception=e)
handler = self.errorHandlerSpec[None].get(500)
if self.propagateExceptions:
if excValue is e:
raise excType, excValue, excTb
else:
raise e
self.logException((excType, excValue, excTb))
if handler is None:
return InternalServerError()
return handler(e)
def handleHTTPException(self, e):
handler = self.errorHandlerSpec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trapHTTPException(self, e):
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
def handleUserException(self, e):
excType, excValue, tb = sys.exc_info()
assert excValue is e
if isinstance(e, HTTPException) and not self.trapHTTPException(e):
return self.handleHTTPException(e)
appHandlers = self.errorHandlerSpec[None].get(None, ())
for typecheck, handler in appHandlers:
if isinstance(e, typecheck):
return handler(e)
raise excType, excValue, tb
def raiseRoutingException(self, request):
if not self.debug or not isinstance(request.routingException, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routingException
def testRequestContext(self, *args, **kwargs):
from shimehari.testing import makeTestEnvironBuilder
builder = makeTestEnvironBuilder(self, *args, **kwargs)
try:
return self.requestContext(builder.get_environ())
finally:
builder.close()
def handleBuildError(self, error, endpoint, **kwargs):
if self.buildErrorHandlers is None:
excType, excValue, tb = sys.exc_info()
if excValue is error:
raise excType, excValue, tb
else:
raise error
return self.buildErrorHandlers(error, endpoint, **kwargs)
def wsgiApp(self, environ, startResponse):
u"""WSGI アプリとして実行します。(であってるのか)
:param environ: 環境変数
:param startResponse: hoge
"""
with self.requestContext(environ):
try:
response = self.dispatchRequest()
except Exception, e:
response = self.makeResponse(self.handleException(e))
return response(environ, startResponse)
def drink(self, host=None, port=None, debug=None, **options):
u"""アプリを実行します。
:param host: ホスト名
:param port: ポート番号
:param debug: デバッグモードとして起動するかどうか
:param options: kwargs
"""
host = host or defaultHost
port = port or defaultPort
from werkzeug.serving import run_simple
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
from werkzeug._internal import _log
_log('info', ' * Shimehari GKGK!')
run_simple(host, port, self, **options)
finally:
self._gotFirstRequest = False
def run(self, host=None, port=None, debug=None, **options):
u"""アプリを実行します。
単純に drink メソッドをラップしているだけです。
WSGI 周りのライブラリや既存のコードで run を自動的に呼ぶ物がおおいので念のため。
"""
self.drink(host, port, debug, **options)
def testClient(self, useCookies=True):
cls = self.testClientCls
if cls is None:
from shimehari.testing import ShimehariClient as cls
return cls(self, self.responseClass, use_cookies=useCookies)
def __call__(self, environ, startResponse):
return self.wsgiApp(environ, startResponse)
def __str__(self):
return 'Shimehari WSGI Application Framework!'
|
Hundreds of thousands of small business owners could be due a hefty tax rebate from HMRC, according to capital allowances tax specialists, CA Tax Solutions.
It says that any company that owns a commercial building has a high probability of receiving a capital allowances tax windfall to the tune of thousands or even tens of thousands of pounds.
Businesses of all types could be eligible — from fish and chip shop owners, dry cleaners and dentists to grocery stores and estate agents. Research from accountancy firm Deloitte confirms that in nine cases out of 10, capital allowances reports will uncover a tax rebate for the owner of a commercial property.
With about 1.4 million commercial properties in the UK, the numbers are potentially huge. Claims can be made historically and CA Tax Solutions estimates that there is £65bn–£70bn of net tax rebate lying unclaimed in UK commercial property.
To date, the average rebate CA Tax Solutions has generated for smaller UK commercial property owners is £25,000 net and the biggest tax rebate more than £10m net.
It says smaller businesses that own commercial property are most likely to be due a rebate as their accountants will often not understand the intricacies of capital allowances.
Any small business owner with commercial property who feels that they may be due a rebate should contact a capital allowances specialist.
|
from mtgsdk import Card
import json, discord
prop = {'name','multiverse_id','layout','names','mana_cost','cmc','colors','type','supertypes','subtypes','rarity','text','flavor','artist','number','power','toughness','loyalty','variations','watermark','border','timeshifted','hand','life','reserved','release_date','starter','rulings','foreign_names','printings','original_text','original_type','legalities','source','image_url','set','set_name','id'}
run = 'card_adv = Card'
def adv(str_input):
return '='.join(str_input.split(' ')).lower()
def reduce(str_input):
return '-'.join(str_input.split(' ')).lower()
def http_image(uid):
return 'https://image.deckbrew.com/mtg/multiverseid/'+ str(uid) +'.jpg'
def http_address(set,name):
return 'http://store.tcgplayer.com/magic/'+reduce(set)+'/'+reduce(name)
def http_parse(str_input):
return '%20'.join(str_input.split(' '))
client = discord.Client()
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
if message.content.startswith('!magic'):
msg_com = message.content.split('-')
msg_com.pop(0)
        for msg in msg_com:
            if not msg.strip():
                continue
            if 'help' in msg.lower():
print('help')
                await client.send_message(message.channel,'Magic Card Bot \n --help : Displays this message \n -s_reg : Followed by a string, searches for that string \n -m_uid : Searches cards by multiverse id \n -s_adv : Not currently finished')
elif 'm_uid' in msg.lower():
print(msg[6:])
card_m = Card.find(msg[6:])
print(http_address(card_m.set_name,card_m.name))
await client.send_message(message.channel,http_address(card_m.set_name,card_m.name))
print(http_image(card_m.multiverse_id))
await client.send_message(message.channel,http_image(card_m.multiverse_id))
elif 's_reg' in msg.lower():
print(http_parse(msg[6:]))
card_s = Card.where(name=msg[6:]).all()
for s_card in card_s:
print(http_address(s_card.set_name,s_card.name))
await client.send_message(message.channel,http_address(s_card.set_name,s_card.name))
print(http_image(s_card.multiverse_id))
await client.send_message(message.channel,http_image(s_card.multiverse_id))
elif 's_adv' in msg.lower():
await client.send_message(message.channel,'This command is disabled')
else:
print('RIP something went wrong')
await client.send_message(message.channel, 'RIP something went wrong')
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.run('Bot Token')
|
Twenty-three years. That's how long Chrysa Robertson's Rancho Pinot restaurant has been open, making it one of Arizona's longest-running restaurants. Robertson was raised in Arizona and trained under Carol Steele — in fact, she was one of C. Steele & Co.'s first and longest employees — but then left the state to work with top chefs such as Nancy Silverton and Mark Peel at Campanile and Hiro Sone at Terra in Napa Valley.
She came back to the Valley in 1993 to open Rancho Pinot Grill at its original location in Town & Country in Phoenix. It's been years since she moved to the Paradise Valley/Scottsdale border and dropped the "Grill" portion of the name. But the restaurant still remains one of the city's top dining destinations, serving Arizona-inspired food that's as memorable as Robertson herself.
My go-to place for lunch in Phoenix is Noble Eatery. The "Bowl of Love," as I know it. Not sure what they officially call it, but it's that grain, seed, and vegetable situation. The sandwiches are wonderful, too.
My restaurant pet peeve is when servers interrupt you to ask how everything is tasting.
Food should always be delicious and recognizable. I have a hard time with tricky, deconstructed, exploded, foamed, dabbed, doodled, or dusted food that's been manipulated into oblivion. I prefer beautiful, naturally plated food that tastes like what it "is."
My personal mantra in the kitchen is inspect what you expect.
The most memorable meal I've had recently was at Bianco's. Available in late spring, the artichokes from Claudio, the Artichoke Man. Braised and finished in the wood oven with smoked mozzarella fonduta, that special Italian mint, and bread crumbs. A lovely dish after my own heart! And for dessert, Marco's bread dipped in the last of my red wine.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""nk_exectuable will store a single executable experiment, designed in SpineML
SpineML is a declaratice Spiking neuron modelling language.
"""
#import os # STD lib imports first
#import sys # alphabetical
#import some_third_party_lib # 3rd party stuff next
#import some_third_party_other_lib # alphabetical
import argparse
import numpy as np
import pdb
import networkx as nx
import h5py
import libSpineML
from libSpineML import smlBundle
from libSpineML import smlExperiment
from neurokernel.core_gpu import Manager
from libSpineML2NK.LPU import LPU
import nk_utils
import nk_manager
#import local_stuff # local stuff last
#import more_local_stuff
#import dont_import_two, modules_in_one_line # IMPORTANT!
#from pyflakes_cannot_handle import * # and there are other reasons it should be avoided # noqa
# Using # noqa in the line above avoids flake8 warnings about line length!
class Executable(object):
"""Executable Neurokernel Object
Can take a libSpineML bundle, or a SpineML Experiment file
"""
def __init__(self, experiment=None):
self.params = {}
if type(experiment) is str:
self.bundle = smlBundle.Bundle()
self.bundle.add_experiment(experiment,True)
exp = self.bundle.experiments[0].Experiment[0]
self.params['name'] = exp.get_name()
elif type(experiment) is smlBundle.Bundle:
self.bundle = experiment
exp = self.bundle.experiments[0].Experiment[0]
self.params['name'] = exp.get_name()
else:
self.bundle = smlBundle.Bundle()
self.params['name'] = 'No_name'
self.network = nx.MultiDiGraph()
#self.network = nx.DiGraph()
self.inputs = np.zeros((0, 0), dtype=np.double)
self.time = np.zeros((0, 0), dtype=np.double)
self.debug = False
self.log = False
def execute(self):
"""Execute the model, after processing the class
This method will create the Input file and Network file dynamically
        As a minimum, this function will need to create a params dictionary as required by nk_manager
with the following keys:
params['name'],
params['dt'],
params['n_dict'],
params['s_dict'],
input_file=params['input_file'],
output_file=params['output_file'],
components=params['components'])
params['steps']
"""
self.params['input_file'] = self.params['name'] + '_input.h5'
self.params['output_file'] = self.params['name'] + '_output.h5'
self.process_experiment()
self.process_network()
self.process_component()
## output the input
self.save_input()
self.save_network()
#nk_manager.launch_nk(self.params)
from nk_manager import launch_nk
launch_nk(self.params,self.debug,self.log)
def set_debug(self, debug=True):
self.debug = debug
def set_log(self,log=True):
self.log = log
def process_experiment(self,bundleIndex=0,expIndex=0):
"""Process to the experiment file to extract NK relevant objects
Each bundle can store many experiments, bundleIndex dictates the
SpineML experient to use. Similary each SpineML Experiment can
contain several experiment types, and so provisions are made for
acommodating multiple experiments.
"""
# Extract input and output files
exp = self.bundle.experiments[bundleIndex].Experiment[expIndex]
self.params['name'] = exp.get_name()
# save everything in standard units before saving
self.params['dt'] = nk_utils.units(float(exp.Simulation.AbstractIntegrationMethod.dt),'mS')
self.params['steps'] = nk_utils.units(float(exp.Simulation.duration),'S') / self.params['dt']
self.params['num_neurons'] = 0;
for n in self.bundle.networks[0].Population:
self.params['num_neurons']+= n.Neuron.size
######################################################################
# Correct dt and time to be in standard
#####################################################################
self.inputs = np.zeros((self.params['steps'], self.params['num_neurons']), dtype=np.double)
self.time = time = np.arange(0,self.params['dt']*self.params['steps'] , self.params['dt'])
        # Process Lesions
        # Process Configurations
def process_network(self):
"""Process to the experiment file to extract NK relevant objects
"""
# extract input file
# extract network file
# create n_dict
# create s_dict
exp_name = self.bundle.index.keys()[0]
model_name = self.bundle.index[exp_name]['network'].keys()[0]
populations = self.bundle.index[exp_name]['network'][model_name].Population
lpu_index = 0
for p in populations:
lpu_start = lpu_index; # Start position for each neuron
for n in np.arange(0,p.Neuron.size):
self.add_neuron(p.Neuron.url,p.Neuron.Property,lpu_index,n,p.Neuron.name,exp_name)
lpu_index +=1
for i in self.bundle.index[exp_name]['experiment'][exp_name].Experiment[0].AbstractInput:
if p.Neuron.name == i.target:
self.initialise_input(i,lpu_start,p.Neuron.size)
self.params['graph'] = self.network
(n_dict, s_dict) = LPU.graph_to_dicts(self.params['graph'])
self.params['n_dict'] = n_dict
self.params['s_dict'] = s_dict
def initialise_input(self,params,lpu_start,lpu_size):
# initialise an input in the matrix for a given input to a population
itype = type(params)
if (itype == smlExperiment.TimeVaryingArrayInputType):
self.inputs = nk_utils.TimeVaryingArrayInput(params,lpu_start,lpu_size,self.time,self.inputs)
elif (itype == smlExperiment.ConstantInputType):
self.inputs = nk_utils.ConstantInput(params,lpu_start,lpu_size,self.time,self.inputs)
elif (itype == smlExperiment.ConstantArrayInputType):
self.inputs = nk_utils.ConstantArrayInput(params,lpu_start,lpu_size,self.time,self.inputs)
elif (itype == smlExperiment.TimeVaryingInputType):
self.inputs = nk_utils.TimeVaryingInput(params,lpu_start,lpu_size,self.time,self.inputs)
else:
raise TypeError('type %s is not recognised as an input type' %str(itype))
def standard_neurons(self,model):
""" provide the base neuron parameters from neurokernel, which are not in SpineML """
""" DEPRECIATED TO ALLOW C_GENERATION
URL is used to load the correct kernel
WIP: Automatic discovery of extern, spiking and public based on component and connections
external is true, to work with input generation, this will not scale well
"""
return {'model': 'SpineMLNeuron','name': 'neuron_x','extern': True,'public': False,'spiking': True,'selector': '/a[0]','V':0,"url":model}
def add_neuron(self,model,props,lpu_index,p_index,pop_name,exp_name):
""" add a neuron to the gexf population,
where p_index is the neuron index within a population
"""
neuron = self.standard_neurons(model)
for p in props:
""" p example: 'C': {'dimension': 'nS','input':{'type':'FixedValue','value':1}} """
neuron[p.name] = nk_utils.gen_value(p,p_index)
neuron['name'] = 'neuron_' +str(lpu_index) # + '_' + str(p_index)
neuron['selector'] = '/'+pop_name+'[' +str(lpu_index) +']' #+ '[' + str(p_index)+']'
# Determine if the neuron will be spiking or gpot
# requires that only one output port exists
comp = self.bundle.index[exp_name]['component'][model]
for port in comp.ComponentClass.Port:
if type(port) is libSpineML.smlComponent.AnalogSendPortType:
neuron['spiking'] = False
break
if type(port) is libSpineML.smlComponent.ImpulseSendPortType:
neuron['spiking'] = True
break
##################################
#
# Swap V out with default parameter from output port
#
##################################
self.network.add_node(str(lpu_index),attr_dict=neuron)
def process_component(self):
"""Process to the experiment file to extract NK relevant objects
"""
exp_name = self.bundle.index.keys()[0]
self.params['components'] = self.bundle.index[exp_name]['component']
###############################################
# Added for output testing
##############################################
def save_input(self):
""" save the input file before running """
with h5py.File(self.params['input_file'], 'w') as f:
f.create_dataset('array', (self.params['steps'], self.params['num_neurons']),
dtype=np.double,
data=self.inputs)
def save_network(self):
""" save the network file before running """
nx.write_gexf(self.network, self.params['input_file'] +'.gexf.gz')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', default='none', type=str,
help='Path to a SpineML experiment file')
args = parser.parse_args()
    if args.experiment != 'none':
exe = Executable(args.experiment)
exe.execute()
else:
print "No Experiment Provided"
if __name__=='__main__':
main()
|
Published 04/25/2019 02:48:56 pm in Cree Pole Light.
Cree pole light: a 120 W chip-on-board LED array lamp module with an optical glass lens for the Cree CXB 3050.
|
import tensorflow as tf
import os, re
def get_checkpoints(checkpoint_dir):
'''
Finds all checkpoints in a directory and returns them in order
from least iterations to most iterations
'''
meta_list=[]
for file in os.listdir(checkpoint_dir):
if file.endswith('.meta'):
meta_list.append(os.path.join(checkpoint_dir, file[:-5]))
meta_list = sort_nicely(meta_list)
return meta_list
def sort_nicely(l):
"""
Sort the given list in the way that humans expect.
From Ned Batchelder
https://nedbatchelder.com/blog/200712/human_sorting.html
"""
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
def tryint(s):
try:
return int(s)
except:
return s
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
l.sort(key=alphanum_key)
return l
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def optimistic_restore(session, save_file, variable_scope=''):
'''
A Caffe-style restore that loads in variables
if they exist in both the checkpoint file and the current graph.
Call this after running the global init op.
By DanielGordon10 on December 27, 2016
https://github.com/tensorflow/tensorflow/issues/312
With RalphMao tweak.
bpugh, July 21, 2017: Added a variable_scope so that a network can be
loaded within a tf.variable_scope() and still have weights restored.
'''
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
    if variable_scope == '':
saved_shapes_scoped = saved_shapes
offset = 0
else:
saved_shapes_scoped = [variable_scope + '/' + x for x in saved_shapes]
offset = len(variable_scope) + 1
var_names = []
for var in tf.global_variables():
search_term = var.name.split(':')[0]
if search_term in saved_shapes_scoped:
var_names.append((var.name.split(':')[0], var.name.split(':')[0][offset:]))
name2var = dict(zip(map(lambda x:x.name.split(':')[0],
tf.global_variables()), tf.global_variables()))
restore_variables = []
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
try:
curr_var = name2var[var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
found_variable = tf.get_variable(var_name)
restore_variables.append(found_variable.assign(reader.get_tensor(saved_var_name)))
except:
print("{} couldn't be loaded.".format(saved_var_name))
session.run(restore_variables)
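# ---------------------------------------------------------------------------
# Minimal usage sketch (added illustration, not part of the original module).
# The directory, variable, and session names below are assumptions chosen for
# the demo. It follows the call order described in optimistic_restore's
# docstring: run the global init op first, then restore whatever variables the
# checkpoint shares with the current graph.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import tempfile
    demo_logdir = tempfile.mkdtemp()
    demo_var = tf.get_variable('demo_w', shape=[2, 2],
                               initializer=tf.ones_initializer())
    demo_saver = tf.train.Saver()
    with tf.Session() as demo_sess:
        demo_sess.run(tf.global_variables_initializer())
        save(demo_saver, demo_sess, demo_logdir, step=0)   # writes model.ckpt-0
        ckpts = get_checkpoints(demo_logdir)               # oldest -> newest
        demo_sess.run(tf.global_variables_initializer())   # wipe values again
        optimistic_restore(demo_sess, ckpts[-1])           # reload matching vars
        print(demo_sess.run(demo_var))                     # restored weights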
|
Published 04/19/2019 02:18:54 am in The Best Way To Paint A Room.
|
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.orm import relationship
from htsohm.db import Base, GasLoading, SurfaceArea, VoidFraction
class Material(Base):
"""Declarative class mapping to table storing material/simulation data.
Attributes:
id (int): database table primary_key.
run_id (str): identification string for run.
"""
__tablename__ = 'materials'
id = Column(Integer, primary_key=True)
run_id = Column(String(50))
seed = Column(Float)
# structure properties
unit_cell_volume = Column(Float)
number_density = Column(Float)
average_epsilon = Column(Float)
average_sigma = Column(Float)
# structure property bins
density_bin = Column(Integer)
epsilon_bin = Column(Integer)
sigma_bin = Column(Integer)
# relationships
gas_loading = relationship("GasLoading")
surface_area = relationship("SurfaceArea")
void_fraction = relationship("VoidFraction")
    def __init__(self, run_id=None, seed=None):
"""Init material-row.
Args:
self (class): row in material table.
run_id : identification string for run (default = None).
Initializes row in materials datatable.
"""
self.seed = seed
self.run_id = run_id
def clone(self):
copy = super(Material, self).clone()
return copy
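# ---------------------------------------------------------------------------
# Minimal usage sketch (added illustration, not part of the original module).
# It assumes htsohm's Base is a standard SQLAlchemy declarative base and that
# the related tables (gas loadings, surface areas, void fractions) declare the
# usual foreign keys back to `materials`; under those assumptions an in-memory
# SQLite database is enough to store and query a Material row.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')        # throwaway in-memory database
    Base.metadata.create_all(engine)           # create materials + related tables
    session = sessionmaker(bind=engine)()
    material = Material(run_id='example-run', seed=0.42)
    material.unit_cell_volume = 1000.0         # structure properties set later
    session.add(material)
    session.commit()
    print(session.query(Material).filter_by(run_id='example-run').count())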
|
If you are lucky enough to have a vintage reel-to-reel recorder, then you know how important it is to keep it in the best shape possible – finding spare parts and a proper technician is getting harder and harder! One of the best ways to prevent damage to the internal components is by using a dust cover. DigitalDeckCovers manufactures the highest quality dust covers to preserve the life of your analog recording gear, ensuring that your great sound keeps coming for many years to come! Most recorder brands are already in our huge database, including Technics, Akai, Tascam, Sony, Teac, Pioneer, and more. If you don’t see your model, let us know and we are happy to add it.
|
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilities for managing wav files and labels for transcription."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import math
import librosa
from note_seq import audio_io
from note_seq import constants
from note_seq import sequences_lib
from note_seq.protobuf import music_pb2
import numpy as np
import tensorflow.compat.v1 as tf
def velocity_range_from_sequence(ns):
"""Derive a VelocityRange proto from a NoteSequence."""
velocities = [note.velocity for note in ns.notes]
velocity_max = np.max(velocities) if velocities else 0
velocity_min = np.min(velocities) if velocities else 0
velocity_range = music_pb2.VelocityRange(min=velocity_min, max=velocity_max)
return velocity_range
def find_inactive_ranges(note_sequence):
"""Returns ranges where no notes are active in the note_sequence."""
start_sequence = sorted(
note_sequence.notes, key=lambda note: note.start_time, reverse=True)
end_sequence = sorted(
note_sequence.notes, key=lambda note: note.end_time, reverse=True)
notes_active = 0
time = start_sequence[-1].start_time
inactive_ranges = []
if time > 0:
inactive_ranges.append(0.)
inactive_ranges.append(time)
start_sequence.pop()
notes_active += 1
# Iterate through all note on events
while start_sequence or end_sequence:
if start_sequence and (start_sequence[-1].start_time <
end_sequence[-1].end_time):
if notes_active == 0:
time = start_sequence[-1].start_time
inactive_ranges.append(time)
notes_active += 1
start_sequence.pop()
else:
notes_active -= 1
if notes_active == 0:
time = end_sequence[-1].end_time
inactive_ranges.append(time)
end_sequence.pop()
# if the last note is the same time as the end, don't add it
# remove the start instead of creating a sequence with 0 length
if inactive_ranges[-1] < note_sequence.total_time:
inactive_ranges.append(note_sequence.total_time)
else:
inactive_ranges.pop()
assert len(inactive_ranges) % 2 == 0
inactive_ranges = [(inactive_ranges[2 * i], inactive_ranges[2 * i + 1])
for i in range(len(inactive_ranges) // 2)]
return inactive_ranges
def _last_zero_crossing(samples, start, end):
"""Returns the last zero crossing in the window [start, end)."""
samples_greater_than_zero = samples[start:end] > 0
samples_less_than_zero = samples[start:end] < 0
samples_greater_than_equal_zero = samples[start:end] >= 0
samples_less_than_equal_zero = samples[start:end] <= 0
# use np instead of python for loop for speed
xings = np.logical_or(
np.logical_and(samples_greater_than_zero[:-1],
samples_less_than_equal_zero[1:]),
np.logical_and(samples_less_than_zero[:-1],
samples_greater_than_equal_zero[1:])).nonzero()[0]
return xings[-1] + start if xings.size > 0 else None
def find_split_points(note_sequence, samples, sample_rate, min_length,
max_length):
"""Returns times at which there are no notes.
The general strategy employed is to first check if there are places in the
sustained pianoroll where no notes are active within the max_length window;
if so the middle of the last gap is chosen as the split point.
If not, then it checks if there are places in the pianoroll without sustain
where no notes are active and then finds last zero crossing of the wav file
and chooses that as the split point.
If neither of those is true, then it chooses the last zero crossing within
the max_length window as the split point.
If there are no zero crossings in the entire window, then it basically gives
up and advances time forward by max_length.
Args:
note_sequence: The NoteSequence to split.
samples: The audio file as samples.
sample_rate: The sample rate (samples/second) of the audio file.
min_length: Minimum number of seconds in a split.
max_length: Maximum number of seconds in a split.
Returns:
A list of split points in seconds from the beginning of the file.
"""
if not note_sequence.notes:
return []
end_time = note_sequence.total_time
note_sequence_sustain = sequences_lib.apply_sustain_control_changes(
note_sequence)
ranges_nosustain = find_inactive_ranges(note_sequence)
ranges_sustain = find_inactive_ranges(note_sequence_sustain)
nosustain_starts = [x[0] for x in ranges_nosustain]
sustain_starts = [x[0] for x in ranges_sustain]
nosustain_ends = [x[1] for x in ranges_nosustain]
sustain_ends = [x[1] for x in ranges_sustain]
split_points = [0.]
while end_time - split_points[-1] > max_length:
max_advance = split_points[-1] + max_length
# check for interval in sustained sequence
pos = bisect.bisect_right(sustain_ends, max_advance)
if pos < len(sustain_starts) and max_advance > sustain_starts[pos]:
split_points.append(max_advance)
# if no interval, or we didn't fit, try the unmodified sequence
elif pos == 0 or sustain_starts[pos - 1] <= split_points[-1] + min_length:
# no splits available, use non sustain notes and find close zero crossing
pos = bisect.bisect_right(nosustain_ends, max_advance)
if pos < len(nosustain_starts) and max_advance > nosustain_starts[pos]:
# we fit, great, try to split at a zero crossing
zxc_start = nosustain_starts[pos]
zxc_end = max_advance
last_zero_xing = _last_zero_crossing(
samples, int(math.floor(zxc_start * sample_rate)),
int(math.ceil(zxc_end * sample_rate)))
if last_zero_xing:
last_zero_xing = float(last_zero_xing) / sample_rate
split_points.append(last_zero_xing)
else:
# give up and just return where there are at least no notes
split_points.append(max_advance)
else:
# there are no good places to cut, so just pick the last zero crossing
# check the entire valid range for zero crossings
start_sample = int(
math.ceil((split_points[-1] + min_length) * sample_rate)) + 1
end_sample = start_sample + (max_length - min_length) * sample_rate
last_zero_xing = _last_zero_crossing(samples, start_sample, end_sample)
if last_zero_xing:
last_zero_xing = float(last_zero_xing) / sample_rate
split_points.append(last_zero_xing)
else:
# give up and advance by max amount
split_points.append(max_advance)
else:
# only advance as far as max_length
new_time = min(np.mean(ranges_sustain[pos - 1]), max_advance)
split_points.append(new_time)
if split_points[-1] != end_time:
split_points.append(end_time)
# ensure that we've generated a valid sequence of splits
for prev, curr in zip(split_points[:-1], split_points[1:]):
assert curr > prev
assert curr - prev <= max_length + 1e-8
if curr < end_time:
assert curr - prev >= min_length - 1e-8
assert end_time - split_points[-1] < max_length
return split_points
def create_example(example_id, ns, wav_data, velocity_range=None):
"""Creates a tf.train.Example proto for training or testing."""
if velocity_range is None:
velocity_range = velocity_range_from_sequence(ns)
# Ensure that all sequences for training and evaluation have gone through
# sustain processing.
sus_ns = sequences_lib.apply_sustain_control_changes(ns)
example = tf.train.Example(
features=tf.train.Features(
feature={
'id':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[example_id.encode('utf-8')])),
'sequence':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[sus_ns.SerializeToString()])),
'audio':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[wav_data])),
'velocity_range':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[velocity_range.SerializeToString()])),
}))
return example
def process_record(wav_data,
ns,
example_id,
min_length=5,
max_length=20,
sample_rate=16000,
allow_empty_notesequence=False,
load_audio_with_librosa=False):
"""Split a record into chunks and create an example proto.
To use the full length audio and notesequence, set min_length=0 and
max_length=-1.
Args:
wav_data: audio data in WAV format.
ns: corresponding NoteSequence.
example_id: id for the example proto
min_length: minimum length in seconds for audio chunks.
max_length: maximum length in seconds for audio chunks.
sample_rate: desired audio sample rate.
allow_empty_notesequence: whether an empty NoteSequence is allowed.
load_audio_with_librosa: Use librosa for sampling. Works with 24-bit wavs.
Yields:
Example protos.
"""
try:
if load_audio_with_librosa:
samples = audio_io.wav_data_to_samples_librosa(wav_data, sample_rate)
else:
samples = audio_io.wav_data_to_samples(wav_data, sample_rate)
except audio_io.AudioIOReadError as e:
    print('Exception %s' % e)
return
samples = librosa.util.normalize(samples, norm=np.inf)
# Add padding to samples if notesequence is longer.
pad_to_samples = int(math.ceil(ns.total_time * sample_rate))
padding_needed = pad_to_samples - samples.shape[0]
if padding_needed > 5 * sample_rate:
raise ValueError(
'Would have padded {} more than 5 seconds to match note sequence total '
'time. ({} original samples, {} sample rate, {} sample seconds, '
'{} sequence seconds) This likely indicates a problem with the source '
'data.'.format(
example_id, samples.shape[0], sample_rate,
samples.shape[0] / sample_rate, ns.total_time))
samples = np.pad(samples, (0, max(0, padding_needed)), 'constant')
if max_length == min_length:
splits = np.arange(0, ns.total_time, max_length)
elif max_length > 0:
splits = find_split_points(ns, samples, sample_rate, min_length, max_length)
else:
splits = [0, ns.total_time]
velocity_range = velocity_range_from_sequence(ns)
for start, end in zip(splits[:-1], splits[1:]):
if end - start < min_length:
continue
if start == 0 and end == ns.total_time:
new_ns = ns
else:
new_ns = sequences_lib.extract_subsequence(ns, start, end)
if not new_ns.notes and not allow_empty_notesequence:
tf.logging.warning('skipping empty sequence')
continue
if start == 0 and end == ns.total_time:
new_samples = samples
else:
# the resampling that happen in crop_wav_data is really slow
# and we've already done it once, avoid doing it twice
new_samples = audio_io.crop_samples(samples, sample_rate, start,
end - start)
new_wav_data = audio_io.samples_to_wav_data(new_samples, sample_rate)
yield create_example(
example_id, new_ns, new_wav_data, velocity_range=velocity_range)
def mix_sequences(individual_samples, sample_rate, individual_sequences):
"""Mix multiple audio/notesequence pairs together.
All sequences will be repeated until they are as long as the longest sequence.
Note that the mixed sequence will contain only the (sustain-processed) notes
from the individual sequences. All other control changes and metadata will not
be preserved.
Args:
individual_samples: A list of audio samples to mix.
sample_rate: Rate at which to interpret the samples
individual_sequences: A list of NoteSequences to mix.
Returns:
mixed_samples: The mixed audio.
mixed_sequence: The mixed NoteSequence.
"""
# Normalize samples and sequence velocities before mixing.
# This ensures that the velocities/loudness of the individual samples
# are treated equally.
for i, samples in enumerate(individual_samples):
individual_samples[i] = librosa.util.normalize(samples, norm=np.inf)
for sequence in individual_sequences:
velocities = [note.velocity for note in sequence.notes]
velocity_max = np.max(velocities)
for note in sequence.notes:
note.velocity = int(
(note.velocity / velocity_max) * constants.MAX_MIDI_VELOCITY)
# Ensure that samples are always at least as long as their paired sequences.
for i, (samples, sequence) in enumerate(
zip(individual_samples, individual_sequences)):
if len(samples) / sample_rate < sequence.total_time:
padding = int(math.ceil(
(sequence.total_time - len(samples) / sample_rate) * sample_rate))
individual_samples[i] = np.pad(samples, [0, padding], 'constant')
# Repeat each ns/wav pair to be as long as the longest wav.
max_duration = np.max([len(s) for s in individual_samples]) / sample_rate
extended_samples = []
extended_sequences = []
for samples, sequence in zip(individual_samples, individual_sequences):
extended_samples.append(
audio_io.repeat_samples_to_duration(samples, sample_rate, max_duration))
extended_sequences.append(
sequences_lib.repeat_sequence_to_duration(
sequence, max_duration,
sequence_duration=len(samples) / sample_rate))
# Mix samples and sequences together
mixed_samples = np.zeros_like(extended_samples[0])
for samples in extended_samples:
mixed_samples += samples / len(extended_samples)
mixed_sequence = music_pb2.NoteSequence()
mixed_sequence.ticks_per_quarter = constants.STANDARD_PPQ
del mixed_sequence.notes[:]
for sequence in extended_sequences:
# Process sustain changes before copying notes.
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
if sus_sequence.total_time > mixed_sequence.total_time:
mixed_sequence.total_time = sus_sequence.total_time
# TODO(fjord): Manage instrument/program numbers.
mixed_sequence.notes.extend(sus_sequence.notes)
return mixed_samples, mixed_sequence
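# ---------------------------------------------------------------------------
# Minimal usage sketch (added illustration, not part of the original module).
# It builds a one-note NoteSequence, pairs it with one second of synthetic
# audio, and runs process_record with min_length=0 / max_length=-1 so that,
# as the docstring above notes, the full unsplit example is produced.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  demo_sample_rate = 16000
  demo_ns = music_pb2.NoteSequence(total_time=1.0)
  demo_ns.notes.add(pitch=60, velocity=80, start_time=0.25, end_time=0.75)
  demo_samples = 0.1 * np.sin(
      2 * np.pi * 440.0 * np.arange(demo_sample_rate) / demo_sample_rate)
  demo_wav = audio_io.samples_to_wav_data(demo_samples, demo_sample_rate)
  for demo_example in process_record(
      demo_wav, demo_ns, 'demo-0', min_length=0, max_length=-1,
      sample_rate=demo_sample_rate):
    print('serialized example size:', len(demo_example.SerializeToString()))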
|
When buying a Bresso Console Table by MEBLE NOVA, look for high-quality hardwood joinery. If a piece is merely glued together, or held together with nails and screws, it will not last long; it is worth paying more for a table with some proper wood joinery work on it. Also look closely at the legs of any table you are considering: they not only have to support weight, they will also be in contact with your flooring, so if they look likely to damage your floor because of the materials used (such as metal), you may want to choose a different design. When buying a console table for your living room, pick neutral colors; you can then change the color scheme and style of the room by swapping accents, paint, and other details, refreshing the look of the space without spending a lot of money.
It is hard to imagine a kitchen without some kind of bar stool. In fact, bar stools have become so popular that it is hard not to find one in a kitchen, and for good reason: they let friends, family, and other guests simply join you in the kitchen for a delicious meal and some good conversation. Buying the right bar stools can be a little tricky, though, especially if you do not know how to choose one, and because the kitchen is one of the most used and most important rooms in the house, it is natural to want the best stools for it. Fortunately, we have already done the research for you, and in the paragraphs below we have put together a list of the top 10 bar stools you can buy for your kitchen or bar. Once you have reviewed this list, you will easily be able to pick the right bar stools for your needs. Let's check them out!
Typically now utilized in houses with a living room or open up layout, they are usually large configurations that chair plenty of guests. In smaller sized areas, they works well for seats within an area which has an odd part or other room limitation. The ability to combine part models, end models and lying areas based on space and individual preference tends to make this couch style extremely flexible. Sectional couches also come in a multitude of styles, from extremely modern or extremely luxe, to more loved ones-friendly modern designs.
Although this reclining loveseat comes with a heftier price tag, its classic design, options for personalization and automated lying program make it well worth the dough. Its hand made wood body is cushioned with down blend cushions for a much softer feel than youd receive from high-density froth soft cushions, and also the furniture is available in a whopping 72 color and material combos. The supply of so many choices causes it to be extra easy to set this reclining loveseat with different patterns, materials, highlights and dcor styles. Plus, for those who have kids or animals (or both!), you have the option to choose a stronger, spot-proof material. 1 customer shared that the Sunbrella fabric materials has organized well against her pets and children. This reclining loveseat could have a classic, aged-school look, nevertheless its constructed-in technology is anything but. Not only does it have an automatic reclining program (that provides you with a complete recline, with your footrest completely parallel to the chair), but it has a Universal serial bus port for implementing or just getting your apple ipad, iPhone, Amazon kindle or laptop computer.
Does your room display room requirements? Are you willing to throw a sleep party within the mild of the absence of room to rest? This DHP Futon mattress would be well suited for you in cases like this. This Futon mattress couch by DHP includes the performance of a couch bed with a modern and stylish appearance. Place it in your family room to attain an extra tired bed during the night. A micro-fiber floor that took off in the centre would be a perfect combination. This unique sofa accompanies a tapestry with gleaming chrome locks and thighs. Bringing together these results with each other in a comfy well-liked style sofa. You may choose to have it in Faux Leather, Purple velvet or Bed linen. An element that is worth specifying is its back again design. In light of the different chairs, DHP promotes the progres of the couch to your comfort and ease levels. You are able to gain levels to have an animated babble, or for silent moving image evening.
This is actually the next sofa on the checklist. It's extra comfy and incredibly ideal for small rooms or loft living. It is padded in polyester fabric, potential customers inset control keys that give an elegant diamond-tufted design. It is constructed of durable materials and also the thighs are constructed with durable wooden to increase its durability. The loveseat has an java discolored wooden thighs and non-marking foot hats. It includes an appropriate froth cushioning and rayon fabric upholstery which makes it very luxurious. It has a longue place that gives an extraordinary room for relaxing.
This sofa is caused the bell-shaped contour combined with the higher armrest of the cushion. Additionally, the head is based on blocked corner sides. The legs, however, have joined the phony wood wrapping. The couch gives a comfortable and extravagant feeling. This is the direct result of extremely flexible cushioning inside the rayon upholstery. You are able to sit down easily for the moment to unwind. Each one of the shades provided with this sofa demonstrates its noteworthy component, Versatile style. The red sculpt could be ideal for each topics with mild and dark tones. That remains constant for various shades that it provides too. The couch is situated in space. Regardless of whether its Thanksgiving holiday or perhaps a huge loved ones reunion, you can rely on it. Marathon-watching yet still time collapsing in your sofa together with your buddies is really a genuine option.
If youre looking for some serious rest, this loveseats solid cushion back again and rolled arms will deliverbut at a higher cost. Every seat cushion consists of 30 individual pocketed coils for spring and support, and high strength seat foam for an additional coating of plushness and durability. In addition, the froth soft cushions are capped with super gentle, blended lower feathers. For your extra comfort and ease, there are 3 standard jobs: seated, a small lie down with an elevated footrest, along with a full lie down by having an raised footrest. It doesnt hurt this reclining loveseat appears lux, possibly. Its all-leather furniture and ornamental toe nail mind trim give it a classic look that may function with lots of home dcor designs, from traditional to country to arts and crafts. Plus, leather stands up well against everyday use, children and pet locks and nails. Just remember to situation the leather each and every six to 12 months.
This established is padded in a push and comfy textured cushioned purple velvet. This gives it the incredible comfort and ease that it is loved for. Again it supports chaise design seats for adequate comfort. Furthermore, the sinus spring base adds to the comfort and durability. Furthermore, the couch has hard wood structures that increase its toughness and durability. The sofa has cushions on both the seat and also the back. But still, the sofa can move from a seated placement to a lying down position easily. That said, simply to repeat, the sofa is extremely soft and gives exceptional comfort.
This set includes a one remaining arm sofa established, two armless couch sets, and something part sofa set. This provides enough room to support your friends and relations. The material is 100% polyester for sufficient comfort and durability. The advantage of this couch set is the matching and mixing of chairs within the space within the room for a perfect form. As well as enables fitted even in little areas. It takes just mild assembling.It also features push cushions for maximum comfort and ease. You might want to try this established. It does the job well.
If youre looking for some serious rest, this loveseats strong pillow back again and rolled arms will deliverbut at a higher price tag. Each seat cushioning contains 30 person pocketed circles for spring and assistance, and resilience seat froth for an extra coating of plushness and durability. In addition, the froth soft cushions are topped with super soft, combined lower feathers. For your extra comfort, you will find 3 regular jobs: seated, a small recline by having an elevated foot rest, and a complete recline with an elevated footrest. It doesnt harm that this lying loveseat appears lux, possibly. Its all-leather-based upholstery and decorative nail head cut give it a vintage appear that may function with a lot of home decorations styles, from traditional to nation to arts and crafts. In addition, leather-based stands up well against daily use, kids and dog locks and fingernails. Just remember to condition the leather-based every 6 to 12 months.
Just like its namesake, the camelback sofa has a hump or maybe two that accents the primary silhouette of the couch style. This is a traditional design that was made popular in the 1700s by Chippendale, and households who wanted an aristocratic, formal Farragutt Console Table frequently opted for this design. These days, this couch style lends a more formal air to any room, especially when upholstered in a formal material. A more casual fabric option would make it appropriate for a family room that does not need a great deal of formality. Either way, the camelback sofa has some elements that characterize the design and style. The legs are typically exposed, the sofa doesn't have back cushions and it normally has square or rolled arms.
Copyright © Bresso Console Table By MEBLE NOVA in Console Tables All right reserved.
|
# coding:utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import os
import bz2
import gzip
import shutil
import hashlib
import subprocess
from PIL import Image
from django.conf import settings
from django.contrib.sites.models import Site
from django.forms import ModelForm
from django.contrib import admin
from django.urls import reverse
from django.utils.safestring import mark_safe
from django_rq import queues
from preferences import preferences
from django.contrib.admin.actions import delete_selected
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from suit.widgets import AutosizedTextarea
from WEIPDCRM.models.build import Build
from WEIPDCRM.models.package import Package
from WEIPDCRM.models.version import Version
from WEIPDCRM.models.release import Release
from WEIPDCRM.models.debian_package import DebianPackage
from WEIPDCRM.tools import mkdir_p
if settings.ENABLE_REDIS is True:
import django_rq
def build_procedure(conf):
"""
This is the main package list building procedure.
"""
if not conf["build_p_diff"]:
# Build Package file
build_all_versions_enabled = conf["build_all"]
# Get Package List QuerySet
if build_all_versions_enabled:
version_set = Version.objects.filter(enabled=True).order_by('-id')
version_count = version_set.count()
else:
version_set = Version.objects.raw(
"SELECT * FROM `WEIPDCRM_version` "
"WHERE `enabled` = TRUE "
"GROUP BY `c_package` "
"ORDER BY `c_package`, `id` DESC"
)
version_count = 0
for version in version_set:
version_count += 1
# Check Empty
if version_count == 0:
raise ValueError(_("No enabled package available."))
# Preparing Temp Directory
build_temp_path = os.path.join(settings.TEMP_ROOT, str(conf["build_uuid"]))
if not os.path.exists(build_temp_path):
mkdir_p(build_temp_path)
# Create Temp Package file
build_temp_package = open(os.path.join(build_temp_path, "Packages"), "wb+")
# Generate Control List
depiction_url = ""
if preferences.Setting.advanced_mode:
site = Site.objects.get(id=settings.SITE_ID)
scheme = "http"
if settings.SECURE_SSL is True:
scheme = "https"
depiction_url = "%s://%s" % (scheme, site.domain)
for version_instance in version_set:
# !!! HERE WE SHOULD USE ADVANCED CONTROL DICT !!!
control_dict = version_instance.get_advanced_control_dict()
if (not version_instance.custom_depiction) and len(depiction_url) != 0:
control_dict["Depiction"] = depiction_url + version_instance.get_absolute_url()
if version_instance.online_icon is not None and len(str(version_instance.online_icon)) > 0:
control_dict["Icon"] = depiction_url + os.path.join(str(preferences.Setting.resources_alias), version_instance.online_icon.name)
DebianPackage.get_control_content(control_dict, build_temp_package)
build_temp_package.write("\n".encode("utf-8"))
# Compression Gzip
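        # Judging from the branches below, build_compression values 1, 2, 5 and 6
        # include a gzip-compressed copy of the Packages file, while values
        # 3, 4, 5 and 6 also produce a bzip2-compressed copy.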
build_temp_package.seek(0)
if conf["build_compression"] == 1 \
or conf["build_compression"] == 2 \
or conf["build_compression"] == 5 \
or conf["build_compression"] == 6:
build_temp_package_gz = gzip.open(os.path.join(build_temp_path, "Packages.gz"), mode="wb")
while True:
cache = build_temp_package.read(16 * 1024) # 16k cache
if not cache:
break
build_temp_package_gz.write(cache)
build_temp_package_gz.close()
# Compression Bzip
build_temp_package.seek(0)
if conf["build_compression"] == 3 \
or conf["build_compression"] == 4 \
or conf["build_compression"] == 5 \
or conf["build_compression"] == 6:
build_temp_package_bz2 = bz2.BZ2File(os.path.join(build_temp_path, "Packages.bz2"), mode="wb")
while True:
cache = build_temp_package.read(16 * 1024) # 16k cache
if not cache:
break
build_temp_package_bz2.write(cache)
build_temp_package_bz2.close()
# Close original Package file
build_temp_package.close()
# Release
active_release = Release.objects.get(id=conf["build_release"])
active_release_control_dict = active_release.get_control_field()
build_temp_release = open(os.path.join(build_temp_path, "Release"), mode="wb")
DebianPackage.get_control_content(active_release_control_dict, build_temp_release)
# Checksum
if conf["build_secure"] is True:
def hash_file(hash_obj, file_path):
with open(file_path, "rb") as f:
for block in iter(lambda: f.read(65535), b""):
hash_obj.update(block)
checksum_list = [
"Packages",
"Packages.gz",
"Packages.bz2"
]
build_validation_titles = [
"MD5Sum", "SHA1", "SHA256", "SHA512"
]
build_validation_methods = [
hashlib.md5, hashlib.sha1, hashlib.sha256, hashlib.sha512
]
            # Iterate over the available validation methods; use the list length
            # so the SHA512 entry (index 3) is not skipped.
            for build_validation_index in range(len(build_validation_titles)):
if conf["build_validation"] > build_validation_index:
build_temp_release.write((build_validation_titles[build_validation_index] + ":\n").encode("utf-8"))
for checksum_instance in checksum_list:
checksum_path = os.path.join(build_temp_path, checksum_instance)
if os.path.exists(checksum_path):
m2 = build_validation_methods[build_validation_index]()
hash_file(m2, checksum_path)
p_hash = m2.hexdigest()
p_size = os.path.getsize(checksum_path)
build_temp_release.write(
(" " + p_hash +
" " + str(p_size) +
" " + checksum_instance +
"\n").encode("utf-8")
)
build_temp_release.close()
if conf["build_secure"] is True:
# GPG Signature
"""
Use 'gpg --gen-key' to generate GnuPG key before using this function.
"""
password = preferences.Setting.gpg_password
if password is not None and len(password) > 0:
subprocess.check_call(
["gpg", "-abs", "--homedir", os.path.join(settings.BASE_DIR, '.gnupg'), "--batch", "--yes", "--passphrase", password, "-o",
os.path.join(build_temp_path, "Release.gpg"),
os.path.join(build_temp_path, "Release"),
]
)
else:
subprocess.check_call(
["gpg", "-abs", "--homedir", os.path.join(settings.BASE_DIR, '.gnupg'), "--batch", "--yes", "-o",
os.path.join(build_temp_path, "Release.gpg"),
os.path.join(build_temp_path, "Release"),
]
)
# Preparing Directory
release_root = os.path.join(
settings.MEDIA_ROOT,
"releases",
str(active_release.id),
)
build_path = os.path.join(
release_root,
"builds",
str(conf["build_uuid"])
)
if not os.path.isdir(build_path):
mkdir_p(build_path)
# Publish
rename_list = [
"Release",
"Release.gpg",
"Packages",
"Packages.gz",
"Packages.bz2"
]
for rename_instance in rename_list:
rename_path = os.path.join(build_temp_path, rename_instance)
rename_to_path = os.path.join(build_path, rename_instance)
active_path = os.path.join(release_root, rename_instance)
if os.path.exists(rename_path):
if os.path.exists(active_path):
os.unlink(active_path)
shutil.copyfile(rename_path, active_path)
os.chmod(active_path, 0o755)
# os.rename(rename_path, rename_to_path)
shutil.move(rename_path, rename_to_path)
os.chmod(rename_to_path, 0o755)
else:
if os.path.exists(rename_to_path):
os.unlink(rename_to_path)
if os.path.exists(active_path):
os.unlink(active_path)
def thumb_png(png_path):
img = Image.open(png_path)
img.thumbnail((60, 60), Image.ANTIALIAS)
img.save(png_path)
# Cydia Icon
cydia_icon_path = os.path.join(release_root, "CydiaIcon.png")
if os.path.exists(cydia_icon_path):
os.unlink(cydia_icon_path)
if active_release.icon is not None and len(str(active_release.icon)) > 0:
src_path = os.path.join(settings.MEDIA_ROOT, active_release.icon.name)
if os.path.exists(src_path):
shutil.copyfile(
src_path,
cydia_icon_path
)
else:
src_path = os.path.join(settings.STATIC_ROOT, "img/CydiaIcon.png")
if os.path.exists(src_path):
shutil.copyfile(
src_path,
cydia_icon_path
)
if os.path.exists(cydia_icon_path):
thumb_png(cydia_icon_path)
os.chmod(cydia_icon_path, 0o755)
build_instance = Build.objects.get(uuid=str(conf["build_uuid"]))
if build_instance is not None:
build_instance.is_finished = True
build_instance.save()
else:
# TODO: Pdiffs Feature
pass
class BuildForm(ModelForm):
class Meta(object):
widgets = {
'details': AutosizedTextarea,
}
class BuildAdmin(admin.ModelAdmin):
form = BuildForm
actions = [delete_selected]
list_display = ('uuid', 'active_release', 'is_finished', 'created_at')
search_fields = ['uuid']
fieldsets = [
(_('General'), {
'fields': ['active_release', 'job_link', 'details']
}),
(_('History'), {
'fields': ['created_at']
}),
]
change_form_template = "admin/build/change_form.html"
change_list_template = "admin/build/change_list.html"
def job_link(self, obj):
if obj.job_id is None:
if obj.is_finished:
return mark_safe("<img src=\"/static/admin/img/icon-yes.svg\" alt=\"True\" /> %s" % _('Finished'))
else:
return mark_safe("<img src=\"/static/admin/img/icon-unknown.svg\" alt=\"Unknown\" /> %s" % _('Unknown'))
m_job = queues.get_queue('high').fetch_job(obj.job_id)
if m_job is None:
return _('No such job')
if m_job.is_failed:
status_str = mark_safe("<img src=\"/static/admin/img/icon-no.svg\" alt=\"False\" /> %s" % _('Failed'))
elif m_job.is_finished:
if obj.is_finished:
status_str = mark_safe("<img src=\"/static/admin/img/icon-yes.svg\" alt=\"True\" /> %s" % _('Finished'))
else:
status_str = mark_safe(
"<img src=\"/static/admin/img/icon-unknown.svg\" alt=\"Unknown\" /> %s" % _('Unknown'))
else:
status_str = mark_safe("<img src=\"/static/img/icon-loading.svg\" width=\"13\" alt=\"Loading\" "
"onload=\"setTimeout(function () { window.location.reload(); }, 2000);\" /> "
"%s" % _("Processing..."))
return mark_safe('<a href="%s" target="_blank">%s</a>' % (
reverse('rq_job_detail', kwargs={
'queue_index': 1,
'job_id': m_job.id
}),
status_str
))
job_link.short_description = _("Job")
job_link.allow_tags = True
def has_add_permission(self, request):
return preferences.Setting.active_release is not None and Package.objects.count() != 0
def get_readonly_fields(self, request, obj=None):
if not obj:
return ['active_release', 'job_link', 'created_at']
else:
return ['active_release', 'job_link', 'created_at', 'details']
def save_model(self, request, obj, form, change):
"""
Set the active release, call building procedure, and then save.
:type obj: Build
"""
setting = preferences.Setting
obj.active_release = setting.active_release
super(BuildAdmin, self).save_model(request, obj, form, change)
if setting.active_release is not None:
build_args = {
"build_uuid": obj.uuid,
"build_all": setting.downgrade_support,
"build_p_diff": setting.enable_pdiffs,
"build_compression": setting.packages_compression,
"build_secure": setting.gpg_signature,
"build_validation": setting.packages_validation,
"build_release": obj.active_release.id,
}
if settings.ENABLE_REDIS is True:
queue = django_rq.get_queue('high')
build_job = queue.enqueue(build_procedure, build_args)
obj.job_id = build_job.id
messages.info(request, mark_safe(
_("The Build \"<a href=\"{job_detail}\">{obj}</a>\" generating job has been added to the \"<a href=\"{jobs}\">high</a>\" queue.").format(
job_detail=reverse('rq_job_detail', kwargs={
'queue_index': 1,
'job_id': build_job.id,
}),
obj=str(obj),
jobs=reverse('rq_jobs', args=(1, )),
)
))
else:
build_procedure(build_args)
messages.info(request, _("The Build \"%s\" generating job has been finished.") % str(obj))
obj.save()
|
3 sets of vector pills, tablets, drugs and other medicine things for your health related designs. Format: EPS stock vector clip art. Free for download.
Posted on 07/08/2011, in Free Vector Graphics | Tags: health, medicine.
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2021 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino.projects.std.settings import *
from lino.utils import i2d
class Site(Site):
title = "Lino Mini 9"
project_model = 'contacts.Person'
languages = 'en de fr'
user_types_module = 'lino_xl.lib.xl.user_types'
demo_fixtures = """std demo demo2 checkdata""".split()
default_build_method = 'weasy2pdf'
the_demo_date = i2d(20141023)
webdav_protocol = 'davlink'
def get_installed_apps(self):
yield super(Site, self).get_installed_apps()
# yield 'lino.modlib.users'
yield 'lino_book.projects.min9.modlib.contacts'
yield 'lino_xl.lib.excerpts'
yield 'lino_xl.lib.addresses'
yield 'lino_xl.lib.phones'
yield 'lino_xl.lib.reception'
yield 'lino_xl.lib.courses'
yield 'lino_xl.lib.sepa'
yield 'lino_xl.lib.notes'
# yield 'lino_xl.lib.projects'
yield 'lino_xl.lib.humanlinks'
yield 'lino_xl.lib.households'
yield 'lino_xl.lib.calview'
# yield 'lino_xl.lib.extensible'
yield 'lino_xl.lib.pages'
yield 'lino.modlib.export_excel'
yield 'lino_xl.lib.dupable_partners'
yield 'lino.modlib.checkdata'
yield 'lino.modlib.tinymce'
# yield 'lino.modlib.wkhtmltopdf'
yield 'lino.modlib.weasyprint'
yield 'lino_xl.lib.appypod'
yield 'lino.modlib.notify'
yield 'lino.modlib.changes'
yield 'lino.modlib.comments'
yield 'lino.modlib.uploads'
yield 'lino_xl.lib.properties'
yield 'lino_xl.lib.cv'
yield 'lino_xl.lib.b2c'
yield 'lino_xl.lib.sales'
yield 'lino_xl.lib.finan'
def get_plugin_configs(self):
"""
Change the default value of certain plugin settings.
"""
yield super(Site, self).get_plugin_configs()
yield ('countries', 'country_code', 'BE')
yield ('b2c', 'import_statements_path', self.project_dir.child('sepa_in'))
def do_site_startup(self):
# lino_xl.lib.reception requires some workflow to be imported
from lino_xl.lib.cal.workflows import feedback
super(Site, self).do_site_startup()
SITE = Site(globals())
# ALLOWED_HOSTS = ['*']
DEBUG = True
# SECRET_KEY = "20227" # see :djangoticket:`20227`
|
Our future environments will consist of an increasing number of augmented artefacts; not only information appliances, but also ordinary objects enhanced with computing and communication capabilities. People may find themselves thrown into a world of distinct artefacts that are nevertheless interconnected via an invisible web of network services. The approach presented in this paper is to enable people to make their own applications with ‘augmented’ artefacts, which are treated as reusable “components”, by providing an appropriate infrastructure and tools.
|
'''
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
You may assume pattern contains only lowercase letters, and str contains lowercase letters separated by a single space.
'''
def wordPattern(pattern, str):
    s = pattern
    t = str.split()
    # Wrap the maps in list() so the comparison also works on Python 3, where
    # map() returns a lazy iterator and two distinct iterators never compare equal.
    return list(map(s.find, s)) == list(map(t.index, t))
if __name__ == "__main__":
assert (wordPattern('abba', 'dog cat cat dog'))
assert (not wordPattern('abba', 'dog dog cat dog'))
assert (not wordPattern('abba', 'dog cat cat fish'))
assert (not wordPattern('aaaa', 'dog cat cat dog'))
assert (wordPattern('aaaa', 'dog dog dog dog'))
assert (not wordPattern('abba', 'dog dog dog dog'))
print ('done')
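# Why this works (a brief note, not part of the original solution): mapping each
# element to the index of its first occurrence puts both the pattern and the
# word list into a canonical form, e.g. 'abba' -> [0, 1, 1, 0] and
# 'dog cat cat dog'.split() -> [0, 1, 1, 0]. The string follows the pattern --
# i.e. there is a bijection between letters and words -- exactly when the two
# canonical forms (and therefore their lengths) are equal.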
|
Eight Clarendon Crescent is a bed and breakfast in the heart of Leamington Spa, Warwickshire. A splendid grade II Listed Regency house elegantly furnished with antiques. The property is a haven of peace and tranquillity with four tastefully decorated, individually designed bedrooms.
Hosts David and Christine Lawson have created an environment with a warm, relaxed informal atmosphere and are on hand to recommend the pick of Leamington Spa's many splendid restaurants and places of interest to visit in the area.
Breakfasts include fruit, yoghurts, cereals as well as the traditional full English. We hope you enjoy our freshly baked bread and homemade marmalade. Special diets are catered for.
"An oasis of calm in the rat race of life"
"Extremely comfortable welcoming wonderful atmosphere"
"A beautiful house, lovely room, superb breakfast and wonderful hospitality"
"What a wonderful find, we will certainly be back if we can find an excuse or reason!"
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
from .. import ivi
from .. import scpi
class diconGP700(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest, scpi.common.Memory,
ivi.Driver):
"DiCon Fiberoptics GP700 Programmable Fiberoptic Instrument"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'GP700')
super(diconGP700, self).__init__(*args, **kwargs)
self._identity_description = "DiCon Fiberoptics GP700 Programmable Fiberoptic Instrument"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "DiCon Fiberoptics Inc"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['GP700']
self._self_test_delay = 5
self._memory_size = 8
self._memory_offset = 1
self._config = ""
self._attenuator_count = 0
self._attenuator_name = list()
self._attenuator_level = list()
self._attenuator_level_max = list()
self._filter_count = 0
self._filter_name = list()
self._filter_wavelength = list()
self._filter_wavelength_max = list()
self._filter_wavelength_min = list()
self._matrix_input_count = 0
self._matrix_input_name = list()
self._matrix_output_count = 0
self._matrix_input_output = list()
self._switch_count = 0
self._switch_name = list()
self._switch_output = list()
self._switch_input = list()
self._switch_output_count = list()
self._switch_input_count = list()
self._add_property('attenuators[].level',
self._get_attenuator_level,
self._set_attenuator_level,
None,
ivi.Doc("""
Specifies the level of the attenuator module. The units are dB.
"""))
self._add_property('attenuators[].level_max',
self._get_attenuator_level_max,
None,
None,
ivi.Doc("""
Returns the maximum attenuation level supported. The units are dB.
"""))
self._add_property('attenuators[].name',
self._get_attenuator_name,
None,
None,
ivi.Doc("""
Returns the name of the attenuator module.
"""))
self._add_property('filters[].wavelength',
self._get_filter_wavelength,
self._set_filter_wavelength,
None,
ivi.Doc("""
Specifies the center wavelength of the filter module. The units are nm.
"""))
self._add_property('filters[].wavelength_max',
self._get_filter_wavelength_max,
None,
None,
ivi.Doc("""
Returns the maximum center wavelength of the filter. The units are nm.
"""))
self._add_property('filters[].wavelength_min',
self._get_filter_wavelength_min,
None,
None,
ivi.Doc("""
Returns the minimum center wavelength of the filter. The units are nm.
"""))
self._add_property('filters[].name',
self._get_filter_name,
None,
None,
ivi.Doc("""
Returns the name of the filter module.
"""))
self._add_property('switches[].output',
self._get_switch_output,
self._set_switch_output,
None,
ivi.Doc("""
Specify switch output connection.
"""))
self._add_property('switches[].output_count',
self._get_switch_output_count,
None,
None,
ivi.Doc("""
Query number of outputs supported by switch.
"""))
self._add_property('switches[].input',
self._get_switch_input,
self._set_switch_input,
None,
ivi.Doc("""
Specify switch input connection.
"""))
self._add_property('switches[].input_count',
self._get_switch_input_count,
None,
None,
ivi.Doc("""
Query number of inputs supported by switch.
"""))
self._add_method('switches[].get',
self._switch_get,
ivi.Doc("""
Get current switch input and output configuration.
"""))
self._add_method('switches[].set',
self._switch_set,
ivi.Doc("""
Set switch input and output configuration.
"""))
self._add_property('switches[].name',
self._get_switch_name,
None,
None,
ivi.Doc("""
Returns the name of the switch module.
"""))
self._add_method('memory.save',
self._memory_save,
ivi.Doc("""
Save device configuration to the specified memory slot.
"""))
self._add_method('memory.recall',
self._memory_recall,
ivi.Doc("""
Recall device configuration from the specified memory slot.
"""))
if self._initialized_from_constructor:
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(diconGP700, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
if not self._initialized_from_constructor:
self._init_channels()
def _utility_disable(self):
pass
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
time.sleep(0.1)
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_unlock_object(self):
pass
def _init_channels(self):
try:
super(diconGP700, self)._init_channels()
except AttributeError:
pass
config = self._get_config()
self._attenuator_count = 0
self._attenuator_name = list()
self._attenuator_level = list()
self._attenuator_level_max = list()
self._filter_count = 0
self._filter_name = list()
self._filter_wavelength = list()
self._filter_wavelength_max = list()
self._filter_wavelength_min = list()
self._matrix_input_count = 0
self._matrix_input_name = list()
self._matrix_output_count = 0
self._matrix_input_output = list()
self._switch_count = 0
self._switch_name = list()
self._switch_output = list()
self._switch_input = list()
self._switch_output_count = list()
self._switch_input_count = list()
lst = config.split(",")
lst = [x.strip() for x in lst]
lst.sort()
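        # Each comma-separated entry describes one installed module; judging by
        # the branches below, names starting with 'A' are attenuators, 'F' are
        # filters, 'M' are matrix switches, 'P' are single-input 1xN switches,
        # 'S' carries a count of simple 1x2 switches, and 'MATRIX' gives the
        # matrix dimensions.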
for itm in lst:
v = itm.split(" ")
if len(itm) == 0:
continue
if v[0] == 'MATRIX':
self._matrix_input_count = int(v[1][5:])
self._matrix_output_count = int(v[2][6:])
elif itm[0] == 'A':
if v[0] not in self._attenuator_name:
self._attenuator_count += 1
self._attenuator_name.append(v[0])
self._attenuator_level.append(0.0)
self._attenuator_level_max.append(0.0)
i = ivi.get_index(self._attenuator_name, v[0])
self._attenuator_level[i] = 0.0
self._attenuator_level_max[i] = float(v[1])
elif itm[0] == 'F':
if v[0] not in self._filter_name:
self._filter_count += 1
self._filter_name.append(v[0])
self._filter_wavelength.append(0.0)
self._filter_wavelength_min.append(0.0)
self._filter_wavelength_max.append(0.0)
i = ivi.get_index(self._filter_name, v[0])
self._filter_wavelength[i] = 0.0
self._filter_wavelength_min[i] = float(v[1][3:])
self._filter_wavelength_max[i] = float(v[2][3:])
elif itm[0] == 'M':
if v[0] not in self._switch_name:
self._switch_count += 1
self._switch_name.append(v[0])
self._switch_input.append(0)
self._switch_output.append(0)
self._switch_input_count.append(0)
self._switch_output_count.append(0)
i = ivi.get_index(self._switch_name, v[0])
self._switch_input[i] = 1
self._switch_output[i] = 0
self._switch_input_count[i] = int(v[2][1:])
self._switch_output_count[i] = int(v[1][1:])
elif itm[0] == 'P':
if v[0] not in self._switch_name:
self._switch_count += 1
self._switch_name.append(v[0])
self._switch_input.append(0)
self._switch_output.append(0)
self._switch_input_count.append(0)
self._switch_output_count.append(0)
i = ivi.get_index(self._switch_name, v[0])
self._switch_input[i] = 1
self._switch_output[i] = 0
self._switch_input_count[i] = 1
self._switch_output_count[i] = int(v[1][7:])
elif itm[0] == 'S':
cnt = int(v[0][1:])
for i in range(cnt):
n = 'S%02d' % (i+1)
if n not in self._switch_name:
self._switch_count += 1
self._switch_name.append(n)
self._switch_input.append(0)
self._switch_output.append(0)
self._switch_input_count.append(0)
self._switch_output_count.append(0)
i = ivi.get_index(self._switch_name, n)
self._switch_input[i] = 1
self._switch_output[i] = 1
self._switch_input_count[i] = 1
self._switch_output_count[i] = 2
self.attenuators._set_list(self._attenuator_name)
self.filters._set_list(self._filter_name)
self.switches._set_list(self._switch_name)
def _get_config(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._config = self._ask("system:config?")
self._set_cache_valid()
return self._config
def _get_attenuator_level(self, index):
index = ivi.get_index(self._attenuator_name, index)
name = self._attenuator_name[index]
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("%s?" % (name))
            self._attenuator_level[index] = float(resp)
self._set_cache_valid()
return self._attenuator_level[index]
def _set_attenuator_level(self, index, value):
index = ivi.get_index(self._attenuator_name, index)
name = self._attenuator_name[index]
value = float(value)
if value < 0 or value > self._attenuator_level_max[index]:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("%s %f" % (name, value))
self._attenuator_level[index] = value
self._set_cache_valid()
def _get_attenuator_level_max(self, index):
index = ivi.get_index(self._attenuator_name, index)
return self._attenuator_level_max[index]
def _get_attenuator_name(self, index):
index = ivi.get_index(self._attenuator_name, index)
return self._attenuator_name[index]
def _get_filter_wavelength(self, index):
index = ivi.get_index(self._filter_name, index)
name = self._filter_name[index]
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("%s?" % (name))
            self._filter_wavelength[index] = float(resp)
self._set_cache_valid()
return self._filter_wavelength[index]
def _set_filter_wavelength(self, index, value):
index = ivi.get_index(self._filter_name, index)
name = self._filter_name[index]
value = float(value)
if value < self._filter_wavelength_min[index] or value > self._filter_wavelength_max[index]:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("%s %f" % (name, value))
self._filter_wavelength[index] = value
self._set_cache_valid()
def _get_filter_wavelength_max(self, index):
index = ivi.get_index(self._filter_name, index)
        return self._filter_wavelength_max[index]
def _get_filter_wavelength_min(self, index):
index = ivi.get_index(self._filter_name, index)
        return self._filter_wavelength_min[index]
def _get_filter_name(self, index):
index = ivi.get_index(self._filter_name, index)
return self._filter_name[index]
def _get_switch_output(self, index):
return self._switch_get(index)[0]
def _set_switch_output(self, index, value):
self._switch_set(index, value)
def _get_switch_output_count(self, index):
index = ivi.get_index(self._switch_name, index)
return self._switch_output_count[index]
def _get_switch_input(self, index):
return self._switch_get(index)[1]
def _set_switch_input(self, index, value):
index = ivi.get_index(self._switch_name, index)
self._switch_set(index, self._switch_output[index], value)
def _get_switch_input_count(self, index):
index = ivi.get_index(self._switch_name, index)
return self._switch_input_count[index]
def _switch_get(self, index):
index = ivi.get_index(self._switch_name, index)
name = self._switch_name[index]
if name[0] == 'M':
if not self._driver_operation_simulate:
if not self._get_cache_valid('switch_output', index) or not self._get_cache_valid('switch_input', index):
#if True:
resp = self._ask("%s?" % name)
lst = resp.split(',')
self._switch_output[index] = int(lst[0].strip())
self._switch_input[index] = int(lst[1].strip())
self._set_cache_valid(True, 'switch_output', index)
self._set_cache_valid(True, 'switch_input', index)
return (self._switch_output[index], self._switch_input[index])
elif name[0] == 'P' or name[0] == 'S':
if not self._driver_operation_simulate:
if not self._get_cache_valid('switch_output', index):
#if True:
resp = self._ask("%s?" % name)
self._switch_output[index] = int(resp.strip())
self._switch_input[index] = 1
self._set_cache_valid(True, 'switch_output', index)
self._set_cache_valid(True, 'switch_input', index)
return (self._switch_output[index], self._switch_input[index])
def _switch_set(self, index, output, input=None):
index = ivi.get_index(self._switch_name, index)
name = self._switch_name[index]
output = int(output)
if input is not None:
input = int(input)
if name[0] == 'M':
if output < 0 or output > self._switch_output_count[index]:
raise ivi.OutOfRangeException()
if input is not None and (input < 1 or input > self._switch_input_count[index]):
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
if input is None:
self._write("%s %d" % (name, output))
else:
self._write("%s %d, %d" % (name, output, input))
else:
self._switch_output[index] = output
self._set_cache_valid(True, 'switch_output', index)
if input is not None:
self._switch_input[index] = input
self._set_cache_valid(True, 'switch_input', index)
elif name[0] == 'P' or name[0] == 'S':
if output < 1 or output > self._switch_output_count[index]:
raise ivi.OutOfRangeException()
if input is not None and input != 1:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("%s %d" % (name, output))
else:
self._switch_output[index] = output
self._switch_input[index] = 1
self._set_cache_valid(True, 'switch_output', index)
self._set_cache_valid(True, 'switch_input', index)
def _get_switch_name(self, index):
index = ivi.get_index(self._switch_name, index)
return self._switch_name[index]
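# A minimal usage sketch (illustrative only; the VISA resource string is a
# placeholder and the module names 'A1'/'M1' depend on the hardware reported by
# "system:config?"):
#
#   gp700 = diconGP700("TCPIP0::192.168.1.100::INSTR")
#   gp700.attenuators['A1'].level = 3.0   # set attenuation in dB
#   gp700.switches['M1'].set(2, 1)        # connect output 2 to input 1
#   print(gp700.switches['M1'].get())     # -> (output, input)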
|
The Shaded Ceiling Light Bar Holiday is inspired by the American jazz singer and songwriter Billie Holiday. Nicknamed "Lady Day", Holiday had a seminal influence on jazz and pop singing. The intelligent contrast of brown shades and stunning nickel finish makes this contemporary fixed 7-light ceiling bar a striking centrepiece. The nickel detailing lends your room an added tone of luxury, whilst the slimline design ensures it's modern and congruous with your interior. Create that trendy, modern look with this stylish Ceiling Light designed by Villa Lumi. Complete with brown shades, it diffuses the light beautifully. Hang it above a dining table for that chic look.
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
from mapping import Mapping
from clawpack.clawutil.data import ClawData
import dtopotools_horiz_okada_and_1d as dtopotools
reload(dtopotools)
from clawpack.geoclaw.data import LAT2METER
length_scale = 1.0e-3 # m to km
xlimits = [-150e3*length_scale,200e3*length_scale]
ylimits_fault = [-175e3*length_scale,0.0*length_scale]
ylimits_surface = [-0.3,0.5]
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
fault = dtopotools.Fault()
fault.read(plotdata.outdir + '/fault.data')
mapping = Mapping(fault)
xp1 = mapping.xp1*length_scale
xp2 = mapping.xp2*length_scale
yp1 = mapping.yp1*length_scale
yp2 = mapping.yp2*length_scale
gaugedata = ClawData()
gaugedata.read(plotdata.outdir + '/gauges.data', force=True)
ngauges = gaugedata.ngauges
xc = np.zeros(ngauges)
for j in range(ngauges):
g = plotdata.getgauge(j)
xc[j] = g.location[0]
fault.create_dtopography(xc/LAT2METER,np.array([0.]),[1.0],y_disp=True)
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'binary'
def mapc2p(xc,yc):
xp,yp = mapping.mapc2p(xc,yc)
return xp*length_scale,yp*length_scale
def plot_fault(current_data):
from pylab import linspace, plot, xlabel, ylabel, tick_params
xl = linspace(xp1,xp2,100)
yl = linspace(yp1,yp2,100)
plot(xl,yl,'k',linewidth=3)
tick_params(labelsize=25)
xlabel('kilometers',fontsize=25)
ylabel('kilometers',fontsize=25)
def sigmatr(current_data):
# return -trace(sigma)
q = current_data.q
return -(q[0,:,:] + q[1,:,:])
def plot_vertical_displacement(current_data):
from pylab import plot,zeros,ylabel,tick_params
t = current_data.t
ys = zeros(ngauges)
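        # Integrate each gauge's q[4] component (taken here as the vertical
        # velocity) in time up to t with a trapezoidal rule to recover the
        # vertical surface displacement; the dashed red curve plotted afterwards
        # is the static Okada deformation from fault.dtopo for comparison.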
for gaugeno in range(ngauges):
g = plotdata.getgauge(gaugeno)
for k in range(1,len(g.t)):
if g.t[k] > t:
break
dt = g.t[k] - g.t[k-1]
v = 0.5*(g.q[4,k]+g.q[4,k-1])
ys[gaugeno] += dt*v
plot(xc[:ngauges]*length_scale,ys,linewidth=3)
plot(xc*length_scale,fault.dtopo.dZ[0,0,:],linestyle='--',color='r',linewidth=3)
tick_params(labelsize=25)
ylabel('meters',fontsize=25)
# Figure for surface
plotfigure = plotdata.new_plotfigure(name='surface', figno=1)
plotfigure.kwargs = {'figsize':(11,4)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_surface
plotaxes.title_with_t = False
plotaxes.title = ''
plotaxes.scaled = False
plotaxes.afteraxes = plot_vertical_displacement
# Figure for fault
plotfigure = plotdata.new_plotfigure(name='fault', figno=2)
plotfigure.kwargs = {'figsize':(11,6)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_fault
plotaxes.title = ''
plotaxes.title_with_t = False
plotaxes.scaled = True
plotaxes.afteraxes = plot_fault
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = sigmatr
plotitem.pcolor_cmap = colormaps.blue_white_red
plotitem.pcolor_cmin = -1e6
plotitem.pcolor_cmax = 1e6
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
plotitem.MappedGrid = True
plotitem.mapc2p = mapc2p
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True
return plotdata
|
My next Spiritual Development Course starts in 2015. Date to be announced.
Sign-up to my mailing list to avoid missing out.
I am hoping you can join me for a fun and exciting series of evenings at my healing practice in Eltham (Melbourne, Australia).
CALL ME on 0419 147 036 to reserve a place.
WHERE: The course takes place every Tuesday night at 6:30pm near the Eltham Police Station at the rear of the Eltham shopping district.
WHEN: Course starts Tuesday evening, 2015 dates TBA. Duration is 1.5hrs plus an additional and casual 30min question time if required.
WHY: The aim of the course is to develop your inner strength, to awaken your intuitive talents, to develop your spiritual powers, and to ultimately, unlock your True Potential in life.
EVERY SESSION: Meditation is one of the simplest and yet most powerful practices that anyone can use to improve every aspect of their life. It should be the cornerstone of every Spiritual Development Course, and for that reason Benita starts each and every session with a simple but profoundly effective meditation.
The purpose is to immediately release you from the known aspects of your life, so that you are not bound up with conditioning; in your mind, being and body. To move you into the realm of possibility, discovery and creativity.
There are no religious affiliations with any of Benita’s teachings. The meditation period will be a guided experience beginning with gratitude for all the things you already have and moving you towards a place where you can simply focus on the self and then finally leading you towards a better position to create what YOU want in YOUR life.
EVERY SESSION: Healers invest long periods of time with their clients, listening to all the aspects of their Emotional, Spiritual, Mental, Energetic and Physical life. Healing is caring for and helping another individual. Benita’s experience in this field is extensive, and her dedication to healing has been constant and unbroken for the past 30 years.
Each Spiritual Development session will include a self-healing and a group-healing session. One entire study session will be devoted to a better understanding of the many vibrational energies within the mind, body and spirit… and how to heal those energies in yourself and others.
UNIQUE TOPICS: Across eight individual evenings… Each Tuesday night session will include an extensive study section, covering a different theme to help you discover the wealth of information and experience Benita has collected over the past 30 years.
Benita will guide you through the knowledge using hands on experience, case studies and live examples of the knowledge in action.
What is energy and its use in healing.
Healing past traumas; physical, mental, emotional, beliefs and responsibilities.
The effect of thoughts and feelings on the physical body and the aura.
The body’s key healing points and meridians.
The Chakras and how to heal and balance them.
Kinesiology as a healing modality.
Why and when to consult the tarot.
Which deck to use and the divisions of the cards.
Shuffles, intent, questions and spreads.
What are you connecting to when consulting the Tarot.
Your numbers and what they can tell you about yourself, strangers and loved ones.
What lessons you need to learn for this life from your number lessons.
Compatibility with others through your numbers and their numbers.
How to determine a numerology chart.
Reincarnation, karma, and past lives.
How to remember your past lives.
People we have known before and people we are yet to meet.
How to choose and use crystals.
10 ace crystals and their meanings.
Cleansing and recharging your crystals.
Understanding your nightly dreams and their creation.
What dreams can show us on many levels.
How to remember and record your dreams.
5 common dream symbols explained.
Heightening your energy vibration and attunement for healing and manifesting.
The meat, vegetarian and vegan diets.
Coffee and alcohol vs healthy drinks and water.
The importance of water and the many different types.
Meditation, walks, yoga, Tai Chi and physical discipline.
Associating with people and places that will support your energy.
Television bombardment vs controlled entertainment.
What is manifesting and how to do it.
How to make your life dreams come true.
10 best techniques to help you manifest.
Click the button below to secure a place in the next course.
HAVE A VIP COUPON CODE? – Enter it at the checkout.
|
import logging
from six.moves.urllib.parse import parse_qsl
from .oauth import OAuth2
from .oauth import BEARER_URI
log = logging.getLogger(__name__)
class Facebook(OAuth2):
# General info about the provider
provider_url = 'https://facebook.com/'
docs_url = 'https://developers.facebook.com/docs/'
category = 'Social'
api_path = 'chatterbox.api.facebook.Facebook'
refresh_url = None
# URLs to interact with the API
authorize_url = 'https://www.facebook.com/dialog/oauth'
access_token_url = 'https://graph.facebook.com/oauth/access_token'
api_domain = 'graph.facebook.com'
bearer_type = BEARER_URI
available_permissions = [
(None, 'read your basic, public information'), # public_profile
('email', 'access your email address'),
('read_stream', 'access to read the posts your news feed, or your profile'),
('user_about_me', 'access your profile information'),
('user_checkins', 'access your checkins'),
('user_events', 'access your events'),
('user_groups', 'access your groups'),
('user_likes', 'access the things you like'),
('user_location', 'access your location'),
('user_photos', 'access your photos'),
('user_status', 'access your most recent status'),
]
def parse_token(self, content):
data = dict(parse_qsl(content))
data['expires_in'] = data.get('expires', None)
return data
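# A small illustration of parse_token (hypothetical response body; Facebook's
# older token endpoint returned a form-encoded string rather than JSON):
#
#   data = dict(parse_qsl('access_token=abc123&expires=5183999'))
#   data['expires_in'] = data.get('expires', None)
#   # -> {'access_token': 'abc123', 'expires': '5183999', 'expires_in': '5183999'}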
|
Lucy Railton has made a name for herself as a cellist, composer, creator of installations and artistic director of various festivals and concert series. She has played with many of the leading contemporary music ensembles of London and Berlin, dividing her time between the two cities; her installations have been presented at Tate Modern and the Institute of Contemporary Arts in London, among other venues; she is the founder and artistic director of the Kammer Klang concert series at Cafe Oto in London and one of the artistic directors of the London Contemporary Music Festival. Her collaborators include Peter Zinovieff, Jennifer Walshe, Aisha Orazbayeva, Sofie Jernberg and Russell Haswell, and she has performed in hundreds of concerts around the world.
Lucy will be joined by Kit Downes on harmonium.
Giacinto Scelsi: Trilogy, for solo cello.
Lucy Railton has worked with some of the leading new music ensembles in London and Berlin and has spent the last decade dedicated to the development of new music through commissions, festival curating and collaborating with prominent artists and musicians making experimental and brand new works.
She has composed for installations including the Tate Modern and the ICA, London, for the theatre company Complicite and new works have been commissioned by Sonic Acts/Dark Ecology and Borealis Festival, PAF Festival of Film Animation, Czech Republic, Phillippe Parreno and the Portland Film Festival.
Her current collaboration with Peter Zinovieff has performed at the electronic music festivals Atonal, Rewire and Norberg, and her performances with pianist Kit Downes include the Koln Philharmonie, BBC Radio and hundreds of venues around Europe. As well as co-directing the dance piece Everything that rises must dance with Sasha Milavic Davies (Complicite), she has been co-director of the London Contemporary Music Festival and is founder and curator of the Kammer Klang new music series at Cafe Oto, London.
|
import argparse
import pystache
import os
import random
import sys
from djvasa.templates import View
class Project(object):
_secret_key = None
def __init__(self, **kwargs):
self.project_name = raw_input("What's the name of your project? ")
self.heroku = kwargs.get('heroku')
self.mysql = kwargs.get('mysql')
self.postgres = kwargs.get('postgres') or self.heroku
self.hg = kwargs.get('hg')
self.git = False if self.hg else True
self.full_name = raw_input("What's your full name? ")
self.email = raw_input("What's your email? ")
self.project_path = self.project_root = os.path.join(os.getcwd(), self.project_name)
self.renderer = pystache.Renderer()
self.view = View(self.project_name, **self._kwargs)
def _create_file(self, names):
for file_name, template_name in names:
self.view.template_name = template_name
with open(os.path.join(self.project_path, file_name), 'w+') as f:
f.write(self.renderer.render(self.view))
@property
def secret_key(self):
if not self._secret_key:
chars = "!@#$%^&*(-_=+)abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
self._secret_key = ''.join(random.choice(chars) for c in range(50))
return self._secret_key
@property
def _kwargs(self):
return {
'heroku': self.heroku,
'mysql': self.mysql,
'postgres': self.postgres,
'full_name': self.full_name,
'email': self.email,
'secret_key': self.secret_key
}
@property
def root_files(self):
files = {
'manage.py': 'manage',
'requirements.txt': 'pip_requirements',
'Vagrantfile': 'vagrantfile'
}
if self.hg:
files['.hgignore'] = 'hgignore'
else:
files['.gitignore'] = 'gitignore'
if self.heroku:
files['Procfile'] = 'procfile'
return files.items()
@property
def django_files(self):
files = {
'settings.py': 'settings',
'settingslocal.py': 'settings_local',
'urls.py': 'urls',
'wsgi.py': 'wsgi'
}
return files.items()
@property
def salt_files(self):
files = {
'top.sls': 'top',
'%s.sls' % self.project_name: 'salt_project',
'requirements.sls': 'requirements',
'motd': 'motd'
}
if self.mysql:
files['mysql.sls'] = 'mysql'
if self.postgres:
files['pg_hba.conf'] = 'pgconf'
files['postgres.sls'] = 'postgres'
return files.items()
def initialize(self):
# Create root directory
os.mkdir(self.project_name)
self._create_file(self.root_files)
# Create project
os.chdir(self.project_path)
self.project_path = os.path.join(os.getcwd(), self.project_name)
os.mkdir(self.project_name)
open(os.path.join(self.project_path, '__init__.py'), 'w+').close()
self._create_file(self.django_files)
os.chdir(self.project_name)
# Create static directories
os.mkdir('public')
os.mkdir('templates')
os.makedirs('static/css')
os.makedirs('static/js')
os.makedirs('static/img')
os.makedirs('static/less')
os.chdir('templates')
self.project_path = os.path.join(os.getcwd())
self._create_file([('base.html', 'base')])
os.chdir(self.project_root)
self.project_path = os.path.join(os.getcwd(), 'salt')
os.makedirs('salt/roots/salt')
if self.postgres:
# Create the pillar directories
os.mkdir('pillar')
# Create minion
self._create_file([('minion', 'minion')])
self.project_path = os.path.join(os.getcwd(), 'salt', 'roots', 'salt')
self._create_file(self.salt_files)
if self.postgres:
# create pillar directory and postgres settings.
pillar = os.path.join(self.project_root, 'pillar')
os.chdir(pillar)
self.project_path = pillar
self._create_file([
('top.sls', 'pillar_top'),
('settings.sls', 'pillar_settings')
])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--heroku', action='store_true', default=False, help="Initialize the project for "
"deployment to Heroku.")
parser.add_argument('--mysql', action='store_true', default=False, help='Initialize the project with MySQL.')
parser.add_argument('--postgres', action='store_true', default=False, help="Initialize the project with Postgres.")
parser.add_argument('--hg', action='store_true', default=False, help="Initialize project for mercurial.")
args = parser.parse_args()
if args.mysql and args.postgres:
sys.exit("You can only enable one database, you enabled both MySQL and Postgres.")
if args.mysql and args.heroku:
sys.exit("Enable MySQL is not valid with the heroku option. By default postgres is enabled with "
"the heroku option is used.")
project = Project(**vars(args))
project.initialize()
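# A minimal programmatic sketch, equivalent to running the command-line entry
# point with --postgres and --hg (the project name, full name and email prompts
# remain interactive):
#
#   Project(postgres=True, hg=True).initialize()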
|
Stem the mushrooms, then tear or cut the caps into pieces about 1/2-inch wide. Remove the strings and leaves from the celery and cut the stalks into 1/2-inch lengths. Combine the ginger and garlic in a small bowl. Cut the green parts off the scallions, chop them into 1/4-inch pieces, and save as a garnish. Cut the white parts of the scallions into 1/4-inch rounds and add them to the ginger and garlic. Place the chilies in a sieve, rinse, and shake them dry.
Mix the sauce ingredients together in a small bowl or measuring cup, adding the sweet soy sauce (or soy sauce plus sugar) to taste.
Place a wok over medium heat and pour in the oil when the wok is hot. Swirl the oil around and add the Sichuan peppercorns. When the peppercorns have toasted to a very dark brown, use a slotted spoon to remove and discard them. Turn up the heat under the wok to high. Add the chilies to the oil, fry them until they start to brown, and add the ginger, garlic, and whites of the green onions. Stir-fry these for a few minutes to release their fragrance.
Add the mushrooms and stir-fry them until they start to brown, and then add the celery and toss it with the mushrooms. Fry only until the raw edge of the celery is gone. Scoot the vegetables up the side of the wok. Add whatever protein you are using and fry them quickly in the oil at the bottom of the wok until they barely brown, adding more oil as needed to keep them from sticking to the wok. Toss them with the vegetables, and then pour the sauce around the edge so that it heats up quickly. Toss everything together quickly until the sauce thickens and forms a sheen over all of the ingredients. Taste and adjust the seasoning. Toss in the onion greens and peanuts and then serve immediately.
Sorry to be a pest, but is this sauce called "Lychee flavor Sauce" because lychees are often included in the sauce, or because it is used to flavor dishes with lychees?
You're not a pest! It's more of a description of the fruitiness of the sauce itself. Lots of rich flavors popping in the background.
|
# -*- coding: utf-8 -*-
"""
@author: Karthik Madathil (github: @kmadathil)
"""
from argparse import ArgumentParser, Action
import logging
from sanskrit_parser.base.sanskrit_base import SLP1, DEVANAGARI
from sanskrit_parser.generator.paninian_object import PaninianObject
from sanskrit_parser.generator.prakriya import Prakriya, PrakriyaVakya
from sanskrit_parser.generator.pratyaya import * # noqa: F403
from sanskrit_parser.generator.dhatu import * # noqa: F403
from sanskrit_parser.generator.pratipadika import * # noqa: F403
from sanskrit_parser.generator.sutras_yaml import sutra_list
from sanskrit_parser import enable_file_logger, enable_console_logger
logger = logging.getLogger(__name__)
def run_pp(s, verbose=False):
pl = []
# Assemble list of inputs
for i in range(len(s)):
def _gen_obj(s, i):
if isinstance(s[i], tuple) or isinstance(s[i], list):
lelem = [_gen_obj(s[i], ii) for (ii, ss) in enumerate(s[i])]
else:
lelem = s[i]
return lelem
lelem = _gen_obj(s, i)
pl.append(lelem)
p = Prakriya(sutra_list, PrakriyaVakya(pl))
p.execute()
if verbose:
p.describe()
o = p.output()
return o
# Insert all sup vibhaktis one after the other, with avasAnas
# Return results with avasAnas stripped as 8x3 list of lists
def generate_vibhakti(pratipadika, verbose=False):
r = []
for ix, s in enumerate(sups): # noqa: F405
if verbose:
logger.info(f"Vibhakti {ix+1} {s}")
else:
logger.debug(f"Vibhakti {ix+1} {s}")
r.append([])
for jx, ss in enumerate(s):
# For nitya eka/dvi/bahuvacana, generate only the appropriate
if (((jx == 0) and pratipadika.hasTag("nityEkavacana")) or
((jx == 1) and pratipadika.hasTag("nityadvivacana")) or
((jx == 2) and pratipadika.hasTag("nityabahuvacana")) or
(not (pratipadika.hasTag("nityEkavacana") or
pratipadika.hasTag("nityadvivacana") or
pratipadika.hasTag("nityabahuvacana")))):
t = [(pratipadika, ss), avasAna] # noqa: F405
_r = run_pp(t, verbose)
r[-1].append(_r)
p = [''.join([str(x) for x in y]) for y in _r]
pp = ", ".join([x.strip('.') for x in p])
if verbose:
logger.info(f"Vacana {jx+1} {ss} {pp}")
else:
logger.debug(f"Vacana {jx+1} {ss} {pp}")
return r
last_option = False
class CustomAction(Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
# if nargs is not None:
# raise ValueError("nargs not allowed")
super(CustomAction, self).__init__(option_strings, dest, nargs, **kwargs)
logger.debug(f"Initializing CustomAction {option_strings}, {dest}")
def __call__(self, parser, namespace, values, option_string=None):
logger.debug('%r %r %r' % (namespace, values, option_string))
global last_option
assert not last_option, f"Option {option_string} added after avasana"
if getattr(namespace, self.dest) is None:
_n = []
# This tracks the hierarchical input list
setattr(namespace, self.dest, _n)
# Last item of this is always the current level of the input
setattr(namespace, "pointer", [_n])
if values is not None:
if isinstance(values, str):
values = [values]
for v in values:
assert v in globals(), f"{v} is not defined!"
getattr(namespace, "pointer")[-1].append(globals()[v])
else:
if option_string == "-o": # Open
_l = []
# Add a new level at the end of current list
getattr(namespace, "pointer")[-1].append(_l)
# Designate new list as current list
getattr(namespace, "pointer").append(_l)
elif option_string == "-c": # Close
# Current is updated to previous
getattr(namespace, "pointer").pop()
elif option_string == "-a": # AvasAna
# Add avasana
lav = getattr(namespace, self.dest)
setattr(namespace, self.dest, [lav, avasAna]) # noqa: F405
last_option = True
else:
logger.error(f"Unrecognized Option {option_string}")
class CustomActionString(Action):
def __init__(self, option_strings, dest, nargs=None, encoding=SLP1, **kwargs):
# if nargs is not None:
# raise ValueError("nargs not allowed")
self.encoding = encoding
super(CustomActionString, self).__init__(option_strings, dest, nargs, **kwargs)
logger.debug(f"Initializing CustomAction {option_strings}, {dest}")
def __call__(self, parser, namespace, values, option_string=None):
global last_option
assert not last_option, f"Option {option_string} added after avasana"
encoding = self.encoding
def _exec(value):
# Shortcuts for two input tests not using predefined objects
# If a string in the first place ends with * it's an anga
# Else it's a pada
# For everything else, use predefined objects
if (value[-1] == "*"):
value = value[:-1]
value = PaninianObject(value, encoding) # noqa: F405
value.setTag("aNga")
elif (value[-1] == "_"):
value = value[:-1]
value = PaninianObject(value, encoding) # noqa: F405
value.setTag("pada")
else:
value = PaninianObject(value, encoding) # noqa: F405
getattr(namespace, "pointer")[-1].append(value)
logger.info('%r %r %r' % (namespace, values, option_string))
if getattr(namespace, self.dest) is None:
_n = []
# This tracks the hierarchical input list
setattr(namespace, self.dest, _n)
# Last item of this is always the current level of the input
setattr(namespace, "pointer", [_n])
if isinstance(values, list):
for v in values:
_exec(v)
else:
_exec(values)
def get_args(argv=None):
"""
Argparse routine.
Returns args variable
"""
parser = ArgumentParser(description='Paninian Generator: Prakriti + Pratyaya')
# String to encode
parser.add_argument('--debug', action='store_true')
parser.add_argument('-p', '--pratyaya', nargs="+", dest="inputs", action=CustomAction)
parser.add_argument('-d', '--dhatu', dest="inputs", action=CustomAction)
parser.add_argument('-t', '--pratipadika', dest="inputs", action=CustomAction)
parser.add_argument('-s', '--string', nargs="+", dest="inputs", encoding=SLP1, action=CustomActionString)
parser.add_argument('-o', nargs="?", dest="inputs", action=CustomAction, help="Open bracket") # Open Brace
parser.add_argument('-c', nargs="?", dest="inputs", action=CustomAction, help="Close bracket")
parser.add_argument('-a', nargs="?", dest="inputs", action=CustomAction, help="Avasana")
parser.add_argument("--vibhakti", action="store_true", help="generate all vibhaktis")
parser.add_argument("--gen-test", action="store_true", help="generate vibhakti test")
parser.add_argument("--verbose", action="store_true", help="verbose")
return parser.parse_args(argv)
def cmd_line():
# Logging
enable_console_logger()
args = get_args()
if args.debug:
enable_file_logger(level=logging.DEBUG)
logger.info(f"Inputs {args.inputs}")
for i in args.inputs:
def _i(x):
if isinstance(x, list):
for _x in x:
_i(_x)
else:
logger.info(f"{x} {x.tags}")
_i(i)
logger.info("End Inputs")
if args.vibhakti:
if ((len(args.inputs) != 1) or (not isinstance(args.inputs[0], Pratipadika))): # noqa: F405
logger.info(f"Need a single pratipadika for vibhaktis, got {len(args.inputs)} inputs, first one of type {type(args.inputs[0])}")
logger.info("Simplifying")
r = run_pp(args.inputs, args.verbose)
logger.debug(f"Output: {[''.join([str(x) for x in y]) for y in r]}")
assert len(r) == 1, "Got multiple outputs"
pp = PaninianObject.join_objects(r)
logger.info(f"Output {pp} {pp.tags}")
else:
pp = args.inputs[0]
r = generate_vibhakti(pp, args.verbose)
print("Output")
if args.gen_test:
rr = [[[y[0].transcoded(DEVANAGARI) for y in va] if len(va) > 1 else va[0][0].transcoded(DEVANAGARI) for va in vi] for vi in r]
print(f"prAtipadika[\"{str(pp)}\"] = {str(pp)}")
print(f"viBakti[\"{str(pp)}\"] = [")
for vi in rr:
print(f"{vi},")
print("]")
else:
for ix, vi in enumerate(r):
print(f"{', '.join(['/'.join([''.join([x.transcoded(DEVANAGARI) for x in y]).strip('।') for y in va]) for va in vi])}")
else:
r = run_pp(args.inputs, args.verbose)
print(f"Output: {[''.join([str(x) for x in y]) for y in r]}")
|
(Country Fancast) Gal Friday Sets Our Hearts On Fire With Debut Album, "Smoke and Mirrors"
Emerging country music group Gal Friday is ready to leave their mark on the country scene with their debut record, 'Smoke and Mirrors.' More here!
Country sister trio Gal Friday (Melissa York, Ashley Min, Lauren Wilson) will release their debut album Smoke and Mirrors this Friday, October 20th, but you can hear the focus track exclusively right here, right now! Enjoy listening to “Smoke and Mirrors” below.
|
# Debian packaging tools: GPG key pair generation.
#
# Author: Peter Odding <[email protected]>
# Last Change: April 18, 2020
# URL: https://github.com/xolox/python-deb-pkg-tools
"""
GPG key pair generation and signing of ``Release`` files.
The :mod:`deb_pkg_tools.gpg` module is used to manage GPG key pairs. It allows
callers to specify which GPG key pair and/or key ID they want to use and will
automatically generate GPG key pairs that don't exist yet.
.. _GnuPG 2.1 compatibility:
GnuPG 2.1 compatibility
-----------------------
In 2018 the :mod:`deb_pkg_tools.gpg` module got a major update to enable
compatibility with GnuPG >= 2.1:
- The :mod:`deb_pkg_tools.gpg` module was first integrated into deb-pkg-tools
in 2013 and was developed based on GnuPG 1.4.10 which was the version
included in Ubuntu 10.04.
- Ubuntu 18.04 includes GnuPG 2.2.4 which differs from 1.4.10 in several
backwards incompatible ways that require changes in deb-pkg-tools which
directly affect the users of deb-pkg-tools (the API has changed).
The following sections discuss the concrete changes:
.. contents::
:local:
Storage of secret keys
~~~~~~~~~~~~~~~~~~~~~~
The storage of secret keys has changed in a backwards incompatible way, such
that the ``--secret-keyring`` command line option is now obsolete and ignored.
The GnuPG documentation suggests to use an `ephemeral home directory`_ as a
replacement for ``--secret-keyring``. To enable compatibility with GnuPG >= 2.1
while at the same time preserving compatibility with older releases, the
:class:`GPGKey` class gained a new :attr:`~GPGKey.directory` property:
- When GnuPG >= 2.1 is detected :attr:`~GPGKey.directory` is required.
- When GnuPG < 2.1 is detected :attr:`~GPGKey.directory` may be specified and
will be respected, but you can also use "the old calling convention" where
the :attr:`~GPGKey.public_key_file`, :attr:`~GPGKey.secret_key_file` and
:attr:`~GPGKey.key_id` properties are specified separately.
- The documentation of the :class:`GPGKey` initializer explains how to enable
  compatibility with old and new GnuPG versions at the same time
(using the same Python code).
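
As a rough sketch (the paths below are invented for the example), specifying
all three locations keeps the key pair isolated from the user's default
keyring on both old and new GnuPG versions:

.. code-block:: python

   from deb_pkg_tools.gpg import GPGKey

   key = GPGKey(
       name='Repository signing key',
       description='Automatically generated by deb-pkg-tools',
       directory='/etc/my-repo/gnupg',              # used by GnuPG >= 2.1
       public_key_file='/etc/my-repo/pubring.gpg',  # used by GnuPG < 2.1
       secret_key_file='/etc/my-repo/secring.gpg',  # used by GnuPG < 2.1
   )
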
Unattended key generation
~~~~~~~~~~~~~~~~~~~~~~~~~
The default behavior of ``gpg --batch --gen-key`` has changed:
- The user is now presented with a GUI prompt that asks to specify a pass
phrase for the new key, at which point the supposedly unattended key
generation is effectively blocked on user input...
- To avoid the GUI prompt the new ``%no-protection`` option needs to be added
to the batch file, but of course that option will not be recognized by older
GnuPG releases, so it needs to be added conditionally.
.. _ephemeral home directory: https://www.gnupg.org/documentation/manuals/gnupg/Ephemeral-home-directories.html#Ephemeral-home-directories
"""
# Standard library modules.
import logging
import multiprocessing
import os.path
import tempfile
# External dependencies.
from executor import execute, quote
from humanfriendly import Timer, coerce_boolean, parse_path
from humanfriendly.decorators import cached
from humanfriendly.text import compact
from property_manager import PropertyManager, cached_property, mutable_property
# Modules included in our package.
from deb_pkg_tools.utils import find_installed_version, makedirs
from deb_pkg_tools.version import Version
# Public identifiers that require documentation.
__all__ = (
"EntropyGenerator",
"FORCE_ENTROPY",
"GPGKey",
"GPG_AGENT_VARIABLE",
"create_directory",
"generate_entropy",
"have_updated_gnupg",
"initialize_gnupg",
"logger",
)
# Initialize a logger.
logger = logging.getLogger(__name__)
FORCE_ENTROPY = coerce_boolean(os.environ.get('DPT_FORCE_ENTROPY', 'false'))
"""
:data:`True` to allow :func:`GPGKey.generate_key_pair()` to force the system to
generate entropy based on disk I/O, :data:`False` to disallow this behavior
(the default).
This was added to facilitate the deb-pkg-tools test suite running on Travis CI.
It is assumed that this rather obscure functionality will only ever be useful
in the same context: Running a test suite in a virtualization environment with
very low entropy.
The environment variable ``$DPT_FORCE_ENTROPY`` can be used to control the
value of this variable (see :func:`~humanfriendly.coerce_boolean()` for
acceptable values).
"""
GPG_AGENT_VARIABLE = 'GPG_AGENT_INFO'
"""The name of the environment variable used to communicate between the GPG agent and :man:`gpg` processes (a string)."""
def create_directory(pathname):
"""
Create a GnuPG directory with sane permissions (to avoid GnuPG warnings).
:param pathname: The directory to create (a string).
"""
makedirs(pathname)
os.chmod(pathname, 0o700)
@cached
def have_updated_gnupg():
"""
Check which version of GnuPG is installed.
:returns: :data:`True` if GnuPG >= 2.1 is installed,
:data:`False` for older versions.
"""
gnupg_version = find_installed_version('gnupg')
return Version(gnupg_version) >= Version('2.1')
def initialize_gnupg():
"""
Make sure the ``~/.gnupg`` directory exists.
Older versions of GPG can/will fail when the ``~/.gnupg`` directory doesn't
exist (e.g. in a newly created chroot). GPG itself creates the directory
after noticing that it's missing, but then still fails! Later runs work
fine however. To avoid this problem we make sure ``~/.gnupg`` exists before
we run GPG.
"""
create_directory(parse_path('~/.gnupg'))
class GPGKey(PropertyManager):
"""
Container for generating GPG key pairs and signing release files.
This class is used to sign ``Release`` files in Debian package
repositories. If the given GPG key pair doesn't exist yet it will be
automatically created without user interaction (except gathering of
entropy, which is not something I can automate :-).
"""
def __init__(self, **options):
"""
Initialize a :class:`GPGKey` object.
:param options: Refer to the initializer of the superclass
(:class:`~property_manager.PropertyManager`)
for details about argument handling.
There are two ways to specify the location of a GPG key pair:
- The old way applies to GnuPG < 2.1 and uses :attr:`public_key_file`
and :attr:`secret_key_file`.
- The new way applies to GnuPG >= 2.1 and uses :attr:`directory`.
If you don't specify anything the user's default key pair will be used.
Specifying all three properties enables isolation from the user's
default keyring that's compatible with old and new GnuPG installations
at the same time.
You can also use :attr:`key_id` to select a specific existing GPG key
pair, possibly in combination with the previously mentioned properties.
When the caller has specified a custom location for the GPG key pair
but the associated files don't exist yet a new GPG key pair will be
automatically generated. This requires that :attr:`name` and
:attr:`description` have been set.
"""
# Initialize our superclass.
super(GPGKey, self).__init__(**options)
# Initialize ourselves based on the GnuPG version.
if have_updated_gnupg():
self.check_new_usage()
else:
self.check_old_usage()
self.set_old_defaults()
self.check_old_files()
self.check_key_id()
self.generate_key_pair()
def check_key_id(self):
"""Raise :exc:`~exceptions.EnvironmentError` when a key ID has been specified but the key pair doesn't exist."""
if self.key_id and not self.existing_files:
raise EnvironmentError(compact(
"The key ID {key_id} was specified but the configured key pair doesn't exist!",
key_id=self.key_id,
))
def check_new_usage(self):
"""
Raise an exception when detecting a backwards incompatibility.
:raises: :exc:`~exceptions.TypeError` as described below.
When GnuPG >= 2.1 is installed the :func:`check_new_usage()` method is
called to make sure that the caller is aware of the changes in API
contract that this implies. We do so by raising an exception when both
of the following conditions hold:
- The caller is using the old calling convention of setting
:attr:`public_key_file` and :attr:`secret_key_file` (which
confirms that the intention is to use an isolated GPG key).
- The caller is not using the new calling convention of setting
:attr:`directory` (even though this is required to use an isolated
GPG key with GnuPG >= 2.1).
"""
if self.old_usage and not self.new_usage:
raise TypeError(compact("""
You're running GnuPG >= 2.1 which requires changes to how
deb_pkg_tools.gpg.GPGKey is used and unfortunately our
caller hasn't been updated to support this. Please refer
                to the deb-pkg-tools 5.0 release notes for details.
"""))
def check_old_files(self):
"""
Raise an exception when we risk overwriting an existing public or secret key file.
:returns: A list of filenames with existing files.
:raises: :exc:`~exceptions.EnvironmentError` as described below.
When GnuPG < 2.1 is installed :func:`check_old_files()` is called to
ensure that when :attr:`public_key_file` and :attr:`secret_key_file`
have been provided, either both of the files already exist or neither
one exists. This avoids accidentally overwriting an existing file that
wasn't generated by deb-pkg-tools and shouldn't be touched at all.
"""
if len(self.existing_files) == 1:
raise EnvironmentError(compact(
"Refusing to overwrite existing key file! ({filename})",
filename=self.existing_files[0],
))
def check_old_usage(self):
"""
Raise an exception when either the public or the secret key hasn't been provided.
:raises: :exc:`~exceptions.TypeError` as described below.
When GnuPG < 2.1 is installed :func:`check_old_usage()` is called
to ensure that :attr:`public_key_file` and :attr:`secret_key_file`
are either both provided or both omitted.
"""
if self.secret_key_file and not self.public_key_file:
raise TypeError(compact("""
The secret key file {filename} was provided without a
corresponding public key file! Please provide both or
neither.
""", filename=self.secret_key_file))
elif self.public_key_file and not self.secret_key_file:
raise TypeError(compact("""
The public key file {filename} was provided without a
corresponding secret key file! Please provide both or
neither.
""", filename=self.public_key_file))
def generate_key_pair(self):
"""
Generate a missing GPG key pair on demand.
:raises: :exc:`~exceptions.TypeError` when the GPG key pair needs to be
generated (because it doesn't exist yet) but no :attr:`name`
and :attr:`description` were provided.
"""
logger.debug("Checking if GPG key pair exists ..")
if self.existing_files:
logger.debug("Assuming key pair exists (found existing files: %s).", self.existing_files)
return
elif not (self.name and self.description):
raise TypeError("Can't generate GPG key pair without 'name' and 'description'!")
logger.info("Generating GPG key pair: %s (%s)", self.name, self.description)
# Make sure all of the required directories exist and have sane
# permissions (to avoid GnuPG warnings).
required_dirs = set([self.directory_default, self.directory_effective])
if not have_updated_gnupg():
required_dirs.update([
os.path.dirname(self.public_key_file),
                os.path.dirname(self.secret_key_file),
])
for directory in required_dirs:
create_directory(directory)
# Use a temporary file for the `gpg --batch --gen-key' batch instructions.
fd, temporary_file = tempfile.mkstemp(suffix='.txt')
try:
with open(temporary_file, 'w') as handle:
handle.write(self.batch_script)
handle.write('\n')
# Inform the operator that this may take a while.
logger.info(compact("""
Please note: Generating a GPG key pair can take a long time. If
you are logged into a virtual machine or a remote server over
SSH, now is a good time to familiarize yourself with the
concept of entropy and how to make more of it :-)
"""))
timer = Timer()
with EntropyGenerator():
gen_key_cmd = self.scoped_command
gen_key_cmd += ['--batch', '--gen-key', temporary_file]
execute(*gen_key_cmd, logger=logger)
logger.info("Finished generating GPG key pair in %s.", timer)
finally:
os.unlink(temporary_file)
# Reset cached properties after key generation.
self.clear_cached_properties()
def set_old_defaults(self):
"""Fall back to the default public and secret key files for GnuPG < 2.1."""
if not self.public_key_file and not self.secret_key_file:
self.public_key_file = os.path.join(self.directory_effective, 'pubring.gpg')
self.secret_key_file = os.path.join(self.directory_effective, 'secring.gpg')
@cached_property
def batch_script(self):
"""A GnuPG batch script suitable for ``gpg --batch --gen-key`` (a string)."""
logger.debug("Generating batch script for 'gpg --batch --gen-key' ..")
lines = [
'Key-Type: RSA',
'Key-Length: 1024',
'Subkey-Type: ELG-E',
'Subkey-Length: 1024',
'Name-Real: %s' % self.name,
'Name-Comment: %s' % self.description,
'Name-Email: none',
'Expire-Date: 0',
]
if have_updated_gnupg():
# GnuPG >= 2.1 prompts the operator to pick a password
# interactively unless '%no-protection' is used. Also
# %secring has been obsoleted and is now ignored.
logger.debug("Specializing batch script for GnuPG >= 2.1 ..")
lines.append('%no-protection')
else:
logger.debug("Specializing batch script for GnuPG < 2.1 ..")
lines.append('%%pubring %s' % self.public_key_file)
lines.append('%%secring %s' % self.secret_key_file)
lines.append('%commit')
text = '\n'.join(lines)
logger.debug("Here's the complete batch script:\n%s", text)
return text
@mutable_property
def command_name(self):
"""The name of the GnuPG program (a string, defaults to :man:`gpg`)."""
return 'gpg'
@mutable_property
def description(self):
"""
The description of the GPG key pair (a string or :data:`None`).
Used only when the key pair is generated because it doesn't exist yet.
"""
@mutable_property
def directory(self):
"""
The pathname of the GnuPG home directory to use (a string or :data:`None`).
This property was added in deb-pkg-tools 5.0 to enable compatibility
with GnuPG >= 2.1 which changed the storage of secret keys in a
backwards incompatible way by obsoleting the ``--secret-keyring``
command line option. The GnuPG documentation suggests to use an
`ephemeral home directory`_ as a replacement and that's why the
:attr:`directory` property was added.
"""
@cached_property
def directory_default(self):
"""The pathname of the default GnuPG home directory (a string)."""
return parse_path('~/.gnupg')
@cached_property
def directory_effective(self):
"""The pathname of the GnuPG home directory that will actually be used (a string)."""
return self.directory or self.directory_default
@cached_property
def existing_files(self):
"""
A list of strings with the filenames of existing GnuPG data files.
The content of this list depends on the GnuPG version:
- On GnuPG >= 2.1 and/or when :attr:`directory` has been set (also on
GnuPG < 2.1) any files in or below :attr:`directory` are included.
- On GnuPG < 2.1 :attr:`public_key_file` and :attr:`secret_key_file`
are included (only if the properties are set and the files exist of
course).
"""
filenames = []
if have_updated_gnupg() or self.new_usage:
# New usage is mandatory in combination with GnuPG >= 2.1 and
# optional but supported in combination with GnuPG < 2.1.
if os.path.isdir(self.directory_effective):
for root, dirs, files in os.walk(self.directory_effective):
filenames.extend(os.path.join(root, fn) for fn in files)
if self.old_usage and not have_updated_gnupg():
            # Old usage is only possible in combination with GnuPG < 2.1.
candidates = (self.public_key_file, self.secret_key_file)
filenames.extend(fn for fn in candidates if os.path.isfile(fn))
return filenames
@cached_property
def identifier(self):
"""
A unique identifier for the GPG key pair (a string).
The output of the ``gpg --list-keys --with-colons`` command is parsed
to extract a unique identifier for the GPG key pair:
- When a fingerprint is available this is preferred.
- Otherwise a long key ID will be returned (assuming one is available).
- If neither can be extracted :exc:`~exceptions.EnvironmentError` is raised.
If an isolated key pair is being used the :attr:`directory` option
should be used instead of the :attr:`public_key_file` and
:attr:`secret_key_file` properties, even if GnuPG < 2.1 is being used.
This is necessary because of what appears to be a bug in GnuPG, see
`this mailing list thread`_ for more discussion.
.. _this mailing list thread: https://lists.gnupg.org/pipermail/gnupg-users/2002-March/012144.html
"""
listing = execute(' '.join([self.gpg_command, '--list-keys', '--with-colons']), capture=True)
parsed_listing = [line.split(':') for line in listing.splitlines()]
# Look for an 'fpr:*' line with a key fingerprint.
for fields in parsed_listing:
if len(fields) >= 10 and fields[0] == 'fpr' and fields[9].isalnum():
return fields[9]
        # Look for a 'pub:*' line with a long key ID.
for fields in parsed_listing:
if len(fields) >= 5 and fields[0] == 'pub' and fields[4].isalnum():
return fields[4]
# Explain what went wrong, try to provide hints.
msg = "Failed to get unique ID of GPG key pair!"
if self.old_usage and not self.new_usage:
msg += " Use of the 'directory' option may help to resolve this."
raise EnvironmentError(msg)
@property
def gpg_command(self):
"""
The GPG command line that can be used to sign using the key, export the key, etc (a string).
        The value of :attr:`gpg_command` is based on :attr:`scoped_command`,
        extended with the ``--no-default-keyring``, ``--keyring`` and
        ``--secret-keyring`` options when GnuPG < 2.1 is installed, and with
        ``--recipient`` and ``--use-agent`` when applicable. See
        :func:`GPGKey.__init__()` for background on the calling conventions.
"""
command = self.scoped_command
if not have_updated_gnupg():
command.extend((
'--no-default-keyring',
'--keyring', self.public_key_file,
'--secret-keyring', self.secret_key_file,
))
if self.key_id:
command.extend(('--recipient', self.key_id))
if self.use_agent:
command.append('--use-agent')
return quote(command)
@mutable_property
def key_id(self):
"""
The key ID of an existing key pair to use (a string or :data:`None`).
If this option is provided then the key pair must already exist.
"""
@mutable_property
def name(self):
"""
The name of the GPG key pair (a string or :data:`None`).
Used only when the key pair is generated because it doesn't exist yet.
"""
@property
def new_usage(self):
""":data:`True` if the new API is being used, :data:`False` otherwise."""
return bool(self.directory)
@property
def old_usage(self):
""":data:`True` if the old API is being used, :data:`False` otherwise."""
return bool(self.public_key_file or self.secret_key_file)
@mutable_property
def public_key_file(self):
"""
The pathname of the public key file (a string or :data:`None`).
This is only used when GnuPG < 2.1 is installed.
"""
@property
def scoped_command(self):
"""
The GPG program name and optional ``--homedir`` command line option (a list of strings).
The name of the GPG program is taken from :attr:`command_name` and the
``--homedir`` option is only added when :attr:`directory` is set.
"""
command = [self.command_name]
if self.directory:
command.append('--homedir')
command.append(self.directory)
return command
@mutable_property
def secret_key_file(self):
"""
The pathname of the secret key file (a string or :data:`None`).
This is only used when GnuPG < 2.1 is installed.
"""
@property
def use_agent(self):
"""
Whether to enable the use of the `GPG agent`_ (a boolean).
This property checks whether the environment variable given by
:data:`GPG_AGENT_VARIABLE` is set to a nonempty value. If it is then
:attr:`gpg_command` will include the ``--use-agent`` option. This makes
it possible to integrate repository signing with the GPG agent, so that
a password is asked for once instead of every time something is signed.
.. _GPG agent: http://linux.die.net/man/1/gpg-agent
"""
return bool(os.environ.get(GPG_AGENT_VARIABLE))
class EntropyGenerator(object):
"""
Force the system to generate entropy based on disk I/O.
The `deb-pkg-tools` test suite runs on Travis CI which uses virtual
machines to isolate tests. Because the `deb-pkg-tools` test suite generates
    several GPG keys it runs the risk of getting stuck and being killed
after 10 minutes of inactivity. This happens because of a lack of entropy
which is a very common problem in virtualized environments.
There are tricks to use fake entropy to avoid this problem:
- The `rng-tools` package/daemon can feed ``/dev/random`` based on
``/dev/urandom``. Unfortunately this package doesn't work on Travis CI
because they use OpenVZ which uses read only ``/dev/random`` devices.
- GPG version 2 supports the ``--debug-quick-random`` option but I haven't
investigated how easy it is to switch.
Instances of this class can be used as a context manager to generate
endless disk I/O which is one of the few sources of entropy on virtualized
systems. Entropy generation is enabled when the environment variable
``$DPT_FORCE_ENTROPY`` is set to ``yes``, ``true`` or ``1``.
"""
def __init__(self):
"""Initialize a :class:`EntropyGenerator` object."""
self.enabled = coerce_boolean(os.environ.get('DPT_FORCE_ENTROPY', 'false'))
if self.enabled:
self.process = multiprocessing.Process(target=generate_entropy)
def __enter__(self):
"""Enable entropy generation."""
if self.enabled:
logger.warning("Forcing entropy generation using disk I/O, performance will suffer ..")
self.process.start()
def __exit__(self, exc_type, exc_value, traceback):
"""Disable entropy generation."""
if self.enabled:
self.process.terminate()
logger.debug("Terminated entropy generation.")
def generate_entropy():
"""
Force the system to generate entropy based on disk I/O.
This function is run in a separate process by :class:`EntropyGenerator`.
It scans the complete file system and reads every file it finds in blocks
of 1 KB. This function never returns; it has to be killed.
"""
# Continue until we are killed.
while True:
# Scan the complete file system.
for root, dirs, files in os.walk('/'):
for filename in files:
pathname = os.path.join(root, filename)
# Don't try to read device files, named pipes, etc.
if os.path.isfile(pathname):
# Read every file on the file system in blocks of 1 KB.
try:
with open(pathname) as handle:
while True:
block = handle.read(1024)
if not block:
break
except Exception:
pass
|
I am having trouble with all of my portrait-mode pictures showing up sideways. They appear correctly on my computer before I import them, but they are all on their sides (landscape mode) in Boatlogger and cannot be rotated.
The rotation of the photos follows the EXIF information in the image file. If the rotation is not correct, then the image contains incorrect EXIF data. This can happen from some photo programs.
Contact support and they can analyze your specific pictures.
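If you want to check or fix this yourself before uploading, the sketch below (assuming Python with the Pillow library is available) reads the EXIF Orientation tag and re-saves the photo with the pixels physically rotated, so that viewers which ignore EXIF data still show it upright:

from PIL import Image

def normalize_orientation(path, out_path):
    # 274 is the standard EXIF Orientation tag
    image = Image.open(path)
    orientation = image.getexif().get(274)
    if orientation == 3:
        image = image.rotate(180, expand=True)
    elif orientation == 6:
        image = image.rotate(270, expand=True)   # 90 degrees clockwise
    elif orientation == 8:
        image = image.rotate(90, expand=True)    # 90 degrees counter-clockwise
    image.save(out_path)  # re-saved without the stale orientation flag

normalize_orientation('portrait.jpg', 'portrait_fixed.jpg')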
|
"""Implement Prometheus client."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import parse_qs
from ryu.lib import hub
from pbr.version import VersionInfo
from prometheus_client import Gauge as PromGauge
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST, REGISTRY
# Ryu's WSGI implementation doesn't always set QUERY_STRING
def make_wsgi_app(registry):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
query_str = environ.get('QUERY_STRING', '')
params = parse_qs(query_str)
reg = registry
if 'name[]' in params:
reg = reg.restricted_registry(params['name[]'])
output = generate_latest(reg)
status = str('200 OK')
headers = [(str('Content-type'), CONTENT_TYPE_LATEST)]
start_response(status, headers)
return [output]
return prometheus_app
class PromClient: # pylint: disable=too-few-public-methods
"""Prometheus client."""
REQUIRED_LABELS = ['dp_id', 'dp_name']
_reg = REGISTRY
def __init__(self, reg=None):
if reg is not None:
self._reg = reg
self.version = VersionInfo('faucet').semantic_version().release_string()
self.faucet_version = PromGauge( # pylint: disable=unexpected-keyword-arg
'faucet_pbr_version',
'Faucet PBR version',
['version'],
registry=self._reg)
self.faucet_version.labels(version=self.version).set(1) # pylint: disable=no-member
self.server = None
self.thread = None
def start(self, prom_port, prom_addr, use_test_thread=False):
"""Start webserver."""
if not self.server:
app = make_wsgi_app(self._reg)
if use_test_thread:
# pylint: disable=import-outside-toplevel
from wsgiref.simple_server import (
make_server, WSGIRequestHandler)
import threading
class NoLoggingWSGIRequestHandler(WSGIRequestHandler):
"""Don't log requests."""
def log_message(self, *_args): # pylint: disable=arguments-differ
pass
self.server = make_server(
prom_addr, int(prom_port), app, handler_class=NoLoggingWSGIRequestHandler)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.daemon = True
self.thread.start()
else:
self.server = hub.WSGIServer((prom_addr, int(prom_port)), app)
self.thread = hub.spawn(self.server.serve_forever)
self.thread.name = 'prometheus'
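# Rough usage sketch (the port and address values are illustrative only):
#
#   client = PromClient()                  # collects on the default prometheus REGISTRY
#   client.start(9302, '0.0.0.0')          # serve metrics via Ryu's hub WSGI server
#   # or, in tests, a stdlib wsgiref server running in a daemon thread:
#   client.start(9302, '127.0.0.1', use_test_thread=True)
#
# Passing a dedicated prometheus_client CollectorRegistry via PromClient(reg=...)
# keeps these metrics isolated from any other collectors in the same process.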
|
We follow a well-defined service plan and are supported by professionals with rich industry experience. This gives our global clients easy access to different entry options in India. These services, such as “Turnaround of Loss Making Companies”, are rendered as per the rules and regulations set by the governing bodies. We are professional taxation service providers from India, and our experts provide valuable financial and investment solutions to clients in Moti Nagar.
|
#!/usr/bin/env python3
# Legibilidad 2 (beta)
# Averigua la legibilidad de un texto
# Spanish readability calculations
# © 2016 Alejandro Muñoz Fernández
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlite3
def rare_words(wordlist):
'''
    List of rare words (not in the SUBTLEX-ESP database). TODO: make only one
    query instead of one per word; it will be faster (see the single-query
    sketch after this function).
'''
dbpath = "/home/protected/db/SUBTLEX-ESP.db"
conn = sqlite3.connect(dbpath)
rarewords = []
cur = conn.cursor()
for word in wordlist:
cur.execute('SELECT 1 FROM frecuencias WHERE palabra = ? LIMIT 1', (word,))
if not cur.fetchone():
rarewords.append(word)
conn.close()
return rarewords
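# A possible single-query variant of rare_words(), as suggested in the docstring
# above (a sketch only: it keeps the same database path, table and column names,
# and preserves duplicates and ordering in the returned list).
def rare_words_single_query(wordlist):
    if not wordlist:
        return []
    dbpath = "/home/protected/db/SUBTLEX-ESP.db"
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    # Note: SQLite limits the number of '?' placeholders per statement
    # (commonly 999), so very long word lists may need to be chunked.
    unique_words = tuple(set(wordlist))
    placeholders = ','.join('?' * len(unique_words))
    cur.execute('SELECT palabra FROM frecuencias WHERE palabra IN (%s)' % placeholders,
                unique_words)
    known = {row[0] for row in cur.fetchall()}
    conn.close()
    return [word for word in wordlist if word not in known]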
|
National Weather Service radar detected a squall band offshore from Ormond Beach to New Smyrna Beach that could produce wind gusts in the 50 mph range.
Before 4:30 p.m. the line was moving rapidly west at 40-45 mph and was expected to produce gusts up to 55 mph as it moves through central and southern Volusia County.
The National Weather Service urged people to remain indoors until the squalls pass.
|
import numpy
import cPickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--top-unigram", type=str)
parser.add_argument("--src-w2i", type=str)
parser.add_argument("--trg-w2i", type=str)
parser.add_argument("--vocab-size", type=int)
parser.add_argument("--output", type=str)
args = parser.parse_args()
with open(args.src_w2i,'rb') as f:
src_w2i = cPickle.load(f)
with open(args.trg_w2i,'rb') as f:
trg_w2i = cPickle.load(f)
with open(args.top_unigram,'rb') as f:
top_unigram = cPickle.load(f)
new_dict = {}
for old_key in top_unigram:
if old_key == '<eps>': # Don't consider the empty string
continue
new_key = src_w2i[old_key] # Convert source word to its index
if new_key >= args.vocab_size:
continue
old_value = top_unigram[old_key] # This is a list of words (with the most probable one first)
new_value = [trg_w2i[elt] for elt in old_value if (trg_w2i[elt] < args.vocab_size)]
if len(new_value) >= 1:
new_dict[new_key] = new_value
with open(args.output,'wb') as f:
cPickle.dump(new_dict, f, -1)
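# Shape of the data involved (a sketch; the actual contents depend on the pickles):
#   top_unigram: {'house': ['maison', 'domicile', ...], ...}  source word -> ranked target words
#   src_w2i / trg_w2i: {'house': 31, ...}                     word -> integer index
#   new_dict: {31: [57, 102, ...], ...}                       source index -> in-vocabulary target indices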
|
From Pike Brewing, here is the scoop on this year’s Chocofest – Pike’s annual celebration of romance, beer, chocolate, wine, spirits, food and more. Sunday, February 10, 5:00 til 8:00. This event is quite amazing – too much good stuff all in one place.
Chocolatiers, Winemakers, and Brewers, Restaurateurs, Bakers, Cheese Makers.
FOREPLAY BEFORE THE BIG DAY: The Pike Brewing Company will celebrate Pike Chocofest on February 10, 2013.
A SWEET HISTORY: Pike Chocofest began in 2009 as a romantic experience, designed to expose the sensual relationship between chocolate and beer, as Seattle is a love-nest for both. Wine, spirits, cider and mead also cohabitate deliciously with chocolate, though perhaps they are not as orgasmic a combination as with beer; vintners, distillers, and mead makers might disagree. For years wineries have promoted red wine and chocolate; drinking eau de vie with chocolate truffles at the end of the meal is expected in Switzerland and France.
SPECIAL PUB HOURS ON FEBRUARY 10: On February 10, the Pike Pub will close at 4:00 pm for regular service and will reopen at 5:00 p.m. for Chocofest guests who will be able to enjoy food and drink in Pike’s Microbrewery Museum, in each of the rooms in the pub, and in the brewery cellar.
BENEFIT FOR PUGET SOUNDKEEPER ALLIANCE: Pike Chocofest is a Valentine’s gift for everyone involved, including Puget Soundkeeper Alliance for helping to keep local waters pristine. Speaking of local waters, another love potion on which most everyone agrees will be featured at Pike Chocofest 2013: Taylor United oysters on the half shell.
|
#!/usr/bin/python
import numpy as np
import scipy.interpolate as si
import mayavi.mlab as mylab
def calc_points(line):
points = np.zeros((len(line),3)) # indicies -> point coordinates
for i in range(points.shape[0]):
#points[i,0] = 2 * 0.556 * (line[i][0]-0.5)
#points[i,1] = 2 * 0.556 * (line[i][1]-0.5)
#points[i,2] = 0.798 * (line[i][2]-0.5) # z axis
points[i,0] = 0.556 * (line[i][0]-0.5)
points[i,1] = 0.556 * (line[i][1]-0.5)
points[i,2] = 0.798 * (line[i][2]-0.5) # z axis
#points[i,0] = 0.556 * (line[i][0])
#points[i,1] = 0.556 * (line[i][1])
#points[i,2] = 0.798 * (line[i][2]) # z axis
return points
def bspline(cv, n=100, degree=3):
cv = np.asarray(cv)
count = cv.shape[0]
degree = np.clip(degree,1,count-1) # max degree = count-1
kv = np.array([0]*degree + range(count-degree+1) + [count-degree]*degree,dtype='int')
u = np.linspace(0,(count-degree),num=n)
points = np.zeros((len(u),cv.shape[1]))
for i in xrange(cv.shape[1]):
points[:,i] = si.splev(u, (kv,cv[:,i],degree))
return points
# save geometry lines
def save_poly(fname, lines):
fname += "_poly.txt"
f = open(fname, 'w')
print ' ', fname
for line in lines:
points = calc_points(line)
#spoints = bspline(points, n=points.shape[0], degree=20)
##m = len(points)
m = len(points)/2
if m<4: continue
kx = 3
##if(m>3): kx = 3
##else: kx = m-1
wx = np.ones(len(points))
wx[0] = wx[-1] = 100
tck,u=si.splprep(np.transpose(points),w=wx,k=kx,s=10)
##m /= 2
##if(m<4) : m=4
spoints = np.transpose([si.splev(np.linspace(0,1,m),tck)])
f.write("%2d " % m)
for spoint in spoints:
for vert in spoint:
f.write("%0.2f " % vert)
f.write('\n')
mylab.plot3d(points[:,0], points[:,1], points[:,2], color=(1,0,0))
mylab.plot3d(spoints[:,0], spoints[:,1], spoints[:,2], color=(0,1,0))
f.close()
mylab.show()
return
|
Learn how to master the art of storytelling to reach and inspire thousands with your unique story and message. Reserve your spot to this online training here.
I created this course, this coaching mastermind package, because (1) I know how speaking gave me a voice and changed my life, and (2) people have been asking me for this for over two years.
I’m talking about women from all walks of life: mothers, women in ministry, entrepreneurs, coaches, authors, students, trainers and influencers who all want to speak and make a great living doing so. The truth is, I believe I was born to do this but it took more than 30 years to truly own that.
Be it from a stage, in my 1:1 coaching sessions, on Facebook Live or in a group coaching space. So I sat back and asked myself, ‘Ok Liz, how did you get started? What can you teach others?’ And then I wrote out the strategy and the formula for the things that I do; the storytelling, the audience connection, the marketing and branding part, from finding gigs to hosting my own events. And BOOM!!!! I created this ‘Live Online Training Program’ on how to build a speaking business from scratch - in just three months.
she SPEAKS is offered in a group coaching space or through private 1:1 sessions. This program is not for everyone. It’s for the game changers who are ready to use their story to serve the world. It’s for women who are ready to show UP and show OUT.
Can't decide which program you should get? Register for a FREE session here so I can guide you.
It will help you craft and deliver your story with impact and meaning.
You’ll learn how to powerfully connect with your audience.
How to effortlessly have people wanting more of you.
It will help you learn how to create a legacy while influencing small communities, larger communities, and the world.
she SPEAKS will intentionally help you scale and grow your business.
It will help you understand your existing revenue streams, and help you create, generate, and grow new revenue streams.
Register today before we fill up!
Starting your own public-speaking business can be a rewarding experience. It will give you the opportunity to speak in front of a wide diverse group of people from industry professionals, schools, event planners, clubs and nonprofit organizations. Starting a public-speaking business gives you the opportunity to meet potential clients, establish credibility and set your own fees. That’s the icing on the cake!
If you’re ready to get started, we’ve got a spot for you.
|
# im2rgbfits CL0024.png -over -header det.fits
# WILL HONOR WCS FROM headerfile
# im2rgbfits.py
# ~/ACS/CL0024/color/production/color.py
# ALSO SEE pyfits.pdf (Pyfits manual)
#from coetools import *
from PIL import Image
import pyfits
import sys, os
import string
from os.path import exists, join
from numpy import *
#################################
def str2num(str, rf=0):
"""CONVERTS A STRING TO A NUMBER (INT OR FLOAT) IF POSSIBLE
ALSO RETURNS FORMAT IF rf=1"""
try:
num = string.atoi(str)
format = 'd'
except:
try:
num = string.atof(str)
format = 'f'
except:
if not string.strip(str):
num = None
format = ''
else:
words = string.split(str)
if len(words) > 1:
num = map(str2num, tuple(words))
format = 'l'
else:
num = str
format = 's'
if rf:
return (num, format)
else:
return num
def params_cl(converttonumbers=True):
"""RETURNS PARAMETERS FROM COMMAND LINE ('cl') AS DICTIONARY:
KEYS ARE OPTIONS BEGINNING WITH '-'
VALUES ARE WHATEVER FOLLOWS KEYS: EITHER NOTHING (''), A VALUE, OR A LIST OF VALUES
ALL VALUES ARE CONVERTED TO INT / FLOAT WHEN APPROPRIATE"""
list = sys.argv[:]
i = 0
dict = {}
oldkey = ""
key = ""
list.append('') # EXTRA ELEMENT SO WE COME BACK AND ASSIGN THE LAST VALUE
while i < len(list):
if striskey(list[i]) or not list[i]: # (or LAST VALUE)
if key: # ASSIGN VALUES TO OLD KEY
if value:
if len(value) == 1: # LIST OF 1 ELEMENT
value = value[0] # JUST ELEMENT
dict[key] = value
if list[i]:
key = list[i][1:] # REMOVE LEADING '-'
value = None
dict[key] = value # IN CASE THERE IS NO VALUE!
else: # VALUE (OR HAVEN'T GOTTEN TO KEYS)
if key: # (HAVE GOTTEN TO KEYS)
if value:
if converttonumbers:
value.append(str2num(list[i]))
else:
value = value + ' ' + list[i]
else:
if converttonumbers:
value = [str2num(list[i])]
else:
value = list[i]
i += 1
return dict
def striskey(str):
"""IS str AN OPTION LIKE -C or -ker
(IT'S NOT IF IT'S -2 or -.9)"""
iskey = 0
if str:
if str[0] == '-':
iskey = 1
if len(str) > 1:
iskey = str[1] not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
return iskey
def strend(str, phr):
return str[-len(phr):] == phr
def decapfile(name, ext=''):
"""REMOVE EXTENSION FROM FILENAME IF PRESENT
IF ext LEFT BLANK, THEN ANY EXTENSION WILL BE REMOVED"""
if ext:
if ext[0] <> '.':
ext = '.' + ext
n = len(ext)
if name[-n:] == ext:
name = name[:-n]
else:
if strend(name, '.gz'):
            name = name[:-3]  # strip the '.gz' suffix
i = name.rfind('.')
if i > -1:
name = name[:i]
return name
def loadrgb(infile):
im = Image.open(infile)
im = im.transpose(Image.FLIP_TOP_BOTTOM)
# rgb = array(im.getdata())
rgb = asarray(im) # numpy
print rgb.shape
#nx, ny = im.size
#rgb.shape = (ny,nx,3)
rgb = transpose(rgb, (2,0,1))
rgb = rgb[:3] # in case there's an alpha channel on the end
rgb.flags.writeable = True # DEFAULT IS CAN'T EDIT IT!
return rgb
#################################
def im2rgbfits(infile, rgbfile='', overwrite=False, headerfile=None, flip=False):
if rgbfile == '':
rgbfile = decapfile(infile) + '_RGB.fits'
if exists(rgbfile):
if overwrite:
            os.remove(rgbfile)  # delfile() came from coetools; that import is commented out above
else:
print rgbfile, 'EXISTS'
sys.exit(1)
#im = Image.open(infile)
#print 'Loading data...'
#data = array(im.getdata())
#nxc, nyc = im.size
#data.shape = (nyc,nxc,3)
#data = transpose(data, (2,0,1))
data = loadrgb(infile)
#hdu = pyfits.PrimaryHDU()
header = headerfile and pyfits.getheader(headerfile)
hdu = pyfits.PrimaryHDU(None, header)
hdulist = pyfits.HDUList([hdu])
hdulist.writeto(rgbfile)
try: # If there's a 'SCI' extension, then that's where the WCS is
header = pyfits.getheader(headerfile, 'SCI')
except:
pass
if header <> None:
if 'EXTNAME' in header.keys():
del(header['EXTNAME'])
for i in range(3):
print 'RGB'[i]
data1 = data[i]
if flip:
data1 = flipud(data1)
pyfits.append(rgbfile, data1, header)
print rgbfile, 'NOW READY FOR "Open RGB Fits Image" in ds9'
if __name__ == '__main__':
infile = sys.argv[1]
outfile = ''
if len(sys.argv) > 2:
file2 = sys.argv[2]
if file2[0] <> '-':
outfile = file2
params = params_cl()
overwrite = 'over' in params.keys()
headerfile = params.get('header', None)
im2rgbfits(infile, outfile, overwrite=overwrite, headerfile=headerfile)
#hdulist = pyfits.open(rgbfile)
#hdulist.info()
|
The discussion on productivity is usually premised on the output of workers. This basically presupposes that workers are provided with a decent wage or salary, a safe and healthy working environment, good conditions of service and security of tenure. These are the main ingredients that most would expect to drive worker productivity.
The plot falls to pieces where there is a divide between perception and reality. The genesis of this divide can lie in the attitudes and dispositions of both employer and employee.
Starting with the employer, the usual mistake is taking employees for granted. In this instance, the employer can make the cardinal mistake of seeking to exploit employees by demanding more of them without providing the requisite resources to do the job, an increase in pay, or incentives to drive employee productivity.
A case may well be made that employers who engage in such behaviour do so because they hold the power of hiring and firing in their hands.
Akin to this, employers, being mindful of the growing job scarcity, take the liberty of engaging in the practice of making excessive output demands of employees.
The failure of employers to engage their employees and involve them in the decision-making process can also be a source of discontent and discord. The fact that employees can sometimes be made to feel that they are not part of the organization does little to motivate them into giving of their best.
The reluctance of employers to make provision for employees to share in the profits of the organization or to engage in negotiations for improved conditions of service, and attempts to deny workers the right to unionize, to enjoy freedom of association and to bargain collectively, will tend to drive down levels of productivity.
It is therefore important that decent work is promoted, for implicit in this is providing decent, quality and sustainable jobs. If a level playing field is to exist, then employers are required to respect and observe the rights and freedoms of workers. The practices, standards and policies of the workplace should therefore accord with international conventions and practices; and should be consistently applied with the labour laws.
If employers are to attract the best workers, it is in their best interest to offer jobs which are sustainable and which will induce workers to perform at their optimum. This is conditioned on employees being comfortable in work that is rewarding, both in remuneration and in job satisfaction.
By committing to the decent work agenda, employers can lead the charge in promoting decent jobs and offering protection to their employees. The decent work agenda implies access to employment in conditions of freedom; the recognition of the basic rights at work, guaranteeing the absence of discrimination or harassment; an income enabling one to satisfy basic economic, social and family needs and responsibilities; an adequate level of social protection for the worker and family members; and the right to participation and a voice at work, directly or indirectly through self-chosen representative organization.
In treating the subject, it is important to take cognizance of the International Labour Organization’s (ILO) view on the subject of Promoting Jobs: Protecting People.
|
################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPLogicalDisk
HPLogicalDisk is an abstraction of a harddisk.
$Id: HPLogicalDisk.py,v 1.1 2010/06/29 10:41:03 egor Exp $"""
__version__ = "$Revision: 1.1 $"[11:-2]
from ZenPacks.community.deviceAdvDetail.LogicalDisk import *
from HPComponent import *
class HPLogicalDisk(LogicalDisk, HPComponent):
"""HPLogicalDisk object"""
factory_type_information = (
{
'id' : 'HardDisk',
'meta_type' : 'HardDisk',
'description' : """Arbitrary device grouping class""",
'icon' : 'HardDisk_icon.gif',
'product' : 'ZenModel',
'factory' : 'manage_addHardDisk',
'immediate_view' : 'viewHPLogicalDisk',
'actions' :
(
{ 'id' : 'status'
, 'name' : 'Status'
, 'action' : 'viewHPLogicalDisk'
, 'permissions' : (ZEN_VIEW,)
},
{ 'id' : 'perfConf'
, 'name' : 'Template'
, 'action' : 'objTemplates'
, 'permissions' : (ZEN_CHANGE_DEVICE, )
},
{ 'id' : 'viewHistory'
, 'name' : 'Modifications'
, 'action' : 'viewHistory'
, 'permissions' : (ZEN_VIEW_MODIFICATIONS,)
},
)
},
)
def getRRDTemplates(self):
"""
Return the RRD Templates list
"""
templates = []
for tname in [self.__class__.__name__]:
templ = self.getRRDTemplateByName(tname)
if templ: templates.append(templ)
return templates
InitializeClass(HPLogicalDisk)
|
Owen Bell is most likely a familiar name to nature-themed stamp collectors. In recent years, Owen’s detailed, photo-realistic illustrations have appeared in Cocos (Keeling) Islands: Visiting Birds (2015), Jewel Beetles (2016), Endangered Wildlife (2016), Dragonflies (2017) and in the upcoming Frogs stamp issue.
The Frogs stamp issue, which will be released on 10 July 2018, presents four species of frog that inhabit Australia, three of which are at risk. Featured are the Tasmanian Tree Frog (Litoria burrowsae), the endangered Baw Baw Frog (Philoria frosti) and Australian Lace-lid (Litoria dayi), and the critically endangered Armoured Mist Frog (Litoria lorica). The intricacy of Owen Bell’s stamp illustrations help to highlight the particularities of each species, from the delicate eye-lids of the Australian Lace-lid to the mottled colouring of the Baw Baw Frog. The minisheet released with the issue features the Dainty Tree Frog, also illustrated by Owen Bell. The Dainty Tree Frog is an eye-catching species of green tree frog that occurs in areas along the coast from Cape York, Queensland, to Sydney, New South Wales.
Born and based in Western Australia, Owen Bell began working in the UK as a freelance illustrator in 1974, which fulfilled a long-held childhood ambition.
“I have never really considered doing anything else as a career, although I tossed up between being a professional musician or an artist when I was 16. Being an artist won the toss, but the music is still a huge part of my life, even though I’m not a professional, unless you count busking and the occasional party or town fair gig!” says Owen.
“Ever since I was a kid, I have always drawn and painted very detailed art. It just seemed the logical and natural thing to do and I never really questioned why, even though my Mum often did, asking why I drew ‘all that fiddly stuff’. I usually just replied ‘Because I like to’. It wasn’t until I hit my early 60s that I found out the real reason, when I was diagnosed with Asperger’s Syndrome – a form of high-functioning Autism. Many people regard Autism as a ‘disability’. Not me. I consider it a gift, as it enables me to focus on detail and recreate it in my art. It also allows me to maintain that focus for long periods. I often spend 10 to 12 hours working on a painting without a break. It also helps me as a musician, enabling me to learn and memorise hundreds of tunes by ear, without the need to read music. (I play a Swedish Nyckelharpa). I often wonder what I would have spent my life doing had I not been Autistic. Whatever it might have been, it wouldn’t have been as much fun and as fulfilling as what I have done,” says Owen.
Owen completed a three-year commercial art course in Perth, before leaving for the UK in 1966, where he completed a three-year Fine Arts course in the town of Farnham in Surrey, England. It was in the UK, during the 1980s, that Owen first began experimenting with digital art.
“Before completely ‘going digital’ in the late 1990s, I worked primarily in gouache. Detail was achieved simply by very close observation and painting what I saw, using tiny brushes, magnifiers and strong lighting. Going digital was a game changer, as it allows me to add even more detail than previously possible because I can zoom in several hundred per cent and work on tiny areas, even on individual pixels. Most people assume I use Photoshop, but I actually use Corel Painter, which is much more ‘painterly’, even though my ‘paintings’ end up looking more like photos,” says Owen.
Although the program Corel Painter has hundreds of tools, Owen has “no idea” what the vast majority are. He is self-taught and sticks to the tools he needs – two different airbrushes and a couple of specialised brushes and erasers.
“In essence, I paint in more or less the same way I did when using traditional media, the main difference being that now I paint with light, which is pretty magical really. The most important thing to remember with digital art, though, is backing up your work!” says Owen.
Owen began illustrating stamps for the Crown Agents Stamp Bureau in the UK in 1984. The first issue he worked on was not a wildlife subject, but a Trinidad and Tobago stamp commemorating the anniversary of the abolition of slavery. Owen began working on the World Wildlife Fund stamp collection around the same time and worked on that collection until 2015.
While never really one to enter competitions, in 2011 Owen entered a digital piece in the Corel Asia Pacific Digital Art Competition and was both “delighted and surprised” to win first prize. The entry was a painting of an Egret which Owen had completed for a WWF stamp issue some years before.
For Owen, the reference material is the starting point for his stamp illustrations.
“The reference material supplied by Australia Post is usually excellent, but … sometimes it’s necessary to supplement it with extra references which I source myself. Fortunately these days, with so many images available online it’s much easier to find than during the ‘pre-internet’ era! Back then, I had a huge library of natural history books and paid frequent trips to the library. Now I just key in the Latin name of the subject and I can usually find more images than I need,” says Owen.
Once Owen has the reference material he needs, he selects the imagery most suitable for stamp purposes. He commences with a line drawing, just so that he can show what he has in mind for the final artwork. He then blocks in the various components of the subject, whether head, legs, body, wings, eyes, individual markings and the like.
“I always start with the eyes. Get them wrong and the creature looks like a stuffed animal. Get them right and everything else works,” says Owen.
Each component is created on a separate layer, so that it can be adjusted individually. Some artworks have 100 layers or more that make up the final work.
“The beauty of this is that each layer can be worked on and adjusted individually, without the risk of ‘overpainting’. This is also useful because backgrounds are often changed in stamp designs, so layering means that the backgrounds can be easily changed in-house,” says Owen.
0:00 Image of the Dainty Tree Frog eyes.
0:02 Body of the Dainty Tree Frog appears with brownish-green overlay.
0:03 A textured effect is now visible on the face and back of the Dainty Tree Frog.
0:05 More texture and painting effects applied to face, back and stomach of Dainty Tree Frog.
0:07 The left front leg of the frog starts to take shape.
0:12 Work on the left back leg starts, first with the background layer and then subsequent layers adding texture and depth.
0:17 Next to see work is the right front leg.
0:21 Focus now moves to the right back leg.
0:25 Work on the reed that the frog rests on commences.
0:27 A gradient layer below all existing layers is the final part of the video from Owen Bell.
The Jewel Beetles stamp issue, which was released on 6 September 2016, features four colourful Australian jewel beetles, each representing a different genus: Stigmodera gratiosa, Castiarina klugii, Temognatha alternata and Julodimorpha bakewellii. Owen’s life-like illustrations for this issue were based on jewel beetle specimens contained within the natural history collection of Museum Victoria.
“Every commission is a challenge in its own right and always an exciting one. With the Jewel Beetles stamp issue, it was trying to achieve the iridescent quality of the colours on the carapaces and the ‘Trompe l’oeil’ effect, to make it appear that the beetles are crawling across the first day cover envelope,” recalls Owen.
The Endangered Wildlife stamp issue was released on 20 September 2016, in the lead-up to that year’s Stamp Collecting Month. The stamps, illustrated by Owen, feature endangered animals that are part of conservation programs in Australia, whether in zoos or other organisations: Southern Corroboree Frog (Pseudophryne corroboree), Snow Leopard (Panthera uncia syn. Uncia uncia), Asian Elephant (Elephas maximus), Western Lowland Gorilla (Gorilla gorilla gorilla), Western Swamp Tortoise (Pseudemydura umbrina), Orange-bellied Parrot (Neophema chrysogaster) and Northern Quoll (Dasyurus hallucatus).
“The Endangered Wildlife stamp issue was unusual, as each stamp showed a different animal and each one presented its own challenges. I enjoyed painting the Snow Leopard, because in the reference photos the animal wasn’t shown in snow, so I created a snowy environment using additional landscape references. I also changed the poses and positions of the cubs, making it a more ‘intimate’ scene. Birds are always fun to paint and quite often involve painting individual feathers,” says Owen.
“The frog featured in the Endangered Wildlife issue, the Southern Corroboree Frog, was great fun to paint but also challenging. I wanted to capture the ‘moistness’ of its skin and the intricacy of its markings. That required a number of separate layers. Having figured out the techniques to achieve this, it was great to have the opportunity to use these techniques again on the Frogs stamp issue, refining them even more. For the Australian Lace-lid, I found a detailed image that showed its distinctive eyelid in sharp detail, meaning that I was able to achieve even more detail than the original source image!” says Owen.
For Stamp Collecting Month 2017 the theme was Dragonflies, which featured Australian dragonfly species (and one damselfly), selected for their visual and taxonomic diversity. The stamp designs show male dragonflies, which are often more spectacular in colouration than females.
“The biggest challenge with dragonflies and damselflies is always the wings. I spent more time painting the wings than anything else!” says Owen.
Clearly Owen’s illustrations hit the mark, because the Dragonflies stamp issue came a close second in the 2017 Australian Stamp Poll.
The Frogs stamp issue is available from 10 July 2018, online, at participating Post Offices and via mail order on 1800 331 794, while stocks last.
|
import os
from base.field import *
from jsonparser.documents import BaseJsonDocument
from jsonparser.encoder import EnumEncoder
import json
class Person(BaseJsonDocument):
Name = StringField()
Inn = StringField()
AccountNumber = StringField()
BankCode = StringField()
BankName = StringField()
class Item(BaseJsonDocument):
EntryDate = StringField()
EntryDocumentNumber = StringField()
EntryAccountNumber = StringField()
EntryAmountDebit = PyFloatField(required=True)
EntryAmountDebitBase = PyFloatField()
EntryAmountCredit = PyFloatField()
EntryAmountCreditBase = PyFloatField()
EntryAmountBase = PyFloatField()
EntryComment = StringField()
EntryDepartment = StringField()
EntryAccountPoint = StringField()
DocumentProductGroup = StringField()
DocumentValueDate = StringField()
SenderDetails = Person()
BeneficiaryDetails = Person()
DocumentTreasuryCode = StringField()
DocumentNomination = StringField()
DocumentInformation = StringField()
DocumentSourceAmount = PyFloatField()
DocumentSourceCurrency = StringField()
DocumentDestinationAmount = PyFloatField()
DocumentDestinationCurrency = StringField()
DocumentReceiveDate = StringField()
DocumentBranch = StringField()
DocumentDepartment = StringField()
DocumentActualDate = StringField()
DocumentExpiryDate = StringField()
DocumentRateLimit = StringField()
DocumentRate = PyFloatField()
DocumentRegistrationRate = PyFloatField()
DocumentSenderInstitution = StringField()
DocumentIntermediaryInstitution = StringField()
DocumentBeneficiaryInstitution = StringField()
DocumentPayee = StringField()
DocumentCorrespondentAccountNumber = StringField()
DocumentCorrespondentBankCode = StringField()
DocumentCorrespondentBankName = StringField()
DocumentKey = PyFloatField()
EntryId = PyFloatField()
class RootData(BaseJsonDocument):
items = ListField(Item())
def run_example():
dir_name = os.path.dirname(os.path.realpath(__file__))
file_path = "{0}/json/bog".format(dir_name)
with open('{0}.json'.format(file_path), 'r') as test_data_file:
a = RootData()
a.load(test_data_file.read().replace('\n', ''))
if a.is_valid():
with open("{0}_result.json".format(file_path), "w") as f:
print(a.dump(), file=f)
else:
with open("{0}_errors.json".format(file_path), "w") as f:
json.dump(a.errors, f, cls=EnumEncoder)
|
In late 2003, Norway passed a law mandating 40 percent representation of each gender on the board of publicly limited liability companies. The primary objective of this reform was to increase the representation of women in top positions in the corporate sector and decrease gender disparity in earnings within that sector. We document that the newly (post-reform) appointed female board members were observably more qualified than their female predecessors, and that the gender gap in earnings within boards fell substantially. While the reform may have improved the representation of female employees at the very top of the earnings distribution (top 5 highest earners) within firms that were mandated to increase female participation on their board, there is no evidence that these gains at the very top trickled down. Moreover, the reform had no obvious impact on highly qualified women whose qualifications mirror those of board members but who were not appointed to boards. We observe no statistically significant change in the gender wage gaps or in female representation in top positions, although standard errors are large enough that we cannot rule out economically meaningful gains. Finally, there is little evidence that the reform affected the decisions of women more generally; it was not accompanied by any change in female enrollment in business education programs, or a convergence in earnings trajectories between recent male and female graduates of such programs. While young women preparing for a career in business report being aware of the reform and expect their earnings and promotion chances to benefit from it, the reform did not affect their fertility and marital plans. Overall, in the short run the reform had very little discernible impact on women in business beyond its direct effect on the newly appointed female board members.
|
#!/usr/bin/env python2
from __future__ import print_function
__author__ = "Antoine Amarilli and Fabrice Ben Hamouda"
import os, re
import argparse
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
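# Illustrative note, not part of the original script: shellquote wraps the whole
# string in single quotes and rewrites each embedded single quote as '\'' so the
# value survives the shell untouched. For a hypothetical path "it's here" the
# result is 'it'\''s here' on the generated command line.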
def main():
parser = argparse.ArgumentParser(description="WARNING: Internal use. Please use btrsync.sh")
parser.add_argument("--status", help="Status file to write (internal use)")
parser.add_argument("root_neil", help="[[user@]host:]path/to/neil")
parser.add_argument("root_oscar", help="[[user@]host:]path/to/oscar")
args = parser.parse_args()
# Print command to be executed for Neil and Oscar
# Used by btrsync.sh
print ("ok")
regex = re.compile("^((?P<server>[^:]+):)?(?P<path>.*)$")
r_oscar = regex.search(args.root_oscar).groupdict()
r_neil = regex.search(args.root_neil).groupdict()
if r_neil["server"] == None:
root_neil = os.path.abspath(args.root_neil)
root_neil_local = root_neil
else:
root_neil = args.root_neil
root_neil_local = r_neil["path"]
if r_oscar["server"] == None:
root_oscar = os.path.abspath(args.root_oscar)
root_oscar_local = root_oscar
else:
root_oscar = args.root_oscar
root_oscar_local = r_oscar["path"]
if r_neil["server"]==None:
print ("btrsync.py --origin %s %s" % (shellquote(root_neil_local), shellquote(root_oscar)))
else:
print ("ssh %s btrsync.py --origin %s %s" % (r_neil["server"], shellquote(root_neil_local), shellquote(root_oscar)))
# if a status file is provided, pass it to the destination:
invocation = "btrsync.py %s --destination" % ("--status=" + shellquote(args.status) if args.status else "")
if r_oscar["server"]==None:
print ("%s %s %s" % (invocation, shellquote(root_neil), shellquote(root_oscar_local)))
else:
print ("ssh %s %s %s %s" % (r_oscar["server"], invocation, shellquote(root_neil), shellquote(root_oscar_local)))
if __name__ == "__main__":
main()
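# Illustrative sketch of the generated commands (hypothetical paths, not taken
# from the original sources). Invoking this helper as
#   <this_script>.py ./src host:/backup
# prints the "ok" marker followed by, roughly:
#   btrsync.py --origin '/abs/path/to/src' 'host:/backup'
#   ssh host btrsync.py  --destination '/abs/path/to/src' '/backup'
# btrsync.sh is then expected to execute these two commands on Neil's and
# Oscar's sides respectively.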
|
Here comes the China National Day.
From OCT.1st to OCT.7th, 2018, all staff in the Zhuhai and Changsha offices will have a 7-day holiday. The factory staff will work as usual. During the holiday, replies to customers' inquiries may be slightly delayed. For urgent business, please call us directly.
May you have a happy National Day holiday!
|
from collections import namedtuple
from contextlib import contextmanager
from functools import wraps
from inspect import signature
from muk.utils import identity
class cons(namedtuple('_cons', ['car', 'cdr'])):
def walk_star(self, W):
return cons(W(self.car), W(self.cdr))
def unification(self, other, sub, ext_s, U, E):
try: UC = other._unification_cons
except AttributeError: raise E
else: return UC(self, sub, ext_s, U)
def _unification_cons(self, other_cons, sub, ext_s, U):
if other_cons.cdr == (): return U(other_cons.car, self, sub, ext_s)
if self.cdr == (): return U(self.car, other_cons, sub, ext_s)
cars_sub = U(other_cons.car, self.car, sub, ext_s)
return U(other_cons.cdr, self.cdr, cars_sub, ext_s)
def reify_s(self, sub, R):
return R(self.cdr, R(self.car, sub))
def occur_check(self, u, O, E):
return O(u, self.car) or O(u, self.cdr)
    def __radd__(self, other):
        if isinstance(other, list):
            return list_to_cons(other, post=lambda l: self if l == [] else l)
        return NotImplemented  # let Python fall back to the other operand's __add__
class ImproperListError(ValueError):
pass
def list_to_cons(l, post=identity):
if isinstance(l, (str, cons)): return l # we consider a `str` obj not an iterable obj but as an atom
λ = type(l)
try:
car, cadr, *cddr = l
except:
try:
car, *cdr = l
except:
return l
else:
cdr = λ(cdr) # again, restore correct type of the tail
if cdr == (): raise ImproperListError # otherwise outer try couldn't fail
cdr = post(cdr)
return cons(car=list_to_cons(car), cdr=list_to_cons(cdr))
else:
cddr = λ(cddr) # restore correct type of tail collecting obj
if cddr == (): return cons(car=list_to_cons(car), cdr=list_to_cons(cadr))
cdr = λ([cadr]) + cddr # reconstruct `cdr` by adapt `[cadr]` to safely apply +
return cons(car=list_to_cons(car), cdr=list_to_cons(cdr))
def cons_to_list(c, for_cdr=False):
try:
car, cdr = c
except:
if c == (): raise ImproperListError
return (([], list) if c == [] else ((c,), tuple)) if for_cdr else c
d, λ = cons_to_list(cdr, for_cdr=True)
r = λ([cons_to_list(car, for_cdr=False)]) + d
return (r, λ) if for_cdr else r
def adapt_iterables_to_conses(selector, ctor=list_to_cons):
def decorator(f):
f_sig = signature(f)
formal_args = [v.name for k, v in f_sig.parameters.items()]
selection = selector(*formal_args)
if isinstance(selection, set):
selection = {s:ctor for s in selection}
@wraps(f)
def D(*args, bypass_cons_adapter=False, **kwds):
new_args = args if bypass_cons_adapter else [c(a) for f, a in zip(formal_args, args)
for c in [selection.get(f, identity)]]
return f(*new_args, **kwds)
return D
return decorator
all_arguments = lambda *args: set(args)
def int_to_list(i):
return list(map(int, reversed(bin(i)[2:]))) if i else []
class num(cons):
@classmethod
def build(cls, obj):
if isinstance(obj, int): obj = int_to_list(obj)
c = list_to_cons(obj)
return num(c.car, c.cdr) if isinstance(c, cons) else c
def __int__(self):
def I(c, e):
return 0 if c == [] else c.car * 2**e + I(c.cdr, e+1)
return I(self, e=0)
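# Minimal round-trip sketch (illustrative only; assumes the module is used
# exactly as defined above):
#
#   >>> cons_to_list(list_to_cons([1, 2, 3]))
#   [1, 2, 3]
#   >>> list_to_cons([1, 2, 3]).car, list_to_cons([1, 2, 3]).cdr.car
#   (1, 2)
#   >>> int(num.build(6))   # 6 is stored little-endian as the bit list [0, 1, 1]
#   6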
|
MELBOURNE, 25 March: Director Girish Makwana’s feature film ‘Colour of Darkness’ has been nominated for the Oz Flix Independent Awards. OzFlix, Australia’s global streaming service for movies, will present the inaugural Ozflix Independent Film Awards (the “Ozzies”) on Saturday 7 April 2018 at The Alex Theatre, St Kilda, in Melbourne.
‘Colour of Darkness’ has been nominated under the category – Best Film $2 million-5 million. AFFF Ltd. Chairman, Alan Finney, renowned for his unwavering lifetime support of Australian cinema, co-founded Ozflix in 2015 with the organization’s CEO, Ron Brown.
There are many other categories in the awards, including Best Documentary, Best Performance – Male, Best Performance – Female and Best Director.
|
"""
NetworkNamespace = ``/bin/ls /var/run/netns``
=============================================
This spec provides the list of network namespaces created on the host machine.
Typical output of this command is as below::
temp_netns temp_netns_2 temp_netns_3
The ``/bin/ls /var/run/netns`` is preferred over ``/bin/ip netns list`` because it works on
all RHEL versions, regardless of whether the ip package is installed.
Examples:
>>> type(netns_obj)
<class 'insights.parsers.net_namespace.NetworkNamespace'>
>>> netns_obj.netns_list
['temp_netns', 'temp_netns_2', 'temp_netns_3']
>>> len(netns_obj.netns_list)
3
"""
from insights import Parser, parser, get_active_lines
from insights.parsers import SkipException
from insights.specs import Specs
@parser(Specs.namespace)
class NetworkNamespace(Parser):
def parse_content(self, content):
if not content:
raise SkipException('Nothing to parse.')
self._netns_list = []
for line in get_active_lines(content):
self._netns_list.extend(line.split())
@property
def netns_list(self):
"""
        This method returns the list of network namespaces created
        on the host.

        Returns:
            `list` of network namespaces, if any exist.
"""
return self._netns_list
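# Illustrative note, not part of the upstream parser: parse_content simply splits
# every active line on whitespace, so output spread over several lines, e.g.
#     temp_netns temp_netns_2
#     temp_netns_3
# still yields ['temp_netns', 'temp_netns_2', 'temp_netns_3'] in netns_list.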
|
2016 marked the 50th Anniversary of the Star Trek franchise. Our goal was to create a tribute video that not only paid homage to notable characters and sayings, but evoked a sense of nostalgia and excitement for the future of the franchise.
In the film ‘Arrival’ there is a universal language that consists of shapes and points. During pre-production, the filmmakers worked with a linguist to create an actual language for the film. In an effort to get additional support from our social partners, we had the linguist create symbols for each platform. We crafted a post for Twitter and then pushed it through their film platform on the day of our digital release.
Gearing up for the holiday shopping season, this video was a funny way to engage the holiday crowds in a sing-along and make them think of the film. It was a pre-order asset that did so well organically that we pushed it in a paid campaign through the holiday to increase product awareness.
This film was released in December, but our window was in March. The comedy has a lot of crazy, out of control moments at an Office Christmas Party. We set up a shoot at an old corporate office, and dressed it up as if it was the back of the building that’s in the movie. The goal was to create a viewing experience that caught the eye of the user scrolling through his/her feed and ended up being something that they just couldn’t stop watching. We used a unique URL that our target audience would relate to and identify as a part of the joke.
Rather than posting the standard image as a tune-in graphic for our Facebook Live panel with the Rings cast and director, I designed a video that played into the theme and scare tactics of the film. The VHS overlay refers to the tape that, when passed, kills characters in the movie. Every few seconds the main character, Samara, glitches into the frame.
|
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from couchpotato.environment import Env
import re
import time
import traceback
log = CPLog(__name__)
class ThePirateBay(TorrentMagnetProvider):
urls = {
'detail': '%s/torrent/%s',
'search': '%s/search/%s/%s/7/%s'
}
cat_ids = [
([207], ['720p', '1080p']),
([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
([201, 207], ['brrip']),
([202], ['dvdr'])
]
cat_backup_id = 200
disable_provider = False
http_time_between_calls = 0
proxy_list = [
'https://thepiratebay.se',
'https://tpb.ipredator.se',
'https://depiraatbaai.be',
'https://piratereverse.info',
'https://tpb.pirateparty.org.uk',
'https://argumentomteemigreren.nl',
'https://livepirate.com',
'https://www.getpirate.com',
'https://tpb.partipirate.org',
'https://tpb.piraten.lu',
'https://kuiken.co',
]
def __init__(self):
self.domain = self.conf('domain')
super(ThePirateBay, self).__init__()
def _searchOnTitle(self, title, movie, quality, results):
page = 0
total_pages = 1
cats = self.getCatId(quality['identifier'])
while page < total_pages:
search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, ','.join(str(x) for x in cats))
page += 1
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'searchResult'})
if not results_table:
return
try:
total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
except:
pass
entries = results_table.find_all('tr')
for result in entries[2:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
try:
size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
except:
continue
if link and download:
def extra_score(item):
trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]
return confirmed + trusted + vip + moderated
results.append({
'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
'name': link.string,
'url': download['href'],
'detail_url': self.getDomain(link['href']),
'size': self.parseSize(size),
'seeders': tryInt(result.find_all('td')[2].string),
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def isEnabled(self):
return super(ThePirateBay, self).isEnabled() and self.getDomain()
def getDomain(self, url = ''):
if not self.domain:
for proxy in self.proxy_list:
prop_name = 'tpb_proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed tpb proxy %s', proxy)
if 'title="Pirate Search"' in data:
log.debug('Using proxy: %s', proxy)
self.domain = proxy
break
Env.prop(prop_name, time.time())
if not self.domain:
log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.')
return None
return cleanHost(self.domain).rstrip('/') + url
def getMoreInfo(self, item):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'class':'nfo'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
|
Title: Prince Edward Island, divided into Counties & Parishes, with the Lots as granted by Government, Exhibiting all the New Settlements, Roads, Mills, &c, &c.
Size: Approximately 7 1/4 x 14 3/4"
Description: Liberated from Montgomery Martin's History of the British Colonies, Vol. iii, Possessions in North America. Includes inset of the Gulf of St. Lawrence. Early hand-coloring in the outline. Shows 3 vertical folds from when it was folded into Martin's book. Minor off-setting.
Robert Montgomery Martin (c. 1801–1868), commonly referred to as "Montgomery Martin", was an Anglo-Irish author and civil servant. He served as Colonial Treasurer of Hong Kong from 1844 to 1845. He was a founding member of the Statistical Society of London (1834), the Colonial Society (1837), and the East India Association (1867).
|
"""Constant values for the Tado component."""
from PyTado.const import (
CONST_HVAC_COOL,
CONST_HVAC_DRY,
CONST_HVAC_FAN,
CONST_HVAC_HEAT,
CONST_HVAC_HOT_WATER,
CONST_HVAC_IDLE,
CONST_HVAC_OFF,
)
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
)
TADO_HVAC_ACTION_TO_HA_HVAC_ACTION = {
CONST_HVAC_HEAT: CURRENT_HVAC_HEAT,
CONST_HVAC_DRY: CURRENT_HVAC_DRY,
CONST_HVAC_FAN: CURRENT_HVAC_FAN,
CONST_HVAC_COOL: CURRENT_HVAC_COOL,
CONST_HVAC_IDLE: CURRENT_HVAC_IDLE,
CONST_HVAC_OFF: CURRENT_HVAC_OFF,
CONST_HVAC_HOT_WATER: CURRENT_HVAC_HEAT,
}
# Configuration
CONF_FALLBACK = "fallback"
DATA = "data"
UPDATE_TRACK = "update_track"
# Types
TYPE_AIR_CONDITIONING = "AIR_CONDITIONING"
TYPE_HEATING = "HEATING"
TYPE_HOT_WATER = "HOT_WATER"
TYPE_BATTERY = "BATTERY"
TYPE_POWER = "POWER"
# Base modes
CONST_MODE_OFF = "OFF"
CONST_MODE_SMART_SCHEDULE = "SMART_SCHEDULE" # Use the schedule
CONST_MODE_AUTO = "AUTO"
CONST_MODE_COOL = "COOL"
CONST_MODE_HEAT = "HEAT"
CONST_MODE_DRY = "DRY"
CONST_MODE_FAN = "FAN"
CONST_LINK_OFFLINE = "OFFLINE"
CONST_FAN_OFF = "OFF"
CONST_FAN_AUTO = "AUTO"
CONST_FAN_LOW = "LOW"
CONST_FAN_MIDDLE = "MIDDLE"
CONST_FAN_HIGH = "HIGH"
# When we change the temperature setting, we need an overlay mode
CONST_OVERLAY_TADO_MODE = (
"NEXT_TIME_BLOCK" # wait until tado changes the mode automatic
)
CONST_OVERLAY_MANUAL = "MANUAL" # the user has change the temperature or mode manually
CONST_OVERLAY_TIMER = "TIMER" # the temperature will be reset after a timespan
# Heat always comes first since we get the
# min and max temperatures for the zone from
# it.
# Heat is preferred as it generally has a lower minimum temperature
ORDERED_KNOWN_TADO_MODES = [
CONST_MODE_HEAT,
CONST_MODE_COOL,
CONST_MODE_AUTO,
CONST_MODE_DRY,
CONST_MODE_FAN,
]
TADO_MODES_TO_HA_CURRENT_HVAC_ACTION = {
CONST_MODE_HEAT: CURRENT_HVAC_HEAT,
CONST_MODE_DRY: CURRENT_HVAC_DRY,
CONST_MODE_FAN: CURRENT_HVAC_FAN,
CONST_MODE_COOL: CURRENT_HVAC_COOL,
}
# These modes will not allow a temp to be set
TADO_MODES_WITH_NO_TEMP_SETTING = [CONST_MODE_AUTO, CONST_MODE_DRY, CONST_MODE_FAN]
#
# HVAC_MODE_HEAT_COOL is mapped to CONST_MODE_AUTO
# This lets tado decide on a temp
#
# HVAC_MODE_AUTO is mapped to CONST_MODE_SMART_SCHEDULE
# This runs the smart schedule
#
HA_TO_TADO_HVAC_MODE_MAP = {
HVAC_MODE_OFF: CONST_MODE_OFF,
HVAC_MODE_HEAT_COOL: CONST_MODE_AUTO,
HVAC_MODE_AUTO: CONST_MODE_SMART_SCHEDULE,
HVAC_MODE_HEAT: CONST_MODE_HEAT,
HVAC_MODE_COOL: CONST_MODE_COOL,
HVAC_MODE_DRY: CONST_MODE_DRY,
HVAC_MODE_FAN_ONLY: CONST_MODE_FAN,
}
HA_TO_TADO_FAN_MODE_MAP = {
FAN_AUTO: CONST_FAN_AUTO,
FAN_OFF: CONST_FAN_OFF,
FAN_LOW: CONST_FAN_LOW,
FAN_MEDIUM: CONST_FAN_MIDDLE,
FAN_HIGH: CONST_FAN_HIGH,
}
TADO_TO_HA_HVAC_MODE_MAP = {
value: key for key, value in HA_TO_TADO_HVAC_MODE_MAP.items()
}
TADO_TO_HA_FAN_MODE_MAP = {value: key for key, value in HA_TO_TADO_FAN_MODE_MAP.items()}
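# Illustrative note (assuming the mappings above are used as-is): the
# TADO_TO_HA_* dictionaries are plain inversions, so for example
# TADO_TO_HA_HVAC_MODE_MAP[CONST_MODE_SMART_SCHEDULE] is HVAC_MODE_AUTO and
# TADO_TO_HA_FAN_MODE_MAP[CONST_FAN_MIDDLE] is FAN_MEDIUM.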
DEFAULT_TADO_PRECISION = 0.1
SUPPORT_PRESET = [PRESET_AWAY, PRESET_HOME]
TADO_SWING_OFF = "OFF"
TADO_SWING_ON = "ON"
DOMAIN = "tado"
SIGNAL_TADO_UPDATE_RECEIVED = "tado_update_received_{}_{}_{}"
UNIQUE_ID = "unique_id"
DEFAULT_NAME = "Tado"
TADO_ZONE = "Zone"
UPDATE_LISTENER = "update_listener"
# Constants for Temperature Offset
INSIDE_TEMPERATURE_MEASUREMENT = "INSIDE_TEMPERATURE_MEASUREMENT"
TEMP_OFFSET = "temperatureOffset"
TADO_OFFSET_CELSIUS = "celsius"
HA_OFFSET_CELSIUS = "offset_celsius"
TADO_OFFSET_FAHRENHEIT = "fahrenheit"
HA_OFFSET_FAHRENHEIT = "offset_fahrenheit"
TADO_TO_HA_OFFSET_MAP = {
TADO_OFFSET_CELSIUS: HA_OFFSET_CELSIUS,
TADO_OFFSET_FAHRENHEIT: HA_OFFSET_FAHRENHEIT,
}
|
What interesting problems are you working on? What advice do you have for students entering the global workforce today?
Let this magazine be your voice. Your unique perspective can inspire other Beavers to share their stories too, and connect with our thriving community of 30,000 Oregon State engineering alumni around the world.
The alumni magazine will be published twice yearly and will include stories about alumni, faculty, staff, students, donors, and industry partners. It will also uncover interesting details that go beyond the profession, the impact of faculty and student research, mentoring, and historic-nostalgic tidbits from life at Oregon State in decades past.
Please fill out the form below and tell me what you’re up to so we can share your story in future issues.
This is your alma mater, your stories, your magazine.
|
import qgl
import pygame
from pygame.locals import *
import data
from scene import Scene
from actions import *
import view
import main_menu
import leafs
from data import filepath
from intro import Intro
DADDY_FALL = 10000 # milliseconds
ROJO_SANGRE = (0.8, 0.1, 0.05, 0)
class Separador(Scene):
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game,True))
def __init__(self, world, score=0):
Scene.__init__(self, world)
self.score = score
self.root_node.background_color = view.CELESTE_CIELO
am = self.actionManager = Manager()
am.do( None,
delay(10) +
call(self.next)
)
def next(self):
self.game.change_scene( _Separador( self.game, self.score ) )
def update(self, dt):
if dt>1000:
return
self.actionManager.loop(dt)
class _Separador(Scene):
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, True))
def __init__(self, world, score=0):
Scene.__init__(self, world)
self.score = score
self.root_node.background_color = view.CELESTE_CIELO
self.font = filepath('You Are Loved.ttf')
import sound
self.initializeMusic()
clouds = self.createClouds()
clouds3 = self.createClouds()
dad = self.createDad()
clouds2 = self.createClouds()
self.accept()
diedline = self.diedline()
scoretext = self.scoreline()
scoretext.disable()
diedline.disable()
am = self.actionManager = Manager()
am.do( clouds, place( Point3(-200,200,0) ) )
dad.scale = Vector3(0.1,0.1,1)
am.do( dad, place( Point3(0,0,0) ) )
am.do( dad, repeat( rotate(360, duration=2100) ) )
am.do( dad,
scale( 10, duration=10000 ) +
spawn( call( scoretext.enable ) ) +
delay( 4000 ) +
scale( 10, duration=5000 ) +
call(lambda: sound.playMusicSound( self.crash, 1 ) ) +
call(lambda:
setattr(self.root_node, "background_color",ROJO_SANGRE) ) +
call( diedline.enable ) +
place(Point3(-2000,-2000,0))
)
clouds2.scale = Vector3(20,20,1)
am.do( clouds2, place( Point3(-5500,3500,0) ) )
am.do( clouds2, goto( Point3(-600,400,0), duration=10000 ) )
am.do( clouds2,
scale( 1.0/10, duration=10000 ) +
place( Point3(-1000, -1000, 0) )
)
clouds.scale = Vector3(2,2,1)
am.do( clouds,
place( Point3(-1000, -1000, 0) ) +
delay ( 10000 ) +
place( Point3(-600,400,0) ) +
delay( 4000 ) +
spawn(goto( Point3(-60,40,0), duration=5000 )) +
scale( 1.0/10, duration=5000 ) +
place( Point3(-1000, -1000, 0) )
)
clouds3.scale = Vector3(5,5,1)
am.do( clouds3,
place( Point3(2000, -2000, 0) ) +
delay ( 10000 ) +
delay( 4000 ) +
spawn(goto( Point3(200,-200,0), duration=5000 )) +
scale( 1.0/10, duration=5000 ) +
place( Point3(2000, -2000, 0) )
)
sound.playSoundFile("freefall.ogg",1)
def scoreline(self):
t = self.create_text("you made %i points..."%self.score)
self.add_group(t)
self.accept()
p = t.translate
t.translate = Point3(p[0], 200, p[2])
return t
def diedline(self):
t = self.create_text("and then died.")
self.add_group(t)
self.accept()
p = t.translate
t.translate = Point3(p[0], -200, p[2])
return t
def create_text(self, text):
f = leafs.TextoAlineado(text, self.font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(f)
return group
### Music
def initializeMusic(self):
import sound
self.music = sound.initMusicFile("01 - Songe Original mix.ogg")
self.crash = sound.initMusicFile("../sonidos/crash.wav")
### Sky
def createCloud( self, imageName, initialPos, size ):
skyGroup = qgl.scene.Group()
skyTexture = qgl.scene.state.Texture(data.filepath(imageName))
skyQuad = qgl.scene.state.Quad( size )
skyGroup.add(skyTexture)
skyGroup.add(skyQuad)
skyGroup.axis = (0,0,1)
skyGroup.angle = 0
skyGroup.translate = initialPos
return skyGroup
def createClouds( self ):
clouds = qgl.scene.Group()
c1 = self.createCloud( "cloud1.png", (-200,-800,0), (345,189) )
c3 = self.createCloud( "cloud3.png", ( 250,-200,0), (284/2,104/2) )
clouds.add(c1)
clouds.add(c3)
clouds.axis = (0,0,1)
clouds.angle = 0
clouds.translate = (0,-200,0)
self.add_group(clouds)
return clouds
def createClouds2( self ):
clouds = qgl.scene.Group()
c2 = self.createCloud( "cloud2.png", ( 0,-300,0), (527,221) )
c3 = self.createCloud( "cloud3.png", ( -250,-200,0), (284/2,104/3) )
clouds.add(c2)
clouds.add(c3)
clouds.axis = (0,0,1)
clouds.angle = 0
clouds.translate = (0,-200,0)
self.add_group(clouds)
return clouds
def createClouds3( self ):
clouds = qgl.scene.Group()
c1 = self.createCloud( "cloud1.png", (-200,-800,0), (345/2,189/2) )
c2 = self.createCloud( "cloud2.png", ( 150,-300,0), (527/2,221/2) )
clouds.add(c1)
clouds.add(c2)
clouds.axis = (0,0,1)
clouds.angle = 0
clouds.translate = (0,-200,0)
self.add_group(clouds)
return clouds
### Airplane
def createAirplane( self ):
plane = qgl.scene.Group()
planeTexture = qgl.scene.state.Texture(data.filepath("biplane.png"))
planeQuad = qgl.scene.state.Quad((100,46))
plane.add(planeTexture)
plane.add(planeQuad)
plane.axis = (0,0,1)
plane.angle = 0
plane.translate = (600,0,0)
self.add_group(plane)
return plane
### People
def createPerson(self, imageName, initialPos, size=(64,128) ):
personGroup = qgl.scene.Group()
dadTexture = qgl.scene.state.Texture(data.filepath(imageName))
dadQuad = qgl.scene.state.Quad(size)
personGroup.add(dadTexture)
personGroup.add(dadQuad)
personGroup.axis = (0,0,1)
personGroup.angle = 0
personGroup.translate = initialPos
return personGroup
def createDad(self):
dad = self.createPerson( "dad-handsup-mouth.gif", (0,700,0) )
self.add_group(dad)
return dad
def createDevil(self):
devil = qgl.scene.Group()
body = self.createPerson("body_diablo.png", (0,0,0), (49,118) )
c2 = self.createCloud( "cloud2.png", ( 0,-50,0), (527/2,221/2) )
devil.add(body)
devil.add(c2)
devil.axis = (0,0,1)
devil.angle = 0
devil.translate = (0,-600,0)
self.add_group(devil)
return devil
    def createJesus(self):
        jesus = self.createPerson("body_jesus.png", (200,200,0) )
        self.add_group(jesus)
        return jesus
def createAlien(self):
alien = self.createPerson("alien_brazos_arriba.png", (0,600,0) )
alien.angle = 180
self.add_group(alien)
return alien
### Objects
def createVarita(self):
varita = self.createPerson("varita.png", (1200,0,0), (32,64) )
self.add_group(varita)
return varita
def update(self, dt):
if dt>1000:
return
self.actionManager.loop(dt)
# Handlers
def playMusic( self ):
import sound
sound.playMusicSound(self.music,1)
class Interlude(_Separador):
def __init__(self, nextScene, text="press enter...", *a):
Scene.__init__(self, *a)
self.nextScene = nextScene
self.root_node.background_color = view.CELESTE_CIELO
am = self.actionManager = Manager()
clouds = self.createClouds()
clouds.translate = (100,0,0)
clouds2 = self.createClouds2()
clouds2.translate = (-100,300, 0)
clouds3 = self.createClouds3()
clouds3.translate = (50,100,0)
dads = []
basetime = 2500.0
for d in range(10):
dad = self.createDad()
dad.translate = Point3(-300+60*d, -600, 0)
if d != 0:
am.do( dad,
delay( sum([ (basetime/(r+1)) for r in range(d)] ) - basetime ) +
move((0,1200,0), duration=basetime/(d+1))
)
dads.append( dad )
varita = self.createVarita()
font = data.filepath('You Are Loved.ttf')
figure = leafs.TextoAlineado(text, font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (0, 240,0)
self.group.add(group)
self.accept()
def createDad(self):
dad = self.createPerson( "dad-fly.gif", (0,700,0) )
self.add_group(dad)
return dad
def update_event(self, event):
if event.type == KEYDOWN and event.key == K_ESCAPE:
self.game.change_scene(Intro(self.game, True))
elif event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(self.nextScene(self.game))
def musique(what):
import data
pygame.mixer.music.load( data.filepath("sonidos/"+what) )
pygame.mixer.music.play()
class History(_Separador):
def __init__(self, game):
Scene.__init__(self, game)
import sound
self.root_node.background_color = (0,0,0,0)
am = self.actionManager = Manager()
luz = self.create_image("dad.png")
luz.translate = Point3(60,-10,0)
luz.scale = (12,4.5,0)
dad_hi = self.createDad()
dad_hi.translate = Point3(150, -150, 0)
script = [
("- hey...", 800),
("where am I?", 1800),
("this is not home!", 2000),
("my teleport spell must have failed", 2000),
("lets try again...", 2000),
(" ", 2000),
("ouch!", 1700),
("this didn't work", 2000),
("I'll get help from above", 2300),
("I'm going up!", 2000),
]
offset = 0
lines = []
for line, duration in script:
l = self.create_line(line)
lines.append( ( l, offset, duration) )
offset += duration
nube = [ self.create_image("nube%i.png"%i) for i in range(1, 6) ]
[ setattr(n, "translate", Point3(150, -150,0)) for n in nube ]
dad = self.create_image("dad.gif")
dad.translate = Point3(-350, -150, 0)
self.accept()
dad_hi.disable()
luz.disable()
[ n.disable() for n in nube ]
[ n.disable() for (n,a,x) in lines ]
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
am.do( None,
delay(20000)+
call(lambda: musique("grossini talking 1.ogg") ) +
delay(10600)+
call(lambda: musique("grossini talking 2.ogg") )
)
am.do( dad,
goto( Point3(150, -150, 0), duration=5000 ) +
call(lambda: luz.enable() ) +
call(lambda: sound.playSoundFile("farol.wav",1) ) +
delay(1500) +
call(lambda: musique("Applause.wav") ) +
delay(2500) +
call(lambda: dad.disable()) +
call(lambda: dad_hi.enable()) +
delay(6000) +
call(lambda: sound.playSoundFile("MagiaOK.wav",1) ) +
call(lambda: dad_hi.disable()) +
delay(3000) +
call(lambda: luz.disable() ) +
call(lambda: sound.playSoundFile("farol.wav",1) )
)
for (line, start, duration) in lines:
am.do( line,
delay(20000)+
delay(start)+
call(enable(line))+
delay(duration)+
call(disable(line))
)
am.do( None,
delay(20000+4*2000)+
call(lambda: sound.playSoundFile("tomato.wav",1) )
)
am.do( None,
delay(20000+5*2000)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)
)
am.do( None,
delay( 20000 + duration+start) +
call(lambda: self.game.change_scene(Intro(self.game, False)))
)
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
for i,n in enumerate(nube):
am.do( n,
delay(15500) +
delay(400*i) +
call(enable(n)) +
delay(400) +
call(disable(n))
)
def createDad(self):
dad = self.createPerson( "dad-wave.gif", (0,700,0) )
self.add_group(dad)
return dad
def create_image(self, path):
dad = self.createPerson( path, (0,700,0) )
self.add_group(dad)
return dad
def create_line(self, text):
font = data.filepath('MagicSchoolOne.ttf')
figure = leafs.TextoAlineado(text, font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (0, 0,0)
self.group.add(group)
return group
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, False))
class Credits(_Separador):
def __init__(self, game):
Scene.__init__(self, game)
import sound
self.root_node.background_color = (0,0,0,0)
am = self.actionManager = Manager()
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
script = [
("Divine Inspiration", "David"),
("Magic", "Great Grossini"),
("Hosting", "leito"),
("Hosting", "alecu"),
("coding", "dave"),
("coding", "riq"),
("coding", "alecu"),
("coding", "hugo"),
("coding", "lucio"),
("Music", "Ricardo Vecchio"),
]
offset = 0
lines = []
for cargo, nombre in script:
l1 = self.create_line(cargo)
l2 = self.create_line(nombre)
l2.translate = (0,-00,0)
lines.append( ( l1, l2 ) )
self.accept()
[ (l1.disable(), l2.disable()) for (l1,l2) in lines ]
def make_title(line):
l1, l2 = line
do_title = (
delay(100)+
call(lambda: sound.playSoundFile("tomato.wav",1) ) +
delay(2000)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)+
call(lambda:setattr(l2,'translate',Point3(0,00,0)))+
call(lambda:setattr(l1,'translate',Point3(0,100,0)))+
call(lambda:setattr(l2,'angle',0))+
call(lambda:setattr(l1,'angle',0))+
call(lambda: l1.enable()) +
call(lambda: l2.enable()) +
delay(1500)+
spawn(move(Point3(0,-600,0), duration=1000), target=l1)+
spawn(move(Point3(0,-600,0), duration=1000), target=l2)+
spawn(rotate(45, duration=1000), target=l1)+
spawn(rotate(-45, duration=1000), target=l2)+
delay(2500)+
call(lambda: l1.disable()) +
call(lambda: l2.disable())
)
return do_title
am.do(None, random_repeat( [ make_title(line) for line in lines ] ))
def createDad(self):
dad = self.createPerson( "dad-fly.gif", (0,700,0) )
self.add_group(dad)
return dad
def create_image(self, path):
dad = self.createPerson( path, (0,700,0) )
self.add_group(dad)
return dad
def create_line(self, text):
font = data.filepath('MagicSchoolOne.ttf')
figure = leafs.TextoAlineado(text, font, size=3000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (150, 200,0)
self.group.add(group)
return group
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, True))
class Win(_Separador):
def __init__(self, game):
Scene.__init__(self, game)
import sound
self.root_node.background_color = view.CELESTE_CIELO
am = self.actionManager = Manager()
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
g1 = self.create_image("gift.png")
g1.translate = Point3(200, -160, 0)
g2 = self.create_image("gift.png")
g2.translate = Point3(300, -160, 0)
dad_hi = self.createDad()
dad_hi.translate = Point3(250, -150, 0)
dad = self.create_image("dad.gif")
dad.translate = Point3(250, -150, 0)
god = self.create_image("god.png")
god.scale = (6,2,0)
god.translate = Point3(-200,145,0)
clouds = self.createClouds()
clouds.translate = Point3(-540,380,0)
clouds.scale = (3,3,0)
clouds3 = self.createClouds2()
clouds3.translate = Point3(-200,300,0)
script = [
("- hi god!", 2000),
(" nice to see", 2000),
(" you are having fun", 2000),
(" help me!", 2000),
(" ", 3000),
(" thanks!", 2000),
]
offset = 0
lines = []
for line, duration in script:
l = self.create_line(line)
lines.append( ( l, offset, duration) )
offset += duration
self.accept()
[ n.disable() for (n,a,x) in lines ]
dad_hi.disable()
g1.disable()
g2.disable()
am.do( dad,
delay(5000) +
call(enable(dad_hi)) +
call(disable(dad)) +
delay(2000) +
call(disable(dad_hi)) +
call(enable(dad)) +
delay(8000)+
call(enable(g1)) +
call(enable(g2))
)
for (line, start, duration) in lines:
am.do( line,
delay(5000)+
delay(start)+
call(enable(line))+
delay(duration)+
call(disable(line))
)
def createDad(self):
dad = self.createPerson( "dad-fly.gif", (0,700,0) )
self.add_group(dad)
return dad
def create_image(self, path):
dad = self.createPerson( path, (0,700,0) )
self.add_group(dad)
return dad
def create_line(self, text):
font = data.filepath('MagicSchoolOne.ttf')
figure = leafs.TextoAlineado(text, font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (170, 0,0)
self.group.add(group)
return group
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, True))
|
Santa Monica College is a very large, public, two-year college enrolling 30,615 students and offering 84 degrees in Santa Monica, CA.
Check out the most popular majors and specific degrees students have earned at Santa Monica College.
Check out the online programs offered at Santa Monica College.
Do you have questions about furthering your education with Santa Monica College? People are listening and are ready to help. Plus, we're pretty sure others have the same question you do!
|
#!/usr/bin/env python3
import os
import time
import subprocess
import argparse
from sys import argv
from os.path import join, getsize
import hashlib
import re
description = """
New Script.
"""
verbosity = 0
verbosity_level = {'quiet': (None, -100), 'error': ('E: ', -2), 'warning': ('W: ', -1),
'info': ('', 0), 'debug': ('D: ', 1), 'verbose': ('V: ', 2), 'dump': ('Z: ', 3)}
def dprint(s, s_verbosity_in: str = "info"):
verbosity_in_prefix, verbosity_in_value = verbosity_level[s_verbosity_in]
if verbosity == verbosity_level["quiet"]:
pass
elif verbosity_in_value <= verbosity:
print(verbosity_in_prefix + s)
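# Illustrative note, not part of the original script: with -vv on the command
# line verbosity becomes 2, so dprint("details", "debug") prints "D: details"
# (level 1 <= 2) while dprint("raw", "dump") stays silent (level 3 > 2); with
# -q, verbosity holds the "quiet" sentinel and nothing is printed at all.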
def parse_args():
parser = argparse.ArgumentParser(description=description)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-v", "--verbosity", help="Increases the verbosity level. Supports up to -vvv.", action="count", default=0)
group.add_argument("-q", "--quiet", action="store_true")
global verbosity
args = parser.parse_args()
if args.quiet:
verbosity = verbosity_level["quiet"]
else:
verbosity = args.verbosity
return args
def attempt_non_collision_rename(new_name):
    # Split the extension once so numeric suffixes do not accumulate
    # (e.g. name_1_2_3); try name_1, name_2, ... until one is free.
    base, ext = os.path.splitext(new_name)
    ii = 1
    while True:
        candidate = f"{base}_{ii}{ext}"
        if not os.path.exists(candidate):
            return candidate
        ii += 1
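# Illustrative note (hypothetical names): if "a/<sha256>.pdf" already exists,
# the helper tries "a/<sha256>_1.pdf", then "a/<sha256>_2.pdf", and so on,
# returning the first candidate that does not exist yet.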
if __name__ == "__main__":
args = parse_args()
top = os.getcwd()
files_arr = []
for root, dirs, files in os.walk(top, topdown=True, onerror=None, followlinks=False):
print(root, "consumes", end=" ")
print(sum(getsize(join(root, name)) for name in files), end=" ")
print("bytes in", len(files), "non-directory files")
dprint("Test code.", "error")
curr_dir = os.path.basename(root)
dirs.clear()
# checking tmsu is active
try:
command = ["tmsu", "files"]
subprocess.check_call(command)
except subprocess.CalledProcessError:
exit("failure running tmsu")
for f in files:
file_name = f"{root}/{f}"
with open(file_name, "rb") as in_file:
# getting the hash of the file
m = hashlib.sha256(in_file.read()).hexdigest()
# generating the short hash directory
short_hash = m[0:1]
if not os.path.exists(short_hash):
os.makedirs(short_hash)
relative_new_name = m + os.path.splitext(file_name)[1]
# f"{curr_dir}.{f}"
new_name = f"{short_hash}/{relative_new_name}"
# Make sure that we will not collide
if os.path.exists(new_name):
new_name = attempt_non_collision_rename(new_name)
os.rename(file_name, new_name)
print("Copied " + file_name + " as " + new_name)
# adding tags to the file
try:
cleanF = re.sub('\W+','_', f )
command = ["tmsu", "tag", f"{new_name}", f"category={curr_dir}", f"original_name={cleanF}"]
print(command)
subprocess.check_call(command)
except subprocess.CalledProcessError:
exit("failure running tmsu")
print()
|
The president's "legacy will always carry a special meaning to anyone who has faced double standards in a society telling them to settle for less," Aaron Walton, co-founder of Walton Isaacson, writes in an incisive analysis about Obama's policies in the LGBT community.
The character of Abigail Adams sings these words to a young Black servant named Lud in the Alan Jay Lerner and Leonard Bernstein musical, 1600 Pennsylvania Avenue. The short-lived Broadway production depicted life in the White House since the presidency of John Adams. More than two centuries later, Barack and Michelle Obama moved into that very house, one that slaves helped to build.
It brings me to tears when I stop and think about the magnitude of this man’s accomplishments over the last eight years—and how his exit on January 20, 2017, will leave a hole in America’s soul. The Obamas have honored our country with an unforgettable mixture of intelligence, warmth and resolve.
Although I have a great deal to say about Obama’s accomplishments as president, I want to specifically reflect on his influence on me as a gay Black man. It starts with five magical words.
If I had my doubts there would be a Black president in my lifetime, I couldn’t even begin to fathom the day when I would be able to utter those words.
Since we met in 1987, Andrew and I had become accustomed to double standards of every kind. Although we were out to friends, family members and work colleagues whom we knew to be accepting, I still practiced evasive techniques with many of my clients.
If I mentioned Andrew’s presence in my life, he was “my friend” Andrew, always hoping no one would ask, “Is he your boyfriend?” More often than not, I would refrain from mentioning him at all. While spouses and significant others often played a role in certain social work-related dinners and functions, I never pressed for Andrew’s inclusion. It never bothered him in the least. It increasingly bothered me.
Andrew and I had a commitment ceremony in 2005, which was an unbelievably moving day for everyone. Again, there were the double standards: we weren’t really “married” but we were forced to accept laws which unfairly limited Andrew’s legal rights. In fact, the laws penalized us for being together. If something happened to me, for example, Andrew would be forced to perform a fire sale of half of the company that I built.
We also had to accept the awkward designation of referring to each other as “my partner.” “Life partner” sounded too soft while “domestic partner” sounded like it could apply to Carol and Alice working together to pack lunches for the Brady kids. “Partner” also created confusion, because I have a business partner named Cory Isaacson, who is married to a woman.
As Obama pushed back on double standards, I was inspired to do the same. I started by not hesitating to say, “I have a husband and his name is Andrew.” Soon, I was saying it to anyone at any time.
Once I started to eliminate double standards in my own life, I realized that I couldn’t stop with LGBT issues. Double standards hurt us all and no one should have to tolerate them. When Latinos were unfairly vilified by the now President-elect, I encouraged my staff to address the issue head-on. As an agency, we helped create the “Turn Ignorance Around” campaign for the CHIRLA Action Fund. Some warned me that being so vocal might hurt my business, but I understood that the price of my silence—of our collective silence—on the issue was incalculable.
Barack Obama was everybody’s president in the truest sense of the word. His extraordinary eight years lifted us all. That being said, his legacy will always carry a special meaning to anyone who has faced double standards in a society telling them to settle for less.
Aaron Walton is co-founder of Walton Isaacson, “The Planet’s Most Interesting Agency,” which was recently named one of the Inc. 5000 fastest growing private companies in the United States. Walton has been named Advertising Executive of the Year at the Target Market News’ MAAX Awards; he was named to Ebony Magazine’s 2013 “POWER 100” list, and is a member of the OUT 100, a list of the most influential LGBT leaders in America.
|
"""Base class for all the objects in SymPy"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import chain
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import (iterable, Iterator, ordered,
string_types, with_metaclass, zip_longest, range, PY3, Mapping)
from .singleton import S
from inspect import getmro
def as_Basic(expr):
"""Return expr as a Basic instance using strict sympify
or raise a TypeError; this is just a wrapper to _sympify,
raising a TypeError instead of a SympifyError."""
from sympy.utilities.misc import func_name
try:
return _sympify(expr)
except SympifyError:
raise TypeError(
'Argument must be a Basic object, not `%s`' % func_name(
expr))
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'extended_negative': False,
'extended_nonnegative': True, 'extended_nonpositive': False,
'extended_nonzero': True, 'extended_positive': True, 'extended_real':
True, 'finite': True, 'hermitian': True, 'imaginary': False,
'infinite': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real':
True, 'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
tself = type(self)
tother = type(other)
if tself is not tother:
try:
other = _sympify(other)
tother = type(other)
except SympifyError:
return NotImplemented
# As long as we have the ordering of classes (sympy.core),
# comparing types will be slow in Python 2, because it uses
# __cmp__. Until we can remove it
# (https://github.com/sympy/sympy/issues/4269), we only compare
# types in Python 2 directly if they actually have __ne__.
if PY3 or type(tself).__ne__ is not type.__ne__:
if tself != tother:
return False
elif tself is not tother:
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""``a != b`` -> Compare two symbolic trees and see whether they are different
this is the same as:
``a.compare(b) != 0``
but faster
"""
return not self == other
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.subs(dummy, tmp) == o.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
"""Method to return the string representation.
Return the expression as a string.
"""
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
# We don't define _repr_png_ here because it would add a large amount of
# data to any notebook containing SymPy expressions, without adding
# anything useful to the notebook. It can still enabled manually, e.g.,
# for the qtconsole, with init_printing().
def _repr_latex_(self):
"""
IPython/Jupyter LaTeX printing
To change the behavior of this (e.g., pass in some settings to LaTeX),
use init_printing(). init_printing() will also enable LaTeX printing
for built in numeric types like ints and container types that contain
SymPy objects, like lists and dictionaries of expressions.
"""
from sympy.printing.latex import latex
s = latex(self, mode='plain')
return "$\\displaystyle %s$" % s
_repr_latex_orig = _repr_latex_
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
{x, y}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
{1, 2}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
{1, 2, pi}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
{x, y}
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
{1}
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
{1, 2}
        Finally, arguments to atoms() can select more than atomic atoms: any
        sympy type (loaded in core/__init__.py) can be listed as an argument
        and those types of "atoms" found by recursively scanning the arguments
        of the expression will be returned:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def expr_free_symbols(self):
return set([])
def as_dummy(self):
"""Return the expression with any objects having structurally
bound symbols replaced with unique, canonical symbols within
the object in which they appear and having only the default
assumption for commutativity being True.
Examples
========
>>> from sympy import Integral, Symbol
>>> from sympy.abc import x, y
>>> r = Symbol('r', real=True)
>>> Integral(r, (r, x)).as_dummy()
Integral(_0, (_0, x))
>>> _.variables[0].is_real is None
True
Notes
=====
Any object that has structural dummy variables should have
a property, `bound_symbols` that returns a list of structural
dummy symbols of the object itself.
Lambda and Subs have bound symbols, but because of how they
are cached, they already compare the same regardless of their
bound symbols:
>>> from sympy import Lambda
>>> Lambda(x, x + 1) == Lambda(y, y + 1)
True
"""
def can(x):
d = {i: i.as_dummy() for i in x.bound_symbols}
# mask free that shadow bound
x = x.subs(d)
c = x.canonical_variables
# replace bound
x = x.xreplace(c)
# undo masking
x = x.xreplace(dict((v, k) for k, v in d.items()))
return x
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
lambda x: can(x))
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.bound_symbols`` to Symbols that do not clash
with any existing symbol in the expression.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: _0}
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import numbered_symbols
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
v = self.bound_symbols
# this free will include bound symbols that are not part of
# self's bound symbols
free = set([i.name for i in self.atoms(Symbol) - set(v)])
for v in v:
d = next(dums)
if v.is_Symbol:
while v.name == d.name or d.name in free:
d = next(dums)
reps[v] = d
return reps
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
"""Helper for rcall method."""
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
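    # Illustrative sketch (not from the original source): a term a(k) is
    # hypergeometric in k when a(k+1)/a(k) simplifies to a rational function
    # of k, which is what hypersimp() checks, e.g.
    #   >>> from sympy import factorial, Symbol
    #   >>> k = Symbol('k')
    #   >>> factorial(k).is_hypergeometric(k)
    #   True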
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_extended_real = self.is_extended_real
if is_extended_real is False:
return False
if not self.is_number:
return False
# don't re-eval numbers that are already evaluated since
# this will create spurious precision
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not (i.is_Number and n.is_Number):
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See Also
========
sympy.core.expr.Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
from sympy import Dummy, Symbol
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, Mapping)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i, s in enumerate(sequence):
if isinstance(s[0], string_types):
# when old is a string we prefer Symbol
s = Symbol(s[0]), s[1]
try:
s = [sympify(_, strict=not isinstance(_, string_types))
for _ in s]
except SympifyError:
# if it can't be sympified, skip it
sequence[i] = None
continue
# skip if there is no change
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy('subs_m')
for old, new in sequence:
com = new.is_commutative
if com is None:
com = True
d = Dummy('subs_d', commutative=com)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also
========
_subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note ``has`` is a structural algorithm with no knowledge of
mathematics. Consider the following half-open interval:
>>> from sympy.sets import Interval
>>> i = Interval.Lopen(0, 5); i
Interval.Lopen(0, 5)
>>> i.args
(0, 5, True, False)
>>> i.has(4) # there is no "4" in the arguments
False
>>> i.has(0) # there *is* a "0" in the arguments
True
Instead, use ``contains`` to determine whether a number is in the
interval or not:
>>> i.contains(4)
True
>>> i.contains(0)
False
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicMeta):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
_has_matcher = getattr(pattern, '_has_matcher', None)
if _has_matcher is not None:
match = _has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
else:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return lambda other: self == other
def replace(self, query, value, map=False, simultaneous=True, exact=None):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False.
In addition, if an expression containing more than one Wild symbol
is being used to match subexpressions and the ``exact`` flag is None
it will be set to True so the match will only succeed if all non-zero
values are received for each Wild that appears in the match pattern.
        Setting this to False accepts a match of 0; while setting it to True
        rejects any match that has a 0 in it. See example below for
cautions.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a, b = map(Wild, 'ab')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
Matching is exact by default when more than one Wild symbol
is used: matching fails unless the match gives non-zero
values for all Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a)
y - 2
>>> (2*x).replace(a*x + b, b - a)
2*x
When set to False, the results may be non-intuitive:
>>> (2*x).replace(a*x + b, b - a, exact=False)
2/x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
When matching a single symbol, `exact` will default to True, but
this may or may not be the behavior that is desired:
Here, we want `exact=False`:
>>> from sympy import Function
>>> f = Function('f')
>>> e = f(1) + f(0)
>>> q = f(a), lambda a: f(a + 1)
>>> e.replace(*q, exact=False)
f(1) + f(2)
>>> e.replace(*q, exact=True)
f(0) + f(2)
But here, the nature of matching makes selecting
the right setting tricky:
>>> e = x**(1 + y)
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False)
1
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(-x - y + 1)
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False)
1
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(1 - y)
It is probably better to use a different form of the query
that describes the target expression more precisely:
>>> (1 + x**(1 + y)).replace(
... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1,
... lambda x: x.base**(1 - (x.exp - 1)))
...
x**(1 - y) + 1
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy, Wild
from sympy.simplify.simplify import bottom_up
try:
query = _sympify(query)
except SympifyError:
pass
try:
value = _sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
if exact is None:
exact = (len(query.atoms(Wild)) > 1)
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**
{str(k)[:-1]: v for k, v in result.items()})
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**
{str(k)[:-1]: v for k, v in result.items()})
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this change during rebuilding;
# XXX this may fail if the object being replaced
# cannot be represented as a Dummy in the expression
# tree, e.g. an ExprConditionPair in Piecewise
# cannot be represented with a Dummy
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy('rec_replace', commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
# if a sub-expression could not be replaced with
# a Dummy then this will fail; either filter
# against such sub-expressions or figure out a
# way to carry out simultaneous replacement
# in this situation.
rv = rv.xreplace(r) # if this fails, see above
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = {k.xreplace(r): v.xreplace(r)
for k, v in mapping.items()}
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
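    # Illustrative sketch (not from the original source): find() collects the
    # matching subexpressions into a set (or a dict of counts when group=True),
    # while count() simply tallies the matches, e.g.
    #   >>> from sympy import Symbol, sin
    #   >>> x, y = Symbol('x'), Symbol('y')
    #   >>> (x + sin(x*y)).find(Symbol)
    #   {x, y}
    #   >>> (x + sin(x*y)).count(x)
    #   2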
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
return pattern.matches(self, old=old)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
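    # Illustrative sketch (not from the original source): count_ops tallies the
    # operations in the expression tree; x + 2*y contains one Add and one Mul:
    #   >>> from sympy.abc import x, y
    #   >>> (x + 2*y).count_ops()
    #   2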
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args]
else:
args = self.args
if pattern is None or isinstance(self, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args, **hints)
if rewritten is not None:
return rewritten
return self.func(*args) if hints.get('evaluate', True) else self
def _accept_eval_derivative(self, s):
# This method needs to be overridden by array-like objects
return s._visit_eval_derivative_scalar(self)
def _visit_eval_derivative_scalar(self, base):
# Base is a scalar
# Types are (base: scalar, self: scalar)
return base._eval_derivative(self)
def _visit_eval_derivative_array(self, base):
# Types are (base: array/matrix, self: scalar)
# Base is some kind of array/matrix,
        # it should have `.applyfunc(lambda x: x.diff(self))` implemented:
return base._eval_derivative_array(self)
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from sympy import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._accept_eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
        As a pattern this function accepts a list of functions to
        rewrite (instances of DefinedFunction class). As a rule
        you can use a string or a destination function instance (in
        this case rewrite() will use the str() function).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined called 'deep'. When 'deep' is set to False it will
forbid functions to rewrite their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
try:
rule = '_eval_rewrite_as_' + args[-1].__name__
                except AttributeError:
rule = '_eval_rewrite_as_' + args[-1].__class__.__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [p for p in pattern if self.has(p)]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
_constructor_postprocessor_mapping = {}
@classmethod
def _exec_constructor_postprocessors(cls, obj):
# WARNING: This API is experimental.
# This is an experimental API that introduces constructor
        # postprocessors for SymPy Core elements. If an argument of a SymPy
# expression has a `_constructor_postprocessor_mapping` attribute, it will
# be interpreted as a dictionary containing lists of postprocessing
# functions for matching expression node names.
clsname = obj.__class__.__name__
postprocessors = defaultdict(list)
for i in obj.args:
try:
postprocessor_mappings = (
Basic._constructor_postprocessor_mapping[cls].items()
for cls in type(i).mro()
if cls in Basic._constructor_postprocessor_mapping
)
for k, v in chain.from_iterable(postprocessor_mappings):
postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
except TypeError:
pass
for f in postprocessors.get(clsname, []):
obj = f(obj)
return obj
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, **kwargs):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
In SymPy (as in Python) two numbers compare the same if they
have the same underlying base-2 representation even though
they may not be the same type:
>>> from sympy import S
>>> 2.0 == S(2)
True
>>> 0.5 == S.Half
True
This routine was written to provide a query for such cases that
would give false when the types do not match:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .numbers import Number
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(a, Number) and isinstance(b, Number):
return a == b and a.__class__ == b.__class__
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
return True
def _atomic(e, recursive=False):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too, unless `recursive` is True.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
{x, y}
>>> _atomic(x + f(y))
{x, f(y)}
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
{y, cos(x), Derivative(f(x), x)}
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
if isinstance(e, Basic):
free = getattr(e, "free_symbols", None)
if free is None:
return {e}
else:
return set()
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
if not recursive:
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node then descends through the
    tree depth-first to yield all of a node's children's pre-order
traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
|
Preview the contact before auto-dialing them.
Calling leads, donors or community members sometimes works better at a more personal pace. An agent needs time to research the contact and review the calling history before placing the call. The preview dialer gives agents exactly that time, and that flexibility, while still giving the manager control over the people being called. The manager also gets a recording of every call made as well as detailed reports on each call.
Stay on the call while the voter talks to the decision maker. Your call is on mute.
Volunteers are crucial to your campaign. Recruit as many as you need, for free! We do not charge per seat in a campaign.
Add a calling script and survey to every campaign. Train your agents on the script, then collect information and track progress on every call. The results are synced directly into your CRM.
Recruit volunteers from around the city or even the world and put them to work. Volunteers can make calls for a campaign from anywhere in the world, right from their browser. Browser calls cost the least. They can also make calls from their phone or softphone.
View complete log of calls for a contact before calling them. Every note saved by an agent is stored and shown as history.
Build a team of volunteers to call supporters and talk to them about the issues. Let the volunteer walk the supporter through the script and practice it with them. Once the supporter is prepared, connect them to the representative while staying on the call. When the call is complete, the volunteer can connect the supporter to another representative if needed.
By anticipating the number of free agents, the drop rate, and the average ring and talk times, the predictive dialer speeds up or slows down the dialing rate to maximise agent talk time and minimise the drop rate. It works best for large campaigns.
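As a rough sketch of how such pacing logic can work (the variable names, formula and thresholds below are illustrative assumptions, not the product's actual algorithm), the dialer can recompute how many numbers to dial after every call event:

    def numbers_to_dial(free_agents, avg_talk_secs, avg_ring_secs,
                        answer_rate, target_drop_rate=0.03):
        # Agents expected to be free by the time newly dialed calls are answered.
        expected_free = free_agents * (1.0 + avg_ring_secs / max(avg_talk_secs, 1.0))
        # Dial enough numbers that, at the observed answer rate, roughly that many
        # calls connect, padded by the drop rate the campaign tolerates.
        return int(expected_free * (1.0 + target_drop_rate) / max(answer_rate, 0.01))

Recomputing this continuously is what lets the dialer speed up when agents sit idle and slow down when answered calls would otherwise have no agent to take them.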
An automated dialer for teams that don't need to be rushed. Contacts are dialed only when agents are free, skipping bad numbers, busy numbers and answering machines. You can also set a faster dialing rate of X:1.
Get detailed reports and analytics for every call made. Track and monitor all calls live.
|
import sre_parse, sre_compile, sre_constants
import simplejson  # used by JSONFilter below for loads()/dumps()
from sre_constants import BRANCH, SUBPATTERN
from re import VERBOSE, MULTILINE, DOTALL
import re
import cgi
import warnings
_speedups = None
class JSONFilter(object):
def __init__(self, app, mime_type='text/x-json'):
self.app = app
self.mime_type = mime_type
def __call__(self, environ, start_response):
# Read JSON POST input to jsonfilter.json if matching mime type
response = {'status': '200 OK', 'headers': []}
def json_start_response(status, headers):
response['status'] = status
response['headers'].extend(headers)
environ['jsonfilter.mime_type'] = self.mime_type
if environ.get('REQUEST_METHOD', '') == 'POST':
if environ.get('CONTENT_TYPE', '') == self.mime_type:
args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
data = environ['wsgi.input'].read(*map(int, args))
environ['jsonfilter.json'] = simplejson.loads(data)
res = simplejson.dumps(self.app(environ, json_start_response))
jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
if jsonp:
content_type = 'text/javascript'
res = ''.join(jsonp + ['(', res, ')'])
elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
# Opera has bunk XMLHttpRequest support for most mime types
content_type = 'text/plain'
else:
content_type = self.mime_type
headers = [
('Content-type', content_type),
('Content-length', len(res)),
]
headers.extend(response['headers'])
start_response(response['status'], headers)
return [res]
def factory(app, global_conf, **kw):
return JSONFilter(app, **kw)
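# Hypothetical usage sketch (the names below are assumptions, not part of the
# original module): JSONFilter wraps a WSGI app so that JSON POST bodies are
# decoded into environ['jsonfilter.json'] and whatever the app returns is
# serialized back out as JSON (or JSONP when a ?jsonp= callback is supplied).
#
#   def my_app(environ, start_response):
#       return {'echo': environ.get('jsonfilter.json')}
#
#   wsgi_app = JSONFilter(my_app, mime_type='application/json')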
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')  # escape all control chars below 0x20, as JSON requires
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
# escape all forward slashes to prevent </script> attack
'/': '\\/',
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return str(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
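# Illustrative sketch (not from the original source): floatstr() maps the IEEE
# specials onto JavaScript-style literals and leaves ordinary floats to str(),
# e.g.
#   >>> floatstr(float('inf'))
#   'Infinity'
#   >>> floatstr(float('nan'), allow_nan=False)   # raises ValueError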
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
encode_basestring_ascii = _speedups.encode_basestring_ascii
_need_utf8 = True
except AttributeError:
_need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8'):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_need_utf8 and _encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_need_utf8 and _encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks...
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8' and _need_utf8)):
o = o.decode(_encoding)
return encode_basestring_ascii(o)
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
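# A minimal sketch of the two extension points documented above: overriding
# ``default`` for otherwise unserializable types, and consuming ``iterencode``
# as a stream.  ``_SetEncoder`` and ``_example_streaming`` are illustrative
# names, not part of the simplejson API.
class _SetEncoder(JSONEncoder):
    def default(self, o):
        # Turn sets into lists; fall back to the base implementation
        # (which raises TypeError) for anything else.
        if isinstance(o, (set, frozenset)):
            return list(o)
        return JSONEncoder.default(self, o)

def _example_streaming(fp):
    # Write the encoded output to ``fp`` chunk by chunk instead of building
    # one large string with ``encode``.
    enc = _SetEncoder()
    for chunk in enc.iterencode({'primes': set([2, 3, 5, 7])}):
        fp.write(chunk)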
FLAGS = (VERBOSE | MULTILINE | DOTALL)
#FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""
Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator
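# A small sketch of how ``pattern`` and ``Scanner`` fit together (the toy
# token handlers below are illustrative, not part of this module): each
# handler receives the match object plus an optional context and returns a
# ``(value, next_position)`` pair, where ``None`` keeps the scanner's own end.
def _scanner_example(text):
    def _word(match, context):
        return ('word', match.group(0)), None
    pattern(r'[A-Za-z]+')(_word)

    def _number(match, context):
        return ('number', int(match.group(0))), None
    pattern(r'\d+')(_number)

    toy = Scanner([_word, _number])
    # iterscan yields (handler result, end index) and stops at the first
    # position where no lexicon entry matches (e.g. at whitespace here).
    return list(toy.iterscan(text))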
def _floatconstants():
import struct
import sys
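    # Two big-endian IEEE-754 doubles packed back to back: 7FF8... is a quiet
    # NaN and 7FF0... is +Infinity; on little-endian machines each 8-byte half
    # is byte-reversed below before unpacking.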
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
return c[match.group(0)], None
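# The regexes are attached by calling the ``pattern`` decorator explicitly,
# e.g. ``pattern(...)(JSONConstant)``, which gives each handler the
# ``.pattern`` and ``.regex`` attributes that ``Scanner`` expects from its
# lexicon entries.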
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
res = float(integer + (frac or '') + (exp or ''))
else:
res = int(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
try:
m = unichr(int(esc, 16))
if len(esc) != 4 or not esc.isalnum():
raise ValueError
except ValueError:
raise ValueError(errmsg("Invalid \\uXXXX escape", s, end))
end += 5
_append(m)
return u''.join(chunks), end
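# A brief, assumed illustration of the ``scanstring`` contract (the helper
# name is hypothetical): the second argument is the index just past the
# opening quote, and the result is the decoded text plus the index just past
# the closing quote.
def _scanstring_example():
    text = '"hi\\n" tail'
    value, end = scanstring(text, 1)
    assert value == u'hi\n' and end == 6
    return value, end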
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
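# Module-level lexicon covering every JSON value type; JSONObject and
# JSONArray recurse through ``JSONScanner.iterscan`` for their nested values.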
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
        strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
"""
self.encoding = encoding
self.object_hook = object_hook
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
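# A short sketch of driving the decoder directly with an ``object_hook`` (the
# helper and the point representation are hypothetical, for illustration only).
def _decoder_example():
    def as_point(d):
        if 'x' in d and 'y' in d:
            return (d['x'], d['y'])
        return d
    dec = JSONDecoder(object_hook=as_point)
    # Every decoded JSON object is passed through the hook, innermost first.
    return dec.decode('{"x": 1, "y": 2}')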
__version__ = '1.7.1'
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some of the chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
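# A minimal usage sketch for ``dump`` (Python 2 ``StringIO`` stands in for any
# ``.write()``-supporting object; the helper name is illustrative).
def _dump_example():
    from StringIO import StringIO
    buf = StringIO()
    dump({'answer': 42}, buf)
    return buf.getvalue()     # '{"answer": 42}' with the default separators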
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
**kw).encode(obj)
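# A minimal sketch of the ``separators`` knob documented above, assuming the
# encoder applies the (item_separator, dict_separator) pair as described.
def _dumps_example():
    verbose = dumps({'a': [1, 2, 3]})                          # '{"a": [1, 2, 3]}'
    compact = dumps({'a': [1, 2, 3]}, separators=(',', ':'))   # '{"a":[1,2,3]}'
    return verbose, compact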
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
    object and passed to ``loads()``.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if cls is None and encoding is None and object_hook is None and not kw:
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
return cls(encoding=encoding, **kw).decode(s)
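# A short usage sketch for ``loads`` (the helper name and the ``__set__``
# hinting convention are hypothetical): plain decoding, plus an
# ``object_hook`` used for simple class hinting.
def _loads_example():
    plain = loads('[1, 2.5, null, "caf\\u00e9"]')   # [1, 2.5, None, u'caf\xe9']
    hinted = loads('{"__set__": [1, 2, 3]}',
                   object_hook=lambda d: set(d['__set__']) if '__set__' in d else d)
    return plain, hinted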
def read(s):
"""
json-py API compatibility hook. Use loads(s) instead.
"""
import warnings
warnings.warn("simplejson.loads(s) should be used instead of read(s)",
DeprecationWarning)
return loads(s)
def write(obj):
"""
json-py API compatibility hook. Use dumps(s) instead.
"""
import warnings
warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
DeprecationWarning)
return dumps(obj)