Dataset schema: repo_name (string), path (string), copies (string), size (string), text (string), license (15 classes), hash (int64), line_mean (float64), line_max (int64), alpha_frac (float64), autogenerated (bool), ratio (float64), config_test (bool), has_no_keywords (bool), few_assignments (bool)

CNS-OIST/STEPS_Example | publication_models/API_2/Anwar_J_Neurosci_2013/StochasticHH.py | gpl-2.0

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# *StochasticHH.py : The stochastic Hodgkin-Huxley model, used in the
# above study.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python StochasticHH.py *mesh* *root* *iter_n*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (intended to be an integer) is an identifier number for each
# simulation iteration.
#
# E.g:
# $ python StochasticHH.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochHHsims/ 1
#
#
# OUTPUT
#
# In (root)/data/StochasticHH/(mesh)/(iter_n+time) directory
# 2 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), Na current, K current, leak current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import steps.interface
import math
import time
from random import *
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from steps.saving import *
from extra.constants_hh import *
import sys
import os
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
_, meshfile_ab, root, iter_n = sys.argv
if meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp':
cyl160=True
else:
cyl160=False
########################### BIOCHEMICAL MODEL ###############################
mdl = Model()
r = ReactionManager()
with mdl:
ssys = SurfaceSystem.Create()
# Potassium channel
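    # (States n0..n4 count the open n-gates of the HH Kv scheme; only the
    #  fully-open n4 state conducts, as encoded by OC_K below.)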
n0, n1, n2, n3, n4 = SubUnitState.Create()
Kn = SubUnit.Create([n0, n1, n2, n3, n4])
Kchan = Channel.Create([Kn])
_a_n = VDepRate(lambda V: 1.0e3 * a_n(V*1.0e3)* Qt, vrange=Vrange)
_b_n = VDepRate(lambda V: 1.0e3 * b_n(V*1.0e3)* Qt, vrange=Vrange)
# Sodium channel
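    # (Eight-state Markov scheme: m0..m3 activation crossed with h0/h1
    #  inactivation; the m3,h1 state conducts, as encoded by OC_Na below.)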
m0, m1, m2, m3, h0, h1 = SubUnitState.Create()
Nam, Nah = SubUnit.Create([m0, m1, m2, m3], [h0, h1])
Nachan = Channel.Create([Nam, Nah])
_a_m = VDepRate(lambda V:1.0e3*a_m(V*1.0e3)* Qt, vrange=Vrange)
_b_m = VDepRate(lambda V:1.0e3*b_m(V*1.0e3)* Qt, vrange=Vrange)
_a_h = VDepRate(lambda V:1.0e3*a_h(V*1.0e3)* Qt, vrange=Vrange)
_b_h = VDepRate(lambda V:1.0e3*b_h(V*1.0e3)* Qt, vrange=Vrange)
# Leak channel
Leak = SubUnitState.Create()
L = Channel.Create([Leak])
with ssys:
with Kchan[...]:
n0.s <r[1]> n1.s <r[2]> n2.s <r[3]> n3.s <r[4]> n4.s
r[1].K = 4 * _a_n, 1 * _b_n
r[2].K = 3 * _a_n, 2 * _b_n
r[3].K = 2 * _a_n, 3 * _b_n
r[4].K = 1 * _a_n, 4 * _b_n
with Nachan[...]:
h0.s <r[1]> h1.s
r[1].K = _a_h, _b_h
m0.s <r[1]> m1.s <r[2]> m2.s <r[3]> m3.s
r[1].K = 3*_a_m, _b_m
r[2].K = 2*_a_m, 2*_b_m
r[3].K = _a_m, 3*_b_m
OC_K = OhmicCurr.Create(Kchan[n4], K_G, K_rev)
OC_Na = OhmicCurr.Create(Nachan[m3, h1], Na_G, Na_rev)
OC_L = OhmicCurr.Create(L[Leak], L_G, leak_rev)
##################################
########### MESH & COMPARTMENTALIZATION #################
########## Import Mesh
mesh = TetMesh.Load('./meshes/'+meshfile_ab)
with mesh:
rad, zmin, zmax = 1e-6, -200e-6, 200e-6
inner_tets, outer_tets = TetList(), TetList()
for t in mesh.tets:
c = t.center
if zmin <= c.z <= zmax and c.x**2 + c.y**2 <= rad**2:
inner_tets.append(t)
else:
outer_tets.append(t)
print(len(outer_tets), " tets in outer compartment")
print(len(inner_tets), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh.tets[0.0, 0.0, 0.0]
########## Create an intracellular compartment i.e. cytosolic compartment
cyto = Compartment.Create(inner_tets)
if cyl160:
# Ensure that we use points a small distance inside the boundary:
minz, maxz = mesh.bbox.min.z, mesh.bbox.max.z
        memb_tris = TriList(tri for tri in mesh.surface if minz < tri.center.z < maxz)
else:
print('Finding connecting triangles...')
memb_tris = inner_tets.surface & outer_tets.surface
########## Create a membrane as a surface mesh
memb = Patch.Create(memb_tris, cyto, None, ssys)
# For EField calculation
print("Creating membrane..")
membrane = Membrane.Create([memb])
print("Membrane created.")
###### TRANSLATION TOKEN
# # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
rng = RNG('mt19937', 512, 7)
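# 'mt19937' is the Mersenne Twister generator; the seed given here is
# overridden per iteration by rng.initialize(100*int(iter_n)) before the run.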
sim = Simulation('Tetexact', mdl, mesh, rng, calcMembPot=True)
#### Recording #####
dc = time.strftime('%b%d_%H_%M_%S_%Y')
runPath = os.path.join(root, 'data/StochasticHH/', meshfile_ab, f'{iter_n}__{dc}')
os.makedirs(runPath, exist_ok=True)
rs = ResultSelector(sim)
rs1 = rs.SUM(rs.TRIS(memb_tris).OC_Na.I) <<\
rs.SUM(rs.TRIS(memb_tris).OC_K.I) <<\
rs.SUM(rs.TRIS(memb_tris).OC_L.I)
rs2 = rs.TET(cent_tet).V
rs1.toFile(os.path.join(runPath, 'currents.dat.bin'))
rs2.toFile(os.path.join(runPath, 'voltage.dat.bin'))
sim.toSave(rs1, rs2, dt=TIMECONVERTER)
print("Resetting simulation object..")
sim.newRun()
print("Injecting molecules..")
sim.Temp = TEMPERATURE+273.15
surfarea = sim.memb.Area
sim.memb.L[Leak].Count = round(L_ro * surfarea)
for h, hsu in enumerate(Nah):
for m, msu in enumerate(Nam):
sim.memb.Nachan[msu, hsu].Count = round(Na_ro*surfarea*Na_facs[h*4 + m])
for n, ksu in enumerate(Kn):
sim.memb.Kchan[ksu].Count = round(K_ro*surfarea*K_facs[n])
print('Leak', round(L_ro * surfarea))
print('Na_m0h0', round(Na_ro*surfarea*Na_facs[0]))
print('Na_m1h0', round(Na_ro*surfarea*Na_facs[1]))
print('Na_m2h0', round(Na_ro*surfarea*Na_facs[2]))
print('Na_m3h0', round(Na_ro*surfarea*Na_facs[3]))
print('Na_m0h1', round(Na_ro*surfarea*Na_facs[4]))
print('Na_m1h1', round(Na_ro*surfarea*Na_facs[5]))
print('Na_m2h1', round(Na_ro*surfarea*Na_facs[6]))
print('Na_m3h1', round(Na_ro*surfarea*Na_facs[7]))
print('K_n0', round(K_ro*surfarea*K_facs[0]))
print('K_n1', round(K_ro*surfarea*K_facs[1]))
print('K_n2', round(K_ro*surfarea*K_facs[2]))
print('K_n3', round(K_ro*surfarea*K_facs[3]))
print('K_n4', round(K_ro*surfarea*K_facs[4]))
print("Targeted Injection: ", round(Na_ro*surfarea), "Na channels")
print("Targeted Injection: ", round(K_ro*surfarea), "K channels")
print("Targeted Injection: ", round(L_ro*surfarea), "Leak channels")
sim.EfieldDT = EF_DT
sim.membrane.Potential = init_pot
sim.membrane.VolRes = Ra
sim.membrane.Capac = memb_capac
rng.initialize(100*int(iter_n))
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
sim.run(TIMECONVERTER*l)
# This last part is only present for backwards compatibility with the scripts created with API_1.
# We need to save to text files, like in the original script.
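# Dividing the summed current (A) by the membrane area gives A/m^2; the extra
# 0.1 factor rescales that to mA/cm^2.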
with open(os.path.join(runPath, 'currents.dat'), 'w') as f:
for t, row in zip(rs1.time[0], rs1.data[0]):
f.write('%.6g' % (t * 1e3) + ' ')
for val in row:
f.write('%.6g' % (val * 0.1 / surfarea) + ' ')
f.write('\n')
with open(os.path.join(runPath, 'voltage.dat'), 'w') as f:
for t, row in zip(rs2.time[0], rs2.data[0]):
f.write('%.6g' % (t * 1e3) + ' ')
for val in row:
f.write('%.6g' % (val * 1e3) + ' ')
f.write('\n')

ChristopherLucas/txtorg | textorganizer/indexutils.py | mit

#from whoosh.reading import iter_docs
import threading, sys, time, os, csv, re, codecs, shutil
from collections import defaultdict
def reindex_all(reader, writer, analyzer):
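    # Rewrites every live document in the index; when a document carries a
    # 'path' field, the referenced file is re-read into a 'contents' field.
    # (Term, Document and Field are expected to be provided by the Lucene
    # bindings imported elsewhere in txtorg.)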
for i in xrange(reader.maxDoc()):
if reader.isDeleted(i): continue
doc = reader.document(i)
p = doc.get("path")
pkid = doc.get('txtorg_id')
if p is None:
# No filepath specified, just use original document
writer.updateDocument(Term("txtorg_id",pkid),doc,analyzer)
else:
# if a path field is found, try to read the file it points to and add a contents field
edited_doc = Document()
for f in doc.getFields():
edited_doc.add(Field.cast_(f))
try:
inf = open(p)
contents = unicode(inf.read(), 'UTF-8')
inf.close()
if len(contents) > 0:
edited_doc.add(Field("contents", contents,
Field.Store.NO,
Field.Index.ANALYZED,
Field.TermVector.YES))
else:
print "warning: no content in %s" % filename
except:
print "Could not read file; skipping"
writer.updateDocument(Term("txtorg_id",pkid),edited_doc,analyzer)
def delete_index(index_path):
shutil.rmtree(index_path)
def get_fields_and_values(reader, max_vals = 30):
    # Collect each indexed field and its full set of terms. (max_vals is only
    # honoured by the legacy per-document scan kept commented out below.)
    all_fields = defaultdict(set)
    for field_name in reader.indexed_field_names():
        all_fields[field_name] = reader.field_terms(field_name)
    return dict(all_fields)
# for i in xrange(reader.maxDoc()):
# if reader.isDeleted(i): continue
# doc = reader.document(i)
# for f in doc.getFields():
# field = Field.cast_(f)
# if len(all_fields[field.name()]) < max_vals: all_fields[field.name()].add(field.stringValue())
# return dict(all_fields)

sawardekar/django | test_django/settings.py | mit

"""
Django settings for test_django project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '**6vq2nfe-!0r+wy$wgw9woam3#3$c!23ol-2+ax(l-=oeluhd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
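# NOTE: hostnames served by this app must be listed here once DEBUG is False.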
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'music',
'login',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'test_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
# connect to sqlite3 database
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
### connect to postgres database
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'music',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 8000

Stratoscale/pyracktest | py/strato/racktest/infra/rootfslabel.py | apache-2.0

from upseto import gitwrapper
from upseto import run
import subprocess
class RootfsLabel:
def __init__(self, rootfs, product="rootfs"):
self._rootfs = rootfs
self._product = product
if rootfs == "THIS":
self._label = run.run([
"solvent", "printlabel", "--thisProject", "--product=%s" % (self._product,)]).strip()
wrapper = gitwrapper.GitWrapper(".")
self._hint = wrapper.originURLBasename()
elif self._labelExists(self._rootfs):
self._label = self._rootfs
self._hint = self._rootfs
elif "__" in self._rootfs:
repository, product = self._rootfs.split("__")
self._label = run.run([
"solvent", "printlabel", "--repositoryBasename", repository, "--product", product]).strip()
self._hint = repository
else:
self._label = run.run([
"solvent", "printlabel", "--repositoryBasename", rootfs,
"--product=%s" % (self._product,)]).strip()
self._hint = rootfs
def label(self):
return self._label
def imageHint(self):
return self._hint
def _labelExists(self, label):
with open("/dev/null", "w") as out:
return subprocess.call(["solvent", "labelexists", "--label", label], stdout=out, stderr=out) == 0

zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/newtouch/server/forms.py | apache-2.0

from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.dashboards.newtouch.models import Server,Service
def get_available_services():
services = Service.objects.all()
return ((service.name, service.name) for service in services)
class ServerEditServicesForm(forms.SelfHandlingForm):
services_available = forms.MultipleChoiceField(label=_('services_available'),
widget=forms.CheckboxSelectMultiple,
choices=get_available_services())
def __init__(self, request, *args, **kwargs):
super(ServerEditServicesForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
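        # Replace the server's current service set with the selection.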
try:
server = Server.objects.get(pk=self.initial['pk'])
server.services.clear()
for service in data['services_available']:
server.services.add(Service.objects.get(name=service).id)
server.save()
message = _('Successfully Add Services %s') % (self.initial['pk'])
messages.success(request, message)
except Exception:
exceptions.handle(request, _('Unable to Add Services.'))
return True
class EditServerForm(forms.SelfHandlingForm):
snmp_version = forms.CharField(label=_("SNMP Version"),
max_length=255)
snmp_commit = forms.CharField(label=_("SNMP Commit"),
max_length=255)
ssh_name = forms.CharField(label=_("SSH Name"),
max_length=255,
required=False)
ssh_key = forms.CharField(label=_("SSH Key"),
max_length=255,
required=False)
def __init__(self, request, *args, **kwargs):
super(EditServerForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
pk = self.initial['pk']
snmp_version = data['snmp_version']
snmp_commit = data['snmp_commit']
ssh_name = data['ssh_name']
ssh_key = data['ssh_key']
try:
Server.objects.filter(pk=pk).update(snmp_version=snmp_version,
snmp_commit=snmp_commit,
ssh_name=ssh_name,
ssh_key=ssh_key)
server_name = Server.objects.get(pk = pk).name
message = _('Successfully update Server %s') % (server_name)
messages.success(request, message)
except Exception:
exceptions.handle(request, _('Unable to update the Server.'))
return True

torchingloom/edx-platform | lms/djangoapps/branding/tests.py | agpl-3.0

"""
Tests for branding page
"""
import datetime
from pytz import UTC
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.utils import override_settings
from django.test.client import RequestFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import editable_modulestore
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
import student.views
FEATURES_WITH_STARTDATE = settings.FEATURES.copy()
FEATURES_WITH_STARTDATE['DISABLE_START_DATES'] = False
FEATURES_WO_STARTDATE = settings.FEATURES.copy()
FEATURES_WO_STARTDATE['DISABLE_START_DATES'] = True
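# Two feature-flag variants: one honouring course start dates, one ignoring them.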
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class AnonymousIndexPageTest(ModuleStoreTestCase):
"""
Tests that anonymous users can access the '/' page, Need courses with start date
"""
def setUp(self):
self.store = editable_modulestore()
self.factory = RequestFactory()
self.course = CourseFactory.create()
self.course.days_early_for_beta = 5
self.course.enrollment_start = datetime.datetime.now(UTC) + datetime.timedelta(days=3)
self.store.update_item(self.course)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_none_user_index_access_with_startdate_fails(self):
"""
This is a regression test for a bug where the incoming user is
anonymous and start dates are being checked. It replaces a previous
test as it solves the issue in a different way
"""
request = self.factory.get('/')
request.user = AnonymousUser()
student.views.index(request)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_anon_user_with_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
@override_settings(FEATURES=FEATURES_WO_STARTDATE)
def test_anon_user_no_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_allow_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the default setting is to ALLOW iframing
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'ALLOW')
@override_settings(X_FRAME_OPTIONS='DENY')
def test_deny_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the override value is honored
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'DENY')

tgbugs/pyontutils | nifstd/complete/nifga_deprecation.py | mit

#!/usr/bin/env python3
# this should be run at NIF-Ontology 9ef5b9e63d60f92cd01733be9d480ac3e5aee31c
# TODO need to retrieve the FMA hierarchy...
import os
from collections import defaultdict, namedtuple
import rdflib
from rdflib import URIRef, RDFS, RDF, OWL
from rdflib.namespace import SKOS
import requests
from pyontutils.scigraph import Vocabulary, Graph
from pyontutils.utils import TODAY, async_getter, TermColors as tc
from pyontutils.scig import scigPrint
from pyontutils.hierarchies import creatTree, flatten
from pyontutils.core import devconfig, OntMeta, makePrefixes, makeGraph
from pyontutils.core import NIFRID, oboInOwl
from IPython import embed
sgg = Graph(cache=True)
sgv = Vocabulary(cache=True)
Query = namedtuple('Query', ['root','relationshipType','direction','depth'])
CON = oboInOwl.consider
DBX = oboInOwl.hasDbXref # FIXME also behaves as objectProperty :/
AID = oboInOwl.hasAlternativeId
IRBC = NIFRID.isReplacedByClass
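# Annotation predicates mined from SciGraph node metadata when hunting for
# candidate UBERON replacements.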
PREFIXES = makePrefixes('UBERON',
'ro',
'owl',
'skos',
)
NIFPREFIXES = makePrefixes('NIFGA',
'oboInOwl',
'replacedBy',
)
NIFPREFIXES.update(PREFIXES)
nifga_path = devconfig.ontology_local_repo + '/ttl/NIF-GrossAnatomy.ttl'
uberon_path = devconfig.ontology_local_repo + '/ttl/external/uberon.owl'
uberon_bridge_path = 'http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-nifstd.owl'
#bridge_path = os.path.expanduser('~/git/NIF-Ontology/ttl/uberon-bridge-to-nifstd.ttl') # scigraph's got us
#uberon_obsolete = {'UBERON:0022988', # obsolete regional part of thalamaus
#'UBERON:0014606', # replaced by UBERON:0002434
#}
# TODO need to unpack all the oboInOwl:hasAlternativeId entries for the purposes of resolution... (madness)
manual = {'NIFGA:nlx_144456':'UBERON:0034918', # prefer over UBERON:0002565, see note on UBERON:0034918
          'NIFGA:birnlex_1248':'UBERON:0002434', # fix for what is surely an outdated bridge
          'NIFGA:nlx_anat_20081242':'UBERON:0004073', # as of the latest version of uberon 'UBERON:0004073' replaces 'UBERON:0019281'
          'NIFGA:nlx_59721':'UBERON:0001944', # (equivalentClass NIFGA:nlx_59721 NIFGA:birnlex_703) pollutes
'NIFGA:birnlex_703':'UBERON:0001944', # insurance
#'NIFGA:birnlex_1663':'UBERON:0002265', # FIXME this is in hasDbXref ... AND equivalentClass... wat
'NIFGA:birnlex_1191':'UBERON:0001885', # this was already replaced by NIFGA:birnlex_1178, the existing equiv assertion to UBERON:0035560 is also obsolete, so we are overriding so we don't have to chase it all down again
'NIFGA:birnlex_2598':'UBERON:0000044', # UBERON:0026602 is the alternative and is a bug from the old version of the uberon to nif bridge :/ this has been fixed in the nifgad branch of the ontology but has not been propagated to scigraph
'NIFGA:nlx_anat_20090702':'UBERON:0022327', # UBERON:0032288 is an alternate id for UBERON:0022327
'NIFGA:birnlex_864':'UBERON:0014450', # UBERON:0002994 is an alternate id for UBERON:0014450
'NIFGA:birnlex_2524':'UBERON:0006725', # UBERON:0028186 is an alternate id for UBERON:0006725
'NIFGA:nlx_anat_20081245':'UBERON:0002613', # was previously deprecated without a replaced by
'NIFGA:birnlex_726':'UBERON:0001954', # 726 was already deped, this is a good match detect by moar
'NIFGA:nlx_anat_20081252':'UBERON:0014473', # already deprecated, found via moar
'NIFGA:nifext_15':'NOREP', # does not exist
          'NIFGA:birnlex_9':'NOREP', # unused biophysical immaterial entity
          # afferent roles that were never well developed
'NIFGA:nlx_anat_1010':'NOREP',
'NIFGA:nlx_anat_1011003':'NOREP',
'NIFGA:nlx_anat_1011004':'NOREP',
'NIFGA:nlx_anat_1011005':'NOREP',
'NIFGA:nlx_anat_1011006':'NOREP',
'NIFGA:nlx_anat_1011007':'NOREP',
'NIFGA:nlx_anat_1011008':'NOREP',
'NIFGA:nlx_anat_1011009':'NOREP',
'NIFGA:nlx_anat_1011010':'NOREP',
'NIFGA:nlx_anat_1011011':'NOREP',
}
preflabs = ( # pulled from conflated
'NIFGA:birnlex_2596',
'NIFGA:birnlex_4101',
'NIFGA:birnlex_1184',
'NIFGA:birnlex_703',
'NIFGA:birnlex_1117',
'NIFGA:nlx_143552',
'NIFGA:birnlex_1341',
'NIFGA:birnlex_1335',
'NIFGA:birnlex_1400',
'NIFGA:birnlex_1519', # NOTE: nerve root and nerve fiber bundle are being conflated...
'NIFGA:birnlex_1277',
'NIFGA:birnlex_2523',
'NIFGA:birnlex_2528', # a real exact duple with 2529 apparently
'NIFGA:birnlex_2651', # a real exact duple with 2654 apparently
'NIFGA:nlx_anat_20081224', # other option is 'NIFGA:birnlex_932' -> Lingula
'NIFGA:nlx_anat_20081235', # other option is 'NIFGA:birnlex_1165' -> Nodulus
'NIFGA:birnlex_1588',
'NIFGA:birnlex_1106',
'NIFGA:birnlex_1582',
'NIFGA:birnlex_1589',
'NIFGA:birnlex_1414',
'NIFGA:birnlex_4081',
)
cross_over_issues = 'NIFSUB:nlx_subcell_100205'
wat = 'NIFGA:nlx_144456'
anns_to_port = [] # (SKOS.prefLabel, ) # skipping this for now :/
def invert(dict_):
output = defaultdict(list)
for k,v in dict_.items():
output[v].append(k)
return dict(output)
def review_reps(dict_):
for k,v in invert(dict_).items():
if k is None:
continue
if len(v) > 1:
kn = sgv.findById(k)
print(k, kn['labels'][0])
for s in kn['synonyms']:
print(' ' * 4, s)
for v_ in v:
n = sgv.findById(v_)
print(' ' * 8, v_, n['labels'][0])
for s in n['synonyms']:
print(' ' * 12, s)
def review_norep(list_):
print('List of norep (aka already deprecated) to review')
for curie in list_:
n = sgg.getNode(curie)
scigPrint.pprint_node(n)
def do_deprecation(replaced_by, g, additional_edges, conflated):
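    # Marks each NIFGA class deprecated, points it at its UBERON replacement
    # (via replacedBy:, or an xref when the replacement is itself deprecated),
    # and ports part-of hierarchy edges onto the uberon-bridge graph.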
bmeta = OntMeta('http://ontology.neuinfo.org/NIF/ttl/bridge/',
'uberon-bridge',
'NIFSTD Uberon Bridge',
'UBERON Bridge',
('This is the bridge file that holds local NIFSTD additions to uberon. '
'This is also staging for any changes that we want to push upstream.'),
TODAY())
ontid = bmeta.path + bmeta.filename + '.ttl'
bridge = makeGraph('uberon-bridge', PREFIXES)
bridge.add_ont(ontid, *bmeta[2:])
graph = makeGraph('NIF-GrossAnatomy', NIFPREFIXES, graph=g)
#graph.g.namespace_manager._NamespaceManager__cache = {}
#g.namespace_manager.bind('UBERON','http://purl.obolibrary.org/obo/UBERON_') # this has to go in again because we reset g FIXME
udone = set('NOREP')
uedges = defaultdict(lambda:defaultdict(set))
def inner(nifga, uberon):
# check neuronames id TODO
udepr = sgv.findById(uberon)['deprecated'] if uberon != 'NOREP' else False
if udepr:
# add xref to the now deprecated uberon term
graph.add_trip(nifga, 'oboInOwl:hasDbXref', uberon)
#print('Replacement is deprecated, not replacing:', uberon)
graph.add_trip(nifga, RDFS.comment, 'xref %s is deprecated, so not using replacedBy:' % uberon)
else:
# add replaced by -> uberon
graph.add_trip(nifga, 'replacedBy:', uberon)
# add deprecated true (ok to do twice...)
graph.add_trip(nifga, OWL.deprecated, True)
# review nifga relations, specifically has_proper_part, proper_part_of
# put those relations on the uberon term in the
# if there is no uberon term raise an error so we can look into it
#if uberon not in uedges:
#uedges[uberon] = defaultdict(set)
resp = sgg.getNeighbors(nifga)
edges = resp['edges']
if nifga in additional_edges:
edges.append(additional_edges[nifga])
include = False # set this to True when running anns
for edge in edges: # FIXME TODO hierarchy extraction and porting
#print(edge)
if udepr: # skip everything if uberon is deprecated
include = False
hier = False
break
sub = edge['sub']
obj = edge['obj']
pred = edge['pred']
hier = False
if pred == 'subClassOf':
pred = RDFS.subClassOf
continue
elif pred == 'equivalentClass':
pred = OWL.equivalentClass
continue
elif pred == 'isDefinedBy':
pred = RDFS.isDefinedBy
continue
elif pred == 'http://www.obofoundry.org/ro/ro.owl#has_proper_part':
hier = True
include = True
elif pred == 'http://www.obofoundry.org/ro/ro.owl#proper_part_of':
hier = True
include = True
elif pred == 'ilx:partOf':
hier = True
include = True
if sub == nifga:
try:
obj = replaced_by[obj]
if obj == 'NOREP':
hier = False
except KeyError:
print('not in replaced_by', obj)
if type(obj) == tuple: continue # TODO
if hier:
if uberon not in uedges[obj][pred]:
uedges[obj][pred].add(uberon)
bridge.add_hierarchy(obj, pred, uberon)
else:
#bridge.add_trip(uberon, pred, obj)
pass
elif obj == nifga:
try:
sub = replaced_by[sub]
if sub == 'NOREP':
hier = False
except KeyError:
print('not in replaced_by', sub)
if type(sub) == tuple: continue # TODO
if hier:
if sub not in uedges[uberon][pred]:
uedges[uberon][pred].add(sub)
bridge.add_hierarchy(uberon, pred, sub)
else:
#bridge.add_trip(sub, pred, uberon)
pass
if False and uberon not in udone and include: # skip porting annotations and labels for now
#udone.add(uberon)
try:
label = sgv.findById(uberon)['labels'][0]
except IndexError:
WAT = sgv.findById(uberon)
embed()
bridge.add_class(uberon, label=label)
# annotations to port
for p in anns_to_port:
os_ = list(graph.g.objects(graph.expand(nifga), p))
for o in os_:
if label.lower() != o.lower(): # we can simply capitalize labels
print(label.lower())
print(o.lower())
print()
bridge.add_trip(uberon, p, o)
if p == SKOS.prefLabel and not os_:
if uberon not in conflated or (uberon in conflated and nifga in preflabs):
l = list(graph.g.objects(graph.expand(nifga), RDFS.label))[0]
bridge.add_trip(uberon, SKOS.prefLabel, l) # port label to prefLabel if no prefLabel
for nifga, uberon in replaced_by.items():
if type(uberon) == tuple:
print(uberon)
for ub in uberon:
print(ub)
inner(nifga, ub)
elif uberon == 'NOREP':
graph.add_trip(nifga, OWL.deprecated, True) # TODO check for missing edges?
elif uberon is None:
continue # BUT TODAY IS NOT THAT DAY!
else:
inner(nifga, uberon)
return graph, bridge, uedges
def print_report(report, fetch=False):
for eid, r in report.items():
out = ('**************** Report for {} ****************'
'\n\tNRID: {NRID}\n\tURID: {URID} {UDEP}\n\tMATCH: {MATCH}\n')
#if not r['MATCH']:
print(out.format(eid, **r))
if fetch:
scigPrint.pprint_node(sgg.getNode('NIFGA:' + eid))
if r['NRID']: scigPrint.pprint_node(sgg.getNode(r['NRID']))
if r['URID']: scigPrint.pprint_node(sgg.getNode(r['URID']))
def print_trees(graph, bridge):
PPO = 'ro:proper_part_of'
HPP = 'ro:has_proper_part'
hpp = HPP.replace('ro:', graph.namespaces['ro'])
ppo = PPO.replace('ro:', graph.namespaces['ro'])
a, b = creatTree(*Query(tc.red('birnlex_796'), HPP, 'OUTGOING', 10), # FIXME seems to be a last one wins bug here with birnlex_796 vs NIFGA:birnlex_796 depending on the has seed...
json=graph.make_scigraph_json(HPP))
c, d = creatTree(*Query('NIFGA:birnlex_796', hpp, 'OUTGOING', 10), graph=sgg)
j = bridge.make_scigraph_json(HPP) # issue https://github.com/RDFLib/rdflib/pull/661
e, f = creatTree(*Query('UBERON:0000955', HPP, 'OUTGOING', 10), json=j)
k_, l_ = creatTree(*Query('NIFGA:nlx_anat_101177', ppo, 'INCOMING', 10), graph=sgg)
merge = dict(d[-1]) # full tree with ppo converted to hpp
merge['nodes'].extend(l_[-1]['nodes'])
merge['edges'].extend([{'sub':e['obj'], 'pred':hpp, 'obj':e['sub']} for e in l_[-1]['edges']])
m_, n_ = creatTree(*Query('NIFGA:birnlex_796', hpp, 'OUTGOING', 10), json=merge)
print('nifga dep')
print(a)
print('nifga live')
print(c)
print('new bridge')
print(e)
print('nifga total (both directions)')
print(m_)
print('nifga white matter')
print(k_)
return a, b, c, d, e, f, k_, l_, m_, n_
def new_replaced_by(ids, existing):
out = {}
for k in ids:
if k in existing:
out[k] = existing[k]
else:
out[k] = None
return out
def make_uberon_graph():
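    # Parses the uberon-bridge-to-nifstd OWL and builds a NIF -> UBERON map
    # from its owl:equivalentClass axioms.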
#ub = rdflib.Graph()
#ub.parse(uberon_path) # LOL rdflib your parser is slow
SANITY = rdflib.Graph()
ont = requests.get(uberon_bridge_path).text
split_on = 263
prefs = ('xmlns:NIFSTD="http://uri.neuinfo.org/nif/nifstd/"\n'
'xmlns:UBERON="http://purl.obolibrary.org/obo/UBERON_"\n')
ont = ont[:split_on] + prefs + ont[split_on:]
SANITY.parse(data=ont)
u_replaced_by = {}
for s, o in SANITY.subject_objects(OWL.equivalentClass):
nif = SANITY.namespace_manager.qname(o)
uberon = SANITY.namespace_manager.qname(s)
        if nif in u_replaced_by:
            one = u_replaced_by[nif]
            u_replaced_by[nif] = one, uberon
            print('WE GOT DUPES', nif, one, uberon)  # TODO
        else:
            u_replaced_by[nif] = uberon
#print(s, o)
#print(nif, uberon)
return u_replaced_by
def make_neurolex_graph(u_replaced_by):
    # neurolex test stuff; the NIF -> UBERON map comes from make_uberon_graph()
nlxpref = {'ilx':'http://uri.interlex.org/base/'}
nlxpref.update(NIFPREFIXES)
neurolex = makeGraph('neurolex-temp', nlxpref)
neurolex.g.parse('/tmp/neurolex_basic.ttl', format='turtle')
ILXPO = 'ilx:partOf'
nj = neurolex.make_scigraph_json(ILXPO)
g_, h = creatTree(*Query('NIFGA:birnlex_796', ILXPO, 'INCOMING', 10), json=nj)
i_, j_ = creatTree(*Query('NIFGA:nlx_412', ILXPO, 'INCOMING', 10), json=nj)
brht = sorted(set(flatten(h[0],[])))
wmht = sorted(set(flatten(j_[0],[])))
ufixedrb = {'NIFGA:' + k.split(':')[1]:v for k, v in u_replaced_by.items()}
b_nlx_replaced_by = new_replaced_by(brht, ufixedrb)
w_nlx_replaced_by = new_replaced_by(wmht, ufixedrb)
additional_edges = defaultdict(list) # TODO this could be fun for the future but is a nightmare atm
for edge in h[-1]['edges'] + j_[-1]['edges']:
additional_edges[edge['sub']] = edge
additional_edges[edge['obj']] = edge
    #filter out bad edges because we are lazy
additional_edges = {k:v for k, v in additional_edges.items()
if k in b_nlx_replaced_by or k in w_nlx_replaced_by}
print('neurolex tree') # computed above
print(g_)
print(i_)
return additional_edges
def do_report(nif_bridge, ub_bridge, irbcs):
report = {}
for existing_id, nif_uberon_id in nif_bridge.items():
cr = {}
cr['UDEP'] = ''
if nif_uberon_id == 'NOREP':
cr['NRID'] = ''
else:
cr['NRID'] = nif_uberon_id
if 'NIFGA:' + existing_id in manual:
cr['URID'] = ''
if nif_uberon_id == 'NOREP':
match = False
else:
match = 'MANUAL'
elif existing_id in ub_bridge:
ub_uberon_id = ub_bridge[existing_id]
cr['URID'] = ub_uberon_id
if type(nif_uberon_id) == tuple:
if ub_uberon_id in nif_uberon_id:
match = True
else:
match = False
elif ub_uberon_id != nif_uberon_id:
match = False
else:
match = True
elif 'NIFGA:' + existing_id in irbcs:
er, ub = irbcs['NIFGA:' + existing_id]
cr['NRID'] = er
cr['URID'] = ub
match = 'EXISTING REPLACED BY (%s -> %s -> %s)' % (existing_id, er, ub)
else:
match = False
cr['URID'] = ''
if cr['NRID']:
meta = sgg.getNode(nif_uberon_id)['nodes'][0]['meta']
if 'http://www.w3.org/2002/07/owl#deprecated' in meta and meta['http://www.w3.org/2002/07/owl#deprecated']:
cr['UDEP'] = 'Deprecated'
cr['MATCH'] = match
report[existing_id] = cr
return report
def make_nifga_graph(_doprint=False):
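    # Loads NIF-GrossAnatomy and, for every class, resolves a replacement via
    # equivalentClass edges, existing isReplacedByClass pointers, or label
    # matching against UBERON terms.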
# use equivalent class mappings to build a replacement mapping
g = rdflib.Graph()
g.parse(nifga_path, format='turtle')
getQname = g.namespace_manager.qname
classes = sorted([getQname(_) for _ in g.subjects(RDF.type, OWL.Class) if type(_) is URIRef])
curies = ['NIFGA:' + n for n in classes if ':' not in n]
matches = async_getter(sgv.findById, [(c,) for c in curies])
replaced_by = {}
exact = {}
internal_equivs = {}
irbcs = {}
def equiv(curie, label):
if curie in manual:
replaced_by[curie] = manual[curie]
return manual[curie]
ec = sgg.getNeighbors(curie, relationshipType='equivalentClass')
nodes = [n for n in ec['nodes'] if n['id'] != curie]
if len(nodes) > 1:
#print('wtf node', [n['id'] for n in nodes], curie)
for node in nodes:
id_ = node['id']
label_ = node['lbl']
if id_.startswith('UBERON'):
if curie in replaced_by:
one = replaced_by[curie]
replaced_by[curie] = one, id_
print('WE GOT DUPES', curie, label, one, id_) # TODO
else:
replaced_by[curie] = id_
else:
internal_equivs[curie] = id_
elif not nodes:
node = sgg.getNode(curie)['nodes'][0]
if OWL.deprecated.toPython() in node['meta']:
print('THIS CLASS IS DEPRECATED', curie)
lbl = node['lbl']
if lbl.startswith('Predominantly white regional') or lbl.startswith('Predominantly gray regional'):
print('\tHE\'S DEAD JIM!', lbl, node['id'])
replaced_by[curie] = 'NOREP'
if IRBC in node['meta']:
existing_replaced = node['meta'][IRBC][0]
ec2 = sgg.getNeighbors(existing_replaced, relationshipType='equivalentClass')
print('\tFOUND ONE', existing_replaced)
#scigPrint.pprint_node(sgg.getNode(existing_replaced))
if ec2['edges']: # pass the buck if we can
print('\t',end='')
scigPrint.pprint_edge(ec2['edges'][0])
rb = ec2['edges'][0]['obj']
print('\tPASSING BUCK : (%s -> %s -> %s)' % (curie, existing_replaced, rb))
irbcs[curie] = (existing_replaced, rb)
replaced_by[curie] = rb
return nodes
else:
er_node = sgv.findById(existing_replaced)
if not er_node['deprecated']:
if not er_node['curie'].startswith('NIFGA:'):
print('\tPASSING BUCK : (%s -> %s)' % (curie, er_node['curie']))
return nodes
print('\tERROR: could not pass buck, we are at a dead end at', er_node) # TODO
print()
moar = [t for t in sgv.findByTerm(label) if t['curie'].startswith('UBERON')]
if moar:
#print(moar)
#replaced_by[curie] = moar[0]['curie']
if len(moar) > 1:
print('WARNING', curie, label, [(m['curie'], m['labels'][0]) for m in moar])
for node in moar:
#if node['curie'] in uberon_obsolete: # node['deprecated']?
#continue
ns = sgg.getNode(node['curie'])
assert len(ns['nodes']) == 1, "WTF IS GOING ON %s" % node['curie']
ns = ns['nodes'][0]
if _doprint:
print('Found putative replacement in moar: (%s -> %s)' % (curie, ns['id']))
if DBX in ns['meta']:
print(' ' * 8, node['curie'], ns['meta'][DBX],
node['labels'][0], node['synonyms'])
if AID in ns['meta']:
print(' ' * 8, node['curie'], ns['meta'][AID],
node['labels'][0], node['synonyms'])
if CON in ns['meta']:
print(' ' * 8, node['curie'], ns['meta'][CON],
node['labels'][0], node['synonyms'])
replaced_by[curie] = ns['id']
else:
replaced_by[curie] = None
if False: # review
print('NO FORWARD EQUIV', tc.red(curie), label) # TODO
for k,v in sorted(sgg.getNode(curie)['nodes'][0]['meta'].items()):
if type(v) == iter:
print(' ' * 4, k)
for _ in v:
print(' ' * 8, _)
else:
print(' ' * 4, k, v)
else:
node = nodes[0]
replaced_by[curie] = node['id']
exact[curie] = node['id']
return nodes
equivs = [equiv(c['curie'], c['labels'][0]) for c in matches] # async causes print issues :/
return g, matches, exact, internal_equivs, irbcs, replaced_by
def main():
    u_replaced_by = make_uberon_graph()
    additional_edges = make_neurolex_graph(u_replaced_by)
g, matches, exact, internal_equivs, irbcs, replaced_by = make_nifga_graph()
#review_norep([m['curie'] for m in matches if m['deprecated']])
#review_reps(exact) # these all look good
#review_reps(replaced_by) # as do these
#rpob = [_['id'] for _ in sgg.getNeighbors('NIFGA:birnlex_1167', relationshipType='subClassOf')['nodes'] if 'UBERON:' not in _['id']] # these hit pretty much everything because of how the subclassing worked out, so can't use this
regional_no_replace = {k:v for k,v in replaced_by.items() if not v and sgv.findById(k)['labels'][0].startswith('Regional')}
for k in regional_no_replace:
replaced_by[k] = 'NOREP' # yes, deprecate these
#or sgv.findById(k)['labels'][0].startswith('Predominantly white regional')
#or sgv.findById(k)['labels'][0].startswith('Predominantly gray regional')
# TODO predominately gray region -> just deprecate completely these cause pretty much all of the no_match problems
# predominantly white regional part
# TODO add comments in do_deprecation
asdf = {}
for n, u in replaced_by.items():
if u in asdf:
asdf[u].add(n)
else:
asdf[u] = {n}
deprecated = [_ for _ in replaced_by if sgv.findById(_)['deprecated']]
multi = {k:v for k, v in asdf.items() if len(v) > 1}
conflated = {k:[_ for _ in v if _ not in deprecated] for k, v in multi.items() if len([_ for _ in v if _ not in deprecated]) > 1 and k != 'NOREP'}
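    # 'conflated' = UBERON ids that several still-live NIFGA classes map onto;
    # it feeds do_deprecation's (currently disabled) prefLabel-porting logic.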
#_ = [print(k, sgv.findById(k)['labels'][0], '\n\t', [(_, sgv.findById(_)['labels'][0]) for _ in v]) for k, v in sorted(conflated.items())]
graph, bridge, uedges = do_deprecation(replaced_by, g, {}, conflated) # additional_edges) # TODO
bridge.write()
graph.write()
#trees = print_trees(graph, bridge)
# we do this because each of these have different prefixes :(
nif_bridge = {k.split(':')[1]:v for k, v in replaced_by.items()} # some are still None
ub_bridge = {k.split(':')[1]:v for k, v in u_replaced_by.items()}
report = do_report(nif_bridge, ub_bridge, irbcs)
double_checked = {i:r for i, r in report.items() if r['MATCH']} # aka exact from above
dc_erb = {k:v for k, v in double_checked.items() if v['NRID'] != v['URID']}
no_match = {i:r for i, r in report.items() if not r['MATCH']}
no_match_udep = {i:r for i, r in no_match.items() if r['UDEP']}
no_match_not_udep = {i:r for i, r in no_match.items() if not r['UDEP']}
no_match_not_udep_region = {i:r for i, r in no_match.items()
if not r['UDEP'] and (
sgv.findById('NIFGA:' + i)['labels'][0].startswith('Regional') or
sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly gray regional') or
sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly white regional')
)}
no_match_not_udep_not_region = {i:r for i, r in no_match.items()
if not r['UDEP'] and (
not sgv.findById('NIFGA:' + i)['labels'][0].startswith('Regional') and
not sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly gray regional') and
not sgv.findById('NIFGA:' + i)['labels'][0].startswith('Predominantly white regional')
)}
no_replacement = {i:r for i, r in report.items() if not r['NRID']}
very_bad = {i:r for i, r in report.items() if not r['MATCH'] and r['URID'] and not r['UDEP']}
fetch = True
#print('\n>>>>>>>>>>>>>>>>>>>>>> No match uberon dep reports\n')
#print_report(no_match_udep, fetch) # These are all dealt with correctly in do_deprecation
print('\n>>>>>>>>>>>>>>>>>>>>>> Existing Replaced by\n')
#print_report(dc_erb)
#print('\n>>>>>>>>>>>>>>>>>>>>>> No match not dep reports\n')
#print_report(no_match_not_udep, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No match not dep +region reports\n')
#print_report(no_match_not_udep_region, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No match not dep -region reports\n')
#print_report(no_match_not_udep_not_region, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No replace reports\n')
#print_report(no_replacement, fetch)
print('\n>>>>>>>>>>>>>>>>>>>>>> No match and not deprecated reports\n')
#print_report(very_bad, fetch)
print('Total count', len(nif_bridge))
print('Match count', len(double_checked))
print('No Match count', len(no_match))
print('No Match +udep count', len(no_match_udep))
print('No Match -udep count', len(no_match_not_udep))
print('No Match -udep +region count', len(no_match_not_udep_region))
print('No Match -udep -region count', len(no_match_not_udep_not_region))
print('No replace count', len(no_replacement)) # there are none with a URID and no NRID
print('No match not deprecated count', len(very_bad))
print('Mismatch between No match and No replace', set(no_match_not_udep) ^ set(no_replacement))
assert len(nif_bridge) == len(double_checked) + len(no_match)
assert len(no_match) == len(no_match_udep) + len(no_match_not_udep)
assert len(no_match_not_udep) == len(no_match_not_udep_region) + len(no_match_not_udep_not_region)
#[scigPrint.pprint_node(sgg.getNode('NIFGA:' + _)) for _ in no_match_not_udep_not_region]
#print('>>>>>>>>>>>>> Deprecated')
#[scigPrint.pprint_node(sgg.getNode('NIFGA:' + _))
#for _ in no_match_not_udep_not_region if sgv.findById('NIFGA:' + _)['deprecated']]
#print('>>>>>>>>>>>>> Not deprecated')
#[scigPrint.pprint_node(sgg.getNode('NIFGA:' + _))
#for _ in sorted(no_match_not_udep_not_region) if not sgv.findById('NIFGA:' + _)['deprecated']]
embed()
if __name__ == '__main__':
main()

rspavel/spack | var/spack/repos/builtin/packages/r-interactivedisplaybase/package.py | lgpl-2.1

# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RInteractivedisplaybase(RPackage):
"""Base package for enabling powerful shiny web displays of Bioconductor
objects.
The interactiveDisplayBase package contains the the basic methods needed
to generate interactive Shiny based display methods for Bioconductor
objects."""
homepage = "https://bioconductor.org/packages/interactiveDisplayBase"
git = "https://git.bioconductor.org/packages/interactiveDisplayBase.git"
version('1.22.0', commit='4ce3cde1dabc01375c153ad614d77a5e28b96916')
version('1.20.0', commit='f40912c8af7afbaaf68c003a6e148d81cbe84df6')
version('1.18.0', commit='d07ea72a595877f27bf054f664f23e8f0304def8')
version('1.16.0', commit='a86aa586b589497f5449d36c2ce67a6b6055026d')
version('1.14.0', commit='e2ccc7eefdd904e3b1032dc6b3f4a28d08c1cd40')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-shiny', type=('build', 'run'))

ramses-tech/nefertari | nefertari/__init__.py | apache-2.0

import logging
from pkg_resources import get_distribution
APP_NAME = __package__.split('.')[0]
_DIST = get_distribution(APP_NAME)
PROJECTDIR = _DIST.location
__version__ = _DIST.version
log = logging.getLogger(__name__)
RESERVED_PARAMS = [
'_start',
'_limit',
'_page',
'_fields',
'_count',
'_sort',
'_search_fields',
'_refresh_index',
]
def includeme(config):
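    # Pyramid inclusion hook: registers renderers, resource registries, the
    # cache-control tween and the event subscriber predicates.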
from nefertari.resource import get_root_resource, get_resource_map
from nefertari.renderers import (
JsonRendererFactory, NefertariJsonRendererFactory)
from nefertari.utils import dictset
from nefertari.events import (
ModelClassIs, FieldIsChanged, subscribe_to_events,
add_field_processors)
log.info("%s %s" % (APP_NAME, __version__))
config.add_directive('get_root_resource', get_root_resource)
config.add_directive('subscribe_to_events', subscribe_to_events)
config.add_directive('add_field_processors', add_field_processors)
config.add_renderer('json', JsonRendererFactory)
config.add_renderer('nefertari_json', NefertariJsonRendererFactory)
if not hasattr(config.registry, '_root_resources'):
config.registry._root_resources = {}
if not hasattr(config.registry, '_resources_map'):
config.registry._resources_map = {}
# Map of {ModelName: model_collection_resource}
if not hasattr(config.registry, '_model_collections'):
config.registry._model_collections = {}
config.add_request_method(get_resource_map, 'resource_map', reify=True)
config.add_tween('nefertari.tweens.cache_control')
config.add_subscriber_predicate('model', ModelClassIs)
config.add_subscriber_predicate('field', FieldIsChanged)
Settings = dictset(config.registry.settings)
root = config.get_root_resource()
root.auth = Settings.asbool('auth')

dvitme/odoo-addons | partner_user/__openerp__.py | agpl-3.0

# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': 'ADHOC SA',
'auto_install': False,
'installable': True,
'category': 'Tools',
'demo_xml': [
],
'depends': [
'base',
'mail'
],
'description': """
Partners User
=============
Add partner user related fields on the partner model and show them in the partner view. Also adds an action that allows quick creation of a user.
For the quick creation you must set a "template user" for the partner; you can do this via context or by making the field visible.
""",
'license': 'AGPL-3',
'name': u'Partner User',
'test': [],
'data': [
'partner_view.xml',
'security/ir.model.access.csv',
],
'version': '8.0.1.1.0',
'website': 'www.adhoc.com.ar',
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

foospidy/DbDat | plugins/oracle/check_privilege_revoke_all_context.py | gpl-2.0

class check_privilege_revoke_all_context():
"""
check_privilege_revoke_all_context
Ensure 'ALL' Is Revoked from Unauthorized 'GRANTEE' on CONTEXT$
The CONTEXT$ table contains columns for the schema and name for a PL/SQL
package that will execute for a given application context.
"""
# References:
# http://www.davidlitchfield.com/AddendumtotheOracle12cCISGuidelines.pdf
# http://www.davidlitchfield.com/oracle_backdoors.pdf
TITLE = 'Revoke ALL from CONTEXT$'
CATEGORY = 'Privilege'
TYPE = 'sql'
SQL = "SELECT GRANTEE, PRIVILEGE FROM DBA_TAB_PRIVS WHERE TABLE_NAME = 'CONTEXT$'"
verbose = False
skip = False
result = {}
def do_check(self, *results):
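        # Flag RED if any grantee holds any privilege on CONTEXT$.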
self.result['level'] = 'GREEN'
output = ''
for rows in results:
for row in rows:
self.result['level'] = 'RED'
                output += row[0] + ' with ' + row[1] + ' on CONTEXT$\n'
if 'GREEN' == self.result['level']:
output = 'No user with grants to CONTEXT$.'
self.result['output'] = output
return self.result
def __init__(self, parent):
print('Performing check: ' + self.TITLE)

markus61/selfstoredict | selfstoredict.py | mit

"""SelfStoreDict for Python.
Author: markus schulte <[email protected]>
The module provides a subclassed dictionary that saves itself to a JSON file or redis-key whenever changed or when used
within a context.
"""
import json
from os.path import getmtime
from datetime import datetime, timedelta
from pathlib import Path
def adapt(parent, elem=None):
"""
called whenever a dict or list is added. needed in order to let SelfStoreDict know about changes happening to its
childs.
:param parent: the parent object of the to be constructed one. parent should always be off type SelfStorageDict and
should always be the root object.
:param elem: the element added to SelfStoreDict or it's childs
:return: the elem, converted to a subclass of dict or list that notifies it's parent
"""
if isinstance(elem, list):
return ChildList(parent, elem)
if isinstance(elem, dict):
return ChildDict(parent, elem)
return elem
class ChildList(list):
"""
a subclass of list that notifies self.parent about any change to its members
"""
def __init__(self, parent, li=None):
super(ChildList, self).__init__()
if li is None:
li = list()
self.parent = parent
for v in li:
self.append(v)
if not li:
self.parent.save()
def append(self, v):
v = adapt(self.parent, v)
super(ChildList, self).append(v)
self.parent.save()
def extend(self, v):
v = adapt(self.parent, v)
super(ChildList, self).extend(v)
self.parent.save()
def insert(self, i, v):
v = adapt(self.parent, v)
super(ChildList, self).insert(i, v)
self.parent.save()
def remove(self, v):
v = adapt(self.parent, v)
super(ChildList, self).remove(v)
self.parent.save()
    def pop(self, i=-1):  # default to the last element, matching list.pop
r = super(ChildList, self).pop(i)
self.parent.save()
return r
def clear(self):
super(ChildList, self).clear()
self.parent.save()
def __setitem__(self, k, v):
v = adapt(self.parent, v)
super(ChildList, self).__setitem__(k, v)
self.parent.save()
class ChildDict(dict):
"""
a subclass of dict that notifies self.parent about any change to its members
"""
def __init__(self, parent, d=None):
super(ChildDict, self).__init__()
if d is None:
d = dict()
self.parent = parent
for k, v in d.items():
self[k] = v
if d != {}:
self.parent.save()
def __setitem__(self, k, v):
v = adapt(self.parent, v)
super(ChildDict, self).__setitem__(k, v)
self.parent.save()
def __delitem__(self, k):
super(ChildDict, self).__delitem__(k)
self.parent.save()
def setdefault(self, k, v=None):
v = adapt(self.parent, v)
v = super(ChildDict, self).setdefault(k, v)
self.parent.save()
return v
def clear(self):
super(ChildDict, self).clear()
self.parent.save()
class FileContainer(object):
def __init__(self, path):
self.path = path
def save(self, data):
with open(self.path, "w") as fp:
json.dump(data.copy(), fp)
def load(self):
try:
with open(self.path) as fp:
for k, v in json.load(fp).items():
yield [k, v]
except FileNotFoundError:
raise FileNotFoundError
def touch(self):
Path(self.path).touch()
@property
def modified(self):
return int(getmtime(self.path))
class RedisContainer(object):
def __init__(self, key, redis):
self.key = key
self.redis = redis
self.f = 9223370527000000
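        # An "effectively never" TTL in seconds; 'modified' below derives the
        # last-write time from how much of this TTL has elapsed.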
def save(self, data):
self.redis.set(self.key, json.dumps(data.copy()))
self.redis.expire(self.key, self.f)
def load(self):
data = self.redis.get(self.key)
try:
jdata = json.loads(data)
except TypeError:
return
try:
for k, v in jdata.items():
yield [k, v]
except FileNotFoundError:
raise FileNotFoundError
def touch(self):
self.redis.expire(self.key, self.f)
@property
def modified(self):
ttl = self.redis.ttl(self.key)
if ttl is None:
return
delta = timedelta(seconds=self.f - ttl)
return int((datetime.now() - delta).timestamp())
class SelfStoreDict(ChildDict):
"""
This class acts like a dict but constructs all attributes from JSON. please note: it is a subclass of 'ChildDict'
but always the parent.
call the constructor with a path or a redis connection
you may add an optional initial value as a dict
"""
def __init__(self, path, data=None, redis=None):
self._saves_ = 0
self._context_ = False
self._inactive_ = True
self.parent = self
# check if there is a redis object
if redis is not None:
self.sc = RedisContainer(path, redis=redis)
else:
self.sc = FileContainer(path)
self._path_ = path
super(SelfStoreDict, self).__init__(self, data)
if data is not None:
self._inactive_ = False
self.save()
else:
self._load()
self._inactive_ = False
def _inc_saves(self):
self._saves_ += 1
def _savenow(self):
if self._inactive_:
return False
if self._context_:
return False
return True
def save(self):
if self._savenow():
self.sc.save(self.copy())
self._inc_saves()
return
@property
def saves(self):
return self._saves_
@property
def modified(self):
return self.sc.modified
def touch(self):
self.sc.touch()
def __enter__(self):
self._context_ = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._context_ = False
self._inactive_ = False
self.save()
def _load(self):
"""
called by '@path.setter' to load dict.
:return: None
"""
try:
for k, v in self.sc.load():
self[k] = v
except FileNotFoundError:
pass

jdeligt/Genetics | add_metadata_to_delly_manta_vcf.py | mit

#!/usr/bin/env python
"""
add_metadata_to_delly_manta_vcf.py
##INFO=<ID=SOMATIC is automatically added after i) the delly somatic filtering step ii) manta somatic calling
"""
import sys
import argparse
import re
import vcf
def add_meta2vcf(vcf_file):
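    # Streams the VCF: header lines are collected and re-emitted, the caller
    # version is read from a manta '##cmdline' header or a delly SVMETHOD INFO
    # tag, and each non-delly record gets an SVMETHOD INFO entry appended.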
try:
f = open(vcf_file, 'r')
except IOError:
sys.exit("Error: Can't open vcf file: {0}".format(vcf_file))
else:
with f:
vcf_header = []
vcf_chrom = []
vcf_variants = []
countline = 0
version = ""
for line in f:
countline = countline + 1
line = line.strip('\n')
if line.startswith('##'):
##Print original vcf meta-information lines##
vcf_header.append(line)
if line.startswith('##cmdline'):
find_manta = re.findall('(manta_\w+\.\w+\.\w+)', line)
version = find_manta[0]
                ## print header lines and add meta-information line with caller info to vcf
elif line.startswith("#CHROM"):
vcf_chrom.append(line)
countline = 0
else:
variant = line.split('\t')
if countline == 1:
find_delly = re.findall('(EMBL.DELLYv\w+\.\w+.\w+)', variant[7])
if not version:
version = find_delly[0]
if not "DELLY" in variant[7]:
variant[7] = variant[7]+";SVMETHOD={0}".format(find_manta[0])
vcf_variants.append("\t".join(variant))
print "\n".join(vcf_header)
print "##INFO=<ID=caller={0}".format(version)
print "\n".join(vcf_chrom)
print "\n".join(vcf_variants)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'add metadata to a SV VCF file')
required_named = parser.add_argument_group('required named arguments')
required_named.add_argument('-v', '--vcf_file', help='path/to/file.vcf', required=True)
args = parser.parse_args()
add_meta2vcf(args.vcf_file)

mvaled/sentry | src/sentry/api/issue_search.py | bsd-3-clause

from __future__ import absolute_import
from django.utils.functional import cached_property
from parsimonious.exceptions import IncompleteParseError
from sentry.api.event_search import (
event_search_grammar,
InvalidSearchQuery,
SearchFilter,
SearchKey,
SearchValue,
SearchVisitor,
)
from sentry.constants import STATUS_CHOICES
from sentry.search.utils import (
parse_actor_value,
parse_user_value,
parse_release,
parse_status_value,
)
class IssueSearchVisitor(SearchVisitor):
key_mappings = {
"assigned_to": ["assigned"],
"bookmarked_by": ["bookmarks"],
"subscribed_by": ["subscribed"],
"first_release": ["first-release", "firstRelease"],
"first_seen": ["age", "firstSeen"],
"last_seen": ["lastSeen"],
"active_at": ["activeSince"],
# TODO: Special case this in the backends, since they currently rely
# on date_from and date_to explicitly
"date": ["event.timestamp"],
"times_seen": ["timesSeen"],
"sentry:dist": ["dist"],
}
numeric_keys = SearchVisitor.numeric_keys.union(["times_seen"])
date_keys = SearchVisitor.date_keys.union(["active_at", "date"])
@cached_property
def is_filter_translators(self):
is_filter_translators = {
"assigned": (SearchKey("unassigned"), SearchValue(False)),
"unassigned": (SearchKey("unassigned"), SearchValue(True)),
}
for status_key, status_value in STATUS_CHOICES.items():
is_filter_translators[status_key] = (SearchKey("status"), SearchValue(status_value))
return is_filter_translators
def visit_is_filter(self, node, children):
# the key is "is" here, which we don't need
negation, _, _, search_value = children
if search_value.raw_value not in self.is_filter_translators:
raise InvalidSearchQuery(
'Invalid value for "is" search, valid values are {}'.format(
sorted(self.is_filter_translators.keys())
)
)
search_key, search_value = self.is_filter_translators[search_value.raw_value]
operator = "!=" if self.is_negated(negation) else "="
return SearchFilter(search_key, operator, search_value)
def visit_boolean_operator(self, node, children):
raise InvalidSearchQuery(
'Boolean statements containing "OR" or "AND" are not supported in this search'
)
def parse_search_query(query):
try:
tree = event_search_grammar.parse(query)
except IncompleteParseError as e:
raise InvalidSearchQuery(
"%s %s"
% (
u"Parse error: %r (column %d)." % (e.expr.name, e.column()),
"This is commonly caused by unmatched-parentheses. Enclose any text in double quotes.",
)
)
return IssueSearchVisitor().visit(tree)
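# A small illustration of the expected behaviour (the query string and the exact
# SearchFilter contents shown here are assumptions for readability, not fixtures):
#
#   filters = parse_search_query('is:unresolved timesSeen:>10')
#   # -> [SearchFilter(key=SearchKey('status'), operator='=', value=SearchValue(...)),
#   #     SearchFilter(key=SearchKey('times_seen'), operator='>', value=SearchValue(10))]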
def convert_actor_value(value, projects, user, environments):
return parse_actor_value(projects, value, user)
def convert_user_value(value, projects, user, environments):
return parse_user_value(value, user)
def convert_release_value(value, projects, user, environments):
return parse_release(value, projects, environments)
def convert_status_value(value, projects, user, environments):
try:
return parse_status_value(value)
except ValueError:
raise InvalidSearchQuery(u"invalid status value of '{}'".format(value))
value_converters = {
"assigned_to": convert_actor_value,
"bookmarked_by": convert_user_value,
"subscribed_by": convert_user_value,
"first_release": convert_release_value,
"release": convert_release_value,
"status": convert_status_value,
}
def convert_query_values(search_filters, projects, user, environments):
"""
Accepts a collection of SearchFilter objects and converts their values into
a specific format, based on converters specified in `value_converters`.
:param search_filters: Collection of `SearchFilter` objects.
:param projects: List of projects being searched across
:param user: The user making the search
:return: New collection of `SearchFilters`, which may have converted values.
"""
def convert_search_filter(search_filter):
if search_filter.key.name in value_converters:
converter = value_converters[search_filter.key.name]
new_value = converter(search_filter.value.raw_value, projects, user, environments)
search_filter = search_filter._replace(value=SearchValue(new_value))
return search_filter
return map(convert_search_filter, search_filters)
| bsd-3-clause | 228,605,746,677,789,060 | 33.932836 | 103 | 0.656056 | false | 4.045808 | false | false | false |
lpramuk/robottelo | tests/foreman/cli/test_usergroup.py | 1 | 16308 | # -*- encoding: utf-8 -*-
"""Test class for User Group CLI
:Requirement: Usergroup
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UsersRoles
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import random
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import make_ldap_auth_source
from robottelo.cli.factory import make_role
from robottelo.cli.factory import make_user
from robottelo.cli.factory import make_usergroup
from robottelo.cli.factory import make_usergroup_external
from robottelo.cli.ldapauthsource import LDAPAuthSource
from robottelo.cli.task import Task
from robottelo.cli.user import User
from robottelo.cli.usergroup import UserGroup
from robottelo.cli.usergroup import UserGroupExternal
from robottelo.config import settings
from robottelo.constants import LDAP_ATTR
from robottelo.constants import LDAP_SERVER_TYPE
from robottelo.datafactory import gen_string
from robottelo.datafactory import valid_data_list
from robottelo.decorators import run_in_one_thread
from robottelo.decorators import skip_if_not_set
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import upgrade
from robottelo.test import CLITestCase
class UserGroupTestCase(CLITestCase):
"""User group CLI related tests."""
@tier1
def test_positive_CRUD(self):
"""Create new user group with valid elements that attached group.
List the user group, update and delete it.
:id: bacef0e3-31dd-4991-93f7-f54fbe64d0f0
:expectedresults: User group is created, listed, updated and
deleted successfully.
:CaseImportance: Critical
"""
user = make_user()
ug_name = random.choice(valid_data_list())
role_name = random.choice(valid_data_list())
role = make_role({'name': role_name})
sub_user_group = make_usergroup()
# Create
user_group = make_usergroup(
{
'user-ids': user['id'],
'name': ug_name,
'role-ids': role['id'],
'user-group-ids': sub_user_group['id'],
}
)
self.assertEqual(user_group['name'], ug_name)
self.assertEqual(user_group['users'][0], user['login'])
self.assertEqual(len(user_group['roles']), 1)
self.assertEqual(user_group['roles'][0], role_name)
self.assertEqual(user_group['user-groups'][0]['usergroup'], sub_user_group['name'])
# List
result_list = UserGroup.list({'search': 'name={0}'.format(user_group['name'])})
self.assertTrue(len(result_list) > 0)
self.assertTrue(UserGroup.exists(search=('name', user_group['name'])))
# Update
new_name = random.choice(valid_data_list())
UserGroup.update({'id': user_group['id'], 'new-name': new_name})
user_group = UserGroup.info({'id': user_group['id']})
self.assertEqual(user_group['name'], new_name)
# Delete
UserGroup.delete({'name': user_group['name']})
with self.assertRaises(CLIReturnCodeError):
UserGroup.info({'name': user_group['name']})
@tier1
def test_positive_create_with_multiple_elements(self):
"""Create new user group using multiple users, roles and user
groups attached to that group.
:id: 3b0a3c3c-aab2-4e8a-b043-7462621c7333
:expectedresults: User group is created successfully and contains all
expected elements.
:CaseImportance: Critical
"""
count = 2
users = [make_user()['login'] for _ in range(count)]
roles = [make_role()['name'] for _ in range(count)]
sub_user_groups = [make_usergroup()['name'] for _ in range(count)]
user_group = make_usergroup(
{'users': users, 'roles': roles, 'user-groups': sub_user_groups}
)
self.assertEqual(sorted(users), sorted(user_group['users']))
self.assertEqual(sorted(roles), sorted(user_group['roles']))
self.assertEqual(
sorted(sub_user_groups), sorted([ug['usergroup'] for ug in user_group['user-groups']]),
)
@tier2
def test_positive_add_and_remove_elements(self):
"""Create new user group. Add and remove several element from the group.
:id: a4ce8724-d3c8-4c00-9421-aaa40394134d
:BZ: 1395229
:expectedresults: Elements are added to user group and then removed
successfully.
:CaseLevel: Integration
"""
role = make_role()
user_group = make_usergroup()
user = make_user()
sub_user_group = make_usergroup()
# Add elements by id
UserGroup.add_role({'id': user_group['id'], 'role-id': role['id']})
UserGroup.add_user({'id': user_group['id'], 'user-id': user['id']})
UserGroup.add_user_group({'id': user_group['id'], 'user-group-id': sub_user_group['id']})
user_group = UserGroup.info({'id': user_group['id']})
self.assertEqual(len(user_group['roles']), 1)
self.assertEqual(user_group['roles'][0], role['name'])
self.assertEqual(len(user_group['users']), 1)
self.assertEqual(user_group['users'][0], user['login'])
self.assertEqual(len(user_group['user-groups']), 1)
self.assertEqual(user_group['user-groups'][0]['usergroup'], sub_user_group['name'])
# Remove elements by name
UserGroup.remove_role({'id': user_group['id'], 'role': role['name']})
UserGroup.remove_user({'id': user_group['id'], 'user': user['login']})
UserGroup.remove_user_group({'id': user_group['id'], 'user-group': sub_user_group['name']})
user_group = UserGroup.info({'id': user_group['id']})
self.assertEqual(len(user_group['roles']), 0)
self.assertEqual(len(user_group['users']), 0)
self.assertEqual(len(user_group['user-groups']), 0)
@tier2
@upgrade
def test_positive_remove_user_assigned_to_usergroup(self):
"""Create new user and assign it to user group. Then remove that user.
:id: 2a2623ce-4723-4402-aae7-8675473fd8bd
:expectedresults: User should delete successfully.
:CaseLevel: Integration
:BZ: 1667704
"""
user = make_user()
user_group = make_usergroup()
UserGroup.add_user({'id': user_group['id'], 'user-id': user['id']})
with self.assertNotRaises(CLIReturnCodeError):
User.delete({'id': user['id']})
@run_in_one_thread
class ActiveDirectoryUserGroupTestCase(CLITestCase):
"""Implements Active Directory feature tests for user groups in CLI."""
@classmethod
@skip_if_not_set('ldap')
def setUpClass(cls):
"""Read settings and create LDAP auth source that can be re-used in
tests."""
super(ActiveDirectoryUserGroupTestCase, cls).setUpClass()
cls.ldap_user_name = settings.ldap.username
cls.ldap_user_passwd = settings.ldap.password
cls.base_dn = settings.ldap.basedn
cls.group_base_dn = settings.ldap.grpbasedn
cls.ldap_hostname = settings.ldap.hostname
cls.auth = make_ldap_auth_source(
{
'name': gen_string('alpha'),
'onthefly-register': 'true',
'host': cls.ldap_hostname,
'server-type': LDAP_SERVER_TYPE['CLI']['ad'],
'attr-login': LDAP_ATTR['login_ad'],
'attr-firstname': LDAP_ATTR['firstname'],
'attr-lastname': LDAP_ATTR['surname'],
'attr-mail': LDAP_ATTR['mail'],
'account': cls.ldap_user_name,
'account-password': cls.ldap_user_passwd,
'base-dn': cls.base_dn,
'groups-base': cls.group_base_dn,
}
)
def setUp(self):
"""Create new usergroup per each test"""
super(ActiveDirectoryUserGroupTestCase, self).setUp()
self.user_group = make_usergroup()
def tearDown(self):
"""Delete usergroup per each test"""
for dict in UserGroup.list():
if UserGroup.info({'id': dict['id']})['external-user-groups']:
UserGroup.delete({'id': dict['id']})
super(ActiveDirectoryUserGroupTestCase, self).tearDown()
@classmethod
@skip_if_not_set('ldap')
def tearDownClass(cls):
"""Delete the AD auth-source afterwards"""
LDAPAuthSource.delete({'id': cls.auth['server']['id']})
super(ActiveDirectoryUserGroupTestCase, cls).tearDownClass()
@tier2
@upgrade
def test_positive_create_and_refresh_external_usergroup_with_local_user(self):
"""Create and refresh external user group with AD LDAP. Verify Local user
association from user-group with external group with AD LDAP
:id: 7431979c-aea8-4984-bb7d-185f5b7c3109
:expectedresults: User group is created and refreshed successfully.
Local user is associated from user-group with external group.
:CaseLevel: Integration
:BZ: 1412209
"""
ext_user_group = make_usergroup_external(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'foobargroup',
}
)
self.assertEqual(ext_user_group['auth-source'], self.auth['server']['name'])
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
user = make_user()
UserGroup.add_user({'user': user['login'], 'id': self.user_group['id']})
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
@tier2
def test_positive_automate_bz1426957(self):
"""Verify role is properly reflected on AD user.
:id: 1c1209a6-5bb8-489c-a151-bb2fce4dbbfc
:expectedresults: Roles from usergroup is applied on AD user successfully.
:CaseLevel: Integration
:BZ: 1426957, 1667704
"""
ext_user_group = make_usergroup_external(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'foobargroup',
}
)
self.assertEqual(ext_user_group['auth-source'], self.auth['server']['name'])
role = make_role()
UserGroup.add_role({'id': self.user_group['id'], 'role-id': role['id']})
with self.assertNotRaises(CLIReturnCodeError):
Task.with_user(username=self.ldap_user_name, password=self.ldap_user_passwd).list()
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
self.assertEqual(User.info({'login': self.ldap_user_name})['user-groups'][1], role['name'])
User.delete({'login': self.ldap_user_name})
@tier2
def test_negative_automate_bz1437578(self):
"""Verify error message on usergroup create with 'Domain Users' on AD user.
:id: d4caf33e-b9eb-4281-9e04-fbe1d5b035dc
:expectedresults: Error message as Domain Users is a special group in AD.
:CaseLevel: Integration
:BZ: 1437578
"""
with self.assertRaises(CLIReturnCodeError):
result = UserGroupExternal.create(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'Domain Users',
}
)
self.assertEqual(
'Could not create external user group: '
'Name is not found in the authentication source'
'Name Domain Users is a special group in AD.'
' Unfortunately, we cannot obtain membership information'
' from a LDAP search and therefore sync it.',
result,
)
@run_in_one_thread
class FreeIPAUserGroupTestCase(CLITestCase):
"""Implements FreeIPA LDAP feature tests for user groups in CLI."""
@classmethod
@skip_if_not_set('ipa')
def setUpClass(cls):
"""Read settings and create LDAP auth source that can be re-used in
tests."""
super(FreeIPAUserGroupTestCase, cls).setUpClass()
cls.ldap_user_name = settings.ipa.username_ipa
cls.ldap_user_passwd = settings.ipa.password_ipa
cls.base_dn = settings.ipa.basedn_ipa
cls.group_base_dn = settings.ipa.grpbasedn_ipa
cls.ldap_hostname = settings.ipa.hostname_ipa
cls.auth = make_ldap_auth_source(
{
'name': gen_string('alpha'),
'onthefly-register': 'true',
'host': cls.ldap_hostname,
'server-type': LDAP_SERVER_TYPE['CLI']['ipa'],
'attr-login': LDAP_ATTR['login'],
'attr-firstname': LDAP_ATTR['firstname'],
'attr-lastname': LDAP_ATTR['surname'],
'attr-mail': LDAP_ATTR['mail'],
'account': cls.ldap_user_name,
'account-password': cls.ldap_user_passwd,
'base-dn': cls.base_dn,
'groups-base': cls.group_base_dn,
}
)
def setUp(self):
"""Create new usergroup per each test"""
super(FreeIPAUserGroupTestCase, self).setUp()
self.user_group = make_usergroup()
def tearDown(self):
"""Delete usergroup per each test"""
for dict in UserGroup.list():
if UserGroup.info({'id': dict['id']})['external-user-groups']:
UserGroup.delete({'id': dict['id']})
super(FreeIPAUserGroupTestCase, self).tearDown()
@classmethod
@skip_if_not_set('ipa')
def tearDownClass(cls):
"""Delete the IPA auth-source afterwards"""
LDAPAuthSource.delete({'id': cls.auth['server']['id']})
super(FreeIPAUserGroupTestCase, cls).tearDownClass()
@tier2
@upgrade
def test_positive_create_and_refresh_external_usergroup_with_local_user(self):
"""Create and Refresh external user group with FreeIPA LDAP. Verify Local user
association from user-group with external group with FreeIPA LDAP
:id: bd6152e3-51ac-4e84-b084-8bab1c4eb583
:expectedresults: User group is created successfully and assigned to correct auth
source. User group is refreshed successfully. Local user is associated from
user group with external group.
:CaseLevel: Integration
:BZ: 1412209
"""
ext_user_group = make_usergroup_external(
{
'auth-source-id': self.auth['server']['id'],
'user-group-id': self.user_group['id'],
'name': 'foobargroup',
}
)
self.assertEqual(ext_user_group['auth-source'], self.auth['server']['name'])
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
user = make_user()
UserGroup.add_user({'user': user['login'], 'id': self.user_group['id']})
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
with self.assertNotRaises(CLIReturnCodeError):
UserGroupExternal.refresh(
{'user-group-id': self.user_group['id'], 'name': 'foobargroup'}
)
print(User.info({'login': user['login']}))
self.assertEqual(
User.info({'login': user['login']})['user-groups'][0]['usergroup'],
self.user_group['name'],
)
| gpl-3.0 | 8,556,261,803,946,540,000 | 37.013986 | 99 | 0.595904 | false | 3.892124 | true | false | false |
operasoftware/tlswebprober | webprober/prober/templatetags/tags.py | 1 | 1761 | # Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
register = template.Library()
@register.inclusion_tag("color_value.html")
def color_value(collection):
""" {% color_value collection %}
render collection.value with the assigned color
depending on parameters, optionally with a link.
collection.value value to be rendered
collection.textcolor : The color to be used
if callable textcolor_fun(value, collection) return the text and color based on the value as (text, color)
collection.link: If present contain a URL to be linked to
"""
if not collection:
return {"valid":False}
if not isinstance(collection,dict):
return {"valid":True, "text":collection, "color":None, "debug":collection}
if not collection or "value" not in collection:
return {"valid":False}
value = collection["value"]
color = collection.get("textcolor", None)
if callable(color):
(value, color) = color(value, collection)
args = {
"valid":True,
"text":value,
"color":color,
"debug":(value, color, collection.get("color", None), collection.get("values",None))
}
if "link" in collection:
args["link"] = collection["link"];
return args
| apache-2.0 | -7,333,397,232,341,761,000 | 31.036364 | 110 | 0.712663 | false | 3.6841 | false | false | false |
adcrn/transcript-helper | stuff/audio_op.py | 1 | 2796 | # Author: Alexander Decurnou
# Team: iDev
from math import ceil
from moviepy.editor import VideoFileClip, AudioFileClip
from os.path import join
import subprocess
DEFAULT_AUDIOFILE_CODEC = 'libvorbis' # used to create webms
DEFAULT_AUDIOFILE_BITRATE = None
DEFAULT_ZEROES_PADDING = 5
DEFAULT_AUDIO_SEGMENT_DURATION_SEC = 180
def audio_extraction(path, audio_dest, audio_codec=DEFAULT_AUDIOFILE_CODEC,
audio_bitrate=DEFAULT_AUDIOFILE_BITRATE):
try:
print("Extracting audio...")
#video = VideoFileClip(vid_src)
#audio = video.audio
#audio.write_audiofile(audio_dest, codec=audio_codec,
# bitrate=audio_bitrate, verbose=False,
# progress_bar=False)
command = "ffmpeg -i {} -vn -acodec {} -y {}".format(path, audio_codec, audio_dest)
subprocess.call(command, shell=True)
print("Audio file extracted.")
    except Exception:
        print("Unexpected error!")
        raise
# Really hacky way of making audio-only files into audio-only webms. Yes,
# transcoding from lossy to lossy is bad, but since this will be used on mostly
# voice-only stuff, I'm not terribly worried about a loss of fidelity.
def audio_conversion(audio_src, audio_dest, audio_codec=DEFAULT_AUDIOFILE_CODEC,
audio_bitrate=DEFAULT_AUDIOFILE_BITRATE):
try:
print("Extracting audio...")
audio = AudioFileClip(audio_src)
audio.write_audiofile(audio_dest, codec=audio_codec,
bitrate=audio_bitrate, verbose=False,
progress_bar=False)
print("Audio file extracted.")
    except Exception:
        print("Unexpected error!")
        raise
def audio_segmentation(audio_src, audio_seg_dir,
seg_dur=DEFAULT_AUDIO_SEGMENT_DURATION_SEC,
pad_zeroes=DEFAULT_ZEROES_PADDING):
src_ext = ".webm"
audio = AudioFileClip(audio_src)
total_sec = audio.duration
start_sec = 0
print("Segmenting audio...")
while start_sec < total_sec:
end_sec = start_sec + seg_dur
if end_sec > total_sec:
end_sec = ceil(total_sec)
segment = audio.subclip(start_sec)
else:
segment = audio.subclip(start_sec, end_sec)
seg_name = "%s-%s%s" % (
str(start_sec).rjust(pad_zeroes, "0"),
str(end_sec).rjust(pad_zeroes, "0"), src_ext)
start_sec = end_sec
seg_full_path = join(audio_seg_dir, seg_name)
segment.write_audiofile(seg_full_path, codec=DEFAULT_AUDIOFILE_CODEC,
bitrate=DEFAULT_AUDIOFILE_BITRATE,
verbose=False, progress_bar=False)
print("Audio segmentation complete.")
| gpl-3.0 | -2,825,435,699,956,229,600 | 36.783784 | 91 | 0.608727 | false | 3.75302 | false | false | false |
shingonoide/odoo | addons/procurement_jit_stock/procurement_jit_stock.py | 64 | 2047 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class procurement_order(osv.osv):
_inherit = "procurement.order"
def run(self, cr, uid, ids, autocommit=False, context=None):
context = dict(context or {}, procurement_autorun_defer=True)
res = super(procurement_order, self).run(cr, uid, ids, autocommit=autocommit, context=context)
procurement_ids = self.search(cr, uid, [('move_dest_id.procurement_id', 'in', ids), ('state', 'not in', ['exception', 'cancel'])], order='id', context=context)
if procurement_ids:
return self.run(cr, uid, procurement_ids, autocommit=autocommit, context=context)
return res
class stock_move(osv.osv):
_inherit = "stock.move"
def _create_procurements(self, cr, uid, moves, context=None):
res = super(stock_move, self)._create_procurements(cr, uid, moves, context=dict(context or {}, procurement_autorun_defer=True))
self.pool['procurement.order'].run(cr, uid, res, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,509,350,440,075,620,000 | 43.5 | 167 | 0.638007 | false | 4.013725 | false | false | false |
BenjamenMeyer/openstackinabox | openstackinabox/tests/models/keystone/test_roles.py | 2 | 7366 | import uuid
import ddt
from openstackinabox.tests.base import TestBase, DbFailure
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.db.roles import KeystoneDbRoles
@ddt.ddt
class TestKeystoneDbRoles(TestBase):
def setUp(self):
super(TestKeystoneDbRoles, self).setUp()
self.model = KeystoneDbRoles
self.master = 'Venus'
self.db = self.get_testing_database()
self.role_info = {
'name': 'role_{0}'.format(
str(uuid.uuid4())
)
}
def tearDown(self):
super(TestKeystoneDbRoles, self).tearDown()
def test_initialization(self):
instance = self.model(
self.master,
self.db
)
self.assertEqual(self.master, instance.master)
self.assertEqual(self.db, instance.database)
self.assertIsNone(instance.admin_role_id)
self.assertIsNone(instance.viewer_role_id)
instance.initialize()
self.assertIsNotNone(instance.admin_role_id)
self.assertIsNotNone(instance.viewer_role_id)
def test_add_failure(self):
instance = self.model(
self.master,
DbFailure(),
)
with self.assertRaises(exceptions.KeystoneRoleError):
instance.add('br34k1ng4llth1ng$')
def test_add_user_role_by_id_failure(self):
instance = self.model(
self.master,
DbFailure(),
)
with self.assertRaises(exceptions.KeystoneRoleError):
instance.add_user_role_by_id(
tenant_id=0,
user_id=0,
role_id=1
)
def test_add_and_get(self):
instance = self.model(
self.master,
self.db
)
instance.initialize()
with self.assertRaises(exceptions.KeystoneRoleError):
instance.get(
self.role_info['name']
)
role_id = instance.add(
self.role_info['name']
)
role_data = instance.get(
self.role_info['name']
)
self.assertEqual(
role_id,
role_data['id']
)
self.assertEqual(
self.role_info['name'],
role_data['name']
)
@ddt.data(
'tenant',
'user',
'role',
None
)
def test_add_user_role_by_id(self, invalid_value):
role_name = 'phearB0t'
tenant = {
'name': 'megaTokyo',
'description': 'US Manga'
}
user = {
'name': 'largo',
'email': '[email protected]',
'password': '3l1t30n3$rul3',
'apikey': 'p4$$w0rd$suck'
}
tenant_id = self.tenants.add(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=True
)
user_id = self.users.add(
tenant_id=tenant_id,
username=user['name'],
email=user['email'],
password=user['password'],
apikey=user['apikey'],
enabled=True
)
role_id = self.roles.add(
role_name
)
if invalid_value is None:
self.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=role_id,
)
user_roles = self.roles.get_user_roles(
tenant_id=tenant_id,
user_id=user_id,
)
self.assertEqual(1, len(user_roles))
for user_role in user_roles:
self.assertEqual(role_id, user_role['id'])
self.assertEqual(role_name, user_role['name'])
else:
with self.assertRaises(exceptions.KeystoneRoleError):
self.roles.add_user_role_by_id(
tenant_id=tenant_id if invalid_value != 'tenant' else None,
user_id=user_id if invalid_value != 'user' else None,
role_id=role_id if invalid_value != 'role' else None
)
def test_add_user_role_by_name(self):
role_name = 'phearB0t'
tenant = {
'name': 'megaTokyo',
'description': 'US Manga'
}
user = {
'name': 'largo',
'email': '[email protected]',
'password': '3l1t30n3$rul3',
'apikey': 'p4$$w0rd$suck'
}
tenant_id = self.tenants.add(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=True
)
user_id = self.users.add(
tenant_id=tenant_id,
username=user['name'],
email=user['email'],
password=user['password'],
apikey=user['apikey'],
enabled=True
)
role_id = self.roles.add(
role_name
)
self.roles.add_user_role_by_role_name(
tenant_id=tenant_id,
user_id=user_id,
role_name=role_name
)
user_roles = self.roles.get_user_roles(
tenant_id=tenant_id,
user_id=user_id,
)
self.assertEqual(1, len(user_roles))
for user_role in user_roles:
self.assertEqual(role_id, user_role['id'])
self.assertEqual(role_name, user_role['name'])
@ddt.data(
0,
1,
20
)
def test_get_user_roles(self, role_count):
tenant = {
'name': 'megaTokyo',
'description': 'US Manga'
}
user = {
'name': 'largo',
'email': '[email protected]',
'password': '3l1t30n3$rul3',
'apikey': 'p4$$w0rd$suck'
}
tenant_id = self.tenants.add(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=True
)
user_id = self.users.add(
tenant_id=tenant_id,
username=user['name'],
email=user['email'],
password=user['password'],
apikey=user['apikey'],
enabled=True
)
role_names = [
'ph34rb0t_{0}'.format(x)
for x in range(role_count)
]
roles = [
{
'name': name,
'id': self.roles.add(name)
}
for name in role_names
]
for role in roles:
self.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=role['id']
)
user_roles = self.roles.get_user_roles(
tenant_id=tenant_id,
user_id=user_id,
)
self.assertEqual(role_count, len(user_roles))
def find_index(rolename):
for x in range(len(roles)):
if roles[x]['name'] == rolename:
return x
return None
for user_role in user_roles:
role_index = find_index(user_role['name'])
self.assertIsNotNone(role_index)
role_info = roles[role_index]
self.assertEqual(role_info['id'], user_role['id'])
self.assertEqual(role_info['name'], user_role['name'])
| apache-2.0 | 6,227,425,451,324,088,000 | 26.3829 | 79 | 0.492941 | false | 3.868697 | true | false | false |
glassesfactory/Shimehari | shimehari/app.py | 1 | 24726 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
===============================
Shimehari.app
~~~~~~~~~~~~~
    Modeled on Flask and friends.
    The aim is a design that can cope
    even when the routing gets somewhat complex.
===============================
"""
import os
import sys
from threading import Lock
from functools import update_wrapper
from datetime import timedelta
from werkzeug.exceptions import HTTPException, InternalServerError, BadRequest
from werkzeug.routing import RequestRedirect, Rule
from .helpers import _Kouzi, findPackage, getHandlerAction, getModulesFromPyFile, getEnviron, \
lockedCachedProperty, getTemplater
from .contexts import RequestContext, AppContext
from .routing import Router
from core.config import RESTFUL_ACTIONS
from .wrappers import Request, Response
from shimehari.configuration import Config, ConfigManager
from shimehari.session import SessionStore
from shimehari.shared import _requestContextStack, _SharedRequestClass, request
from shimehari.template import _defaultTemplateCtxProcessor
from shimehari.core.exceptions import ShimehariSetupError
from shimehari.core.signals import appContextTearingDown, requestContextTearingDown, requestStarted, requestFinished, gotRequestException
_loggerLock = Lock()
defaultHost = '127.0.0.1'
defaultPort = 5959
def setupMethod(f):
def wrapperFunc(self, *args, **kwargs):
if self.debug and self._gotFirstRequest:
raise AssertionError('Setup seems to have already completed ...')
return f(self, *args, **kwargs)
return update_wrapper(wrapperFunc, f)
class Shimehari(_Kouzi):
u"""Shimehari Object は WSGI アプリケーションとして必要な機能を実装しており、
アプリケーションの中心となるオブジェクトです。
メインモジュール、または __init__.py ファイルの中で以下のように書くことで
Shimehari インスタンスを生成することができます。
.. code-block:: python
from shimehari import Shimehari
app = Shimehari(__name__)
ただし、Shimehari では通常コマンドラインでアプリケーションを生成することを推奨しているので
あなたが直接インスタンス生成のコードを書くことはそうそうないと思われます。
:param importName: アプリケーションのパッケージ名
:param staticURL: サイト内共通、静的ファイルの URL
:param staticFolder: 静的ファイルが格納されているディレクトリ
:param appFolder: アプリケーション全体が格納されているディレクトリ
:param controllerFolder: コントローラーが格納されているディレクトリ
:param viewFolder: ビューが格納されているディレクトリ
:param assetsFolder: アセットファイルが格納されているディレクトリ
:param instancePath: アプリケーションのための代替インスタンスパス
"""
currentEnv = getEnviron()
debug = None
testing = None
requestClass = Request
responseClass = Response
testClientCls = None
teardownAppContextFuncs = []
defaultConfig = {
'DEBUG': False,
'TEST': False,
'APP_DIRECTORY': 'app',
'CONTROLLER_DIRECTORY': 'controllers',
'VIEW_DIRECTORY': 'views',
#for daiginjou
'MODEL_DIRECTORY': 'models',
'PREFERRED_URL_SCHEME': 'http',
'AUTO_SETUP': True,
'TEMPLATE_ENGINE': 'jinja2',
'TRAP_HTTP_EXCEPTIONS': False,
'SERVER_NAME': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'SECRET_KEY': '_secret_shimehari'
}
templateOptions = {}
sessionStore = SessionStore()
sharedRequestClass = _SharedRequestClass
allowedMethods = set(['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH'])
bodylessMethods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE'])
def __init__(self, importName,
staticURL=None, staticFolder='static',
appFolder='app', controllerFolder='controllers',
viewFolder='views', assetsFolder='assets',
instancePath=None, isRelativeConfig=False, templateOptions={}):
_Kouzi.__init__(self, importName, appFolder=appFolder,
controllerFolder=controllerFolder, viewFolder=viewFolder)
if instancePath is None:
self._instancePath = self.getInstancePath()
self._logger = None
self.loggerName = self.importName
self.config = self.getConfig()
self.controllers = {}
self.urlValuePreprocesors = {}
self.beforeRequestFuncs = {}
self.beforeFirstRequestFuncs = []
self.urlDefaultFuncs = {}
self.afterRequestFuncs = {}
self._errorHandlers = {}
self.errorHandlerSpec = {None: self._errorHandlers}
self.buildErrorHandlers = None
self.teardownRequestContextFuncs = {}
self.templateContextProcessors = {
None: [_defaultTemplateCtxProcessor]
}
#CSRF
from shimehari.crypt import CSRF
self.csrf = CSRF(self)
self._router = Router()
self._gotFirstRequest = False
self._beforeRequestLock = Lock()
self.debug = self.config['DEBUG']
self.test = self.config['TEST']
self.sessionKey = self.config['SESSION_COOKIE_NAME']
self.useXSendFile = self.config['USE_X_SENDFILE']
self.templateOptions = templateOptions
if self.config['AUTO_SETUP']:
self.setup()
@lockedCachedProperty
def name(self):
if self.importName == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.importName
@property
def gotFirstRequest(self):
return self._gotFirstRequest
@property
def propagateExceptions(self):
return self.testing or self.debug
@property
def preserveContextOnException(self):
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
if self._logger and self._logger.name == self.loggerName:
return self._logger
with _loggerLock:
if self._logger and self._logger.name == self.loggerName:
return self._logger
from shimehari.logging import createLogger
self._logger = rv = createLogger(self.loggerName)
return rv
def router():
u"""アプリケーションのルーティングを管理するルーターを設定します。"""
def fget(self):
return self._router
def fset(self, value):
self.setControllerFromRouter(value)
self._router = value
def fdel(self):
self.controllers = {}
del self._router
return locals()
router = property(**router())
def getInstancePath(self):
u"""インスタンスパスを返します。"""
prefix, pkgPath = findPackage(self.importName)
if prefix is None:
return os.path.join(pkgPath, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def getConfig(self):
u"""現在アプリケーションに適用されているコンフィグを返します。"""
configs = ConfigManager.getConfigs()
try:
# from .config import config
configs = ConfigManager.getConfigs()
except ImportError:
pass
if not configs:
cfg = Config(self.currentEnv, self.defaultConfig)
ConfigManager.addConfig(cfg)
return cfg
else:
return configs[self.currentEnv]
def saveSession(self, session, response):
u"""セッションを保存します。
:param session: 保存したいセッション
:param response: レスポンス
"""
if session.should_save:
self.sessionStore.save(session, response)
response.set_cookie(self.sessionKey, session.sid)
return response
def openSession(self, request):
sid = request.cookies.get(self.sessionKey, None) or request.values.get(self.sessionKey, None)
if sid is None:
return self.sessionStore.new()
else:
return self.sessionStore.get(sid)
def setControllerFromRouter(self, router):
u"""設定されたルーターからコントローラーをバインディングします。
:param router: ルーター
"""
if not self.controllers:
self.controllers = {}
for rule in router._rules:
self.controllers[rule.endpoint] = rule.endpoint
def addController(self, controller):
u"""アプリケーションにコントローラーを追加します。
:param controller: 追加したいコントローラー。
追加されたコントローラーはアプリケーションの管理下に置かれ、
ルーティングが自動生成されます。
"""
for action in RESTFUL_ACTIONS:
handler = getHandlerAction(controller, action)
if handler is not None:
self.controllers[handler] = handler
def addRoute(self, url, func, methods=None, **options):
rule = Rule(url, endpoint=func.__name__, methods=methods)
self.controllers[func.__name__] = func
self.router.add(rule)
def logException(self, excInfo):
        self.logger.error('Exception on %s [%s]' % (request.path, request.method), exc_info=excInfo)
def injectURLDefaults(self, endpoint, values):
funcs = self.urlDefaultFuncs.get(None, ())
for func in funcs:
func(endpoint, values)
@lockedCachedProperty
def templateLoader(self):
return self.templater.templateLoader
@lockedCachedProperty
def templateEnv(self):
rv = self.templater.templateEnv()
return rv
def createTemplateEnvironment(self):
rv = self.templater.createTemplateEnvironment()
return rv
def createGlobalTemplateLoader(self):
return self.templater.dispatchLoader(self)
def updateTemplateContext(self, context):
self.templater.updateTemplateContext(context)
def setup(self):
u"""アプリケーションをセットアップします。
指定された app ディレクトリ配下にあるコントローラー、ルーターを探し出しバインドします。
"""
self.appPath = os.path.join(self.rootPath, self.appFolder)
if not os.path.isdir(self.appPath):
            raise ShimehariSetupError('Application directory not found\n%s' % self.rootPath)
try:
__import__(self.appFolder)
self.setupTemplater()
self.setupBindController()
self.setupBindRouter()
except (ImportError, AttributeError):
raise ShimehariSetupError('Application directory is invalid')
def setupTemplater(self):
try:
self.templater = getTemplater(self, self.config['TEMPLATE_ENGINE'], templateOptions=self.templateOptions)
except Exception, e:
raise ShimehariSetupError('setup template engine was failed... \n%s' % e)
def setupBindController(self):
u"""コントローラーをバインドします"""
self.controllerPath = os.path.join(self.appPath, self.controllerFolder)
if not os.path.isdir(self.controllerPath):
raise ShimehariSetupError('Controller in the specified directory does not exist. %s' % self.controllerPath)
try:
ctrlDir = self.appFolder + '.' + self.controllerFolder
__import__(ctrlDir)
getModulesFromPyFile(self.controllerPath, self.rootPath)
except (ImportError, AttributeError), error:
raise ShimehariSetupError('setup controller was failed... \n%s' % error)
def setupBindRouter(self):
u"""ルーターをバインドします。"""
try:
routerFile = self.appFolder + '.' + 'router'
routerMod = __import__(routerFile, fromlist=['router'])
if hasattr(routerMod, 'appRoutes'):
self.router = routerMod.appRoutes
if self.hasStaticFolder:
for url in self.getStaticURLs():
self.addRoute(url + '/<path:filename>', self.sendStaticFile)
except (ImportError, AttributeError), e:
            raise ShimehariSetupError('Failed to set up the router...\ndetails:\n%s' % e)
@setupMethod
def beforeRequest(self, f):
u"""リクエストを処理する前に実行したいメソッドを登録します。
:param f: リクエスト処理前に実行させたい処理
"""
self.beforeRequestFuncs.setdefault(None, []).append(f)
return f
@setupMethod
def beforeFirstRequest(self, f):
u"""アプリケーションに対し初めてリクエストがあったときのみ、
リクエストを処理する前に実行したいメソッドを登録します。
:param f: 初めてのリクエスト処理前に実行したい処理
"""
self.beforeFirstRequestFuncs.append(f)
@setupMethod
def afterRequest(self, f):
u"""リクエスト処理が終わった後に実行したいメソッドを登録します。
:param f: リクエスト処理後に実行したい処理
"""
self.afterRequestFuncs.setdefault(None, []).append(f)
return f
@setupMethod
def urlValuePreprocessor(self, f):
u"""リクエストの前に実行したい処理を登録します。
:param f: 実行したい処理
"""
self.urlValuePreprocesors.setdefault(None, []).append(f)
return f
@setupMethod
def tearDownAppContext(self, f):
self.teardownAppContextFuncs.append(f)
return f
@setupMethod
def errorHandler(self, codeOrException):
def decorator(f):
self._registerErrorHandler(None, codeOrException, f)
return f
return decorator
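    # Usage sketch (the handler below is illustrative, not part of the framework):
    #
    #   @app.errorHandler(404)
    #   def notFound(error):
    #       return 'not found', 404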
@setupMethod
def _registerErrorHandler(self, key, codeOrException, f):
if isinstance(codeOrException, HTTPException):
codeOrException = codeOrException.code
if isinstance(codeOrException, (int, long)):
assert codeOrException != 500 or key is None
self.errorHandlerSpec.setdefault(key, {})[codeOrException] = f
else:
self.errorHandlerSpec.setdefault(key, {}).setdefault(None, []).append((codeOrException, f))
def tryTriggerBeforeFirstRequest(self):
u"""アプリケーションに対し最初のリクエストがあった時のみに行う処理を
実際に実行します。
"""
if self._gotFirstRequest:
return
with self._beforeRequestLock:
if self._gotFirstRequest:
return
self._gotFirstRequest = True
[f() for f in self.beforeFirstRequestFuncs]
def createAdapter(self, request):
u"""url adapter を生成します
:param request: 元となるリクエストオブジェクト
"""
if request is not None:
if request.environ['REQUEST_METHOD'] == 'POST':
method = request.form.get('_method', '').upper() or request.environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper()
if method in self.allowedMethods:
method = method.encode('ascii', 'replace')
request.environ['REQUEST_METHOD'] = method
if method in self.bodylessMethods:
request.environ['CONTENT_LENGTH'] = 0
return self.router.bind_to_environ(request.environ)
        # what is this all about?
if self.config['SERVER_NAME'] is not None:
return self.router.bind(
self.config['SERVER_NAME'],
script_name=self.config['APP_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def appContext(self):
u"""アプリケーションコンテキストを返します。"""
return AppContext(self)
def requestContext(self, environ):
u"""リクエストコンテキストを返します。
:param environ: リクエスト環境変数
"""
return RequestContext(self, environ)
def doAppContextTearDonw(self, exc=None):
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardownAppContextFuncs):
func(exc)
appContextTearingDown.send(self, exc)
def doRequestContextTearDown(self, exc=None):
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardownRequestContextFuncs.get(None, ())):
func(exc)
requestContextTearingDown.send(self, exc=exc)
def preprocessRequest(self):
u"""リクエスト前に処理したい登録済みのメソッドを実行します。
:param rv: 登録した処理
"""
for func in self.urlValuePreprocesors.get(None, ()):
func(request.endpoint, request.viewArgs)
self.csrf.checkCSRFExempt()
self.csrf.csrfProtect()
for func in self.beforeRequestFuncs.get(None, ()):
rv = func()
if rv is not None:
return rv
def processResponse(self, response):
u"""レスポンスを返す前に処理したい登録済みのメソッドをを実行します。
:param response: レスポンス
"""
context = _requestContextStack.top
funcs = ()
if None in self.afterRequestFuncs:
funcs = self.afterRequestFuncs[None]
for handler in funcs:
response = handler(response)
if not self.sessionStore.isNullSession(context.session):
self.saveSession(context.session, response)
return response
def dispatchRequest(self):
u"""リクエストをもとに、レスポンスを発行します。
:param request: リクエスト
"""
self.tryTriggerBeforeFirstRequest()
try:
requestStarted.send(self)
rv = self.preprocessRequest()
if rv is None:
req = _requestContextStack.top.request
if req.routingException is not None:
self.raiseRoutingException(req)
rule = req.urlRule
rv = self.controllers[rule.endpoint](**req.viewArgs)
except Exception, e:
rv = self.makeResponse(self.handleUserException(e))
response = self.makeResponse(rv)
response = self.processResponse(response)
requestFinished.send(self, response=response)
return response
def makeResponse(self, rv):
u"""レスポンスを生成して返します。
:param rv: リクエスト
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('view function does not return a response.')
if not isinstance(rv, self.responseClass):
if isinstance(rv, basestring):
rv = self.responseClass(rv, headers=headers, status=status)
headers = status = None
else:
rv = self.responseClass.force_type(rv, request.environ)
if status is not None:
if isinstance(status, basestring):
rv.status = status
else:
rv.status_code = status
if headers:
rv.headers.extend(headers)
return rv
def handleException(self, e):
u"""エラーをハンドリングします。
:param e: エラー内容
"""
excType, excValue, excTb = sys.exc_info()
gotRequestException.send(self, exception=e)
handler = self.errorHandlerSpec[None].get(500)
if self.propagateExceptions:
if excValue is e:
raise excType, excValue, excTb
else:
raise e
self.logException((excType, excValue, excTb))
if handler is None:
return InternalServerError()
return handler(e)
def handleHTTPException(self, e):
handler = self.errorHandlerSpec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trapHTTPException(self, e):
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
def handleUserException(self, e):
excType, excValue, tb = sys.exc_info()
assert excValue is e
if isinstance(e, HTTPException) and not self.trapHTTPException(e):
return self.handleHTTPException(e)
appHandlers = self.errorHandlerSpec[None].get(None, ())
for typecheck, handler in appHandlers:
if isinstance(e, typecheck):
return handler(e)
raise excType, excValue, tb
def raiseRoutingException(self, request):
if not self.debug or not isinstance(request.routingException, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routingException
def testRequestContext(self, *args, **kwargs):
from shimehari.testing import makeTestEnvironBuilder
builder = makeTestEnvironBuilder(self, *args, **kwargs)
try:
return self.requestContext(builder.get_environ())
finally:
builder.close()
def handleBuildError(self, error, endpoint, **kwargs):
if self.buildErrorHandlers is None:
excType, excValue, tb = sys.exc_info()
if excValue is error:
raise excType, excValue, tb
else:
raise error
return self.buildErrorHandlers(error, endpoint, **kwargs)
def wsgiApp(self, environ, startResponse):
u"""WSGI アプリとして実行します。(であってるのか)
:param environ: 環境変数
:param startResponse: hoge
"""
with self.requestContext(environ):
try:
response = self.dispatchRequest()
except Exception, e:
response = self.makeResponse(self.handleException(e))
return response(environ, startResponse)
def drink(self, host=None, port=None, debug=None, **options):
u"""アプリを実行します。
:param host: ホスト名
:param port: ポート番号
:param debug: デバッグモードとして起動するかどうか
:param options: kwargs
"""
host = host or defaultHost
port = port or defaultPort
from werkzeug.serving import run_simple
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
from werkzeug._internal import _log
_log('info', ' * Shimehari GKGK!')
run_simple(host, port, self, **options)
finally:
self._gotFirstRequest = False
def run(self, host=None, port=None, debug=None, **options):
u"""アプリを実行します。
単純に drink メソッドをラップしているだけです。
WSGI 周りのライブラリや既存のコードで run を自動的に呼ぶ物がおおいので念のため。
"""
self.drink(host, port, debug, **options)
def testClient(self, useCookies=True):
cls = self.testClientCls
if cls is None:
from shimehari.testing import ShimehariClient as cls
return cls(self, self.responseClass, use_cookies=useCookies)
def __call__(self, environ, startResponse):
return self.wsgiApp(environ, startResponse)
def __str__(self):
return 'Shimehari WSGI Application Framework!'
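# Minimal usage sketch (the host/port values are illustrative):
#
#   app = Shimehari(__name__)
#   app.drink(host='127.0.0.1', port=5959, debug=True)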
| bsd-3-clause | 7,813,359,774,298,345,000 | 31.313953 | 137 | 0.610651 | false | 3.293142 | true | false | false |
Aurous/Magic-Discord-Bot | bot.py | 1 | 3281 | from mtgsdk import Card
import json, discord
prop = {'name','multiverse_id','layout','names','mana_cost','cmc','colors','type','supertypes','subtypes','rarity','text','flavor','artist','number','power','toughness','loyalty','variations','watermark','border','timeshifted','hand','life','reserved','release_date','starter','rulings','foreign_names','printings','original_text','original_type','legalities','source','image_url','set','set_name','id'}
run = 'card_adv = Card'
def adv(str_input):
return '='.join(str_input.split(' ')).lower()
def reduce(str_input):
return '-'.join(str_input.split(' ')).lower()
def http_image(uid):
return 'https://image.deckbrew.com/mtg/multiverseid/'+ str(uid) +'.jpg'
def http_address(set,name):
return 'http://store.tcgplayer.com/magic/'+reduce(set)+'/'+reduce(name)
def http_parse(str_input):
return '%20'.join(str_input.split(' '))
client = discord.Client()
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
if message.content.startswith('!magic'):
msg_com = message.content.split('-')
msg_com.pop(0)
for msg in msg_com:
            if 'help' in msg.lower():
print('help')
await client.send_message(message.channel,'Magic Card Bot \n --help : This message displaying \n -s_reg : Followed by a string will search that string \n -m_uid : Searchs cards by multivesrse id \n -s_adv : Not currently finished')
elif 'm_uid' in msg.lower():
print(msg[6:])
card_m = Card.find(msg[6:])
print(http_address(card_m.set_name,card_m.name))
await client.send_message(message.channel,http_address(card_m.set_name,card_m.name))
print(http_image(card_m.multiverse_id))
await client.send_message(message.channel,http_image(card_m.multiverse_id))
elif 's_reg' in msg.lower():
print(http_parse(msg[6:]))
card_s = Card.where(name=msg[6:]).all()
for s_card in card_s:
print(http_address(s_card.set_name,s_card.name))
await client.send_message(message.channel,http_address(s_card.set_name,s_card.name))
print(http_image(s_card.multiverse_id))
await client.send_message(message.channel,http_image(s_card.multiverse_id))
elif 's_adv' in msg.lower():
await client.send_message(message.channel,'This command is disabled')
else:
print('RIP something went wrong')
await client.send_message(message.channel, 'RIP something went wrong')
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.run('Bot Token')
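# Example chat commands the handler above responds to (card name/id are illustrative):
#   !magic -m_uid 600
#   !magic -s_reg Llanowar Elves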
| gpl-3.0 | 8,098,310,293,257,380,000 | 59.759259 | 403 | 0.53947 | false | 3.919952 | false | false | false |
AdamRTomkins/libSpineML2NK | libSpineML2NK/nk_executable.py | 1 | 9925 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""nk_exectuable will store a single executable experiment, designed in SpineML
SpineML is a declaratice Spiking neuron modelling language.
"""
#import os # STD lib imports first
#import sys # alphabetical
#import some_third_party_lib # 3rd party stuff next
#import some_third_party_other_lib # alphabetical
import argparse
import numpy as np
import pdb
import networkx as nx
import h5py
import libSpineML
from libSpineML import smlBundle
from libSpineML import smlExperiment
from neurokernel.core_gpu import Manager
from libSpineML2NK.LPU import LPU
import nk_utils
import nk_manager
#import local_stuff # local stuff last
#import more_local_stuff
#import dont_import_two, modules_in_one_line # IMPORTANT!
#from pyflakes_cannot_handle import * # and there are other reasons it should be avoided # noqa
# Using # noqa in the line above avoids flake8 warnings about line length!
class Executable(object):
"""Executable Neurokernel Object
Can take a libSpineML bundle, or a SpineML Experiment file
"""
def __init__(self, experiment=None):
self.params = {}
if type(experiment) is str:
self.bundle = smlBundle.Bundle()
self.bundle.add_experiment(experiment,True)
exp = self.bundle.experiments[0].Experiment[0]
self.params['name'] = exp.get_name()
elif type(experiment) is smlBundle.Bundle:
self.bundle = experiment
exp = self.bundle.experiments[0].Experiment[0]
self.params['name'] = exp.get_name()
else:
self.bundle = smlBundle.Bundle()
self.params['name'] = 'No_name'
self.network = nx.MultiDiGraph()
#self.network = nx.DiGraph()
self.inputs = np.zeros((0, 0), dtype=np.double)
self.time = np.zeros((0, 0), dtype=np.double)
self.debug = False
self.log = False
def execute(self):
"""Execute the model, after processing the class
This method will create the Input file and Network file dynamically
        As a minimum, this function will need to create a params dictionary as required by nk_manager
with the following keys:
params['name'],
params['dt'],
params['n_dict'],
params['s_dict'],
input_file=params['input_file'],
output_file=params['output_file'],
components=params['components'])
params['steps']
"""
self.params['input_file'] = self.params['name'] + '_input.h5'
self.params['output_file'] = self.params['name'] + '_output.h5'
self.process_experiment()
self.process_network()
self.process_component()
## output the input
self.save_input()
self.save_network()
#nk_manager.launch_nk(self.params)
from nk_manager import launch_nk
launch_nk(self.params,self.debug,self.log)
def set_debug(self, debug=True):
self.debug = debug
def set_log(self,log=True):
self.log = log
def process_experiment(self,bundleIndex=0,expIndex=0):
"""Process to the experiment file to extract NK relevant objects
Each bundle can store many experiments, bundleIndex dictates the
SpineML experient to use. Similary each SpineML Experiment can
contain several experiment types, and so provisions are made for
acommodating multiple experiments.
"""
# Extract input and output files
exp = self.bundle.experiments[bundleIndex].Experiment[expIndex]
self.params['name'] = exp.get_name()
# save everything in standard units before saving
self.params['dt'] = nk_utils.units(float(exp.Simulation.AbstractIntegrationMethod.dt),'mS')
self.params['steps'] = nk_utils.units(float(exp.Simulation.duration),'S') / self.params['dt']
self.params['num_neurons'] = 0;
for n in self.bundle.networks[0].Population:
self.params['num_neurons']+= n.Neuron.size
######################################################################
# Correct dt and time to be in standard
#####################################################################
self.inputs = np.zeros((self.params['steps'], self.params['num_neurons']), dtype=np.double)
self.time = time = np.arange(0,self.params['dt']*self.params['steps'] , self.params['dt'])
        # Process Lesions
        # Process Configurations
def process_network(self):
"""Process to the experiment file to extract NK relevant objects
"""
# extract input file
# extract network file
# create n_dict
# create s_dict
exp_name = self.bundle.index.keys()[0]
model_name = self.bundle.index[exp_name]['network'].keys()[0]
populations = self.bundle.index[exp_name]['network'][model_name].Population
lpu_index = 0
for p in populations:
            lpu_start = lpu_index  # start position for each neuron
for n in np.arange(0,p.Neuron.size):
self.add_neuron(p.Neuron.url,p.Neuron.Property,lpu_index,n,p.Neuron.name,exp_name)
lpu_index +=1
for i in self.bundle.index[exp_name]['experiment'][exp_name].Experiment[0].AbstractInput:
if p.Neuron.name == i.target:
self.initialise_input(i,lpu_start,p.Neuron.size)
self.params['graph'] = self.network
(n_dict, s_dict) = LPU.graph_to_dicts(self.params['graph'])
self.params['n_dict'] = n_dict
self.params['s_dict'] = s_dict
def initialise_input(self,params,lpu_start,lpu_size):
# initialise an input in the matrix for a given input to a population
itype = type(params)
if (itype == smlExperiment.TimeVaryingArrayInputType):
self.inputs = nk_utils.TimeVaryingArrayInput(params,lpu_start,lpu_size,self.time,self.inputs)
elif (itype == smlExperiment.ConstantInputType):
self.inputs = nk_utils.ConstantInput(params,lpu_start,lpu_size,self.time,self.inputs)
elif (itype == smlExperiment.ConstantArrayInputType):
self.inputs = nk_utils.ConstantArrayInput(params,lpu_start,lpu_size,self.time,self.inputs)
elif (itype == smlExperiment.TimeVaryingInputType):
self.inputs = nk_utils.TimeVaryingInput(params,lpu_start,lpu_size,self.time,self.inputs)
else:
raise TypeError('type %s is not recognised as an input type' %str(itype))
def standard_neurons(self,model):
""" provide the base neuron parameters from neurokernel, which are not in SpineML """
""" DEPRECIATED TO ALLOW C_GENERATION
URL is used to load the correct kernel
WIP: Automatic discovery of extern, spiking and public based on component and connections
external is true, to work with input generation, this will not scale well
"""
return {'model': 'SpineMLNeuron','name': 'neuron_x','extern': True,'public': False,'spiking': True,'selector': '/a[0]','V':0,"url":model}
def add_neuron(self,model,props,lpu_index,p_index,pop_name,exp_name):
""" add a neuron to the gexf population,
where p_index is the neuron index within a population
"""
neuron = self.standard_neurons(model)
for p in props:
""" p example: 'C': {'dimension': 'nS','input':{'type':'FixedValue','value':1}} """
neuron[p.name] = nk_utils.gen_value(p,p_index)
neuron['name'] = 'neuron_' +str(lpu_index) # + '_' + str(p_index)
neuron['selector'] = '/'+pop_name+'[' +str(lpu_index) +']' #+ '[' + str(p_index)+']'
# Determine if the neuron will be spiking or gpot
# requires that only one output port exists
comp = self.bundle.index[exp_name]['component'][model]
for port in comp.ComponentClass.Port:
if type(port) is libSpineML.smlComponent.AnalogSendPortType:
neuron['spiking'] = False
break
if type(port) is libSpineML.smlComponent.ImpulseSendPortType:
neuron['spiking'] = True
break
##################################
#
# Swap V out with default parameter from output port
#
##################################
self.network.add_node(str(lpu_index),attr_dict=neuron)
def process_component(self):
"""Process to the experiment file to extract NK relevant objects
"""
exp_name = self.bundle.index.keys()[0]
self.params['components'] = self.bundle.index[exp_name]['component']
###############################################
# Added for output testing
##############################################
def save_input(self):
""" save the input file before running """
with h5py.File(self.params['input_file'], 'w') as f:
f.create_dataset('array', (self.params['steps'], self.params['num_neurons']),
dtype=np.double,
data=self.inputs)
def save_network(self):
""" save the network file before running """
nx.write_gexf(self.network, self.params['input_file'] +'.gexf.gz')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', default='none', type=str,
help='Path to a SpineML experiment file')
args = parser.parse_args()
if args.experiment is not 'none':
exe = Executable(args.experiment)
exe.execute()
else:
print "No Experiment Provided"
if __name__=='__main__':
main()
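# Example invocation (the experiment path is illustrative):
#   python nk_executable.py -e examples/experiment0.xml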
| gpl-3.0 | 8,144,121,179,548,089,000 | 34.446429 | 145 | 0.590227 | false | 3.879984 | false | false | false |
animeshramesh/incremental-learning | tensorflow/utils.py | 1 | 3548 | import tensorflow as tf
import os, re
def get_checkpoints(checkpoint_dir):
'''
Finds all checkpoints in a directory and returns them in order
from least iterations to most iterations
'''
meta_list=[]
for file in os.listdir(checkpoint_dir):
if file.endswith('.meta'):
meta_list.append(os.path.join(checkpoint_dir, file[:-5]))
meta_list = sort_nicely(meta_list)
return meta_list
def sort_nicely(l):
"""
Sort the given list in the way that humans expect.
From Ned Batchelder
https://nedbatchelder.com/blog/200712/human_sorting.html
"""
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
def tryint(s):
try:
return int(s)
except:
return s
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
l.sort(key=alphanum_key)
return l
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def optimistic_restore(session, save_file, variable_scope=''):
'''
A Caffe-style restore that loads in variables
if they exist in both the checkpoint file and the current graph.
Call this after running the global init op.
By DanielGordon10 on December 27, 2016
https://github.com/tensorflow/tensorflow/issues/312
With RalphMao tweak.
bpugh, July 21, 2017: Added a variable_scope so that a network can be
loaded within a tf.variable_scope() and still have weights restored.
'''
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
    if variable_scope == '':
saved_shapes_scoped = saved_shapes
offset = 0
else:
saved_shapes_scoped = [variable_scope + '/' + x for x in saved_shapes]
offset = len(variable_scope) + 1
var_names = []
for var in tf.global_variables():
search_term = var.name.split(':')[0]
if search_term in saved_shapes_scoped:
var_names.append((var.name.split(':')[0], var.name.split(':')[0][offset:]))
name2var = dict(zip(map(lambda x:x.name.split(':')[0],
tf.global_variables()), tf.global_variables()))
restore_variables = []
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
try:
curr_var = name2var[var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
found_variable = tf.get_variable(var_name)
restore_variables.append(found_variable.assign(reader.get_tensor(saved_var_name)))
            except Exception:
print("{} couldn't be loaded.".format(saved_var_name))
session.run(restore_variables)
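# Example usage (a sketch; the checkpoint path and scope are assumptions):
#   sess.run(tf.global_variables_initializer())
#   optimistic_restore(sess, 'log/model.ckpt-10000', variable_scope='net')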
| apache-2.0 | -7,966,682,305,243,552,000 | 33.446602 | 102 | 0.618377 | false | 3.770457 | false | false | false |
akaija/HTSOHM-dev | htsohm/db/material.py | 2 | 1466 | from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.orm import relationship
from htsohm.db import Base, GasLoading, SurfaceArea, VoidFraction
class Material(Base):
"""Declarative class mapping to table storing material/simulation data.
Attributes:
id (int): database table primary_key.
run_id (str): identification string for run.
"""
__tablename__ = 'materials'
id = Column(Integer, primary_key=True)
run_id = Column(String(50))
seed = Column(Float)
# structure properties
unit_cell_volume = Column(Float)
number_density = Column(Float)
average_epsilon = Column(Float)
average_sigma = Column(Float)
# structure property bins
density_bin = Column(Integer)
epsilon_bin = Column(Integer)
sigma_bin = Column(Integer)
# relationships
gas_loading = relationship("GasLoading")
surface_area = relationship("SurfaceArea")
void_fraction = relationship("VoidFraction")
    def __init__(self, run_id=None, seed=None):
        """Init material-row.

        Args:
            run_id (str): identification string for run (default = None).
            seed (float): seed used to generate the material (default = None).

        Initializes row in materials datatable.
        """
self.seed = seed
self.run_id = run_id
def clone(self):
copy = super(Material, self).clone()
return copy
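# Example (a sketch; SQLAlchemy session setup is assumed elsewhere):
#   material = Material(run_id='run_0', seed=0.42)
#   session.add(material)
#   session.commit()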
| mit | -5,930,481,133,151,788,000 | 28.32 | 75 | 0.618008 | false | 4.212644 | false | false | false |
magenta/magenta | magenta/models/onsets_frames_transcription/audio_label_data_utils.py | 1 | 14908 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilities for managing wav files and labels for transcription."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import math
import librosa
from note_seq import audio_io
from note_seq import constants
from note_seq import sequences_lib
from note_seq.protobuf import music_pb2
import numpy as np
import tensorflow.compat.v1 as tf
def velocity_range_from_sequence(ns):
"""Derive a VelocityRange proto from a NoteSequence."""
velocities = [note.velocity for note in ns.notes]
velocity_max = np.max(velocities) if velocities else 0
velocity_min = np.min(velocities) if velocities else 0
velocity_range = music_pb2.VelocityRange(min=velocity_min, max=velocity_max)
return velocity_range
def find_inactive_ranges(note_sequence):
"""Returns ranges where no notes are active in the note_sequence."""
start_sequence = sorted(
note_sequence.notes, key=lambda note: note.start_time, reverse=True)
end_sequence = sorted(
note_sequence.notes, key=lambda note: note.end_time, reverse=True)
notes_active = 0
time = start_sequence[-1].start_time
inactive_ranges = []
if time > 0:
inactive_ranges.append(0.)
inactive_ranges.append(time)
start_sequence.pop()
notes_active += 1
# Iterate through all note on events
while start_sequence or end_sequence:
if start_sequence and (start_sequence[-1].start_time <
end_sequence[-1].end_time):
if notes_active == 0:
time = start_sequence[-1].start_time
inactive_ranges.append(time)
notes_active += 1
start_sequence.pop()
else:
notes_active -= 1
if notes_active == 0:
time = end_sequence[-1].end_time
inactive_ranges.append(time)
end_sequence.pop()
# if the last note is the same time as the end, don't add it
# remove the start instead of creating a sequence with 0 length
if inactive_ranges[-1] < note_sequence.total_time:
inactive_ranges.append(note_sequence.total_time)
else:
inactive_ranges.pop()
assert len(inactive_ranges) % 2 == 0
inactive_ranges = [(inactive_ranges[2 * i], inactive_ranges[2 * i + 1])
for i in range(len(inactive_ranges) // 2)]
return inactive_ranges
def _last_zero_crossing(samples, start, end):
"""Returns the last zero crossing in the window [start, end)."""
samples_greater_than_zero = samples[start:end] > 0
samples_less_than_zero = samples[start:end] < 0
samples_greater_than_equal_zero = samples[start:end] >= 0
samples_less_than_equal_zero = samples[start:end] <= 0
# use np instead of python for loop for speed
xings = np.logical_or(
np.logical_and(samples_greater_than_zero[:-1],
samples_less_than_equal_zero[1:]),
np.logical_and(samples_less_than_zero[:-1],
samples_greater_than_equal_zero[1:])).nonzero()[0]
return xings[-1] + start if xings.size > 0 else None
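# For example (a sketch): with samples = np.array([1., -1., 1., 1.]),
# _last_zero_crossing(samples, 0, 4) returns 1, the last index i for which
# samples[i] and samples[i+1] lie on opposite sides of zero.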
def find_split_points(note_sequence, samples, sample_rate, min_length,
max_length):
"""Returns times at which there are no notes.
The general strategy employed is to first check if there are places in the
sustained pianoroll where no notes are active within the max_length window;
if so the middle of the last gap is chosen as the split point.
If not, then it checks if there are places in the pianoroll without sustain
where no notes are active and then finds last zero crossing of the wav file
and chooses that as the split point.
If neither of those is true, then it chooses the last zero crossing within
the max_length window as the split point.
If there are no zero crossings in the entire window, then it basically gives
up and advances time forward by max_length.
Args:
note_sequence: The NoteSequence to split.
samples: The audio file as samples.
sample_rate: The sample rate (samples/second) of the audio file.
min_length: Minimum number of seconds in a split.
max_length: Maximum number of seconds in a split.
Returns:
A list of split points in seconds from the beginning of the file.
"""
if not note_sequence.notes:
return []
end_time = note_sequence.total_time
note_sequence_sustain = sequences_lib.apply_sustain_control_changes(
note_sequence)
ranges_nosustain = find_inactive_ranges(note_sequence)
ranges_sustain = find_inactive_ranges(note_sequence_sustain)
nosustain_starts = [x[0] for x in ranges_nosustain]
sustain_starts = [x[0] for x in ranges_sustain]
nosustain_ends = [x[1] for x in ranges_nosustain]
sustain_ends = [x[1] for x in ranges_sustain]
split_points = [0.]
while end_time - split_points[-1] > max_length:
max_advance = split_points[-1] + max_length
# check for interval in sustained sequence
pos = bisect.bisect_right(sustain_ends, max_advance)
if pos < len(sustain_starts) and max_advance > sustain_starts[pos]:
split_points.append(max_advance)
# if no interval, or we didn't fit, try the unmodified sequence
elif pos == 0 or sustain_starts[pos - 1] <= split_points[-1] + min_length:
# no splits available, use non sustain notes and find close zero crossing
pos = bisect.bisect_right(nosustain_ends, max_advance)
if pos < len(nosustain_starts) and max_advance > nosustain_starts[pos]:
# we fit, great, try to split at a zero crossing
zxc_start = nosustain_starts[pos]
zxc_end = max_advance
last_zero_xing = _last_zero_crossing(
samples, int(math.floor(zxc_start * sample_rate)),
int(math.ceil(zxc_end * sample_rate)))
if last_zero_xing:
last_zero_xing = float(last_zero_xing) / sample_rate
split_points.append(last_zero_xing)
else:
# give up and just return where there are at least no notes
split_points.append(max_advance)
else:
# there are no good places to cut, so just pick the last zero crossing
# check the entire valid range for zero crossings
start_sample = int(
math.ceil((split_points[-1] + min_length) * sample_rate)) + 1
end_sample = start_sample + (max_length - min_length) * sample_rate
last_zero_xing = _last_zero_crossing(samples, start_sample, end_sample)
if last_zero_xing:
last_zero_xing = float(last_zero_xing) / sample_rate
split_points.append(last_zero_xing)
else:
# give up and advance by max amount
split_points.append(max_advance)
else:
# only advance as far as max_length
new_time = min(np.mean(ranges_sustain[pos - 1]), max_advance)
split_points.append(new_time)
if split_points[-1] != end_time:
split_points.append(end_time)
# ensure that we've generated a valid sequence of splits
for prev, curr in zip(split_points[:-1], split_points[1:]):
assert curr > prev
assert curr - prev <= max_length + 1e-8
if curr < end_time:
assert curr - prev >= min_length - 1e-8
assert end_time - split_points[-1] < max_length
return split_points
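# Example (a sketch; the values mirror the defaults used by process_record
# below):
#   splits = find_split_points(ns, samples, sample_rate=16000,
#                              min_length=5, max_length=20)
#   # -> e.g. [0.0, 18.7, 37.2, ..., ns.total_time]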
def create_example(example_id, ns, wav_data, velocity_range=None):
"""Creates a tf.train.Example proto for training or testing."""
if velocity_range is None:
velocity_range = velocity_range_from_sequence(ns)
# Ensure that all sequences for training and evaluation have gone through
# sustain processing.
sus_ns = sequences_lib.apply_sustain_control_changes(ns)
example = tf.train.Example(
features=tf.train.Features(
feature={
'id':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[example_id.encode('utf-8')])),
'sequence':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[sus_ns.SerializeToString()])),
'audio':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[wav_data])),
'velocity_range':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[velocity_range.SerializeToString()])),
}))
return example
def process_record(wav_data,
ns,
example_id,
min_length=5,
max_length=20,
sample_rate=16000,
allow_empty_notesequence=False,
load_audio_with_librosa=False):
"""Split a record into chunks and create an example proto.
To use the full length audio and notesequence, set min_length=0 and
max_length=-1.
Args:
wav_data: audio data in WAV format.
ns: corresponding NoteSequence.
example_id: id for the example proto
min_length: minimum length in seconds for audio chunks.
max_length: maximum length in seconds for audio chunks.
sample_rate: desired audio sample rate.
allow_empty_notesequence: whether an empty NoteSequence is allowed.
load_audio_with_librosa: Use librosa for sampling. Works with 24-bit wavs.
Yields:
Example protos.
"""
try:
if load_audio_with_librosa:
samples = audio_io.wav_data_to_samples_librosa(wav_data, sample_rate)
else:
samples = audio_io.wav_data_to_samples(wav_data, sample_rate)
except audio_io.AudioIOReadError as e:
    print('Exception %s' % e)
return
samples = librosa.util.normalize(samples, norm=np.inf)
# Add padding to samples if notesequence is longer.
pad_to_samples = int(math.ceil(ns.total_time * sample_rate))
padding_needed = pad_to_samples - samples.shape[0]
if padding_needed > 5 * sample_rate:
raise ValueError(
'Would have padded {} more than 5 seconds to match note sequence total '
'time. ({} original samples, {} sample rate, {} sample seconds, '
'{} sequence seconds) This likely indicates a problem with the source '
'data.'.format(
example_id, samples.shape[0], sample_rate,
samples.shape[0] / sample_rate, ns.total_time))
samples = np.pad(samples, (0, max(0, padding_needed)), 'constant')
if max_length == min_length:
splits = np.arange(0, ns.total_time, max_length)
elif max_length > 0:
splits = find_split_points(ns, samples, sample_rate, min_length, max_length)
else:
splits = [0, ns.total_time]
velocity_range = velocity_range_from_sequence(ns)
for start, end in zip(splits[:-1], splits[1:]):
if end - start < min_length:
continue
if start == 0 and end == ns.total_time:
new_ns = ns
else:
new_ns = sequences_lib.extract_subsequence(ns, start, end)
if not new_ns.notes and not allow_empty_notesequence:
tf.logging.warning('skipping empty sequence')
continue
if start == 0 and end == ns.total_time:
new_samples = samples
else:
# the resampling that happen in crop_wav_data is really slow
# and we've already done it once, avoid doing it twice
new_samples = audio_io.crop_samples(samples, sample_rate, start,
end - start)
new_wav_data = audio_io.samples_to_wav_data(new_samples, sample_rate)
yield create_example(
example_id, new_ns, new_wav_data, velocity_range=velocity_range)
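# Example usage (a sketch; the file paths and the TFRecord writer are
# assumptions):
#   with open('performance.wav', 'rb') as f:
#     wav_data = f.read()
#   with tf.python_io.TFRecordWriter('train.tfrecord') as writer:
#     for example in process_record(wav_data, ns, 'performance'):
#       writer.write(example.SerializeToString())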
def mix_sequences(individual_samples, sample_rate, individual_sequences):
"""Mix multiple audio/notesequence pairs together.
All sequences will be repeated until they are as long as the longest sequence.
Note that the mixed sequence will contain only the (sustain-processed) notes
from the individual sequences. All other control changes and metadata will not
be preserved.
Args:
individual_samples: A list of audio samples to mix.
sample_rate: Rate at which to interpret the samples
individual_sequences: A list of NoteSequences to mix.
Returns:
mixed_samples: The mixed audio.
mixed_sequence: The mixed NoteSequence.
"""
# Normalize samples and sequence velocities before mixing.
# This ensures that the velocities/loudness of the individual samples
# are treated equally.
for i, samples in enumerate(individual_samples):
individual_samples[i] = librosa.util.normalize(samples, norm=np.inf)
for sequence in individual_sequences:
velocities = [note.velocity for note in sequence.notes]
velocity_max = np.max(velocities)
for note in sequence.notes:
note.velocity = int(
(note.velocity / velocity_max) * constants.MAX_MIDI_VELOCITY)
# Ensure that samples are always at least as long as their paired sequences.
for i, (samples, sequence) in enumerate(
zip(individual_samples, individual_sequences)):
if len(samples) / sample_rate < sequence.total_time:
padding = int(math.ceil(
(sequence.total_time - len(samples) / sample_rate) * sample_rate))
individual_samples[i] = np.pad(samples, [0, padding], 'constant')
# Repeat each ns/wav pair to be as long as the longest wav.
max_duration = np.max([len(s) for s in individual_samples]) / sample_rate
extended_samples = []
extended_sequences = []
for samples, sequence in zip(individual_samples, individual_sequences):
extended_samples.append(
audio_io.repeat_samples_to_duration(samples, sample_rate, max_duration))
extended_sequences.append(
sequences_lib.repeat_sequence_to_duration(
sequence, max_duration,
sequence_duration=len(samples) / sample_rate))
# Mix samples and sequences together
mixed_samples = np.zeros_like(extended_samples[0])
for samples in extended_samples:
mixed_samples += samples / len(extended_samples)
mixed_sequence = music_pb2.NoteSequence()
mixed_sequence.ticks_per_quarter = constants.STANDARD_PPQ
del mixed_sequence.notes[:]
for sequence in extended_sequences:
# Process sustain changes before copying notes.
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
if sus_sequence.total_time > mixed_sequence.total_time:
mixed_sequence.total_time = sus_sequence.total_time
# TODO(fjord): Manage instrument/program numbers.
mixed_sequence.notes.extend(sus_sequence.notes)
return mixed_samples, mixed_sequence
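# Example (a sketch):
#   mixed_samples, mixed_ns = mix_sequences(
#       [samples_a, samples_b], 16000, [ns_a, ns_b])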
| apache-2.0 | -474,272,086,295,592,960 | 36.741772 | 80 | 0.669775 | false | 3.716779 | false | false | false |
82Flex/DCRM | WEIPDCRM/manage/admin/build.py | 1 | 15565 | # coding:utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import os
import bz2
import gzip
import shutil
import hashlib
import subprocess
from PIL import Image
from django.conf import settings
from django.contrib.sites.models import Site
from django.forms import ModelForm
from django.contrib import admin
from django.urls import reverse
from django.utils.safestring import mark_safe
from django_rq import queues
from preferences import preferences
from django.contrib.admin.actions import delete_selected
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from suit.widgets import AutosizedTextarea
from WEIPDCRM.models.build import Build
from WEIPDCRM.models.package import Package
from WEIPDCRM.models.version import Version
from WEIPDCRM.models.release import Release
from WEIPDCRM.models.debian_package import DebianPackage
from WEIPDCRM.tools import mkdir_p
if settings.ENABLE_REDIS is True:
import django_rq
def build_procedure(conf):
"""
This is the main package list building procedure.
"""
if not conf["build_p_diff"]:
# Build Package file
build_all_versions_enabled = conf["build_all"]
# Get Package List QuerySet
if build_all_versions_enabled:
version_set = Version.objects.filter(enabled=True).order_by('-id')
version_count = version_set.count()
else:
version_set = Version.objects.raw(
"SELECT * FROM `WEIPDCRM_version` "
"WHERE `enabled` = TRUE "
"GROUP BY `c_package` "
"ORDER BY `c_package`, `id` DESC"
)
version_count = 0
for version in version_set:
version_count += 1
# Check Empty
if version_count == 0:
raise ValueError(_("No enabled package available."))
# Preparing Temp Directory
build_temp_path = os.path.join(settings.TEMP_ROOT, str(conf["build_uuid"]))
if not os.path.exists(build_temp_path):
mkdir_p(build_temp_path)
# Create Temp Package file
build_temp_package = open(os.path.join(build_temp_path, "Packages"), "wb+")
# Generate Control List
depiction_url = ""
if preferences.Setting.advanced_mode:
site = Site.objects.get(id=settings.SITE_ID)
scheme = "http"
if settings.SECURE_SSL is True:
scheme = "https"
depiction_url = "%s://%s" % (scheme, site.domain)
for version_instance in version_set:
# !!! HERE WE SHOULD USE ADVANCED CONTROL DICT !!!
control_dict = version_instance.get_advanced_control_dict()
if (not version_instance.custom_depiction) and len(depiction_url) != 0:
control_dict["Depiction"] = depiction_url + version_instance.get_absolute_url()
if version_instance.online_icon is not None and len(str(version_instance.online_icon)) > 0:
control_dict["Icon"] = depiction_url + os.path.join(str(preferences.Setting.resources_alias), version_instance.online_icon.name)
DebianPackage.get_control_content(control_dict, build_temp_package)
build_temp_package.write("\n".encode("utf-8"))
# Compression Gzip
build_temp_package.seek(0)
if conf["build_compression"] == 1 \
or conf["build_compression"] == 2 \
or conf["build_compression"] == 5 \
or conf["build_compression"] == 6:
build_temp_package_gz = gzip.open(os.path.join(build_temp_path, "Packages.gz"), mode="wb")
while True:
cache = build_temp_package.read(16 * 1024) # 16k cache
if not cache:
break
build_temp_package_gz.write(cache)
build_temp_package_gz.close()
# Compression Bzip
build_temp_package.seek(0)
if conf["build_compression"] == 3 \
or conf["build_compression"] == 4 \
or conf["build_compression"] == 5 \
or conf["build_compression"] == 6:
build_temp_package_bz2 = bz2.BZ2File(os.path.join(build_temp_path, "Packages.bz2"), mode="wb")
while True:
cache = build_temp_package.read(16 * 1024) # 16k cache
if not cache:
break
build_temp_package_bz2.write(cache)
build_temp_package_bz2.close()
# Close original Package file
build_temp_package.close()
# Release
active_release = Release.objects.get(id=conf["build_release"])
active_release_control_dict = active_release.get_control_field()
build_temp_release = open(os.path.join(build_temp_path, "Release"), mode="wb")
DebianPackage.get_control_content(active_release_control_dict, build_temp_release)
# Checksum
if conf["build_secure"] is True:
def hash_file(hash_obj, file_path):
with open(file_path, "rb") as f:
for block in iter(lambda: f.read(65535), b""):
hash_obj.update(block)
checksum_list = [
"Packages",
"Packages.gz",
"Packages.bz2"
]
build_validation_titles = [
"MD5Sum", "SHA1", "SHA256", "SHA512"
]
build_validation_methods = [
hashlib.md5, hashlib.sha1, hashlib.sha256, hashlib.sha512
]
            # Use a loop to iterate over the different validation methods
for build_validation_index in range(0, 3):
if conf["build_validation"] > build_validation_index:
build_temp_release.write((build_validation_titles[build_validation_index] + ":\n").encode("utf-8"))
for checksum_instance in checksum_list:
checksum_path = os.path.join(build_temp_path, checksum_instance)
if os.path.exists(checksum_path):
m2 = build_validation_methods[build_validation_index]()
hash_file(m2, checksum_path)
p_hash = m2.hexdigest()
p_size = os.path.getsize(checksum_path)
build_temp_release.write(
(" " + p_hash +
" " + str(p_size) +
" " + checksum_instance +
"\n").encode("utf-8")
)
build_temp_release.close()
if conf["build_secure"] is True:
# GPG Signature
"""
Use 'gpg --gen-key' to generate GnuPG key before using this function.
"""
password = preferences.Setting.gpg_password
if password is not None and len(password) > 0:
subprocess.check_call(
["gpg", "-abs", "--homedir", os.path.join(settings.BASE_DIR, '.gnupg'), "--batch", "--yes", "--passphrase", password, "-o",
os.path.join(build_temp_path, "Release.gpg"),
os.path.join(build_temp_path, "Release"),
]
)
else:
subprocess.check_call(
["gpg", "-abs", "--homedir", os.path.join(settings.BASE_DIR, '.gnupg'), "--batch", "--yes", "-o",
os.path.join(build_temp_path, "Release.gpg"),
os.path.join(build_temp_path, "Release"),
]
)
# Preparing Directory
release_root = os.path.join(
settings.MEDIA_ROOT,
"releases",
str(active_release.id),
)
build_path = os.path.join(
release_root,
"builds",
str(conf["build_uuid"])
)
if not os.path.isdir(build_path):
mkdir_p(build_path)
# Publish
rename_list = [
"Release",
"Release.gpg",
"Packages",
"Packages.gz",
"Packages.bz2"
]
for rename_instance in rename_list:
rename_path = os.path.join(build_temp_path, rename_instance)
rename_to_path = os.path.join(build_path, rename_instance)
active_path = os.path.join(release_root, rename_instance)
if os.path.exists(rename_path):
if os.path.exists(active_path):
os.unlink(active_path)
shutil.copyfile(rename_path, active_path)
os.chmod(active_path, 0o755)
# os.rename(rename_path, rename_to_path)
shutil.move(rename_path, rename_to_path)
os.chmod(rename_to_path, 0o755)
else:
if os.path.exists(rename_to_path):
os.unlink(rename_to_path)
if os.path.exists(active_path):
os.unlink(active_path)
def thumb_png(png_path):
img = Image.open(png_path)
img.thumbnail((60, 60), Image.ANTIALIAS)
img.save(png_path)
# Cydia Icon
cydia_icon_path = os.path.join(release_root, "CydiaIcon.png")
if os.path.exists(cydia_icon_path):
os.unlink(cydia_icon_path)
if active_release.icon is not None and len(str(active_release.icon)) > 0:
src_path = os.path.join(settings.MEDIA_ROOT, active_release.icon.name)
if os.path.exists(src_path):
shutil.copyfile(
src_path,
cydia_icon_path
)
else:
src_path = os.path.join(settings.STATIC_ROOT, "img/CydiaIcon.png")
if os.path.exists(src_path):
shutil.copyfile(
src_path,
cydia_icon_path
)
if os.path.exists(cydia_icon_path):
thumb_png(cydia_icon_path)
os.chmod(cydia_icon_path, 0o755)
build_instance = Build.objects.get(uuid=str(conf["build_uuid"]))
if build_instance is not None:
build_instance.is_finished = True
build_instance.save()
else:
# TODO: Pdiffs Feature
pass
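# Example conf (a sketch; the keys mirror those assembled in
# BuildAdmin.save_model below):
#   build_procedure({
#       "build_uuid": build.uuid,        # uuid of an existing Build row
#       "build_all": False,              # latest version of each package only
#       "build_p_diff": False,           # pdiffs not implemented yet
#       "build_compression": 5,          # write Packages.gz and Packages.bz2
#       "build_secure": True,            # GPG-sign the Release file
#       "build_validation": 3,           # MD5 + SHA1 + SHA256 checksums
#       "build_release": release.id,
#   })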
class BuildForm(ModelForm):
class Meta(object):
widgets = {
'details': AutosizedTextarea,
}
class BuildAdmin(admin.ModelAdmin):
form = BuildForm
actions = [delete_selected]
list_display = ('uuid', 'active_release', 'is_finished', 'created_at')
search_fields = ['uuid']
fieldsets = [
(_('General'), {
'fields': ['active_release', 'job_link', 'details']
}),
(_('History'), {
'fields': ['created_at']
}),
]
change_form_template = "admin/build/change_form.html"
change_list_template = "admin/build/change_list.html"
def job_link(self, obj):
if obj.job_id is None:
if obj.is_finished:
return mark_safe("<img src=\"/static/admin/img/icon-yes.svg\" alt=\"True\" /> %s" % _('Finished'))
else:
return mark_safe("<img src=\"/static/admin/img/icon-unknown.svg\" alt=\"Unknown\" /> %s" % _('Unknown'))
m_job = queues.get_queue('high').fetch_job(obj.job_id)
if m_job is None:
return _('No such job')
if m_job.is_failed:
status_str = mark_safe("<img src=\"/static/admin/img/icon-no.svg\" alt=\"False\" /> %s" % _('Failed'))
elif m_job.is_finished:
if obj.is_finished:
status_str = mark_safe("<img src=\"/static/admin/img/icon-yes.svg\" alt=\"True\" /> %s" % _('Finished'))
else:
status_str = mark_safe(
"<img src=\"/static/admin/img/icon-unknown.svg\" alt=\"Unknown\" /> %s" % _('Unknown'))
else:
status_str = mark_safe("<img src=\"/static/img/icon-loading.svg\" width=\"13\" alt=\"Loading\" "
"onload=\"setTimeout(function () { window.location.reload(); }, 2000);\" /> "
"%s" % _("Processing..."))
return mark_safe('<a href="%s" target="_blank">%s</a>' % (
reverse('rq_job_detail', kwargs={
'queue_index': 1,
'job_id': m_job.id
}),
status_str
))
job_link.short_description = _("Job")
job_link.allow_tags = True
def has_add_permission(self, request):
return preferences.Setting.active_release is not None and Package.objects.count() != 0
def get_readonly_fields(self, request, obj=None):
if not obj:
return ['active_release', 'job_link', 'created_at']
else:
return ['active_release', 'job_link', 'created_at', 'details']
def save_model(self, request, obj, form, change):
"""
Set the active release, call building procedure, and then save.
:type obj: Build
"""
setting = preferences.Setting
obj.active_release = setting.active_release
super(BuildAdmin, self).save_model(request, obj, form, change)
if setting.active_release is not None:
build_args = {
"build_uuid": obj.uuid,
"build_all": setting.downgrade_support,
"build_p_diff": setting.enable_pdiffs,
"build_compression": setting.packages_compression,
"build_secure": setting.gpg_signature,
"build_validation": setting.packages_validation,
"build_release": obj.active_release.id,
}
if settings.ENABLE_REDIS is True:
queue = django_rq.get_queue('high')
build_job = queue.enqueue(build_procedure, build_args)
obj.job_id = build_job.id
messages.info(request, mark_safe(
_("The Build \"<a href=\"{job_detail}\">{obj}</a>\" generating job has been added to the \"<a href=\"{jobs}\">high</a>\" queue.").format(
job_detail=reverse('rq_job_detail', kwargs={
'queue_index': 1,
'job_id': build_job.id,
}),
obj=str(obj),
jobs=reverse('rq_jobs', args=(1, )),
)
))
else:
build_procedure(build_args)
messages.info(request, _("The Build \"%s\" generating job has been finished.") % str(obj))
obj.save()
| agpl-3.0 | -7,491,214,560,196,319,000 | 39.219638 | 157 | 0.539608 | false | 4.055498 | false | false | false |
lino-framework/book | lino_book/projects/min9/settings.py | 1 | 2375 | # -*- coding: UTF-8 -*-
# Copyright 2012-2021 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino.projects.std.settings import *
from lino.utils import i2d
class Site(Site):
title = "Lino Mini 9"
project_model = 'contacts.Person'
languages = 'en de fr'
user_types_module = 'lino_xl.lib.xl.user_types'
demo_fixtures = """std demo demo2 checkdata""".split()
default_build_method = 'weasy2pdf'
the_demo_date = i2d(20141023)
webdav_protocol = 'davlink'
def get_installed_apps(self):
yield super(Site, self).get_installed_apps()
# yield 'lino.modlib.users'
yield 'lino_book.projects.min9.modlib.contacts'
yield 'lino_xl.lib.excerpts'
yield 'lino_xl.lib.addresses'
yield 'lino_xl.lib.phones'
yield 'lino_xl.lib.reception'
yield 'lino_xl.lib.courses'
yield 'lino_xl.lib.sepa'
yield 'lino_xl.lib.notes'
# yield 'lino_xl.lib.projects'
yield 'lino_xl.lib.humanlinks'
yield 'lino_xl.lib.households'
yield 'lino_xl.lib.calview'
# yield 'lino_xl.lib.extensible'
yield 'lino_xl.lib.pages'
yield 'lino.modlib.export_excel'
yield 'lino_xl.lib.dupable_partners'
yield 'lino.modlib.checkdata'
yield 'lino.modlib.tinymce'
# yield 'lino.modlib.wkhtmltopdf'
yield 'lino.modlib.weasyprint'
yield 'lino_xl.lib.appypod'
yield 'lino.modlib.notify'
yield 'lino.modlib.changes'
yield 'lino.modlib.comments'
yield 'lino.modlib.uploads'
yield 'lino_xl.lib.properties'
yield 'lino_xl.lib.cv'
yield 'lino_xl.lib.b2c'
yield 'lino_xl.lib.sales'
yield 'lino_xl.lib.finan'
def get_plugin_configs(self):
"""
Change the default value of certain plugin settings.
"""
yield super(Site, self).get_plugin_configs()
yield ('countries', 'country_code', 'BE')
yield ('b2c', 'import_statements_path', self.project_dir.child('sepa_in'))
def do_site_startup(self):
# lino_xl.lib.reception requires some workflow to be imported
from lino_xl.lib.cal.workflows import feedback
super(Site, self).do_site_startup()
SITE = Site(globals())
# ALLOWED_HOSTS = ['*']
DEBUG = True
# SECRET_KEY = "20227" # see :djangoticket:`20227`
| bsd-2-clause | 7,751,269,490,660,809,000 | 32.450704 | 82 | 0.621053 | false | 3.108639 | false | false | false |
liuyonggg/learning_python | leetcode/wordpattern.py | 1 | 1080 | '''
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
You may assume pattern contains only lowercase letters, and str contains lowercase letters separated by a single space.
'''
def wordPattern(pattern, str):
s = pattern
t = str.split()
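    # Map every element to the index of its first occurrence; two sequences
    # follow the same pattern iff these canonical forms match, e.g.
    # 'abba' -> [0, 1, 1, 0] and 'dog cat cat dog'.split() -> [0, 1, 1, 0].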
    return list(map(s.find, s)) == list(map(t.index, t))
if __name__ == "__main__":
assert (wordPattern('abba', 'dog cat cat dog'))
assert (not wordPattern('abba', 'dog dog cat dog'))
assert (not wordPattern('abba', 'dog cat cat fish'))
assert (not wordPattern('aaaa', 'dog cat cat dog'))
assert (wordPattern('aaaa', 'dog dog dog dog'))
assert (not wordPattern('abba', 'dog dog dog dog'))
print ('done')
| mit | -168,464,825,315,798,340 | 39 | 119 | 0.676852 | false | 3.564356 | false | false | false |
Diti24/python-ivi | ivi/dicon/diconGP700.py | 1 | 20788 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
from .. import ivi
from .. import scpi
class diconGP700(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest, scpi.common.Memory,
ivi.Driver):
"DiCon Fiberoptics GP700 Programmable Fiberoptic Instrument"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'GP700')
super(diconGP700, self).__init__(*args, **kwargs)
self._identity_description = "DiCon Fiberoptics GP700 Programmable Fiberoptic Instrument"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "DiCon Fiberoptics Inc"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['GP700']
self._self_test_delay = 5
self._memory_size = 8
self._memory_offset = 1
self._config = ""
self._attenuator_count = 0
self._attenuator_name = list()
self._attenuator_level = list()
self._attenuator_level_max = list()
self._filter_count = 0
self._filter_name = list()
self._filter_wavelength = list()
self._filter_wavelength_max = list()
self._filter_wavelength_min = list()
self._matrix_input_count = 0
self._matrix_input_name = list()
self._matrix_output_count = 0
self._matrix_input_output = list()
self._switch_count = 0
self._switch_name = list()
self._switch_output = list()
self._switch_input = list()
self._switch_output_count = list()
self._switch_input_count = list()
self._add_property('attenuators[].level',
self._get_attenuator_level,
self._set_attenuator_level,
None,
ivi.Doc("""
Specifies the level of the attenuator module. The units are dB.
"""))
self._add_property('attenuators[].level_max',
self._get_attenuator_level_max,
None,
None,
ivi.Doc("""
Returns the maximum attenuation level supported. The units are dB.
"""))
self._add_property('attenuators[].name',
self._get_attenuator_name,
None,
None,
ivi.Doc("""
Returns the name of the attenuator module.
"""))
self._add_property('filters[].wavelength',
self._get_filter_wavelength,
self._set_filter_wavelength,
None,
ivi.Doc("""
Specifies the center wavelength of the filter module. The units are nm.
"""))
self._add_property('filters[].wavelength_max',
self._get_filter_wavelength_max,
None,
None,
ivi.Doc("""
Returns the maximum center wavelength of the filter. The units are nm.
"""))
self._add_property('filters[].wavelength_min',
self._get_filter_wavelength_min,
None,
None,
ivi.Doc("""
Returns the minimum center wavelength of the filter. The units are nm.
"""))
self._add_property('filters[].name',
self._get_filter_name,
None,
None,
ivi.Doc("""
Returns the name of the filter module.
"""))
self._add_property('switches[].output',
self._get_switch_output,
self._set_switch_output,
None,
ivi.Doc("""
Specify switch output connection.
"""))
self._add_property('switches[].output_count',
self._get_switch_output_count,
None,
None,
ivi.Doc("""
Query number of outputs supported by switch.
"""))
self._add_property('switches[].input',
self._get_switch_input,
self._set_switch_input,
None,
ivi.Doc("""
Specify switch input connection.
"""))
self._add_property('switches[].input_count',
self._get_switch_input_count,
None,
None,
ivi.Doc("""
Query number of inputs supported by switch.
"""))
self._add_method('switches[].get',
self._switch_get,
ivi.Doc("""
Get current switch input and output configuration.
"""))
self._add_method('switches[].set',
self._switch_set,
ivi.Doc("""
Set switch input and output configuration.
"""))
self._add_property('switches[].name',
self._get_switch_name,
None,
None,
ivi.Doc("""
Returns the name of the switch module.
"""))
self._add_method('memory.save',
self._memory_save,
ivi.Doc("""
Save device configuration to the specified memory slot.
"""))
self._add_method('memory.recall',
self._memory_recall,
ivi.Doc("""
Recall device configuration from the specified memory slot.
"""))
if self._initialized_from_constructor:
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(diconGP700, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
if not self._initialized_from_constructor:
self._init_channels()
def _utility_disable(self):
pass
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
time.sleep(0.1)
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_unlock_object(self):
pass
def _init_channels(self):
try:
super(diconGP700, self)._init_channels()
except AttributeError:
pass
config = self._get_config()
self._attenuator_count = 0
self._attenuator_name = list()
self._attenuator_level = list()
self._attenuator_level_max = list()
self._filter_count = 0
self._filter_name = list()
self._filter_wavelength = list()
self._filter_wavelength_max = list()
self._filter_wavelength_min = list()
self._matrix_input_count = 0
self._matrix_input_name = list()
self._matrix_output_count = 0
self._matrix_input_output = list()
self._switch_count = 0
self._switch_name = list()
self._switch_output = list()
self._switch_input = list()
self._switch_output_count = list()
self._switch_input_count = list()
lst = config.split(",")
lst = [x.strip() for x in lst]
lst.sort()
for itm in lst:
v = itm.split(" ")
if len(itm) == 0:
continue
if v[0] == 'MATRIX':
self._matrix_input_count = int(v[1][5:])
self._matrix_output_count = int(v[2][6:])
elif itm[0] == 'A':
if v[0] not in self._attenuator_name:
self._attenuator_count += 1
self._attenuator_name.append(v[0])
self._attenuator_level.append(0.0)
self._attenuator_level_max.append(0.0)
i = ivi.get_index(self._attenuator_name, v[0])
self._attenuator_level[i] = 0.0
self._attenuator_level_max[i] = float(v[1])
elif itm[0] == 'F':
if v[0] not in self._filter_name:
self._filter_count += 1
self._filter_name.append(v[0])
self._filter_wavelength.append(0.0)
self._filter_wavelength_min.append(0.0)
self._filter_wavelength_max.append(0.0)
i = ivi.get_index(self._filter_name, v[0])
self._filter_wavelength[i] = 0.0
self._filter_wavelength_min[i] = float(v[1][3:])
self._filter_wavelength_max[i] = float(v[2][3:])
elif itm[0] == 'M':
if v[0] not in self._switch_name:
self._switch_count += 1
self._switch_name.append(v[0])
self._switch_input.append(0)
self._switch_output.append(0)
self._switch_input_count.append(0)
self._switch_output_count.append(0)
i = ivi.get_index(self._switch_name, v[0])
self._switch_input[i] = 1
self._switch_output[i] = 0
self._switch_input_count[i] = int(v[2][1:])
self._switch_output_count[i] = int(v[1][1:])
elif itm[0] == 'P':
if v[0] not in self._switch_name:
self._switch_count += 1
self._switch_name.append(v[0])
self._switch_input.append(0)
self._switch_output.append(0)
self._switch_input_count.append(0)
self._switch_output_count.append(0)
i = ivi.get_index(self._switch_name, v[0])
self._switch_input[i] = 1
self._switch_output[i] = 0
self._switch_input_count[i] = 1
self._switch_output_count[i] = int(v[1][7:])
elif itm[0] == 'S':
cnt = int(v[0][1:])
for i in range(cnt):
n = 'S%02d' % (i+1)
if n not in self._switch_name:
self._switch_count += 1
self._switch_name.append(n)
self._switch_input.append(0)
self._switch_output.append(0)
self._switch_input_count.append(0)
self._switch_output_count.append(0)
i = ivi.get_index(self._switch_name, n)
self._switch_input[i] = 1
self._switch_output[i] = 1
self._switch_input_count[i] = 1
self._switch_output_count[i] = 2
self.attenuators._set_list(self._attenuator_name)
self.filters._set_list(self._filter_name)
self.switches._set_list(self._switch_name)
def _get_config(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._config = self._ask("system:config?")
self._set_cache_valid()
return self._config
def _get_attenuator_level(self, index):
index = ivi.get_index(self._attenuator_name, index)
name = self._attenuator_name[index]
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("%s?" % (name))
            self._attenuator_level[index] = float(resp)
self._set_cache_valid()
return self._attenuator_level[index]
def _set_attenuator_level(self, index, value):
index = ivi.get_index(self._attenuator_name, index)
name = self._attenuator_name[index]
value = float(value)
if value < 0 or value > self._attenuator_level_max[index]:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("%s %f" % (name, value))
self._attenuator_level[index] = value
self._set_cache_valid()
def _get_attenuator_level_max(self, index):
index = ivi.get_index(self._attenuator_name, index)
return self._attenuator_level_max[index]
def _get_attenuator_name(self, index):
index = ivi.get_index(self._attenuator_name, index)
return self._attenuator_name[index]
def _get_filter_wavelength(self, index):
index = ivi.get_index(self._filter_name, index)
name = self._filter_name[index]
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("%s?" % (name))
            self._filter_wavelength[index] = float(resp)
self._set_cache_valid()
return self._filter_wavelength[index]
def _set_filter_wavelength(self, index, value):
index = ivi.get_index(self._filter_name, index)
name = self._filter_name[index]
value = float(value)
if value < self._filter_wavelength_min[index] or value > self._filter_wavelength_max[index]:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("%s %f" % (name, value))
self._filter_wavelength[index] = value
self._set_cache_valid()
def _get_filter_wavelength_max(self, index):
index = ivi.get_index(self._filter_name, index)
        return self._filter_wavelength_max[index]
def _get_filter_wavelength_min(self, index):
index = ivi.get_index(self._filter_name, index)
        return self._filter_wavelength_min[index]
def _get_filter_name(self, index):
index = ivi.get_index(self._filter_name, index)
return self._filter_name[index]
def _get_switch_output(self, index):
return self._switch_get(index)[0]
def _set_switch_output(self, index, value):
self._switch_set(index, value)
def _get_switch_output_count(self, index):
index = ivi.get_index(self._switch_name, index)
return self._switch_output_count[index]
def _get_switch_input(self, index):
return self._switch_get(index)[1]
def _set_switch_input(self, index, value):
index = ivi.get_index(self._switch_name, index)
self._switch_set(index, self._switch_output[index], value)
def _get_switch_input_count(self, index):
index = ivi.get_index(self._switch_name, index)
return self._switch_input_count[index]
def _switch_get(self, index):
index = ivi.get_index(self._switch_name, index)
name = self._switch_name[index]
if name[0] == 'M':
if not self._driver_operation_simulate:
if not self._get_cache_valid('switch_output', index) or not self._get_cache_valid('switch_input', index):
#if True:
resp = self._ask("%s?" % name)
lst = resp.split(',')
self._switch_output[index] = int(lst[0].strip())
self._switch_input[index] = int(lst[1].strip())
self._set_cache_valid(True, 'switch_output', index)
self._set_cache_valid(True, 'switch_input', index)
return (self._switch_output[index], self._switch_input[index])
elif name[0] == 'P' or name[0] == 'S':
if not self._driver_operation_simulate:
if not self._get_cache_valid('switch_output', index):
#if True:
resp = self._ask("%s?" % name)
self._switch_output[index] = int(resp.strip())
self._switch_input[index] = 1
self._set_cache_valid(True, 'switch_output', index)
self._set_cache_valid(True, 'switch_input', index)
return (self._switch_output[index], self._switch_input[index])
def _switch_set(self, index, output, input=None):
index = ivi.get_index(self._switch_name, index)
name = self._switch_name[index]
output = int(output)
if input is not None:
input = int(input)
if name[0] == 'M':
if output < 0 or output > self._switch_output_count[index]:
raise ivi.OutOfRangeException()
if input is not None and (input < 1 or input > self._switch_input_count[index]):
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
if input is None:
self._write("%s %d" % (name, output))
else:
self._write("%s %d, %d" % (name, output, input))
else:
self._switch_output[index] = output
self._set_cache_valid(True, 'switch_output', index)
if input is not None:
self._switch_input[index] = input
self._set_cache_valid(True, 'switch_input', index)
elif name[0] == 'P' or name[0] == 'S':
if output < 1 or output > self._switch_output_count[index]:
raise ivi.OutOfRangeException()
if input is not None and input != 1:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("%s %d" % (name, output))
else:
self._switch_output[index] = output
self._switch_input[index] = 1
self._set_cache_valid(True, 'switch_output', index)
self._set_cache_valid(True, 'switch_input', index)
def _get_switch_name(self, index):
index = ivi.get_index(self._switch_name, index)
return self._switch_name[index]
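# Example usage (a sketch; the VISA resource string is an assumption):
#   gp700 = diconGP700("TCPIP0::192.168.1.100::INSTR")
#   gp700.switches['M01'].set(2, 1)        # connect output 2 to input 1
#   gp700.attenuators['A01'].level = 3.5   # dB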
| mit | 4,720,886,702,923,921,000 | 39.681018 | 121 | 0.502117 | false | 4.32183 | true | false | false |
cjvogl/finite_volume_seismic_model | 2d/setplot_both.py | 1 | 4889 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
from mapping import Mapping
from clawpack.clawutil.data import ClawData
import dtopotools_horiz_okada_and_1d as dtopotools
reload(dtopotools)
from clawpack.geoclaw.data import LAT2METER
length_scale = 1.0e-3 # m to km
xlimits = [-150e3*length_scale,200e3*length_scale]
ylimits_fault = [-175e3*length_scale,0.0*length_scale]
ylimits_surface = [-0.3,0.5]
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
fault = dtopotools.Fault()
fault.read(plotdata.outdir + '/fault.data')
mapping = Mapping(fault)
xp1 = mapping.xp1*length_scale
xp2 = mapping.xp2*length_scale
yp1 = mapping.yp1*length_scale
yp2 = mapping.yp2*length_scale
gaugedata = ClawData()
gaugedata.read(plotdata.outdir + '/gauges.data', force=True)
ngauges = gaugedata.ngauges
xc = np.zeros(ngauges)
for j in range(ngauges):
g = plotdata.getgauge(j)
xc[j] = g.location[0]
fault.create_dtopography(xc/LAT2METER,np.array([0.]),[1.0],y_disp=True)
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'binary'
def mapc2p(xc,yc):
xp,yp = mapping.mapc2p(xc,yc)
return xp*length_scale,yp*length_scale
def plot_fault(current_data):
from pylab import linspace, plot, xlabel, ylabel, tick_params
xl = linspace(xp1,xp2,100)
yl = linspace(yp1,yp2,100)
plot(xl,yl,'k',linewidth=3)
tick_params(labelsize=25)
xlabel('kilometers',fontsize=25)
ylabel('kilometers',fontsize=25)
def sigmatr(current_data):
# return -trace(sigma)
q = current_data.q
return -(q[0,:,:] + q[1,:,:])
def plot_vertical_displacement(current_data):
from pylab import plot,zeros,ylabel,tick_params
t = current_data.t
ys = zeros(ngauges)
for gaugeno in range(ngauges):
g = plotdata.getgauge(gaugeno)
for k in range(1,len(g.t)):
if g.t[k] > t:
break
dt = g.t[k] - g.t[k-1]
v = 0.5*(g.q[4,k]+g.q[4,k-1])
ys[gaugeno] += dt*v
plot(xc[:ngauges]*length_scale,ys,linewidth=3)
plot(xc*length_scale,fault.dtopo.dZ[0,0,:],linestyle='--',color='r',linewidth=3)
tick_params(labelsize=25)
ylabel('meters',fontsize=25)
# Figure for surface
plotfigure = plotdata.new_plotfigure(name='surface', figno=1)
plotfigure.kwargs = {'figsize':(11,4)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_surface
plotaxes.title_with_t = False
plotaxes.title = ''
plotaxes.scaled = False
plotaxes.afteraxes = plot_vertical_displacement
# Figure for fault
plotfigure = plotdata.new_plotfigure(name='fault', figno=2)
plotfigure.kwargs = {'figsize':(11,6)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_fault
plotaxes.title = ''
plotaxes.title_with_t = False
plotaxes.scaled = True
plotaxes.afteraxes = plot_fault
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = sigmatr
plotitem.pcolor_cmap = colormaps.blue_white_red
plotitem.pcolor_cmin = -1e6
plotitem.pcolor_cmax = 1e6
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
plotitem.MappedGrid = True
plotitem.mapc2p = mapc2p
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True
return plotdata
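# setplot() is normally called by the clawpack plotting drivers; a minimal
# sketch of direct use (the output directory is an assumption):
#   from clawpack.visclaw.data import ClawPlotData
#   plotdata = ClawPlotData()
#   plotdata.outdir = '_output'
#   plotdata = setplot(plotdata)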
| gpl-3.0 | 2,835,834,433,527,814,700 | 32.258503 | 88 | 0.633667 | false | 3.162354 | false | false | false |
blitzagency/django-chatterbox | chatterbox/drivers/facebook.py | 1 | 1460 | import logging
from six.moves.urllib.parse import parse_qsl
from .oauth import OAuth2
from .oauth import BEARER_URI
log = logging.getLogger(__name__)
class Facebook(OAuth2):
# General info about the provider
provider_url = 'https://facebook.com/'
docs_url = 'https://developers.facebook.com/docs/'
category = 'Social'
api_path = 'chatterbox.api.facebook.Facebook'
refresh_url = None
# URLs to interact with the API
authorize_url = 'https://www.facebook.com/dialog/oauth'
access_token_url = 'https://graph.facebook.com/oauth/access_token'
api_domain = 'graph.facebook.com'
bearer_type = BEARER_URI
available_permissions = [
(None, 'read your basic, public information'), # public_profile
('email', 'access your email address'),
('read_stream', 'access to read the posts your news feed, or your profile'),
('user_about_me', 'access your profile information'),
('user_checkins', 'access your checkins'),
('user_events', 'access your events'),
('user_groups', 'access your groups'),
('user_likes', 'access the things you like'),
('user_location', 'access your location'),
('user_photos', 'access your photos'),
('user_status', 'access your most recent status'),
]
def parse_token(self, content):
data = dict(parse_qsl(content))
data['expires_in'] = data.get('expires', None)
return data
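# Example (a sketch): Facebook returns the token as a query string, so
#   parse_token('access_token=abc123&expires=5179536')
# yields {'access_token': 'abc123', 'expires': '5179536',
#         'expires_in': '5179536'}.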
| mit | 505,490,276,797,752,770 | 33.761905 | 84 | 0.639726 | false | 3.852243 | false | false | false |
cgallemore/djvasa | djvasa/main.py | 1 | 5398 | import argparse
import pystache
import os
import random
import sys
from djvasa.templates import View
class Project(object):
_secret_key = None
def __init__(self, **kwargs):
self.project_name = raw_input("What's the name of your project? ")
self.heroku = kwargs.get('heroku')
self.mysql = kwargs.get('mysql')
self.postgres = kwargs.get('postgres') or self.heroku
self.hg = kwargs.get('hg')
self.git = False if self.hg else True
self.full_name = raw_input("What's your full name? ")
self.email = raw_input("What's your email? ")
self.project_path = self.project_root = os.path.join(os.getcwd(), self.project_name)
self.renderer = pystache.Renderer()
self.view = View(self.project_name, **self._kwargs)
def _create_file(self, names):
for file_name, template_name in names:
self.view.template_name = template_name
with open(os.path.join(self.project_path, file_name), 'w+') as f:
f.write(self.renderer.render(self.view))
@property
def secret_key(self):
if not self._secret_key:
chars = "!@#$%^&*(-_=+)abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
self._secret_key = ''.join(random.choice(chars) for c in range(50))
return self._secret_key
@property
def _kwargs(self):
return {
'heroku': self.heroku,
'mysql': self.mysql,
'postgres': self.postgres,
'full_name': self.full_name,
'email': self.email,
'secret_key': self.secret_key
}
@property
def root_files(self):
files = {
'manage.py': 'manage',
'requirements.txt': 'pip_requirements',
'Vagrantfile': 'vagrantfile'
}
if self.hg:
files['.hgignore'] = 'hgignore'
else:
files['.gitignore'] = 'gitignore'
if self.heroku:
files['Procfile'] = 'procfile'
return files.items()
@property
def django_files(self):
files = {
'settings.py': 'settings',
'settingslocal.py': 'settings_local',
'urls.py': 'urls',
'wsgi.py': 'wsgi'
}
return files.items()
@property
def salt_files(self):
files = {
'top.sls': 'top',
'%s.sls' % self.project_name: 'salt_project',
'requirements.sls': 'requirements',
'motd': 'motd'
}
if self.mysql:
files['mysql.sls'] = 'mysql'
if self.postgres:
files['pg_hba.conf'] = 'pgconf'
files['postgres.sls'] = 'postgres'
return files.items()
def initialize(self):
# Create root directory
os.mkdir(self.project_name)
self._create_file(self.root_files)
# Create project
os.chdir(self.project_path)
self.project_path = os.path.join(os.getcwd(), self.project_name)
os.mkdir(self.project_name)
open(os.path.join(self.project_path, '__init__.py'), 'w+').close()
self._create_file(self.django_files)
os.chdir(self.project_name)
# Create static directories
os.mkdir('public')
os.mkdir('templates')
os.makedirs('static/css')
os.makedirs('static/js')
os.makedirs('static/img')
os.makedirs('static/less')
os.chdir('templates')
self.project_path = os.path.join(os.getcwd())
self._create_file([('base.html', 'base')])
os.chdir(self.project_root)
self.project_path = os.path.join(os.getcwd(), 'salt')
os.makedirs('salt/roots/salt')
if self.postgres:
# Create the pillar directories
os.mkdir('pillar')
# Create minion
self._create_file([('minion', 'minion')])
self.project_path = os.path.join(os.getcwd(), 'salt', 'roots', 'salt')
self._create_file(self.salt_files)
if self.postgres:
# create pillar directory and postgres settings.
pillar = os.path.join(self.project_root, 'pillar')
os.chdir(pillar)
self.project_path = pillar
self._create_file([
('top.sls', 'pillar_top'),
('settings.sls', 'pillar_settings')
])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--heroku', action='store_true', default=False, help="Initialize the project for "
"deployment to Heroku.")
parser.add_argument('--mysql', action='store_true', default=False, help='Initialize the project with MySQL.')
parser.add_argument('--postgres', action='store_true', default=False, help="Initialize the project with Postgres.")
parser.add_argument('--hg', action='store_true', default=False, help="Initialize project for mercurial.")
args = parser.parse_args()
if args.mysql and args.postgres:
sys.exit("You can only enable one database, you enabled both MySQL and Postgres.")
if args.mysql and args.heroku:
sys.exit("Enable MySQL is not valid with the heroku option. By default postgres is enabled with "
"the heroku option is used.")
project = Project(**vars(args))
project.initialize()
| mit | -4,106,207,016,604,869,600 | 31.715152 | 119 | 0.564839 | false | 3.820241 | false | false | false |
kmadathil/sanskrit_parser | sanskrit_parser/generator/cmd_line.py | 1 | 9432 | # -*- coding: utf-8 -*-
"""
@author: Karthik Madathil (github: @kmadathil)
"""
from argparse import ArgumentParser, Action
import logging
from sanskrit_parser.base.sanskrit_base import SLP1, DEVANAGARI
from sanskrit_parser.generator.paninian_object import PaninianObject
from sanskrit_parser.generator.prakriya import Prakriya, PrakriyaVakya
from sanskrit_parser.generator.pratyaya import * # noqa: F403
from sanskrit_parser.generator.dhatu import * # noqa: F403
from sanskrit_parser.generator.pratipadika import * # noqa: F403
from sanskrit_parser.generator.sutras_yaml import sutra_list
from sanskrit_parser import enable_file_logger, enable_console_logger
logger = logging.getLogger(__name__)
def run_pp(s, verbose=False):
pl = []
# Assemble list of inputs
for i in range(len(s)):
def _gen_obj(s, i):
if isinstance(s[i], tuple) or isinstance(s[i], list):
lelem = [_gen_obj(s[i], ii) for (ii, ss) in enumerate(s[i])]
else:
lelem = s[i]
return lelem
lelem = _gen_obj(s, i)
pl.append(lelem)
p = Prakriya(sutra_list, PrakriyaVakya(pl))
p.execute()
if verbose:
p.describe()
o = p.output()
return o
# Insert all sup vibhaktis one after the other, with avasAnas
# Return results with avasAnas stripped as 8x3 list of lists
def generate_vibhakti(pratipadika, verbose=False):
r = []
for ix, s in enumerate(sups): # noqa: F405
if verbose:
logger.info(f"Vibhakti {ix+1} {s}")
else:
logger.debug(f"Vibhakti {ix+1} {s}")
r.append([])
for jx, ss in enumerate(s):
# For nitya eka/dvi/bahuvacana, generate only the appropriate
if (((jx == 0) and pratipadika.hasTag("nityEkavacana")) or
((jx == 1) and pratipadika.hasTag("nityadvivacana")) or
((jx == 2) and pratipadika.hasTag("nityabahuvacana")) or
(not (pratipadika.hasTag("nityEkavacana") or
pratipadika.hasTag("nityadvivacana") or
pratipadika.hasTag("nityabahuvacana")))):
t = [(pratipadika, ss), avasAna] # noqa: F405
_r = run_pp(t, verbose)
r[-1].append(_r)
p = [''.join([str(x) for x in y]) for y in _r]
pp = ", ".join([x.strip('.') for x in p])
if verbose:
logger.info(f"Vacana {jx+1} {ss} {pp}")
else:
logger.debug(f"Vacana {jx+1} {ss} {pp}")
return r
last_option = False
class CustomAction(Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
# if nargs is not None:
# raise ValueError("nargs not allowed")
super(CustomAction, self).__init__(option_strings, dest, nargs, **kwargs)
logger.debug(f"Initializing CustomAction {option_strings}, {dest}")
def __call__(self, parser, namespace, values, option_string=None):
logger.debug('%r %r %r' % (namespace, values, option_string))
global last_option
assert not last_option, f"Option {option_string} added after avasana"
if getattr(namespace, self.dest) is None:
_n = []
# This tracks the hierarchical input list
setattr(namespace, self.dest, _n)
# Last item of this is always the current level of the input
setattr(namespace, "pointer", [_n])
if values is not None:
if isinstance(values, str):
values = [values]
for v in values:
assert v in globals(), f"{v} is not defined!"
getattr(namespace, "pointer")[-1].append(globals()[v])
else:
if option_string == "-o": # Open
_l = []
# Add a new level at the end of current list
getattr(namespace, "pointer")[-1].append(_l)
# Designate new list as current list
getattr(namespace, "pointer").append(_l)
elif option_string == "-c": # Close
# Current is updated to previous
getattr(namespace, "pointer").pop()
elif option_string == "-a": # AvasAna
# Add avasana
lav = getattr(namespace, self.dest)
setattr(namespace, self.dest, [lav, avasAna]) # noqa: F405
last_option = True
else:
logger.error(f"Unrecognized Option {option_string}")
class CustomActionString(Action):
def __init__(self, option_strings, dest, nargs=None, encoding=SLP1, **kwargs):
# if nargs is not None:
# raise ValueError("nargs not allowed")
self.encoding = encoding
super(CustomActionString, self).__init__(option_strings, dest, nargs, **kwargs)
logger.debug(f"Initializing CustomAction {option_strings}, {dest}")
def __call__(self, parser, namespace, values, option_string=None):
global last_option
assert not last_option, f"Option {option_string} added after avasana"
encoding = self.encoding
def _exec(value):
# Shortcuts for two input tests not using predefined objects
# If a string in the first place ends with * it's an anga
# Else it's a pada
# For everything else, use predefined objects
if (value[-1] == "*"):
value = value[:-1]
value = PaninianObject(value, encoding) # noqa: F405
value.setTag("aNga")
elif (value[-1] == "_"):
value = value[:-1]
value = PaninianObject(value, encoding) # noqa: F405
value.setTag("pada")
else:
value = PaninianObject(value, encoding) # noqa: F405
getattr(namespace, "pointer")[-1].append(value)
logger.info('%r %r %r' % (namespace, values, option_string))
if getattr(namespace, self.dest) is None:
_n = []
# This tracks the hierarchical input list
setattr(namespace, self.dest, _n)
# Last item of this is always the current level of the input
setattr(namespace, "pointer", [_n])
if isinstance(values, list):
for v in values:
_exec(v)
else:
_exec(values)
def get_args(argv=None):
"""
Argparse routine.
Returns args variable
"""
parser = ArgumentParser(description='Paninian Generator: Prakriti + Pratyaya')
# String to encode
parser.add_argument('--debug', action='store_true')
parser.add_argument('-p', '--pratyaya', nargs="+", dest="inputs", action=CustomAction)
parser.add_argument('-d', '--dhatu', dest="inputs", action=CustomAction)
parser.add_argument('-t', '--pratipadika', dest="inputs", action=CustomAction)
parser.add_argument('-s', '--string', nargs="+", dest="inputs", encoding=SLP1, action=CustomActionString)
parser.add_argument('-o', nargs="?", dest="inputs", action=CustomAction, help="Open bracket") # Open Brace
parser.add_argument('-c', nargs="?", dest="inputs", action=CustomAction, help="Close bracket")
parser.add_argument('-a', nargs="?", dest="inputs", action=CustomAction, help="Avasana")
parser.add_argument("--vibhakti", action="store_true", help="generate all vibhaktis")
parser.add_argument("--gen-test", action="store_true", help="generate vibhakti test")
parser.add_argument("--verbose", action="store_true", help="verbose")
return parser.parse_args(argv)
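# Hedged CLI sketch (editor's addition; the object name below is hypothetical
# and must exist among the predefined pratipadika objects):
#
#     python -m sanskrit_parser.generator.cmd_line -t rAma --vibhakti
#
# would print the full vibhakti table for the pratipadika bound to `rAma`.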
def cmd_line():
# Logging
enable_console_logger()
args = get_args()
if args.debug:
enable_file_logger(level=logging.DEBUG)
logger.info(f"Inputs {args.inputs}")
for i in args.inputs:
def _i(x):
if isinstance(x, list):
for _x in x:
_i(_x)
else:
logger.info(f"{x} {x.tags}")
_i(i)
logger.info("End Inputs")
if args.vibhakti:
if ((len(args.inputs) != 1) or (not isinstance(args.inputs[0], Pratipadika))): # noqa: F405
logger.info(f"Need a single pratipadika for vibhaktis, got {len(args.inputs)} inputs, first one of type {type(args.inputs[0])}")
logger.info("Simplifying")
r = run_pp(args.inputs, args.verbose)
logger.debug(f"Output: {[''.join([str(x) for x in y]) for y in r]}")
assert len(r) == 1, "Got multiple outputs"
pp = PaninianObject.join_objects(r)
logger.info(f"Output {pp} {pp.tags}")
else:
pp = args.inputs[0]
r = generate_vibhakti(pp, args.verbose)
print("Output")
if args.gen_test:
rr = [[[y[0].transcoded(DEVANAGARI) for y in va] if len(va) > 1 else va[0][0].transcoded(DEVANAGARI) for va in vi] for vi in r]
print(f"prAtipadika[\"{str(pp)}\"] = {str(pp)}")
print(f"viBakti[\"{str(pp)}\"] = [")
for vi in rr:
print(f"{vi},")
print("]")
else:
for ix, vi in enumerate(r):
print(f"{', '.join(['/'.join([''.join([x.transcoded(DEVANAGARI) for x in y]).strip('।') for y in va]) for va in vi])}")
else:
r = run_pp(args.inputs, args.verbose)
print(f"Output: {[''.join([str(x) for x in y]) for y in r]}")
| mit | -4,587,673,785,871,555,000 | 41.286996 | 140 | 0.568293 | false | 3.504274 | false | false | false |
xolox/python-deb-pkg-tools | deb_pkg_tools/gpg.py | 1 | 25753 | # Debian packaging tools: GPG key pair generation.
#
# Author: Peter Odding <[email protected]>
# Last Change: April 18, 2020
# URL: https://github.com/xolox/python-deb-pkg-tools
"""
GPG key pair generation and signing of ``Release`` files.
The :mod:`deb_pkg_tools.gpg` module is used to manage GPG key pairs. It allows
callers to specify which GPG key pair and/or key ID they want to use and will
automatically generate GPG key pairs that don't exist yet.
.. _GnuPG 2.1 compatibility:
GnuPG 2.1 compatibility
-----------------------
In 2018 the :mod:`deb_pkg_tools.gpg` module got a major update to enable
compatibility with GnuPG >= 2.1:
- The :mod:`deb_pkg_tools.gpg` module was first integrated into deb-pkg-tools
in 2013 and was developed based on GnuPG 1.4.10 which was the version
included in Ubuntu 10.04.
- Ubuntu 18.04 includes GnuPG 2.2.4 which differs from 1.4.10 in several
backwards incompatible ways that require changes in deb-pkg-tools which
directly affect the users of deb-pkg-tools (the API has changed).
The following sections discuss the concrete changes:
.. contents::
:local:
Storage of secret keys
~~~~~~~~~~~~~~~~~~~~~~
The storage of secret keys has changed in a backwards incompatible way, such
that the ``--secret-keyring`` command line option is now obsolete and ignored.
The GnuPG documentation suggests to use an `ephemeral home directory`_ as a
replacement for ``--secret-keyring``. To enable compatibility with GnuPG >= 2.1
while at the same time preserving compatibility with older releases, the
:class:`GPGKey` class gained a new :attr:`~GPGKey.directory` property:
- When GnuPG >= 2.1 is detected :attr:`~GPGKey.directory` is required.
- When GnuPG < 2.1 is detected :attr:`~GPGKey.directory` may be specified and
will be respected, but you can also use "the old calling convention" where
the :attr:`~GPGKey.public_key_file`, :attr:`~GPGKey.secret_key_file` and
:attr:`~GPGKey.key_id` properties are specified separately.
- The documentation of the :class:`GPGKey` initializer explains how to enable
compatibility with old and new versions GnuPG versions at the same time
(using the same Python code).
Unattended key generation
~~~~~~~~~~~~~~~~~~~~~~~~~
The default behavior of ``gpg --batch --gen-key`` has changed:
- The user is now presented with a GUI prompt that asks to specify a pass
phrase for the new key, at which point the supposedly unattended key
generation is effectively blocked on user input...
- To avoid the GUI prompt the new ``%no-protection`` option needs to be added
to the batch file, but of course that option will not be recognized by older
GnuPG releases, so it needs to be added conditionally.
.. _ephemeral home directory: https://www.gnupg.org/documentation/manuals/gnupg/Ephemeral-home-directories.html#Ephemeral-home-directories
"""
# Standard library modules.
import logging
import multiprocessing
import os.path
import tempfile
# External dependencies.
from executor import execute, quote
from humanfriendly import Timer, coerce_boolean, parse_path
from humanfriendly.decorators import cached
from humanfriendly.text import compact
from property_manager import PropertyManager, cached_property, mutable_property
# Modules included in our package.
from deb_pkg_tools.utils import find_installed_version, makedirs
from deb_pkg_tools.version import Version
# Public identifiers that require documentation.
__all__ = (
"EntropyGenerator",
"FORCE_ENTROPY",
"GPGKey",
"GPG_AGENT_VARIABLE",
"create_directory",
"generate_entropy",
"have_updated_gnupg",
"initialize_gnupg",
"logger",
)
# Initialize a logger.
logger = logging.getLogger(__name__)
FORCE_ENTROPY = coerce_boolean(os.environ.get('DPT_FORCE_ENTROPY', 'false'))
"""
:data:`True` to allow :func:`GPGKey.generate_key_pair()` to force the system to
generate entropy based on disk I/O , :data:`False` to disallow this behavior
(the default).
This was added to facilitate the deb-pkg-tools test suite running on Travis CI.
It is assumed that this rather obscure functionality will only ever be useful
in the same context: Running a test suite in a virtualization environment with
very low entropy.
The environment variable ``$DPT_FORCE_ENTROPY`` can be used to control the
value of this variable (see :func:`~humanfriendly.coerce_boolean()` for
acceptable values).
"""
GPG_AGENT_VARIABLE = 'GPG_AGENT_INFO'
"""The name of the environment variable used to communicate between the GPG agent and :man:`gpg` processes (a string)."""
def create_directory(pathname):
"""
Create a GnuPG directory with sane permissions (to avoid GnuPG warnings).
:param pathname: The directory to create (a string).
"""
makedirs(pathname)
os.chmod(pathname, 0o700)
@cached
def have_updated_gnupg():
"""
Check which version of GnuPG is installed.
:returns: :data:`True` if GnuPG >= 2.1 is installed,
:data:`False` for older versions.
"""
gnupg_version = find_installed_version('gnupg')
return Version(gnupg_version) >= Version('2.1')
def initialize_gnupg():
"""
Make sure the ``~/.gnupg`` directory exists.
Older versions of GPG can/will fail when the ``~/.gnupg`` directory doesn't
exist (e.g. in a newly created chroot). GPG itself creates the directory
after noticing that it's missing, but then still fails! Later runs work
fine however. To avoid this problem we make sure ``~/.gnupg`` exists before
we run GPG.
"""
create_directory(parse_path('~/.gnupg'))
class GPGKey(PropertyManager):
"""
Container for generating GPG key pairs and signing release files.
This class is used to sign ``Release`` files in Debian package
repositories. If the given GPG key pair doesn't exist yet it will be
automatically created without user interaction (except gathering of
entropy, which is not something I can automate :-).
"""
def __init__(self, **options):
"""
Initialize a :class:`GPGKey` object.
:param options: Refer to the initializer of the superclass
(:class:`~property_manager.PropertyManager`)
for details about argument handling.
There are two ways to specify the location of a GPG key pair:
- The old way applies to GnuPG < 2.1 and uses :attr:`public_key_file`
and :attr:`secret_key_file`.
- The new way applies to GnuPG >= 2.1 and uses :attr:`directory`.
If you don't specify anything the user's default key pair will be used.
Specifying all three properties enables isolation from the user's
default keyring that's compatible with old and new GnuPG installations
at the same time.
You can also use :attr:`key_id` to select a specific existing GPG key
pair, possibly in combination with the previously mentioned properties.
When the caller has specified a custom location for the GPG key pair
but the associated files don't exist yet a new GPG key pair will be
automatically generated. This requires that :attr:`name` and
:attr:`description` have been set.
"""
# Initialize our superclass.
super(GPGKey, self).__init__(**options)
# Initialize ourselves based on the GnuPG version.
if have_updated_gnupg():
self.check_new_usage()
else:
self.check_old_usage()
self.set_old_defaults()
self.check_old_files()
self.check_key_id()
self.generate_key_pair()
def check_key_id(self):
"""Raise :exc:`~exceptions.EnvironmentError` when a key ID has been specified but the key pair doesn't exist."""
if self.key_id and not self.existing_files:
raise EnvironmentError(compact(
"The key ID {key_id} was specified but the configured key pair doesn't exist!",
key_id=self.key_id,
))
def check_new_usage(self):
"""
Raise an exception when detecting a backwards incompatibility.
:raises: :exc:`~exceptions.TypeError` as described below.
When GnuPG >= 2.1 is installed the :func:`check_new_usage()` method is
called to make sure that the caller is aware of the changes in API
contract that this implies. We do so by raising an exception when both
of the following conditions hold:
- The caller is using the old calling convention of setting
:attr:`public_key_file` and :attr:`secret_key_file` (which
confirms that the intention is to use an isolated GPG key).
- The caller is not using the new calling convention of setting
:attr:`directory` (even though this is required to use an isolated
GPG key with GnuPG >= 2.1).
"""
if self.old_usage and not self.new_usage:
raise TypeError(compact("""
You're running GnuPG >= 2.1 which requires changes to how
deb_pkg_tools.gpg.GPGKey is used and unfortunately our
caller hasn't been updated to support this. Please refer
to the the deb-pkg-tools 5.0 release notes for details.
"""))
def check_old_files(self):
"""
Raise an exception when we risk overwriting an existing public or secret key file.
:returns: A list of filenames with existing files.
:raises: :exc:`~exceptions.EnvironmentError` as described below.
When GnuPG < 2.1 is installed :func:`check_old_files()` is called to
ensure that when :attr:`public_key_file` and :attr:`secret_key_file`
have been provided, either both of the files already exist or neither
one exists. This avoids accidentally overwriting an existing file that
wasn't generated by deb-pkg-tools and shouldn't be touched at all.
"""
if len(self.existing_files) == 1:
raise EnvironmentError(compact(
"Refusing to overwrite existing key file! ({filename})",
filename=self.existing_files[0],
))
def check_old_usage(self):
"""
Raise an exception when either the public or the secret key hasn't been provided.
:raises: :exc:`~exceptions.TypeError` as described below.
When GnuPG < 2.1 is installed :func:`check_old_usage()` is called
to ensure that :attr:`public_key_file` and :attr:`secret_key_file`
are either both provided or both omitted.
"""
if self.secret_key_file and not self.public_key_file:
raise TypeError(compact("""
The secret key file {filename} was provided without a
corresponding public key file! Please provide both or
neither.
""", filename=self.secret_key_file))
elif self.public_key_file and not self.secret_key_file:
raise TypeError(compact("""
The public key file {filename} was provided without a
corresponding secret key file! Please provide both or
neither.
""", filename=self.public_key_file))
def generate_key_pair(self):
"""
Generate a missing GPG key pair on demand.
:raises: :exc:`~exceptions.TypeError` when the GPG key pair needs to be
generated (because it doesn't exist yet) but no :attr:`name`
and :attr:`description` were provided.
"""
logger.debug("Checking if GPG key pair exists ..")
if self.existing_files:
logger.debug("Assuming key pair exists (found existing files: %s).", self.existing_files)
return
elif not (self.name and self.description):
raise TypeError("Can't generate GPG key pair without 'name' and 'description'!")
logger.info("Generating GPG key pair: %s (%s)", self.name, self.description)
# Make sure all of the required directories exist and have sane
# permissions (to avoid GnuPG warnings).
required_dirs = set([self.directory_default, self.directory_effective])
if not have_updated_gnupg():
required_dirs.update([
os.path.dirname(self.public_key_file),
                os.path.dirname(self.secret_key_file),
])
for directory in required_dirs:
create_directory(directory)
# Use a temporary file for the `gpg --batch --gen-key' batch instructions.
fd, temporary_file = tempfile.mkstemp(suffix='.txt')
try:
with open(temporary_file, 'w') as handle:
handle.write(self.batch_script)
handle.write('\n')
# Inform the operator that this may take a while.
logger.info(compact("""
Please note: Generating a GPG key pair can take a long time. If
you are logged into a virtual machine or a remote server over
SSH, now is a good time to familiarize yourself with the
concept of entropy and how to make more of it :-)
"""))
timer = Timer()
with EntropyGenerator():
gen_key_cmd = self.scoped_command
gen_key_cmd += ['--batch', '--gen-key', temporary_file]
execute(*gen_key_cmd, logger=logger)
logger.info("Finished generating GPG key pair in %s.", timer)
finally:
os.unlink(temporary_file)
# Reset cached properties after key generation.
self.clear_cached_properties()
def set_old_defaults(self):
"""Fall back to the default public and secret key files for GnuPG < 2.1."""
if not self.public_key_file and not self.secret_key_file:
self.public_key_file = os.path.join(self.directory_effective, 'pubring.gpg')
self.secret_key_file = os.path.join(self.directory_effective, 'secring.gpg')
@cached_property
def batch_script(self):
"""A GnuPG batch script suitable for ``gpg --batch --gen-key`` (a string)."""
logger.debug("Generating batch script for 'gpg --batch --gen-key' ..")
lines = [
'Key-Type: RSA',
'Key-Length: 1024',
'Subkey-Type: ELG-E',
'Subkey-Length: 1024',
'Name-Real: %s' % self.name,
'Name-Comment: %s' % self.description,
'Name-Email: none',
'Expire-Date: 0',
]
if have_updated_gnupg():
# GnuPG >= 2.1 prompts the operator to pick a password
# interactively unless '%no-protection' is used. Also
# %secring has been obsoleted and is now ignored.
logger.debug("Specializing batch script for GnuPG >= 2.1 ..")
lines.append('%no-protection')
else:
logger.debug("Specializing batch script for GnuPG < 2.1 ..")
lines.append('%%pubring %s' % self.public_key_file)
lines.append('%%secring %s' % self.secret_key_file)
lines.append('%commit')
text = '\n'.join(lines)
logger.debug("Here's the complete batch script:\n%s", text)
return text
@mutable_property
def command_name(self):
"""The name of the GnuPG program (a string, defaults to :man:`gpg`)."""
return 'gpg'
@mutable_property
def description(self):
"""
The description of the GPG key pair (a string or :data:`None`).
Used only when the key pair is generated because it doesn't exist yet.
"""
@mutable_property
def directory(self):
"""
The pathname of the GnuPG home directory to use (a string or :data:`None`).
This property was added in deb-pkg-tools 5.0 to enable compatibility
with GnuPG >= 2.1 which changed the storage of secret keys in a
backwards incompatible way by obsoleting the ``--secret-keyring``
command line option. The GnuPG documentation suggests to use an
`ephemeral home directory`_ as a replacement and that's why the
:attr:`directory` property was added.
"""
@cached_property
def directory_default(self):
"""The pathname of the default GnuPG home directory (a string)."""
return parse_path('~/.gnupg')
@cached_property
def directory_effective(self):
"""The pathname of the GnuPG home directory that will actually be used (a string)."""
return self.directory or self.directory_default
@cached_property
def existing_files(self):
"""
A list of strings with the filenames of existing GnuPG data files.
The content of this list depends on the GnuPG version:
- On GnuPG >= 2.1 and/or when :attr:`directory` has been set (also on
GnuPG < 2.1) any files in or below :attr:`directory` are included.
- On GnuPG < 2.1 :attr:`public_key_file` and :attr:`secret_key_file`
are included (only if the properties are set and the files exist of
course).
"""
filenames = []
if have_updated_gnupg() or self.new_usage:
# New usage is mandatory in combination with GnuPG >= 2.1 and
# optional but supported in combination with GnuPG < 2.1.
if os.path.isdir(self.directory_effective):
for root, dirs, files in os.walk(self.directory_effective):
filenames.extend(os.path.join(root, fn) for fn in files)
if self.old_usage and not have_updated_gnupg():
# Old usage is only possibly in combination with GnuPG < 2.1.
candidates = (self.public_key_file, self.secret_key_file)
filenames.extend(fn for fn in candidates if os.path.isfile(fn))
return filenames
@cached_property
def identifier(self):
"""
A unique identifier for the GPG key pair (a string).
The output of the ``gpg --list-keys --with-colons`` command is parsed
to extract a unique identifier for the GPG key pair:
- When a fingerprint is available this is preferred.
- Otherwise a long key ID will be returned (assuming one is available).
- If neither can be extracted :exc:`~exceptions.EnvironmentError` is raised.
If an isolated key pair is being used the :attr:`directory` option
should be used instead of the :attr:`public_key_file` and
:attr:`secret_key_file` properties, even if GnuPG < 2.1 is being used.
This is necessary because of what appears to be a bug in GnuPG, see
`this mailing list thread`_ for more discussion.
.. _this mailing list thread: https://lists.gnupg.org/pipermail/gnupg-users/2002-March/012144.html
"""
listing = execute(' '.join([self.gpg_command, '--list-keys', '--with-colons']), capture=True)
parsed_listing = [line.split(':') for line in listing.splitlines()]
# Look for an 'fpr:*' line with a key fingerprint.
for fields in parsed_listing:
if len(fields) >= 10 and fields[0] == 'fpr' and fields[9].isalnum():
return fields[9]
# Look for an 'pub:*' line with a long key ID.
for fields in parsed_listing:
if len(fields) >= 5 and fields[0] == 'pub' and fields[4].isalnum():
return fields[4]
# Explain what went wrong, try to provide hints.
msg = "Failed to get unique ID of GPG key pair!"
if self.old_usage and not self.new_usage:
msg += " Use of the 'directory' option may help to resolve this."
raise EnvironmentError(msg)
@property
def gpg_command(self):
"""
The GPG command line that can be used to sign using the key, export the key, etc (a string).
The value of :attr:`gpg_command` is based on :attr:`scoped_command`
        combined with the ``--no-default-keyring``, ``--keyring`` and
        ``--secret-keyring`` options on GnuPG < 2.1, plus ``--recipient``
        and ``--use-agent`` when applicable.
        The documentation of :func:`GPGKey.__init__()` contains two examples.
"""
command = self.scoped_command
if not have_updated_gnupg():
command.extend((
'--no-default-keyring',
'--keyring', self.public_key_file,
'--secret-keyring', self.secret_key_file,
))
if self.key_id:
command.extend(('--recipient', self.key_id))
if self.use_agent:
command.append('--use-agent')
return quote(command)
@mutable_property
def key_id(self):
"""
The key ID of an existing key pair to use (a string or :data:`None`).
If this option is provided then the key pair must already exist.
"""
@mutable_property
def name(self):
"""
The name of the GPG key pair (a string or :data:`None`).
Used only when the key pair is generated because it doesn't exist yet.
"""
@property
def new_usage(self):
""":data:`True` if the new API is being used, :data:`False` otherwise."""
return bool(self.directory)
@property
def old_usage(self):
""":data:`True` if the old API is being used, :data:`False` otherwise."""
return bool(self.public_key_file or self.secret_key_file)
@mutable_property
def public_key_file(self):
"""
The pathname of the public key file (a string or :data:`None`).
This is only used when GnuPG < 2.1 is installed.
"""
@property
def scoped_command(self):
"""
The GPG program name and optional ``--homedir`` command line option (a list of strings).
The name of the GPG program is taken from :attr:`command_name` and the
``--homedir`` option is only added when :attr:`directory` is set.
"""
command = [self.command_name]
if self.directory:
command.append('--homedir')
command.append(self.directory)
return command
@mutable_property
def secret_key_file(self):
"""
The pathname of the secret key file (a string or :data:`None`).
This is only used when GnuPG < 2.1 is installed.
"""
@property
def use_agent(self):
"""
Whether to enable the use of the `GPG agent`_ (a boolean).
This property checks whether the environment variable given by
:data:`GPG_AGENT_VARIABLE` is set to a nonempty value. If it is then
:attr:`gpg_command` will include the ``--use-agent`` option. This makes
it possible to integrate repository signing with the GPG agent, so that
a password is asked for once instead of every time something is signed.
.. _GPG agent: http://linux.die.net/man/1/gpg-agent
"""
return bool(os.environ.get(GPG_AGENT_VARIABLE))
class EntropyGenerator(object):
"""
Force the system to generate entropy based on disk I/O.
The `deb-pkg-tools` test suite runs on Travis CI which uses virtual
machines to isolate tests. Because the `deb-pkg-tools` test suite generates
several GPG keys it risks the chance of getting stuck and being killed
after 10 minutes of inactivity. This happens because of a lack of entropy
which is a very common problem in virtualized environments.
There are tricks to use fake entropy to avoid this problem:
- The `rng-tools` package/daemon can feed ``/dev/random`` based on
``/dev/urandom``. Unfortunately this package doesn't work on Travis CI
because they use OpenVZ which uses read only ``/dev/random`` devices.
- GPG version 2 supports the ``--debug-quick-random`` option but I haven't
investigated how easy it is to switch.
Instances of this class can be used as a context manager to generate
endless disk I/O which is one of the few sources of entropy on virtualized
systems. Entropy generation is enabled when the environment variable
``$DPT_FORCE_ENTROPY`` is set to ``yes``, ``true`` or ``1``.
"""
def __init__(self):
"""Initialize a :class:`EntropyGenerator` object."""
self.enabled = coerce_boolean(os.environ.get('DPT_FORCE_ENTROPY', 'false'))
if self.enabled:
self.process = multiprocessing.Process(target=generate_entropy)
def __enter__(self):
"""Enable entropy generation."""
if self.enabled:
logger.warning("Forcing entropy generation using disk I/O, performance will suffer ..")
self.process.start()
def __exit__(self, exc_type, exc_value, traceback):
"""Disable entropy generation."""
if self.enabled:
self.process.terminate()
logger.debug("Terminated entropy generation.")
def generate_entropy():
"""
Force the system to generate entropy based on disk I/O.
This function is run in a separate process by :class:`EntropyGenerator`.
It scans the complete file system and reads every file it finds in blocks
of 1 KB. This function never returns; it has to be killed.
"""
# Continue until we are killed.
while True:
# Scan the complete file system.
for root, dirs, files in os.walk('/'):
for filename in files:
pathname = os.path.join(root, filename)
# Don't try to read device files, named pipes, etc.
if os.path.isfile(pathname):
# Read every file on the file system in blocks of 1 KB.
try:
with open(pathname) as handle:
while True:
block = handle.read(1024)
if not block:
break
except Exception:
pass
| mit | 1,790,061,008,065,040,600 | 39.619874 | 138 | 0.633907 | false | 4.144351 | false | false | false |
anarkiwi/faucet | faucet/prom_client.py | 6 | 3515 | """Implement Prometheus client."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import parse_qs
from ryu.lib import hub
from pbr.version import VersionInfo
from prometheus_client import Gauge as PromGauge
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST, REGISTRY
# Ryu's WSGI implementation doesn't always set QUERY_STRING
def make_wsgi_app(registry):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
query_str = environ.get('QUERY_STRING', '')
params = parse_qs(query_str)
reg = registry
if 'name[]' in params:
reg = reg.restricted_registry(params['name[]'])
output = generate_latest(reg)
status = str('200 OK')
headers = [(str('Content-type'), CONTENT_TYPE_LATEST)]
start_response(status, headers)
return [output]
return prometheus_app
class PromClient: # pylint: disable=too-few-public-methods
"""Prometheus client."""
REQUIRED_LABELS = ['dp_id', 'dp_name']
_reg = REGISTRY
def __init__(self, reg=None):
if reg is not None:
self._reg = reg
self.version = VersionInfo('faucet').semantic_version().release_string()
self.faucet_version = PromGauge( # pylint: disable=unexpected-keyword-arg
'faucet_pbr_version',
'Faucet PBR version',
['version'],
registry=self._reg)
self.faucet_version.labels(version=self.version).set(1) # pylint: disable=no-member
self.server = None
self.thread = None
def start(self, prom_port, prom_addr, use_test_thread=False):
"""Start webserver."""
if not self.server:
app = make_wsgi_app(self._reg)
if use_test_thread:
# pylint: disable=import-outside-toplevel
from wsgiref.simple_server import (
make_server, WSGIRequestHandler)
import threading
class NoLoggingWSGIRequestHandler(WSGIRequestHandler):
"""Don't log requests."""
def log_message(self, *_args): # pylint: disable=arguments-differ
pass
self.server = make_server(
prom_addr, int(prom_port), app, handler_class=NoLoggingWSGIRequestHandler)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.daemon = True
self.thread.start()
else:
self.server = hub.WSGIServer((prom_addr, int(prom_port)), app)
self.thread = hub.spawn(self.server.serve_forever)
self.thread.name = 'prometheus'
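if __name__ == '__main__':
    # Editor's sketch (not part of faucet): expose the default registry over
    # HTTP on a hypothetical address/port using the plain wsgiref backend.
    demo = PromClient()
    demo.start('9302', '0.0.0.0', use_test_thread=True)
    demo.thread.join()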
| apache-2.0 | 6,146,038,869,145,513,000 | 38.055556 | 94 | 0.637838 | false | 4.007982 | true | false | false |
amunozf/legibilidad | rarewords.py | 1 | 1297 | #!/usr/bin/env python3
# Legibilidad 2 (beta)
# Works out the readability of a text
# Spanish readability calculations
# © 2016 Alejandro Muñoz Fernández
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlite3
def rare_words(wordlist):
'''
List of rare words (not in the SUBTLEX-ESP database). Fix this: make only one query instead of one per word. It'll be faster
'''
dbpath = "/home/protected/db/SUBTLEX-ESP.db"
conn = sqlite3.connect(dbpath)
rarewords = []
cur = conn.cursor()
for word in wordlist:
cur.execute('SELECT 1 FROM frecuencias WHERE palabra = ? LIMIT 1', (word,))
if not cur.fetchone():
rarewords.append(word)
conn.close()
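    # Editor's sketch of the single-query variant suggested in the docstring
    # (untested) to replace the per-word loop above:
    #   placeholders = ','.join('?' * len(wordlist))
    #   cur.execute('SELECT palabra FROM frecuencias WHERE palabra IN (%s)'
    #               % placeholders, wordlist)
    #   known = {row[0] for row in cur.fetchall()}
    #   rarewords = [word for word in wordlist if word not in known]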
    return rarewords
| gpl-3.0 | -6,993,172,489,587,939,000 | 34 | 128 | 0.718702 | false | 3.655367 | false | false | false
vseledkin/LV_groundhog | experiments/nmt/utils/binarize_top_unigram.py | 3 | 1093 | import numpy
import cPickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--top-unigram", type=str)
parser.add_argument("--src-w2i", type=str)
parser.add_argument("--trg-w2i", type=str)
parser.add_argument("--vocab-size", type=int)
parser.add_argument("--output", type=str)
args = parser.parse_args()
with open(args.src_w2i,'rb') as f:
src_w2i = cPickle.load(f)
with open(args.trg_w2i,'rb') as f:
trg_w2i = cPickle.load(f)
with open(args.top_unigram,'rb') as f:
top_unigram = cPickle.load(f)
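# Hedged sketch of the expected structure (editor's addition): top_unigram
# maps each source word to a list of candidate target words, most probable
# first, e.g. {'maison': ['house', 'home', ...], ...}; the loop below
# re-indexes both sides through the shortlist vocabularies.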
new_dict = {}
for old_key in top_unigram:
if old_key == '<eps>': # Don't consider the empty string
continue
new_key = src_w2i[old_key] # Convert source word to its index
if new_key >= args.vocab_size:
continue
old_value = top_unigram[old_key] # This is a list of words (with the most probable one first)
new_value = [trg_w2i[elt] for elt in old_value if (trg_w2i[elt] < args.vocab_size)]
if len(new_value) >= 1:
new_dict[new_key] = new_value
with open(args.output,'wb') as f:
cPickle.dump(new_dict, f, -1)
| bsd-3-clause | -8,987,425,407,344,949,000 | 31.147059 | 97 | 0.662397 | false | 2.795396 | false | false | false |
jrugis/cell_mesh | poly.py | 1 | 1938 | #!/usr/bin/python
import numpy as np
import scipy.interpolate as si
import mayavi.mlab as mylab
def calc_points(line):
  points = np.zeros((len(line),3)) # indices -> point coordinates
for i in range(points.shape[0]):
#points[i,0] = 2 * 0.556 * (line[i][0]-0.5)
#points[i,1] = 2 * 0.556 * (line[i][1]-0.5)
#points[i,2] = 0.798 * (line[i][2]-0.5) # z axis
points[i,0] = 0.556 * (line[i][0]-0.5)
points[i,1] = 0.556 * (line[i][1]-0.5)
points[i,2] = 0.798 * (line[i][2]-0.5) # z axis
#points[i,0] = 0.556 * (line[i][0])
#points[i,1] = 0.556 * (line[i][1])
#points[i,2] = 0.798 * (line[i][2]) # z axis
return points
def bspline(cv, n=100, degree=3):
  """Sample n points of a clamped degree-`degree` B-spline through control points cv."""
cv = np.asarray(cv)
count = cv.shape[0]
degree = np.clip(degree,1,count-1) # max degree = count-1
kv = np.array([0]*degree + range(count-degree+1) + [count-degree]*degree,dtype='int')
u = np.linspace(0,(count-degree),num=n)
points = np.zeros((len(u),cv.shape[1]))
for i in xrange(cv.shape[1]):
points[:,i] = si.splev(u, (kv,cv[:,i],degree))
return points
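# Hedged usage sketch (editor's addition): sample 200 points of a cubic
# B-spline through some control points:
#   smooth = bspline(points, n=200, degree=3)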
# save geometry lines
def save_poly(fname, lines):
fname += "_poly.txt"
f = open(fname, 'w')
print ' ', fname
for line in lines:
points = calc_points(line)
#spoints = bspline(points, n=points.shape[0], degree=20)
##m = len(points)
m = len(points)/2
if m<4: continue
kx = 3
##if(m>3): kx = 3
##else: kx = m-1
wx = np.ones(len(points))
wx[0] = wx[-1] = 100
tck,u=si.splprep(np.transpose(points),w=wx,k=kx,s=10)
##m /= 2
##if(m<4) : m=4
spoints = np.transpose([si.splev(np.linspace(0,1,m),tck)])
f.write("%2d " % m)
for spoint in spoints:
for vert in spoint:
f.write("%0.2f " % vert)
f.write('\n')
mylab.plot3d(points[:,0], points[:,1], points[:,2], color=(1,0,0))
mylab.plot3d(spoints[:,0], spoints[:,1], spoints[:,2], color=(0,1,0))
f.close()
mylab.show()
return
| gpl-3.0 | -8,972,178,347,469,171,000 | 30.258065 | 87 | 0.566563 | false | 2.459391 | false | false | false |
moustakas/impy | image/im2rgbfits.py | 2 | 5419 | # im2rgbfits CL0024.png -over -header det.fits
# WILL HONOR WCS FROM headerfile
# im2rgbfits.py
# ~/ACS/CL0024/color/production/color.py
# ALSO SEE pyfits.pdf (Pyfits manual)
#from coetools import *
from PIL import Image
import pyfits
import sys, os
import string
from os.path import exists, join
from numpy import *
#################################
def str2num(str, rf=0):
"""CONVERTS A STRING TO A NUMBER (INT OR FLOAT) IF POSSIBLE
ALSO RETURNS FORMAT IF rf=1"""
try:
num = string.atoi(str)
format = 'd'
except:
try:
num = string.atof(str)
format = 'f'
except:
if not string.strip(str):
num = None
format = ''
else:
words = string.split(str)
if len(words) > 1:
num = map(str2num, tuple(words))
format = 'l'
else:
num = str
format = 's'
if rf:
return (num, format)
else:
return num
def params_cl(converttonumbers=True):
"""RETURNS PARAMETERS FROM COMMAND LINE ('cl') AS DICTIONARY:
KEYS ARE OPTIONS BEGINNING WITH '-'
VALUES ARE WHATEVER FOLLOWS KEYS: EITHER NOTHING (''), A VALUE, OR A LIST OF VALUES
ALL VALUES ARE CONVERTED TO INT / FLOAT WHEN APPROPRIATE"""
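    # Hedged example (editor's addition): for the invocation
    #   prog.py -over -header det.fits
    # this returns {'over': None, 'header': 'det.fits'}.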
list = sys.argv[:]
i = 0
dict = {}
oldkey = ""
key = ""
list.append('') # EXTRA ELEMENT SO WE COME BACK AND ASSIGN THE LAST VALUE
while i < len(list):
if striskey(list[i]) or not list[i]: # (or LAST VALUE)
if key: # ASSIGN VALUES TO OLD KEY
if value:
if len(value) == 1: # LIST OF 1 ELEMENT
value = value[0] # JUST ELEMENT
dict[key] = value
if list[i]:
key = list[i][1:] # REMOVE LEADING '-'
value = None
dict[key] = value # IN CASE THERE IS NO VALUE!
else: # VALUE (OR HAVEN'T GOTTEN TO KEYS)
if key: # (HAVE GOTTEN TO KEYS)
if value:
if converttonumbers:
value.append(str2num(list[i]))
else:
value = value + ' ' + list[i]
else:
if converttonumbers:
value = [str2num(list[i])]
else:
value = list[i]
i += 1
return dict
def striskey(str):
"""IS str AN OPTION LIKE -C or -ker
(IT'S NOT IF IT'S -2 or -.9)"""
iskey = 0
if str:
if str[0] == '-':
iskey = 1
if len(str) > 1:
iskey = str[1] not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
return iskey
def strend(str, phr):
return str[-len(phr):] == phr
def decapfile(name, ext=''):
"""REMOVE EXTENSION FROM FILENAME IF PRESENT
IF ext LEFT BLANK, THEN ANY EXTENSION WILL BE REMOVED"""
if ext:
if ext[0] <> '.':
ext = '.' + ext
n = len(ext)
if name[-n:] == ext:
name = name[:-n]
else:
if strend(name, '.gz'):
            name = name[:-3]  # strip the trailing '.gz'
i = name.rfind('.')
if i > -1:
name = name[:i]
return name
def loadrgb(infile):
im = Image.open(infile)
im = im.transpose(Image.FLIP_TOP_BOTTOM)
# rgb = array(im.getdata())
rgb = asarray(im) # numpy
print rgb.shape
#nx, ny = im.size
#rgb.shape = (ny,nx,3)
rgb = transpose(rgb, (2,0,1))
rgb = rgb[:3] # in case there's an alpha channel on the end
rgb.flags.writeable = True # DEFAULT IS CAN'T EDIT IT!
return rgb
#################################
def im2rgbfits(infile, rgbfile='', overwrite=False, headerfile=None, flip=False):
if rgbfile == '':
rgbfile = decapfile(infile) + '_RGB.fits'
if exists(rgbfile):
if overwrite:
            os.remove(rgbfile)
else:
print rgbfile, 'EXISTS'
sys.exit(1)
#im = Image.open(infile)
#print 'Loading data...'
#data = array(im.getdata())
#nxc, nyc = im.size
#data.shape = (nyc,nxc,3)
#data = transpose(data, (2,0,1))
data = loadrgb(infile)
#hdu = pyfits.PrimaryHDU()
header = headerfile and pyfits.getheader(headerfile)
hdu = pyfits.PrimaryHDU(None, header)
hdulist = pyfits.HDUList([hdu])
hdulist.writeto(rgbfile)
try: # If there's a 'SCI' extension, then that's where the WCS is
header = pyfits.getheader(headerfile, 'SCI')
except:
pass
if header <> None:
if 'EXTNAME' in header.keys():
del(header['EXTNAME'])
for i in range(3):
print 'RGB'[i]
data1 = data[i]
if flip:
data1 = flipud(data1)
pyfits.append(rgbfile, data1, header)
print rgbfile, 'NOW READY FOR "Open RGB Fits Image" in ds9'
if __name__ == '__main__':
infile = sys.argv[1]
outfile = ''
if len(sys.argv) > 2:
file2 = sys.argv[2]
if file2[0] <> '-':
outfile = file2
params = params_cl()
overwrite = 'over' in params.keys()
headerfile = params.get('header', None)
im2rgbfits(infile, outfile, overwrite=overwrite, headerfile=headerfile)
#hdulist = pyfits.open(rgbfile)
#hdulist.info()
| gpl-2.0 | 8,847,682,197,896,846,000 | 27.521053 | 93 | 0.506736 | false | 3.46262 | false | false | false |
anksp21/Community-Zenpacks | ZenPacks.community.HPMon/ZenPacks/community/HPMon/HPLogicalDisk.py | 2 | 2225 | ################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPLogicalDisk
HPLogicalDisk is an abstraction of a harddisk.
$Id: HPLogicalDisk.py,v 1.1 2010/06/29 10:41:03 egor Exp $"""
__version__ = "$Revision: 1.1 $"[11:-2]
from ZenPacks.community.deviceAdvDetail.LogicalDisk import *
from HPComponent import *
class HPLogicalDisk(LogicalDisk, HPComponent):
"""HPLogicalDisk object"""
factory_type_information = (
{
'id' : 'HardDisk',
'meta_type' : 'HardDisk',
'description' : """Arbitrary device grouping class""",
'icon' : 'HardDisk_icon.gif',
'product' : 'ZenModel',
'factory' : 'manage_addHardDisk',
'immediate_view' : 'viewHPLogicalDisk',
'actions' :
(
{ 'id' : 'status'
, 'name' : 'Status'
, 'action' : 'viewHPLogicalDisk'
, 'permissions' : (ZEN_VIEW,)
},
{ 'id' : 'perfConf'
, 'name' : 'Template'
, 'action' : 'objTemplates'
, 'permissions' : (ZEN_CHANGE_DEVICE, )
},
{ 'id' : 'viewHistory'
, 'name' : 'Modifications'
, 'action' : 'viewHistory'
, 'permissions' : (ZEN_VIEW_MODIFICATIONS,)
},
)
},
)
def getRRDTemplates(self):
"""
Return the RRD Templates list
"""
templates = []
for tname in [self.__class__.__name__]:
templ = self.getRRDTemplateByName(tname)
if templ: templates.append(templ)
return templates
InitializeClass(HPLogicalDisk)
| gpl-2.0 | 5,101,791,954,218,777,000 | 33.230769 | 80 | 0.449438 | false | 4.151119 | false | false | false |
iraklikhitarishvili/data2class | jsonparser/examples/bog_example.py | 1 | 2544 | import os
from base.field import *
from jsonparser.documents import BaseJsonDocument
from jsonparser.encoder import EnumEncoder
import json
class Person(BaseJsonDocument):
Name = StringField()
Inn = StringField()
AccountNumber = StringField()
BankCode = StringField()
BankName = StringField()
class Item(BaseJsonDocument):
EntryDate = StringField()
EntryDocumentNumber = StringField()
EntryAccountNumber = StringField()
EntryAmountDebit = PyFloatField(required=True)
EntryAmountDebitBase = PyFloatField()
EntryAmountCredit = PyFloatField()
EntryAmountCreditBase = PyFloatField()
EntryAmountBase = PyFloatField()
EntryComment = StringField()
EntryDepartment = StringField()
EntryAccountPoint = StringField()
DocumentProductGroup = StringField()
DocumentValueDate = StringField()
SenderDetails = Person()
BeneficiaryDetails = Person()
DocumentTreasuryCode = StringField()
DocumentNomination = StringField()
DocumentInformation = StringField()
DocumentSourceAmount = PyFloatField()
DocumentSourceCurrency = StringField()
DocumentDestinationAmount = PyFloatField()
DocumentDestinationCurrency = StringField()
DocumentReceiveDate = StringField()
DocumentBranch = StringField()
DocumentDepartment = StringField()
DocumentActualDate = StringField()
DocumentExpiryDate = StringField()
DocumentRateLimit = StringField()
DocumentRate = PyFloatField()
DocumentRegistrationRate = PyFloatField()
DocumentSenderInstitution = StringField()
DocumentIntermediaryInstitution = StringField()
DocumentBeneficiaryInstitution = StringField()
DocumentPayee = StringField()
DocumentCorrespondentAccountNumber = StringField()
DocumentCorrespondentBankCode = StringField()
DocumentCorrespondentBankName = StringField()
DocumentKey = PyFloatField()
EntryId = PyFloatField()
class RootData(BaseJsonDocument):
items = ListField(Item())
def run_example():
dir_name = os.path.dirname(os.path.realpath(__file__))
file_path = "{0}/json/bog".format(dir_name)
with open('{0}.json'.format(file_path), 'r') as test_data_file:
a = RootData()
a.load(test_data_file.read().replace('\n', ''))
if a.is_valid():
with open("{0}_result.json".format(file_path), "w") as f:
print(a.dump(), file=f)
else:
with open("{0}_errors.json".format(file_path), "w") as f:
json.dump(a.errors, f, cls=EnumEncoder)
| bsd-2-clause | 8,811,415,984,244,913,000 | 33.378378 | 69 | 0.704403 | false | 4.378657 | false | false | false |
RobinMorisset/Btrsync | btrsync_cmd.py | 1 | 1925 | #!/usr/bin/env python2
from __future__ import print_function
__author__ = "Antoine Amarilli and Fabrice Ben Hamouda"
import os, re
import argparse
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def main():
parser = argparse.ArgumentParser(description="WARNING: Internal use. Please use btrsync.sh")
parser.add_argument("--status", help="Status file to write (internal use)")
parser.add_argument("root_neil", help="[[user@]host:]path/to/neil")
parser.add_argument("root_oscar", help="[[user@]host:]path/to/oscar")
args = parser.parse_args()
# Print command to be executed for Neil and Oscar
# Used by btrsync.sh
print ("ok")
regex = re.compile("^((?P<server>[^:]+):)?(?P<path>.*)$")
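    # e.g. 'user@host:dir/neil' -> {'server': 'user@host', 'path': 'dir/neil'}
    #      'dir/neil'           -> {'server': None,        'path': 'dir/neil'}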
r_oscar = regex.search(args.root_oscar).groupdict()
r_neil = regex.search(args.root_neil).groupdict()
if r_neil["server"] == None:
root_neil = os.path.abspath(args.root_neil)
root_neil_local = root_neil
else:
root_neil = args.root_neil
root_neil_local = r_neil["path"]
if r_oscar["server"] == None:
root_oscar = os.path.abspath(args.root_oscar)
root_oscar_local = root_oscar
else:
root_oscar = args.root_oscar
root_oscar_local = r_oscar["path"]
if r_neil["server"]==None:
print ("btrsync.py --origin %s %s" % (shellquote(root_neil_local), shellquote(root_oscar)))
else:
print ("ssh %s btrsync.py --origin %s %s" % (r_neil["server"], shellquote(root_neil_local), shellquote(root_oscar)))
# if a status file is provided, pass it to the destination:
invocation = "btrsync.py %s --destination" % ("--status=" + shellquote(args.status) if args.status else "")
if r_oscar["server"]==None:
print ("%s %s %s" % (invocation, shellquote(root_neil), shellquote(root_oscar_local)))
else:
print ("ssh %s %s %s %s" % (r_oscar["server"], invocation, shellquote(root_neil), shellquote(root_oscar_local)))
if __name__ == "__main__":
main()
| gpl-3.0 | 3,168,805,924,806,372,000 | 34.648148 | 120 | 0.649351 | false | 2.921093 | false | false | false |
massimo-nocentini/on-python | microkanren/muk/sexp.py | 1 | 3546 |
from collections import namedtuple
from contextlib import contextmanager
from functools import wraps
from inspect import signature
from muk.utils import identity
class cons(namedtuple('_cons', ['car', 'cdr'])):
def walk_star(self, W):
return cons(W(self.car), W(self.cdr))
def unification(self, other, sub, ext_s, U, E):
try: UC = other._unification_cons
except AttributeError: raise E
else: return UC(self, sub, ext_s, U)
def _unification_cons(self, other_cons, sub, ext_s, U):
if other_cons.cdr == (): return U(other_cons.car, self, sub, ext_s)
if self.cdr == (): return U(self.car, other_cons, sub, ext_s)
cars_sub = U(other_cons.car, self.car, sub, ext_s)
return U(other_cons.cdr, self.cdr, cars_sub, ext_s)
def reify_s(self, sub, R):
return R(self.cdr, R(self.car, sub))
def occur_check(self, u, O, E):
return O(u, self.car) or O(u, self.cdr)
def __radd__(self, other):
if isinstance(other, list):
return list_to_cons(other, post=lambda l: self if l == [] else l)
raise NotImplemented
class ImproperListError(ValueError):
pass
def list_to_cons(l, post=identity):
    if isinstance(l, (str, cons)): return l # a `str` is treated as an atom, not as an iterable
λ = type(l)
try:
car, cadr, *cddr = l
except:
try:
car, *cdr = l
except:
return l
else:
cdr = λ(cdr) # again, restore correct type of the tail
if cdr == (): raise ImproperListError # otherwise outer try couldn't fail
cdr = post(cdr)
return cons(car=list_to_cons(car), cdr=list_to_cons(cdr))
else:
cddr = λ(cddr) # restore correct type of tail collecting obj
if cddr == (): return cons(car=list_to_cons(car), cdr=list_to_cons(cadr))
cdr = λ([cadr]) + cddr # reconstruct `cdr` by adapt `[cadr]` to safely apply +
return cons(car=list_to_cons(car), cdr=list_to_cons(cdr))
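# Illustrative round trips (editor's addition, values checked by hand):
#   list_to_cons([1, 2])    == cons(1, 2)
#   list_to_cons([1, 2, 3]) == cons(1, cons(2, 3))
# i.e. a proper Python list becomes a chain of cons pairs whose final cdr is
# the last element, and strings are kept whole as atoms.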
def cons_to_list(c, for_cdr=False):
try:
car, cdr = c
except:
if c == (): raise ImproperListError
return (([], list) if c == [] else ((c,), tuple)) if for_cdr else c
d, λ = cons_to_list(cdr, for_cdr=True)
r = λ([cons_to_list(car, for_cdr=False)]) + d
return (r, λ) if for_cdr else r
def adapt_iterables_to_conses(selector, ctor=list_to_cons):
def decorator(f):
f_sig = signature(f)
formal_args = [v.name for k, v in f_sig.parameters.items()]
selection = selector(*formal_args)
if isinstance(selection, set):
selection = {s:ctor for s in selection}
@wraps(f)
def D(*args, bypass_cons_adapter=False, **kwds):
new_args = args if bypass_cons_adapter else [c(a) for f, a in zip(formal_args, args)
for c in [selection.get(f, identity)]]
return f(*new_args, **kwds)
return D
return decorator
all_arguments = lambda *args: set(args)
def int_to_list(i):
return list(map(int, reversed(bin(i)[2:]))) if i else []
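# e.g. int_to_list(6) == [0, 1, 1], least-significant bit first; 0 maps to
# the empty list (editor's note).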
class num(cons):
@classmethod
def build(cls, obj):
if isinstance(obj, int): obj = int_to_list(obj)
c = list_to_cons(obj)
return num(c.car, c.cdr) if isinstance(c, cons) else c
def __int__(self):
def I(c, e):
return 0 if c == [] else c.car * 2**e + I(c.cdr, e+1)
return I(self, e=0)
| mit | -3,657,997,878,783,638,000 | 29.247863 | 104 | 0.570218 | false | 3.179695 | false | false | false |
RedHatInsights/insights-core | insights/parsers/net_namespace.py | 1 | 1326 | """
NetworkNamespace = ``/bin/ls /var/run/netns``
=============================================
This spec provides the list of network namespaces created on the host machine.
Typical output of this command is as below::
temp_netns temp_netns_2 temp_netns_3
The ``/bin/ls /var/run/netns`` command is preferred over ``/bin/ip netns list`` because it works on
all RHEL versions, whether or not the ip package is installed.
Examples:
>>> type(netns_obj)
<class 'insights.parsers.net_namespace.NetworkNamespace'>
>>> netns_obj.netns_list
['temp_netns', 'temp_netns_2', 'temp_netns_3']
>>> len(netns_obj.netns_list)
3
"""
from insights import Parser, parser, get_active_lines
from insights.parsers import SkipException
from insights.specs import Specs
@parser(Specs.namespace)
class NetworkNamespace(Parser):
def parse_content(self, content):
if not content:
raise SkipException('Nothing to parse.')
self._netns_list = []
for line in get_active_lines(content):
self._netns_list.extend(line.split())
@property
def netns_list(self):
"""
This method returns list of network namespace created
in process memory.
Returns:
`list` of network namepaces if exists.
"""
return self._netns_list
| apache-2.0 | 2,550,812,489,670,395,400 | 26.061224 | 90 | 0.635747 | false | 3.683333 | false | false | false |
jerbob92/CouchPotatoServer | couchpotato/core/providers/torrent/thepiratebay/main.py | 3 | 5605 | from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from couchpotato.environment import Env
import re
import time
import traceback
log = CPLog(__name__)
class ThePirateBay(TorrentMagnetProvider):
urls = {
'detail': '%s/torrent/%s',
'search': '%s/search/%s/%s/7/%s'
}
cat_ids = [
([207], ['720p', '1080p']),
([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
([201, 207], ['brrip']),
([202], ['dvdr'])
]
cat_backup_id = 200
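    # TPB video sub-categories (editor's note): 201 = Movies,
    # 202 = Movies DVDR, 207 = HD Movies; 200 = Video is the fallback.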
disable_provider = False
http_time_between_calls = 0
proxy_list = [
'https://thepiratebay.se',
'https://tpb.ipredator.se',
'https://depiraatbaai.be',
'https://piratereverse.info',
'https://tpb.pirateparty.org.uk',
'https://argumentomteemigreren.nl',
'https://livepirate.com',
'https://www.getpirate.com',
'https://tpb.partipirate.org',
'https://tpb.piraten.lu',
'https://kuiken.co',
]
def __init__(self):
self.domain = self.conf('domain')
super(ThePirateBay, self).__init__()
def _searchOnTitle(self, title, movie, quality, results):
page = 0
total_pages = 1
cats = self.getCatId(quality['identifier'])
while page < total_pages:
search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, ','.join(str(x) for x in cats))
page += 1
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'searchResult'})
if not results_table:
return
try:
total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
except:
pass
entries = results_table.find_all('tr')
for result in entries[2:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
try:
size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
except:
continue
if link and download:
def extra_score(item):
trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]
return confirmed + trusted + vip + moderated
results.append({
'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
'name': link.string,
'url': download['href'],
'detail_url': self.getDomain(link['href']),
'size': self.parseSize(size),
'seeders': tryInt(result.find_all('td')[2].string),
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def isEnabled(self):
return super(ThePirateBay, self).isEnabled() and self.getDomain()
def getDomain(self, url = ''):
if not self.domain:
for proxy in self.proxy_list:
prop_name = 'tpb_proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed tpb proxy %s', proxy)
if 'title="Pirate Search"' in data:
log.debug('Using proxy: %s', proxy)
self.domain = proxy
break
Env.prop(prop_name, time.time())
if not self.domain:
log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.')
return None
return cleanHost(self.domain).rstrip('/') + url
def getMoreInfo(self, item):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'class':'nfo'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
| gpl-3.0 | -7,151,975,899,739,110,000 | 36.119205 | 164 | 0.487957 | false | 4.136531 | false | false | false |
turbokongen/home-assistant | homeassistant/components/tado/const.py | 2 | 4136 | """Constant values for the Tado component."""
from PyTado.const import (
CONST_HVAC_COOL,
CONST_HVAC_DRY,
CONST_HVAC_FAN,
CONST_HVAC_HEAT,
CONST_HVAC_HOT_WATER,
CONST_HVAC_IDLE,
CONST_HVAC_OFF,
)
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
)
TADO_HVAC_ACTION_TO_HA_HVAC_ACTION = {
CONST_HVAC_HEAT: CURRENT_HVAC_HEAT,
CONST_HVAC_DRY: CURRENT_HVAC_DRY,
CONST_HVAC_FAN: CURRENT_HVAC_FAN,
CONST_HVAC_COOL: CURRENT_HVAC_COOL,
CONST_HVAC_IDLE: CURRENT_HVAC_IDLE,
CONST_HVAC_OFF: CURRENT_HVAC_OFF,
CONST_HVAC_HOT_WATER: CURRENT_HVAC_HEAT,
}
# Configuration
CONF_FALLBACK = "fallback"
DATA = "data"
UPDATE_TRACK = "update_track"
# Types
TYPE_AIR_CONDITIONING = "AIR_CONDITIONING"
TYPE_HEATING = "HEATING"
TYPE_HOT_WATER = "HOT_WATER"
TYPE_BATTERY = "BATTERY"
TYPE_POWER = "POWER"
# Base modes
CONST_MODE_OFF = "OFF"
CONST_MODE_SMART_SCHEDULE = "SMART_SCHEDULE" # Use the schedule
CONST_MODE_AUTO = "AUTO"
CONST_MODE_COOL = "COOL"
CONST_MODE_HEAT = "HEAT"
CONST_MODE_DRY = "DRY"
CONST_MODE_FAN = "FAN"
CONST_LINK_OFFLINE = "OFFLINE"
CONST_FAN_OFF = "OFF"
CONST_FAN_AUTO = "AUTO"
CONST_FAN_LOW = "LOW"
CONST_FAN_MIDDLE = "MIDDLE"
CONST_FAN_HIGH = "HIGH"
# When we change the temperature setting, we need an overlay mode
CONST_OVERLAY_TADO_MODE = (
"NEXT_TIME_BLOCK" # wait until tado changes the mode automatic
)
CONST_OVERLAY_MANUAL = "MANUAL" # the user has changed the temperature or mode manually
CONST_OVERLAY_TIMER = "TIMER" # the temperature will be reset after a timespan
# Heat always comes first since we get the min and max
# temperatures for the zone from it.
# Heat is preferred as it generally has a lower minimum temperature
ORDERED_KNOWN_TADO_MODES = [
CONST_MODE_HEAT,
CONST_MODE_COOL,
CONST_MODE_AUTO,
CONST_MODE_DRY,
CONST_MODE_FAN,
]
TADO_MODES_TO_HA_CURRENT_HVAC_ACTION = {
CONST_MODE_HEAT: CURRENT_HVAC_HEAT,
CONST_MODE_DRY: CURRENT_HVAC_DRY,
CONST_MODE_FAN: CURRENT_HVAC_FAN,
CONST_MODE_COOL: CURRENT_HVAC_COOL,
}
# These modes will not allow a temp to be set
TADO_MODES_WITH_NO_TEMP_SETTING = [CONST_MODE_AUTO, CONST_MODE_DRY, CONST_MODE_FAN]
#
# HVAC_MODE_HEAT_COOL is mapped to CONST_MODE_AUTO
# This lets tado decide on a temp
#
# HVAC_MODE_AUTO is mapped to CONST_MODE_SMART_SCHEDULE
# This runs the smart schedule
#
HA_TO_TADO_HVAC_MODE_MAP = {
HVAC_MODE_OFF: CONST_MODE_OFF,
HVAC_MODE_HEAT_COOL: CONST_MODE_AUTO,
HVAC_MODE_AUTO: CONST_MODE_SMART_SCHEDULE,
HVAC_MODE_HEAT: CONST_MODE_HEAT,
HVAC_MODE_COOL: CONST_MODE_COOL,
HVAC_MODE_DRY: CONST_MODE_DRY,
HVAC_MODE_FAN_ONLY: CONST_MODE_FAN,
}
HA_TO_TADO_FAN_MODE_MAP = {
FAN_AUTO: CONST_FAN_AUTO,
FAN_OFF: CONST_FAN_OFF,
FAN_LOW: CONST_FAN_LOW,
FAN_MEDIUM: CONST_FAN_MIDDLE,
FAN_HIGH: CONST_FAN_HIGH,
}
TADO_TO_HA_HVAC_MODE_MAP = {
value: key for key, value in HA_TO_TADO_HVAC_MODE_MAP.items()
}
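# A minimal illustrative sketch (not part of the component) of how the paired
# maps translate between the two vocabularies:
#   HA_TO_TADO_HVAC_MODE_MAP[HVAC_MODE_HEAT]  -> CONST_MODE_HEAT
#   TADO_TO_HA_HVAC_MODE_MAP[CONST_MODE_HEAT] -> HVAC_MODE_HEAT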
TADO_TO_HA_FAN_MODE_MAP = {value: key for key, value in HA_TO_TADO_FAN_MODE_MAP.items()}
DEFAULT_TADO_PRECISION = 0.1
SUPPORT_PRESET = [PRESET_AWAY, PRESET_HOME]
TADO_SWING_OFF = "OFF"
TADO_SWING_ON = "ON"
DOMAIN = "tado"
SIGNAL_TADO_UPDATE_RECEIVED = "tado_update_received_{}_{}_{}"
UNIQUE_ID = "unique_id"
DEFAULT_NAME = "Tado"
TADO_ZONE = "Zone"
UPDATE_LISTENER = "update_listener"
# Constants for Temperature Offset
INSIDE_TEMPERATURE_MEASUREMENT = "INSIDE_TEMPERATURE_MEASUREMENT"
TEMP_OFFSET = "temperatureOffset"
TADO_OFFSET_CELSIUS = "celsius"
HA_OFFSET_CELSIUS = "offset_celsius"
TADO_OFFSET_FAHRENHEIT = "fahrenheit"
HA_OFFSET_FAHRENHEIT = "offset_fahrenheit"
TADO_TO_HA_OFFSET_MAP = {
TADO_OFFSET_CELSIUS: HA_OFFSET_CELSIUS,
TADO_OFFSET_FAHRENHEIT: HA_OFFSET_FAHRENHEIT,
}
| apache-2.0 | 8,073,339,454,037,373,000 | 23.915663 | 88 | 0.691973 | false | 2.506667 | false | false | false |
italomaia/turtle-linux | games/gHell/lib/separador.py | 1 | 21974 | import qgl
import pygame
from pygame.locals import *
import data
from scene import Scene
from actions import *
import pygame
import view
import main_menu
import leafs
from data import filepath
from intro import Intro
DADDY_FALL = 10000 # milliseconds
ROJO_SANGRE = (0.8, 0.1, 0.05, 0)
class Separador(Scene):
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game,True))
def __init__(self, world, score=0):
Scene.__init__(self, world)
self.score = score
self.root_node.background_color = view.CELESTE_CIELO
am = self.actionManager = Manager()
am.do( None,
delay(10) +
call(self.next)
)
def next(self):
self.game.change_scene( _Separador( self.game, self.score ) )
def update(self, dt):
if dt>1000:
return
self.actionManager.loop(dt)
class _Separador(Scene):
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, True))
def __init__(self, world, score=0):
Scene.__init__(self, world)
self.score = score
self.root_node.background_color = view.CELESTE_CIELO
self.font = filepath('You Are Loved.ttf')
import sound
self.initializeMusic()
clouds = self.createClouds()
clouds3 = self.createClouds()
dad = self.createDad()
clouds2 = self.createClouds()
self.accept()
diedline = self.diedline()
scoretext = self.scoreline()
scoretext.disable()
diedline.disable()
am = self.actionManager = Manager()
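        # Actions appear to compose with "+" (run in sequence) and spawn()
        # (run in parallel); the Manager advances them from update() below.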
am.do( clouds, place( Point3(-200,200,0) ) )
dad.scale = Vector3(0.1,0.1,1)
am.do( dad, place( Point3(0,0,0) ) )
am.do( dad, repeat( rotate(360, duration=2100) ) )
am.do( dad,
scale( 10, duration=10000 ) +
spawn( call( scoretext.enable ) ) +
delay( 4000 ) +
scale( 10, duration=5000 ) +
call(lambda: sound.playMusicSound( self.crash, 1 ) ) +
call(lambda:
setattr(self.root_node, "background_color",ROJO_SANGRE) ) +
call( diedline.enable ) +
place(Point3(-2000,-2000,0))
)
clouds2.scale = Vector3(20,20,1)
am.do( clouds2, place( Point3(-5500,3500,0) ) )
am.do( clouds2, goto( Point3(-600,400,0), duration=10000 ) )
am.do( clouds2,
scale( 1.0/10, duration=10000 ) +
place( Point3(-1000, -1000, 0) )
)
clouds.scale = Vector3(2,2,1)
am.do( clouds,
place( Point3(-1000, -1000, 0) ) +
delay ( 10000 ) +
place( Point3(-600,400,0) ) +
delay( 4000 ) +
spawn(goto( Point3(-60,40,0), duration=5000 )) +
scale( 1.0/10, duration=5000 ) +
place( Point3(-1000, -1000, 0) )
)
clouds3.scale = Vector3(5,5,1)
am.do( clouds3,
place( Point3(2000, -2000, 0) ) +
delay ( 10000 ) +
delay( 4000 ) +
spawn(goto( Point3(200,-200,0), duration=5000 )) +
scale( 1.0/10, duration=5000 ) +
place( Point3(2000, -2000, 0) )
)
sound.playSoundFile("freefall.ogg",1)
def scoreline(self):
t = self.create_text("you made %i points..."%self.score)
self.add_group(t)
self.accept()
p = t.translate
t.translate = Point3(p[0], 200, p[2])
return t
def diedline(self):
t = self.create_text("and then died.")
self.add_group(t)
self.accept()
p = t.translate
t.translate = Point3(p[0], -200, p[2])
return t
def create_text(self, text):
f = leafs.TextoAlineado(text, self.font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(f)
return group
### Music
def initializeMusic(self):
import sound
self.music = sound.initMusicFile("01 - Songe Original mix.ogg")
self.crash = sound.initMusicFile("../sonidos/crash.wav")
### Sky
def createCloud( self, imageName, initialPos, size ):
skyGroup = qgl.scene.Group()
skyTexture = qgl.scene.state.Texture(data.filepath(imageName))
skyQuad = qgl.scene.state.Quad( size )
skyGroup.add(skyTexture)
skyGroup.add(skyQuad)
skyGroup.axis = (0,0,1)
skyGroup.angle = 0
skyGroup.translate = initialPos
return skyGroup
def createClouds( self ):
clouds = qgl.scene.Group()
c1 = self.createCloud( "cloud1.png", (-200,-800,0), (345,189) )
c3 = self.createCloud( "cloud3.png", ( 250,-200,0), (284/2,104/2) )
clouds.add(c1)
clouds.add(c3)
clouds.axis = (0,0,1)
clouds.angle = 0
clouds.translate = (0,-200,0)
self.add_group(clouds)
return clouds
def createClouds2( self ):
clouds = qgl.scene.Group()
c2 = self.createCloud( "cloud2.png", ( 0,-300,0), (527,221) )
c3 = self.createCloud( "cloud3.png", ( -250,-200,0), (284/2,104/3) )
clouds.add(c2)
clouds.add(c3)
clouds.axis = (0,0,1)
clouds.angle = 0
clouds.translate = (0,-200,0)
self.add_group(clouds)
return clouds
def createClouds3( self ):
clouds = qgl.scene.Group()
c1 = self.createCloud( "cloud1.png", (-200,-800,0), (345/2,189/2) )
c2 = self.createCloud( "cloud2.png", ( 150,-300,0), (527/2,221/2) )
clouds.add(c1)
clouds.add(c2)
clouds.axis = (0,0,1)
clouds.angle = 0
clouds.translate = (0,-200,0)
self.add_group(clouds)
return clouds
### Airplane
def createAirplane( self ):
plane = qgl.scene.Group()
planeTexture = qgl.scene.state.Texture(data.filepath("biplane.png"))
planeQuad = qgl.scene.state.Quad((100,46))
plane.add(planeTexture)
plane.add(planeQuad)
plane.axis = (0,0,1)
plane.angle = 0
plane.translate = (600,0,0)
self.add_group(plane)
return plane
### People
def createPerson(self, imageName, initialPos, size=(64,128) ):
personGroup = qgl.scene.Group()
dadTexture = qgl.scene.state.Texture(data.filepath(imageName))
dadQuad = qgl.scene.state.Quad(size)
personGroup.add(dadTexture)
personGroup.add(dadQuad)
personGroup.axis = (0,0,1)
personGroup.angle = 0
personGroup.translate = initialPos
return personGroup
def createDad(self):
dad = self.createPerson( "dad-handsup-mouth.gif", (0,700,0) )
self.add_group(dad)
return dad
def createDevil(self):
devil = qgl.scene.Group()
body = self.createPerson("body_diablo.png", (0,0,0), (49,118) )
c2 = self.createCloud( "cloud2.png", ( 0,-50,0), (527/2,221/2) )
devil.add(body)
devil.add(c2)
devil.axis = (0,0,1)
devil.angle = 0
devil.translate = (0,-600,0)
self.add_group(devil)
return devil
def createJesus(self):
jesus = self.createPerson("body_jesus.png", (200,200,0) )
        self.add_group(jesus)
return jesus
def createAlien(self):
alien = self.createPerson("alien_brazos_arriba.png", (0,600,0) )
alien.angle = 180
self.add_group(alien)
return alien
### Objects
def createVarita(self):
varita = self.createPerson("varita.png", (1200,0,0), (32,64) )
self.add_group(varita)
return varita
def update(self, dt):
if dt>1000:
return
self.actionManager.loop(dt)
# Handlers
def playMusic( self ):
import sound
sound.playMusicSound(self.music,1)
class Interlude(_Separador):
def __init__(self, nextScene, text="press enter...", *a):
Scene.__init__(self, *a)
self.nextScene = nextScene
self.root_node.background_color = view.CELESTE_CIELO
am = self.actionManager = Manager()
clouds = self.createClouds()
clouds.translate = (100,0,0)
clouds2 = self.createClouds2()
clouds2.translate = (-100,300, 0)
clouds3 = self.createClouds3()
clouds3.translate = (50,100,0)
dads = []
basetime = 2500.0
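        # the d-th dad waits through a partial harmonic series of delays and
        # then flies up in basetime/(d+1) ms, so later dads launch and move
        # progressively faster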
for d in range(10):
dad = self.createDad()
dad.translate = Point3(-300+60*d, -600, 0)
if d != 0:
am.do( dad,
delay( sum([ (basetime/(r+1)) for r in range(d)] ) - basetime ) +
move((0,1200,0), duration=basetime/(d+1))
)
dads.append( dad )
varita = self.createVarita()
font = data.filepath('You Are Loved.ttf')
figure = leafs.TextoAlineado(text, font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (0, 240,0)
self.group.add(group)
self.accept()
def createDad(self):
dad = self.createPerson( "dad-fly.gif", (0,700,0) )
self.add_group(dad)
return dad
def update_event(self, event):
if event.type == KEYDOWN and event.key == K_ESCAPE:
self.game.change_scene(Intro(self.game, True))
elif event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(self.nextScene(self.game))
def musique(what):
import data
pygame.mixer.music.load( data.filepath("sonidos/"+what) )
pygame.mixer.music.play()
class History(_Separador):
def __init__(self, game):
Scene.__init__(self, game)
import sound
self.root_node.background_color = (0,0,0,0)
am = self.actionManager = Manager()
luz = self.create_image("dad.png")
luz.translate = Point3(60,-10,0)
luz.scale = (12,4.5,0)
dad_hi = self.createDad()
dad_hi.translate = Point3(150, -150, 0)
script = [
("- hey...", 800),
("where am I?", 1800),
("this is not home!", 2000),
("my teleport spell must have failed", 2000),
("lets try again...", 2000),
(" ", 2000),
("ouch!", 1700),
("this didn't work", 2000),
("I'll get help from above", 2300),
("I'm going up!", 2000),
]
offset = 0
lines = []
for line, duration in script:
l = self.create_line(line)
lines.append( ( l, offset, duration) )
offset += duration
nube = [ self.create_image("nube%i.png"%i) for i in range(1, 6) ]
[ setattr(n, "translate", Point3(150, -150,0)) for n in nube ]
dad = self.create_image("dad.gif")
dad.translate = Point3(-350, -150, 0)
self.accept()
dad_hi.disable()
luz.disable()
[ n.disable() for n in nube ]
[ n.disable() for (n,a,x) in lines ]
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
am.do( None,
delay(20000)+
call(lambda: musique("grossini talking 1.ogg") ) +
delay(10600)+
call(lambda: musique("grossini talking 2.ogg") )
)
am.do( dad,
goto( Point3(150, -150, 0), duration=5000 ) +
call(lambda: luz.enable() ) +
call(lambda: sound.playSoundFile("farol.wav",1) ) +
delay(1500) +
call(lambda: musique("Applause.wav") ) +
delay(2500) +
call(lambda: dad.disable()) +
call(lambda: dad_hi.enable()) +
delay(6000) +
call(lambda: sound.playSoundFile("MagiaOK.wav",1) ) +
call(lambda: dad_hi.disable()) +
delay(3000) +
call(lambda: luz.disable() ) +
call(lambda: sound.playSoundFile("farol.wav",1) )
)
for (line, start, duration) in lines:
am.do( line,
delay(20000)+
delay(start)+
call(enable(line))+
delay(duration)+
call(disable(line))
)
am.do( None,
delay(20000+4*2000)+
call(lambda: sound.playSoundFile("tomato.wav",1) )
)
am.do( None,
delay(20000+5*2000)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)
)
am.do( None,
delay( 20000 + duration+start) +
call(lambda: self.game.change_scene(Intro(self.game, False)))
)
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
for i,n in enumerate(nube):
am.do( n,
delay(15500) +
delay(400*i) +
call(enable(n)) +
delay(400) +
call(disable(n))
)
def createDad(self):
dad = self.createPerson( "dad-wave.gif", (0,700,0) )
self.add_group(dad)
return dad
def create_image(self, path):
dad = self.createPerson( path, (0,700,0) )
self.add_group(dad)
return dad
def create_line(self, text):
font = data.filepath('MagicSchoolOne.ttf')
figure = leafs.TextoAlineado(text, font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (0, 0,0)
self.group.add(group)
return group
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, False))
class Credits(_Separador):
def __init__(self, game):
Scene.__init__(self, game)
import sound
self.root_node.background_color = (0,0,0,0)
am = self.actionManager = Manager()
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
script = [
("Divine Inspiration", "David"),
("Magic", "Great Grossini"),
("Hosting", "leito"),
("Hosting", "alecu"),
("coding", "dave"),
("coding", "riq"),
("coding", "alecu"),
("coding", "hugo"),
("coding", "lucio"),
("Music", "Ricardo Vecchio"),
]
offset = 0
lines = []
for cargo, nombre in script:
l1 = self.create_line(cargo)
l2 = self.create_line(nombre)
l2.translate = (0,-00,0)
lines.append( ( l1, l2 ) )
self.accept()
[ (l1.disable(), l2.disable()) for (l1,l2) in lines ]
def make_title(line):
l1, l2 = line
do_title = (
delay(100)+
call(lambda: sound.playSoundFile("tomato.wav",1) ) +
delay(2000)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (1,1,1,0)))+
delay(100)+
call(lambda:setattr(self.root_node, "background_color", (0,0,0,0)))+
delay(100)+
call(lambda:setattr(l2,'translate',Point3(0,00,0)))+
call(lambda:setattr(l1,'translate',Point3(0,100,0)))+
call(lambda:setattr(l2,'angle',0))+
call(lambda:setattr(l1,'angle',0))+
call(lambda: l1.enable()) +
call(lambda: l2.enable()) +
delay(1500)+
spawn(move(Point3(0,-600,0), duration=1000), target=l1)+
spawn(move(Point3(0,-600,0), duration=1000), target=l2)+
spawn(rotate(45, duration=1000), target=l1)+
spawn(rotate(-45, duration=1000), target=l2)+
delay(2500)+
call(lambda: l1.disable()) +
call(lambda: l2.disable())
)
return do_title
am.do(None, random_repeat( [ make_title(line) for line in lines ] ))
def createDad(self):
dad = self.createPerson( "dad-fly.gif", (0,700,0) )
self.add_group(dad)
return dad
def create_image(self, path):
dad = self.createPerson( path, (0,700,0) )
self.add_group(dad)
return dad
def create_line(self, text):
font = data.filepath('MagicSchoolOne.ttf')
figure = leafs.TextoAlineado(text, font, size=3000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (150, 200,0)
self.group.add(group)
return group
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, True))
class Win(_Separador):
def __init__(self, game):
Scene.__init__(self, game)
import sound
self.root_node.background_color = view.CELESTE_CIELO
am = self.actionManager = Manager()
def enable(what):
def doit():
what.enable()
return doit
def disable(what):
def doit():
what.disable()
return doit
g1 = self.create_image("gift.png")
g1.translate = Point3(200, -160, 0)
g2 = self.create_image("gift.png")
g2.translate = Point3(300, -160, 0)
dad_hi = self.createDad()
dad_hi.translate = Point3(250, -150, 0)
dad = self.create_image("dad.gif")
dad.translate = Point3(250, -150, 0)
god = self.create_image("god.png")
god.scale = (6,2,0)
god.translate = Point3(-200,145,0)
clouds = self.createClouds()
clouds.translate = Point3(-540,380,0)
clouds.scale = (3,3,0)
clouds3 = self.createClouds2()
clouds3.translate = Point3(-200,300,0)
script = [
("- hi god!", 2000),
(" nice to see", 2000),
(" you are having fun", 2000),
(" help me!", 2000),
(" ", 3000),
(" thanks!", 2000),
]
offset = 0
lines = []
for line, duration in script:
l = self.create_line(line)
lines.append( ( l, offset, duration) )
offset += duration
self.accept()
[ n.disable() for (n,a,x) in lines ]
dad_hi.disable()
g1.disable()
g2.disable()
am.do( dad,
delay(5000) +
call(enable(dad_hi)) +
call(disable(dad)) +
delay(2000) +
call(disable(dad_hi)) +
call(enable(dad)) +
delay(8000)+
call(enable(g1)) +
call(enable(g2))
)
for (line, start, duration) in lines:
am.do( line,
delay(5000)+
delay(start)+
call(enable(line))+
delay(duration)+
call(disable(line))
)
def createDad(self):
dad = self.createPerson( "dad-fly.gif", (0,700,0) )
self.add_group(dad)
return dad
def create_image(self, path):
dad = self.createPerson( path, (0,700,0) )
self.add_group(dad)
return dad
def create_line(self, text):
font = data.filepath('MagicSchoolOne.ttf')
figure = leafs.TextoAlineado(text, font, size=1000, alignx=0.5, aligny=0.5)
group = qgl.scene.Group()
group.add(figure)
group.translate = (170, 0,0)
self.group.add(group)
return group
def update_event(self, event):
if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.game.change_scene(Intro(self.game, True))
| gpl-3.0 | 4,949,236,643,092,968,000 | 30.391429 | 85 | 0.492264 | false | 3.559695 | false | false | false |
patbeagan1/libbeagan | scripts/file_management/shard.py | 1 | 3521 | #!/usr/bin/env python3
import os
import time
import subprocess
import argparse
from sys import argv
from os.path import join, getsize
import hashlib
import re
description = """
New Script.
"""
verbosity = 0
verbosity_level = {'quiet': (None, -100), 'error': ('E: ', -2), 'warning': ('W: ', -1),
'info': ('', 0), 'debug': ('D: ', 1), 'verbose': ('V: ', 2), 'dump': ('Z: ', 3)}
def dprint(s, s_verbosity_in: str = "info"):
verbosity_in_prefix, verbosity_in_value = verbosity_level[s_verbosity_in]
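    # when --quiet was passed the global holds the "quiet" tuple itself,
    # which the first test below catches; otherwise it is a plain int level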
if verbosity == verbosity_level["quiet"]:
pass
elif verbosity_in_value <= verbosity:
print(verbosity_in_prefix + s)
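# e.g. dprint("walking tree", "debug") prints "D: walking tree" only when at
# least -v was passed (debug level 1 <= verbosity).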
def parse_args():
parser = argparse.ArgumentParser(description=description)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-v", "--verbosity", help="Increases the verbosity level. Supports up to -vvv.", action="count", default=0)
group.add_argument("-q", "--quiet", action="store_true")
global verbosity
args = parser.parse_args()
if args.quiet:
verbosity = verbosity_level["quiet"]
else:
verbosity = args.verbosity
return args
def attempt_non_collision_rename(new_name):
    # Append an increasing numeric suffix to the original stem until a free
    # name is found (name.ext -> name_1.ext -> name_2.ext, ...). Splitting
    # once, outside the loop, keeps the suffixes from compounding.
    base, ext = os.path.splitext(new_name)
    ii = 1
    while True:
        candidate = base + "_" + str(ii) + ext
        if not os.path.exists(candidate):
            return candidate
        ii += 1
if __name__ == "__main__":
args = parse_args()
top = os.getcwd()
files_arr = []
for root, dirs, files in os.walk(top, topdown=True, onerror=None, followlinks=False):
print(root, "consumes", end=" ")
print(sum(getsize(join(root, name)) for name in files), end=" ")
print("bytes in", len(files), "non-directory files")
dprint("Test code.", "error")
curr_dir = os.path.basename(root)
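        # topdown walk + clearing dirs in place prunes the traversal, so only
        # files directly under the top directory are processed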
dirs.clear()
# checking tmsu is active
try:
command = ["tmsu", "files"]
subprocess.check_call(command)
except subprocess.CalledProcessError:
exit("failure running tmsu")
for f in files:
file_name = f"{root}/{f}"
with open(file_name, "rb") as in_file:
# getting the hash of the file
m = hashlib.sha256(in_file.read()).hexdigest()
# generating the short hash directory
short_hash = m[0:1]
if not os.path.exists(short_hash):
os.makedirs(short_hash)
relative_new_name = m + os.path.splitext(file_name)[1]
# f"{curr_dir}.{f}"
new_name = f"{short_hash}/{relative_new_name}"
# Make sure that we will not collide
if os.path.exists(new_name):
new_name = attempt_non_collision_rename(new_name)
os.rename(file_name, new_name)
print("Copied " + file_name + " as " + new_name)
# adding tags to the file
try:
                cleanF = re.sub(r'\W+', '_', f)
command = ["tmsu", "tag", f"{new_name}", f"category={curr_dir}", f"original_name={cleanF}"]
print(command)
subprocess.check_call(command)
except subprocess.CalledProcessError:
exit("failure running tmsu")
print()
| mit | -5,081,354,538,176,762,000 | 31.601852 | 115 | 0.530815 | false | 3.925307 | false | false | false |
kaushik94/sympy | sympy/core/basic.py | 1 | 67914 | """Base class for all the objects in SymPy"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import chain
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import (iterable, Iterator, ordered,
string_types, with_metaclass, zip_longest, range, PY3, Mapping)
from .singleton import S
from inspect import getmro
def as_Basic(expr):
"""Return expr as a Basic instance using strict sympify
or raise a TypeError; this is just a wrapper to _sympify,
raising a TypeError instead of a SympifyError."""
from sympy.utilities.misc import func_name
try:
return _sympify(expr)
except SympifyError:
raise TypeError(
'Argument must be a Basic object, not `%s`' % func_name(
expr))
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
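        """Return a new object equivalent to ``self``, rebuilt as
        ``self.func(*self.args)``."""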
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'extended_negative': False,
'extended_nonnegative': True, 'extended_nonpositive': False,
'extended_nonzero': True, 'extended_positive': True, 'extended_real':
True, 'finite': True, 'hermitian': True, 'imaginary': False,
'infinite': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real':
True, 'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
tself = type(self)
tother = type(other)
if tself is not tother:
try:
other = _sympify(other)
tother = type(other)
except SympifyError:
return NotImplemented
# As long as we have the ordering of classes (sympy.core),
# comparing types will be slow in Python 2, because it uses
# __cmp__. Until we can remove it
# (https://github.com/sympy/sympy/issues/4269), we only compare
# types in Python 2 directly if they actually have __ne__.
if PY3 or type(tself).__ne__ is not type.__ne__:
if tself != tother:
return False
elif tself is not tother:
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""``a != b`` -> Compare two symbolic trees and see whether they are different
this is the same as:
``a.compare(b) != 0``
but faster
"""
return not self == other
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.subs(dummy, tmp) == o.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
"""Method to return the string representation.
Return the expression as a string.
"""
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
# We don't define _repr_png_ here because it would add a large amount of
# data to any notebook containing SymPy expressions, without adding
# anything useful to the notebook. It can still enabled manually, e.g.,
# for the qtconsole, with init_printing().
def _repr_latex_(self):
"""
IPython/Jupyter LaTeX printing
To change the behavior of this (e.g., pass in some settings to LaTeX),
use init_printing(). init_printing() will also enable LaTeX printing
for built in numeric types like ints and container types that contain
SymPy objects, like lists and dictionaries of expressions.
"""
from sympy.printing.latex import latex
s = latex(self, mode='plain')
return "$\\displaystyle %s$" % s
_repr_latex_orig = _repr_latex_
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
{x, y}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
{1, 2}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
{1, 2, pi}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
{x, y}
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
{1}
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def expr_free_symbols(self):
return set([])
def as_dummy(self):
"""Return the expression with any objects having structurally
bound symbols replaced with unique, canonical symbols within
the object in which they appear and having only the default
assumption for commutativity being True.
Examples
========
>>> from sympy import Integral, Symbol
>>> from sympy.abc import x, y
>>> r = Symbol('r', real=True)
>>> Integral(r, (r, x)).as_dummy()
Integral(_0, (_0, x))
>>> _.variables[0].is_real is None
True
Notes
=====
Any object that has structural dummy variables should have
a property, `bound_symbols` that returns a list of structural
dummy symbols of the object itself.
Lambda and Subs have bound symbols, but because of how they
are cached, they already compare the same regardless of their
bound symbols:
>>> from sympy import Lambda
>>> Lambda(x, x + 1) == Lambda(y, y + 1)
True
"""
def can(x):
d = {i: i.as_dummy() for i in x.bound_symbols}
# mask free that shadow bound
x = x.subs(d)
c = x.canonical_variables
# replace bound
x = x.xreplace(c)
# undo masking
x = x.xreplace(dict((v, k) for k, v in d.items()))
return x
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
lambda x: can(x))
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.bound_symbols`` to Symbols that do not clash
with any existing symbol in the expression.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: _0}
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import numbered_symbols
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
v = self.bound_symbols
# this free will include bound symbols that are not part of
# self's bound symbols
free = set([i.name for i in self.atoms(Symbol) - set(v)])
for v in v:
d = next(dums)
if v.is_Symbol:
while v.name == d.name or d.name in free:
d = next(dums)
reps[v] = d
return reps
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
"""Helper for rcall method."""
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
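        """Return True if ``self`` is a hypergeometric term in ``k``,
        i.e. if ``hypersimp(self, k)`` returns a non-None result."""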
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_extended_real = self.is_extended_real
if is_extended_real is False:
return False
if not self.is_number:
return False
# don't re-eval numbers that are already evaluated since
# this will create spurious precision
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not (i.is_Number and n.is_Number):
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See Also
========
sympy.core.expr.Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
from sympy import Dummy, Symbol
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, Mapping)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i, s in enumerate(sequence):
if isinstance(s[0], string_types):
# when old is a string we prefer Symbol
s = Symbol(s[0]), s[1]
try:
s = [sympify(_, strict=not isinstance(_, string_types))
for _ in s]
except SympifyError:
# if it can't be sympified, skip it
sequence[i] = None
continue
# skip if there is no change
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy('subs_m')
for old, new in sequence:
com = new.is_commutative
if com is None:
com = True
d = Dummy('subs_d', commutative=com)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also
========
_subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note ``has`` is a structural algorithm with no knowledge of
mathematics. Consider the following half-open interval:
>>> from sympy.sets import Interval
>>> i = Interval.Lopen(0, 5); i
Interval.Lopen(0, 5)
>>> i.args
(0, 5, True, False)
>>> i.has(4) # there is no "4" in the arguments
False
>>> i.has(0) # there *is* a "0" in the arguments
True
Instead, use ``contains`` to determine whether a number is in the
interval or not:
>>> i.contains(4)
True
>>> i.contains(0)
False
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicMeta):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
_has_matcher = getattr(pattern, '_has_matcher', None)
if _has_matcher is not None:
match = _has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
else:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return lambda other: self == other
def replace(self, query, value, map=False, simultaneous=True, exact=None):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False.
In addition, if an expression containing more than one Wild symbol
is being used to match subexpressions and the ``exact`` flag is None
it will be set to True so the match will only succeed if all non-zero
values are received for each Wild that appears in the match pattern.
Setting this to False accepts a match of 0; while setting it True
accepts all matches that have a 0 in them. See example below for
cautions.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a, b = map(Wild, 'ab')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
Matching is exact by default when more than one Wild symbol
is used: matching fails unless the match gives non-zero
values for all Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a)
y - 2
>>> (2*x).replace(a*x + b, b - a)
2*x
When set to False, the results may be non-intuitive:
>>> (2*x).replace(a*x + b, b - a, exact=False)
2/x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
When matching a single symbol, `exact` will default to True, but
this may or may not be the behavior that is desired:
Here, we want `exact=False`:
>>> from sympy import Function
>>> f = Function('f')
>>> e = f(1) + f(0)
>>> q = f(a), lambda a: f(a + 1)
>>> e.replace(*q, exact=False)
f(1) + f(2)
>>> e.replace(*q, exact=True)
f(0) + f(2)
But here, the nature of matching makes selecting
the right setting tricky:
>>> e = x**(1 + y)
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False)
1
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(-x - y + 1)
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False)
1
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(1 - y)
It is probably better to use a different form of the query
that describes the target expression more precisely:
>>> (1 + x**(1 + y)).replace(
... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1,
... lambda x: x.base**(1 - (x.exp - 1)))
x**(1 - y) + 1
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy, Wild
from sympy.simplify.simplify import bottom_up
try:
query = _sympify(query)
except SympifyError:
pass
try:
value = _sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
if exact is None:
exact = (len(query.atoms(Wild)) > 1)
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**
{str(k)[:-1]: v for k, v in result.items()})
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**
{str(k)[:-1]: v for k, v in result.items()})
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this change during rebuilding;
# XXX this may fail if the object being replaced
# cannot be represented as a Dummy in the expression
# tree, e.g. an ExprConditionPair in Piecewise
# cannot be represented with a Dummy
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy('rec_replace', commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
# if a sub-expression could not be replaced with
# a Dummy then this will fail; either filter
# against such sub-expressions or figure out a
# way to carry out simultaneous replacement
# in this situation.
rv = rv.xreplace(r) # if this fails, see above
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = {k.xreplace(r): v.xreplace(r)
for k, v in mapping.items()}
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
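    # A short, hedged illustration of ``find``/``count`` (added here for
    # clarity, in the doctest style used elsewhere in this file):
    #
    #     >>> from sympy import sin, Wild
    #     >>> from sympy.abc import x
    #     >>> a = Wild('a')
    #     >>> (sin(x) + sin(x**2)).find(sin(a))
    #     {sin(x), sin(x**2)}
    #     >>> (sin(x) + sin(x)*sin(x**2)).count(sin(a))
    #     3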
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
return pattern.matches(self, old=old)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args]
else:
args = self.args
if pattern is None or isinstance(self, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args, **hints)
if rewritten is not None:
return rewritten
return self.func(*args) if hints.get('evaluate', True) else self
def _accept_eval_derivative(self, s):
# This method needs to be overridden by array-like objects
return s._visit_eval_derivative_scalar(self)
def _visit_eval_derivative_scalar(self, base):
# Base is a scalar
# Types are (base: scalar, self: scalar)
return base._eval_derivative(self)
def _visit_eval_derivative_array(self, base):
# Types are (base: array/matrix, self: scalar)
# Base is some kind of array/matrix,
# it should have `.applyfunc(lambda x: x.diff(self)` implemented:
return base._eval_derivative_array(self)
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from sympy import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._accept_eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites an expression containing applications of functions
of one kind in terms of functions of a different kind. For
example, you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as the gamma function.
As a pattern this function accepts a list of functions to
rewrite (instances of the DefinedFunction class). As the rule
you can use a string or a destination function instance (in
which case rewrite() will use the str() of that function).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined, called 'deep'. When 'deep' is set to False it will
prevent functions from rewriting their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
try:
rule = '_eval_rewrite_as_' + args[-1].__name__
                except AttributeError:
rule = '_eval_rewrite_as_' + args[-1].__class__.__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [p for p in pattern if self.has(p)]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
_constructor_postprocessor_mapping = {}
@classmethod
def _exec_constructor_postprocessors(cls, obj):
# WARNING: This API is experimental.
# This is an experimental API that introduces constructor
        # postprocessors for SymPy Core elements. If an argument of a SymPy
# expression has a `_constructor_postprocessor_mapping` attribute, it will
# be interpreted as a dictionary containing lists of postprocessing
# functions for matching expression node names.
clsname = obj.__class__.__name__
postprocessors = defaultdict(list)
for i in obj.args:
try:
postprocessor_mappings = (
Basic._constructor_postprocessor_mapping[cls].items()
for cls in type(i).mro()
if cls in Basic._constructor_postprocessor_mapping
)
for k, v in chain.from_iterable(postprocessor_mappings):
postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
except TypeError:
pass
for f in postprocessors.get(clsname, []):
obj = f(obj)
return obj
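# A hedged sketch of the experimental hook above (``Tagged`` and the identity
# postprocessor are illustrative assumptions, not part of the library): a
# class occurring in ``obj.args`` may register functions keyed by the name of
# the node type ("Mul", "Add", ...) whose construction should be
# post-processed.
#
#     class Tagged(Symbol):
#         pass
#     Basic._constructor_postprocessor_mapping[Tagged] = {
#         "Mul": [lambda expr: expr],  # identity postprocessor, illustration only
#     }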
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, **kwargs):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
In SymPy (as in Python) two numbers compare the same if they
have the same underlying base-2 representation even though
they may not be the same type:
>>> from sympy import S
>>> 2.0 == S(2)
True
>>> 0.5 == S.Half
True
This routine was written to provide a query for such cases that
would give false when the types do not match:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .numbers import Number
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(a, Number) and isinstance(b, Number):
return a == b and a.__class__ == b.__class__
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
return True
def _atomic(e, recursive=False):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too, unless `recursive` is True.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
{x, y}
>>> _atomic(x + f(y))
{x, f(y)}
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
{y, cos(x), Derivative(f(x), x)}
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
if isinstance(e, Basic):
free = getattr(e, "free_symbols", None)
if free is None:
return {e}
else:
return set()
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
if not recursive:
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node then descends through the
tree depth-first to yield all of a node's children's pre-order
traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
| bsd-3-clause | -4,783,632,093,631,095,000 | 31.479197 | 103 | 0.5283 | false | 4.146407 | false | false | false |
Zulan/PBStats | tests/Updater/Mods/Updater/Assets/Python/Extras/simplejson.py | 4 | 33784 | import sre_parse, sre_compile, sre_constants
from sre_constants import BRANCH, SUBPATTERN
from re import VERBOSE, MULTILINE, DOTALL
import re
import cgi
import warnings
_speedups = None
class JSONFilter(object):
def __init__(self, app, mime_type='text/x-json'):
self.app = app
self.mime_type = mime_type
def __call__(self, environ, start_response):
# Read JSON POST input to jsonfilter.json if matching mime type
response = {'status': '200 OK', 'headers': []}
def json_start_response(status, headers):
response['status'] = status
response['headers'].extend(headers)
environ['jsonfilter.mime_type'] = self.mime_type
if environ.get('REQUEST_METHOD', '') == 'POST':
if environ.get('CONTENT_TYPE', '') == self.mime_type:
args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
data = environ['wsgi.input'].read(*map(int, args))
environ['jsonfilter.json'] = simplejson.loads(data)
res = simplejson.dumps(self.app(environ, json_start_response))
jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
if jsonp:
content_type = 'text/javascript'
res = ''.join(jsonp + ['(', res, ')'])
elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
# Opera has bunk XMLHttpRequest support for most mime types
content_type = 'text/plain'
else:
content_type = self.mime_type
headers = [
('Content-type', content_type),
('Content-length', len(res)),
]
headers.extend(response['headers'])
start_response(response['status'], headers)
return [res]
def factory(app, global_conf, **kw):
return JSONFilter(app, **kw)
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
# escape all forward slashes to prevent </script> attack
'/': '\\/',
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return str(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
encode_basestring_ascii = _speedups.encode_basestring_ascii
_need_utf8 = True
except AttributeError:
_need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method that returns a serializable object for ``o`` if
possible, and otherwise calls the superclass implementation (to raise
``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8'):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_need_utf8 and _encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_need_utf8 and _encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo":["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks...
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8' and _need_utf8)):
o = o.decode(_encoding)
return encode_basestring_ascii(o)
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
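# A hedged example of extending JSONEncoder via ``default()`` (illustrative
# only; ``ComplexEncoder`` is not part of this module): unknown types are
# funneled through ``default()``, which either returns a serializable
# stand-in or defers to the superclass, which raises ``TypeError``.
class ComplexEncoder(JSONEncoder):
    def default(self, o):
        if isinstance(o, complex):
            # represent a complex number as a [real, imag] pair
            return [o.real, o.imag]
        return JSONEncoder.default(self, o)
# e.g. ComplexEncoder().encode(2 + 1j) yields '[2.0, 1.0]'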
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""
Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
return c[match.group(0)], None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
res = float(integer + (frac or '') + (exp or ''))
else:
res = int(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
try:
m = unichr(int(esc, 16))
if len(esc) != 4 or not esc.isalnum():
raise ValueError
except ValueError:
raise ValueError(errmsg("Invalid \\uXXXX escape", s, end))
end += 5
_append(m)
return u''.join(chunks), end
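# A brief, hedged illustration of ``scanstring`` (added for clarity, not part
# of the original module): ``end`` is the index just past the opening quote,
# and the return value is the decoded string together with the index just
# past the closing quote.
#
#     >>> scanstring('"foo"', 1)
#     (u'foo', 5)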
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
"""
self.encoding = encoding
self.object_hook = object_hook
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
__version__ = '1.7.1'
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then some of the chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
**kw).encode(obj)
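# A hedged usage sketch for ``dumps`` (illustrative, not part of the module):
# the ``separators`` and ``indent`` knobs trade compactness for readability.
#
#     >>> dumps([1, 2, 3], separators=(',', ':'))
#     '[1,2,3]'
#     >>> dumps({'a': 1}, indent=0)
#     '{\n"a": 1\n}'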
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if cls is None and encoding is None and object_hook is None and not kw:
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
return cls(encoding=encoding, **kw).decode(s)
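# A hedged sketch of the ``object_hook`` extension point (``as_complex`` is
# an illustrative assumption, not part of the module): every decoded JSON
# object is passed through the hook, so class hinting can be layered on top.
#
#     def as_complex(dct):
#         if '__complex__' in dct:
#             return complex(dct['real'], dct['imag'])
#         return dct
#
#     loads('{"__complex__": true, "real": 1, "imag": 2}',
#           object_hook=as_complex)  # -> (1+2j)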
def read(s):
"""
json-py API compatibility hook. Use loads(s) instead.
"""
import warnings
warnings.warn("simplejson.loads(s) should be used instead of read(s)",
DeprecationWarning)
return loads(s)
def write(obj):
"""
json-py API compatibility hook. Use dumps(s) instead.
"""
import warnings
warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
DeprecationWarning)
return dumps(obj)
| gpl-2.0 | -2,590,146,868,092,183,600 | 34.405172 | 79 | 0.547626 | false | 4.177569 | false | false | false |
BenWiederhake/House-Of-Tweets | tools/PhotoMiner/checkout_hot_poli.py | 1 | 9492 | #!/usr/bin/env python3
# NOTE: Before running this script, you may want to either get my cache,
# or run 'fetch.py' from the branch 'crawler-fetch'.
import json
import nice
import os
import subprocess
# If True:
# - only export images where CHOICES_PRIORITY is still relevant
# (i.e., more than one image and no entry in CHOICES_OVERRIDE)
# - no pols.json written
# - non-standard filenames
# - no thumbnails
# If False:
# - export "all" images, defaulting to the order in CHOICES_PRIORITY.
# - pols.json written
# - standard filenames, identical in scheme to the old ones.
CHOICE_MODE = False
# To make the following dict shorter:
w, l, c, s, g = 'wiki', 'die linke', 'cxu', 'spd', 'gruene'
CHOICES_OVERRIDE = {
# 'pid': 'slug',
# Recommended: open a new editor and just write down entries like '52g',
# and let regexes do the rest.
'0': l,
'4': c,
'5': w,
'6': w,
'7': s,
'9': s,
'12': g,
'14': w,
'16': c,
'22': s,
'23': s,
'24': l,
'25': w,
'28': g,
'29': g,
'32': c,
'33': l,
'34': w,
'40': c,
'41': c,
'42': l,
'43': s,
'45': l,
'56': g,
'59': w,
'60': w,
'61': c,
'62': w,
'64': w,
'67': s,
'68': s,
'70': s,
'74': l,
'76': l,
'77': g,
'78': s,
'85': w,
'88': g,
'89': w,
'91': g,
'95': s,
'97': l,
'98': s,
'99': s,
'104': w,
'105': w,
'111': c,
'114': s,
'117': s,
'118': s,
'124': c,
'125': w,
'127': s,
'130': w,
'132': w,
'133': l,
'134': w,
'142': l,
'145': w,
'147': s,
'150': w,
'153': w,
'156': l,
'159': w,
'162': c,
'165': c,
'166': l,
'172': w,
'173': s,
'175': l,
'176': w,
'177': w,
'178': s,
'179': s,
'181': g,
'182': w,
'183': c,
'184': c,
'186': w,
'188': s,
'189': c,
'190': w,
'196': s,
'204': s,
'209': w,
'211': s,
'214': w,
'215': g,
'217': w,
'218': g,
'224': c,
'226': l,
'229': s,
'231': g,
'233': w,
'234': l,
'238': c,
'239': w,
'240': s,
'243': w,
'244': s,
'245': s,
'252': l,
'254': w,
'257': w,
'259': w,
'260': w,
'261': s,
'264': c,
'265': w,
'267': w,
'268': s,
'270': c,
'271': w,
'272': c,
'273': s,
'275': g,
'276': c,
'278': w,
'282': l,
'283': w,
'284': g,
'287': l,
'288': w,
'290': w,
'291': g,
'293': c,
'294': w,
'295': g,
'298': c,
'299': w,
'301': g,
'309': s,
'313': s,
'314': l,
'315': w,
'317': l,
'319': g,
'320': s,
'321': c,
'325': l,
'326': w,
'328': l,
'329': c,
'332': g,
'335': s,
'339': l,
'341': w,
'344': l,
'346': w,
'348': g,
'350': s,
'351': w,
'356': w,
'357': s,
'360': w,
'361': w,
'369': g,
'373': l,
'375': w,
'379': w,
'385': w,
'386': w,
'389': g,
'392': w,
'393': c,
'395': s,
'397': l,
'398': g,
'399': g,
}
CHOICES_PRIORITY = [
'twitter', # Just in case we ever do that
'spd',
'die linke',
'gruene',
'wiki', # Not the best source of images
'cxu', # Often enough worse than Wikipedia's images
]
DIR_PREFIX = 'preview'
os.mkdir(DIR_PREFIX) # If this fails: you should always start from scratch here!
def convert(*args):
try:
subprocess.run(['convert', *args],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
check=True)
except subprocess.CalledProcessError as e:
print('stdout:\n{}\nstderr:\n{}\n'.format(e.stdout, e.stderr))
raise
def checkout(pid, fields):
img_prefix = os.path.join(DIR_PREFIX, pid)
dl_path = nice.get(fields['url'])
freshest_path = dl_path
# Provide '_raw' for intermediate processing
raw_dst_path = img_prefix + '_raw.jpg'
if fields.get('is_compressed'):
with open(raw_dst_path, 'wb') as raw_fp:
subprocess.run(['unzip', '-p', dl_path],
stdout=raw_fp, stderr=subprocess.PIPE, check=True)
freshest_path = raw_dst_path
else:
# Need '../' to get out of 'preview/'
os.symlink('../' + dl_path, raw_dst_path)
# Something about digitally rotated images (Michael Grosse-Brömer, 154)
# doesn't work as it should.
inject = []
if '154' in pid:
inject = ['-rotate', '-90']
# Provide ready-to-use image
convert(freshest_path,
'-resize', '330x330^',
'-gravity', 'north',
'-extent', '330x330',
*inject,
'-strip',
img_prefix + '.jpg')
if not CHOICE_MODE:
# Provide thumbnail
convert(freshest_path,
'-thumbnail', '75x75^',
'-gravity', 'north',
'-extent', '75x75',
*inject,
img_prefix + '_t.jpg')
# TODO: Use '-strip'.
# Don't do it right now in order to
# avoid blowing up 'heavy' even more.
# Retract '_raw'
os.remove(raw_dst_path)
entry = {
'pathToImage': pid + '.jpg',
'pathToThumb': pid + '_t.jpg',
'license': fields['license'],
}
if 'copyright' in fields:
entry['copyright'] = fields['copyright']
return entry
def choose_img(pid, imgs):
if pid in CHOICES_OVERRIDE:
choice = CHOICES_OVERRIDE[pid]
elif len(imgs) == 1:
choice = list(imgs.keys())[0]
else:
print('[WARN] No human selection for ' + pid)
appliccable = [ch for ch in CHOICES_PRIORITY if ch in imgs]
assert len(appliccable) > 0, (imgs.keys(), CHOICES_PRIORITY)
choice = appliccable[0]
return imgs[choice]
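# A worked illustration of the selection rules above (hypothetical data, for
# clarity only): with no CHOICES_OVERRIDE entry and more than one candidate,
# the first slug listed in CHOICES_PRIORITY wins and a warning is printed.
#
#     choose_img('9999', {'cxu': fields_a, 'spd': fields_b})  # -> fields_b
#
# because 'spd' precedes 'cxu' in CHOICES_PRIORITY.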
SPOOF_USERS = {
'hot': {
"twittering": {
"twitterId": "4718199753",
"twitterUserName": "HouseOfTweetsSB"
},
"self_bird": "amsel",
"party": "Gr\u00fcn",
"name": "House Of Tweets",
"pid": "hot",
"cv": {
"en": "A good bird person. Very reliable. But also killed. In bird culture, that was considered a 'dick move'.",
"de": "Uhh, keine Ahnung, ich kenn das Zitat nur auf Englisch."
},
"images": {
"pathToThumb": "tgroup_greengr\u00fcn.jpg",
"pathToImage": "group_greengr\u00fcn.jpg"
},
"citizen_bird": "amsel"
},
'523': {
"twittering": {
"twitterId": "237115617",
"twitterUserName": "sc_ontour"
},
"self_bird": "girlitz",
"party": "SPD",
"name": "Stephan Schweitzer",
"pid": "523",
"cv": {
"en": "Schweitzer, born in Saarland, used to work in the Willy-Brandt-Haus in Berlin for four years. He started out as head of Astrid Klug's office, then became the head of the department for communication. Lastly, he was technical director for the election campaign. Before transferring to Berlin, the certified public administration specialist directed the affairs of the Saar-SPD. His career began as publicist for the Saarlouis county in 1993.",
"de": "Schweitzer, ein gebürtiger Saarländer, arbeitete zuvor vier Jahre im Willy-Brandt-Haus in Berlin, zunächst als Büroleiter der damaligen SPD-Bundesgeschäftsführerin Astrid Klug, dann als Abteilungsleiter für Kommunikation und zuletzt als technischer Wahlkampfleiter im Bundestagswahlkampf. Vor seinem Wechsel nach Berlin hatte der Diplom-Verwaltungswirt, der seine Laufbahn 1993 als Pressesprecher des Landkreises Saarlouis begann, die Geschäfte der Saar-SPD geführt."
},
"images": {
"pathToThumb": "523_t.jpg",
"pathToImage": "523.jpg"
},
"citizen_bird": "zaunkoenig"
}
}
def prune_convert(pols):
pols = {poli['pid']: poli for poli in pols if 'twittering' in poli}
for poli in pols.values():
del poli['imgs']
for (pid, poli) in SPOOF_USERS.items():
assert pid == poli['pid']
pols[pid] = poli
return pols
def run():
with open('converge_each.json', 'r') as fp:
pols = json.load(fp)
for e in pols:
if 'twittering' not in e:
print('[INFO] Skipping (not twittering) ' + e['full_name'])
continue
if len(e['imgs']) == 0:
print('[WARN] No images at all for ' + e['full_name'])
continue
print('[INFO] Checking out files for ' + e['full_name'])
if not CHOICE_MODE:
fields = choose_img(e['pid'], e['imgs'])
e['images'] = checkout(e['pid'], fields)
elif len(e['imgs']) >= 2 and e['pid'] not in CHOICES_OVERRIDE:
for slug, fields in e['imgs'].items():
checkout(e['pid'] + slug, fields)
if not CHOICE_MODE:
print('[INFO] CHOICE_MODE = False, so I\'ll write out pols.json')
pols = prune_convert(pols)
with open('pols.json', 'w') as fp:
json.dump(pols, fp, sort_keys=True, indent=2)
else:
print('[INFO] CHOICE_MODE = True, so not writing anything')
if __name__ == '__main__':
if not CHOICE_MODE:
print('[INFO] If there\'s many complaints about missing human choices, re-run with CHOICE_MODE = True')
run()
print('Done.')
| gpl-3.0 | -5,207,065,709,094,016,000 | 23.952632 | 486 | 0.507171 | false | 2.940155 | false | false | false |
jrichte43/ProjectEuler | Problem-0402/solutions.py | 1 | 1258 |
__problem_title__ = "Integer-valued polynomials"
__problem_url___ = "https://projecteuler.net/problem=402"
__problem_description__ = "It can be shown that the polynomial + 4 + 2 + 5 is a multiple of 6 " \
"for every integer . It can also be shown that 6 is the largest " \
"integer satisfying this property. Define M( , , ) as the maximum such " \
"that + + + is a multiple of for all integers . For example, M(4, 2, " \
"5) = 6. Also, define S( ) as the sum of M( , , ) for all 0 < , , ≤ . " \
"We can verify that S(10) = 1972 and S(10000) = 2024258331114. Let F " \
"be the Fibonacci sequence: F = 0, F = 1 and F = F + F for ≥ 2. Find " \
"the last 9 digits of Σ S(F ) for 2 ≤ ≤ 1234567890123."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
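# A brute-force sketch for small cases (an illustrative assumption, not the
# intended solution): writing f(n) = n**4 + a*n**3 + b*n**2 + c*n in the
# binomial basis C(n, k) shows that the gcd of f(n) over all integers n
# equals gcd(f(0), f(1), f(2), f(3), f(4)); since f(0) = 0, four values
# suffice. This verifies M(4, 2, 5) = 6 but is far too slow for the full
# problem.
from math import gcd  # Python 3.5+
def naive_M(a, b, c):
    result = 0
    for n in range(1, 5):
        result = gcd(result, n**4 + a*n**3 + b*n**2 + c*n)
    return result
assert naive_M(4, 2, 5) == 6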
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | -311,755,246,571,413,500 | 38.03125 | 100 | 0.52442 | false | 3.652047 | false | false | false |
idegtiarov/gnocchi-rep | gnocchi/storage/__init__.py | 1 | 6326 | # -*- encoding: utf-8 -*-
#
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from stevedore import driver
from gnocchi import exceptions
OPTS = [
cfg.StrOpt('driver',
default='file',
help='Storage driver to use'),
]
Measure = collections.namedtuple('Measure', ['timestamp', 'value'])
class Metric(object):
def __init__(self, id, archive_policy,
created_by_user_id=None,
created_by_project_id=None,
name=None,
resource_id=None):
self.id = id
self.archive_policy = archive_policy
self.created_by_user_id = created_by_user_id
self.created_by_project_id = created_by_project_id
self.name = name
self.resource_id = resource_id
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.id)
def __hash__(self):
return id(self)
def __str__(self):
return str(self.id)
def __eq__(self, other):
return (isinstance(self, Metric)
and self.id == other.id
and self.archive_policy == other.archive_policy
and self.created_by_user_id == other.created_by_user_id
and self.created_by_project_id == other.created_by_project_id
and self.name == other.name
and self.resource_id == other.resource_id)
class InvalidQuery(Exception):
pass
class MetricDoesNotExist(Exception):
"""Error raised when this metric does not exist."""
def __init__(self, metric):
self.metric = metric
super(MetricDoesNotExist, self).__init__(
"Metric %s does not exist" % metric)
class AggregationDoesNotExist(Exception):
"""Error raised when the aggregation method doesn't exists for a metric."""
def __init__(self, metric, method):
self.metric = metric
self.method = method
super(AggregationDoesNotExist, self).__init__(
"Aggregation method '%s' for metric %s does not exist" %
(method, metric))
class MetricAlreadyExists(Exception):
"""Error raised when this metric already exists."""
def __init__(self, metric):
self.metric = metric
super(MetricAlreadyExists, self).__init__(
"Metric %s already exists" % metric)
class NoDeloreanAvailable(Exception):
"""Error raised when trying to insert a value that is too old."""
def __init__(self, first_timestamp, bad_timestamp):
self.first_timestamp = first_timestamp
self.bad_timestamp = bad_timestamp
super(NoDeloreanAvailable, self).__init__(
"%s is before %s" % (bad_timestamp, first_timestamp))
class MetricUnaggregatable(Exception):
"""Error raised when metrics can't be aggregated."""
def __init__(self, metrics, reason):
self.metrics = metrics
self.reason = reason
super(MetricUnaggregatable, self).__init__(
"Metrics %s can't be aggregated: %s"
% (" ,".join((str(m.id) for m in metrics)), reason))
def _get_driver(name, conf):
"""Return the driver named name.
:param name: The name of the driver.
:param conf: The conf to pass to the driver.
"""
d = driver.DriverManager('gnocchi.storage',
name).driver
return d(conf)
def get_driver(conf):
"""Return the configured driver."""
return _get_driver(conf.storage.driver,
conf.storage)
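# A minimal wiring sketch (assumed usage, not part of this module): register
# the options under the 'storage' group, then let stevedore resolve the named
# driver from the 'gnocchi.storage' entry-point namespace. This only works in
# an environment where a matching driver is installed.
#
#     conf = cfg.ConfigOpts()
#     conf.register_opts(OPTS, group='storage')
#     storage = get_driver(conf)  # with driver='file', loads the file driver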
class StorageDriver(object):
@staticmethod
def __init__(conf):
pass
@staticmethod
def create_metric(metric):
"""Create a metric.
:param metric: The metric object.
"""
raise exceptions.NotImplementedError
@staticmethod
def add_measures(metric, measures):
"""Add a measure to a metric.
:param metric: The metric measured.
:param measures: The actual measures.
"""
raise exceptions.NotImplementedError
@staticmethod
def get_measures(metric, from_timestamp=None, to_timestamp=None,
aggregation='mean'):
"""Get a measure to a metric.
:param metric: The metric measured.
:param from timestamp: The timestamp to get the measure from.
:param to timestamp: The timestamp to get the measure to.
:param aggregation: The type of aggregation to retrieve.
"""
raise exceptions.NotImplementedError
@staticmethod
def delete_metric(metric):
raise exceptions.NotImplementedError
@staticmethod
def get_cross_metric_measures(metrics, from_timestamp=None,
to_timestamp=None, aggregation='mean',
needed_overlap=None):
"""Get aggregated measures of multiple entities.
:param entities: The entities measured to aggregate.
:param from timestamp: The timestamp to get the measure from.
:param to timestamp: The timestamp to get the measure to.
:param aggregation: The type of aggregation to retrieve.
"""
raise exceptions.NotImplementedError
@staticmethod
def search_value(metrics, query, from_timestamp=None,
to_timestamp=None,
aggregation='mean'):
"""Search for an aggregated value that realizes a predicate.
:param metrics: The list of metrics to look into.
:param query: The query being sent.
:param from_timestamp: The timestamp to get the measure from.
:param to_timestamp: The timestamp to get the measure to.
:param aggregation: The type of aggregation to retrieve.
"""
raise exceptions.NotImplementedError
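# Hedged example (added; not in the original file): a toy in-memory driver
# showing the minimum shape of a StorageDriver subclass. It ignores archive
# policies and aggregation, so it only illustrates the interface.
class _InMemoryStorageDriver(StorageDriver):
    def __init__(self, conf):
        self._measures = collections.defaultdict(list)

    def create_metric(self, metric):
        self._measures[metric.id]  # touch to create the bucket

    def add_measures(self, metric, measures):
        self._measures[metric.id].extend(measures)

    def get_measures(self, metric, from_timestamp=None, to_timestamp=None,
                     aggregation='mean'):
        if metric.id not in self._measures:
            raise MetricDoesNotExist(metric)
        return [m for m in self._measures[metric.id]
                if (from_timestamp is None or m.timestamp >= from_timestamp)
                and (to_timestamp is None or m.timestamp <= to_timestamp)]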
| apache-2.0 | 7,452,011,963,883,705,000 | 30.625 | 79 | 0.62087 | false | 4.432376 | false | false | false |
gytdau/holiday-thingy | holiday_bot/Messenger.py | 1 | 1372 | import json
class Messenger:
"""
Messenger is a wrapper for either a Message or SlackClient instance.
"""
def __init__(self, service, channel=None):
self.service = service
self.service_type = type(service).__name__
self.channel = channel
def reply(self, message):
if self.service_type == "Message":
self.service.reply(message)
else:
self.service.send_message(self.channel, message)
def send(self, message):
if self.service_type == "Message":
self.service.send(message)
else:
self.service.send_message(self.channel, message)
def send_attachments(self, attachments):
if self.service_type == "Message":
self.service.send_webapi('', json.dumps(attachments))
else:
self.service.send_message(self.channel, '', json.dumps(attachments))
def full_name(self):
if self.service_type == "Message":
return self.service.channel._client.users[self.service.body['user']][u'real_name']
else:
return "*Unknown Person*" # Or should I throw an error?
def sender_id(self):
if self.service_type == "Message":
return self.service.channel._client.users[self.service.body['user']]['id']
else:
return 0 # Or should I throw an error?
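if __name__ == '__main__':
    # Hedged demo (added; not part of the original module). Any service whose
    # type name is not "Message" is driven through send_message(), so a small
    # stub is enough to exercise the wrapper:
    class _StubClient(object):
        def send_message(self, channel, text, attachments=None):
            print('%s %s %s' % (channel, text, attachments))
    m = Messenger(_StubClient(), channel='#general')
    m.send('hello')
    m.send_attachments([{'text': 'attached card'}])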
| mit | 3,061,471,265,636,078,600 | 33.3 | 94 | 0.599125 | false | 4.035294 | false | false | false |
DuCorey/bokeh | bokeh/sampledata/airports.py | 15 | 1027 | """ The data in airports.json is a subset of US airports with field
elevations > 1500 meters. The query result was taken from
.. code-block:: none
http://services.nationalmap.gov/arcgis/rest/services/GlobalMap/GlobalMapWFS/MapServer/10/query
on October 15, 2015.
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'airports sample data requires Pandas (http://pandas.pydata.org) to be installed')
import json
import os
from . import _data_dir
with open(os.path.join(_data_dir(), 'airports.json'), 'r') as data_file:
content = data_file.read()
airports = json.loads(content)
schema = [['attributes', 'nam'], ['attributes', 'zv3'], ['geometry', 'x'], ['geometry', 'y']]
data = pd.io.json.json_normalize(airports['features'], meta=schema)
data.rename(columns={'attributes.nam': 'name', 'attributes.zv3': 'elevation'}, inplace=True)
data.rename(columns={'geometry.x': 'x', 'geometry.y': 'y'}, inplace=True)
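# Example usage (illustrative, added): ``data`` is a plain DataFrame, e.g.
#
#     from bokeh.sampledata.airports import data
#     print(data.sort_values('elevation', ascending=False).head())
#
# lists the five highest-elevation airports in the subset.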
| bsd-3-clause | 113,116,088,189,705,220 | 37.037037 | 98 | 0.69036 | false | 3.389439 | false | true | false |
baskiotisn/soccersimulator | soccersimulator/mdpsoccer.py | 1 | 22276 | # -*- coding: utf-8 -*-<
import math
import threading
from collections import namedtuple
from threading import Lock
from copy import deepcopy
from .utils import Vector2D, MobileMixin
from .events import SoccerEvents
from . import settings
from .utils import dict_to_json
import random
import time
import zipfile
import traceback
import logging
logger = logging.getLogger("soccersimulator.mdpsoccer")
###############################################################################
# SoccerAction
###############################################################################
class SoccerAction(object):
""" Action d'un joueur : comporte un vecteur acceleration et un vecteur shoot.
"""
def __init__(self, acceleration=None, shoot=None,name=None):
self.acceleration = acceleration or Vector2D()
self.shoot = shoot or Vector2D()
self.name = name or ""
def copy(self):
return deepcopy(self)
def set_name(self,name):
self.name = name
return self
def __str__(self):
return "Acc:%s, Shoot:%s, Name:%s" % (str(self.acceleration), str(self.shoot), str(self.name))
def __repr__(self):
return "SoccerAction(%s,%s,%s)" % (self.acceleration.__repr__(),self.shoot.__repr__(),self.name)
def __eq__(self, other):
return (other.acceleration == self.acceleration) and (other.shoot == self.shoot)
def __add__(self, other):
return SoccerAction(self.acceleration + other.acceleration, self.shoot + other.shoot)
def __sub__(self, other):
return SoccerAction(self.acceleration - other.acceleration, self.shoot - other.shoot)
def __iadd__(self, other):
self.acceleration += other.acceleration
self.shoot += other.shoot
return self
def __isub__(self, other):
self.acceleration -= other.acceleration
self.shoot -= other.shoot
return self
def to_dict(self):
return {"acceleration":self.acceleration,"shoot":self.shoot,"name":self.name}
###############################################################################
# Ball
###############################################################################
class Ball(MobileMixin):
def __init__(self,position=None,vitesse=None,**kwargs):
super(Ball,self).__init__(position,vitesse,**kwargs)
def next(self,sum_of_shoots):
vitesse = self.vitesse.copy()
vitesse.norm = self.vitesse.norm - settings.ballBrakeSquare * self.vitesse.norm ** 2 - settings.ballBrakeConstant * self.vitesse.norm
## decomposition along the unit vector of ball.speed
snorm = sum_of_shoots.norm
if snorm > 0:
u_s = sum_of_shoots.copy()
u_s.normalize()
u_t = Vector2D(-u_s.y, u_s.x)
speed_abs = abs(vitesse.dot(u_s))
speed_ortho = vitesse.dot(u_t)
speed_tmp = Vector2D(speed_abs * u_s.x - speed_ortho * u_s.y, speed_abs * u_s.y + speed_ortho * u_s.x)
speed_tmp += sum_of_shoots
vitesse = speed_tmp
self.vitesse = vitesse.norm_max(settings.maxBallAcceleration).copy()
self.position += self.vitesse
def inside_goal(self):
return (self.position.x < 0 or self.position.x > settings.GAME_WIDTH)\
and abs(self.position.y - (settings.GAME_HEIGHT / 2.)) < settings.GAME_GOAL_HEIGHT / 2.
def __repr__(self):
return "Ball(%s,%s)" % (self.position.__repr__(),self.vitesse.__repr__())
def __str__(self):
return "Ball: pos: %s, vit: %s" %(str(self.position),str(self.vitesse))
###############################################################################
# PlayerState
###############################################################################
class PlayerState(MobileMixin):
""" Represente la configuration d'un joueur : un etat mobile (position, vitesse), et une action SoccerAction
"""
def __init__(self, position=None, vitesse=None,**kwargs):
"""
:param position: player position
:param vitesse: player velocity
:param action: the player's SoccerAction
:return:
"""
super(PlayerState,self).__init__(position,vitesse)
self.action = kwargs.pop('action', SoccerAction())
self.last_shoot = kwargs.pop('last_shoot', 0)
self.__dict__.update(kwargs)
def to_dict(self):
return {"position":self.position,"vitesse":self.vitesse,"action":self.action,"last_shoot":self.last_shoot}
def __str__(self):
return "pos: %s, vit: %s, action:%s" %(str(self.position),str(self.acceleration),str(self.action))
def __repr__(self):
return "PlayerState(position=%s,vitesse=%s,action=%s,last_shoot=%d)" % \
(self.position.__repr__(),self.vitesse.__repr__(),self.action.__repr__(),self.last_shoot)
@property
def acceleration(self):
"""
:return: Vector2D acceleration component of the player's action
"""
return self.action.acceleration.norm_max(settings.maxPlayerAcceleration)
@acceleration.setter
def acceleration(self,v):
self.action.acceleration = v
@property
def shoot(self):
""" Vector2D Action shoot du joueur
:return:
"""
return self.action.shoot.norm_max(settings.maxPlayerShoot)
@shoot.setter
def shoot(self,v):
self.action.shoot = v
def next(self, ball, action=None):
""" Calcul le prochain etat en fonction de l'action et de la position de la balle
:param ball:
:param action:
:return: Action shoot effectue
"""
if not (hasattr(action,"acceleration") and hasattr(action,"shoot")):
action = SoccerAction()
self.action = action.copy()
self.vitesse *= (1 - settings.playerBrackConstant)
self.vitesse = (self.vitesse + self.acceleration).norm_max(settings.maxPlayerSpeed)
self.position += self.vitesse
if self.position.x < 0 or self.position.x > settings.GAME_WIDTH \
or self.position.y < 0 or self.position.y > settings.GAME_HEIGHT:
self.position.x = max(0, min(settings.GAME_WIDTH, self.position.x))
self.position.y = max(0, min(settings.GAME_HEIGHT, self.position.y))
self.vitesse = Vector2D()
if self.shoot.norm == 0 or not self.can_shoot():
self._dec_shoot()
return Vector2D()
self._reset_shoot()
if self.position.distance(ball.position) > (settings.PLAYER_RADIUS + settings.BALL_RADIUS):
return Vector2D()
return self._rd_angle(self.shoot,(self.vitesse.angle-self.shoot.angle)*(0 if self.vitesse.norm==0 else 1),\
self.position.distance(ball.position)/(settings.PLAYER_RADIUS+settings.BALL_RADIUS))
@staticmethod
def _rd_angle(shoot,dangle,dist):
eliss = lambda x, alpha: (math.exp(alpha*x)-1)/(math.exp(alpha)-1)
dangle = abs((dangle+math.pi*2) %(math.pi*2) -math.pi)
dangle_factor =eliss(1.-max(dangle-math.pi/2,0)/(math.pi/2.),5)
norm_factor = eliss(shoot.norm/settings.maxPlayerShoot,4)
dist_factor = eliss(dist,10)
angle_prc = (1-(1.-dangle_factor)*(1.-norm_factor)*(1.-0.5*dist_factor))*settings.shootRandomAngle*math.pi/2.
norm_prc = 1-0.3*dist_factor*dangle_factor
return Vector2D(norm=shoot.norm*norm_prc,
angle=shoot.angle+2*(random.random()-0.5)*angle_prc)
def can_shoot(self):
""" Le joueur peut-il shooter
:return:
"""
return self.last_shoot <= 0
def _dec_shoot(self):
self.last_shoot -= 1
def _reset_shoot(self):
self.last_shoot = settings.nbWithoutShoot
def copy(self):
return deepcopy(self)
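# Illustrative sketch (added): one player step. next() applies the action,
# keeps the player inside the field and returns the effective shoot vector,
# which is zero unless the player touches the ball and may shoot:
#     p = PlayerState(position=Vector2D(5, 5))
#     shot = p.next(Ball(Vector2D(5, 5)), SoccerAction(Vector2D(1, 0)))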
###############################################################################
# SoccerState
###############################################################################
class SoccerState(object):
""" Etat d'un tour du jeu. Contient la balle, l'ensemble des etats des joueurs, le score et
le numero de l'etat.
"""
def __init__(self,states=None,ball=None,**kwargs):
self.states = states or dict()
self.ball = ball or Ball()
self.strategies = kwargs.pop('strategies',dict())
self.score = kwargs.pop('score', {1: 0, 2: 0})
self.step = kwargs.pop('step', 0)
self.max_steps = kwargs.pop('max_steps', settings.MAX_GAME_STEPS)
self.goal = kwargs.pop('goal', 0)
self.__dict__.update(kwargs)
def __str__(self):
return ("Step: %d, %s " %(self.step,str(self.ball)))+\
" ".join("(%d,%d):%s" %(k[0],k[1],str(p)) for k,p in sorted(self.states.items()))+\
(" score : %d-%d" %(self.score_team1,self.score_team2))
def __repr__(self):
return self.__str__()
def to_dict(self):
return dict(states=dict_to_json(self.states),
strategies=dict_to_json( self.strategies),
ball=self.ball,score=dict_to_json(self.score),step=self.step,
max_steps=self.max_steps,goal=self.goal)
def player_state(self, id_team, id_player):
""" renvoie la configuration du joueur
:param id_team: numero de la team du joueur
:param id_player: numero du joueur
:return:
"""
return self.states[(id_team, id_player)]
@property
def players(self):
""" renvoie la liste des cles des joueurs (idteam,idplayer)
:return: liste des cles
"""
return sorted(self.states.keys())
def nb_players(self, team):
""" nombre de joueurs de la team team
:param team: 1 ou 2
:return:
"""
return len([x for x in self.states.keys() if x[0] == team])
def get_score_team(self, idx):
""" score de la team idx : 1 ou 2
:param idx: numero de la team
:return:
"""
return self.score[idx]
@property
def score_team1(self):
return self.get_score_team(1)
@property
def score_team2(self):
return self.get_score_team(2)
def copy(self):
return deepcopy(self)
def apply_actions(self, actions=None,strategies=None):
if strategies: self.strategies.update(strategies)
sum_of_shoots = Vector2D()
self.goal = 0
if actions:
for k, c in self.states.items():
if k in actions:
sum_of_shoots += c.next(self.ball, actions[k])
self.ball.next(sum_of_shoots)
self.step += 1
if self.ball.inside_goal():
self._do_goal(2 if self.ball.position.x <= 0 else 1)
return
if self.ball.position.x < 0:
self.ball.position.x = -self.ball.position.x
self.ball.vitesse.x = -self.ball.vitesse.x
if self.ball.position.y < 0:
self.ball.position.y = -self.ball.position.y
self.ball.vitesse.y = -self.ball.vitesse.y
if self.ball.position.x > settings.GAME_WIDTH:
self.ball.position.x = 2 * settings.GAME_WIDTH - self.ball.position.x
self.ball.vitesse.x = -self.ball.vitesse.x
if self.ball.position.y > settings.GAME_HEIGHT:
self.ball.position.y = 2 * settings.GAME_HEIGHT - self.ball.position.y
self.ball.vitesse.y = -self.ball.vitesse.y
def _do_goal(self, idx):
self.score[idx]+=1
self.goal = idx
@classmethod
def create_initial_state(cls, nb_players_1=0, nb_players_2=0,max_steps=settings.MAX_GAME_STEPS):
""" Creer un etat initial avec le nombre de joueurs indique
:param nb_players_1: nombre de joueur de la team 1
:param nb_players_2: nombre de joueur de la teamp 2
:return:
"""
state = cls()
state.reset_state(nb_players_1=nb_players_1,nb_players_2= nb_players_2)
return state
def reset_state(self, nb_players_1=0, nb_players_2=0):
if nb_players_1 == 0 and self.nb_players(1) > 0:
nb_players_1 = self.nb_players(1)
if nb_players_2 == 0 and self.nb_players(2) > 0:
nb_players_2 = self.nb_players(2)
quarters = [i * settings.GAME_HEIGHT / 4. for i in range(1, 4)]
rows = [settings.GAME_WIDTH * 0.1, settings.GAME_WIDTH * 0.35, settings.GAME_WIDTH * (1 - 0.35),
settings.GAME_WIDTH * (1 - 0.1)]
if nb_players_1 == 1:
self.states[(1, 0)] = PlayerState(position=Vector2D(rows[0], quarters[1]))
if nb_players_2 == 1:
self.states[(2, 0)] = PlayerState(position=Vector2D(rows[3], quarters[1]))
if nb_players_1 == 2:
self.states[(1, 0)] = PlayerState(position=Vector2D(rows[0], quarters[0]))
self.states[(1, 1)] = PlayerState(position=Vector2D(rows[0], quarters[2]))
if nb_players_2 == 2:
self.states[(2, 0)] = PlayerState(position=Vector2D(rows[3], quarters[0]))
self.states[(2, 1)] = PlayerState(position=Vector2D(rows[3], quarters[2]))
if nb_players_1 == 3:
self.states[(1, 0)] = PlayerState(position=Vector2D(rows[0], quarters[1]))
self.states[(1, 1)] = PlayerState(position=Vector2D(rows[0], quarters[0]))
self.states[(1, 2)] = PlayerState(position=Vector2D(rows[0], quarters[2]))
if nb_players_2 == 3:
self.states[(2, 0)] = PlayerState(position=Vector2D(rows[3], quarters[1]))
self.states[(2, 1)] = PlayerState(position=Vector2D(rows[3], quarters[0]))
self.states[(2, 2)] = PlayerState(position=Vector2D(rows[3], quarters[2]))
if nb_players_1 == 4:
self.states[(1, 0)] = PlayerState(position=Vector2D(rows[0], quarters[0]))
self.states[(1, 1)] = PlayerState(position=Vector2D(rows[0], quarters[2]))
self.states[(1, 2)] = PlayerState(position=Vector2D(rows[1], quarters[0]))
self.states[(1, 3)] = PlayerState(position=Vector2D(rows[1], quarters[2]))
if nb_players_2 == 4:
self.states[(2, 0)] = PlayerState(position=Vector2D(rows[3], quarters[0]))
self.states[(2, 1)] = PlayerState(position=Vector2D(rows[3], quarters[2]))
self.states[(2, 2)] = PlayerState(position=Vector2D(rows[2], quarters[0]))
self.states[(2, 3)] = PlayerState(position=Vector2D(rows[2], quarters[2]))
self.ball = Ball(Vector2D(settings.GAME_WIDTH / 2, settings.GAME_HEIGHT / 2),Vector2D())
self.goal = 0
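# Illustrative sketch (added): a fresh 2-vs-2 kickoff state, advanced by one
# round with no player input (the ball simply decelerates):
#     state = SoccerState.create_initial_state(2, 2)
#     state.apply_actions({})
#     print(state.step, state.ball)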
###############################################################################
# SoccerTeam
###############################################################################
class Player(object):
def __init__(self,name=None,strategy=None):
self.name = name or ""
self.strategy = strategy
def __str__(self):
return "%s (%s)" %(self.name,str(self.strategy))
def __repr__(self):
return self.__str__()
def to_dict(self):
return {"name":self.name,"strategy":self.strategy.__repr__()}
class SoccerTeam(object):
""" Equipe de foot. Comporte une liste ordonnee de Player.
"""
def __init__(self, name=None, players=None, login=None):
"""
:param name: nom de l'equipe
:param players: liste de joueur Player(name,strategy)
:return:
"""
self.name, self.players, self.login = name or "", players or [], login or ""
def to_dict(self):
return {"name":self.name,"players":self.players,"login":self.login}
def __iter__(self):
return iter(self.players)
def __str__(self):
return str(self.name)+"("+self.login+")"+": "+" ".join(str(p) for p in self.players)
def __repr__(self):
return self.__str__()
def add(self,name,strategy):
self.players.append(Player(name,strategy))
return self
@property
def players_name(self):
"""
:return: list of the team's player names
"""
return [x.name for x in self.players]
def player_name(self, idx):
"""
:param idx: player number
:return: player name
"""
return self.players[idx].name
@property
def strategies(self):
"""
:return: list of the players' strategies
"""
return [x.strategy for x in self.players]
def strategy(self, idx):
"""
:param idx: player number
:return: player strategy
"""
return self.players[idx].strategy
def compute_strategies(self, state, id_team):
""" calcule les actions de tous les joueurs
:param state: etat courant
:param id_team: numero de l'equipe
:return: dictionnaire action des joueurs
"""
return dict([((id_team, i), x.strategy.compute_strategy(state.copy(), id_team, i)) for i, x in
enumerate(self.players) if hasattr( x.strategy,"compute_strategy")])
@property
def nb_players(self):
"""
:return: number of players
"""
return len(self.players)
def copy(self):
return deepcopy(self)
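# Illustrative sketch (added; MyStrategy is a hypothetical class): a strategy
# is any object exposing compute_strategy(state, id_team, id_player) that
# returns a SoccerAction.
#     team = SoccerTeam('Reds').add('p1', MyStrategy()).add('p2', MyStrategy())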
###############################################################################
# Simulation
###############################################################################
class Simulation(object):
def __init__(self,team1=None,team2=None, max_steps = settings.MAX_GAME_STEPS,initial_state=None,**kwargs):
self.team1, self.team2 = team1 or SoccerTeam(),team2 or SoccerTeam()
self.initial_state = initial_state or SoccerState.create_initial_state(self.team1.nb_players,self.team2.nb_players,max_steps)
self.state = self.initial_state.copy()
self.max_steps = max_steps
self.state.max_steps = self.initial_state.max_steps = max_steps
self.listeners = SoccerEvents()
self._thread = None
self._on_going = False
self._thread = None
self._kill = False
self.states = []
self.error = False
self.replay = type(self.team1.strategy(0))==str or type(self.team1.strategy(0)) == unicode
for s in self.team1.strategies + self.team2.strategies:
self.listeners += s
self.__dict__.update(kwargs)
def reset(self):
self.replay = type(self.team1.strategy(0))==str or type(self.team1.strategy(0)) == unicode
self._thread = None
self._kill = False
self._on_going = False
if self.replay:
return
self.states = []
self.state = self.get_initial_state()
self.error = False
def to_dict(self):
return dict(team1=self.team1,team2=self.team2,state=self.state,max_steps=self.max_steps,states=self.states,initial_state=self.initial_state)
def get_initial_state(self):
return self.initial_state.copy()
def start_thread(self):
if not self._thread or not self._thread.isAlive():
self._kill = False
self._thread = threading.Thread(target=self.start)
self._thread.start()
def kill(self):
self._kill = True
def set_state(self,state):
state.score = self.state.score
self.state = state
self.state.max_steps = self.max_steps
self.state.step = len(self.states)
def start(self):
if self._on_going:
return
if self.replay:
self.state = self.states[0]
self.begin_match()
while not self.stop():
self.next_step()
self.end_match()
self._on_going = False
return self
@property
def step(self):
return self.state.step
def get_score_team(self,i):
return self.state.get_score_team(i)
def next_step(self):
if self.stop():
return
if self.replay:
self.state = self.states[self.state.step+1]
else:
actions=dict()
strategies=dict()
for i,t in enumerate([self.team1,self.team2]):
try:
actions.update(t.compute_strategies(self.state, i+1))
strategies.update(dict([((i,j),s.name) for j,s in enumerate(t.strategies)]))
except Exception as e:
time.sleep(0.0001)
logger.debug("%s" % (traceback.format_exc(),))
logger.warning("%s" %(e,))
self.state.step=self.max_steps
self.state.score[2-i]=100
self.error = True
logger.warning("Error for team %d -- loose match" % ((i+1),))
self.states.append(self.state.copy())
return
self.state.apply_actions(actions,strategies)
self.states.append(self.state.copy())
self.update_round()
def get_team(self,idx):
if idx==1:
return self.team1
if idx == 2:
return self.team2
def stop(self):
return self._kill or self.state.step >= self.max_steps or (self.replay and self.step+1>=len(self.states))
def update_round(self):
self.listeners.update_round(self.team1,self.team2,self.state.copy())
if self.state.goal > 0:
self.end_round()
def begin_round(self):
if not self.replay:
score=dict(self.state.score)
self.set_state(self.get_initial_state())
self.listeners.begin_round(self.team1,self.team2,self.state.copy())
self.states.append(self.state.copy())
self.listeners.begin_round(self.team1,self.team2,self.state.copy())
def end_round(self):
self.listeners.end_round(self.team1, self.team2, self.state.copy())
if not self.stop():
self.begin_round()
def begin_match(self):
self._on_going = True
self._kill = False
self.listeners.begin_match(self.team1,self.team2,self.state.copy())
self.begin_round()
def end_match(self):
self._kill = True
self.listeners.end_match(self.team1,self.team2,self.state.copy())
self.replay = True
def send_strategy(self,key):
self.listeners.send_strategy(key)
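# Illustrative sketch (added; the team objects are assumed built as above):
# running a headless match and reading the score.
#     simu = Simulation(team1, team2, max_steps=1000)
#     simu.start()                 # blocking; start_thread() runs it async
#     print(simu.get_score_team(1), simu.get_score_team(2))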
| gpl-2.0 | 6,704,401,290,783,556,000 | 40.405204 | 148 | 0.565092 | false | 3.493726 | false | false | false |
sapcc/monasca-api | monasca_tempest_tests/tests/api/helpers.py | 1 | 5784 | # (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import six.moves.urllib.parse as urlparse
from tempest.common.utils import data_utils
NUM_ALARM_DEFINITIONS = 2
NUM_MEASUREMENTS = 100
def create_metric(name='name-1',
dimensions={
'key-1': 'value-1',
'key-2': 'value-2'
},
timestamp=None,
value=0.0,
value_meta={
'key-1': 'value-1',
'key-2': 'value-2'
},
):
metric = {}
if name is not None:
metric['name'] = name
if dimensions is not None:
metric['dimensions'] = dimensions
if timestamp is not None:
metric['timestamp'] = timestamp
else:
metric['timestamp'] = int(time.time() * 1000)
if value is not None:
metric['value'] = value
if value_meta is not None:
metric['value_meta'] = value_meta
return metric
def create_notification(name=data_utils.rand_name('notification-'),
type='EMAIL',
address='[email protected]',
period=0):
notification = {}
if name is not None:
notification['name'] = name
if type is not None:
notification['type'] = type
if address is not None:
notification['address'] = address
if period is not None:
notification['period'] = period
return notification
def create_alarm_definition(name=None,
description=None,
expression=None,
match_by=None,
severity=None,
alarm_actions=None,
ok_actions=None,
undetermined_actions=None):
alarm_definition = {}
if name is not None:
alarm_definition['name'] = name
if description is not None:
alarm_definition['description'] = description
if expression is not None:
alarm_definition['expression'] = expression
if match_by is not None:
alarm_definition['match_by'] = match_by
if severity is not None:
alarm_definition['severity'] = severity
if alarm_actions is not None:
alarm_definition['alarm_actions'] = alarm_actions
if ok_actions is not None:
alarm_definition['ok_actions'] = ok_actions
if undetermined_actions is not None:
alarm_definition['undetermined_actions'] = undetermined_actions
return alarm_definition
def delete_alarm_definitions(monasca_client):
# Delete alarm definitions
resp, response_body = monasca_client.list_alarm_definitions()
elements = response_body['elements']
if elements:
for element in elements:
alarm_def_id = element['id']
monasca_client.delete_alarm_definition(alarm_def_id)
def timestamp_to_iso(timestamp):
time_utc = datetime.datetime.utcfromtimestamp(timestamp / 1000.0)
time_iso_base = time_utc.strftime("%Y-%m-%dT%H:%M:%S")
time_iso_base += 'Z'
return time_iso_base
def timestamp_to_iso_millis(timestamp):
time_utc = datetime.datetime.utcfromtimestamp(timestamp / 1000.0)
time_iso_base = time_utc.strftime("%Y-%m-%dT%H:%M:%S")
time_iso_microsecond = time_utc.strftime(".%f")
time_iso_millisecond = time_iso_base + time_iso_microsecond[0:4] + 'Z'
return time_iso_millisecond
def get_query_param(uri, query_param_name):
query_param_val = None
parsed_uri = urlparse.urlparse(uri)
for query_param in parsed_uri.query.split('&'):
parsed_query_name, parsed_query_val = query_param.split('=', 1)
if query_param_name == parsed_query_name:
query_param_val = parsed_query_val
return query_param_val
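# Example (illustrative, added): pulling the paging marker out of a "next"
# link returned by the API:
#     get_query_param('http://host/v2.0/metrics?offset=42&limit=10', 'offset')
# returns '42'.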
def get_expected_elements_inner_offset_limit(all_elements, offset, limit, inner_key):
expected_elements = []
total_statistics = 0
if offset is None:
offset_id = 0
offset_time = ""
else:
offset_tuple = offset.split('_')
offset_id = int(offset_tuple[0]) if len(offset_tuple) > 1 else 0
offset_time = offset_tuple[1] if len(offset_tuple) > 1 else offset_tuple[0]
for element in all_elements:
element_id = int(element['id'])
if offset_id is not None and element_id < offset_id:
continue
next_element = None
for value in element[inner_key]:
if (element_id == offset_id and value[0] > offset_time) or \
element_id > offset_id:
if not next_element:
next_element = element.copy()
next_element[inner_key] = [value]
else:
next_element[inner_key].append(value)
total_statistics += 1
if total_statistics >= limit:
break
if next_element:
expected_elements.append(next_element)
if total_statistics >= limit:
break
for i in range(len(expected_elements)):
expected_elements[i]['id'] = str(i)
return expected_elements
| apache-2.0 | -6,628,142,678,852,287,000 | 33.224852 | 85 | 0.591805 | false | 4.119658 | false | false | false |
OstapHEP/ostap | ostap/math/covtransform.py | 1 | 4364 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/math/covtransform.py
# Transformation of covariance matrices
# for \f$ y = y ( x ) \f$, it gets
# \f$ C_y = J C_x J^\mathrm{T} \f$,
# where \f$ J = \left( \frac{\partial y }{\partial x } \right) \f$
# @author Vanya BELYAEV [email protected]
# @date 2020-05-14
# =============================================================================
""" Transformation of covariand matrices
- for y = y ( x ) it gets C(y) = J C(x) J^T,
- where J is Jacobi matrix
"""
# =============================================================================
from __future__ import print_function
# =============================================================================
__author__ = "Vanya BELYAEV [email protected]"
__date__ = "2009-09-12"
__version__ = ""
# =============================================================================
__all__ = (
'transform' , ## transform covariance matrix
)
# =============================================================================
from builtins import range
import ROOT
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.math.covtransform' )
else : logger = getLogger ( __name__ )
# =============================================================================
from ostap.core.core import Ostap, VE
from ostap.math.derivative import Partial
import ostap.math.linalg
# =============================================================================
# =============================================================================
## Transform the covariance matrix C at point X to the variables Y(X)
# for \f$ y = y ( x ) \f$, it gets
# \f[ C(y)= J C(x) J^\mathrm{T} \f],
# where \f$ J = \left( \frac{\partial y }{\partial x } \right) \f$
# @code
# X = 1 , 2
# C = Ostap.SymMatrix(2)()
# C[ 0 , 0 ] = 0.20
# C[ 1 , 1 ] = 0.05
# C[ 1 , 1 ] = 0.30
# r = lambda x , y : (x*x+y*y)**0.5
# phi = lambda x , y : math.atan2 ( y , x )
# C_polar = transform ( C , X , r , phi )
# @endcode
# @param C "old" covatiance matrix
# @param X "old" varibales (arary iof values)
# @param Y "new" variables (array of callables)
# @return covarinance matrix for variables Y
def transform ( C , X , *Y ) :
""" Transform the covariance nmatrix C at point X to the variables Y(X)
>>> X = 1 , 2
>>> C = Ostap.SymMatrix(2)()
>>> C [ 0 , 0 ] = 0.20
>>> C [ 0 , 1 ] = 0.05
>>> C [ 1 , 1 ] = 0.30
>>> r = lambda x , y : (x*x+y*y)**0.5
>>> phi = lambda x , y : math.atan2 ( y , x )
>>> C_polar = transform ( C , X , r , phi )
"""
ny = len ( Y )
assert 1 <= ny , 'Invalid size of Y!'
nx = len ( X )
if C is None and 1 <= nx :
C = Ostap.SymMatrix ( nx ) ()
for i , x in enumerate ( X ) :
xx = VE ( x )
C [ i, i ] = xx.cov2 ()
shape = C.shape
assert shape [ 0 ] == shape[1] and shape[0] == nx , 'Invalid shape of matrix C!'
CC = Ostap.SymMatrix ( nx ) ()
for i in range ( CC.kRows ) :
CC [ i , i ] = C ( i , i )
for j in range ( i + 1 , CC.kCols ) :
v = 0.5 * ( C ( i , j ) + C ( j , i ) )
CC [ i , j ] = v
XX = Ostap.Vector ( nx ) ()
for i , x in enumerate ( X ) :
XX [ i ] = float ( x )
## get vector-with-errors
XX = Ostap.VectorE ( nx ) ( XX , CC )
R = XX.transform ( *Y )
return R.cov2()
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
## The END
# =============================================================================
| bsd-3-clause | -2,332,939,205,737,670,000 | 36.619469 | 86 | 0.35472 | false | 3.499599 | false | false | false |
tensorflow/tfx | tfx/orchestration/portable/execution_watcher.py | 1 | 4660 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a gRPC service for updating remote job info to MLMD."""
from concurrent import futures
from typing import Optional
from absl import logging
import grpc
from tfx.orchestration import metadata
from tfx.proto.orchestration import execution_watcher_pb2
from tfx.proto.orchestration import execution_watcher_pb2_grpc
from ml_metadata.proto import metadata_store_pb2
def generate_service_stub(
address: str,
creds: Optional[grpc.ChannelCredentials] = None,
) -> execution_watcher_pb2_grpc.ExecutionWatcherServiceStub:
"""Generates a gRPC service stub for a given server address."""
channel = grpc.secure_channel(
address, creds) if creds else grpc.insecure_channel(address)
return execution_watcher_pb2_grpc.ExecutionWatcherServiceStub(channel)
class ExecutionWatcher(
execution_watcher_pb2_grpc.ExecutionWatcherServiceServicer):
"""A gRPC service server for updating remote job info to MLMD.
Attributes:
local_address: Local network address to the server.
address: Remote network address to the server, same as local_address if not
configured.
"""
def __init__(self,
port: int,
mlmd_connection: metadata.Metadata,
execution: metadata_store_pb2.Execution,
address: Optional[str] = None,
creds: Optional[grpc.ServerCredentials] = None):
"""Initializes the gRPC server.
Args:
port: Which port the service will be using.
mlmd_connection: ML metadata connection.
execution: The MLMD Execution to keep track of.
address: Remote address used to contact the server. Should be formatted as
an ipv4 or ipv6 address in the format `address:port`. If left as
None, server will use local address.
creds: gRPC server credentials. If left as None, server will use an
insecure port.
"""
super().__init__()
self._port = port
self._address = address
self._creds = creds
self._mlmd_connection = mlmd_connection
self._server = self._create_server()
if not execution.HasField('id'):
raise ValueError(
'execution id must be set to be tracked by ExecutionWatcher.')
self._execution = execution
def UpdateExecutionInfo(
self, request: execution_watcher_pb2.UpdateExecutionInfoRequest,
context: grpc.ServicerContext
) -> execution_watcher_pb2.UpdateExecutionInfoResponse:
"""Updates the `custom_properties` field of Execution object in MLMD."""
logging.info('Received request to update execution info: updates %s, '
'execution_id %s', request.updates, request.execution_id)
if request.execution_id != self._execution.id:
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details(
'Execution with given execution_id not tracked by server: '
f'{request.execution_id}')
return execution_watcher_pb2.UpdateExecutionInfoResponse()
for key, value in request.updates.items():
self._execution.custom_properties[key].CopyFrom(
value)
# Only the execution is needed
with self._mlmd_connection as m:
m.store.put_executions((self._execution,))
return execution_watcher_pb2.UpdateExecutionInfoResponse()
def _create_server(self):
"""Creates a gRPC server and add `self` on to it."""
result = grpc.server(futures.ThreadPoolExecutor())
execution_watcher_pb2_grpc.add_ExecutionWatcherServiceServicer_to_server(
self, result)
if self._creds is None:
result.add_insecure_port(self.local_address)
else:
result.add_secure_port(self.local_address, self._creds)
return result
@property
def local_address(self) -> str:
# Local network address to the server.
return f'localhost:{self._port}'
@property
def address(self) -> str:
return self._address or self.local_address
def start(self):
"""Starts the server."""
self._server.start()
def stop(self):
"""Stops the server."""
self._server.stop(grace=None)
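# Illustrative sketch (added; `mlmd_handle` is an assumed, pre-built
# metadata.Metadata instance):
#     execution = metadata_store_pb2.Execution(id=1)
#     watcher = ExecutionWatcher(port=50051, mlmd_connection=mlmd_handle,
#                                execution=execution)
#     watcher.start()
#     stub = generate_service_stub(watcher.address)
#     stub.UpdateExecutionInfo(
#         execution_watcher_pb2.UpdateExecutionInfoRequest(execution_id=1))
#     watcher.stop()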
| apache-2.0 | -8,736,963,928,753,540,000 | 36.28 | 80 | 0.700215 | false | 4.138544 | false | false | false |
AdaDoom3/AdaDoom3 | Tools/compile-materials.py | 1 | 5285 | ## textures/base_door/light_panel1
## {
## qer_editorimage textures/base_door/stedoorframe2.tga
## bumpmap textures/base_door/stedoorframe2_local.tga
## diffusemap textures/base_door/stedoorframe2_d.tga
## specularmap textures/base_door/stedoorframe2_s.tga
## {
## if ( parm7 == 0 )
## blend add
## map textures/base_door/stedoorframered_add.tga
## rgb 5
## }
## {
## if ( parm7 == 1 )
## blend add
## map textures/base_door/stedoorframegrn_add.tga
## rgb 5
## }
## }
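## Illustrative expected output for the sample material above (added; traced
## from the handlers below, not taken from the original script):
##   Material
##   {
##    Name {string {"textures_base_door_light_panel1"}}
##    Texture (attrib = "editor") {string {"textures_base_door_stedoorframe2"}}
##    ...
##   }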
import fileinput
import re
##############
## Patterns ##
##############
WHITE_SPACE = r'[\s]+'
NUM_PATTERN = r'[0-9]' + WHITE_SPACE + r'[-+0-9]'
TEX_PATH = r'(savegames|fonts|textures|guis|ui|guisurfs|particles|lights|models|env)[\\/][a-z_\\/0-9A-Z]*'
re.compile (NUM_PATTERN)
re.compile (WHITE_SPACE)
#############
## Globals ##
#############
mode = 0
in_material = False
blend_mode = 0
is_blend = False
did_diffuse = False
did_specular = False
did_bumpmap = False
s = []
##################
## get_material ##
##################
def get_material (line):
mat_name = re.search (TEX_PATH, line)
if mat_name:
mat_name = re.sub (r'[\\/]', '_', mat_name.group (0))
else:
mat_name = re.search (r'_[a-z_\\/0-9A-Z]*', line)
return mat_name.group (0)
return mat_name
##################
## process_line ##
##################
def process_line (line, next_line):
global mode
global in_material
global is_blend
global blend_mode
global s
## Strip the EOL
line = line.strip ()
line = re.sub (r'//.*', '', line)
if re.search (r'^table', line):
return
## Ignore empty lines
if not line:
return
if re.search (r'{', line):
s.append ('Crap')
if re.search (r'}', line):
if len (s) != 0:
s.pop ()
if len (s) == 0 and in_material:
in_material = False
print ('}')
## See if we are at the start of a material
if not in_material and re.search (r'^' + TEX_PATH, line):
in_material = True
print ('Material')
print ('{')
print (' Name {string {\"' + get_material (line) + '\"}}')
elif in_material:
## A "blended" texture
if re.search (r'^blend' + WHITE_SPACE, line):
## Handle blend modes
if re.search (r'[dD]iffuse[mM]ap', line):
is_blend = True
blend_mode = 0
elif re.search (r'[sS]pecular[mM]ap', line):
is_blend = True
blend_mode = 1
elif re.search (r'[bB]ump[mM]ap', line):
is_blend = True
blend_mode = 2
else:
blend_mode = -1
## Handle a blended texture and ignore other attributes
elif is_blend and re.search (r'^[mM]ap' + WHITE_SPACE, line):
is_blend = False
if re.search (r'addnormals', line):
return
elif blend_mode == 0:
print (' Texture (attrib = "diffuse") {string {\"' + get_material (line) + '\"}}')
elif blend_mode == 1:
print (' Texture (attrib = "specular") {string {\"' + get_material (line) + '\"}}')
elif blend_mode == 2:
print (' Texture (attrib = "normal") {string {\"' + get_material (line) + '\"}}')
## Normal path for diffuse, specular, and normal textures
elif re.search (r'^[dD]iffuse[mM]ap', line):
print (' Texture (attrib = "diffuse") {string {\"' + get_material (line) + '\"}}')
elif re.search (r'^[sS]pecular[mM]ap', line):
print (' Texture (attrib = "specular") {string {\"' + get_material (line) + '\"}}')
elif re.search (r'^[bB]ump[mM]ap', line):
print (' Texture (attrib = "normal") {string {\"' + get_material (line) + '\"}}')
elif re.search (r'^qer_editorimage', line):
print (' Texture (attrib = "editor") {string {\"' + get_material (line) + '\"}}')
##########
## Main ##
##########
## Iterate over the file line by line
first_iteration = True
previous_line = ''
for current_line in fileinput.input():
## We have to save up 2 lines before processing
if not first_iteration:
process_line (previous_line, current_line)
else:
first_iteration = False
previous_line = current_line
## Handle the remaining line
if previous_line:
process_line (previous_line, '')
#######################
## replace_key_value ##
#######################
# def replace_key_value (line, key, new_key, kind, is_array):
# global found_key_value
# ## No need to waste time
# if found_key_value:
# return line
# ## String key value needs to be wrapped in quotes
# if not re.search (r' ' + key + ' ', line):
# return line
# ## We must have found one
# found_key_value = True
# if kind == "string":
# text = re.sub (key + WHITE_SPACE, " " + new_key + " {string {\"", line)
# if text != line:
# text = text + "}}"
# ## Array types need an extra curly level
# elif not is_array:
# text = re.sub (r"\"" + key + "\" \"", " " + new_key + " {" + kind + " {", line)
# if text != line:
# text = re.sub (r"\"", "}}", text.rstrip ())
# ## Otherwise it is a normal discrete or numeric kind
# else:
# text = re.sub (r"\"" + key + "\" \"", " " + new_key + " {" + kind + " {{", line)
# if text != line:
# text = re.sub (r"\"", "}}}", text.rstrip ())
# ## Give the result
# return text
| gpl-3.0 | -8,753,381,104,364,044,000 | 25.293532 | 106 | 0.541154 | false | 2.989253 | false | false | false |
darth-dodo/what_2_watch | data_import.py | 1 | 1541 | from reddit_to_csv import sub2csv
import csv
import sqlite3
'''insert into table sub_categories first'''
'''make sure constraints are met'''
# -- for foreign_keys support (on delete cascade)
# --con.execute("PRAGMA foreign_keys = ON") for python
def csv_2_list(csv_name):
with open(csv_name) as f:
data = [list(line) for line in csv.reader(f)]
return data
def list_2_database(data_list, category_id=0, sub_category_id=0):
conn = sqlite3.connect('w2w.db')
conn.execute("PRAGMA foreign_keys = ON")
for link in data_list:
cursor = conn.cursor()
data_2_insert = [category_id, sub_category_id,
link[0].decode('utf-8'), link[1].decode('utf-8')]
# print data_2_insert
cursor.execute(
"insert into all_links (c_fid, s_fid,link_title, link_url)values (?,?,?,?)", data_2_insert)
conn.commit()
conn.close()
'''standupcomedy'''
# sub_csv_name = sub2csv('standupcomedy', 450)
# sub_data = csv_2_list(sub_csv_name)
# list_2_database(sub_data, category_id=2, sub_category_id=1)
# '''Music sub categories'''
# sub_csv_name = sub2csv('musicvideos', 200)
# sub_data = csv_2_list(sub_csv_name)
# list_2_database(sub_data, category_id=4, sub_category_id=1)
# sub_csv_name = sub2csv('coversongs', 200)
# sub_data = csv_2_list(sub_csv_name)
# list_2_database(sub_data, category_id=4, sub_category_id=2)
sub_csv_name = sub2csv('AcousticCovers', 200)
sub_data = csv_2_list(sub_csv_name)
list_2_database(sub_data, category_id=4, sub_category_id=3)
| mit | -1,786,287,117,802,510,000 | 29.82 | 103 | 0.651525 | false | 2.786618 | false | false | false |
bskari/sparkfun-avc | control/test/benchmark.py | 1 | 3105 | """Benchmarks the parts of the system."""
import time
from control.command import Command
from control.simple_waypoint_generator import SimpleWaypointGenerator
from control.location_filter import LocationFilter
from control.telemetry import Telemetry
from control.test.dummy_driver import DummyDriver
from control.test.dummy_logger import DummyLogger
# pylint: disable=invalid-name
# pylint: disable=protected-access
# pylint: disable=line-too-long
def benchmark_location_filter_update_gps():
"""Benchmark the location filter GPS update."""
location_filter = LocationFilter(0.0, 0.0, 0.0)
iterations = 100
start = time.time()
for _ in range(iterations):
location_filter.update_gps(100.0, 100.0, 1.0, 1.0, 20.0, 4.5)
end = time.time()
print(
'{} iterations of LocationFilter.update_gps, each took {:.5}'.format(
iterations,
(end - start) / float(iterations)
)
)
def benchmark_location_filter_update_compass():
"""Benchmark the location filter compass update."""
location_filter = LocationFilter(0.0, 0.0, 0.0)
iterations = 100
start = time.time()
for _ in range(iterations):
location_filter.update_compass(20.0)
end = time.time()
print(
'{} iterations of LocationFilter.update_compass, each took {:.5}'.format(
iterations,
(end - start) / float(iterations)
)
)
def benchmark_location_filter_update_dead_reckoning():
"""Benchmark the location filter with dead reckoning and no other input."""
location_filter = LocationFilter(0.0, 0.0, 0.0)
iterations = 1000
start = time.time()
for _ in range(iterations):
location_filter.update_dead_reckoning()
end = time.time()
print(
'{} iterations of LocationFilter.update_dead_reckoning, each took {:.5}'.format(
iterations,
(end - start) / float(iterations)
)
)
def benchmark_command_run_course_iterator():
"""Benchmark the logic for driving the car."""
logger = DummyLogger()
telemetry = Telemetry(logger)
waypoint_generator = SimpleWaypointGenerator(
SimpleWaypointGenerator.get_waypoints_from_file_name(
'paths/solid-state-depot.kmz'
)
)
driver = DummyDriver(telemetry, logger)
command = Command(telemetry, driver, waypoint_generator, logger)
iterations = 250
start = time.time()
iterator = command._run_course_iterator()
step = None
for step in zip(range(iterations), iterator):
pass
assert step is not None
assert step[0] == iterations - 1
end = time.time()
print(
'{} iterations of Command._run_course_iterator, each took {:.5}'.format(
iterations,
(end - start) / float(iterations)
)
)
def main():
"""Runs all the benchmarks."""
benchmark_location_filter_update_gps()
benchmark_location_filter_update_compass()
benchmark_location_filter_update_dead_reckoning()
benchmark_command_run_course_iterator()
if __name__ == '__main__':
main()
| mit | 755,614,114,784,942,000 | 29.145631 | 88 | 0.648953 | false | 3.86675 | false | false | false |
davidyezsetz/kuma | vendor/packages/translate-toolkit/translate/filters/helpers.py | 7 | 2818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""a set of helper functions for filters..."""
import operator
def countmatch(str1, str2, countstr):
"""checks whether countstr occurs the same number of times in str1 and str2"""
return str1.count(countstr) == str2.count(countstr)
def funcmatch(str1, str2, func, *args):
"""returns whether the result of func is the same for str1 and str2"""
return func(str1, *args) == func(str2, *args)
def countsmatch(str1, str2, countlist):
"""checks whether each element in countlist occurs the same number of times in str1 and str2"""
return reduce(operator.and_, [countmatch(str1, str2, countstr) for countstr in countlist], True)
def funcsmatch(str1, str2, funclist):
"""checks whether the results of each func in funclist match for str1 and str2"""
return reduce(operator.and_, [funcmatch(str1, str2, funcstr) for funcstr in funclist], True)
def filtercount(str1, func):
"""returns the number of characters in str1 that pass func"""
return len(filter(func, str1))
def filtertestmethod(testmethod, strfilter):
"""returns a version of the testmethod that operates on filtered strings using strfilter"""
def filteredmethod(str1, str2):
return testmethod(strfilter(str1), strfilter(str2))
filteredmethod.__doc__ = testmethod.__doc__
filteredmethod.name = getattr(testmethod, 'name', testmethod.__name__)
return filteredmethod
def multifilter(str1, strfilters, *args):
"""passes str1 through a list of filters"""
for strfilter in strfilters:
str1 = strfilter(str1, *args)
return str1
def multifiltertestmethod(testmethod, strfilters):
"""returns a version of the testmethod that operates on filtered strings using strfilter"""
def filteredmethod(str1, str2):
return testmethod(multifilter(str1, strfilters), multifilter(str2, strfilters))
filteredmethod.__doc__ = testmethod.__doc__
filteredmethod.name = getattr(testmethod, 'name', testmethod.__name__)
return filteredmethod
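# Example usage (illustrative, added): turning a plain equality check into a
# whitespace-insensitive one with filtertestmethod:
#     equals = lambda a, b: a == b
#     stripped_equals = filtertestmethod(equals, lambda s: s.strip())
#     stripped_equals("abc ", " abc")    # True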
| mpl-2.0 | -5,246,357,640,311,967,000 | 41.059701 | 100 | 0.726757 | false | 3.78255 | true | false | false |
blacknred0/mdup | main.py | 1 | 9061 | '''
Created on Feb 28, 2017
@contact: Irving Duran
@author: [email protected]
@summary: Collect and send via SMS your current month data usage.
'''
# TODO: Rewrite to accept one high-level argument (instead of two separate)
# python scripts to be used an an input in crontab
import os
import datetime
import sys
import mdup
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from pathlib import Path
prog_path = os.path.dirname(os.path.realpath(sys.argv[0])) #get python file path
os.chdir(prog_path) #change working directory
conf = pd.read_table('conf', sep='=', header=None) #store config file
# TODO: Better way to extract results and storing it
used, left, daysleft, dataused, datesnap, startday, datacap = mdup.get_data(prog_path, conf)
comb = used + ',' + left + ',' + daysleft + ',' + dataused + ',' + datesnap + '\n'
fp = Path('isp.log')
# file exists append new data, else create headers and dump data
if fp.is_file():
###############################################################################
# Convert strings to date and check if there is a newer snapshot from Mediacom
# if there if new snapshot, continue, else quit
###############################################################################
dt_datesnap = datetime.datetime.strptime(datesnap, '%m/%d/%Y %H:%M')
last_dt_datesnap = datetime.datetime.strptime(pd.read_csv('isp.log')['datesnap']
.tail(1).to_string(header=False, index=False),
'%m/%d/%Y %H:%M')
if last_dt_datesnap >= dt_datesnap:
print('No need to dump new data since latest version exist on the log file.',
'\nWill still continue and run prediction.')
#mdup.kill(dvr, disp) #try to started services
###############################################################################
# Gather date information to align with reporting month
###############################################################################
today = datetime.date.today() #return today's date as a date object
#source http://stackoverflow.com/questions/37396329/finding-first-day-of-the-month-in-python
if today.day > startday:
today += datetime.timedelta(1)
startdate = str(today.replace(day=startday)) #return XXth of the previous month
else:
#source http://stackoverflow.com/questions/36155332/how-to-get-the-first-day-and-last-day-of-current-month-in-python
startdate = str(datetime.date(today.year, today.month - 1, startday)) #return XXth of the previous month
enddate = mdup.add_months(datetime.datetime(*[int(item) for item in startdate.split('-')]), 1).strftime("%Y-%m-%d")
###############################################################################
# Build prediction model using linear regression
###############################################################################
df = pd.read_csv('isp.log')
df.replace(r'( \d:\d\d)|( \d\d:\d\d)', '', inplace=True, regex=True) #remove time
df['datesnap'] = pd.to_datetime(df['datesnap'], format="%m/%d/%Y") #fix date issue
df = df[df['datesnap'] > startdate] #select records on the current month
X = df.as_matrix(columns=['daysleft']) # current days
y = df.as_matrix(columns=['dataused']) # data usage to predict
model = LinearRegression()
model.fit(X, y)
# create and sort descending order for days left
# then predict data usage based on days left on the month by excluding
# day zero from the selection
X_predict = np.arange(np.min(X)); X_predict = X_predict[:0:-1]
X_predict = X_predict[:, np.newaxis] #transpose
y_predict = model.predict(X_predict) #predict data usage
#fc = np.concatenate((X_predict, y_predict), axis=1) #forecast
# calculate the over usage based on 50GB blocks at $10 a piece.
f_msg = str('\n[Mediacom] With ' + str(np.min(X)) + ' days left, ' +
'your current ' + dataused + 'GB and projected ' +
str(np.max(np.round(y_predict, decimals=1))) + 'GB data usage.')
b_msg = str(' That is ~' + str(np.round(np.max(y_predict)-datacap, decimals=0).astype(int)) +
'GB or ~$' + str(mdup.round10(((np.max(y_predict)-datacap)/50) * 10)) +
' over.')
# if over usage data prediction is less than zero,
# don't append prediction over usage
dta_msg = str(f_msg +
'' if np.round(np.max(y_predict)-datacap, decimals=0).astype(int) < 0
else f_msg + b_msg)
###############################################################################
# Email the prediction results
###############################################################################
username = conf.iloc[2][1]
password = conf.iloc[3][1]
to = sys.argv[2].split(sep=',')
mdup.email_msg(username, password, to, dta_msg)
#mdup.kill(dvr, disp) #try to started services
print('DONE processing the whole thing.')
sys.exit(0)
else:
f = open('isp.log', mode='a')
f.write(comb)
f.close()
###############################################################################
# Gather date information to align with reporting month
###############################################################################
today = datetime.date.today() # return today's date as a date object
#source http://stackoverflow.com/questions/37396329/finding-first-day-of-the-month-in-python
if today.day > startday:
today += datetime.timedelta(1)
startdate = str(today.replace(day=startday)) #return XXth of the previous month
else:
#source http://stackoverflow.com/questions/36155332/how-to-get-the-first-day-and-last-day-of-current-month-in-python
startdate = str(datetime.date(today.year, today.month - 1, startday)) #return XXth of the previous month
enddate = mdup.add_months(datetime.datetime(*[int(item) for item in startdate.split('-')]), 1).strftime("%Y-%m-%d")
###############################################################################
# Build prediction model using linear regression
###############################################################################
df = pd.read_csv('isp.log')
df.replace(r'( \d:\d\d)|( \d\d:\d\d)', '', inplace=True, regex=True) #remove time
df['datesnap'] = pd.to_datetime(df['datesnap'], format="%m/%d/%Y") #fix date issue
df = df[df['datesnap'] > startdate] #select records on the current month
X = df.as_matrix(columns=['daysleft']) # current days
y = df.as_matrix(columns=['dataused']) # data usage to predict
model = LinearRegression()
model.fit(X, y)
# create and sort descending order for days left
# then predict data usage based on days left on the month
X_predict = np.arange(np.min(X)); X_predict = X_predict[::-1]
X_predict = X_predict[:, np.newaxis] #transpose
y_predict = model.predict(X_predict) #predict data usage
#fc = np.concatenate((X_predict, y_predict), axis=1) #forecast
# calculate the over usage based on 50GB blocks at $10 a piece.
f_msg = str('\n[Mediacom] With ' + str(np.min(X)) + ' days left, ' +
'your current ' + dataused + 'GB and projected ' +
str(np.max(np.round(y_predict, decimals=1))) + 'GB data usage.')
b_msg = str(' That is ~' + str(np.round(np.max(y_predict)-datacap, decimals=0).astype(int)) +
'GB or ~$' + str(mdup.round10(((np.max(y_predict)-datacap)/50) * 10)) +
' over.')
# if over usage data prediction is less than zero,
# don't append prediction over usage
dta_msg = str(f_msg +
'' if np.round(np.max(y_predict)-datacap, decimals=0).astype(int) < 0
else f_msg + b_msg)
###############################################################################
# Email the prediction results
###############################################################################
username = conf.iloc[2][1]
password = conf.iloc[3][1]
to = sys.argv[2].split(sep=',')
mdup.email_msg(username, password, to, dta_msg)
#mdup.kill(dvr, disp) #try to started services
print('DONE processing the whole thing.')
sys.exit(0)
else:
f = open('isp.log', 'w')
f.write('used,left,daysleft,dataused,datesnap\n') #write header
f.write(comb)
f.close()
print('Creating new file since it does not exist. Next run you should get a prediction.')
#mdup.kill(dvr, disp) #try to started services
sys.exit(0)
| apache-2.0 | -2,506,896,233,334,858,000 | 56.713376 | 128 | 0.526432 | false | 4.021749 | false | false | false |
Shinoby1992/xstream | resources/lib/handler/pluginHandler.py | 1 | 6324 | import sys
import os
import json
from resources.lib.config import cConfig
from resources.lib import common, logger
class cPluginHandler:
def __init__(self):
self.addon = common.addon
self.rootFolder = common.addonPath
self.settingsFile = os.path.join(self.rootFolder, 'resources', 'settings.xml')
self.profilePath = common.profilePath
self.pluginDBFile = os.path.join(self.profilePath,'pluginDB')
logger.info('profile folder: %s' % self.profilePath)
logger.info('root folder: %s' % self.rootFolder)
self.defaultFolder = os.path.join(self.rootFolder, 'sites')
logger.info('default sites folder: %s' % self.defaultFolder)
def getAvailablePlugins(self):
pluginDB = self.__getPluginDB()
# default plugins
update = False
fileNames = self.__getFileNamesFromFolder(self.defaultFolder)
for fileName in fileNames:
plugin = {'name':'', 'icon':'', 'settings':'', 'modified':0}
if fileName in pluginDB:
plugin.update(pluginDB[fileName])
try:
modTime = os.path.getmtime(os.path.join(self.defaultFolder,fileName+'.py'))
except OSError:
modTime = 0
if fileName not in pluginDB or modTime > plugin['modified']:
logger.info('load plugin: ' + str(fileName))
# try to import plugin
pluginData = self.__getPluginData(fileName)
if pluginData:
pluginData['modified'] = modTime
pluginDB[fileName] = pluginData
update = True
# check pluginDB for obsolete entries
deletions = []
for pluginID in pluginDB:
if pluginID not in fileNames:
deletions.append(pluginID)
for id in deletions:
del pluginDB[id]
if update or deletions:
self.__updateSettings(pluginDB)
self.__updatePluginDB(pluginDB)
return self.getAvailablePluginsFromDB()
def getAvailablePluginsFromDB(self):
plugins = []
oConfig = cConfig()
iconFolder = os.path.join(self.rootFolder, 'resources','art','sites')
pluginDB = self.__getPluginDB()
for pluginID in pluginDB:
plugin = pluginDB[pluginID]
pluginSettingsName = 'plugin_%s' % pluginID
plugin['id'] = pluginID
if 'icon' in plugin:
plugin['icon'] = os.path.join(iconFolder, plugin['icon'])
else:
plugin['icon'] = ''
            # do the on/off settings for this plugin exist?
if oConfig.getSetting(pluginSettingsName) == 'true':
plugins.append(plugin)
return plugins
def __updatePluginDB(self, data):
if not os.path.exists(self.profilePath):
os.makedirs(self.profilePath)
file = open(self.pluginDBFile, 'w')
json.dump(data,file)
file.close()
def __getPluginDB(self):
if not os.path.exists(self.pluginDBFile):
return dict()
file = open(self.pluginDBFile, 'r')
try:
data = json.load(file)
except ValueError:
logger.error("pluginDB seems corrupt, creating new one")
data = dict()
file.close()
return data
def __updateSettings(self, pluginData):
        '''
        pluginData (dict): plugin information keyed by plugin ID
        '''
xmlString = '<plugin_settings>%s</plugin_settings>'
import xml.etree.ElementTree as ET
tree = ET.parse(self.settingsFile)
#find Element for plugin Settings
pluginElem = False
for elem in tree.findall('category'):
if elem.attrib['label']=='30022':
pluginElem = elem
break
if not pluginElem:
logger.info('could not update settings, pluginElement not found')
return False
pluginElements = pluginElem.findall('setting')
for elem in pluginElements:
pluginElem.remove(elem)
# add plugins to settings
for pluginID in sorted(pluginData):
plugin = pluginData[pluginID]
subEl = ET.SubElement(pluginElem,'setting', {'type': 'lsep', 'label':plugin['name']})
subEl.tail = '\n\t'
attrib = {'default': 'false', 'type': 'bool'}
attrib['id'] = 'plugin_%s' % pluginID
attrib['label'] = plugin['name']
subEl = ET.SubElement(pluginElem, 'setting', attrib)
subEl.tail = '\n\t'
if 'settings' in plugin:
customSettings = []
try:
customSettings = ET.XML(xmlString % plugin['settings']).findall('setting')
except:
                    logger.info('Parsing of custom settings for %s failed.' % plugin['name'])
for setting in customSettings:
setting.tail = '\n\t'
pluginElem.append(setting)
        pluginElem.findall('setting')[-1].tail = '\n'
try:
ET.dump(pluginElem)
except:
logger.info('Settings update failed')
return
tree.write(self.settingsFile)
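        # For illustration, a plugin entry {'name': 'Foo'} with ID 'foo'
        # yields settings elements shaped like
        #   <setting type="lsep" label="Foo" />
        #   <setting id="plugin_foo" type="bool" label="Foo" default="false" />
        # plus any custom <setting> elements the plugin supplies itself.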
def __getFileNamesFromFolder(self, sFolder):
aNameList = []
items = os.listdir(sFolder)
for sItemName in items:
if sItemName.endswith('.py'):
sItemName = os.path.basename(sItemName[:-3])
aNameList.append(sItemName)
return aNameList
def __getPluginData(self, fileName):
pluginData = {}
try:
plugin = __import__(fileName, globals(), locals())
pluginData['name'] = plugin.SITE_NAME
except Exception, e:
logger.error("Can't import plugin: %s :%s" % (fileName, e))
return False
try:
pluginData['icon'] = plugin.SITE_ICON
except:
pass
try:
pluginData['settings'] = plugin.SITE_SETTINGS
except:
pass
return pluginData
| gpl-3.0 | 7,924,670,292,942,928,000 | 37.279503 | 97 | 0.545225 | false | 4.481928 | false | false | false |
mscuthbert/abjad | abjad/tools/abjadbooktools/test/test_LaTeXDocumentHandler_syntax_error.py | 1 | 2098 | # -*- encoding: utf-8 -*-
import platform
import unittest
from abjad.tools import abjadbooktools
from abjad.tools import systemtools
@unittest.skipIf(
platform.python_implementation() != 'CPython',
'Only for CPython.',
)
class TestLaTeXDocumentHandler_syntax_error(unittest.TestCase):
def test_syntax_error_1(self):
input_file_contents = [
'<abjad>',
'foo bar baz',
'</abjad>',
]
document_handler = abjadbooktools.LaTeXDocumentHandler(
input_file_contents=input_file_contents,
)
self.assertRaises(
abjadbooktools.AbjadBookError,
document_handler.__call__,
)
def test_syntax_error_2(self):
input_file_contents = [
'<abjad>[allow_exceptions=true]',
'foo bar baz',
'</abjad>',
]
document_handler = abjadbooktools.LaTeXDocumentHandler(
input_file_contents=input_file_contents,
)
rebuilt_source = document_handler(return_source=True)
assert rebuilt_source == systemtools.TestManager.clean_string(
'''
<abjad>[allow_exceptions=true]
foo bar baz
</abjad>
%%% ABJADBOOK START %%%
\\begin{lstlisting}
>>> foo bar baz
File "<stdin>", line 1
foo bar baz
^
SyntaxError: invalid syntax
\\end{lstlisting}
%%% ABJADBOOK END %%%
''',
)
def test_syntax_error_3(self):
input_file_contents = [
'<abjad>[allow_exceptions=true]',
'foo bar baz',
'</abjad>',
'',
'<abjad>',
'foo bar baz',
'</abjad>',
]
document_handler = abjadbooktools.LaTeXDocumentHandler(
input_file_contents=input_file_contents,
)
self.assertRaises(
abjadbooktools.AbjadBookError,
document_handler.__call__
) | gpl-3.0 | 3,024,261,067,868,072,000 | 28.152778 | 70 | 0.513346 | false | 4.221328 | true | false | false |
hayatoy/dataflow-tutorial | tutorial7.py | 1 | 2908 |
# -*- coding: utf-8 -*-
# split the GroupBy interval using windows
import apache_beam as beam
# Basic Dataflow settings
# Specify the job name, project name, and locations for temporary files.
options = beam.options.pipeline_options.PipelineOptions()
gcloud_options = options.view_as(
beam.options.pipeline_options.GoogleCloudOptions)
gcloud_options.job_name = 'dataflow-tutorial7'
gcloud_options.project = 'PROJECTID'
gcloud_options.staging_location = 'gs://PROJECTID/staging'
gcloud_options.temp_location = 'gs://PROJECTID/temp'
# Dataflow scaling settings
# Set the maximum number of workers, the machine type, and so on.
# The default worker disk size is large (250GB for batch, 420GB for streaming),
# so specifying the size you actually need here is recommended.
worker_options = options.view_as(beam.options.pipeline_options.WorkerOptions)
worker_options.disk_size_gb = 20
worker_options.max_num_workers = 2
# worker_options.num_workers = 2
# worker_options.machine_type = 'n1-standard-8'
# Switching the execution environment
# DirectRunner: runs on the local machine
# DataflowRunner: runs on Cloud Dataflow
# options.view_as(beam.options.pipeline_options.StandardOptions).runner = 'DirectRunner'
options.view_as(beam.options.pipeline_options.StandardOptions).runner = 'DataflowRunner'
def assign_timevalue(v):
    # attach a timestamp to each element of the PCollection;
    # the downstream window transform splits data on this timestamp
    # (a random timestamp is assigned here just for demonstration)
import apache_beam.transforms.window as window
import random
import time
return window.TimestampedValue(v, int(time.time()) + random.randint(0, 1))
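# Sketch of the windowing effect (values are illustrative): with the
# FixedWindows(1) transform used below, elements stamped one second apart
# land in different windows and are therefore grouped separately, e.g.
#   t=1000.0 -> window [1000.0, 1001.0)
#   t=1001.0 -> window [1001.0, 1002.0)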
def modify_data3(kvpair):
    # GroupByKey passes a tuple of (key, list of values with that key);
    # the window split means each list holds fewer elements, e.g.
    # kvpair = (u'word only', [4, 4, 6, 6, 7])
return {'count_type': kvpair[0],
'sum': sum(kvpair[1])
}
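# Worked example: modify_data3((u'word only', [4, 4, 6, 6, 7]))
# returns {'count_type': u'word only', 'sum': 27}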
p7 = beam.Pipeline(options=options)
query = 'SELECT * FROM [PROJECTID:testdataset.testtable3] LIMIT 20'
(p7 | 'read' >> beam.io.Read(beam.io.BigQuerySource(project='PROJECTID', use_standard_sql=False, query=query))
| "assign tv" >> beam.Map(assign_timevalue)
| 'window' >> beam.WindowInto(beam.window.FixedWindows(1))
| 'pair' >> beam.Map(lambda x: (x['count_type'], x['word_count']))
| "groupby" >> beam.GroupByKey()
| 'modify' >> beam.Map(modify_data3)
| 'write' >> beam.io.Write(beam.io.BigQuerySink(
'testdataset.testtable5',
schema='count_type:STRING, sum:INTEGER',
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE))
)
p7.run() # .wait_until_finish()
| apache-2.0 | -6,978,195,330,371,331,000 | 32.5 | 110 | 0.724295 | false | 2.305927 | false | false | false |
brandicted/nefertari-es | tests/test_fields.py | 2 | 7580 | import datetime
from mock import Mock, patch
import pytest
from elasticsearch_dsl.exceptions import ValidationException
from elasticsearch_dsl.utils import AttrList
from nefertari_es import fields
from .fixtures import (
id_model,
story_model,
tag_model,
person_model,
parent_model,
)
class TestFieldHelpers(object):
def test_custom_mapping_mixin(self):
class DummyBase(object):
def to_dict(self):
return {'foo': 1, 'bar': 2}
class DummyField(fields.CustomMappingMixin, DummyBase):
_custom_mapping = {'foo': 3, 'zoo': 4}
obj = DummyField()
assert obj.to_dict() == {'foo': 3, 'bar': 2, 'zoo': 4}
class TestFields(object):
def test_basefieldmixin(self):
class DummyBase(object):
def __init__(self, required=False):
self.required = required
class DummyField(fields.BaseFieldMixin, DummyBase):
pass
field = DummyField(primary_key=True)
assert field._primary_key
assert field.required
def test_drop_invalid_kwargs(self):
class DummyBase(object):
pass
class DummyField(fields.BaseFieldMixin, DummyBase):
_valid_kwargs = ('foo',)
field = DummyField()
assert field.drop_invalid_kwargs({'foo': 1, 'bar': 2}) == {
'foo': 1}
def test_idfield(self):
field = fields.IdField()
assert field._primary_key
assert not field._required
def test_idfield_empty(self):
field = fields.IdField()
assert field._empty() is None
def test_intervalfield_to_python(self):
from datetime import timedelta
field = fields.IntervalField()
val = field._to_python(600)
assert isinstance(val, timedelta)
assert val.total_seconds() == 600
class TestDateTimeField(object):
def test_to_python_no_data(self):
obj = fields.DateTimeField()
assert obj._to_python({}) is None
assert obj._to_python([]) is None
assert obj._to_python(None) is None
assert obj._to_python('') is None
def test_to_python_datetime(self):
obj = fields.DateTimeField()
date = datetime.datetime.now()
assert obj._to_python(date) is date
def test_to_python_string_parse(self):
obj = fields.DateTimeField()
expected = datetime.datetime(year=2000, month=11, day=12)
assert obj._to_python('2000-11-12') == expected
def test_to_python_parse_failed(self):
obj = fields.DateTimeField()
with pytest.raises(ValidationException) as ex:
obj._to_python('asd')
expected = 'Could not parse datetime from the value'
assert expected in str(ex.value)
class TestTimeField(object):
def test_to_python_no_data(self):
obj = fields.TimeField()
assert obj._to_python({}) is None
assert obj._to_python([]) is None
assert obj._to_python(None) is None
assert obj._to_python('') is None
def test_to_python_time(self):
obj = fields.TimeField()
time = datetime.datetime.now().time()
assert obj._to_python(time) is time
def test_to_python_datetime(self):
obj = fields.TimeField()
date = datetime.datetime.now()
assert obj._to_python(date) == date.time()
def test_to_python_string_parse(self):
obj = fields.TimeField()
expected = datetime.time(17, 40)
assert obj._to_python('2000-11-12 17:40') == expected
def test_to_python_parse_failed(self):
obj = fields.TimeField()
with pytest.raises(ValidationException) as ex:
obj._to_python('asd')
expected = 'Could not parse time from the value'
assert expected in str(ex.value)
class TestRelationshipField(object):
def test_to_dict_nested(self, story_model,
person_model, tag_model):
story_model._nested_relationships = ('author', 'tags')
req = Mock()
s = story_model(name='Moby Dick')
assert s.to_dict(request=req) == {
'name': 'Moby Dick',
'_pk': 'Moby Dick',
'_type': 'Story'
}
s.author = person_model(name='Melville')
assert s.to_dict(request=req)['author'] == {
'_pk': 'Melville', '_type': 'Person', 'name': 'Melville'}
s.tags = [tag_model(name='whaling'), tag_model(name='literature')]
assert s.to_dict(request=req)['tags'] == [
{'_pk': 'whaling', '_type': 'Tag', 'name': 'whaling'},
{'_pk': 'literature', '_type': 'Tag', 'name': 'literature'}]
def test_to_dict_not_nested(self, story_model,
person_model, tag_model):
req = Mock()
s = story_model(name='Moby Dick')
assert s.to_dict(request=req) == {
'name': 'Moby Dick',
'_pk': 'Moby Dick',
'_type': 'Story'
}
s.author = person_model(name='Melville')
assert s.to_dict(request=req)['author'] == 'Melville'
t1 = tag_model(name='whaling')
t2 = tag_model(name='literature')
s.tags = [t1, t2]
assert s.to_dict(request=req)['tags'] == ['whaling', 'literature']
def test_to_dict_es(self, story_model, person_model, tag_model):
s = story_model(name='Moby Dick')
assert s.to_dict() == {'name': 'Moby Dick'}
a = person_model(name='Melville')
s.author = a
assert s.to_dict()['author'] == 'Melville'
t1 = tag_model(name='whaling')
t2 = tag_model(name='literature')
s.tags = [t1, t2]
assert s.to_dict()['tags'] == ['whaling', 'literature']
class TestReferenceField(object):
def _get_field(self):
return fields.ReferenceField(
'Foo', uselist=False, backref_name='zoo')
def test_init(self):
field = self._get_field()
assert field._doc_class_name == 'Foo'
assert not field._multi
assert field._backref_kwargs == {'name': 'zoo'}
def test_drop_invalid_kwargs(self):
field = self._get_field()
kwargs = {'required': True, 'backref_required': True, 'Foo': 1}
assert field.drop_invalid_kwargs(kwargs) == {
'required': True, 'backref_required': True}
@patch('nefertari_es.meta.get_document_cls')
def test_doc_class(self, mock_get):
field = self._get_field()
assert field._doc_class_name == 'Foo'
klass = field._doc_class
mock_get.assert_called_once_with('Foo')
assert klass == mock_get()
def test_empty_not_required(self):
field = self._get_field()
field._required = False
field._multi = True
val = field.empty()
assert isinstance(val, AttrList)
assert len(val) == 0
field._multi = False
assert field.empty() is None
@patch('nefertari_es.meta.get_document_cls')
def test_clean(self, mock_get):
mock_get.return_value = dict
field = self._get_field()
field._doc_class
val = 'asdasdasdasd'
assert field.clean(val) is val
class TestIdField(object):
def test_read_only(self, id_model):
d = id_model()
with pytest.raises(AttributeError) as e:
d.id = 'fail'
assert str(e.value) == 'id is read-only'
def test_sync_id(self, id_model):
d = id_model()
assert d.id is None
# simulate a save
d._id = 'ID'
d._sync_id_field()
assert d.id == d._id
| apache-2.0 | 5,352,501,933,605,441,000 | 30.583333 | 74 | 0.575858 | false | 3.651252 | true | false | false |
quequino/Revolution | script.tvguidedixie/gui.py | 1 | 72339 | #
# Copyright (C) 2014 Sean Poyser - With acknowledgement to some original code by twinther (Tommy Winther)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import datetime
import threading
import time
import xbmc
import xbmcgui
import source as src
from notification import Notification
from strings import *
import buggalo
import streaming
import xbmcaddon
import xbmc
import os
import shutil
import urllib
import dixie
import deleteDB
xbmcgui.Window(10000).setProperty('TVG_TEST_TEXT', 'THIS IS A TEST')
ADDON = xbmcaddon.Addon(id = 'script.tvguidedixie')
HOME = ADDON.getAddonInfo('path')
TITLE = 'OnTapp.TV'
VERSION = '2.3.2'
MASHMODE = (ADDON.getSetting('mashmode') == 'true')
SKIN = ADDON.getSetting('dixie.skin')
GMTOFFSET = dixie.GetGMTOffset()
TRAILERS = ADDON.getSetting('trailers.addon')
USTV = ADDON.getSetting('ustv.addon')
datapath = xbmc.translatePath(ADDON.getAddonInfo('profile'))
extras = os.path.join(datapath, 'extras')
skinfolder = os.path.join(datapath, extras, 'skins')
mashpath = os.path.join(skinfolder, 'Mash Up')
skinpath = os.path.join(skinfolder, SKIN)
mashfile = os.path.join(xbmc.translatePath('special://profile/addon_data/plugin.video.movie25/Dixie/mashup.ini'))
if MASHMODE:
PATH = mashpath
else:
PATH = skinpath
dixie.SetSetting('mashmode', 'false')
if TRAILERS == 'HD-Trailers.net':
trailers = 'XBMC.RunAddon(plugin.video.hdtrailers_net)'
if TRAILERS == 'Apple iTunes Trailers':
trailers = 'XBMC.RunAddon(plugin.video.itunes_trailers)'
if USTV == 'Hulu':
ustv = 'XBMC.RunAddon(plugin.video.hulu)'
if USTV == 'Hulu-Beta':
ustv = 'XBMC.RunAddon(plugin.video.hulu-beta)'
if USTV == 'USTV VoD':
ustv = 'XBMC.RunAddon(plugin.video.ustvvod)'
xml_file = os.path.join('script-tvguide-main.xml')
if os.path.join(SKIN, 'extras', 'skins', 'Default', '720p', xml_file):
XML = xml_file
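# NOTE: os.path.join with a single argument is a no-op, and the test above is
# always true because os.path.join returns a non-empty string; a genuine
# existence check would use os.path.exists(...). The same pattern recurs in
# the dialog classes below.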
DEBUG = False
MODE_EPG = 'EPG'
MODE_TV = 'TV'
MODE_OSD = 'OSD'
ACTION_LEFT = 1
ACTION_RIGHT = 2
ACTION_UP = 3
ACTION_DOWN = 4
ACTION_PAGE_UP = 5
ACTION_PAGE_DOWN = 6
ACTION_SELECT_ITEM = 7
ACTION_PARENT_DIR = 9
ACTION_PREVIOUS_MENU = 10
ACTION_SHOW_INFO = 11
ACTION_NEXT_ITEM = 14
ACTION_PREV_ITEM = 15
ACTION_MOUSE_WHEEL_UP = 104
ACTION_MOUSE_WHEEL_DOWN = 105
ACTION_MOUSE_MOVE = 107
ACTION_TOUCH_TAP = 401
ACTION_TOUCH_LONGPRESS = 411
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_GESTURE_SWIPE_RIGHT = 521
ACTION_GESTURE_SWIPE_UP = 531
ACTION_GESTURE_SWIPE_DOWN = 541
ACTION_GESTURE_ZOOM = 502
ACTION_GESTURE_ROTATE = 503
ACTION_GESTURE_PAN = 504
KEY_NAV_BACK = 92
KEY_CONTEXT_MENU = 117
KEY_HOME = 159
KEY_SUPER_SEARCH = 77
CHANNELS_PER_PAGE = 8
TEXT_COLOR = '0xffffffff'
FOCUSED_COLOR = '0xffffffff'
SHADOW_COLOR = 'None'
REMOVE_STRM_FILE = strings(REMOVE_STRM_FILE)
CHOOSE_STRM_FILE = strings(CHOOSE_STRM_FILE)
REMIND_PROGRAM = strings(REMIND_PROGRAM)
DONT_REMIND_PROGRAM = strings(DONT_REMIND_PROGRAM)
HALF_HOUR = datetime.timedelta(minutes = 30)
try:
    # load optional overrides from the skin's epg.cfg file
f = open(os.path.join(PATH, 'epg.cfg'))
cfg = f.readlines()
f.close()
for l in cfg:
l = l.strip()
        # basic sanity check: only execute simple KEY=VALUE lines
pts = l.split('=')
if len(pts) == 2:
exec(l)
except:
pass
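# Illustrative epg.cfg contents (hypothetical values; any module constant
# above can be overridden this way):
#   CHANNELS_PER_PAGE=10
#   TEXT_COLOR='0xffc0c0c0'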
def debug(s):
if DEBUG: xbmc.log(str(s), xbmc.LOGDEBUG)
class Point(object):
def __init__(self):
self.x = self.y = 0
def __repr__(self):
return 'Point(x=%d, y=%d)' % (self.x, self.y)
class EPGView(object):
def __init__(self):
self.top = self.left = self.right = self.bottom = self.width = self.cellHeight = 0
class ControlAndProgram(object):
def __init__(self, control, program):
self.control = control
self.program = program
class TVGuide(xbmcgui.WindowXML):
C_MAIN_DATE = 4000
C_MAIN_TITLE = 4020
C_MAIN_TIME = 4021
C_MAIN_DESCRIPTION = 4022
C_MAIN_IMAGE = 4023
C_MAIN_LOGO = 4024
C_MAIN_TIMEBAR = 4100
C_MAIN_LOADING = 4200
C_MAIN_LOADING_PROGRESS = 4201
C_MAIN_LOADING_TIME_LEFT = 4202
C_MAIN_LOADING_CANCEL = 4203
C_MAIN_MOUSE_CONTROLS = 4300
C_MAIN_MOUSE_HOME = 4301
C_MAIN_MOUSE_LEFT = 4302
C_MAIN_MOUSE_UP = 4303
C_MAIN_MOUSE_DOWN = 4304
C_MAIN_MOUSE_RIGHT = 4305
C_MAIN_MOUSE_EXIT = 4306
C_MAIN_BACKGROUND = 4600
C_MAIN_EPG = 5000
C_MAIN_EPG_VIEW_MARKER = 5001
C_MAIN_OSD = 6000
C_MAIN_OSD_TITLE = 6001
C_MAIN_OSD_TIME = 6002
C_MAIN_OSD_DESCRIPTION = 6003
C_MAIN_OSD_CHANNEL_LOGO = 6004
C_MAIN_OSD_CHANNEL_TITLE = 6005
C_MAIN_BLACKOUT = 9999
def __new__(cls):
return super(TVGuide, cls).__new__(cls, XML, PATH)
def __init__(self):
super(TVGuide, self).__init__()
self.initialized = False
self.refresh = False
self.notification = None
self.redrawingEPG = False
self.timebarVisible = False
self.isClosing = False
self.controlAndProgramList = list()
self.ignoreMissingControlIds = list()
self.channelIdx = 0
self.focusPoint = Point()
self.epgView = EPGView()
self.streamingService = streaming.StreamsService()
self.player = xbmc.Player()
self.database = None
self.categoriesList = ADDON.getSetting('categories').split('|')
if self.categoriesList[0] == '':
self.categoriesList = []
self.mode = MODE_EPG
self.currentChannel = None
self.osdEnabled = ADDON.getSetting('enable.osd') == 'true' and ADDON.getSetting('alternative.playback') != 'true'
self.alternativePlayback = ADDON.getSetting('alternative.playback') == 'true'
self.osdChannel = None
self.osdProgram = None
self.touch = False
self.prevCtrl = -1
if ADDON.getSetting('enable.touch') == 'true':
self.touch = True
# find nearest half hour
self.viewStartDate = datetime.datetime.today()
self.viewStartDate -= datetime.timedelta(minutes = self.viewStartDate.minute % 30, seconds = self.viewStartDate.second)
def getControl(self, controlId):
try:
return super(TVGuide, self).getControl(controlId)
except:
if controlId in self.ignoreMissingControlIds:
return None
if not self.isClosing:
xbmcgui.Dialog().ok(buggalo.getRandomHeading(), strings(SKIN_ERROR_LINE1), strings(SKIN_ERROR_LINE2), strings(SKIN_ERROR_LINE3))
self.close()
return None
def close(self):
try:
self.timer.cancel()
del self.timer
except:
pass
if not self.isClosing:
self.isClosing = True
if self.player.isPlaying():
self.player.stop()
if self.database:
self.database.close(self.final)
else:
self.final()
def final(self):
xbmcgui.WindowXML.close(self)
@buggalo.buggalo_try_except({'method' : 'TVGuide.onInit'})
def onInit(self):
if self.initialized:
if self.refresh:
self.refresh = False
self.database.resetChannels()
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
# onInit(..) is invoked again by XBMC after a video addon exits after being invoked by XBMC.RunPlugin(..)
return
self.initialized = True
self._hideControl(self.C_MAIN_MOUSE_CONTROLS, self.C_MAIN_OSD)
self._showControl(self.C_MAIN_EPG, self.C_MAIN_LOADING)
self._showControl(self.C_MAIN_BLACKOUT)
self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(BACKGROUND_UPDATE_IN_PROGRESS))
self.setFocusId(self.C_MAIN_LOADING_CANCEL)
control = self.getControl(self.C_MAIN_EPG_VIEW_MARKER)
if control:
left, top = control.getPosition()
self.focusPoint.x = left
self.focusPoint.y = top
self.epgView.left = left
self.epgView.top = top
self.epgView.right = left + control.getWidth()
self.epgView.bottom = top + control.getHeight()
self.epgView.width = control.getWidth()
self.epgView.cellHeight = (control.getHeight() / CHANNELS_PER_PAGE)
try:
self.database = src.Database(CHANNELS_PER_PAGE)
except src.SourceNotConfiguredException:
self.onSourceNotConfigured()
self.close()
return
self.database.initializeS(self.onSourceInitializedS, self.isSourceInitializationCancelled)
self.updateTimebar()
@buggalo.buggalo_try_except({'method' : 'TVGuide.onAction'})
def onAction(self, action):
debug('Mode is: %s' % self.mode)
        try:
            # NOTE: controlInFocus is never defined in this scope, so the
            # lookup below raises NameError and the block is a no-op;
            # execution always falls through to the mode dispatch
            program = self._getProgramFromControl(controlInFocus)
            if program is None:
                return
            # if program is not None:
            #     self._showContextMenu(program)
        except:
            pass
if self.mode == MODE_TV:
self.onActionTVMode(action)
elif self.mode == MODE_OSD:
self.onActionOSDMode(action)
elif self.mode == MODE_EPG:
self.onActionEPGMode(action)
def onActionTVMode(self, action):
if action.getId() == ACTION_PAGE_UP:
self._channelUp()
elif action.getId() == ACTION_PAGE_DOWN:
self._channelDown()
elif not self.osdEnabled:
pass # skip the rest of the actions
elif action.getId() in [ACTION_PARENT_DIR, KEY_NAV_BACK, KEY_CONTEXT_MENU, ACTION_PREVIOUS_MENU]:
self.viewStartDate = datetime.datetime.today()
self.viewStartDate -= datetime.timedelta(minutes = self.viewStartDate.minute % 30, seconds = self.viewStartDate.second)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif action.getId() == ACTION_SHOW_INFO:
self._showOsd()
def onActionOSDMode(self, action):
if action.getId() == ACTION_SHOW_INFO:
self._hideOsd()
elif action.getId() in [ACTION_PARENT_DIR, KEY_NAV_BACK, KEY_CONTEXT_MENU, ACTION_PREVIOUS_MENU]:
self._hideOsd()
self.viewStartDate = datetime.datetime.today()
self.viewStartDate -= datetime.timedelta(minutes = self.viewStartDate.minute % 30, seconds = self.viewStartDate.second)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif action.getId() == ACTION_SELECT_ITEM:
if self.playChannel(self.osdChannel):
self._hideOsd()
elif action.getId() == ACTION_PAGE_UP:
self._channelUp()
self._showOsd()
elif action.getId() == ACTION_PAGE_DOWN:
self._channelDown()
self._showOsd()
elif action.getId() == ACTION_UP:
self.osdChannel = self.database.getPreviousChannel(self.osdChannel)
self.osdProgram = self.database.getCurrentProgram(self.osdChannel)
self._showOsd()
elif action.getId() == ACTION_DOWN:
self.osdChannel = self.database.getNextChannel(self.osdChannel)
self.osdProgram = self.database.getCurrentProgram(self.osdChannel)
self._showOsd()
elif action.getId() == ACTION_LEFT:
previousProgram = self.database.getPreviousProgram(self.osdProgram)
if previousProgram:
self.osdProgram = previousProgram
self._showOsd()
elif action.getId() == ACTION_RIGHT:
nextProgram = self.database.getNextProgram(self.osdProgram)
if nextProgram:
self.osdProgram = nextProgram
self._showOsd()
def onActionEPGMode(self, action):
actionId = self.checkTouch(action)
if actionId == None:
return
if actionId in [ACTION_PARENT_DIR, KEY_NAV_BACK, ACTION_PREVIOUS_MENU]:
self.close()
return
elif actionId == ACTION_MOUSE_MOVE:
self._showControl(self.C_MAIN_MOUSE_CONTROLS)
return
elif actionId == KEY_CONTEXT_MENU:
if self.player.isPlaying():
self._hideEpg()
controlInFocus = None
currentFocus = self.focusPoint
try:
controlInFocus = self.getFocus()
if controlInFocus in [elem.control for elem in self.controlAndProgramList]:
(left, top) = controlInFocus.getPosition()
currentFocus = Point()
currentFocus.x = left + (controlInFocus.getWidth() / 2)
currentFocus.y = top + (controlInFocus.getHeight() / 2)
except Exception, e:
control = self._findControlAt(self.focusPoint)
if control is None and len(self.controlAndProgramList) > 0:
control = self.controlAndProgramList[0].control
if control is not None:
if not self.touch:
self.setFocus(control)
return
if actionId == ACTION_LEFT:
self._left(currentFocus)
elif actionId == ACTION_RIGHT:
self._right(currentFocus)
elif actionId == ACTION_UP:
self._up(currentFocus)
elif actionId == ACTION_DOWN:
self._down(currentFocus)
elif actionId == ACTION_NEXT_ITEM:
self._nextDay()
elif actionId == ACTION_PREV_ITEM:
self._previousDay()
elif actionId == ACTION_PAGE_UP:
self._moveUp(CHANNELS_PER_PAGE)
elif actionId == ACTION_PAGE_DOWN:
self._moveDown(CHANNELS_PER_PAGE)
elif actionId == ACTION_MOUSE_WHEEL_UP:
self._moveUp(scrollEvent = True)
elif actionId == ACTION_MOUSE_WHEEL_DOWN:
self._moveDown(scrollEvent = True)
elif actionId == KEY_HOME:
self.viewStartDate = datetime.datetime.today()
self.viewStartDate -= datetime.timedelta(minutes = self.viewStartDate.minute % 30, seconds = self.viewStartDate.second)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif actionId in [KEY_CONTEXT_MENU] and controlInFocus is not None:
program = self._getProgramFromControl(controlInFocus)
if program is not None:
self._showContextMenu(program)
elif actionId == KEY_SUPER_SEARCH:
try:
program = self._getProgramFromControl(controlInFocus)
xbmc.executebuiltin('ActivateWindow(%d,"plugin://%s/?mode=%d&keyword=%s")' % (10025,'plugin.program.super.favourites', 0, urllib.quote_plus(program.title)))
except:
pass
def checkTouch(self, action):
id = action.getId()
if id not in [ACTION_GESTURE_ZOOM, ACTION_GESTURE_ROTATE, ACTION_GESTURE_PAN, ACTION_TOUCH_TAP, ACTION_TOUCH_LONGPRESS, ACTION_GESTURE_SWIPE_LEFT, ACTION_GESTURE_SWIPE_RIGHT, ACTION_GESTURE_SWIPE_UP, ACTION_GESTURE_SWIPE_DOWN]:
return id
if id in [ACTION_GESTURE_ZOOM, ACTION_GESTURE_ROTATE]:
return id
if id == ACTION_TOUCH_TAP:
return id
try: controlInFocus = self.getFocus()
except: controlInFocus = None
if controlInFocus:
if self._getProgramFromControl(controlInFocus) != None:
return id
#never triggered due to back action
#if id == ACTION_TOUCH_LONGPRESS:
# return KEY_HOME
if id == ACTION_GESTURE_SWIPE_LEFT:
self.onClick(self.C_MAIN_MOUSE_LEFT)
return None
if id == ACTION_GESTURE_SWIPE_RIGHT:
self.onClick(self.C_MAIN_MOUSE_RIGHT)
return None
if id == ACTION_GESTURE_SWIPE_UP:
#return ACTION_MOUSE_WHEEL_UP
self.onClick(self.C_MAIN_MOUSE_UP)
return None
if id == ACTION_GESTURE_SWIPE_DOWN:
#return ACTION_MOUSE_WHEEL_DOWN
self.onClick(self.C_MAIN_MOUSE_DOWN)
return None
return id
@buggalo.buggalo_try_except({'method' : 'TVGuide.onClick'})
def onClick(self, controlId):
if controlId in [self.C_MAIN_LOADING_CANCEL, self.C_MAIN_MOUSE_EXIT]:
self.close()
return
if self.isClosing:
return
if controlId == self.C_MAIN_MOUSE_HOME:
self.viewStartDate = datetime.datetime.today()
self.viewStartDate -= datetime.timedelta(minutes = self.viewStartDate.minute % 30, seconds = self.viewStartDate.second)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
return
elif controlId == self.C_MAIN_MOUSE_LEFT:
self.viewStartDate -= datetime.timedelta(hours = 2)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
return
elif controlId == self.C_MAIN_MOUSE_UP:
self._moveUp(count = CHANNELS_PER_PAGE)
return
elif controlId == self.C_MAIN_MOUSE_DOWN:
self._moveDown(count = CHANNELS_PER_PAGE)
return
elif controlId == self.C_MAIN_MOUSE_RIGHT:
when = self.viewStartDate + datetime.timedelta(hours = 2)
if when.date() > self.database.updateLimit:
return
self.viewStartDate = when
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
return
prevCtrl = self.prevCtrl
self.prevCtrl = controlId
if self.touch:
if prevCtrl != self.prevCtrl:
return
program = self._getProgramFromControl(self.getControl(controlId))
if program is None:
return
if self.touch:
self._showContextMenu(program)
return
self.tryProgram(program)
def tryProgram(self, program):
if self.playChannel(program.channel):
return
result = self.streamingService.detectStream(program.channel)
if not result:
if self.touch:
return
# could not detect stream, show context menu
self._showContextMenu(program)
elif type(result) == str:
# one single stream detected, save it and start streaming
self.database.setCustomStreamUrl(program.channel, result)
self.playChannel(program.channel)
else:
# multiple matches, let user decide
d = ChooseStreamAddonDialog(result)
d.doModal()
if d.stream is not None:
self.database.setCustomStreamUrl(program.channel, d.stream)
self.playChannel(program.channel)
def _showContextMenu(self, program):
self._hideControl(self.C_MAIN_MOUSE_CONTROLS)
d = PopupMenu(self.database, program, not program.notificationScheduled, self.touch)
d.doModal()
buttonClicked = d.buttonClicked
del d
if buttonClicked == PopupMenu.C_POPUP_REMIND:
if program.notificationScheduled:
self.notification.removeNotification(program)
else:
self.notification.addNotification(program)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif buttonClicked == PopupMenu.C_POPUP_CHOOSE_STREAM:
d = StreamSetupDialog(self.database, program.channel)
d.doModal()
del d
self._showContextMenu(program)
return
elif buttonClicked == PopupMenu.C_POPUP_PLAY:
if self.touch:
self.tryProgram(program)
else:
self.playChannel(program.channel)
elif buttonClicked == PopupMenu.C_POPUP_CHANNELS:
d = ChannelsMenu(self.database)
d.doModal()
del d
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif buttonClicked == PopupMenu.C_POPUP_CATEGORIES:
d = CategoriesMenu(self.database, self.categoriesList)
d.doModal()
self.categoriesList = d.currentCategories
del d
dixie.SetSetting('categories', '|'.join(self.categoriesList))
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif buttonClicked == PopupMenu.C_POPUP_SETTINGS:
addonPath = HOME
script = os.path.join(addonPath, 'openSettings.py')
args = ''
cmd = 'AlarmClock(%s,RunScript(%s,%s),%d,True)' % ('launch', script, args, 0)
xbmc.executebuiltin(cmd)
self.close()
elif buttonClicked == PopupMenu.C_POPUP_IPLAYER:
xbmc.executebuiltin('XBMC.RunAddon(plugin.video.iplayer)')
elif buttonClicked == PopupMenu.C_POPUP_ITVPLAYER:
xbmc.executebuiltin('XBMC.RunAddon(plugin.video.itv)')
elif buttonClicked == PopupMenu.C_POPUP_OTTOOLS:
self.refresh = True
xbmc.executebuiltin('XBMC.RunAddon(script.tvguidedixie.tools)')
elif buttonClicked == PopupMenu.C_POPUP_USTV:
xbmc.executebuiltin(ustv)
elif buttonClicked == PopupMenu.C_POPUP_SUPERFAVES:
xbmc.executebuiltin('XBMC.RunAddon(plugin.program.super.favourites)')
# import sys
# sfAddon = xbmcaddon.Addon(id = 'plugin.program.super.favourites')
# sfPath = sfAddon.getAddonInfo('path')
# sys.path.insert(0, sfPath)
# import chooser
# chooser.Main()
elif buttonClicked == PopupMenu.C_POPUP_VPN:
xbmc.executebuiltin('XBMC.RunScript(special://home/addons/plugin.program.vpnicity/menu.py,%s)' % self.database.getStreamUrl(program.channel))
elif buttonClicked == PopupMenu.C_POPUP_SUPER_SEARCH:
xbmc.executebuiltin('ActivateWindow(%d,"plugin://%s/?mode=%d&keyword=%s",return)' % (10025,'plugin.program.super.favourites', 0, urllib.quote_plus(program.title)))
elif buttonClicked == PopupMenu.C_POPUP_QUIT:
self.close()
def setFocusId(self, controlId):
control = self.getControl(controlId)
if control:
self.setFocus(control)
def setFocus(self, control):
debug('setFocus %d' % control.getId())
if control in [elem.control for elem in self.controlAndProgramList]:
debug('Focus before %s' % self.focusPoint)
(left, top) = control.getPosition()
if left > self.focusPoint.x or left + control.getWidth() < self.focusPoint.x:
self.focusPoint.x = left
self.focusPoint.y = top + (control.getHeight() / 2)
debug('New focus at %s' % self.focusPoint)
super(TVGuide, self).setFocus(control)
@buggalo.buggalo_try_except({'method' : 'TVGuide.onFocus'})
def onFocus(self, controlId):
try:
controlInFocus = self.getControl(controlId)
except Exception:
return
program = self._getProgramFromControl(controlInFocus)
if program is None:
return
self.setControlLabel(self.C_MAIN_TITLE, '[B]%s[/B]' % program.title)
self.setControlLabel(self.C_MAIN_TIME, '[B]%s - %s[/B]' % (self.formatTime(program.startDate+GMTOFFSET), self.formatTime(program.endDate+GMTOFFSET)))
if program.description:
description = program.description
else:
description = strings(NO_DESCRIPTION)
self.setControlText(self.C_MAIN_DESCRIPTION, description)
if program.channel.logo is not None:
self.setControlImage(self.C_MAIN_LOGO, program.channel.logo)
if program.imageSmall is not None:
self.setControlImage(self.C_MAIN_IMAGE, program.imageSmall)
if ADDON.getSetting('program.background.enabled') == 'true' and program.imageLarge is not None:
self.setControlImage(self.C_MAIN_BACKGROUND, program.imageLarge)
if not self.osdEnabled and self.player.isPlaying():
self.player.stop()
def _left(self, currentFocus):
control = self._findControlOnLeft(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.viewStartDate -= datetime.timedelta(hours = 2)
self.focusPoint.x = self.epgView.right
self.onRedrawEPG(self.channelIdx, self.viewStartDate, focusFunction=self._findControlOnLeft)
def _right(self, currentFocus):
control = self._findControlOnRight(currentFocus)
if control is not None:
self.setFocus(control)
return
when = self.viewStartDate + datetime.timedelta(hours = 2)
if when.date() > self.database.updateLimit:
return
self.viewStartDate = when
self.focusPoint.x = self.epgView.left
self.onRedrawEPG(self.channelIdx, self.viewStartDate, focusFunction=self._findControlOnRight)
def _up(self, currentFocus):
currentFocus.x = self.focusPoint.x
control = self._findControlAbove(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.focusPoint.y = self.epgView.bottom
self.onRedrawEPG(self.channelIdx - CHANNELS_PER_PAGE, self.viewStartDate, focusFunction=self._findControlAbove)
def _down(self, currentFocus):
currentFocus.x = self.focusPoint.x
control = self._findControlBelow(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.focusPoint.y = self.epgView.top
self.onRedrawEPG(self.channelIdx + CHANNELS_PER_PAGE, self.viewStartDate, focusFunction=self._findControlBelow)
def _nextDay(self):
self.viewStartDate += datetime.timedelta(days = 1)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
def _previousDay(self):
self.viewStartDate -= datetime.timedelta(days = 1)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
def _moveUp(self, count = 1, scrollEvent = False):
if scrollEvent:
self.onRedrawEPG(self.channelIdx - count, self.viewStartDate)
else:
self.focusPoint.y = self.epgView.bottom
self.onRedrawEPG(self.channelIdx - count, self.viewStartDate, focusFunction = self._findControlAbove)
def _moveDown(self, count = 1, scrollEvent = False):
if scrollEvent:
self.onRedrawEPG(self.channelIdx + count, self.viewStartDate)
else:
self.focusPoint.y = self.epgView.top
self.onRedrawEPG(self.channelIdx + count, self.viewStartDate, focusFunction=self._findControlBelow)
def _channelUp(self):
channel = self.database.getNextChannel(self.currentChannel)
self.playChannel(channel)
def _channelDown(self):
channel = self.database.getPreviousChannel(self.currentChannel)
self.playChannel(channel)
def playChannel(self, channel):
self.currentChannel = channel
wasPlaying = self.player.isPlaying()
url = self.database.getStreamUrl(channel)
if url:
if not wasPlaying:
self._hideControl(self.C_MAIN_BLACKOUT)
path = os.path.join(ADDON.getAddonInfo('path'), 'player.py')
xbmc.executebuiltin('XBMC.RunScript(%s,%s,%d)' % (path, url, self.osdEnabled))
if not wasPlaying:
self._hideEpg()
threading.Timer(2, self.waitForPlayBackStopped).start()
self.osdProgram = self.database.getCurrentProgram(self.currentChannel)
return url is not None
def waitForPlayBackStopped(self):
for retry in range(0, 100):
time.sleep(0.1)
if self.player.isPlaying():
break
self._showControl(self.C_MAIN_BLACKOUT)
while self.player.isPlaying() and not xbmc.abortRequested and not self.isClosing:
time.sleep(0.5)
self.onPlayBackStopped()
def _showOsd(self):
if not self.osdEnabled:
return
if self.mode != MODE_OSD:
self.osdChannel = self.currentChannel
if self.osdProgram is not None:
self.setControlLabel(self.C_MAIN_OSD_TITLE, '[B]%s[/B]' % self.osdProgram.title)
self.setControlLabel(self.C_MAIN_OSD_TIME, '[B]%s - %s[/B]' % (self.formatTime(self.osdProgram.startDate), self.formatTime(self.osdProgram.endDate)))
self.setControlText(self.C_MAIN_OSD_DESCRIPTION, self.osdProgram.description)
self.setControlLabel(self.C_MAIN_OSD_CHANNEL_TITLE, self.osdChannel.title)
if self.osdProgram.channel.logo is not None:
self.setControlImage(self.C_MAIN_OSD_CHANNEL_LOGO, self.osdProgram.channel.logo)
else:
self.setControlImage(self.C_MAIN_OSD_CHANNEL_LOGO, '')
self.mode = MODE_OSD
self._showControl(self.C_MAIN_OSD)
def _hideOsd(self):
self.mode = MODE_TV
self._hideControl(self.C_MAIN_OSD)
def _hideEpg(self):
self._hideControl(self.C_MAIN_EPG)
self.mode = MODE_TV
self._clearEpg()
def onRedrawEPG(self, channelStart, startTime, focusFunction = None):
if self.redrawingEPG or (self.database is not None and self.database.updateInProgress) or self.isClosing:
debug('onRedrawEPG - already redrawing')
return # ignore redraw request while redrawing
debug('onRedrawEPG')
self.redrawingEPG = True
self.mode = MODE_EPG
self._showControl(self.C_MAIN_EPG)
self.updateTimebar(scheduleTimer = False)
# show Loading screen
self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(CALCULATING_REMAINING_TIME))
self._showControl(self.C_MAIN_LOADING)
self.setFocusId(self.C_MAIN_LOADING_CANCEL)
self.hideTimebar()
# remove existing controls
self._clearEpg()
try:
self.channelIdx, channels, programs = self.database.getEPGView(channelStart, startTime, clearExistingProgramList = False, categories = self.categoriesList, nmrChannels = CHANNELS_PER_PAGE)
if len(programs) == 0:
self.channelIdx, channels, programs = self.database.getEPGView(channelStart, startTime, clearExistingProgramList = False, nmrChannels = CHANNELS_PER_PAGE)
except src.SourceException:
self.onEPGLoadError()
return
channelsWithoutPrograms = list(channels)
# date and time row
self.setControlLabel(self.C_MAIN_DATE, self.formatDate(self.viewStartDate))
for col in range(1, 5):
self.setControlLabel(4000 + col, self.formatTime(startTime))
startTime += HALF_HOUR
if programs is None:
self.onEPGLoadError()
return
# set channel logo or text
for idx in range(0, CHANNELS_PER_PAGE):
if idx >= len(channels):
self.setControlImage(4110 + idx, ' ')
self.setControlLabel(4010 + idx, ' ')
else:
channel = channels[idx]
self.setControlLabel(4010 + idx, channel.title)
if channel.logo is not None:
self.setControlImage(4110 + idx, channel.logo)
else:
self.setControlImage(4110 + idx, ' ')
for program in programs:
idx = channels.index(program.channel)
if program.channel in channelsWithoutPrograms:
channelsWithoutPrograms.remove(program.channel)
startDelta = program.startDate - self.viewStartDate + GMTOFFSET
stopDelta = program.endDate - self.viewStartDate + GMTOFFSET
cellStart = self._secondsToXposition(startDelta.seconds)
if startDelta.days < 0:
cellStart = self.epgView.left
cellWidth = self._secondsToXposition(stopDelta.seconds) - cellStart
if cellStart + cellWidth > self.epgView.right:
cellWidth = self.epgView.right - cellStart
if cellWidth > 1:
if program.notificationScheduled:
noFocusTexture = 'tvguide-program-red.png'
focusTexture = 'tvguide-program-red-focus.png'
else:
noFocusTexture = 'tvguide-program-grey.png'
focusTexture = 'tvguide-program-grey-focus.png'
if cellWidth < 25:
title = '' # Text will overflow outside the button if it is too narrow
else:
title = program.title
control = xbmcgui.ControlButton(
cellStart,
self.epgView.top + self.epgView.cellHeight * idx,
cellWidth - 2,
self.epgView.cellHeight - 2,
title,
noFocusTexture = noFocusTexture,
focusTexture = focusTexture,
textColor = TEXT_COLOR,
focusedColor = FOCUSED_COLOR,
shadowColor = SHADOW_COLOR
)
self.controlAndProgramList.append(ControlAndProgram(control, program))
for channel in channelsWithoutPrograms:
idx = channels.index(channel)
control = xbmcgui.ControlButton(
self.epgView.left,
self.epgView.top + self.epgView.cellHeight * idx,
(self.epgView.right - self.epgView.left) - 2,
self.epgView.cellHeight - 2,
strings(NO_PROGRAM_AVAILABLE),
noFocusTexture='tvguide-program-grey.png',
focusTexture='tvguide-program-grey-focus.png',
textColor = TEXT_COLOR,
focusedColor = FOCUSED_COLOR,
shadowColor = SHADOW_COLOR
)
now = datetime.datetime.today()
then = now + datetime.timedelta(minutes = 24*60)
program = src.Program(channel, strings(NO_PROGRAM_AVAILABLE), now, then, "", "")
self.controlAndProgramList.append(ControlAndProgram(control, program))
# add program controls
if focusFunction is None:
focusFunction = self._findControlAt
focusControl = focusFunction(self.focusPoint)
controls = [elem.control for elem in self.controlAndProgramList]
self.addControls(controls)
if focusControl is not None:
debug('onRedrawEPG - setFocus %d' % focusControl.getId())
self.setFocus(focusControl)
self.ignoreMissingControlIds.extend([elem.control.getId() for elem in self.controlAndProgramList])
if focusControl is None and len(self.controlAndProgramList) > 0:
self.setFocus(self.controlAndProgramList[0].control)
self._hideControl(self.C_MAIN_LOADING)
self.showTimebar()
self.redrawingEPG = False
def _clearEpg(self):
controls = [elem.control for elem in self.controlAndProgramList]
try:
self.removeControls(controls)
except RuntimeError:
for elem in self.controlAndProgramList:
try:
self.removeControl(elem.control)
except RuntimeError:
pass # happens if we try to remove a control that doesn't exist
del self.controlAndProgramList[:]
def onEPGLoadError(self):
print 'Delete DB OnTapp.TV - onEPGLoadError'
deleteDB.deleteDB()
self.redrawingEPG = False
self._hideControl(self.C_MAIN_LOADING)
xbmcgui.Dialog().ok(strings(LOAD_ERROR_TITLE), strings(LOAD_ERROR_LINE1), strings(LOAD_ERROR_LINE2), strings(LOAD_ERROR_LINE3))
print '****** OnTapp.TV. Possible unicode text error. *******'
self.close()
def onSourceNotConfigured(self):
self.redrawingEPG = False
self._hideControl(self.C_MAIN_LOADING)
xbmcgui.Dialog().ok(strings(LOAD_ERROR_TITLE), strings(LOAD_ERROR_LINE1), strings(CONFIGURATION_ERROR_LINE2))
self.close()
def isSourceInitializationCancelled(self):
return xbmc.abortRequested or self.isClosing
def onSourceInitializedS(self, success):
self.database.initializeP(self.onSourceInitializedP, self.isSourceInitializationCancelled)
def onSourceInitializedP(self, success):
if success:
self.notification = Notification(self.database, ADDON.getAddonInfo('path'))
self.onRedrawEPG(0, self.viewStartDate)
# def onSourceProgressUpdate(self, percentageComplete):
# control = self.getControl(self.C_MAIN_LOADING_PROGRESS)
# if percentageComplete < 1:
# if control:
# control.setPercent(1)
# self.progressStartTime = datetime.datetime.now()
# self.progressPreviousPercentage = percentageComplete
# elif percentageComplete != self.progressPreviousPercentage:
# if control:
# control.setPercent(percentageComplete)
# self.progressPreviousPercentage = percentageComplete
# delta = datetime.datetime.now() - self.progressStartTime
#
# if percentageComplete < 20:
# self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(CALCULATING_REMAINING_TIME))
# else:
# secondsLeft = int(delta.seconds) / float(percentageComplete) * (100.0 - percentageComplete)
# if secondsLeft > 30:
# secondsLeft -= secondsLeft % 10
# self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(TIME_LEFT) % secondsLeft)
#
# return not xbmc.abortRequested and not self.isClosing
def onPlayBackStopped(self):
if not self.player.isPlaying() and not self.isClosing:
self._hideControl(self.C_MAIN_OSD)
self.viewStartDate = datetime.datetime.today()
self.viewStartDate -= datetime.timedelta(minutes = self.viewStartDate.minute % 30, seconds = self.viewStartDate.second)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
return
def _secondsToXposition(self, seconds):
return self.epgView.left + (seconds * self.epgView.width / 7200)
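    # Worked example: the grid spans 2 hours (7200s); assuming
    # epgView.left = 30 and epgView.width = 1200, a program starting
    # 3600s (1 hour) into the view maps to x = 30 + 3600*1200/7200 = 630.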
def _findControlOnRight(self, point):
distanceToNearest = 10000
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
x = left + (control.getWidth() / 2)
y = top + (control.getHeight() / 2)
if point.x < x and point.y == y:
distance = abs(point.x - x)
if distance < distanceToNearest:
distanceToNearest = distance
nearestControl = control
return nearestControl
def _findControlOnLeft(self, point):
distanceToNearest = 10000
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
x = left + (control.getWidth() / 2)
y = top + (control.getHeight() / 2)
if point.x > x and point.y == y:
distance = abs(point.x - x)
if distance < distanceToNearest:
distanceToNearest = distance
nearestControl = control
return nearestControl
def _findControlBelow(self, point):
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(leftEdge, top) = control.getPosition()
y = top + (control.getHeight() / 2)
if point.y < y:
rightEdge = leftEdge + control.getWidth()
if(leftEdge <= point.x < rightEdge
and (nearestControl is None or nearestControl.getPosition()[1] > top)):
nearestControl = control
return nearestControl
def _findControlAbove(self, point):
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(leftEdge, top) = control.getPosition()
y = top + (control.getHeight() / 2)
if point.y > y:
rightEdge = leftEdge + control.getWidth()
if(leftEdge <= point.x < rightEdge
and (nearestControl is None or nearestControl.getPosition()[1] < top)):
nearestControl = control
return nearestControl
def _findControlAt(self, point):
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
bottom = top + control.getHeight()
right = left + control.getWidth()
if left <= point.x <= right and top <= point.y <= bottom:
return control
return None
def _getProgramFromControl(self, control):
for elem in self.controlAndProgramList:
if elem.control == control:
return elem.program
return None
def _hideControl(self, *controlIds):
"""
Visibility is inverted in skin
"""
for controlId in controlIds:
control = self.getControl(controlId)
if control:
control.setVisible(True)
def _showControl(self, *controlIds):
"""
Visibility is inverted in skin
"""
for controlId in controlIds:
control = self.getControl(controlId)
if control:
control.setVisible(False)
def formatTime(self, timestamp):
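        # strip seconds from the regional time format and collapse a doubled
        # hour token, e.g. '%H%H:%M:%S' -> '%H:%M'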
format = xbmc.getRegion('time').replace(':%S', '').replace('%H%H', '%H')
return timestamp.strftime(format)
def formatDate(self, timestamp):
format = xbmc.getRegion('dateshort')
return timestamp.strftime(format)
def setControlImage(self, controlId, image):
control = self.getControl(controlId)
if control:
control.setImage(image.encode('utf-8'))
def setControlLabel(self, controlId, label):
control = self.getControl(controlId)
if control and label:
control.setLabel(label)
def setControlText(self, controlId, text):
control = self.getControl(controlId)
if control:
control.setText(text)
def hideTimebar(self):
try:
self.timebarVisible = False
self.getControl(self.C_MAIN_TIMEBAR).setVisible(self.timebarVisible)
except:
pass
def showTimebar(self):
try:
self.timebarVisible = True
self.getControl(self.C_MAIN_TIMEBAR).setVisible(self.timebarVisible)
except:
pass
def updateTimebar(self, scheduleTimer = True):
try:
# move timebar to current time
timeDelta = datetime.datetime.today() - self.viewStartDate
control = self.getControl(self.C_MAIN_TIMEBAR)
if control:
(x, y) = control.getPosition()
try:
# Sometimes raises:
# exceptions.RuntimeError: Unknown exception thrown from the call "setVisible"
control.setVisible(timeDelta.days == 0 and self.timebarVisible)
except:
pass
control.setPosition(self._secondsToXposition(timeDelta.seconds), y)
if scheduleTimer and not xbmc.abortRequested and not self.isClosing:
threading.Timer(1, self.updateTimebar).start()
except Exception:
buggalo.onExceptionRaised()
class PopupMenu(xbmcgui.WindowXMLDialog):
C_POPUP_PLAY = 4000
C_POPUP_CHOOSE_STREAM = 4001
C_POPUP_REMIND = 4002
C_POPUP_CHANNELS = 4003
C_POPUP_QUIT = 4004
C_POPUP_CHANNEL_LOGO = 4100
C_POPUP_CHANNEL_TITLE = 4101
C_POPUP_PROGRAM_TITLE = 4102
C_POPUP_CATEGORIES = 4005
C_POPUP_SETTINGS = 4007
C_POPUP_IPLAYER = 4008
C_POPUP_ITVPLAYER = 4010
C_POPUP_OTTOOLS = 4014
C_POPUP_USTV = 4011
C_POPUP_SUPER_SEARCH = 4009
C_POPUP_SUPERFAVES = 4012
C_POPUP_VPN = 4013
C_POPUP_HOME = 4006
def __new__(cls, database, program, showRemind, touch):
xml_file = os.path.join('script-tvguide-menu.xml')
if os.path.join(SKIN, 'extras', 'skins', 'Default', '720p', xml_file):
XML = xml_file
return super(PopupMenu, cls).__new__(cls, XML, PATH)
def __init__(self, database, program, showRemind, touch):
"""
@type database: source.Database
@param program:
@type program: source.Program
@param showRemind:
"""
super(PopupMenu, self).__init__()
self.database = database
self.program = program
self.showRemind = showRemind
self.buttonClicked = None
self.touch = touch
@buggalo.buggalo_try_except({'method' : 'PopupMenu.onInit'})
def onInit(self):
# self.getControl(self.C_POPUP_OTTOOLS).setVisible(False) RD -Temporary hide of the 4oD button until a new use is found for it.
programTitleControl = self.getControl(self.C_POPUP_PROGRAM_TITLE)
programTitleControl.setLabel(self.program.title)
playControl = self.getControl(self.C_POPUP_PLAY)
playControl.setLabel(strings(WATCH_CHANNEL, self.program.channel.title))
#isPlayable = self.program.channel.isPlayable()
isPlayable = self.database.isPlayable(self.program.channel)
if not isPlayable:
playControl.setEnabled(False)
self.setFocusId(self.C_POPUP_REMIND)
# self.getControl(self.C_POPUP_REMIND).setVisible(False)
# self.setFocusId(self.C_POPUP_CHOOSE_STREAM)
if self.touch or self.program.title == strings(NO_PROGRAM_AVAILABLE):
playControl.setEnabled(True)
self.setFocusId(self.C_POPUP_PLAY)
channelLogoControl = self.getControl(self.C_POPUP_CHANNEL_LOGO)
channelTitleControl = self.getControl(self.C_POPUP_CHANNEL_TITLE)
if self.program.channel.logo is not None:
channelLogoControl.setImage(self.program.channel.logo)
channelTitleControl.setVisible(False)
else:
channelLogoControl.setVisible(False)
channelTitleControl.setLabel(self.program.channel.title)
if self.database.getCustomStreamUrl(self.program.channel):
try: self.getControl(self.C_POPUP_CHOOSE_STREAM).setLabel(REMOVE_STRM_FILE)
except: pass
xbmcgui.Window(10000).setProperty('TVG_CHOOSE', REMOVE_STRM_FILE)
else:
try: self.getControl(self.C_POPUP_CHOOSE_STREAM).setLabel(CHOOSE_STRM_FILE)
except: pass
xbmcgui.Window(10000).setProperty('TVG_CHOOSE', CHOOSE_STRM_FILE)
if self.showRemind:
try: self.getControl(self.C_POPUP_REMIND).setLabel(REMIND_PROGRAM)
except: pass
xbmcgui.Window(10000).setProperty('TVG_REMIND', REMIND_PROGRAM)
else:
try: self.getControl(self.C_POPUP_REMIND).setLabel(DONT_REMIND_PROGRAM)
except: pass
xbmcgui.Window(10000).setProperty('TVG_REMIND', DONT_REMIND_PROGRAM)
try:
ctrl = self.getControl(5000)
self.setFocusId(5000)
except:
pass
xbmcgui.Window(10000).clearProperty('TVG_popup_id')
@buggalo.buggalo_try_except({'method' : 'PopupMenu.onAction'})
def onAction(self, action):
try:
id = int(xbmcgui.Window(10000).getProperty('TVG_popup_id'))
self.buttonClicked = id
self.close()
except:
pass
if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK, KEY_CONTEXT_MENU]:
self.close()
return
@buggalo.buggalo_try_except({'method' : 'PopupMenu.onClick'})
def onClick(self, controlId):
if controlId == self.C_POPUP_CHOOSE_STREAM and self.database.getCustomStreamUrl(self.program.channel):
self.database.deleteCustomStreamUrl(self.program.channel)
chooseStrmControl = self.getControl(self.C_POPUP_CHOOSE_STREAM)
chooseStrmControl.setLabel(CHOOSE_STRM_FILE)
if not self.database.isPlayable(self.program.channel):
playControl = self.getControl(self.C_POPUP_PLAY)
playControl.setEnabled(False)
else:
self.buttonClicked = controlId
self.close()
def onFocus(self, controlId):
pass
class ChannelsMenu(xbmcgui.WindowXMLDialog):
C_CHANNELS_LIST = 6000
C_CHANNELS_SELECTION_VISIBLE = 6001
C_CHANNELS_SELECTION = 6002
C_CHANNELS_SAVE = 6003
C_CHANNELS_CANCEL = 6004
def __new__(cls, database):
xml_file = os.path.join('script-tvguide-channels.xml')
if os.path.join(SKIN, 'extras', 'skins', 'Default', '720p', xml_file):
XML = xml_file
return super(ChannelsMenu, cls).__new__(cls, XML, PATH)
def __init__(self, database):
"""
@type database: source.Database
"""
super(ChannelsMenu, self).__init__()
self.database = database
self.channelList = database.getChannelList(onlyVisible = False)
self.swapInProgress = False
@buggalo.buggalo_try_except({'method' : 'ChannelsMenu.onInit'})
def onInit(self):
self.updateChannelList()
self.setFocusId(self.C_CHANNELS_LIST)
@buggalo.buggalo_try_except({'method' : 'ChannelsMenu.onAction'})
def onAction(self, action):
if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK, KEY_CONTEXT_MENU]:
self.close()
return
if self.getFocusId() == self.C_CHANNELS_LIST and action.getId() == ACTION_LEFT:
listControl = self.getControl(self.C_CHANNELS_LIST)
idx = listControl.getSelectedPosition()
buttonControl = self.getControl(self.C_CHANNELS_SELECTION)
buttonControl.setLabel('[B]%s[/B]' % self.channelList[idx].title)
self.getControl(self.C_CHANNELS_SELECTION_VISIBLE).setVisible(False)
self.setFocusId(self.C_CHANNELS_SELECTION)
elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() in [ACTION_RIGHT, ACTION_SELECT_ITEM]:
self.getControl(self.C_CHANNELS_SELECTION_VISIBLE).setVisible(True)
xbmc.sleep(350)
self.setFocusId(self.C_CHANNELS_LIST)
elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() == ACTION_UP:
listControl = self.getControl(self.C_CHANNELS_LIST)
idx = listControl.getSelectedPosition()
if idx > 0:
self.swapChannels(idx, idx - 1)
elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() == ACTION_DOWN:
listControl = self.getControl(self.C_CHANNELS_LIST)
idx = listControl.getSelectedPosition()
if idx < listControl.size() - 1:
self.swapChannels(idx, idx + 1)
@buggalo.buggalo_try_except({'method' : 'ChannelsMenu.onClick'})
def onClick(self, controlId):
if controlId == self.C_CHANNELS_LIST:
listControl = self.getControl(self.C_CHANNELS_LIST)
item = listControl.getSelectedItem()
channel = self.channelList[int(item.getProperty('idx'))]
channel.visible = 0 if channel.visible else 1
if channel.visible:
iconImage = 'tvguide-channel-visible.png'
else:
iconImage = 'tvguide-channel-hidden.png'
item.setIconImage(iconImage)
elif controlId == self.C_CHANNELS_SAVE:
self.database.saveChannelList(self.close, self.channelList)
elif controlId == self.C_CHANNELS_CANCEL:
self.close()
def onFocus(self, controlId):
pass
def updateChannelList(self):
listControl = self.getControl(self.C_CHANNELS_LIST)
listControl.reset()
for idx, channel in enumerate(self.channelList):
if channel.visible:
iconImage = 'tvguide-channel-visible.png'
else:
iconImage = 'tvguide-channel-hidden.png'
item = xbmcgui.ListItem('%3d. %s' % (idx+1, channel.title), iconImage = iconImage)
item.setProperty('idx', str(idx))
listControl.addItem(item)
def updateListItem(self, idx, item):
channel = self.channelList[idx]
item.setLabel('%3d. %s' % (idx+1, channel.title))
if channel.visible:
iconImage = 'tvguide-channel-visible.png'
else:
iconImage = 'tvguide-channel-hidden.png'
item.setIconImage(iconImage)
item.setProperty('idx', str(idx))
def swapChannels(self, fromIdx, toIdx):
if self.swapInProgress:
return
self.swapInProgress = True
c = self.channelList[fromIdx]
self.channelList[fromIdx] = self.channelList[toIdx]
self.channelList[toIdx] = c
# recalculate weight
for idx, channel in enumerate(self.channelList):
channel.weight = idx
listControl = self.getControl(self.C_CHANNELS_LIST)
self.updateListItem(fromIdx, listControl.getListItem(fromIdx))
self.updateListItem(toIdx, listControl.getListItem(toIdx))
listControl.selectItem(toIdx)
xbmc.sleep(50)
self.swapInProgress = False
class StreamSetupDialog(xbmcgui.WindowXMLDialog):
C_STREAM_STRM_TAB = 101
C_STREAM_FAVOURITES_TAB = 102
C_STREAM_ADDONS_TAB = 103
C_STREAM_PLAYLIST_TAB = 104
C_STREAM_MASHUP_TAB = 105
C_STREAM_STRM_BROWSE = 1001
C_STREAM_STRM_FILE_LABEL = 1005
C_STREAM_STRM_PREVIEW = 1002
C_STREAM_STRM_OK = 1003
C_STREAM_STRM_CANCEL = 1004
C_STREAM_FAVOURITES = 2001
C_STREAM_FAVOURITES_PREVIEW = 2002
C_STREAM_FAVOURITES_OK = 2003
C_STREAM_FAVOURITES_CANCEL = 2004
C_STREAM_ADDONS = 3001
C_STREAM_ADDONS_STREAMS = 3002
C_STREAM_ADDONS_NAME = 3003
C_STREAM_ADDONS_DESCRIPTION = 3004
C_STREAM_ADDONS_PREVIEW = 3005
C_STREAM_ADDONS_OK = 3006
C_STREAM_ADDONS_CANCEL = 3007
C_STREAM_MASHUP = 4001
C_STREAM_MASHUP_STREAMS = 4002
C_STREAM_MASHUP_NAME = 4003
C_STREAM_MASHUP_DESCRIPTION = 4004
C_STREAM_MASHUP_PREVIEW = 4005
C_STREAM_MASHUP_OK = 4006
C_STREAM_MASHUP_CANCEL = 4007
C_STREAM_PLAYLIST = 5001
C_STREAM_PLAYLIST_PREVIEW = 5002
C_STREAM_PLAYLIST_OK = 5003
C_STREAM_PLAYLIST_CANCEL = 5004
C_STREAM_VISIBILITY_MARKER = 100
VISIBLE_STRM = 'strm'
VISIBLE_FAVOURITES = 'favourites'
VISIBLE_ADDONS = 'addons'
VISIBLE_MASHUP = 'mashup'
VISIBLE_PLAYLIST = 'playlist'
def __new__(cls, database, channel):
        xml_file = 'script-tvguide-streamsetup.xml'
        # os.path.join(...) is always truthy, so the original test was a no-op;
        # an existence check was clearly intended here.
        if os.path.exists(os.path.join(SKIN, 'extras', 'skins', 'Default', '720p', xml_file)):
XML = xml_file
return super(StreamSetupDialog, cls).__new__(cls, XML, PATH)
def __init__(self, database, channel):
"""
@type database: source.Database
        @type channel: source.Channel
"""
super(StreamSetupDialog, self).__init__()
self.database = database
self.channel = channel
self.player = xbmc.Player()
self.previousAddonId = None
self.previousProvider = None
self.strmFile = None
self.streamingService = streaming.StreamsService()
def close(self):
if self.player.isPlaying():
self.player.stop()
super(StreamSetupDialog, self).close()
@buggalo.buggalo_try_except({'method' : 'StreamSetupDialog.onInit'})
def onInit(self):
self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_STRM)
if not os.path.exists(mashfile):
self.getControl(self.C_STREAM_MASHUP_TAB).setVisible(False)
favourites = self.streamingService.loadFavourites()
items = list()
for label, value in favourites:
item = xbmcgui.ListItem(label)
item.setProperty('stream', value)
items.append(item)
listControl = self.getControl(StreamSetupDialog.C_STREAM_FAVOURITES)
listControl.addItems(items)
items = list()
for id in self.streamingService.getAddons():
try:
addon = xbmcaddon.Addon(id) # raises Exception if addon is not installed
item = xbmcgui.ListItem(addon.getAddonInfo('name'), iconImage=addon.getAddonInfo('icon'))
item.setProperty('addon_id', id)
items.append(item)
except Exception:
pass
listControl = self.getControl(StreamSetupDialog.C_STREAM_ADDONS)
listControl.addItems(items)
self.updateAddonInfo()
items = list()
for provider in self.streamingService.getMashup():
try:
item = xbmcgui.ListItem(provider, iconImage=self.streamingService.getMashupIcon(provider))
item.setProperty('provider', provider)
items.append(item)
except:
pass
listControl = self.getControl(StreamSetupDialog.C_STREAM_MASHUP)
listControl.addItems(items)
self.updateMashupInfo()
playlist = self.streamingService.loadPlaylist()
items = list()
for label, value in playlist:
item = xbmcgui.ListItem(label)
item.setProperty('stream', value)
items.append(item)
listControl = self.getControl(StreamSetupDialog.C_STREAM_PLAYLIST)
listControl.addItems(items)
@buggalo.buggalo_try_except({'method' : 'StreamSetupDialog.onAction'})
def onAction(self, action):
if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK, KEY_CONTEXT_MENU]:
self.close()
return
elif self.getFocusId() == self.C_STREAM_ADDONS:
self.updateAddonInfo()
elif self.getFocusId() == self.C_STREAM_MASHUP:
self.updateMashupInfo()
@buggalo.buggalo_try_except({'method' : 'StreamSetupDialog.onClick'})
def onClick(self, controlId):
if controlId == self.C_STREAM_STRM_BROWSE:
stream = xbmcgui.Dialog().browse(1, ADDON.getLocalizedString(30304), 'video', mask='.xsp|.strm')
if stream:
self.database.setCustomStreamUrl(self.channel, stream)
self.getControl(self.C_STREAM_STRM_FILE_LABEL).setText(stream)
self.strmFile = stream
elif controlId == self.C_STREAM_ADDONS_OK:
listControl = self.getControl(self.C_STREAM_ADDONS_STREAMS)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
self.database.setCustomStreamUrl(self.channel, stream)
self.close()
elif controlId == self.C_STREAM_FAVOURITES_OK:
listControl = self.getControl(self.C_STREAM_FAVOURITES)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
self.database.setCustomStreamUrl(self.channel, stream)
self.close()
elif controlId == self.C_STREAM_PLAYLIST_OK:
listControl = self.getControl(self.C_STREAM_PLAYLIST)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
self.database.setCustomStreamUrl(self.channel, stream)
self.close()
elif controlId == self.C_STREAM_MASHUP_OK:
listControl = self.getControl(self.C_STREAM_MASHUP_STREAMS)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
self.database.setCustomStreamUrl(self.channel, stream)
self.close()
elif controlId == self.C_STREAM_STRM_OK:
self.database.setCustomStreamUrl(self.channel, self.strmFile)
self.close()
elif controlId in [self.C_STREAM_ADDONS_CANCEL, self.C_STREAM_FAVOURITES_CANCEL, self.C_STREAM_STRM_CANCEL, self.C_STREAM_PLAYLIST_CANCEL, self.C_STREAM_MASHUP_CANCEL]:
self.close()
elif controlId in [self.C_STREAM_ADDONS_PREVIEW, self.C_STREAM_FAVOURITES_PREVIEW, self.C_STREAM_STRM_PREVIEW, self.C_STREAM_PLAYLIST_PREVIEW, self.C_STREAM_MASHUP_PREVIEW]:
if self.player.isPlaying():
self.player.stop()
self.getControl(self.C_STREAM_ADDONS_PREVIEW).setLabel(strings(PREVIEW_STREAM))
self.getControl(self.C_STREAM_FAVOURITES_PREVIEW).setLabel(strings(PREVIEW_STREAM))
self.getControl(self.C_STREAM_PLAYLIST_PREVIEW).setLabel(strings(PREVIEW_STREAM))
self.getControl(self.C_STREAM_STRM_PREVIEW).setLabel(strings(PREVIEW_STREAM))
self.getControl(self.C_STREAM_MASHUP_PREVIEW).setLabel(strings(PREVIEW_STREAM))
return
stream = None
windowed = None
visible = self.getControl(self.C_STREAM_VISIBILITY_MARKER).getLabel()
if visible == self.VISIBLE_ADDONS:
listControl = self.getControl(self.C_STREAM_ADDONS_STREAMS)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
elif visible == self.VISIBLE_FAVOURITES:
listControl = self.getControl(self.C_STREAM_FAVOURITES)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
elif visible == self.VISIBLE_PLAYLIST:
listControl = self.getControl(self.C_STREAM_PLAYLIST)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
elif visible == self.VISIBLE_MASHUP:
listControl = self.getControl(self.C_STREAM_MASHUP_STREAMS)
item = listControl.getSelectedItem()
if item:
stream = item.getProperty('stream')
elif visible == self.VISIBLE_STRM:
stream = self.strmFile
if stream is not None:
path = os.path.join(ADDON.getAddonInfo('path'), 'player.py')
xbmc.executebuiltin('XBMC.RunScript(%s,%s,%d)' % (path, stream, 1))
retries = 10
while retries > 0 and not self.player.isPlaying():
retries -= 1
xbmc.sleep(1000)
if self.player.isPlaying():
self.getControl(self.C_STREAM_MASHUP_PREVIEW).setLabel(strings(STOP_PREVIEW))
self.getControl(self.C_STREAM_ADDONS_PREVIEW).setLabel(strings(STOP_PREVIEW))
self.getControl(self.C_STREAM_FAVOURITES_PREVIEW).setLabel(strings(STOP_PREVIEW))
self.getControl(self.C_STREAM_PLAYLIST_PREVIEW).setLabel(strings(STOP_PREVIEW))
self.getControl(self.C_STREAM_STRM_PREVIEW).setLabel(strings(STOP_PREVIEW))
@buggalo.buggalo_try_except({'method' : 'StreamSetupDialog.onFocus'})
def onFocus(self, controlId):
if controlId == self.C_STREAM_STRM_TAB:
self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_STRM)
elif controlId == self.C_STREAM_FAVOURITES_TAB:
self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_FAVOURITES)
elif controlId == self.C_STREAM_ADDONS_TAB:
self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_ADDONS)
elif controlId == self.C_STREAM_PLAYLIST_TAB:
self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_PLAYLIST)
elif controlId == self.C_STREAM_MASHUP_TAB:
self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_MASHUP)
def updateAddonInfo(self):
listControl = self.getControl(self.C_STREAM_ADDONS)
item = listControl.getSelectedItem()
if item is None:
return
if item.getProperty('addon_id') == self.previousAddonId:
return
self.previousAddonId = item.getProperty('addon_id')
addon = xbmcaddon.Addon(id = item.getProperty('addon_id'))
self.getControl(self.C_STREAM_ADDONS_NAME).setLabel('[B]%s[/B]' % addon.getAddonInfo('name'))
self.getControl(self.C_STREAM_ADDONS_DESCRIPTION).setText(addon.getAddonInfo('description'))
streams = self.streamingService.getAddonStreams(item.getProperty('addon_id'))
items = list()
for (label, stream) in streams:
item = xbmcgui.ListItem(label)
item.setProperty('stream', stream)
items.append(item)
listControl = self.getControl(StreamSetupDialog.C_STREAM_ADDONS_STREAMS)
listControl.reset()
listControl.addItems(items)
    def updateMashupInfo(self):
listControl = self.getControl(self.C_STREAM_MASHUP)
item = listControl.getSelectedItem()
if item is None:
return
provider = item.getProperty('provider')
if provider == self.previousProvider:
return
self.previousProvider = provider
self.getControl(self.C_STREAM_MASHUP_NAME).setLabel('[B]%s[/B]' % provider)
self.getControl(self.C_STREAM_MASHUP_DESCRIPTION).setText('')
streams = self.streamingService.getMashupStreams(provider)
items = list()
for (label, stream) in streams:
if label.upper() != 'ICON':
item = xbmcgui.ListItem(label)
item.setProperty('stream', stream)
items.append(item)
listControl = self.getControl(StreamSetupDialog.C_STREAM_MASHUP_STREAMS)
listControl.reset()
listControl.addItems(items)
class ChooseStreamAddonDialog(xbmcgui.WindowXMLDialog):
C_SELECTION_LIST = 1000
def __new__(cls, addons):
        xml_file = 'script-tvguide-streamaddon.xml'
        # os.path.join(...) is always truthy, so the original test was a no-op;
        # an existence check was clearly intended here.
        if os.path.exists(os.path.join(SKIN, skinfolder, 'Default', '720p', xml_file)):
XML = xml_file
return super(ChooseStreamAddonDialog, cls).__new__(cls, XML, PATH)
def __init__(self, addons):
super(ChooseStreamAddonDialog, self).__init__()
self.addons = addons
self.stream = None
@buggalo.buggalo_try_except({'method' : 'ChooseStreamAddonDialog.onInit'})
def onInit(self):
items = list()
for id, label, url in self.addons:
try:
addon = xbmcaddon.Addon(id)
item = xbmcgui.ListItem(label, addon.getAddonInfo('name'), addon.getAddonInfo('icon'))
item.setProperty('stream', url)
items.append(item)
except:
item = xbmcgui.ListItem(label, '', id)
item.setProperty('stream', url)
items.append(item)
listControl = self.getControl(ChooseStreamAddonDialog.C_SELECTION_LIST)
listControl.addItems(items)
self.setFocus(listControl)
@buggalo.buggalo_try_except({'method' : 'ChooseStreamAddonDialog.onAction'})
def onAction(self, action):
if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK]:
self.close()
@buggalo.buggalo_try_except({'method' : 'ChooseStreamAddonDialog.onClick'})
def onClick(self, controlId):
if controlId == ChooseStreamAddonDialog.C_SELECTION_LIST:
listControl = self.getControl(ChooseStreamAddonDialog.C_SELECTION_LIST)
self.stream = listControl.getSelectedItem().getProperty('stream')
self.close()
@buggalo.buggalo_try_except({'method' : 'ChooseStreamAddonDialog.onFocus'})
def onFocus(self, controlId):
pass
class CategoriesMenu(xbmcgui.WindowXMLDialog):
C_CATEGORIES_LIST = 7000
C_CATEGORIES_SELECTION = 7001
C_CATEGORIES_SAVE = 7002
C_CATEGORIES_CANCEL = 7003
def __new__(cls, database, categoriesList):
        xml_file = 'script-tvguide-categories.xml'
        # os.path.join(...) is always truthy, so the original test was a no-op;
        # an existence check was clearly intended here.
        if os.path.exists(os.path.join(SKIN, 'extras', 'skins', 'Default', '720p', xml_file)):
XML = xml_file
return super(CategoriesMenu, cls).__new__(cls, XML, PATH)
def __init__(self, database, categoriesList):
"""
@type database: source.Database
"""
super(CategoriesMenu, self).__init__()
self.database = database
self.allCategories = database.getCategoriesList()
if categoriesList:
self.currentCategories = list(categoriesList)
else:
self.currentCategories = list()
self.workingCategories = list(self.currentCategories)
self.swapInProgress = False
@buggalo.buggalo_try_except({'method' : 'CategoriesMenu.onInit'})
def onInit(self):
self.updateCategoriesList()
self.setFocusId(self.C_CATEGORIES_LIST)
@buggalo.buggalo_try_except({'method' : 'CategoriesMenu.onAction'})
def onAction(self, action):
if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK, KEY_CONTEXT_MENU]:
self.close()
return
@buggalo.buggalo_try_except({'method' : 'CategoriesMenu.onClick'})
def onClick(self, controlId):
if controlId == self.C_CATEGORIES_LIST:
listControl = self.getControl(self.C_CATEGORIES_LIST)
item = listControl.getSelectedItem()
category = self.allCategories[int(item.getProperty('idx'))]
if category in self.workingCategories:
self.workingCategories.remove(category)
else:
self.workingCategories.append(category)
if category in self.workingCategories:
iconImage = 'tvguide-categories-visible.png'
else:
iconImage = 'tvguide-categories-hidden.png'
item.setIconImage(iconImage)
elif controlId == self.C_CATEGORIES_SAVE:
self.currentCategories = self.workingCategories
self.close()
elif controlId == self.C_CATEGORIES_CANCEL:
self.close()
def onFocus(self, controlId):
pass
def updateCategoriesList(self):
listControl = self.getControl(self.C_CATEGORIES_LIST)
listControl.reset()
for idx, category in enumerate(self.allCategories):
if category in self.workingCategories:
iconImage = 'tvguide-categories-visible.png'
else:
iconImage = 'tvguide-categories-hidden.png'
item = xbmcgui.ListItem('%3d. %s' % (idx+1, category), iconImage = iconImage)
item.setProperty('idx', str(idx))
listControl.addItem(item)
| gpl-2.0 | 1,359,988,126,903,621,000 | 36.55919 | 235 | 0.611883 | false | 3.807917 | false | false | false |
elenaoat/AutobahnPython | examples/websocket/multiproto/server2.py | 19 | 2348 | ###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import Data
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.resource import WebSocketResource
class Echo1ServerProtocol(WebSocketServerProtocol):
def onMessage(self, msg, binary):
self.sendMessage("Echo 1 - " + msg)
class Echo2ServerProtocol(WebSocketServerProtocol):
def onMessage(self, msg, binary):
self.sendMessage("Echo 2 - " + msg)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory1 = WebSocketServerFactory("ws://localhost:9000",
debug = debug,
debugCodePaths = debug)
factory1.protocol = Echo1ServerProtocol
resource1 = WebSocketResource(factory1)
factory2 = WebSocketServerFactory("ws://localhost:9000",
debug = debug,
debugCodePaths = debug)
factory2.protocol = Echo2ServerProtocol
resource2 = WebSocketResource(factory2)
## Establish a dummy root resource
root = Data("", "text/plain")
## and our WebSocket servers under different paths ..
root.putChild("echo1", resource1)
root.putChild("echo2", resource2)
## both under one Twisted Web Site
site = Site(root)
reactor.listenTCP(9000, site)
reactor.run()
| apache-2.0 | -3,127,271,166,840,233,500 | 30.306667 | 79 | 0.618825 | false | 4.480916 | false | false | false |
slavik-m/genetic-algorithm | src/python/genetic_algorithm.py | 1 | 7108 | __author__ = 'ViS'
import random
import math
import itertools
M = 10
class Individual:
def __init__(self, num, val, fitness):
self.num = num
self.val = val
self.fitness = M - fitness
class Population:
def __init__(self):
self.individuals = []
self.fitness_avg = 0
def calculate_fitness_avg(self):
sum = 0
for i in self.individuals:
sum += i.fitness
self.fitness_avg = sum / len(self.individuals)
def calculate(fitness_fn, opt):
    # NOTE: due to operator precedence this evaluates as min + (max / step);
    # (max - min) / step may have been the intended step count.
    opt['t'] = int(opt['min'] + opt['max'] / opt['step'])
global gen_max_val, gen_count, M
gen_max_val = 0
def calculate_optimal():
global gen_max_val, gen_count
for i in range(1, 20):
num = 2 ** i
if (num - 1) >= opt['t']:
gen_count = len(bin(num - 1)[2:])
gen_max_val = num - 1
break
calculate_optimal()
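    # Illustrative values: with opt['t'] == 20 the loop stops at 2**5 - 1 = 31,
    # so gen_max_val becomes 31 and gen_count becomes 5 (bits per chromosome).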
def generate_start_population(gen_max_val):
population = Population()
for i in range(0, opt['population_count']):
val = random.randint(0, gen_max_val)
x = val * opt['step']
fitness = eval(fitness_fn)
population.individuals.append(Individual(i, val, fitness))
population.calculate_fitness_avg()
return population
def selection(population):
individuals_offsprings = []
if opt['selection_type'] == 'TOURNEY':
for i in range(0, opt['population_count']):
source_idx = random.randint(0, opt['population_count'] - 1)
target_idx = random.randint(0, opt['population_count'] - 1)
source = population.individuals[source_idx].fitness
target = population.individuals[target_idx].fitness
if source > target:
individuals_offsprings.insert(i, population.individuals[source_idx])
else:
individuals_offsprings.insert(i, population.individuals[target_idx])
return individuals_offsprings
def pair_cross(individ_s, individ_t, cross_point):
children = []
first_part_source = bin(individ_s.val)[2:].zfill(gen_count)[0:cross_point]
first_part_target = bin(individ_t.val)[2:].zfill(gen_count)[0:cross_point]
second_part_source = bin(individ_s.val)[2:].zfill(gen_count)[cross_point:]
second_part_target = bin(individ_t.val)[2:].zfill(gen_count)[cross_point:]
val1 = first_part_source + second_part_target
val2 = first_part_target + second_part_source
x = int(val1, 2) * opt['step']
fitness1 = eval(fitness_fn)
x = int(val2, 2) * opt['step']
fitness2 = eval(fitness_fn)
child1 = Individual(0, int(val1, 2), fitness1)
child2 = Individual(0, int(val2, 2), fitness2)
children.append(child1)
children.append(child2)
return children
def cross(individuals_offsprings, gen_count):
new_population = Population()
pair = []
pair_count = int(opt['population_count'] / 2)
next_idx = 0
pc = 0.7 # Chance of crossing
while pair_count > 0:
for i in range(0, opt['population_count']):
if random.random() < pc:
pair.append(individuals_offsprings[i])
next_idx = i + 1
break
for i in range(next_idx, opt['population_count']):
if random.random() < pc:
if len(pair) > 1:
if (pair[1]) == individuals_offsprings[i]:
pair.insert(1, individuals_offsprings[i])
else:
i = 0
break
else:
pair.insert(1, individuals_offsprings[i])
children = pair_cross(pair[0], pair[1], int(math.floor(random.random() * (gen_count - 1) + 1)))
new_population.individuals.append(children)
pair_count -= 1
new_population.individuals = list(itertools.chain.from_iterable(new_population.individuals))
for i in range(0, opt['population_count']):
new_population.individuals[i].num = i
new_population.calculate_fitness_avg()
return new_population
    def mutation_gen(individual, mutagen):
        # Flip the bit at index `mutagen`; slicing with [:mutagen] (not
        # [:mutagen-1]) keeps the chromosome length constant.
        if individual[mutagen] == '1':
            return individual[:mutagen] + '0' + individual[mutagen+1:]
        else:
            return individual[:mutagen] + '1' + individual[mutagen+1:]
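    # Illustrative check (assumed chromosome): mutation_gen('1011', 2) -> '1001';
    # the bit at index 2 is flipped and the length stays 4.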
def mutation(population):
Pm = 0.3 # Chance of mutation
new_population = Population()
for i in range(0, opt['population_count']):
if random.random() < Pm:
mutagen = int(math.floor(random.random() * (gen_count - 1)))
val = int(mutation_gen(bin(population.individuals[i].val)[2:].zfill(gen_count), mutagen), 2)
x = val * opt['step']
fitness = eval(fitness_fn)
new_population.individuals.insert(i, Individual(i, val, fitness))
else:
new_population.individuals.insert(i, population.individuals[i])
new_population.calculate_fitness_avg()
return new_population
def start():
population = generate_start_population(gen_max_val)
start_population = population
selection_population = Population()
cross_population = Population()
mutation_population = Population()
coefZ = 4
population_chache = []
stop = False
for t in range(0, opt['t'] * 2):
selection_population = selection(population)
cross_population = cross(selection_population, gen_count)
population_chache.insert(t % coefZ, cross_population.fitness_avg)
if len(population_chache) > 3:
if population_chache[0] == population_chache[1] and population_chache[1] == population_chache[2] and \
population_chache[2] == population_chache[3]:
stop = True
if stop:
population = cross_population
break
if t != (opt['t'] * 2 - 1):
mutation_population = mutation(cross_population)
population = mutation_population
else:
population = cross_population
population_chache[t % coefZ or 0] = population.fitness_avg
        # Initialise with the first individual, then scan the rest for the
        # highest fitness (the initialisation must stay outside the loop).
        result = population.individuals[0].val
        temp = population.individuals[0].fitness
        for i in range(1, opt['population_count']):
            if temp < population.individuals[i].fitness:
                temp = population.individuals[i].fitness
                result = population.individuals[i].val
return {
"start_population": start_population,
"population": population,
"x": result * opt['step']
}
return start()
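# Minimal usage sketch (illustrative fitness expression and options; the string
# is eval'd with `x` bound to each decoded chromosome value):
#
#   best = calculate('x * x', {'min': 0, 'max': 10, 'step': 0.1,
#                              'population_count': 20,
#                              'selection_type': 'TOURNEY'})
#   print(best['x'])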
| mit | -7,957,650,740,815,945,000 | 32.214953 | 118 | 0.549522 | false | 3.914097 | false | false | false |
jsbronder/inhibitor | inhibitor/actions.py | 1 | 5706 | import os
import shutil
import util
import glob
import tarfile
import types
class InhibitorAction(object):
"""
Basic action. Handles running through the action_sequence and catching
errors that can be passed back up in order to do cleaning first.
@param name - String representing this action
    @param resume - Allow the action sequence to resume where it left off if
    it was previously interrupted.
"""
def __init__(self, name='BlankAction', resume=False):
self.name = name
self.action_sequence = []
self.resume = resume
self.statedir = None
self.istate = None
def get_action_sequence(self):
return []
def post_conf(self, inhibitor_state):
self.istate = inhibitor_state
self.statedir = inhibitor_state.paths.state.pjoin(self.name)
if os.path.isdir(self.statedir) and not self.resume:
self.clear_resume()
os.makedirs(self.statedir)
elif not os.path.exists(self.statedir):
os.makedirs(self.statedir)
self.resume = False
elif len(os.listdir(self.statedir)) == 0:
self.resume = False
def run(self):
for action in self.get_action_sequence():
resume_path = self.statedir.pjoin('resume-%s-%s' % (self.name, action.name))
if ( self.resume
and action.always == False
and os.path.exists(resume_path) ):
continue
# Errors are caught by Inhibitor()
util.info("Running %s" % action.name)
action.run()
open(resume_path, 'w').close()
self.clear_resume()
def clear_resume(self):
for f in glob.iglob(self.statedir.pjoin('resume-%s-*' % self.name)):
os.unlink(f)
os.rmdir(self.statedir)
class InhibitorSnapshot(InhibitorAction):
"""
Create a snapshot of an InhibitorSource
@param snapshot_source - Source that we will generate a snapshot from.
@param name - Unique string to identify the source.
@param exclude - A string, list or tuple of patterns to not include in
the snapshot. Passed to rsync --exclude.
@param include - String, passed to glob, of toplevel paths to include
in the snapshot.
"""
def __init__(self, snapshot_source, name, exclude=None, include=None):
super(InhibitorSnapshot, self).__init__(name='snapshot')
self.dest = None
self.builddir = None
self.tarname = None
self.dest = None
self.name = name
self.src = snapshot_source
self.src.keep = True
self.src.dest = util.Path('/')
if exclude:
if type(exclude) == types.StringType:
self.exclude = exclude.split(' ')
elif type(exclude) in (types.ListType, types.TupleType):
self.exclude = exclude
else:
raise util.InhibitorError("Unrecognized exclude pattern.")
else:
self.exclude = False
if include:
if type(include) == types.StringType:
self.include = include.split(' ')
elif type(include) in (types.ListType, types.TupleType):
self.include = include
else:
raise util.InhibitorError("Unrecognized include pattern.")
else:
self.include = False
def get_action_sequence(self):
return [
util.Step(self.sync, always=False),
util.Step(self.pack, always=False),
]
def post_conf(self, inhibitor_state):
super(InhibitorSnapshot, self).post_conf(inhibitor_state)
self.src.post_conf(inhibitor_state)
self.src.init()
self.tarname = 'snapshot-' + self.name
self.dest = inhibitor_state.paths.stages.pjoin(self.tarname+'.tar.bz2')
self.builddir = inhibitor_state.paths.build.pjoin(self.tarname)
def sync(self):
if os.path.exists(self.builddir):
shutil.rmtree(self.builddir)
elif os.path.islink(self.builddir):
os.unlink(self.builddir)
os.makedirs(self.builddir)
exclude_cmd = ''
if self.exclude:
for i in self.exclude:
exclude_cmd += " --exclude='%s'" % i
if self.include:
for pattern in self.include:
paths = [self.src.cachedir.pjoin(pattern)]
if '*' in pattern:
paths = glob.glob(self.src.cachedir.pjoin(pattern))
for path in paths:
dest = path.replace(self.src.cachedir, self.builddir)
if not os.path.lexists( os.path.dirname(dest) ):
os.makedirs( os.path.dirname(dest) )
util.cmd('rsync -a %s %s/ %s/' % (
exclude_cmd,
path,
dest
))
else:
util.cmd('rsync -a %s %s/ %s/' % (exclude_cmd, self.src.cachedir, self.builddir))
def pack(self):
archive = tarfile.open(self.dest, 'w:bz2')
archive.add(self.builddir,
arcname = '/',
recursive = True
)
archive.close()
util.info('%s is ready.' % self.dest)
def get_snappath(self):
if self.dest:
return self.dest
else:
raise util.InhibitorError("Cannot get snappath until post_conf has been called.")
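# Minimal usage sketch (illustrative; assumes a configured InhibitorSource
# `src` and an inhibitor state object from the surrounding framework):
#
#   snap = InhibitorSnapshot(src, 'portage', exclude='distfiles packages')
#   snap.post_conf(inhibitor_state)
#   snap.run()
#   print(snap.get_snappath())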
| bsd-3-clause | -7,309,664,444,589,774,000 | 34.886792 | 93 | 0.545566 | false | 4.155863 | false | false | false |
istb-mia/miapy | miapy/data/creation/writer.py | 1 | 3795 | import abc
import os
import numpy as np
import h5py
import miapy.data.indexexpression as expr
class Writer(metaclass=abc.ABCMeta):
"""Represents the abstract dataset writer."""
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __del__(self):
self.close()
@abc.abstractmethod
def close(self):
"""Close the writer."""
pass
@abc.abstractmethod
def open(self):
"""Open the writer."""
pass
@abc.abstractmethod
def reserve(self, entry: str, shape: tuple, dtype=None):
"""Reserve space in the dataset for later writing.
Args:
entry(str): The dataset entry to be created.
shape(tuple): The shape to be reserved.
dtype: The dtype.
"""
pass
@abc.abstractmethod
def fill(self, entry: str, data, index: expr.IndexExpression=None):
"""Fill parts of a reserved dataset entry.
Args:
entry(str): The dataset entry to be filled.
data: The data to write.
index(expr.IndexExpression): The slicing expression.
"""
pass
@abc.abstractmethod
def write(self, entry: str, data, dtype=None):
"""Create and write entry.
Args:
entry(str): The dataset entry to be written.
data: The data to write.
dtype: The dtype.
"""
pass
class Hdf5Writer(Writer):
"""Represents the dataset writer for HDF5 files."""
str_type = h5py.special_dtype(vlen=str)
def __init__(self, file_path: str) -> None:
"""Initializes a new instance.
Args:
file_path(str): The path to the dataset file to write.
"""
self.h5 = None # type: h5py.File
self.file_path = file_path
def close(self):
if self.h5 is not None:
self.h5.close()
self.h5 = None
def open(self):
self.h5 = h5py.File(self.file_path, libver='latest')
def reserve(self, entry: str, shape: tuple, dtype=None):
# special string handling (in order not to use length limited strings)
if dtype is str or dtype == 'str' or (isinstance(dtype, np.dtype) and dtype.type == np.str_):
dtype = self.str_type
self.h5.create_dataset(entry, shape, dtype=dtype)
def fill(self, entry: str, data, index: expr.IndexExpression=None):
# special string handling (in order not to use length limited strings)
        # Compare dtypes by equality, not identity -- h5py returns a fresh
        # dtype object for the dataset, so `is` would never match.
        if self.h5[entry].dtype == self.str_type:
data = np.asarray(data, dtype=object)
if index is None:
index = expr.IndexExpression()
self.h5[entry][index.expression] = data
def write(self, entry: str, data, dtype=None):
# special string handling (in order not to use length limited strings)
if dtype is str or dtype == 'str' or (isinstance(dtype, np.dtype) and dtype.type == np.str_):
dtype = self.str_type
data = np.asarray(data, dtype=object)
if entry in self.h5:
del self.h5[entry]
self.h5.create_dataset(entry, dtype=dtype, data=data)
def get_writer(file_path: str) -> Writer:
""" Get the dataset writer corresponding to the file extension.
Args:
file_path(str): The path of the dataset file to be written.
Returns:
Writer: Writer corresponding to dataset file extension.
"""
extension = os.path.splitext(file_path)[1]
if extension not in writer_registry:
raise ValueError('unknown dataset file extension "{}"'.format(extension))
return writer_registry[extension](file_path)
writer_registry = {'.h5': Hdf5Writer, '.hdf5': Hdf5Writer}
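# Minimal usage sketch (illustrative file name and data): the context manager
# opens the HDF5 file and closes it again on exit.
#
#   with get_writer('dataset.h5') as writer:
#       writer.write('labels', ['a', 'b', 'c'], dtype=str)
#       writer.reserve('images', (10, 32, 32), dtype=np.float32)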
| apache-2.0 | -5,185,869,669,824,150,000 | 28.192308 | 101 | 0.596838 | false | 3.977987 | false | false | false |
davismathew/netbot-django | bootcamp/utils/loadconfig.py | 1 | 1081 | import os
import ConfigParser
from bootcamp.settings import ENVIRONMENT
def readconf():
config = ConfigParser.ConfigParser()
config.read('/etc/netaut.conf')
return config
# def project_path(type):
# config = readconf()
# if type == 'project':
# path = config.get('paths', 'project_path')
# elif type == 'play':
# path = config.get('paths', 'project_path')
# elif type == 'resultout':
# path = config.get('paths', 'result_path')
# return os.listdir(path)
def get_vars(type):
config = readconf()
if type == 'project':
vars = config.get(ENVIRONMENT, 'project_path')
elif type == 'play':
vars = config.get(ENVIRONMENT, 'project_path')
elif type == 'resultout':
vars = config.get(ENVIRONMENT, 'result_path')
elif type == 'baseurl':
vars = config.get(ENVIRONMENT, 'baseurl')
elif type == 'ansibengineemc':
vars = config.get(ENVIRONMENT, 'ansibengineemc')
elif type == 'ansibenginemtn':
vars = config.get(ENVIRONMENT, 'ansibenginemtn')
return vars | mit | 8,785,802,740,819,030,000 | 29.055556 | 56 | 0.617946 | false | 3.410095 | true | false | false |
mc706/task-burndown | task_burndown/urls.py | 1 | 1091 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/', include('task_burndown.api')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/', 'accounts.views.login_user', name='login'),
url(r'^logout/', 'accounts.views.logout_user', name='logout'),
url(r'^register/', 'accounts.views.register', name='register'),
url(r'^$', 'accounts.views.home', name='home'),
url(r'^robots\.txt$',
TemplateView.as_view(template_name='robots.txt', content_type='text/plain'), name="robots"),
url(r'^humans\.txt$',
TemplateView.as_view(template_name='humans.txt', content_type='text/plain'), name="humans")
)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| mit | -8,829,211,339,080,916,000 | 46.434783 | 100 | 0.690192 | false | 3.474522 | false | true | false |
tomsimonart/GLM-web-interface | GLM/source/libs/rainbow.py | 1 | 3798 | #!/usr/bin/env python3
# By Infected
# 2016
import os
import sys
def check_verbosity():
dir = os.path.dirname(__file__)
abs_path = os.path.join(dir, '../../verbosity')
try:
with open(abs_path, 'r') as verbosity:
VERBOSITY = int(verbosity.readline()) # Verbosity level
SVERBOSITY = list(
map(lambda x: x.strip('\n'), verbosity.readlines())
) # Specific verbosity
except:
print('No verbosity file.')
VERBOSITY = 1
SVERBOSITY = []
return VERBOSITY, SVERBOSITY
CODE = '\x1b['
colors = {
"BOLD": 1,
"D": 0,
"BLACK": 30,
"RED": 31,
"GREEN": 32,
"YELLOW": 33,
"BLUE": 34,
"MAGENTA": 35,
"CYAN": 36,
"WHITE": 37
}
effects = {
"UNDERLINE": 4,
"BLINK": 5,
"INVERT": 7,
"STRIP": 9
}
def color(text='', fg="D", bold=True, bg=None, fx=None) -> str:
fg = fg.upper() if type(fg) == str else "D"
bg = bg.upper() if type(bg) == str else None
fx = fx.upper() if type(fx) == str else None
string = CODE
# Bold
if bold:
string += str(colors["BOLD"])
else:
string += str(colors["D"])
# Color part
string += ";"
string += str(colors[fg])
# Fx part
if fx is not None:
string += ";"
string += str(effects[fx])
# Bg part
if bg is not None:
string += ";"
string += str(colors[bg] + 10)
# Text part
string += 'm'
string += str(text)
# End part
string += CODE
string += str(colors["D"])
string += "m" # End
return string
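# Illustrative output: color('hi', 'red', bg='white') returns
# '\x1b[1;31;47mhi\x1b[0m' -- bold red text on a white background.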
STATUS = color('⚑', 'GREEN')
WARNING = color('⚑', 'YELLOW')
ERROR = color('⚑', 'RED')
FATAL = color('⌁', 'RED', False, None, 'INVERT')
def msg(message, priority=0, function=None, *data, **verbose):
VERBOSITY, SVERBOSITY = check_verbosity()
print_ = True
if 'level' in verbose:
if type(verbose['level']) is int:
if verbose['level'] <= VERBOSITY:
print_ = True
else:
print_ = False
if 'slevel' in verbose:
if type(verbose['slevel']) is str:
if verbose['slevel'] in SVERBOSITY:
print_ = True
if print_:
if priority <= 0:
# status
mode = STATUS
message = color(message, 'GREEN')
print(mode, end=" ")
if priority == 1:
# Warning
mode = WARNING
message = color(message, 'YELLOW')
print(mode, end=" ")
if priority == 2:
# Error
mode = ERROR
message = color(message, 'RED')
print(mode, end=" ", file=sys.stderr)
if priority >= 3:
# Fatal
mode = FATAL
message = color(message, 'RED', False, None, 'invert')
print(mode, end=" ", file=sys.stderr)
if function is not None:
function_color = 'BLUE'
function += ": "
if priority >= 2:
print(color(function, function_color), end="", file=sys.stderr)
else:
print(color(function, function_color), end="")
if priority >= 2:
print(message, end="", file=sys.stderr)
else:
print(message, end="")
if data is not ():
if priority >= 2:
print("\t" + color("|", 'YELLOW'), end="", file=sys.stderr)
print(color(" " + str(list(data)), "MAGENTA"), file=sys.stderr)
else:
print("\t" + color("|", 'YELLOW'), end="")
print(color(" " + str(list(data)), "MAGENTA"))
else:
if priority >= 2:
print(file=sys.stderr)
else:
print()
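# Minimal usage sketch (illustrative): an error-level message with a function
# tag and attached data, printed only if the verbosity file allows level 1:
#
#   msg('config missing', 2, 'loader', '/etc/app.conf', level=1)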
| mit | 268,586,680,848,380,480 | 24.266667 | 79 | 0.480475 | false | 3.651252 | false | false | false |
ecreall/lagendacommun | lac/views/admin_process/edit_smart_folder.py | 1 | 1673 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.schema import select
from lac.content.processes.admin_process.behaviors import (
EditSmartFolder)
from lac.content.smart_folder import (
SmartFolderSchema, SmartFolder)
from lac import _
@view_config(
name='editsmartfolder',
context=SmartFolder,
renderer='pontus:templates/views_templates/grid.pt',
)
class EditSmartFolderView(FormView):
title = _('Edit the smart folder')
schema = select(SmartFolderSchema(factory=SmartFolder, editable=True),
['title',
'description',
'filters',
'view_type',
'classifications',
'icon_data',
'style',
'add_as_a_block'])
behaviors = [EditSmartFolder, Cancel]
formid = 'formeditsmartfolder'
name = 'editsmartfolder'
requirements = {'css_links':[],
'js_links':['lac:static/js/smart_folder_management.js',
'lac:static/js/contextual_help_smart_folder.js']}
def before_update(self):
if self.context.parents:
self.schema.children.remove(self.schema.get('add_as_a_block'))
def default_data(self):
return self.context
DEFAULTMAPPING_ACTIONS_VIEWS.update({EditSmartFolder: EditSmartFolderView}) | agpl-3.0 | 974,138,662,456,670,800 | 30 | 81 | 0.637776 | false | 3.810934 | false | false | false |
Samreay/ChainConsumer | chainconsumer/colors.py | 1 | 2969 | # -*- coding: utf-8 -*-
from matplotlib.colors import rgb2hex
import matplotlib.pyplot as plt
import numpy as np
# Colours drawn from Material Design's colour palette at https://material.io/guidelines/style/color.html
class Colors(object):
def __init__(self):
self.color_map = {
"blue": "#1976D2",
"lblue": "#4FC3F7",
"red": "#E53935",
"green": "#43A047",
"lgreen": "#8BC34A",
"purple": "#673AB7",
"cyan": "#4DD0E1",
"magenta": "#E91E63",
"yellow": "#F2D026",
"black": "#333333",
"grey": "#9E9E9E",
"orange": "#FB8C00",
"amber": "#FFB300",
"brown": "#795548",
}
self.aliases = {
"b": "blue",
"r": "red",
"g": "green",
"k": "black",
"m": "magenta",
"c": "cyan",
"o": "orange",
"y": "yellow",
"a": "amber",
"p": "purple",
"e": "grey",
"lg": "lgreen",
"lb": "lblue",
}
self.default_colors = ["blue", "lgreen", "red", "purple", "yellow", "grey", "lblue", "magenta", "green", "brown", "black", "orange"]
def format(self, color):
if isinstance(color, np.ndarray):
color = rgb2hex(color)
if color[0] == "#":
return color
elif color in self.color_map:
return self.color_map[color]
elif color in self.aliases:
alias = self.aliases[color]
return self.color_map[alias]
else:
raise ValueError("Color %s is not mapped. Please give a hex code" % color)
def get_formatted(self, list_colors):
return [self.format(c) for c in list_colors]
def get_default(self):
return self.get_formatted(self.default_colors)
def get_colormap(self, num, cmap_name, scale=0.7): # pragma: no cover
color_list = self.get_formatted(plt.get_cmap(cmap_name)(np.linspace(0.05, 0.9, num)))
scales = scale + (1 - scale) * np.abs(1 - np.linspace(0, 2, num))
scaled = [self.scale_colour(c, s) for c, s in zip(color_list, scales)]
return scaled
def scale_colour(self, colour, scalefactor): # pragma: no cover
if isinstance(colour, np.ndarray):
r, g, b = colour[:3] * 255.0
else:
hexx = colour.strip("#")
if scalefactor < 0 or len(hexx) != 6:
return hexx
r, g, b = int(hexx[:2], 16), int(hexx[2:4], 16), int(hexx[4:], 16)
r = self._clamp(int(r * scalefactor))
g = self._clamp(int(g * scalefactor))
b = self._clamp(int(b * scalefactor))
return "#%02x%02x%02x" % (r, g, b)
def _clamp(self, val, minimum=0, maximum=255):
if val < minimum:
return minimum
if val > maximum:
return maximum
return val
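# Minimal usage sketch (illustrative): resolve an alias to hex and darken it.
#
#   c = Colors()
#   c.format('b')                    # -> '#1976D2'
#   c.scale_colour('#1976D2', 0.7)   # -> a dimmed variant of the same hue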
| mit | 1,061,258,463,298,375,700 | 33.126437 | 140 | 0.496127 | false | 3.412644 | false | false | false |
ekaputra07/wpcdesk | wpcdesk/comment_editor.py | 1 | 4371 | # -*- coding: utf-8 -*-
# wpcdesk - WordPress Comment Desktop
# Copyright (C) 2012 Eka Putra - [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui, QtCore
from gui.comment_window import Ui_CommentWindow
from wpcdesk_threads import EditCommentThread, DeleteCommentThread
class CommentEditor(QtGui.QDialog):
def __init__(self, parent=None, data=None):
QtGui.QDialog.__init__(self, parent)
self.ui = Ui_CommentWindow()
self.ui.setupUi(self)
self.ui.progressBar.hide()
self.set_validator()
self.parent = parent
self.data = data
self.fill_form(self.data)
QtCore.QObject.connect(self.ui.btn_save, QtCore.SIGNAL("clicked()"), self.saveComment)
QtCore.QObject.connect(self.ui.btn_delete, QtCore.SIGNAL("clicked()"), self.deleteComment)
self.edit_comment_thread = EditCommentThread()
self.edit_comment_thread.is_loading.connect(self.loading)
self.edit_comment_thread.is_success.connect(self.edit_status)
self.delete_comment_thread = DeleteCommentThread(self.data)
self.delete_comment_thread.is_loading.connect(self.loading)
self.delete_comment_thread.is_success.connect(self.delete_status)
def set_validator(self):
# Email Validator
email_pattern = QtCore.QRegExp( r"^([a-zA-Z0-9_\.\-\+])+\@(([a-zA-Z0-9\-])+\.)+([a-zA-Z0-9]{2,4})+$" )
email_validator = QtGui.QRegExpValidator(email_pattern , self )
self.ui.edit_email.setValidator(email_validator)
def fill_form(self, data):
self.comment_id = data['comment_id']
self.ui.lbl_post.setText(data['comment_post'])
self.ui.lbl_date.setText(data['comment_date'])
self.ui.edit_name.setText(data['comment_author'])
self.ui.edit_email.setText(data['comment_email'])
self.ui.edit_comment.setText(data['comment_content'])
if data['comment_status'] == 'Approved':
self.ui.cb_status.setChecked(True)
else:
self.ui.cb_status.setChecked(False)
def saveComment(self):
data = {}
if self.ui.cb_status.isChecked():
data['status'] = 'approve'
else:
data['status'] = 'hold'
data['content'] = str(self.ui.edit_comment.toPlainText())
data['author'] = str(self.ui.edit_name.text())
data['author_email'] = str(self.ui.edit_email.text())
self.edit_comment_thread.set_comment_id(int(self.data['comment_id']))
self.edit_comment_thread.set_data(data)
self.edit_comment_thread.start()
def deleteComment(self):
        answer = QtGui.QMessageBox.question(self, 'Confirmation','Are you sure you want to delete this comment?', QtGui.QMessageBox.Yes|QtGui.QMessageBox.Cancel)
if answer == QtGui.QMessageBox.Yes:
self.delete_comment_thread.start()
else:
return
def loading(self, is_loading):
if is_loading:
self.ui.progressBar.show()
else:
self.ui.progressBar.hide()
def edit_status(self, status):
if status:
self.parent.loadComments()
QtGui.QMessageBox.information(self, 'Comment updated!','Comment successfuly updated.', QtGui.QMessageBox.Ok)
else:
QtGui.QMessageBox.warning(self, 'Failed!','Failed to update comment.', QtGui.QMessageBox.Ok)
def delete_status(self, status):
if status:
self.parent.loadComments()
QtGui.QMessageBox.information(self, 'Comment Deleted','Comment successfuly deleted.', QtGui.QMessageBox.Ok)
self.close()
else:
QtGui.QMessageBox.warning(self, 'Failed!','Failed to delete comment.', QtGui.QMessageBox.Ok)
| gpl-3.0 | 4,248,496,486,360,316,000 | 39.472222 | 157 | 0.653855 | false | 3.732707 | false | false | false |
selectel/pyte | benchmark.py | 1 | 1226 | """
benchmark
~~~~~~~~~
A simple script for running benchmarks on captured process output.
Example run::
$ BENCHMARK=tests/captured/ls.input python benchmark.py
.....................
ls.input: Mean +- std dev: 644 ns +- 23 ns
:copyright: (c) 2016-2021 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
import io
import os.path
import sys
from functools import partial
try:
from pyperf import Runner
except ImportError:
sys.exit("``perf`` not found. Try installing it via ``pip install perf``.")
import pyte
def make_benchmark(path, screen_cls):
with io.open(path, "rt", encoding="utf-8") as handle:
data = handle.read()
stream = pyte.Stream(screen_cls(80, 24))
return partial(stream.feed, data)
if __name__ == "__main__":
benchmark = os.environ["BENCHMARK"]
sys.argv.extend(["--inherit-environ", "BENCHMARK"])
runner = Runner()
for screen_cls in [pyte.Screen, pyte.DiffScreen, pyte.HistoryScreen]:
name = os.path.basename(benchmark) + "->" + screen_cls.__name__
runner.bench_func(name, make_benchmark(benchmark, screen_cls))
| lgpl-3.0 | 7,462,292,348,859,635,000 | 25.085106 | 79 | 0.628874 | false | 3.659701 | false | false | false |
codelieche/codelieche.com | apps/wenjuan/models/question.py | 1 | 3710 | # -*- coding:utf-8 -*-
from django.db import models
from account.models import User
class Job(models.Model):
"""
问卷 Model
"""
name = models.SlugField(verbose_name="网址", max_length=40, unique=True)
title = models.CharField(verbose_name="标题", max_length=128)
questions = models.ManyToManyField(verbose_name="问题", to="Question", blank=True)
time_start = models.DateTimeField(verbose_name="开始时间", blank=True, null=True)
    # Questionnaire start and expiry times
time_expired = models.DateTimeField(verbose_name="过期时间", blank=True, null=True)
description = models.CharField(verbose_name="描述", max_length=512)
time_added = models.DateTimeField(verbose_name="添加时间", auto_now_add=True, blank=True)
is_active = models.BooleanField(verbose_name="启用", blank=True, default=True)
    # Some questionnaires require the user to be logged in before answering
is_authenticated = models.BooleanField(verbose_name="需要用户登录", blank=True, default=True)
def __str__(self):
return self.title
class Meta:
verbose_name = "问卷"
verbose_name_plural = verbose_name
class Question(models.Model):
"""
    Question model.
"""
CATEGORY_CHOICES = (
("text", "文本"),
("radio", "单选"),
("checkbox", "多选")
)
title = models.CharField(verbose_name="问题", max_length=128)
description = models.CharField(verbose_name="描述", max_length=512, blank=True)
category = models.CharField(verbose_name="类型", choices=CATEGORY_CHOICES, max_length=10,
default="text", blank=True)
    # Answer uniqueness is checked when the answer is submitted
is_unique = models.BooleanField(verbose_name="回答需要唯一", blank=True, default=False)
def __str__(self):
return self.title
class Meta:
verbose_name = "问题"
verbose_name_plural = verbose_name
class Choice(models.Model):
"""
    Answer choice (Choice) model.
"""
question = models.ForeignKey(to="question", verbose_name="问题", related_name="choices", on_delete=models.CASCADE)
option = models.CharField(verbose_name="选项", max_length=1)
value = models.CharField(verbose_name="选项值", max_length=128)
def __str__(self):
return "{}:{}".format(self.question, self.value)
class Meta:
verbose_name = "问题答案选项"
verbose_name_plural = verbose_name
class Answer(models.Model):
"""
    Answer model.
"""
question = models.ForeignKey(to="question", verbose_name="问题", on_delete=models.CASCADE)
option = models.CharField(verbose_name="回答选项", blank=True, max_length=1, null=True)
answer = models.CharField(verbose_name="回答", max_length=128)
def __str__(self):
return "问题:(ID:{}):Answer:{}".format(self.question_id, self.answer)
class Meta:
verbose_name = "问题回答"
verbose_name_plural = verbose_name
class Report(models.Model):
"""
    Questionnaire response model.
"""
job = models.ForeignKey(to="job", verbose_name="问卷", on_delete=models.CASCADE)
user = models.ForeignKey(to=User, verbose_name="用户", blank=True, null=True, on_delete=models.SET_NULL)
ip = models.GenericIPAddressField(verbose_name="回答者IP", blank=True, null=True)
time_added = models.DateTimeField(verbose_name="添加时间", blank=True, auto_now_add=True)
answers = models.ManyToManyField(verbose_name="问卷回答", to="answer", blank=True)
def __str__(self):
return "Report:{}".format(self.pk)
class Meta:
verbose_name = "问卷回答"
verbose_name_plural = verbose_name
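# Minimal usage sketch (illustrative): a single-choice question with two options.
#
#   q = Question.objects.create(title='Favourite colour?', category='radio')
#   Choice.objects.create(question=q, option='A', value='Red')
#   Choice.objects.create(question=q, option='B', value='Blue')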
| mit | 3,347,785,241,776,150,500 | 31.990291 | 116 | 0.645968 | false | 2.975482 | false | false | false |
ingof/ems-collector | tools/ems-gen-graphs.py | 1 | 4875 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import contextlib
import errno
import os
import subprocess
import sys
import time
mysql_socket_path = "/var/run/mysqld/mysqld.sock"
mysql_user = "emsdata"
mysql_password = "emsdata"
mysql_db_name = "ems_data"
@contextlib.contextmanager
def flock(path, wait_delay = 1):
while True:
try:
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except OSError, e:
if e.errno != errno.EEXIST:
raise
time.sleep(wait_delay)
continue
else:
break
try:
yield fd
finally:
os.close(fd)
os.unlink(path)
def check_interval():
timespans = {
"day" : "1 day",
"halfweek": "3 day",
"week" : "1 week",
"month" : "1 month"
}
return timespans.get(interval, None)
def get_time_format():
formats = {
"day" : "%H:%M",
"halfweek": "%H:%M (%a)",
"week" : "%a, %Hh"
}
return formats.get(interval, "%d.%m")
def do_graphdata(sensor, filename):
datafile = open(filename, "w")
process = subprocess.Popen(["mysql", "-A", "-u%s" % mysql_user, "-p%s" % mysql_password, mysql_db_name ],
shell = False, stdin = subprocess.PIPE, stdout = datafile)
process.communicate("""
set @starttime = subdate(now(), interval %s);
set @endtime = now();
select time, value from (
select adddate(if(starttime < @starttime, @starttime, starttime), interval 1 second) time, value from numeric_data
where sensor = %d and endtime >= @starttime
union all
select if(endtime > @endtime, @endtime, endtime) time, value from numeric_data
where sensor = %d and endtime >= @starttime)
t1 order by time;
""" % (timespan_clause, sensor, sensor))
datafile.close()
def do_plot(name, filename, ylabel, definitions):
i = 1
for definition in definitions:
do_graphdata(definition[0], "/tmp/file%d.dat" % i)
i = i + 1
filename = filename + "-" + interval + ".png"
process = subprocess.Popen("gnuplot", shell = False, stdin = subprocess.PIPE)
process.stdin.write("set terminal png font 'arial' 12 size 800, 450\n")
process.stdin.write("set grid lc rgb '#aaaaaa' lt 1 lw 0,5\n")
process.stdin.write("set title '%s'\n" % name)
process.stdin.write("set xdata time\n")
process.stdin.write("set xlabel 'Datum'\n")
process.stdin.write("set ylabel '%s'\n" % ylabel)
process.stdin.write("set timefmt '%Y-%m-%d %H:%M:%S'\n")
process.stdin.write("set format x '%s'\n" % get_time_format())
process.stdin.write("set xtics autofreq rotate by -45\n")
process.stdin.write("set ytics autofreq\n")
process.stdin.write("set output '%s'\n" % os.path.join(targetpath, filename))
process.stdin.write("plot")
for i in range(1, len(definitions) + 1):
definition = definitions[i - 1]
process.stdin.write(" '/tmp/file%d.dat' using 1:3 with %s lw 2 title '%s'" %
(i, definition[2], definition[1]))
if i != len(definitions):
process.stdin.write(",")
process.stdin.write("\n")
process.stdin.close()
process.wait()
for i in range(1, len(definitions) + 1) :
os.remove("/tmp/file%d.dat" % i)
# main starts here
if len(sys.argv) != 3:
sys.exit(1)
interval = sys.argv[2]
timespan_clause = check_interval()
if timespan_clause == None:
sys.exit(1)
retries = 30
while not os.path.exists(mysql_socket_path) and retries > 0:
print "MySQL socket not found, waiting another %d seconds" % retries
retries = retries - 1
time.sleep(1)
if retries == 0:
sys.exit(2)
targetpath = sys.argv[1]
if not os.path.isdir(targetpath):
os.makedirs(targetpath)
with flock("/tmp/graph-gen.lock"):
definitions = [ [ 11, "Außentemperatur", "lines smooth bezier" ],
[ 12, "Ged. Außentemperatur", "lines" ] ]
do_plot("Aussentemperatur", "aussentemp", "Temperatur (°C)", definitions)
definitions = [ [ 13, "Raum-Soll", "lines" ],
[ 14, "Raum-Ist", "lines smooth bezier" ] ]
do_plot("Raumtemperatur", "raumtemp", "Temperatur (°C)", definitions)
definitions = [ [ 1, "Kessel-Soll", "lines" ],
[ 2, "Kessel-Ist", "lines smooth bezier" ],
[ 6, "Vorlauf HK1", "lines smooth bezier" ],
[ 8, "Vorlauf HK2", "lines smooth bezier" ],
[ 10, "Rücklauf", "lines smooth bezier" ] ]
do_plot("Temperaturen", "kessel", "Temperatur (°C)", definitions)
definitions = [ [ 3, "Solltemperatur", "lines" ],
[ 4, "Isttemperatur", "lines smooth bezier" ] ]
do_plot("Warmwasser", "ww", "Temperatur (°C)", definitions)
| gpl-3.0 | 6,674,389,353,177,872,000 | 33.28169 | 126 | 0.577034 | false | 3.232404 | false | false | false |
stefan-andritoiu/upm | examples/python/grovecollision.py | 4 | 2277 | #!/usr/bin/env python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_grovecollision as upmGrovecollision
def main():
    # This was tested with the Grove Collision Sensor
# Instantiate a Grove Collision on digital pin D2
myGrovecollision = upmGrovecollision.GroveCollision(2)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from myGrovecollision
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
collisionState = False
print("No collision")
while(1):
if (myGrovecollision.isColliding() and not collisionState):
print("Collision!")
collisionState = True
elif (not myGrovecollision.isColliding() and collisionState):
print("No collision")
collisionState = False
if __name__ == '__main__':
main()
| mit | 4,105,925,778,254,278,000 | 36.95 | 73 | 0.72332 | false | 4.177982 | false | false | false |
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.kidsplace/channels/nickjr.py | 1 | 2501 | import urllib, re
import helper
import json
thisChannel = "nickjr"
baseLink = "http://www.nickjr.com"
apiLink = "http://www.nickjr.com/common/data/kids/get-kids-config-data.jhtml?fsd=/dynaboss&urlAlias=%s&af=false"
extractPlaylist = re.compile("<h2 id=\".*?\"><span>(.*?)</span></h2>.*?<ul>(.*?)</ul>",re.DOTALL)
def mainPage():
page = helper.load_page(apiLink%("kids-video-landing"))
data = json.loads(page)
items = data['config']['promos'][0]['items']
for category in items:
catName = helper.removeHtmlSpecialChars(category['title'])
catLink = apiLink%(category['urlAlias'])
catImg = baseLink+category['thumbnail'];
helper.addDirectoryItem(catName, {"channel":thisChannel,"action":"showCategory","link":catLink}, catImg)
helper.endOfDirectory()
def showCategory(link):
page = helper.load_page(urllib.unquote(link))
page = page.replace("\xED","\xc3\xad")
data = json.loads(page)
items = data['config']['promos'][0]['items']
for video in items:
vidName = helper.removeHtmlSpecialChars(video['title'])
vidId = video['id']
vidImg = video['thumbnail']
helper.addDirectoryItem(vidName, {"channel":thisChannel,"action":"playVideo","link":vidId}, vidImg, False)
helper.endOfDirectory()
def playVideo(link):
playlistLink = "http://www.nickjr.com/dynamo/video/data/mrssGen.jhtml?type=network&loc=sidebar&hub=njParentsHub&mode=playlist&mgid=mgid:cms:item:nickjr.com:"
playlistLink = playlistLink+link
page = helper.load_page(playlistLink,True)
media = helper.extractMrss(page)
player = media[0]['player']
link = media[0]['url']
response = urllib.urlopen(urllib.unquote(player))
mediaPlayer = response.geturl()
page = helper.load_page(urllib.unquote(link))
extractRtmpUrls = re.compile("<rendition.*?height=[\"\']+([0-9]*)[\"\']+.*?>[\n\ \t]*<src>(.*?)</src>[\n\ \t]*</rendition>")
streamUrl = ""
streamHeight = 0
for rtmpItem in extractRtmpUrls.finditer(page):
if rtmpItem.group(1)>streamHeight:
streamUrl = rtmpItem.group(2)
streamUrl = streamUrl + " swfUrl=" + mediaPlayer + " swfVfy=1"
helper.setResolvedUrl(streamUrl)
params = helper.get_params()
if len(params) == 1:
mainPage()
else:
if params['action'] == "showCategory":
showCategory(params['link'])
if params['action'] == "playVideo":
playVideo(params['link'])
| apache-2.0 | -8,844,964,255,450,748,000 | 32.346667 | 161 | 0.637745 | false | 3.352547 | false | false | false |
robocomp/robocomp-robolab | components/hardware/external_control/joystickSimulatorController/src/specificworker.py | 1 | 4413 | #
# Copyright (C) 2019 by Bartlomiej Kocot
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, traceback, time,Ice,math,copy
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from PySide import *
from genericworker import *
# If RoboComp was compiled with Python bindings you can use InnerModel in Python
# sys.path.append('/opt/robocomp/lib')
# import librobocomp_qmat
# import librobocomp_osgviewer
# import librobocomp_innermodel
class SpecificWorker(GenericWorker):
def __init__(self, proxy_map):
super(SpecificWorker, self).__init__(proxy_map)
self.timer.stop()
self.differentialrobot_proxy = proxy_map["DifferentialRobotProxy"]
self.mousePress = False
self.x=225
self.y=225
self.setGeometry(50, 50, 500, 500)
self.setWindowTitle("Joystick Simulator Controller")
self.setStyleSheet("QMainWindow {background: 'white';}");
self.show()
self.Speed=0.0
self.Rotation=0.0
self.addJoystickImage()
def setParams(self, params):
return True
def addJoystickImage(self):
self.Circle = QtGui.QLabel(self)
self.JoyStick = QtGui.QLabel(self)
self.SpeedText = QtGui.QLabel(self)
self.SpeedValue = QtGui.QLabel(self)
self.RotationText = QtGui.QLabel(self)
self.RotationValue = QtGui.QLabel(self)
circlePixmap = QtGui.QPixmap('src/img/circle.png')
joystickPixmap = QtGui.QPixmap('src/img/joystick.png')
self.Circle.setPixmap(circlePixmap)
self.Circle.resize(200,200)
self.Circle.move(150, 150)
self.Circle.show()
self.JoyStick.setObjectName("JoyStick")
self.JoyStick.setPixmap(joystickPixmap)
self.JoyStick.resize(50,50)
self.JoyStick.move(225,225)
self.JoyStick.show()
self.SpeedText.setText("Speed: ")
self.SpeedText.move(400,20)
self.SpeedText.show()
self.RotationText.setText("Rotation: ")
self.RotationText.move(400,40)
self.RotationText.show()
self.SpeedValue.setText("0")
self.SpeedValue.move(450,20)
self.SpeedValue.show()
self.RotationValue.setText("0")
self.RotationValue.move(465,40)
self.RotationValue.show()
def mousePressEvent(self, event):
if self.distance(event.x()-25,event.y()-25,self.x,self.y)<=25:
self.mousePress=True
def mouseReleaseEvent(self, event):
self.mousePress=False
self.comeBack()
self.x=225
self.y=225
def mouseMoveEvent(self,event):
if self.mousePress == True:
if self.distance(event.x()-25,event.y()-25,225,225) < 100:
self.x=event.x()-25
self.y=event.y()-25
self.setPosition(self.x,self.y)
else:
sin,cos=self.trigAlpha(event.x()-25,event.y()-25)
self.x,self.y=self.findPoint(cos,sin)
self.setPosition(self.x,self.y)
def setPosition(self,x,y):
self.JoyStick.move(x,y)
        # Scale pixel offsets into robot commands: full deflection (100 px)
        # maps to +/-2200 speed units and +/-2.0 rotation units.
        self.Speed=(225-y)*22
        self.Rotation=(x-225)*0.02
self.SpeedValue.setText(str(self.Speed))
self.RotationValue.setText(str(self.Rotation))
self.differentialrobot_proxy.setSpeedBase(self.Speed, self.Rotation)
def comeBack(self):
self.JoyStick.move(225,225)
self.Speed = 0
self.Rotation = 0
self.SpeedValue.setText(str(self.Speed))
self.RotationValue.setText(str(self.Rotation))
self.differentialrobot_proxy.setSpeedBase(self.Speed, self.Rotation)
def distance(self,x1,y1,x2,y2):
result = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)
result = math.sqrt(result)
return result
    def trigAlpha(self,x,y):
        # Sine and cosine of the angle between the positive x axis (vecA)
        # and the pointer vector from the widget centre (vecB), computed
        # from the normalized dot product (cos) and cross product (sin).
        vecA_X=100.0
        vecA_Y=0
        vecB_X=x-225.0
        vecB_Y=y-225.0
        vecA_length=math.sqrt(vecA_X*vecA_X+vecA_Y*vecA_Y)
        vecB_length=math.sqrt(vecB_X*vecB_X+vecB_Y*vecB_Y)
        cosAlpha=(vecA_X*vecB_X+vecA_Y*vecB_Y)/(vecA_length*vecB_length)
        sinAlpha=(vecA_X*vecB_Y-vecA_Y*vecB_X)/(vecA_length*vecB_length)
        return sinAlpha,cosAlpha
    def findPoint(self,cos,sin):
        # Project the pointer direction onto the radius-100 circle around
        # the centre (225, 225), clamping the knob to the circle's edge.
        pointX=225+100*cos
        pointY=225+100*sin
        return pointX,pointY
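    # Worked example (illustrative, with hypothetical coordinates): a drag to
    # widget point (375, 300) yields an adjusted pointer of (350, 275), so
    # vecB = (125, 50), trigAlpha returns (sin, cos) ~ (0.371, 0.928), and
    # findPoint clamps the knob to roughly (317.8, 262.1) on the circle.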
@QtCore.Slot()
def compute(self):
return True
| gpl-3.0 | -6,077,114,154,922,575,000 | 31.688889 | 80 | 0.722864 | false | 2.725757 | false | false | false |
hgrimelid/feincms | feincms/admin/filterspecs.py | 1 | 3277 | # encoding=utf-8
# Thanks to http://www.djangosnippets.org/snippets/1051/
#
# Authors: Marinho Brandao <marinho at gmail.com>
# Guilherme M. Gondim (semente) <semente at taurinus.org>
from django.contrib.admin.filterspecs import FilterSpec, ChoicesFilterSpec
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
class ParentFilterSpec(ChoicesFilterSpec):
"""
Improved list_filter display for parent Pages by nicely indenting hierarchy
In theory this would work with any mptt model which uses a "title" attribute.
my_model_field.page_parent_filter = True
"""
def __init__(self, f, request, params, model, model_admin, field_path=None):
from feincms.utils import shorten_string
super(ParentFilterSpec, self).__init__(f, request, params, model, model_admin)
parent_ids = model.objects.exclude(parent=None).values_list("parent__id", flat=True).order_by("parent__id").distinct()
parents = model.objects.filter(pk__in=parent_ids).values_list("pk", "title", "level")
self.lookup_choices = [(pk, "%s%s" % (" " * level, shorten_string(title, max_length=25))) for pk, title, level in parents]
def choices(self, cl):
yield {
'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')
}
for pk, title in self.lookup_choices:
yield {
'selected': pk == int(self.lookup_val or '0'),
'query_string': cl.get_query_string({self.lookup_kwarg: pk}),
'display': mark_safe(smart_unicode(title))
}
def title(self):
return _('Parent')
class CategoryFilterSpec(ChoicesFilterSpec):
"""
Customization of ChoicesFilterSpec which sorts in the user-expected format
my_model_field.category_filter = True
"""
def __init__(self, f, request, params, model, model_admin, field_path=None):
super(CategoryFilterSpec, self).__init__(f, request, params, model, model_admin)
# Restrict results to categories which are actually in use:
self.lookup_choices = [
(i.pk, unicode(i)) for i in f.related.parent_model.objects.exclude(**{
f.related.var_name: None
})
]
self.lookup_choices.sort(key=lambda i: i[1])
def choices(self, cl):
yield {
'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')
}
for pk, title in self.lookup_choices:
yield {
'selected': pk == int(self.lookup_val or '0'),
'query_string': cl.get_query_string({self.lookup_kwarg: pk}),
'display': mark_safe(smart_unicode(title))
}
def title(self):
return _('Category')
# registering the filter
FilterSpec.filter_specs.insert(0,
(lambda f: getattr(f, 'parent_filter', False), ParentFilterSpec)
)
FilterSpec.filter_specs.insert(1,
(lambda f: getattr(f, 'category_filter', False), CategoryFilterSpec)
)
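# Illustrative usage sketch (added; the model and field names below are
# hypothetical, not part of feincms itself): mark the fields after the model
# is defined so the lambdas above pick the filters up.
#
#   class Page(MPTTModel):
#       title = models.CharField(max_length=200)
#       parent = models.ForeignKey('self', blank=True, null=True)
#       category = models.ForeignKey('Category', blank=True, null=True)
#
#   Page._meta.get_field('parent').parent_filter = True
#   Page._meta.get_field('category').category_filter = True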
| bsd-3-clause | 7,906,961,818,176,670,000 | 35.010989 | 135 | 0.615197 | false | 3.698646 | false | false | false |
endlessm/chromium-browser | third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/MotionEST.py | 7 | 3453 | ## Copyright (c) 2020 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
# coding: utf-8
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from Util import drawMF, MSE
"""The Base Class of Estimators"""
class MotionEST(object):
"""
constructor:
cur_f: current frame
ref_f: reference frame
blk_sz: block size
"""
def __init__(self, cur_f, ref_f, blk_sz):
self.cur_f = cur_f
self.ref_f = ref_f
self.blk_sz = blk_sz
#convert RGB to YUV
self.cur_yuv = np.array(self.cur_f.convert('YCbCr'), dtype=np.int)
self.ref_yuv = np.array(self.ref_f.convert('YCbCr'), dtype=np.int)
#frame size
self.width = self.cur_f.size[0]
self.height = self.cur_f.size[1]
#motion field size
self.num_row = self.height // self.blk_sz
self.num_col = self.width // self.blk_sz
#initialize motion field
self.mf = np.zeros((self.num_row, self.num_col, 2))
"""estimation function Override by child classes"""
def motion_field_estimation(self):
pass
"""
distortion of a block:
cur_r: current row
cur_c: current column
mv: motion vector
metric: distortion metric
"""
def block_dist(self, cur_r, cur_c, mv, metric=MSE):
cur_x = cur_c * self.blk_sz
cur_y = cur_r * self.blk_sz
h = min(self.blk_sz, self.height - cur_y)
w = min(self.blk_sz, self.width - cur_x)
cur_blk = self.cur_yuv[cur_y:cur_y + h, cur_x:cur_x + w, :]
ref_x = int(cur_x + mv[1])
ref_y = int(cur_y + mv[0])
if 0 <= ref_x < self.width - w and 0 <= ref_y < self.height - h:
ref_blk = self.ref_yuv[ref_y:ref_y + h, ref_x:ref_x + w, :]
else:
ref_blk = np.zeros((h, w, 3))
return metric(cur_blk, ref_blk)
"""
distortion of motion field
"""
def distortion(self, mask=None, metric=MSE):
loss = 0
count = 0
for i in xrange(self.num_row):
for j in xrange(self.num_col):
if mask is not None and mask[i, j]:
continue
loss += self.block_dist(i, j, self.mf[i, j], metric)
count += 1
return loss / count
"""evaluation compare the difference with ground truth"""
def motion_field_evaluation(self, ground_truth):
loss = 0
count = 0
gt = ground_truth.mf
mask = ground_truth.mask
for i in xrange(self.num_row):
for j in xrange(self.num_col):
if mask is not None and mask[i][j]:
continue
loss += LA.norm(gt[i, j] - self.mf[i, j])
count += 1
return loss / count
"""render the motion field"""
def show(self, ground_truth=None, size=10):
cur_mf = drawMF(self.cur_f, self.blk_sz, self.mf)
if ground_truth is None:
n_row = 1
else:
gt_mf = drawMF(self.cur_f, self.blk_sz, ground_truth)
n_row = 2
plt.figure(figsize=(n_row * size, size * self.height / self.width))
plt.subplot(1, n_row, 1)
plt.imshow(cur_mf)
plt.title('Estimated Motion Field')
if ground_truth is not None:
plt.subplot(1, n_row, 2)
plt.imshow(gt_mf)
plt.title('Ground Truth')
plt.tight_layout()
plt.show()
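# Illustrative sketch (added, not part of the original file): a subclass is
# expected to fill self.mf inside motion_field_estimation(). A brute-force
# full search over a hypothetical +/-8 pixel window could look like:
#
#   class ExhaustiveEST(MotionEST):
#
#     def motion_field_estimation(self):
#       for r in xrange(self.num_row):
#         for c in xrange(self.num_col):
#           candidates = [([dy, dx], self.block_dist(r, c, [dy, dx]))
#                         for dy in xrange(-8, 9) for dx in xrange(-8, 9)]
#           self.mf[r, c] = min(candidates, key=lambda p: p[1])[0]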
| bsd-3-clause | -3,653,202,839,931,690,500 | 28.512821 | 71 | 0.609904 | false | 3.06117 | false | false | false |
tkw1536/PythonCaseClass | case_class/exceptions.py | 1 | 6411 | """
Exceptions for the case_class module
Copyright (c) 2016 Tom Wiesing -- licensed under MIT, see LICENSE
"""
class CaseClassException(Exception):
""" Base Exception for all exceptions raised by the case_class module. """
pass
#
# Instantiation of CaseClasses
#
class NotInstantiableClassException(CaseClassException):
""" Exception that is raised when a class can not be instantiated. """
def __init__(self, msg, cls):
""" Creates a new NotInstantiableClassException instance.
        :param msg: Message representing this NotInstantiableClassException.
:type msg: str
:param cls: Class that the user tried to instantiate.
:type cls: type
"""
super(NotInstantiableClassException, self).__init__(msg)
self.msg = msg
self.cls = cls
class NotInstantiableAbstractCaseClassException \
(NotInstantiableClassException):
""" Exception that is raised when an AbstractCaseClass is instantiated. """
def __init__(self, cls):
""" Creates a new NotInstantiableAbstractCaseClassException instance.
:param cls: AbstractCaseClass that can not be instantiated
:type cls: type
"""
super(NotInstantiableAbstractCaseClassException, self).__init__(
"Can not instantiate AbstractCaseClass %s" % (cls.__name__,), cls)
class NoCaseToCaseInheritanceException(Exception):
""" Exception that is raised when the user tries to
inherit from a CaseClass or AbstractCaseClass subclass. """
def __init__(self, name):
""" Creates a new NoCaseToCaseInheritanceException instance.
:param name: Name of the class the user tried to create.
:type name: str
"""
super(NoCaseToCaseInheritanceException, self).__init__(
"Unable to create class %s: " % (name,) +
"Case-to-case inheritance is prohibited. ")
self.name = name
#
# Signatures
#
class SignatureException(CaseClassException):
""" Base class for all exceptions related to signatures. """
pass
class MissingArgument(SignatureException):
""" Exception indicating that the value for a given argument is not
    specified fully. """
def __init__(self, name):
""" Creates a new NoSuchArgument instance.
:param,name: Name of the argument that does not have a value.
:type name. str
"""
super(MissingArgument, self).__init__("MissingArgument: Missing " +
"value for %s. " % (
name,))
self.__name = name #: str
@property
def name(self):
""" The name of the argument that does not have a value.
:rtype: str
"""
return self.__name
class NoSuchArgument(SignatureException):
""" Exception indicating that an argument does not exist. """
def __init__(self, name):
""" Creates a new NoSuchArgument instance.
        :param name: Name of the argument that does not exist.
        :type name: str
"""
super(NoSuchArgument, self).__init__("NoSuchArgument: No argument " +
"%s exists. " % (name,))
self.__name = name #: str
@property
def name(self):
""" The name of the argument that does not exist.
:rtype: str
"""
return self.__name
class NoDefaultValue(SignatureException):
""" Exception indicating that an argument has no default value. """
def __init__(self, name):
""" Creates a new NoDefaultValue instance.
        :param name: Name of the argument that has no default.
        :type name: str
"""
super(NoDefaultValue, self).__init__("NoDefaultValue: Argument " +
"%s has no default. " % (name,))
self.__name = name #: str
@property
def name(self):
""" The name of the argument that has no associated default value.
:rtype: str
"""
return self.__name
class AppliedSignatureException(CaseClassException):
""" Base class for all exceptions related to applied signatures. """
pass
class TooManyArguments(AppliedSignatureException):
""" Exception indicating that too many arguments were passed to a
signature. """
def __init__(self):
""" Creates a new TooManyArguments instance. """
super(TooManyArguments, self).__init__("TooManyArguments: Too many " +
"arguments were passed to the" +
" signature. ")
class TooManyKeyWordArguments(AppliedSignatureException):
""" Exception indicating that too many arguments were passed to a
signature. """
def __init__(self):
""" Creates a new TooManyKeyWordArguments instance. """
super(TooManyKeyWordArguments, self).__init__(
"TooManyKeyWordArguments: Too many " +
"arguments were passed to the" +
" signature. ")
class DoubleArgumentValue(AppliedSignatureException):
""" Exception indicating that an argument was passed more than once. """
def __init__(self, name):
""" Creates a new DoubleArgumentValue instance.
:param name: Name of the argument that was passed more than once.
:type name: str
"""
super(DoubleArgumentValue, self).__init__(
"DoubleArgumentValue: Argument %s was passed more " % (name,) +
"than once. ")
self.__name = name #: str
@property
def name(self):
""" The name of the argument that was passed more than once.
:rtype: str
"""
return self.__name
class ExtractorException(CaseClassException):
""" Common base class related to all extractors. """
pass
class ExtractorDoesNotMatch(ExtractorException):
""" raised when an extractor does not match a certain pattern. """
pass
__all__ = ["CaseClassException", "NotInstantiableClassException",
"NotInstantiableAbstractCaseClassException",
"NoCaseToCaseInheritanceException", "SignatureException",
"MissingArgument", "NoSuchArgument", "NoDefaultValue",
"AppliedSignatureException", "TooManyArguments",
"TooManyKeyWordArguments", "DoubleArgumentValue"]
| mit | -2,436,562,700,801,174,000 | 27.367257 | 79 | 0.609265 | false | 4.689832 | false | false | false |
kambysese/mne-python | mne/tests/test_morph.py | 4 | 42270 | # -*- coding: utf-8 -*-
# Author: Tommy Clausner <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_array_less, assert_allclose,
assert_array_equal)
from scipy.spatial.distance import cdist
from scipy.sparse import csr_matrix
import mne
from mne import (SourceEstimate, VolSourceEstimate, VectorSourceEstimate,
read_evokeds, SourceMorph, compute_source_morph,
read_source_morph, read_source_estimate,
read_forward_solution, grade_to_vertices,
setup_volume_source_space, make_forward_solution,
make_sphere_model, make_ad_hoc_cov, VolVectorSourceEstimate,
read_freesurfer_lut)
from mne.datasets import testing
from mne.fixes import _get_img_fdata
from mne.minimum_norm import (apply_inverse, read_inverse_operator,
make_inverse_operator)
from mne.source_space import (get_volume_labels_from_aseg, _get_mri_info_data,
_get_atlas_values, _add_interpolator,
_grid_interp)
from mne.transforms import quat_to_rot
from mne.utils import (requires_nibabel, check_version, requires_version,
requires_dipy, requires_h5py, catch_logging)
from mne.fixes import _get_args
# Setup paths
data_path = testing.data_path(download=False)
sample_dir = op.join(data_path, 'MEG', 'sample')
subjects_dir = op.join(data_path, 'subjects')
fname_evoked = op.join(sample_dir, 'sample_audvis-ave.fif')
fname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif')
fname_inv_vol = op.join(sample_dir,
'sample_audvis_trunc-meg-vol-7-meg-inv.fif')
fname_fwd_vol = op.join(sample_dir,
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_vol_w = op.join(sample_dir,
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_inv_surf = op.join(sample_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
'fsaverage_audvis_trunc-meg')
fname_smorph = op.join(sample_dir, 'sample_audvis_trunc-meg')
fname_t1 = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
fname_vol = op.join(subjects_dir, 'sample', 'bem', 'sample-volume-7mm-src.fif')
fname_brain = op.join(subjects_dir, 'sample', 'mri', 'brain.mgz')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_fs_vol = op.join(subjects_dir, 'fsaverage', 'bem',
'fsaverage-vol7-nointerp-src.fif.gz')
fname_aseg_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz')
fname_stc = op.join(sample_dir, 'fsaverage_audvis_trunc-meg')
def _real_vec_stc():
inv = read_inverse_operator(fname_inv_surf)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
return apply_inverse(evoked, inv, pick_ori='vector')
def test_sourcemorph_consistency():
"""Test SourceMorph class consistency."""
assert _get_args(SourceMorph.__init__)[1:] == \
mne.morph._SOURCE_MORPH_ATTRIBUTES
@testing.requires_testing_data
def test_sparse_morph():
"""Test sparse morphing."""
rng = np.random.RandomState(0)
vertices_fs = [np.sort(rng.permutation(np.arange(10242))[:4]),
np.sort(rng.permutation(np.arange(10242))[:6])]
data = rng.randn(10, 1)
stc_fs = SourceEstimate(data, vertices_fs, 1, 1, 'fsaverage')
spheres_fs = [mne.read_surface(op.join(
subjects_dir, 'fsaverage', 'surf', '%s.sphere.reg' % hemi))[0]
for hemi in ('lh', 'rh')]
spheres_sample = [mne.read_surface(op.join(
subjects_dir, 'sample', 'surf', '%s.sphere.reg' % hemi))[0]
for hemi in ('lh', 'rh')]
morph_fs_sample = compute_source_morph(
stc_fs, 'fsaverage', 'sample', sparse=True, spacing=None,
subjects_dir=subjects_dir)
stc_sample = morph_fs_sample.apply(stc_fs)
offset = 0
orders = list()
for v1, s1, v2, s2 in zip(stc_fs.vertices, spheres_fs,
stc_sample.vertices, spheres_sample):
dists = cdist(s1[v1], s2[v2])
order = np.argmin(dists, axis=-1)
assert_array_less(dists[np.arange(len(order)), order], 1.5) # mm
orders.append(order + offset)
offset += len(order)
assert_allclose(stc_fs.data, stc_sample.data[np.concatenate(orders)])
# Return
morph_sample_fs = compute_source_morph(
stc_sample, 'sample', 'fsaverage', sparse=True, spacing=None,
subjects_dir=subjects_dir)
stc_fs_return = morph_sample_fs.apply(stc_sample)
offset = 0
orders = list()
for v1, s, v2 in zip(stc_fs.vertices, spheres_fs, stc_fs_return.vertices):
dists = cdist(s[v1], s[v2])
order = np.argmin(dists, axis=-1)
assert_array_less(dists[np.arange(len(order)), order], 1.5) # mm
orders.append(order + offset)
offset += len(order)
assert_allclose(stc_fs.data, stc_fs_return.data[np.concatenate(orders)])
@testing.requires_testing_data
def test_xhemi_morph():
"""Test cross-hemisphere morphing."""
stc = read_source_estimate(fname_stc, subject='sample')
# smooth 1 for speed where possible
smooth = 4
spacing = 4
n_grade_verts = 2562
stc = compute_source_morph(
stc, 'sample', 'fsaverage_sym', smooth=smooth, warn=False,
spacing=spacing, subjects_dir=subjects_dir).apply(stc)
morph = compute_source_morph(
stc, 'fsaverage_sym', 'fsaverage_sym', smooth=1, xhemi=True,
warn=False, spacing=[stc.vertices[0], []],
subjects_dir=subjects_dir)
stc_xhemi = morph.apply(stc)
assert stc_xhemi.data.shape[0] == n_grade_verts
assert stc_xhemi.rh_data.shape[0] == 0
assert len(stc_xhemi.vertices[1]) == 0
assert stc_xhemi.lh_data.shape[0] == n_grade_verts
assert len(stc_xhemi.vertices[0]) == n_grade_verts
# complete reversal mapping
morph = compute_source_morph(
stc, 'fsaverage_sym', 'fsaverage_sym', smooth=smooth, xhemi=True,
warn=False, spacing=stc.vertices, subjects_dir=subjects_dir)
mm = morph.morph_mat
assert mm.shape == (n_grade_verts * 2,) * 2
assert mm.size > n_grade_verts * 2
assert mm[:n_grade_verts, :n_grade_verts].size == 0 # L to L
assert mm[n_grade_verts:, n_grade_verts:].size == 0 # R to L
assert mm[n_grade_verts:, :n_grade_verts].size > n_grade_verts # L to R
assert mm[:n_grade_verts, n_grade_verts:].size > n_grade_verts # R to L
# more complicated reversal mapping
vertices_use = [stc.vertices[0], np.arange(10242)]
n_src_verts = len(vertices_use[1])
assert vertices_use[0].shape == (n_grade_verts,)
assert vertices_use[1].shape == (n_src_verts,)
    # ensure it's sufficiently different to manifest round-trip errors
assert np.in1d(vertices_use[1], stc.vertices[1]).mean() < 0.3
morph = compute_source_morph(
stc, 'fsaverage_sym', 'fsaverage_sym', smooth=smooth, xhemi=True,
warn=False, spacing=vertices_use, subjects_dir=subjects_dir)
mm = morph.morph_mat
assert mm.shape == (n_grade_verts + n_src_verts, n_grade_verts * 2)
assert mm[:n_grade_verts, :n_grade_verts].size == 0
assert mm[n_grade_verts:, n_grade_verts:].size == 0
assert mm[:n_grade_verts, n_grade_verts:].size > n_grade_verts
assert mm[n_grade_verts:, :n_grade_verts].size > n_src_verts
# morph forward then back
stc_xhemi = morph.apply(stc)
morph = compute_source_morph(
stc_xhemi, 'fsaverage_sym', 'fsaverage_sym', smooth=smooth,
xhemi=True, warn=False, spacing=stc.vertices,
subjects_dir=subjects_dir)
stc_return = morph.apply(stc_xhemi)
for hi in range(2):
assert_array_equal(stc_return.vertices[hi], stc.vertices[hi])
correlation = np.corrcoef(stc.data.ravel(), stc_return.data.ravel())[0, 1]
assert correlation > 0.9 # not great b/c of sparse grade + small smooth
@testing.requires_testing_data
@pytest.mark.parametrize('smooth, lower, upper, n_warn, dtype', [
(None, 0.959, 0.963, 0, float),
(3, 0.968, 0.971, 2, complex),
('nearest', 0.98, 0.99, 0, float),
])
def test_surface_source_morph_round_trip(smooth, lower, upper, n_warn, dtype):
"""Test round-trip morphing yields similar STCs."""
kwargs = dict(smooth=smooth, warn=True, subjects_dir=subjects_dir)
stc = mne.read_source_estimate(fname_smorph)
if dtype is complex:
stc.data = 1j * stc.data
assert_array_equal(stc.data.real, 0.)
if smooth == 'nearest' and not check_version('scipy', '1.3'):
with pytest.raises(ValueError, match='required to use nearest'):
morph = compute_source_morph(stc, 'sample', 'fsaverage', **kwargs)
return
with pytest.warns(None) as w:
morph = compute_source_morph(stc, 'sample', 'fsaverage', **kwargs)
w = [ww for ww in w if 'vertices not included' in str(ww.message)]
assert len(w) == n_warn
assert morph.morph_mat.shape == (20484, len(stc.data))
stc_fs = morph.apply(stc)
morph_back = compute_source_morph(
stc_fs, 'fsaverage', 'sample', spacing=stc.vertices, **kwargs)
assert morph_back.morph_mat.shape == (len(stc.data), 20484)
stc_back = morph_back.apply(stc_fs)
corr = np.corrcoef(stc.data.ravel(), stc_back.data.ravel())[0, 1]
assert lower <= corr <= upper
# check the round-trip power
assert_power_preserved(stc, stc_back)
def assert_power_preserved(orig, new, limits=(1., 1.05)):
"""Assert that the power is preserved during a round-trip morph."""
__tracebackhide__ = True
for kind in ('real', 'imag'):
numer = np.linalg.norm(getattr(orig.data, kind))
denom = np.linalg.norm(getattr(new.data, kind))
if numer == denom == 0.: # no data of this type
continue
power_ratio = numer / denom
min_, max_ = limits
assert min_ < power_ratio < max_, f'Power ratio {kind} = {power_ratio}'
@requires_h5py
@testing.requires_testing_data
def test_surface_vector_source_morph(tmpdir):
"""Test surface and vector source estimate morph."""
inverse_operator_surf = read_inverse_operator(fname_inv_surf)
stc_surf = read_source_estimate(fname_smorph, subject='sample')
stc_surf.crop(0.09, 0.1) # for faster computation
stc_vec = _real_vec_stc()
source_morph_surf = compute_source_morph(
inverse_operator_surf['src'], subjects_dir=subjects_dir,
smooth=1, warn=False) # smooth 1 for speed
assert source_morph_surf.subject_from == 'sample'
assert source_morph_surf.subject_to == 'fsaverage'
assert source_morph_surf.kind == 'surface'
assert isinstance(source_morph_surf.src_data, dict)
assert isinstance(source_morph_surf.src_data['vertices_from'], list)
assert isinstance(source_morph_surf, SourceMorph)
stc_surf_morphed = source_morph_surf.apply(stc_surf)
assert isinstance(stc_surf_morphed, SourceEstimate)
stc_vec_morphed = source_morph_surf.apply(stc_vec)
with pytest.raises(ValueError, match="Invalid value for the 'output'"):
source_morph_surf.apply(stc_surf, output='nifti1')
# check if correct class after morphing
assert isinstance(stc_surf_morphed, SourceEstimate)
assert isinstance(stc_vec_morphed, VectorSourceEstimate)
# check __repr__
assert 'surface' in repr(source_morph_surf)
# check loading and saving for surf
source_morph_surf.save(tmpdir.join('42.h5'))
source_morph_surf_r = read_source_morph(tmpdir.join('42.h5'))
assert (all([read == saved for read, saved in
zip(sorted(source_morph_surf_r.__dict__),
sorted(source_morph_surf.__dict__))]))
# check wrong subject correction
stc_surf.subject = None
assert isinstance(source_morph_surf.apply(stc_surf), SourceEstimate)
# degenerate
stc_vol = read_source_estimate(fname_vol_w, 'sample')
with pytest.raises(TypeError, match='stc_from must be an instance'):
source_morph_surf.apply(stc_vol)
@requires_h5py
@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_source_morph_basic(tmpdir):
"""Test volume source estimate morph, special cases and exceptions."""
import nibabel as nib
inverse_operator_vol = read_inverse_operator(fname_inv_vol)
stc_vol = read_source_estimate(fname_vol_w, 'sample')
# check for invalid input type
with pytest.raises(TypeError, match='src must be'):
compute_source_morph(src=42)
# check for raising an error if neither
# inverse_operator_vol['src'][0]['subject_his_id'] nor subject_from is set,
# but attempting to perform a volume morph
src = inverse_operator_vol['src']
assert src._subject is None # already None on disk (old!)
with pytest.raises(ValueError, match='subject_from could not be inferred'):
with pytest.warns(RuntimeWarning, match='recommend regenerating'):
compute_source_morph(src=src, subjects_dir=subjects_dir)
# check infer subject_from from src[0]['subject_his_id']
src[0]['subject_his_id'] = 'sample'
with pytest.raises(ValueError, match='Inter-hemispheric morphing'):
compute_source_morph(src=src, subjects_dir=subjects_dir, xhemi=True)
with pytest.raises(ValueError, match='Only surface.*sparse morph'):
compute_source_morph(src=src, sparse=True, subjects_dir=subjects_dir)
# terrible quality but fast
zooms = 20
kwargs = dict(zooms=zooms, niter_sdr=(1,), niter_affine=(1,))
source_morph_vol = compute_source_morph(
subjects_dir=subjects_dir, src=fname_inv_vol,
subject_from='sample', **kwargs)
shape = (13,) * 3 # for the given zooms
assert source_morph_vol.subject_from == 'sample'
# the brain used in sample data has shape (255, 255, 255)
assert tuple(source_morph_vol.sdr_morph.domain_shape) == shape
assert tuple(source_morph_vol.pre_affine.domain_shape) == shape
# proofs the above
assert_array_equal(source_morph_vol.zooms, (zooms,) * 3)
# assure proper src shape
mri_size = (src[0]['mri_height'], src[0]['mri_depth'], src[0]['mri_width'])
assert source_morph_vol.src_data['src_shape_full'] == mri_size
fwd = read_forward_solution(fname_fwd_vol)
fwd['src'][0]['subject_his_id'] = 'sample' # avoid further warnings
source_morph_vol = compute_source_morph(
fwd['src'], 'sample', 'sample', subjects_dir=subjects_dir,
**kwargs)
# check wrong subject_to
with pytest.raises(IOError, match='cannot read file'):
compute_source_morph(fwd['src'], 'sample', '42',
subjects_dir=subjects_dir)
# two different ways of saving
source_morph_vol.save(tmpdir.join('vol'))
# check loading
source_morph_vol_r = read_source_morph(tmpdir.join('vol-morph.h5'))
# check for invalid file name handling ()
with pytest.raises(IOError, match='not found'):
read_source_morph(tmpdir.join('42'))
# check morph
stc_vol_morphed = source_morph_vol.apply(stc_vol)
# old way, verts do not match
assert not np.array_equal(stc_vol_morphed.vertices[0], stc_vol.vertices[0])
# vector
stc_vol_vec = VolVectorSourceEstimate(
np.tile(stc_vol.data[:, np.newaxis], (1, 3, 1)),
stc_vol.vertices, 0, 1)
stc_vol_vec_morphed = source_morph_vol.apply(stc_vol_vec)
assert isinstance(stc_vol_vec_morphed, VolVectorSourceEstimate)
for ii in range(3):
assert_allclose(stc_vol_vec_morphed.data[:, ii], stc_vol_morphed.data)
# check output as NIfTI
assert isinstance(source_morph_vol.apply(stc_vol_vec, output='nifti2'),
nib.Nifti2Image)
# check for subject_from mismatch
source_morph_vol_r.subject_from = '42'
with pytest.raises(ValueError, match='subject_from must match'):
source_morph_vol_r.apply(stc_vol_morphed)
# check if nifti is in grid morph space with voxel_size == spacing
img_morph_res = source_morph_vol.apply(stc_vol, output='nifti1')
# assure morph spacing
assert isinstance(img_morph_res, nib.Nifti1Image)
assert img_morph_res.header.get_zooms()[:3] == (zooms,) * 3
# assure src shape
img_mri_res = source_morph_vol.apply(stc_vol, output='nifti1',
mri_resolution=True)
assert isinstance(img_mri_res, nib.Nifti1Image)
assert (img_mri_res.shape == (src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width']) +
(img_mri_res.shape[3],))
# check if nifti is defined resolution with voxel_size == (5., 5., 5.)
img_any_res = source_morph_vol.apply(stc_vol, output='nifti1',
mri_resolution=(5., 5., 5.))
assert isinstance(img_any_res, nib.Nifti1Image)
assert img_any_res.header.get_zooms()[:3] == (5., 5., 5.)
# check if morph outputs correct data
assert isinstance(stc_vol_morphed, VolSourceEstimate)
# check if loaded and saved objects contain the same
assert (all([read == saved for read, saved in
zip(sorted(source_morph_vol_r.__dict__),
sorted(source_morph_vol.__dict__))]))
# check __repr__
assert 'volume' in repr(source_morph_vol)
# check Nifti2Image
assert isinstance(
source_morph_vol.apply(stc_vol, mri_resolution=True,
mri_space=True, output='nifti2'),
nib.Nifti2Image)
# Degenerate conditions
with pytest.raises(TypeError, match='output must be'):
source_morph_vol.apply(stc_vol, output=1)
with pytest.raises(ValueError, match='subject_from does not match'):
compute_source_morph(src=src, subject_from='42')
with pytest.raises(ValueError, match='output'):
source_morph_vol.apply(stc_vol, output='42')
with pytest.raises(ValueError, match='subject_to cannot be None'):
compute_source_morph(src, 'sample', None,
subjects_dir=subjects_dir)
# Check if not morphed, but voxel size not boolean, raise ValueError.
# Note that this check requires dipy to not raise the dipy ImportError
# before checking if the actual voxel size error will raise.
with pytest.raises(ValueError, match='Cannot infer original voxel size'):
stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=4)
stc_surf = read_source_estimate(fname_stc, 'sample')
with pytest.raises(TypeError, match='stc_from must be an instance'):
source_morph_vol.apply(stc_surf)
# src_to
source_morph_vol = compute_source_morph(
fwd['src'], subject_from='sample', src_to=fwd['src'],
subject_to='sample', subjects_dir=subjects_dir, **kwargs)
stc_vol_2 = source_morph_vol.apply(stc_vol)
# new way, verts match
assert_array_equal(stc_vol.vertices[0], stc_vol_2.vertices[0])
stc_vol_bad = VolSourceEstimate(
stc_vol.data[:-1], [stc_vol.vertices[0][:-1]],
stc_vol.tmin, stc_vol.tstep)
match = (
'vertices do not match between morph \\(4157\\) and stc \\(4156\\).*'
'\n.*\n.*\n.*Vertices were likely excluded during forward computatio.*'
)
with pytest.raises(ValueError, match=match):
source_morph_vol.apply(stc_vol_bad)
# nifti outputs and stc equiv
img_vol = source_morph_vol.apply(stc_vol, output='nifti1')
img_vol_2 = stc_vol_2.as_volume(src=fwd['src'], mri_resolution=False)
assert_allclose(img_vol.affine, img_vol_2.affine)
img_vol = img_vol.get_fdata()
img_vol_2 = img_vol_2.get_fdata()
assert img_vol.shape == img_vol_2.shape
assert_allclose(img_vol, img_vol_2)
@requires_h5py
@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
@pytest.mark.parametrize(
'subject_from, subject_to, lower, upper, dtype, morph_mat', [
('sample', 'fsaverage', 5.9, 6.1, float, False),
('fsaverage', 'fsaverage', 0., 0.1, float, False),
('sample', 'sample', 0., 0.1, complex, False),
('sample', 'sample', 0., 0.1, float, True), # morph_mat
('sample', 'fsaverage', 10, 12, float, True), # morph_mat
])
def test_volume_source_morph_round_trip(
tmpdir, subject_from, subject_to, lower, upper, dtype, morph_mat,
monkeypatch):
"""Test volume source estimate morph round-trips well."""
import nibabel as nib
from nibabel.processing import resample_from_to
src = dict()
if morph_mat:
# ~1.5 minutes with pos=7. (4157 morphs!) for sample, so only test
# morph_mat computation mode with a few labels
label_names = sorted(get_volume_labels_from_aseg(fname_aseg))[1:2]
if 'sample' in (subject_from, subject_to):
src['sample'] = setup_volume_source_space(
'sample', subjects_dir=subjects_dir,
volume_label=label_names, mri=fname_aseg)
assert sum(s['nuse'] for s in src['sample']) == 12
if 'fsaverage' in (subject_from, subject_to):
src['fsaverage'] = setup_volume_source_space(
'fsaverage', subjects_dir=subjects_dir,
volume_label=label_names[:3], mri=fname_aseg_fs)
assert sum(s['nuse'] for s in src['fsaverage']) == 16
else:
assert not morph_mat
if 'sample' in (subject_from, subject_to):
src['sample'] = mne.read_source_spaces(fname_vol)
src['sample'][0]['subject_his_id'] = 'sample'
assert src['sample'][0]['nuse'] == 4157
if 'fsaverage' in (subject_from, subject_to):
# Created to save space with:
#
# bem = op.join(op.dirname(mne.__file__), 'data', 'fsaverage',
# 'fsaverage-inner_skull-bem.fif')
# src_fsaverage = mne.setup_volume_source_space(
# 'fsaverage', pos=7., bem=bem, mindist=0,
# subjects_dir=subjects_dir, add_interpolator=False)
# mne.write_source_spaces(fname_fs_vol, src_fsaverage,
# overwrite=True)
#
# For speed we do it without the interpolator because it's huge.
src['fsaverage'] = mne.read_source_spaces(fname_fs_vol)
src['fsaverage'][0].update(
vol_dims=np.array([23, 29, 25]), seg_name='brain')
_add_interpolator(src['fsaverage'])
assert src['fsaverage'][0]['nuse'] == 6379
src_to, src_from = src[subject_to], src[subject_from]
del src
# No SDR just for speed once everything works
kwargs = dict(niter_sdr=(), niter_affine=(1,),
subjects_dir=subjects_dir, verbose=True)
morph_from_to = compute_source_morph(
src=src_from, src_to=src_to, subject_to=subject_to, **kwargs)
morph_to_from = compute_source_morph(
src=src_to, src_to=src_from, subject_to=subject_from, **kwargs)
nuse = sum(s['nuse'] for s in src_from)
assert nuse > 10
use = np.linspace(0, nuse - 1, 10).round().astype(int)
data = np.eye(nuse)[:, use]
if dtype is complex:
data = data * 1j
vertices = [s['vertno'] for s in src_from]
stc_from = VolSourceEstimate(data, vertices, 0, 1)
with catch_logging() as log:
stc_from_rt = morph_to_from.apply(
morph_from_to.apply(stc_from, verbose='debug'))
log = log.getvalue()
assert 'individual volume morph' in log
maxs = np.argmax(stc_from_rt.data, axis=0)
src_rr = np.concatenate([s['rr'][s['vertno']] for s in src_from])
dists = 1000 * np.linalg.norm(src_rr[use] - src_rr[maxs], axis=1)
mu = np.mean(dists)
# fsaverage=5.99; 7.97 without additional src_ras_t fix
# fsaverage=7.97; 25.4 without src_ras_t fix
assert lower <= mu < upper, f'round-trip distance {mu}'
# check that pre_affine is close to identity when subject_to==subject_from
if subject_to == subject_from:
for morph in (morph_to_from, morph_from_to):
assert_allclose(
morph.pre_affine.affine, np.eye(4), atol=1e-2)
# check that power is more or less preserved (labelizing messes with this)
if morph_mat:
if subject_to == 'fsaverage':
limits = (18, 18.5)
else:
limits = (7, 7.5)
else:
limits = (1, 1.2)
stc_from_unit = stc_from.copy().crop(0, 0)
stc_from_unit._data.fill(1.)
stc_from_unit_rt = morph_to_from.apply(morph_from_to.apply(stc_from_unit))
assert_power_preserved(stc_from_unit, stc_from_unit_rt, limits=limits)
if morph_mat:
fname = tmpdir.join('temp-morph.h5')
morph_to_from.save(fname)
morph_to_from = read_source_morph(fname)
assert morph_to_from.vol_morph_mat is None
morph_to_from.compute_vol_morph_mat(verbose=True)
morph_to_from.save(fname, overwrite=True)
morph_to_from = read_source_morph(fname)
assert isinstance(morph_to_from.vol_morph_mat, csr_matrix), 'csr'
# equivalence (plus automatic calling)
assert morph_from_to.vol_morph_mat is None
monkeypatch.setattr(mne.morph, '_VOL_MAT_CHECK_RATIO', 0.)
with catch_logging() as log:
with pytest.warns(RuntimeWarning, match=r'calling morph\.compute'):
stc_from_rt_lin = morph_to_from.apply(
morph_from_to.apply(stc_from, verbose='debug'))
assert isinstance(morph_from_to.vol_morph_mat, csr_matrix), 'csr'
log = log.getvalue()
assert 'sparse volume morph matrix' in log
assert_allclose(stc_from_rt.data, stc_from_rt_lin.data)
del stc_from_rt_lin
stc_from_unit_rt_lin = morph_to_from.apply(
morph_from_to.apply(stc_from_unit))
assert_allclose(stc_from_unit_rt.data, stc_from_unit_rt_lin.data)
del stc_from_unit_rt_lin
del stc_from, stc_from_rt
# before and after morph, check the proportion of vertices
# that are inside and outside the brainmask.mgz
brain = nib.load(op.join(subjects_dir, subject_from, 'mri', 'brain.mgz'))
mask = _get_img_fdata(brain) > 0
if subject_from == subject_to == 'sample':
for stc in [stc_from_unit, stc_from_unit_rt]:
img = stc.as_volume(src_from, mri_resolution=True)
img = nib.Nifti1Image( # abs to convert complex
np.abs(_get_img_fdata(img)[:, :, :, 0]), img.affine)
img = _get_img_fdata(resample_from_to(img, brain, order=1))
assert img.shape == mask.shape
in_ = img[mask].astype(bool).mean()
out = img[~mask].astype(bool).mean()
if morph_mat:
out_max = 0.001
in_min, in_max = 0.005, 0.007
else:
out_max = 0.02
in_min, in_max = 0.97, 0.98
assert out < out_max, f'proportion out of volume {out}'
assert in_min < in_ < in_max, f'proportion inside volume {in_}'
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morph_stc_dense():
"""Test morphing stc."""
subject_from = 'sample'
subject_to = 'fsaverage'
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_to = read_source_estimate(fname_fmorph)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
assert_array_equal(stc_to.time_as_index([0.09, 0.1], use_rounding=True),
[0, len(stc_to.times) - 1])
    # After the deprecation period, change this to:
morph = compute_source_morph(
subject_to=subject_to, spacing=3, smooth=12, src=stc_from,
subjects_dir=subjects_dir, precompute=True)
assert morph.vol_morph_mat is None # a no-op for surface
stc_to1 = morph.apply(stc_from)
assert_allclose(stc_to.data, stc_to1.data, atol=1e-5)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert np.corrcoef(mean_to, mean_from).min() > 0.999
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
# make sure we can fill by morphing
with pytest.warns(RuntimeWarning, match='consider increasing'):
morph = compute_source_morph(
stc_from, subject_from, subject_to, spacing=None, smooth=1,
subjects_dir=subjects_dir)
stc_to5 = morph.apply(stc_from)
assert stc_to5.data.shape[0] == 163842 + 163842
# Morph vector data
stc_vec = _real_vec_stc()
stc_vec_to1 = compute_source_morph(
stc_vec, subject_from, subject_to, subjects_dir=subjects_dir,
spacing=vertices_to, smooth=1, warn=False).apply(stc_vec)
assert stc_vec_to1.subject == subject_to
assert stc_vec_to1.tmin == stc_vec.tmin
assert stc_vec_to1.tstep == stc_vec.tstep
assert len(stc_vec_to1.lh_vertno) == 642
assert len(stc_vec_to1.rh_vertno) == 642
# Degenerate conditions
# Morphing to a density that is too high should raise an informative error
# (here we need to push to grade=6, but for some subjects even grade=5
# will break)
with pytest.raises(ValueError, match='Cannot use icosahedral grade 6 '):
compute_source_morph(
stc_to1, subject_from=subject_to, subject_to=subject_from,
spacing=6, subjects_dir=subjects_dir)
del stc_to1
with pytest.raises(ValueError, match='smooth.* has to be at least 1'):
compute_source_morph(
stc_from, subject_from, subject_to, spacing=5, smooth=-1,
subjects_dir=subjects_dir)
# subject from mismatch
with pytest.raises(ValueError, match="subject_from does not match"):
compute_source_morph(stc_from, subject_from='foo',
subjects_dir=subjects_dir)
# only one set of vertices
with pytest.raises(ValueError, match="grade.*list must have two elements"):
compute_source_morph(
stc_from, subject_from=subject_from, spacing=[vertices_to[0]],
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_morph_stc_sparse():
"""Test morphing stc with sparse=True."""
subject_from = 'sample'
subject_to = 'fsaverage'
# Morph sparse data
# Make a sparse stc
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
stc_from.vertices[1] = stc_from.vertices[1][[200]]
stc_from._data = stc_from._data[:3]
stc_to_sparse = compute_source_morph(
stc_from, subject_from=subject_from, subject_to=subject_to,
spacing=None, sparse=True, subjects_dir=subjects_dir).apply(stc_from)
assert_allclose(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert len(stc_from.rh_vertno) == len(stc_to_sparse.rh_vertno)
assert len(stc_from.lh_vertno) == len(stc_to_sparse.lh_vertno)
assert stc_to_sparse.subject == subject_to
assert stc_from.tmin == stc_from.tmin
assert stc_from.tstep == stc_from.tstep
stc_from.vertices[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = compute_source_morph(
stc_from, subject_from, subject_to, spacing=None, sparse=True,
subjects_dir=subjects_dir).apply(stc_from)
assert_allclose(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert len(stc_from.rh_vertno) == len(stc_to_sparse.rh_vertno)
assert len(stc_from.lh_vertno) == len(stc_to_sparse.lh_vertno)
assert stc_to_sparse.subject == subject_to
assert stc_from.tmin == stc_from.tmin
assert stc_from.tstep == stc_from.tstep
# Degenerate cases
with pytest.raises(ValueError, match='spacing must be set to None'):
compute_source_morph(
stc_from, subject_from=subject_from, subject_to=subject_to,
spacing=5, sparse=True, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='xhemi=True can only be used with'):
compute_source_morph(
stc_from, subject_from=subject_from, subject_to=subject_to,
spacing=None, sparse=True, xhemi=True, subjects_dir=subjects_dir)
@requires_nibabel()
@testing.requires_testing_data
@pytest.mark.parametrize('sl, n_real, n_mri, n_orig', [
# First and last should add up, middle can have overlap should be <= sum
(slice(0, 1), 37, 138, 8),
(slice(1, 2), 51, 204, 12),
(slice(0, 2), 88, 324, 20),
])
def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig):
"""Test generating a source space from volume label."""
import nibabel as nib
n_use = (sl.stop - sl.start) // (sl.step or 1)
# see gh-5224
evoked = mne.read_evokeds(fname_evoked)[0].crop(0, 0)
evoked.pick_channels(evoked.ch_names[:306:8])
evoked.info.normalize_proj()
n_ch = len(evoked.ch_names)
lut, _ = read_freesurfer_lut()
label_names = sorted(get_volume_labels_from_aseg(fname_aseg))
use_label_names = label_names[sl]
src = setup_volume_source_space(
'sample', subjects_dir=subjects_dir, volume_label=use_label_names,
mri=fname_aseg)
assert len(src) == n_use
assert src.kind == 'volume'
n_src = sum(s['nuse'] for s in src)
sphere = make_sphere_model('auto', 'auto', evoked.info)
fwd = make_forward_solution(evoked.info, fname_trans, src, sphere)
assert fwd['sol']['data'].shape == (n_ch, n_src * 3)
inv = make_inverse_operator(evoked.info, fwd, make_ad_hoc_cov(evoked.info),
loose=1.)
stc = apply_inverse(evoked, inv)
assert stc.data.shape == (n_src, 1)
img = stc.as_volume(src, mri_resolution=True)
assert img.shape == (86, 86, 86, 1)
n_on = np.array(img.dataobj).astype(bool).sum()
aseg_img = _get_img_fdata(nib.load(fname_aseg))
n_got_real = np.in1d(
aseg_img.ravel(), [lut[name] for name in use_label_names]).sum()
assert n_got_real == n_real
# - This was 291 on `main` before gh-5590
# - Refactoring transforms it became 279 with a < 1e-8 change in vox_mri_t
# - Dropped to 123 once nearest-voxel was used in gh-7653
# - Jumped back up to 330 with morphing fixes actually correctly
# interpolating across all volumes
assert aseg_img.shape == img.shape[:3]
assert n_on == n_mri
for ii in range(2):
# should work with (ii=0) or without (ii=1) the interpolator
if ii:
src[0]['interpolator'] = None
img = stc.as_volume(src, mri_resolution=False)
n_on = np.array(img.dataobj).astype(bool).sum()
# was 20 on `main` before gh-5590
# then 44 before gh-7653, which took it back to 20
assert n_on == n_orig
# without the interpolator, this should fail
assert src[0]['interpolator'] is None
with pytest.raises(RuntimeError, match=r'.*src\[0\], .* mri_resolution'):
stc.as_volume(src, mri_resolution=True)
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _mixed_morph_srcs():
# create a mixed source space
labels_vol = ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex']
src = mne.setup_source_space('sample', spacing='oct3',
add_dist=False, subjects_dir=subjects_dir)
src += mne.setup_volume_source_space(
'sample', mri=fname_aseg, pos=10.0,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=True, verbose=True)
# create the destination space
src_fs = mne.read_source_spaces(
op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif'))
src_fs += mne.setup_volume_source_space(
'fsaverage', pos=7., volume_label=labels_vol,
subjects_dir=subjects_dir, add_interpolator=False, verbose=True)
del labels_vol
with pytest.raises(ValueError, match='src_to must be provided .* mixed'):
mne.compute_source_morph(
src=src, subject_from='sample', subject_to='fsaverage',
subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='not included in smoothing'):
morph = mne.compute_source_morph(
src=src, subject_from='sample', subject_to='fsaverage',
subjects_dir=subjects_dir, niter_affine=[1, 0, 0],
niter_sdr=[1, 0, 0], src_to=src_fs, smooth=5, verbose=True)
return morph, src, src_fs
@requires_nibabel()
@requires_dipy()
@pytest.mark.parametrize('vector', (False, True))
def test_mixed_source_morph(_mixed_morph_srcs, vector):
"""Test mixed source space morphing."""
import nibabel as nib
morph, src, src_fs = _mixed_morph_srcs
# Test some basic properties in the subject's own space
lut, _ = read_freesurfer_lut()
ids = [lut[s['seg_name']] for s in src[2:]]
del lut
vertices = [s['vertno'] for s in src]
n_vertices = sum(len(v) for v in vertices)
data = np.zeros((n_vertices, 3, 1))
data[:, 1] = 1.
klass = mne.MixedVectorSourceEstimate
if not vector:
data = data[:, 1]
klass = klass._scalar_class
stc = klass(data, vertices, 0, 1, 'sample')
vol_info = _get_mri_info_data(fname_aseg, data=True)
rrs = np.concatenate([src[2]['rr'][sp['vertno']] for sp in src[2:]])
n_want = np.in1d(_get_atlas_values(vol_info, rrs), ids).sum()
img = _get_img_fdata(stc.volume().as_volume(src, mri_resolution=False))
assert img.astype(bool).sum() == n_want
img_res = nib.load(fname_aseg)
n_want = np.in1d(_get_img_fdata(img_res), ids).sum()
img = _get_img_fdata(stc.volume().as_volume(src, mri_resolution=True))
assert img.astype(bool).sum() > n_want # way more get interpolated into
with pytest.raises(TypeError, match='stc_from must be an instance'):
morph.apply(1.)
# Now actually morph
stc_fs = morph.apply(stc)
img = stc_fs.volume().as_volume(src_fs, mri_resolution=False)
vol_info = _get_mri_info_data(fname_aseg_fs, data=True)
rrs = np.concatenate([src_fs[2]['rr'][sp['vertno']] for sp in src_fs[2:]])
n_want = np.in1d(_get_atlas_values(vol_info, rrs), ids).sum()
with pytest.raises(ValueError, match=r'stc\.subject does not match src s'):
stc_fs.volume().as_volume(src, mri_resolution=False)
img = _get_img_fdata(
stc_fs.volume().as_volume(src_fs, mri_resolution=False))
assert img.astype(bool).sum() == n_want # correct number of voxels
# Morph separate parts and compare to morphing the entire one
stc_fs_surf = morph.apply(stc.surface())
stc_fs_vol = morph.apply(stc.volume())
stc_fs_2 = stc_fs.__class__(
np.concatenate([stc_fs_surf.data, stc_fs_vol.data]),
stc_fs_surf.vertices + stc_fs_vol.vertices, stc_fs.tmin, stc_fs.tstep,
stc_fs.subject)
assert_allclose(stc_fs.data, stc_fs_2.data)
def _rand_affine(rng):
quat = rng.randn(3)
quat /= 5 * np.linalg.norm(quat)
affine = np.eye(4)
affine[:3, 3] = rng.randn(3) / 5.
affine[:3, :3] = quat_to_rot(quat)
return affine
_shapes = (
(10, 10, 10),
(20, 5, 10),
(5, 10, 20),
)
_affines = (
[[2, 0, 0, 1],
[0, 0, 1, -1],
[0, -1, 0, 2],
[0, 0, 0, 1]],
np.eye(4),
np.eye(4)[[0, 2, 1, 3]],
'rand',
)
@requires_nibabel()
@requires_version('dipy', '1.3')
@pytest.mark.parametrize('from_shape', _shapes)
@pytest.mark.parametrize('from_affine', _affines)
@pytest.mark.parametrize('to_shape', _shapes)
@pytest.mark.parametrize('to_affine', _affines)
@pytest.mark.parametrize('order', [0, 1])
@pytest.mark.parametrize('seed', [0, 1])
def test_resample_equiv(from_shape, from_affine, to_shape, to_affine,
order, seed):
"""Test resampling equivalences."""
rng = np.random.RandomState(seed)
from_data = rng.randn(*from_shape)
is_rand = False
if isinstance(to_affine, str):
assert to_affine == 'rand'
to_affine = _rand_affine(rng)
is_rand = True
if isinstance(from_affine, str):
assert from_affine == 'rand'
from_affine = _rand_affine(rng)
is_rand = True
to_affine = np.array(to_affine, float)
assert to_affine.shape == (4, 4)
from_affine = np.array(from_affine, float)
assert from_affine.shape == (4, 4)
#
# 1. nibabel.processing.resample_from_to
#
# for a 1mm iso / 256 -> 5mm / 51 one sample takes ~486 ms
from nibabel.processing import resample_from_to
from nibabel.spatialimages import SpatialImage
start = np.linalg.norm(from_data)
got_nibabel = resample_from_to(
SpatialImage(from_data, from_affine),
(to_shape, to_affine), order=order).get_fdata()
end = np.linalg.norm(got_nibabel)
assert end > 0.05 * start # not too much power lost
#
# 2. dipy.align.imaffine
#
# ~366 ms
import dipy.align.imaffine
interp = 'linear' if order == 1 else 'nearest'
got_dipy = dipy.align.imaffine.AffineMap(
None, to_shape, to_affine,
from_shape, from_affine).transform(
from_data, interpolation=interp, resample_only=True)
# XXX possibly some error in dipy or nibabel (/SciPy), or some boundary
# condition?
nib_different = (
(is_rand and order == 1) or
(from_affine[0, 0] == 2. and not
np.allclose(from_affine, to_affine))
)
nib_different = nib_different and not (
is_rand and from_affine[0, 0] == 2 and order == 0)
if nib_different:
assert not np.allclose(got_dipy, got_nibabel), 'nibabel fixed'
else:
assert_allclose(got_dipy, got_nibabel, err_msg='dipy<->nibabel')
#
# 3. mne.source_space._grid_interp
#
# ~339 ms
trans = np.linalg.inv(from_affine) @ to_affine # to -> from
interp = _grid_interp(from_shape, to_shape, trans, order=order)
got_mne = np.asarray(
interp @ from_data.ravel(order='F')).reshape(to_shape, order='F')
if order == 1:
assert_allclose(got_mne, got_dipy, err_msg='MNE<->dipy')
else:
perc = 100 * np.isclose(got_mne, got_dipy).mean()
assert 83 < perc <= 100
| bsd-3-clause | 4,909,157,670,620,142,000 | 41.826748 | 79 | 0.630447 | false | 3.168428 | true | false | false |
molmod/hipart | hipart/tests/test_scripts.py | 1 | 2143 | # -*- coding: utf-8 -*-
# HiPart is a program to analyze the electronic structure of molecules with
# fuzzy-atom partitioning methods.
# Copyright (C) 2007 - 2012 Toon Verstraelen <[email protected]>
#
# This file is part of HiPart.
#
# HiPart is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HiPart is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
import os, sys, subprocess
from hipart.tests.utils import setup_gaussian
# These tests just run the scripts to see if they do not crash on a simple
# example.
def test_scripts():
schemes = ["becke", "hirsh", "hirshi", "isa"]
fns_script = [
"hi-bond-orders.py", "hi-charges.py", "hi-dipoles.py",
"hi-esp-test.py", "hi-multipoles.py", "hi-net-overlap.py",
"hi-overlap-matrices-orb.py", "hi-spin-charges.py",
]
for scheme in schemes:
for fn_script in fns_script:
yield check_script, fn_script, scheme
def check_script(fn_script, scheme):
fn_script = os.path.abspath(os.path.join("scripts", fn_script))
tmpdir, fn_fchk, fn_densities = setup_gaussian("hf_sto3g", "sto3g")
if scheme in ["hirsh", "hirshi"]:
args = (fn_script, fn_fchk, scheme, fn_densities)
else:
args = (fn_script, fn_fchk, scheme)
retcode = run(args)
assert(retcode==0)
def run(args):
f = file(args[0], "r")
mod_args = ("/usr/bin/env", "python", "-") + args[1:]
proc = subprocess.Popen(mod_args, stdin=f, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.getcwd())
outdata, errdata = proc.communicate()
f.close()
print outdata
print errdata
return proc.returncode
| gpl-3.0 | -2,299,937,459,381,667,000 | 32.484375 | 111 | 0.677555 | false | 3.256839 | false | false | false |
samcheck/Scripts | py3/BingBackground/bing_background.py | 1 | 2674 | #!/usr/bin/env python3
# bing_background.py - downloads the Bing homepage background and sets it as
# the desktop background.
import logging
import os
from urllib.request import urlopen
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from pyvirtualdisplay import Display
def download_link(directory, link):
download_path = os.path.join(directory, os.path.basename(link))
# Check if file already exists
if os.path.exists(download_path):
logging.info('File {} already exists, skipping.'.format(link))
else:
# Download the img
logging.info('Downloading {}'.format(link))
with urlopen(link) as image, open(download_path, 'wb') as f:
f.write(image.read())
def change_wp(directory, link):
# Changes wallpaper for Unity / Gnome desktop
desktop = os.environ.get("DESKTOP_SESSION")
if desktop in ["gnome", "ubuntu", "unity"]:
img_path = os.path.join(directory, os.path.basename(link))
command = "gsettings set org.gnome.desktop.background picture-uri file://{}".format(img_path)
os.system(command)
else:
logging.error('No command to change wallpaper.')
def setup_download_dir(save_dir):
download_dir = os.path.join(os.getcwd(), save_dir)
if not os.path.exists(download_dir):
os.mkdir(download_dir)
return download_dir
def main():
# set up logging
logging.basicConfig(filename='bing_bg.log', filemode='w', level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
url = "https://www.bing.com/"
save_dir = "images"
dl_dir = setup_download_dir(save_dir)
# set up a virtual display to use with selenium
display = Display(visible=0, size=(800, 600))
display.start()
# Launch a Firefox instance
driver = webdriver.Firefox()
driver.get(url)
logging.info('Downloading page {}'.format(url))
time.sleep(6) #hacky sleep to allow bing homepage to load so we can grab the image
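    # A more robust alternative to the fixed sleep (illustrative, untested
    # here) would be an explicit wait on the background element, e.g.:
    #   from selenium.webdriver.support.ui import WebDriverWait
    #   WebDriverWait(driver, 10).until(
    #       lambda d: d.find_element_by_id('bgDiv'))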
# Parse the bing homepage
soup = BeautifulSoup(driver.page_source, "html.parser")
# clean up browser and stop virtual display
driver.quit() # seems to spit out "'NoneType' object has no attribute 'path'"
display.stop()
# Find the URL elements
link = soup.find_all('div', {'id': 'bgDiv'})[0].attrs['style']
img_link = link[(link.find('url("')+5):link.find('");')]
logging.info('Found link: {}'.format(img_link))
# Download and change wallpaper
download_link(dl_dir, img_link)
change_wp(dl_dir, img_link)
if __name__ == "__main__":
main()
| mit | 5,856,054,562,960,298,000 | 31.216867 | 101 | 0.652206 | false | 3.653005 | false | false | false |
Plain-Andy-legacy/android_external_chromium_org | build/android/adb_logcat_printer.py | 44 | 7089 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shutdown adb_logcat_monitor and print accumulated logs.
To test, call './adb_logcat_printer.py <base_dir>' where
<base_dir> contains 'adb logcat -v threadtime' files named as
logcat_<deviceID>_<sequenceNum>
The script will print the files to out, and will combine multiple
logcats from a single device if there is overlap.
Additionally, if a <base_dir>/LOGCAT_MONITOR_PID exists, the script
will attempt to terminate the contained PID by sending a SIGTERM and
monitoring for the deletion of the aforementioned file.
"""
# pylint: disable=W0702
import cStringIO
import logging
import optparse
import os
import re
import signal
import sys
import time
# Set this to debug for more verbose output
LOG_LEVEL = logging.INFO
def CombineLogFiles(list_of_lists, logger):
"""Splices together multiple logcats from the same device.
Args:
list_of_lists: list of pairs (filename, list of timestamped lines)
logger: handler to log events
Returns:
list of lines with duplicates removed
"""
cur_device_log = ['']
for cur_file, cur_file_lines in list_of_lists:
# Ignore files with just the logcat header
if len(cur_file_lines) < 2:
continue
common_index = 0
# Skip this step if list just has empty string
if len(cur_device_log) > 1:
try:
line = cur_device_log[-1]
# Used to make sure we only splice on a timestamped line
if re.match(r'^\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} ', line):
common_index = cur_file_lines.index(line)
else:
logger.warning('splice error - no timestamp in "%s"?', line.strip())
except ValueError:
# The last line was valid but wasn't found in the next file
cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']
logger.info('Unable to splice %s. Incomplete logcat?', cur_file)
cur_device_log += ['*'*30 + ' %s' % cur_file]
cur_device_log.extend(cur_file_lines[common_index:])
return cur_device_log
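# Illustrative splice (hypothetical lines): given two captures from one device,
#   [('logcat_X_0', ['01-02 03:04:05.678 I/foo: a', '01-02 03:04:06.789 I/foo: b']),
#    ('logcat_X_1', ['01-02 03:04:06.789 I/foo: b', '01-02 03:04:07.890 I/foo: c'])]
# the second file is joined at the shared timestamped line, and a '*' banner
# line marks where each source file begins in the combined log.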
def FindLogFiles(base_dir):
"""Search a directory for logcat files.
Args:
base_dir: directory to search
Returns:
Mapping of device_id to a sorted list of file paths for a given device
"""
logcat_filter = re.compile(r'^logcat_(\w+)_(\d+)$')
# list of tuples (<device_id>, <seq num>, <full file path>)
filtered_list = []
for cur_file in os.listdir(base_dir):
matcher = logcat_filter.match(cur_file)
if matcher:
filtered_list += [(matcher.group(1), int(matcher.group(2)),
os.path.join(base_dir, cur_file))]
filtered_list.sort()
file_map = {}
for device_id, _, cur_file in filtered_list:
if device_id not in file_map:
file_map[device_id] = []
file_map[device_id] += [cur_file]
return file_map
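# For example (hypothetical paths), a base_dir containing logcat_ABC123_0 and
# logcat_ABC123_1 yields:
#   {'ABC123': [<base_dir>/logcat_ABC123_0, <base_dir>/logcat_ABC123_1]}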
def GetDeviceLogs(log_filenames, logger):
"""Read log files, combine and format.
Args:
log_filenames: mapping of device_id to sorted list of file paths
logger: logger handle for logging events
Returns:
list of formatted device logs, one for each device.
"""
device_logs = []
for device, device_files in log_filenames.iteritems():
logger.debug('%s: %s', device, str(device_files))
device_file_lines = []
for cur_file in device_files:
with open(cur_file) as f:
device_file_lines += [(cur_file, f.read().splitlines())]
combined_lines = CombineLogFiles(device_file_lines, logger)
# Prepend each line with a short unique ID so it's easy to see
# when the device changes. We don't use the start of the device
# ID because it can be the same among devices. Example lines:
# AB324: foo
# AB324: blah
device_logs += [('\n' + device[-5:] + ': ').join(combined_lines)]
return device_logs
def ShutdownLogcatMonitor(base_dir, logger):
"""Attempts to shutdown adb_logcat_monitor and blocks while waiting."""
try:
monitor_pid_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
with open(monitor_pid_path) as f:
monitor_pid = int(f.readline())
logger.info('Sending SIGTERM to %d', monitor_pid)
os.kill(monitor_pid, signal.SIGTERM)
i = 0
while True:
time.sleep(.2)
if not os.path.exists(monitor_pid_path):
return
if not os.path.exists('/proc/%d' % monitor_pid):
logger.warning('Monitor (pid %d) terminated uncleanly?', monitor_pid)
return
logger.info('Waiting for logcat process to terminate.')
i += 1
if i >= 10:
logger.warning('Monitor pid did not terminate. Continuing anyway.')
return
except (ValueError, IOError, OSError):
logger.exception('Error signaling logcat monitor - continuing')
def main(argv):
parser = optparse.OptionParser(usage='Usage: %prog [options] <log dir>')
parser.add_option('--output-path',
help='Output file path (if unspecified, prints to stdout)')
options, args = parser.parse_args(argv)
if len(args) != 1:
parser.error('Wrong number of unparsed args')
base_dir = args[0]
if options.output_path:
output_file = open(options.output_path, 'w')
else:
output_file = sys.stdout
log_stringio = cStringIO.StringIO()
logger = logging.getLogger('LogcatPrinter')
logger.setLevel(LOG_LEVEL)
sh = logging.StreamHandler(log_stringio)
sh.setFormatter(logging.Formatter('%(asctime)-2s %(levelname)-8s'
' %(message)s'))
logger.addHandler(sh)
try:
# Wait at least 5 seconds after base_dir is created before printing.
#
# The idea is that 'adb logcat > file' output consists of 2 phases:
# 1 Dump all the saved logs to the file
# 2 Stream log messages as they are generated
#
# We want to give enough time for phase 1 to complete. There's no
# good method to tell how long to wait, but it usually only takes a
# second. On most bots, this code path won't occur at all, since
    # the adb_logcat_monitor.py command will have been spawned more than
    # 5 seconds prior to this script being called.
try:
sleep_time = 5 - (time.time() - os.path.getctime(base_dir))
except OSError:
sleep_time = 5
if sleep_time > 0:
logger.warning('Monitor just started? Sleeping %.1fs', sleep_time)
time.sleep(sleep_time)
assert os.path.exists(base_dir), '%s does not exist' % base_dir
ShutdownLogcatMonitor(base_dir, logger)
separator = '\n' + '*' * 80 + '\n\n'
for log in GetDeviceLogs(FindLogFiles(base_dir), logger):
output_file.write(log)
output_file.write(separator)
with open(os.path.join(base_dir, 'eventlog')) as f:
output_file.write('\nLogcat Monitor Event Log\n')
output_file.write(f.read())
except:
logger.exception('Unexpected exception')
logger.info('Done.')
sh.flush()
output_file.write('\nLogcat Printer Event Log\n')
output_file.write(log_stringio.getvalue())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -6,038,289,180,201,884,000 | 32.28169 | 79 | 0.659472 | false | 3.540959 | false | false | false |
msincenselee/vnpy | vnpy/gateway/oanda/oanda_common.py | 1 | 2259 | import time
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING
from vnpy.trader.constant import Direction, Interval, OrderType, Status
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
from vnpy.gateway.oanda import OandaGateway # noqa
STATUS_OANDA2VT = {
"PENDING": Status.NOTTRADED,
"FILLED": Status.ALLTRADED,
"CANCELLED": Status.CANCELLED,
# "TRIGGERED": Status.REJECTED,
}
STOP_ORDER_STATUS_OANDA2VT = {
"Untriggered": Status.NOTTRADED,
"Triggered": Status.NOTTRADED,
# Active: triggered and placed.
# since price is market price, placed == AllTraded?
"Active": Status.ALLTRADED,
"Cancelled": Status.CANCELLED,
"Rejected": Status.REJECTED,
}
DIRECTION_VT2OANDA = {Direction.LONG: "Buy", Direction.SHORT: "Sell"}
DIRECTION_OANDA2VT = {v: k for k, v in DIRECTION_VT2OANDA.items()}
DIRECTION_OANDA2VT.update({
"None": Direction.LONG
})
OPPOSITE_DIRECTION = {
Direction.LONG: Direction.SHORT,
Direction.SHORT: Direction.LONG,
}
ORDER_TYPE_VT2OANDA = {
OrderType.LIMIT: "LIMIT",
OrderType.MARKET: "MARKET",
OrderType.STOP: "STOP",
}
ORDER_TYPE_OANDA2VT = {v: k for k, v in ORDER_TYPE_VT2OANDA.items()}
ORDER_TYPE_OANDA2VT.update({
'LIMIT_ORDER': OrderType.LIMIT,
'MARKET_ORDER': OrderType.MARKET,
'STOP_ORDER': OrderType.STOP,
})
INTERVAL_VT2OANDA = {
Interval.MINUTE: "M1",
Interval.HOUR: "H1",
Interval.DAILY: "D",
Interval.WEEKLY: "W",
}
INTERVAL_VT2OANDA_INT = {
Interval.MINUTE: 1,
Interval.HOUR: 60,
Interval.DAILY: 60 * 24,
Interval.WEEKLY: 60 * 24 * 7,
}
INTERVAL_VT2OANDA_DELTA = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
Interval.WEEKLY: timedelta(days=7),
}
utc_tz = timezone.utc
local_tz = datetime.now(timezone.utc).astimezone().tzinfo
def generate_timestamp(expire_after: float = 30) -> int:
"""
:param expire_after: expires in seconds.
:return: timestamp in milliseconds
"""
return int(time.time() * 1000 + expire_after * 1000)
def parse_datetime(dt: str) -> datetime:
return datetime.fromisoformat(dt[:-4])
def parse_time(dt: str) -> str:
return dt[11:26]
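# Sanity examples, assuming Oanda-style RFC3339 stamps with nanosecond
# precision such as '2019-04-01T12:30:45.123456789Z':
#   parse_datetime drops the last 4 chars ('789Z'), leaving microsecond
#   precision -> datetime(2019, 4, 1, 12, 30, 45, 123456)
#   parse_time slices chars [11:26] -> '12:30:45.123456'
#   generate_timestamp(30) -> epoch milliseconds 30 seconds in the future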
| mit | 1,327,175,221,755,135,500 | 25.267442 | 71 | 0.684816 | false | 2.980211 | false | false | false |
Sungup/Undine | undine/utils/system.py | 1 | 1063 | from __future__ import print_function
from collections import namedtuple
import multiprocessing
import platform
import socket
import sys
class System:
@staticmethod
def is_window():
return platform.system() == 'Windows'
@staticmethod
def is_linux():
return platform.system() == 'Linux'
@staticmethod
def is_mac():
return platform.system() == 'Darwin'
@staticmethod
def cpu_cores():
return multiprocessing.cpu_count()
@staticmethod
def host_info():
HostInfo = namedtuple('HostInfo', ['name', 'ipv4'])
name = socket.gethostname()
return HostInfo(name, socket.gethostbyname(name))
@staticmethod
def version():
return 'v0.1.0b'
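# Example (hypothetical host): System.host_info() -> HostInfo(name='myhost',
# ipv4='192.168.0.10'); System.cpu_cores() -> 8.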
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def print_console_header(string, splitter='-'):
str_size = len(string) + 2
left_size = (80 - str_size) // 2
right_size = 80 - left_size - str_size
return '{1} {0} {2}'.format(string, splitter*left_size, splitter*right_size)
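# For example, print_console_header('Undine') returns an 80-column banner:
# 36 dashes, ' Undine ', then 36 more dashes.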
| mit | 4,274,021,166,244,103,700 | 21.145833 | 80 | 0.631232 | false | 3.823741 | false | false | false |
pwwang/bioprocs | bioprocs/scripts/imtherapy/pVACseq.py | 1 | 3365 | from pathlib import Path
from diot import Diot
from bioprocs.utils import shell2 as shell
from bioprocs.utils.reference import vcfIndex
from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord
{% from os import path %}
{% from pyppl.utils import alwaysList %}
infile = {{i.infile | quote}}
afile = {{i.afile | ?path.isfile | =readlines | !alwaysList
| ?:len(_) == 1 and not _[0] | =:None | repr}}
outfile = {{o.outfile | quote}}
outdir = {{o.outdir | quote}}
bcftools = {{args.bcftools | quote}}
allele = {{args.allele | ?path.isfile | =readlines | !alwaysList
| ?:len(_) == 1 and not _[0] | =:None | repr}}
netmhc = {{args.netmhc | quote}}
iedb_mhc_i = {{args.iedb_mhc_i | quote}}
pvacseq = {{args.pvacseq | quote}}
bdtool = {{args.bdtool | quote}}
nthread = {{args.nthread | quote}}
params = {{args.params | repr}}
infile = vcfIndex(infile)
# get alleles
allele = afile or allele
if not allele:
raise ValueError('No allele has been specified.')
allele = ','.join(allele)
shell.load_config(pvacseq = pvacseq, bcftools = bcftools)
bdtools = [ 'MHCflurry','MHCnuggetsI','MHCnuggetsII','NNalign','NetMHC',
'NetMHCIIpan','NetMHCcons','NetMHCpan','PickPocket','SMM','SMMPMBEC','SMMalign']
bdtools = {bdt.lower():bdt for bdt in bdtools}
# get sample name
sample = shell.bcftools.query(l = infile).splitlines()[0]
shell.rm_rf(Path(outdir).joinpath('MHC_Class_I', sample + '.tsv'), _debug = True)
params.t = nthread
params._ = [infile, sample, allele, bdtools[bdtool], outdir]
params.k = params.get('k', True)
params.iedb_install_directory = Path(iedb_mhc_i).parent
shell.fg.pvacseq.run(**params)
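# The call above should be roughly equivalent to a CLI invocation like the
# following (positional order per pvacseq's `run` subcommand; the flag
# spellings are assumptions, not verified against this pvacseq version):
#   pvacseq run <infile> <sample> <alleles> NetMHC <outdir> \
#       -t <nthread> -k --iedb-install-directory <parent of iedb_mhc_i>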
# keep only epitopes where the mutant peptide binds strongly (IC50(MT) <= 500)
# and the wild-type counterpart does not (IC50(WT) >= 2000); all others are skipped
# Chromosome Start Stop Reference Variant Transcript Transcript Support Level Ensembl Gene ID Variant Type Mutation Protein Position Gene Name HGVSc HGVSp HLA Allele Peptide Length Sub-peptide Position Mutation Position MT Epitope Seq WT Epitope Seq Best MT Score Method Best MT Score Corresponding WT Score Corresponding Fold Change Tumor DNA Depth Tumor DNA VAF Tumor RNA Depth Tumor RNA VAF Normal Depth Normal VAF Gene Expression Transcript Expression Median MT Score Median WT Score Median Fold Change NetMHC WT Score NetMHC MT Score cterm_7mer_gravy_score max_7mer_gravy_score difficult_n_terminal_residue c_terminal_cysteine c_terminal_proline cysteine_count n_terminal_asparagine asparagine_proline_bond_count
reader = TsvReader(Path(outdir).joinpath('MHC_Class_I', sample + '.all_epitopes.tsv'))
writer = TsvWriter(outfile)
writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange']
writer.writeHead()
for r in reader:
out = TsvRecord()
out.HLA_allele = r['HLA Allele']
out.Peptide = r['MT Epitope Seq']
out.Affinity = r['Best MT Score']
out.Gene = r['Gene Name']
out.ENSG = r['Ensembl Gene ID']
out.ENST = r['Transcript']
out.Ref_peptide = r['WT Epitope Seq']
out.Ref_affinity = r['Corresponding WT Score']
out.Mutation = r.Chromosome + ':' + r.Start + '-' + r.Stop + '.' + r.Reference + '/' + r.Variant
out.AAChange = r.Mutation
if float(out.Affinity) > 500 or float(out.Ref_affinity) < 2000:
continue
writer.write(out)
| mit | 6,464,409,861,217,951,000 | 48.485294 | 717 | 0.682021 | false | 2.73355 | false | false | false |
Temzasse/ntu-crysis | server/crysis/cms/facebookUpdate/facebook.py | 1 | 1198 | from facepy import GraphAPI
from django.conf import settings
import os
# Initialize the Graph API with a valid access token (optional,
# but will allow you to do all sorts of fun stuff).
# oauth_access_token = 'EAABZC0OOt2wQBAOcKcpbbYiuFyEONLyqOsdUrODvEBLXq6ZCPXBcI1oZA4UZCPrIkXcZBOzkF9ue0AXNRAEjeE4tfJHy4GwjGfT4CZArvkwmTDGLnU2T1eiixAPm7q4GsPQPVAsDbWdZCEWGwANtKwZAWmeo85xX8tdvfiZBc7Mu6JQZDZD'
oauth_access_token = 'EAACEdEose0cBAPRtQdvettZAmH7ZA6GiRtCx4AFUPfTZBLUPTIjBZCKIVWZCpgYXw5V3sK8c4g7q5bZBUvpMh2M1aq4ZCiYPMwLIIilhFZCFdX4SrBKi5WPFWVrEl5Y1sZACCMkIJUJm6eyPFFXNd3ankhGuJFDfZB53v86bFFtYEzZCrXQj4bU6TPw'
graph = GraphAPI(oauth_access_token)
# Get my latest posts
# graph.get('me/posts')
# Post a photo of a parrot
# graph.post(
# path = 'me/photos',
# source = open('Untitled.png','rb')
# )
def updateFacebook():
    with open('../templates/shelter.txt', 'r') as f:
        graph.post(
            path='me/feed',
            message=f.read(),
        )
    return True
def updateFacebookv2():
    with open(os.path.join(
            settings.BASE_DIR,
            'cms/templates/shelter.txt'), 'r') as f:
        graph.post(
            path='me/feed',
            message=f.read(),
        )
    return True
| mit | -7,475,239,108,665,978,000 | 26.227273 | 211 | 0.732888 | false | 2.312741 | false | false | false |
kmjungersen/pyscan | pyscan/main.py | 1 | 3076 | """
pyscan
======
main.py
This file houses the core of the application
"""
# imports
import csv
import pprint
import requests
from bs4 import BeautifulSoup
from pyscan.local import *
class Barcode():
def __init__(self, barcode_id=None, barcode_ids=None, autosave=False):
"""
:return:
"""
self.number = barcode_id
self.autosave = autosave
self.base_url = BASE_URL
self.data = {}
self.item_name = ''
self.description = ''
self.pp = pprint.PrettyPrinter()
if barcode_id:
self.data = self.retrieve()
            # r.json() already returns str values in Python 3; no decoding needed
            self.item_name = self.data.get('itemname', '')
            self.description = self.data.get('description', '')
elif barcode_ids:
pass
self.save_file = SAVE_FILE_PATH
def retrieve(self, barcode=None):
"""
:param barcode:
:return:
"""
if barcode:
self.number = barcode
url = self.base_url.format(
API_KEY=API_KEY,
number=self.number,
)
r = requests.get(url)
document = r.json()
self.data = document
# self.__convert_unicode_characters()
return document
def save(self, file_path=None):
"""
"""
with open(self.save_file, 'a') as save_file:
save_file.write(str('{number}\n'.format(
number=self.number,
)))
def continuous_input(self):
"""
:return:
"""
done = False
print('Keep scanning codes to save them. When finished, type "done"!')
while not done:
code = input('Enter a barcode ---> ')
if code == 'done':
break
self.pp.pprint(self.retrieve(code))
self.save()
def batch_retrieve(self, barcode_ids):
"""
:return:
"""
barcode_metadata_list = []
for barcode in barcode_ids:
metadata = self.retrieve(barcode)
barcode_metadata_list.append(metadata)
return barcode_metadata_list
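    # Example (hypothetical UPCs):
    #   Barcode().batch_retrieve(['0111222333446', '0222333444556'])
    # returns one metadata dict per scanned code.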
def csv_write(self):
"""
:return:
"""
with open('foo.csv', 'a') as csvfile:
code_writer = csv.writer(csvfile, delimiter=',')
row = [
self.number,
self.item_name,
self.description
]
code_writer.writerow(row)
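    # Appends one CSV line, e.g. (hypothetical API response):
    #   0111222333446,Pretzels,Salted mini pretzels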
def __convert_unicode_characters(self):
"""
:return:
"""
for key, value in self.data.items():
if type(value) is not int:
converted_string = BeautifulSoup(value)
self.data[key] = converted_string
    def __eq__(self, other):
        """
        Compare two Barcode objects by barcode number.
        """
        return self.number == other.number
def __repr__(self):
"""
:return:
"""
return str(self.data)
if __name__ == '__main__':
b = Barcode(pretzel)
| apache-2.0 | 1,413,329,931,169,230,300 | 16.780347 | 79 | 0.491873 | false | 4.213699 | false | false | false |
spacemansteve/AdsDataSqlSync | tests/tests/test_metrics.py | 2 | 8763 |
import sys
import os
PROJECT_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(PROJECT_HOME)
import unittest
from datetime import datetime
from mock import Mock, patch
from adsdata.metrics import Metrics
from adsdata.models import NonBibTable
class metrics_test(unittest.TestCase):
"""tests for generation of metrics database"""
t1 = NonBibTable()
t1.bibcode = "1998PPGeo..22..553A"
t1.refereed = False
t1.authors = ["Arnfield, A. L."]
t1.downloads = []
t1.reads = [1, 2, 3, 4]
t1.downloads = [0, 1, 2, 3]
t1.citations = []
t1.id = 11
t1.reference = ["1997BoLMe..85..475M"]
t2 = NonBibTable()
t2.bibcode = "1997BoLMe..85..475M"
t2.refereed = True
t2.authors = ["Meesters, A. G. C. A.", "Bink, N. J.", "Henneken, E. A. C.", "Vugts, H. F.", "Cannemeijer, F."]
t2.downloads = []
t2.reads = []
t2.citations = ["2006QJRMS.132..779R", "2008Sci...320.1622D", "1998PPGeo..22..553A"]
t2.id = 3
t2.reference = ["1994BoLMe..71..393V", "1994GPC.....9...53M", "1997BoLMe..85...81M"]
test_data = [t1, t2]
def setUp(self):
# perhaps not useful with only two sample data sets
self.no_citations = [x for x in metrics_test.test_data if not x.citations]
self.citations = [x for x in metrics_test.test_data if x.citations]
def test_trivial_fields(self):
"""test fields that are not transformed"""
with patch('sqlalchemy.create_engine'):
met = Metrics()
for record in self.no_citations:
metrics_dict = met.row_view_to_metrics(record, None)
self.assertEqual(record.bibcode, metrics_dict.bibcode, 'bibcode check')
self.assertEqual(record.citations, metrics_dict.citations, 'citations check')
self.assertEqual(record.reads, metrics_dict.reads, 'reads check')
self.assertEqual(record.downloads, metrics_dict.downloads, 'downloads check')
def test_num_fields(self):
"""test fields based on length of other fields"""
with patch('sqlalchemy.create_engine'):
met = Metrics()
for record in self.no_citations:
metrics_dict = met.row_view_to_metrics(record, None)
self.assertEqual(metrics_dict.citation_num, len(record.citations), 'citation number check')
self.assertEqual(metrics_dict.reference_num, len(record.reference), 'reference number check')
self.assertEqual(metrics_dict.author_num, len(record.authors), 'author number check')
self.assertEqual(metrics_dict.refereed_citation_num, 0, 'refereed citation num')
def test_with_citations(self):
"""test a bibcode that has citations"""
test_row = metrics_test.t2
t2_year = int(metrics_test.t2.bibcode[:4])
today = datetime.today()
t2_age = max(1.0, today.year - t2_year + 1)
# we mock row view select for citation data with hard coded results
# for row_view_to_metrics to use ([refereed, len(reference), bibcode], ...)
m = Mock()
m.schema = "None"
m.execute.return_value = (
[True, 1, "1994BoLMe..71..393V"],
[False, 1, "1994GPC.....9...53M"],
[True, 1, "1997BoLMe..85...81M"])
with patch('sqlalchemy.create_engine'):
met = Metrics()
metrics_dict = met.row_view_to_metrics(metrics_test.t2, m)
self.assertEqual(len(metrics_dict.citations), 3, 'citations check')
self.assertEqual(len(metrics_dict.refereed_citations), 2, 'refereed citations check')
self.assertEqual(metrics_dict.refereed_citations[0], "1994BoLMe..71..393V", 'refereed citations check')
self.assertEqual(metrics_dict.refereed_citations[1], "1997BoLMe..85...81M", 'refereed citations check')
rn_citation_data_0 = {'ref_norm': 0.2, 'pubyear': 1997, 'auth_norm': 0.2,
'bibcode': '1994BoLMe..71..393V', 'cityear': 1994}
self.assertEqual(metrics_dict.rn_citation_data[0], rn_citation_data_0, 'rn citation data')
self.assertAlmostEqual(metrics_dict.an_refereed_citations, 2. / t2_age, 5, 'an refereed citations')
self.assertAlmostEqual(metrics_dict.rn_citations, .6, 5, 'rn citations')
def test_validate_lists(self):
"""test validation code for lists
send both matching and mismatching data to metrics list validation and verify correct responses"""
# base data
rn_citation_data1 = Mock()
rn_citation_data1.refereed_citations = ["2015MNRAS.447.1618S", "2016MNRAS.456.1886S", "2015MNRAS.451..149J"]
rn_citation_data1.rn_citation_data = [{"ref_norm": 0.0125, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.447.1618S", "cityear": 2015},
{"ref_norm": 0.012048192771084338, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2016MNRAS.456.1886S", "cityear": 2016},
{"ref_norm": 0.02702702702702703, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.451..149J", "cityear": 2015}]
# only slightly different than base, should not be a mismatch
rn_citation_data1a = Mock()
rn_citation_data1a.refereed_citations = ["2015MNRAS.447.1618S", "2016MNRAS.456.1886S", "2015MNRAS.451..149Z"]
rn_citation_data1a.rn_citation_data = [{"ref_norm": 0.0125, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.447.1618Z", "cityear": 2015},
{"ref_norm": 0.012048192771084338, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2016MNRAS.456.1886S", "cityear": 2016},
{"ref_norm": 0.02702702702702703, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.451..149J", "cityear": 2015}]
# very different from base, should be a mismatch
rn_citation_data2 = Mock()
rn_citation_data2.refereed_citations = ["2015MNRAS.447.1618Z", "2016MNRAS.456.1886Z", "2015MNRAS.451..149Z", "2015MNRAS.451..149Y"]
rn_citation_data2.rn_citation_data = [{"ref_norm": 0.0125, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.447.1618Z", "cityear": 2015},
{"ref_norm": 0.012048192771084338, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2016MNRAS.456.1886Z", "cityear": 2016},
{"ref_norm": 0.02702702702702703, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.451..149Z", "cityear": 2015},
{"ref_norm": 0.02702702702702703, "pubyear": 2014, "auth_norm": 0.3333333333333333,
"bibcode": "2015MNRAS.451..149Y", "cityear": 2015}]
logger = Mock()
mismatch = Metrics.field_mismatch('2014AJ....147..124M', 'rn_citation_data', rn_citation_data1, rn_citation_data1, logger)
self.assertFalse(mismatch, 'validate rn_citation_data')
mismatch = Metrics.field_mismatch('2014AJ....147..124M', 'rn_citation_data', rn_citation_data1, rn_citation_data2, logger)
self.assertTrue(mismatch, 'validate rn_citation_data')
mismatch = Metrics.field_mismatch('2014AJ....147..124M', 'rn_citation_data', rn_citation_data1, rn_citation_data1a, logger)
self.assertFalse(mismatch, 'validate rn_citation_data')
mismatch = Metrics.field_mismatch('2014AJ....147..124M', 'refereed_citations', rn_citation_data1a, rn_citation_data1a, logger)
self.assertFalse(mismatch, 'validate refereed_citations')
mismatch = Metrics.field_mismatch('2014AJ....147..124M', 'refereed_citations', rn_citation_data1, rn_citation_data1, logger)
self.assertFalse(mismatch, 'validate refereed_citations')
mismatch = Metrics.field_mismatch('2014AJ....147..124M', 'refereed_citations', rn_citation_data1, rn_citation_data2, logger)
self.assertTrue(mismatch, 'validate refereed_citations')
if __name__ == '__main__':
unittest.main(verbosity=2)
| gpl-3.0 | 3,286,390,973,501,658,600 | 55.173077 | 139 | 0.584161 | false | 3.408401 | true | false | false |
tdsymonds/djangocms-typedjs | setup.py | 1 | 1343 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='djangocms-typedjs',
version='1.0.0',
license='MIT License',
description='A Django CMS plugin that implements the Typed.js jQuery plugin.',
long_description=README,
url='https://github.com/tdsymonds/djangocms-typedjs',
author='Tom Symonds',
author_email='[email protected]',
keywords='djangocms-typedjs, typedjs, typing, django',
packages=[
'djangocms_typedjs',
],
include_package_data=True,
install_requires=[
'django-cms>=3.2',
'djangocms-text-ckeditor',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| mit | -8,282,335,474,856,264,000 | 30.756098 | 82 | 0.600894 | false | 3.699725 | false | true | false |
lilsweetcaligula/Online-Judges | lintcode/medium/convert_sorted_list_to_balanced_bst/py/convert_sorted_list_to_balanced_bst.py | 1 | 1225 | # coding:utf-8
'''
@Copyright:LintCode
@Author: lilsweetcaligula
@Problem: http://www.lintcode.com/problem/convert-sorted-list-to-balanced-bst
@Language: Python
@Datetime: 17-02-16 16:14
'''
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
    def sortedListToBST(self, head):
        if head is None:
            return None
        # Find the middle node with the fast/slow pointer technique;
        # prev trails slow so the left half can be detached.
        slow = head
        fast = head
        prev = slow
        while fast is not None and fast.next is not None:
            prev = slow
            slow = slow.next
            fast = fast.next.next
        prev.next = None
        mid = slow
        left = head if mid != head else None
        right = mid.next
        # The middle node becomes the root, which keeps the tree
        # height-balanced; recurse on the two detached halves.
        root = TreeNode(mid.val)
        root.left = self.sortedListToBST(left)
        root.right = self.sortedListToBST(right)
        return root
| mit | -7,702,063,534,747,963,000 | 22.576923 | 78 | 0.537143 | false | 3.79257 | false | false | false |