repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
apoorvemohan/haas | haas/drivers/switches/nexus.py | 1 | 2520 | # Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""A switch driver for the Nexus 5500
Currently the driver uses telnet to connect to the switch's console; in the
long term we want to be using SNMP.
"""
import os
import sys
import pexpect
import re
from haas.dev_support import no_dry_run
@no_dry_run
def apply_networking(net_map, config):
def set_access_vlan(port, vlan_id):
"""Set a port to access a given vlan.
This function expects to be called while in the config prompt, and
leaves you there when done.
"""
console.sendline('int ethernet 1/%s' % port)
# set it first to switch mode then to access mode:
console.expect(r'[\r\n]+.+# ')
console.sendline('sw')
console.expect(r'[\r\n]+.+# ')
console.sendline('sw mode access')
console.expect(r'[\r\n]+.+# ')
if vlan_id is None:
# turn the port off
console.sendline('no sw')
else:
# set the vlan:
console.sendline('sw access vlan %s' % vlan_id)
console.expect(r'[\r\n]+.+# ')
# back out to config_prompt
console.sendline('exit')
console.expect(r'[\r\n]+.+# ')
nexus_ip = config['ip']
nexus_user = config['user']
nexus_pass = config['pass']
try:
console = pexpect.spawn('telnet ' + nexus_ip)
console.expect('login: ')
console.sendline(nexus_user)
console.expect('password: ')
console.sendline(nexus_pass)
console.expect(r'[\r\n]+.+# ')
console.sendline('config terminal')
console.expect(r'[\r\n]+.+# ')
for port_id in net_map:
set_access_vlan(port_id, net_map[port_id])
    except IOError as e:
        print("Connection error while connecting to the Nexus switch({0}): {1}".format(e.errno, e.strerror))
    except:
        print("Unexpected error while connecting to the Nexus switch: {0}".format(sys.exc_info()[0]))
        raise
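# Illustrative use of this driver (hypothetical switch address, credentials and
# port/VLAN mapping; a real Nexus switch reachable over telnet is required):
#
#     config = {'ip': '10.0.0.2', 'user': 'admin', 'pass': 'secret'}
#     net_map = {'5': '100', '7': None}  # port 5 -> VLAN 100, port 7 switched off
#     apply_networking(net_map, config)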
| apache-2.0 | -8,500,708,156,257,501,000 | 32.6 | 107 | 0.63254 | false |
ProjectQ-Framework/FermiLib-Plugin-Psi4 | fermilibpluginpsi4/_run_psi4.py | 1 | 8415 | # FermiLib plugin to interface with Psi4
#
# Copyright (C) 2017 ProjectQ-Framework (www.projectq.ch)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functions to prepare psi4 input and run calculations."""
from __future__ import absolute_import
import os
import re
import subprocess
def create_geometry_string(geometry):
"""This function converts MolecularData geometry to psi4 geometry.
Args:
geometry: A list of tuples giving the coordinates of each atom.
example is [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))]. Distances in
angstrom. Use atomic symbols to specify atoms.
Returns:
geo_string: A string giving the geometry for each atom on a line, e.g.:
H 0. 0. 0.
H 0. 0. 0.7414
"""
geo_string = ''
for item in geometry:
atom = item[0]
coordinates = item[1]
line = '{} {} {} {}'.format(atom,
coordinates[0],
coordinates[1],
coordinates[2])
if len(geo_string) > 0:
geo_string += '\n'
geo_string += line
return geo_string
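# Example (values taken from the docstring above):
#
#     create_geometry_string([('H', (0, 0, 0)), ('H', (0, 0, 0.7414))])
#     # -> 'H 0 0 0\nH 0 0 0.7414'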
def generate_psi4_input(molecule,
run_scf,
run_mp2,
run_cisd,
run_ccsd,
run_fci,
verbose,
tolerate_error,
memory,
template_file):
"""This function creates and saves a psi4 input file.
Args:
molecule: An instance of the MolecularData class.
run_scf: Boolean to run SCF calculation.
run_mp2: Boolean to run MP2 calculation.
run_cisd: Boolean to run CISD calculation.
run_ccsd: Boolean to run CCSD calculation.
run_fci: Boolean to FCI calculation.
verbose: Boolean whether to print calculation results to screen.
tolerate_error: Whether to fail or merely warn when Psi4 fails.
memory: Int giving amount of memory to allocate in MB.
template_file(str): Specify the filename of a Psi4 template
Returns:
input_file: A string giving the name of the saved input file.
"""
# Create Psi4 geometry string.
geo_string = create_geometry_string(molecule.geometry)
# Find the psi4_directory.
psi4_directory = os.path.dirname(os.path.realpath(__file__))
# Parse input template.
if template_file is None:
template_file = psi4_directory + '/_psi4_template'
input_template = []
with open(template_file, 'r') as stream:
for line in stream:
input_template += [line]
# Populate contents of input file based on automatic parameters.
input_content = [re.sub('&THIS_DIRECTORY',
psi4_directory, line)
for line in input_template]
# Populate contents of input file based on MolecularData parameters.
input_content = [re.sub('&geometry', str(molecule.geometry), line)
for line in input_content]
input_content = [re.sub('&basis', molecule.basis, line)
for line in input_content]
input_content = [re.sub('&charge', str(molecule.charge), line)
for line in input_content]
input_content = [re.sub('&multiplicity', str(molecule.multiplicity), line)
for line in input_content]
input_content = [re.sub('&description', str(molecule.description), line)
for line in input_content]
input_content = [re.sub('&mol_filename', str(molecule.filename), line)
for line in input_content]
input_content = [re.sub('&geo_string', geo_string, line)
for line in input_content]
# Populate contents of input file based on provided calculation parameters.
input_content = [re.sub('&run_scf', str(run_scf), line)
for line in input_content]
input_content = [re.sub('&run_mp2', str(run_mp2), line)
for line in input_content]
input_content = [re.sub('&run_cisd', str(run_cisd), line)
for line in input_content]
input_content = [re.sub('&run_ccsd', str(run_ccsd), line)
for line in input_content]
input_content = [re.sub('&run_fci', str(run_fci), line)
for line in input_content]
input_content = [re.sub('&tolerate_error', str(tolerate_error), line)
for line in input_content]
input_content = [re.sub('&verbose', str(verbose), line)
for line in input_content]
input_content = [re.sub('&memory', str(memory), line)
for line in input_content]
# Write input file and return handle.
input_file = molecule.filename + '.inp'
with open(input_file, 'w') as stream:
stream.write(''.join(input_content))
return input_file
def clean_up(molecule, delete_input=True, delete_output=False):
input_file = molecule.filename + '.inp'
output_file = molecule.filename + '.out'
run_directory = os.getcwd()
for local_file in os.listdir(run_directory):
if local_file.endswith('.clean'):
os.remove(run_directory + '/' + local_file)
try:
os.remove('timer.dat')
except:
pass
if delete_input:
os.remove(input_file)
if delete_output:
os.remove(output_file)
def run_psi4(molecule,
run_scf=True,
run_mp2=False,
run_cisd=False,
run_ccsd=False,
run_fci=False,
verbose=False,
tolerate_error=False,
delete_input=True,
delete_output=False,
memory=8000,
template_file=None):
"""This function runs a Psi4 calculation.
Args:
molecule: An instance of the MolecularData class.
run_scf: Optional boolean to run SCF calculation.
run_mp2: Optional boolean to run MP2 calculation.
run_cisd: Optional boolean to run CISD calculation.
run_ccsd: Optional boolean to run CCSD calculation.
run_fci: Optional boolean to FCI calculation.
verbose: Boolean whether to print calculation results to screen.
tolerate_error: Optional boolean to warn or raise when Psi4 fails.
delete_input: Optional boolean to delete psi4 input file.
delete_output: Optional boolean to delete psi4 output file.
memory: Optional int giving amount of memory to allocate in MB.
template_file(str): Path to Psi4 template file
Returns:
molecule: The updated MolecularData object.
Raises:
psi4 errors: An error from psi4.
"""
# Prepare input.
input_file = generate_psi4_input(molecule,
run_scf,
run_mp2,
run_cisd,
run_ccsd,
run_fci,
verbose,
tolerate_error,
memory,
template_file)
# Run psi4.
output_file = molecule.filename + '.out'
try:
process = subprocess.Popen(['psi4', input_file, output_file])
process.wait()
except:
print('Psi4 calculation for {} has failed.'.format(molecule.name))
process.kill()
clean_up(molecule, delete_input, delete_output)
if not tolerate_error:
raise
else:
clean_up(molecule, delete_input, delete_output)
# Return updated molecule instance.
molecule.load()
return molecule
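# Illustrative usage (a sketch, assuming a FermiLib MolecularData instance and a
# working `psi4` executable on the PATH; the geometry below is hypothetical):
#
#     from fermilib.utils import MolecularData
#     geometry = [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))]
#     molecule = MolecularData(geometry, basis='sto-3g', multiplicity=1)
#     molecule = run_psi4(molecule, run_scf=True, run_ccsd=True)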
| lgpl-3.0 | 7,315,284,371,011,495,000 | 37.424658 | 79 | 0.579085 | false |
kubevirt/vAdvisor | vadvisor/virt/parser.py | 1 | 1627 | from xml.etree.ElementTree import XMLParser
class GuestXmlParser:
int_tags = ["currentMemory", "memory"]
int_attribs = ["index", "port", "startport", "vram"]
def __init__(self):
self.json = {}
self.stack = [self.json]
        self.category = None
def start(self, tag, attrib):
self.tag = tag
for attr in self.int_attribs:
if attrib.get(attr):
attrib[attr] = int(attrib[attr])
if tag in ("devices", "clock"):
self.category = tag
self.stack[-1][tag] = []
self.stack.append(self.stack[-1][tag])
elif tag == "emulator":
self.stack[-2][tag] = attrib
self.stack.append(attrib)
elif isinstance(self.stack[-1], dict):
self.stack[-1][tag] = attrib
self.stack.append(attrib)
elif self.category == "devices":
device = {"family": tag}
device.update(attrib)
self.stack[-1].append(device)
self.stack.append(device)
elif self.category == "clock":
self.stack[-1].append(attrib)
self.stack.append(attrib)
def end(self, tag):
self.stack.pop()
def data(self, data):
if data and data.strip():
if self.tag in self.int_tags:
self.stack[-1]["value"] = int(data)
else:
self.stack[-1]["value"] = data
def close(self):
return self.json
def parse_domain_xml(xml):
target = GuestXmlParser()
parser = XMLParser(target=target)
parser.feed(xml)
return parser.close()
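# Illustrative example (hypothetical, minimal XML; a real libvirt domain document
# would normally be passed in):
#
#     xml = ("<domain><memory>1024</memory>"
#            "<devices><disk type='file'/></devices></domain>")
#     parse_domain_xml(xml)
#     # -> {'domain': {'memory': {'value': 1024},
#     #                'devices': [{'family': 'disk', 'type': 'file'}]}}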
| gpl-3.0 | 795,762,384,408,859,900 | 27.54386 | 56 | 0.533497 | false |
gri-is/lodjob | crom_scripts/deprecated/acquisition_manual.py | 1 | 1582 | import pprint
from cromulent.model import * # imports models
from cromulent.vocab import * # imports model subcomponents
from utils.aat_labels import aat_labels
from utils.aat_label_fetcher import get_or_fetch
from utils.data_parsing import find_values
from utils.crom_helpers import props, toJSON, toString, printString, printAttr,\
type_maker
make_type = type_maker(vocab='aat:', getter=get_or_fetch,
labels=aat_labels, Class=Type)
# start by looking at knoedler_purchase_info + knoedler tables
a = Acquisition(ident='k-purchase-1000')
place = Place()
place.label = 'Art Gallery'
a.took_place_at = place
# for each seller id use:
# purchase.knoedler_purchase_sellers_collection[0].purchase_seller_uid
# or
# purchase.knoedler_purchase_sellers_collection[0].gpi_people.person_ulan
seller = Actor(ident='ulan-person-23300')
seller.label = 'Seller'
a.transferred_title_from = seller
timespan = TimeSpan()
timespan.label = 'When'
timespan.end_of_the_end = '1890-10-16' # "1890-01-05T00:00:00Z"
timespan.begin_of_the_begin = '1890-10-16' # "1890-01-04T00:00:00Z"
a.timespan = timespan
obj_id = 'k-object-1000'# purchase.knoedler.object_id
obj = ManMadeObject(ident=obj_id)
a.transferred_title_of = obj
# for each buyer id use:
# purchase.knoedler_purchase_buyers_collection[0].purchase_buyer_uid
# or
# purchases.knoedler_purchase_buyers_collection[0].gpi_people.person_ulan
buyer = Group(ident='500304270') # Knoedler's ULAN ID (consider UID instead)
buyer.label = 'Buyer'
a.transferred_title_to = buyer
printString(a)
| agpl-3.0 | -9,017,786,665,425,611,000 | 31.285714 | 80 | 0.733881 | false |
moggers87/django-bitfield | bitfield/tests/tests.py | 1 | 17505 | from __future__ import absolute_import
import pickle
from django.db import connection, models
from django.db.models import F
from django.test import TestCase
from bitfield import BitHandler, Bit, BitField
from bitfield.tests import BitFieldTestModel, CompositeBitFieldTestModel, BitFieldTestModelForm
from bitfield.compat import bitand, bitor
try:
from django.db.models.base import simple_class_factory # noqa
except ImportError:
# Django 1.5 muffed up the base class which breaks the pickle tests
# Note, it's fixed again in 1.6.
from django.db.models import base
_model_unpickle = base.model_unpickle
def simple_class_factory(model, attrs):
return model
def model_unpickle(model, attrs, factory):
return _model_unpickle(model, attrs)
setattr(base, 'simple_class_factory', simple_class_factory)
setattr(base, 'model_unpickle', model_unpickle)
class BitHandlerTest(TestCase):
def test_comparison(self):
bithandler_1 = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
bithandler_2 = BitHandler(1, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
bithandler_3 = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
assert bithandler_1 == bithandler_1
assert bithandler_1 != bithandler_2
assert bithandler_1 == bithandler_3
def test_defaults(self):
bithandler = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
# Default value of 0.
self.assertEquals(int(bithandler), 0)
# Test bit numbers.
self.assertEquals(int(bithandler.FLAG_0.number), 0)
self.assertEquals(int(bithandler.FLAG_1.number), 1)
self.assertEquals(int(bithandler.FLAG_2.number), 2)
self.assertEquals(int(bithandler.FLAG_3.number), 3)
# Negative test non-existant key.
self.assertRaises(AttributeError, lambda: bithandler.FLAG_4)
# Test bool().
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
def test_nonzero_default(self):
bithandler = BitHandler(1, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), True)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(2, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), True)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(3, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), True)
self.assertEquals(bool(bithandler.FLAG_1), True)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(4, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), True)
self.assertEquals(bool(bithandler.FLAG_3), False)
def test_mutation(self):
bithandler = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(bithandler | 1, bithandler._keys)
self.assertEquals(bool(bithandler.FLAG_0), True)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler ^= 3
self.assertEquals(int(bithandler), 2)
self.assertEquals(bool(bithandler & 1), False)
bithandler.FLAG_0 = False
self.assertEquals(bithandler.FLAG_0, False)
bithandler.FLAG_1 = True
self.assertEquals(bithandler.FLAG_0, False)
self.assertEquals(bithandler.FLAG_1, True)
bithandler.FLAG_2 = False
self.assertEquals(bithandler.FLAG_0, False)
self.assertEquals(bithandler.FLAG_1, True)
self.assertEquals(bithandler.FLAG_2, False)
class BitTest(TestCase):
def test_int(self):
bit = Bit(0)
self.assertEquals(int(bit), 1)
self.assertEquals(bool(bit), True)
self.assertFalse(not bit)
def test_comparison(self):
self.assertEquals(Bit(0), Bit(0))
self.assertNotEquals(Bit(1), Bit(0))
self.assertNotEquals(Bit(0, 0), Bit(0, 1))
self.assertEquals(Bit(0, 1), Bit(0, 1))
self.assertEquals(Bit(0), 1)
def test_and(self):
self.assertEquals(1 & Bit(2), 0)
self.assertEquals(1 & Bit(0), 1)
self.assertEquals(1 & ~Bit(0), 0)
self.assertEquals(Bit(0) & Bit(2), 0)
self.assertEquals(Bit(0) & Bit(0), 1)
self.assertEquals(Bit(0) & ~Bit(0), 0)
def test_or(self):
self.assertEquals(1 | Bit(2), 5)
self.assertEquals(1 | Bit(5), 33)
self.assertEquals(1 | ~Bit(2), -5)
self.assertEquals(Bit(0) | Bit(2), 5)
self.assertEquals(Bit(0) | Bit(5), 33)
self.assertEquals(Bit(0) | ~Bit(2), -5)
def test_xor(self):
self.assertEquals(1 ^ Bit(2), 5)
self.assertEquals(1 ^ Bit(0), 0)
self.assertEquals(1 ^ Bit(1), 3)
self.assertEquals(1 ^ Bit(5), 33)
self.assertEquals(1 ^ ~Bit(2), -6)
self.assertEquals(Bit(0) ^ Bit(2), 5)
self.assertEquals(Bit(0) ^ Bit(0), 0)
self.assertEquals(Bit(0) ^ Bit(1), 3)
self.assertEquals(Bit(0) ^ Bit(5), 33)
self.assertEquals(Bit(0) ^ ~Bit(2), -6)
class BitFieldTest(TestCase):
def test_basic(self):
# Create instance and make sure flags are working properly.
instance = BitFieldTestModel.objects.create(flags=1)
self.assertTrue(instance.flags.FLAG_0)
self.assertFalse(instance.flags.FLAG_1)
self.assertFalse(instance.flags.FLAG_2)
self.assertFalse(instance.flags.FLAG_3)
def test_regression_1425(self):
# Creating new instances shouldn't allow negative values.
instance = BitFieldTestModel.objects.create(flags=-1)
self.assertEqual(instance.flags._value, 15)
self.assertTrue(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertTrue(instance.flags.FLAG_2)
self.assertTrue(instance.flags.FLAG_3)
cursor = connection.cursor()
flags_field = BitFieldTestModel._meta.get_field_by_name('flags')[0]
flags_db_column = flags_field.db_column or flags_field.name
cursor.execute("INSERT INTO %s (%s) VALUES (-1)" % (BitFieldTestModel._meta.db_table, flags_db_column))
# There should only be the one row we inserted through the cursor.
instance = BitFieldTestModel.objects.get(flags=-1)
self.assertTrue(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertTrue(instance.flags.FLAG_2)
self.assertTrue(instance.flags.FLAG_3)
instance.save()
self.assertEqual(BitFieldTestModel.objects.filter(flags=15).count(), 2)
self.assertEqual(BitFieldTestModel.objects.filter(flags__lt=0).count(), 0)
def test_select(self):
BitFieldTestModel.objects.create(flags=3)
self.assertTrue(BitFieldTestModel.objects.filter(flags=BitFieldTestModel.flags.FLAG_1).exists())
self.assertTrue(BitFieldTestModel.objects.filter(flags=BitFieldTestModel.flags.FLAG_0).exists())
self.assertFalse(BitFieldTestModel.objects.exclude(flags=BitFieldTestModel.flags.FLAG_0).exists())
self.assertFalse(BitFieldTestModel.objects.exclude(flags=BitFieldTestModel.flags.FLAG_1).exists())
def test_update(self):
instance = BitFieldTestModel.objects.create(flags=0)
self.assertFalse(instance.flags.FLAG_0)
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitor(F('flags'), BitFieldTestModel.flags.FLAG_1))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertTrue(instance.flags.FLAG_1)
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitor(F('flags'), ((~BitFieldTestModel.flags.FLAG_0 | BitFieldTestModel.flags.FLAG_3))))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertFalse(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertTrue(instance.flags.FLAG_3)
self.assertFalse(BitFieldTestModel.objects.filter(flags=BitFieldTestModel.flags.FLAG_0).exists())
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitand(F('flags'), ~BitFieldTestModel.flags.FLAG_3))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertFalse(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertFalse(instance.flags.FLAG_3)
def test_update_with_handler(self):
instance = BitFieldTestModel.objects.create(flags=0)
self.assertFalse(instance.flags.FLAG_0)
instance.flags.FLAG_1 = True
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitor(F('flags'), instance.flags))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertTrue(instance.flags.FLAG_1)
def test_negate(self):
BitFieldTestModel.objects.create(flags=BitFieldTestModel.flags.FLAG_0 | BitFieldTestModel.flags.FLAG_1)
BitFieldTestModel.objects.create(flags=BitFieldTestModel.flags.FLAG_1)
self.assertEqual(BitFieldTestModel.objects.filter(flags=~BitFieldTestModel.flags.FLAG_0).count(), 1)
self.assertEqual(BitFieldTestModel.objects.filter(flags=~BitFieldTestModel.flags.FLAG_1).count(), 0)
self.assertEqual(BitFieldTestModel.objects.filter(flags=~BitFieldTestModel.flags.FLAG_2).count(), 2)
def test_default_value(self):
instance = BitFieldTestModel.objects.create()
self.assertTrue(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertFalse(instance.flags.FLAG_2)
self.assertFalse(instance.flags.FLAG_3)
def test_binary_capacity(self):
import math
from django.db.models.fields import BigIntegerField
# Local maximum value, slow canonical algorithm
MAX_COUNT = int(math.floor(math.log(BigIntegerField.MAX_BIGINT, 2)))
# Big flags list
flags = ['f' + str(i) for i in range(100)]
try:
BitField(flags=flags[:MAX_COUNT])
except ValueError:
self.fail("It should work well with these flags")
self.assertRaises(ValueError, BitField, flags=flags[:(MAX_COUNT + 1)])
def test_dictionary_init(self):
flags = {
0: 'zero',
1: 'first',
10: 'tenth',
2: 'second',
'wrongkey': 'wrongkey',
100: 'bigkey',
-100: 'smallkey',
}
try:
bf = BitField(flags)
except ValueError:
self.fail("It should work well with these flags")
self.assertEquals(bf.flags, ['zero', 'first', 'second', '', '', '', '', '', '', '', 'tenth'])
self.assertRaises(ValueError, BitField, flags={})
self.assertRaises(ValueError, BitField, flags={'wrongkey': 'wrongkey'})
self.assertRaises(ValueError, BitField, flags={'1': 'non_int_key'})
def test_defaults_as_key_names(self):
class TestModel(models.Model):
flags = BitField(flags=(
'FLAG_0',
'FLAG_1',
'FLAG_2',
'FLAG_3',
), default=('FLAG_1', 'FLAG_2'))
field = TestModel._meta.get_field('flags')
self.assertEquals(field.default, TestModel.flags.FLAG_1 | TestModel.flags.FLAG_2)
class BitFieldSerializationTest(TestCase):
def test_can_unserialize_bithandler(self):
data = b"cdjango.db.models.base\nmodel_unpickle\np0\n(cbitfield.tests.models\nBitFieldTestModel\np1\n(lp2\ncdjango.db.models.base\nsimple_class_factory\np3\ntp4\nRp5\n(dp6\nS'flags'\np7\nccopy_reg\n_reconstructor\np8\n(cbitfield.types\nBitHandler\np9\nc__builtin__\nobject\np10\nNtp11\nRp12\n(dp13\nS'_value'\np14\nI1\nsS'_keys'\np15\n(S'FLAG_0'\np16\nS'FLAG_1'\np17\nS'FLAG_2'\np18\nS'FLAG_3'\np19\ntp20\nsbsS'_state'\np21\ng8\n(cdjango.db.models.base\nModelState\np22\ng10\nNtp23\nRp24\n(dp25\nS'adding'\np26\nI00\nsS'db'\np27\nS'default'\np28\nsbsS'id'\np29\nI1\nsb."
inst = pickle.loads(data)
self.assertTrue(inst.flags.FLAG_0)
self.assertFalse(inst.flags.FLAG_1)
def test_pickle_integration(self):
inst = BitFieldTestModel.objects.create(flags=1)
data = pickle.dumps(inst)
inst = pickle.loads(data)
self.assertEquals(type(inst.flags), BitHandler)
self.assertEquals(int(inst.flags), 1)
def test_added_field(self):
data = b"cdjango.db.models.base\nmodel_unpickle\np0\n(cbitfield.tests.models\nBitFieldTestModel\np1\n(lp2\ncdjango.db.models.base\nsimple_class_factory\np3\ntp4\nRp5\n(dp6\nS'flags'\np7\nccopy_reg\n_reconstructor\np8\n(cbitfield.types\nBitHandler\np9\nc__builtin__\nobject\np10\nNtp11\nRp12\n(dp13\nS'_value'\np14\nI1\nsS'_keys'\np15\n(S'FLAG_0'\np16\nS'FLAG_1'\np17\nS'FLAG_2'\np18\ntp19\nsbsS'_state'\np20\ng8\n(cdjango.db.models.base\nModelState\np21\ng10\nNtp22\nRp23\n(dp24\nS'adding'\np25\nI00\nsS'db'\np27\nS'default'\np27\nsbsS'id'\np28\nI1\nsb."
inst = pickle.loads(data)
self.assertTrue('FLAG_3' in inst.flags.keys())
class CompositeBitFieldTest(TestCase):
def test_get_flag(self):
inst = CompositeBitFieldTestModel()
self.assertEqual(inst.flags.FLAG_0, inst.flags_1.FLAG_0)
self.assertEqual(inst.flags.FLAG_4, inst.flags_2.FLAG_4)
self.assertRaises(AttributeError, lambda: inst.flags.flag_NA)
def test_set_flag(self):
inst = CompositeBitFieldTestModel()
flag_0_original = bool(inst.flags.FLAG_0)
self.assertEqual(bool(inst.flags_1.FLAG_0), flag_0_original)
flag_4_original = bool(inst.flags.FLAG_4)
self.assertEqual(bool(inst.flags_2.FLAG_4), flag_4_original)
# flip flags' bits
inst.flags.FLAG_0 = not flag_0_original
inst.flags.FLAG_4 = not flag_4_original
# check to make sure the bit flips took effect
self.assertNotEqual(bool(inst.flags.FLAG_0), flag_0_original)
self.assertNotEqual(bool(inst.flags_1.FLAG_0), flag_0_original)
self.assertNotEqual(bool(inst.flags.FLAG_4), flag_4_original)
self.assertNotEqual(bool(inst.flags_2.FLAG_4), flag_4_original)
def set_flag():
inst.flags.flag_NA = False
self.assertRaises(AttributeError, set_flag)
def test_hasattr(self):
inst = CompositeBitFieldTestModel()
self.assertEqual(hasattr(inst.flags, 'flag_0'),
hasattr(inst.flags_1, 'flag_0'))
self.assertEqual(hasattr(inst.flags, 'flag_4'),
hasattr(inst.flags_2, 'flag_4'))
class BitFormFieldTest(TestCase):
def test_form_new_invalid(self):
invalid_data_dicts = [
{'flags': ['FLAG_0', 'FLAG_FLAG']},
{'flags': ['FLAG_4']},
{'flags': [1, 2]}
]
for invalid_data in invalid_data_dicts:
form = BitFieldTestModelForm(data=invalid_data)
self.assertFalse(form.is_valid())
def test_form_new(self):
data_dicts = [
{'flags': ['FLAG_0', 'FLAG_1']},
{'flags': ['FLAG_3']},
{'flags': []},
{}
]
for data in data_dicts:
form = BitFieldTestModelForm(data=data)
self.failUnless(form.is_valid())
instance = form.save()
flags = data['flags'] if 'flags' in data else []
for k in BitFieldTestModel.flags:
self.assertEquals(bool(getattr(instance.flags, k)), k in flags)
def test_form_update(self):
instance = BitFieldTestModel.objects.create(flags=0)
for k in BitFieldTestModel.flags:
self.assertFalse(bool(getattr(instance.flags, k)))
data = {'flags': ['FLAG_0', 'FLAG_1']}
form = BitFieldTestModelForm(data=data, instance=instance)
self.failUnless(form.is_valid())
instance = form.save()
for k in BitFieldTestModel.flags:
self.assertEquals(bool(getattr(instance.flags, k)), k in data['flags'])
data = {'flags': ['FLAG_2', 'FLAG_3']}
form = BitFieldTestModelForm(data=data, instance=instance)
self.failUnless(form.is_valid())
instance = form.save()
for k in BitFieldTestModel.flags:
self.assertEquals(bool(getattr(instance.flags, k)), k in data['flags'])
data = {'flags': []}
form = BitFieldTestModelForm(data=data, instance=instance)
self.failUnless(form.is_valid())
instance = form.save()
for k in BitFieldTestModel.flags:
self.assertFalse(bool(getattr(instance.flags, k)))
| apache-2.0 | -1,557,695,423,243,655,400 | 42.87218 | 578 | 0.649357 | false |
adewynter/Tools | MLandDS/SpeechRecognition/dataPreprocessor.py | 1 | 5683 | # Data preprocessor for speech recognition
# Arguably the most important part of our infrastructure
# WARNING -- Disgustingly long class
# (c) Adrian deWynter, 2017, where applicable
from __future__ import print_function
from six.moves import urllib,xrange
from random import shuffle
from enum import Enum
import os,re,gzip,wave
import skimage.io
import numpy
# Labels (orthogonal features). Defined here, before the batch generators below,
# which reference Target in their default arguments.
class Target(Enum):
    digits=1
    speaker=2
    words_per_minute=3
    word_phonemes=4
    word=5
    sentence=6
    sentiment=7
CHUNK = 4096
width=512
height=512
pcm_path = "data/spoken_numbers_pcm/" # 8 bit
wav_path = "data/spoken_numbers_wav/" # 16 bit s16le
PATH = pcm_path
############
# Misc utils
############
def speaker(wav):
return re.sub(r'_.*', '', wav[2:])
def get_speakers(local_path=PATH):
files = os.listdir(local_path)
return list(set(map(speaker,files)))
def load_wav_file(name):
file = wave.open(name, "rb")
chunk = []
data0 = file.readframes(CHUNK)
while data0 != '':
data = numpy.fromstring(data0, dtype='uint8') # Alter datatype for efficiency
data = (data + 128) / 255. # 0-1 for Better convergence
chunk.extend(data)
data0 = file.readframes(CHUNK)
chunk = chunk[0:CHUNK * 2]
chunk.extend(numpy.zeros(CHUNK * 2 - len(chunk))) # Pad
    file.close()
return chunk
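# The batch generators below call dense_to_one_hot() and one_hot_from_item(),
# which are not defined (or imported) in this file. Minimal sketches are provided
# here; the assumed semantics are plain one-hot encoding.
def dense_to_one_hot(label_index, num_classes=10):
    """Convert a class index (e.g. a spoken digit) into a one-hot vector."""
    one_hot = numpy.zeros(num_classes)
    one_hot[label_index] = 1.0
    return one_hot
def one_hot_from_item(item, items):
    """One-hot encode `item` by its position in the list `items`."""
    one_hot = numpy.zeros(len(items))
    one_hot[items.index(item)] = 1.0
    return one_hot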
##############
# Batch utils
##############
def spectro_batch(batch_size=10):
return spectro_batch_generator(batch_size)
def spectro_batch_generator(batch_size,width=64,local_path="data/spoken_numbers_64x64/"):
batch,labels = [],[]
files=os.listdir(local_path)
while True:
shuffle(files)
for image_name in files:
image = skimage.io.imread(local_path+image_name).astype(numpy.float32)
data = image/255. # 0-1 for better convergence
            data = data.reshape([width * width])  # flatten the width x width spectrogram into a vector
batch.append(list(data))
labels.append(dense_to_one_hot(int(image_name[0])))
if len(batch) >= batch_size:
yield batch, labels
batch = []
labels = []
def word_batch_generator(batch_size=10,target=Target.word,local_path=PATH):
batch_waves = []
labels = []
speakers=get_speakers()
files = os.listdir(local_path)
while True:
shuffle(files)
for wav in files:
if not wav.endswith(".png"):continue
if target==Target.digits: labels.append(dense_to_one_hot(int(wav[0])))
if target==Target.speaker: labels.append(one_hot_from_item(speaker(wav), speakers))
chunk = load_wav_file(local_path+wav)
batch_waves.append(chunk)
# batch_waves.append(chunks[input_width])
if len(batch_waves) >= batch_size:
yield batch_waves, labels
batch_waves = [] # Reset for next batch
labels = []
def wave_batch_generator(batch_size=10,target=Target.speaker,local_path=PATH):
batch_waves,labels = [],[]
speakers=get_speakers()
files = os.listdir(local_path)
while True:
shuffle(files)
for wav in files:
if not wav.endswith(".wav"):continue
if target==Target.digits: labels.append(dense_to_one_hot(int(wav[0])))
if target==Target.speaker: labels.append(one_hot_from_item(speaker(wav), speakers))
chunk = load_wav_file(local_path+wav)
batch_waves.append(chunk)
# batch_waves.append(chunks[input_width])
if len(batch_waves) >= batch_size:
yield batch_waves, labels
batch_waves = []
labels = []
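# Illustrative usage of the generators above (assumes the spoken_numbers data has
# been downloaded into the paths configured at the top of this file):
#
#     batch, labels = next(wave_batch_generator(batch_size=32, target=Target.digits,
#                                               local_path=wav_path))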
##########
# Classes
##########
# Data set
class DataSet(object):
def __init__(self, images, labels, fake_data=False, one_hot=False, load=False):
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
num = len(images)
assert num == len(labels), ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
print("len(images) %d" % num)
self._num_examples = num
self.cache={}
self._image_names = numpy.array(images)
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
self._images=[]
if load:
self._images=self.load(self._image_names)
@property
def images(self):
return self._images
@property
def image_names(self):
return self._image_names
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
# only apply to a subset of all images at one time
def load(self,image_names):
print("loading %d images"%len(image_names))
        return list(map(self.load_image, image_names))  # list() because Python 3 map returns an iterator
def load_image(self,image_name):
if image_name in self.cache:
return self.cache[image_name]
else:
image = skimage.io.imread(DATA_DIR+ image_name).astype(numpy.float32)
# images = numpy.multiply(images, 1.0 / 255.0)
self.cache[image_name]=image
return image
# Return the next batch_size examples
def next_batch(self, batch_size, fake_data=False):
if fake_data:
fake_image = [1] * width * height
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._image_names = self._image_names[perm]
self._labels = self._labels[perm]
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self.load(self._image_names[start:end]), self._labels[start:end]
if __name__ == "__main__":
pass | mit | -1,158,133,404,492,838,400 | 22.105691 | 98 | 0.672356 | false |
MerlijnWajer/lewd | src/net.py | 1 | 1721 | """
This file is part of the LEd Wall Daemon (lewd) project
Copyright (c) 2009-2012 by ``brainsmoke'' and Merlijn Wajer (``Wizzup'')
lewd is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
lewd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with lewd. If not, see <http://www.gnu.org/licenses/>.
See the file COPYING, included in this distribution,
for details about the copyright.
"""
import asyncore, socket
import spiscreen
class LEDConnection(asyncore.dispatcher_with_send):
def __init__(self, conn, sock, addr):
asyncore.dispatcher_with_send.__init__(self, sock)
self.data = ''
def handle_read(self):
data = self.recv(12*10*3)
self.data += data
if len(self.data) < 12*10*3:
return
screen.push_data(self.data[:12*10*3])
self.data = self.data[12*10*3:]
class SocketServer(asyncore.dispatcher):
def __init__(self, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(('', port))
self.listen(5)
def handle_accept(self):
conn, addr = self.accept()
LEDConnection(self, conn, addr)
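# Illustrative client (hypothetical): the daemon expects raw frames of
# 12 x 10 RGB pixels (360 bytes) on port 8000:
#
#     import socket
#     s = socket.create_connection(('localhost', 8000))
#     s.sendall(bytearray(12 * 10 * 3))  # one all-black frame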
screen = spiscreen.SPIScreen()
s = SocketServer(8000)
asyncore.loop()
| gpl-3.0 | 3,525,649,452,878,843,400 | 29.732143 | 74 | 0.669959 | false |
landscapeio/pylint-common | setup.py | 1 | 1633 | # -*- coding: UTF-8 -*-
import sys
from setuptools import find_packages, setup
_version = '0.2.5'
_packages = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
_short_description = ("pylint-common is a Pylint plugin to improve Pylint "
"error analysis of the standard Python library")
_classifiers = (
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Topic :: Software Development :: Quality Assurance',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
)
if sys.version_info < (2, 7):
# pylint 1.4 dropped support for Python 2.6
_install_requires = [
'pylint>=1.0,<1.4',
'astroid>=1.0,<1.3.0',
'logilab-common>=0.60.0,<0.63',
'pylint-plugin-utils>=0.2.6',
]
else:
_install_requires = [
'pylint>=1.0',
'pylint-plugin-utils>=0.2.6',
]
setup(
name='pylint-common',
url='https://github.com/landscapeio/pylint-common',
author='landscape.io',
author_email='[email protected]',
description=_short_description,
version=_version,
packages=_packages,
install_requires=_install_requires,
license='GPLv2',
classifiers=_classifiers,
keywords='pylint stdlib plugin',
zip_safe=False # see https://github.com/landscapeio/prospector/issues/18#issuecomment-49857277
)
| gpl-2.0 | -4,498,307,940,988,987,400 | 28.690909 | 99 | 0.620943 | false |
barberscore/barberscore-api | project/apps/salesforce/models.py | 1 | 21997 | import json
# Third-Party
from model_utils import Choices
from distutils.util import strtobool
# Local
from apps.bhs.models import Convention, Award, Chart, Group, Person
from apps.registration.models import Contest, Session, Assignment, Entry
class SfConvention:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_Name'):
d['name'] = str(n.sf_Name.cdata)
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
season = int(float(n.sf_BS_Season__c.cdata))
d['season'] = season
# Panel
if hasattr(n, 'sf_BS_Panel__c'):
d['panel'] = int(float(n.sf_BS_Panel__c.cdata))
# Year
if hasattr(n, 'sf_Year__c'):
d['year'] = int(n.sf_Year__c.cdata)
# Open Date
if hasattr(n, 'sf_Open_Date__c'):
d['open_date'] = n.sf_Open_Date__c.cdata
# Close Date
if hasattr(n, 'sf_Close_Date__c'):
d['close_date'] = n.sf_Close_Date__c.cdata
# Start Date
if hasattr(n, 'sf_Start_Date__c'):
d['start_date'] = n.sf_Start_Date__c.cdata
# End Date
if hasattr(n, 'sf_End_Date__c'):
d['end_date'] = n.sf_End_Date__c.cdata
# Venue
if hasattr(n, 'sf_Venue__c'):
d['venue_name'] = n.sf_Venue__c.cdata
# Location
if hasattr(n, 'sf_Location__c'):
d['location'] = n.sf_Location__c.cdata
# Time Zone
if hasattr(n, 'sf_Time_Zone__c'):
d['timezone'] = n.sf_Time_Zone__c.cdata
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Divisions
if hasattr(n, 'sf_BS_Division__c'):
d['divisions'] = n.sf_BS_Division__c.cdata
# Kinds
if hasattr(n, 'sf_BS_Kind__c'):
d['kinds'] = n.sf_BS_Kind__c.cdata
# Return parsed dict
return d
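# Illustrative usage (a sketch; it assumes the notification argument is an element
# object from an XML binding such as `untangle`, which exposes child elements as
# attributes carrying a `.cdata` payload, matching the access pattern used above):
#
#     data = SfConvention.parse_sf_notification(parsed_sobject)
#     Convention.objects.update_or_create(id=data.pop('id'), defaults=data)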
class SfAward:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
d['gender'] = int(float(n.sf_BS_Classification__c.cdata)) if hasattr(n, 'sf_BS_Classification__c') else None
# Level
if hasattr(n, 'sf_BS_Level__c'):
d['level'] = int(float(n.sf_BS_Level__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# Is Single
if hasattr(n, 'sf_is_single__c'):
d['is_single'] = bool(strtobool(n.sf_is_single__c.cdata))
# Threshold
d['threshold'] = float(n.sf_Threshold__c.cdata) if hasattr(n, 'sf_Threshold__c') else None
# Minimum
d['minimum'] = float(n.sf_Minimum__c.cdata) if hasattr(n, 'sf_Minimum__c') else None
# advance
d['advance'] = float(n.sf_Advance__c.cdata) if hasattr(n, 'sf_Advance__c') else None
# spots
d['spots'] = int(float(n.sf_Spots__c.cdata)) if hasattr(n, 'sf_Spots__c') else None
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Age
d['age'] = int(float(n.sf_BS_Age__c.cdata)) if hasattr(n, 'sf_BS_Age__c') else None
# Is Novice
if hasattr(n, 'sf_is_novice__c'):
d['is_novice'] = bool(strtobool(n.sf_is_novice__c.cdata))
# Size
d['size'] = int(float(n.sf_BS_Size__c.cdata)) if hasattr(n, 'sf_BS_Size__c') else None
# Size Range
d['size_range'] = n.sf_Size_Range__c.cdata if hasattr(n, 'sf_Size_Range__c') else None
# Scope
d['scope'] = int(float(n.sf_BS_Scope__c.cdata)) if hasattr(n, 'sf_BS_Scope__c') else None
# Scope Range
d['scope_range'] = n.sf_Scope_Range__c.cdata if hasattr(n, 'sf_Scope_Range__c') else None
# Tree Sort
d['tree_sort'] = int(float(n.sf_Tree_Sort__c.cdata)) if hasattr(n, 'sf_Tree_Sort__c') else None
# Return parsed dict
return d
class SfChart:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_Name'):
d['title'] = n.sf_Name.cdata
# Arrangers
if hasattr(n, 'sf_Arrangers__c'):
d['arrangers'] = n.sf_Arrangers__c.cdata
# Composer
d['composers'] = n.sf_Composers__c.cdata if hasattr(n, 'sf_Composers__c') else ""
# Lyricist
d['lyricists'] = n.sf_Lyricists__c.cdata if hasattr(n, 'sf_Lyricists__c') else ""
# Holders
d['holders'] = n.sf_Holders__c.cdata if hasattr(n, 'sf_Holders__c') else ""
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Return parsed dict
return d
class SfGroup:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
if hasattr(n, 'sf_BS_Classification__c'):
d['gender'] = int(float(n.sf_BS_Classification__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# bhs_id
if hasattr(n, 'sf_cfg_Member_Id__c') and n.sf_cfg_Member_Id__c.cdata.isalnum():
# Is a Chorus
# code
d['code'] = n.sf_cfg_Member_Id__c.cdata if hasattr(n, 'sf_cfg_Member_Id__c') else ""
elif hasattr(n, 'sf_cfg_Member_Id__c'):
# Is a Quartet
d['bhs_id'] = int(n.sf_cfg_Member_Id__c.cdata) if hasattr(n, 'sf_cfg_Member_Id__c') else None
# Return parsed dict
return d
class SfPerson:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_FirstName') and hasattr(n, 'sf_LastName'):
d['name'] = n.sf_FirstName.cdata + " " + n.sf_LastName.cdata
# First Name
d['first_name'] = n.sf_FirstName.cdata if hasattr(n, 'sf_FirstName') else ""
# Last Name
d['last_name'] = n.sf_LastName.cdata if hasattr(n, 'sf_LastName') else ""
# part
d['part'] = int(float(n.sf_BS_VoicePart__c.cdata)) if hasattr(n, 'sf_BS_VoicePart__c') else None
# Gender
d['gender'] = int(float(n.sf_BS_Gender__c.cdata)) if hasattr(n, 'sf_BS_Gender__c') else None
# Email
d['email'] = n.sf_npe01__HomeEmail__c.cdata if hasattr(n, 'sf_npe01__HomeEmail__c') else ""
# Home Phone
d['home_phone'] = n.sf_HomePhone.cdata if hasattr(n, 'sf_HomePhone') else ""
# Cell Phone
d['cell_phone'] = n.sf_MobilePhone.cdata if hasattr(n, 'sf_MobilePhone') else ""
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Number__c.cdata) if hasattr(n, 'sf_cfg_Member_Number__c') else None
# Return parsed dict
return d
class SfSession:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Num Rounds
if hasattr(n, 'sf_Num_rounds__c'):
d['num_rounds'] = int(float(n.sf_Num_rounds__c.cdata))
# Is Invitational
if hasattr(n, 'sf_is_invitational__c'):
d['is_invitational'] = bool(strtobool(n.sf_is_invitational__c.cdata))
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Footnotes
d['footnotes'] = n.sf_Footnotes__c.cdata if hasattr(n, 'sf_Footnotes__c') else ""
if hasattr(n, 'sf_BS_Convention_UUID__c'):
d['convention_id'] = n.sf_BS_Convention_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# Panel
if hasattr(n, 'sf_BS_Panel__c'):
d['panel'] = int(float(n.sf_BS_Panel__c.cdata))
# Year
if hasattr(n, 'sf_Year__c'):
d['year'] = int(n.sf_Year__c.cdata)
# Open Date
if hasattr(n, 'sf_Open_Date__c'):
d['open_date'] = n.sf_Open_Date__c.cdata
# Close Date
if hasattr(n, 'sf_Close_Date__c'):
d['close_date'] = n.sf_Close_Date__c.cdata
# Start Date
if hasattr(n, 'sf_Start_Date__c'):
d['start_date'] = n.sf_Start_Date__c.cdata
# End Date
if hasattr(n, 'sf_End_Date__c'):
d['end_date'] = n.sf_End_Date__c.cdata
# Venue
if hasattr(n, 'sf_Venue__c'):
d['venue_name'] = n.sf_Venue__c.cdata
# Location
if hasattr(n, 'sf_Location__c'):
d['location'] = n.sf_Location__c.cdata
# Time Zone
if hasattr(n, 'sf_Time_Zone__c'):
d['timezone'] = n.sf_Time_Zone__c.cdata
# Divisions
if hasattr(n, 'sf_BS_Division__c'):
d['divisions'] = n.sf_BS_Division__c.cdata
# Return parsed dict
return d
class SfContest:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Award ID
if hasattr(n, 'sf_BS_Award_UUID__c'):
d['award_id'] = n.sf_BS_Award_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
d['gender'] = int(float(n.sf_BS_Classification__c.cdata)) if hasattr(n, 'sf_BS_Classification__c') else None
# Level
if hasattr(n, 'sf_BS_Level__c'):
d['level'] = int(float(n.sf_BS_Level__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# Age
d['age'] = int(float(n.sf_BS_Age__c.cdata)) if hasattr(n, 'sf_BS_Age__c') else None
# Is Novice
if hasattr(n, 'sf_is_novice__c'):
d['is_novice'] = bool(strtobool(n.sf_is_novice__c.cdata))
# Is Single
if hasattr(n, 'sf_is_single__c'):
d['is_single'] = bool(strtobool(n.sf_is_single__c.cdata))
# Size
d['size'] = int(float(n.sf_BS_Size__c.cdata)) if hasattr(n, 'sf_BS_Size__c') else None
# Size Range
d['size_range'] = n.sf_Size_Range__c.cdata if hasattr(n, 'sf_Size_Range__c') else None
# Scope
d['scope'] = int(float(n.sf_BS_Scope__c.cdata)) if hasattr(n, 'sf_BS_Scope__c') else None
# Scope Range
d['scope_range'] = n.sf_Scope_Range__c.cdata if hasattr(n, 'sf_Scope_Range__c') else None
# Tree Sort
d['tree_sort'] = int(float(n.sf_Tree_Sort__c.cdata)) if hasattr(n, 'sf_Tree_Sort__c') else None
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfAssignment:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Kind
if hasattr(n, 'sf_BS_Type__c'):
d['kind'] = int(float(n.sf_BS_Type__c.cdata))
# Category
if hasattr(n, 'sf_BS_Category__c'):
d['category'] = int(float(n.sf_BS_Category__c.cdata))
# Person ID
if hasattr(n, 'sf_BS_Contact_UUID__c'):
d['person_id'] = n.sf_BS_Contact_UUID__c.cdata
# Name
d['name'] = n.sf_Name__c.cdata if hasattr(n, 'sf_Name__c') else None
# First Name
d['first_name'] = n.sf_FirstName__c.cdata if hasattr(n, 'sf_FirstName__c') else None
# Last Name
d['last_name'] = n.sf_LastName__c.cdata if hasattr(n, 'sf_LastName__c') else None
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Area
if hasattr(n, 'sf_Area__c'):
d['area'] = n.sf_Area__c.cdata
# Email
d['email'] = n.sf_HomeEmail__c.cdata if hasattr(n, 'sf_HomeEmail__c') else None
# Cell Phone
d['cell_phone'] = n.sf_MobilePhone__c.cdata if hasattr(n, 'sf_MobilePhone__c') else None
# Airports
d['airports'] = n.sf_Airports__c.cdata if hasattr(n, 'sf_Airports__c') else None
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Number__c.cdata) if hasattr(n, 'sf_cfg_Member_Number__c') else None
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfEntry:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Is Evaluation
if hasattr(n, 'sf_is_evaluation__c'):
d['is_evaluation'] = bool(strtobool(n.sf_is_evaluation__c.cdata))
# Is Private
if hasattr(n, 'sf_is_private__c'):
d['is_private'] = bool(strtobool(n.sf_is_private__c.cdata))
# Is MT
if hasattr(n, 'sf_is_mt__c'):
d['is_mt'] = bool(strtobool(n.sf_is_mt__c.cdata))
# Is Senior
if hasattr(n, 'sf_is_senior__c'):
d['is_senior'] = bool(strtobool(n.sf_is_senior__c.cdata))
# Is Youth
if hasattr(n, 'sf_is_youth__c'):
d['is_youth'] = bool(strtobool(n.sf_is_youth__c.cdata))
# Draw
d['draw'] = int(float(n.sf_Draw_Order__c.cdata)) if hasattr(n, 'sf_Draw_Order__c') else None
# Prelim
d['prelim'] = float(n.sf_Prelim__c.cdata) if hasattr(n, 'sf_Prelim__c') else None
# Base
d['base'] = float(n.sf_Base__c.cdata) if hasattr(n, 'sf_Base__c') else None
# Participants
d['participants'] = n.sf_Participants__c.cdata if hasattr(n, 'sf_Participants__c') else ""
# POS
d['pos'] = int(float(n.sf_Persons_On_Stage__c.cdata)) if hasattr(n, 'sf_Persons_On_Stage__c') else None
# Area
if hasattr(n, 'sf_Organization__c'):
d['area'] = n.sf_Organization__c.cdata
# Chapters
d['chapters'] = n.sf_Chapters__c.cdata if hasattr(n, 'sf_Chapters__c') else ""
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Group ID
if hasattr(n, 'sf_BS_Account_UUID__c'):
d['group_id'] = n.sf_BS_Account_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
if hasattr(n, 'sf_BS_Classification__c'):
d['gender'] = int(float(n.sf_BS_Classification__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
if hasattr(n, 'sf_cfg_Member_Id__c'):
if (n.sf_cfg_Member_Id__c.cdata.isdigit()):
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Id__c.cdata)
else:
# code
d['code'] = n.sf_cfg_Member_Id__c.cdata
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfEntryContest:
def parse_sf_notification(n):
d = {}
# Contest UUID
if hasattr(n, 'sf_BS_Contest_UUID__c'):
d['contest_id'] = n.sf_BS_Contest_UUID__c.cdata
# Entry UUID
if hasattr(n, 'sf_BS_Entry_UUID__c'):
d['entry_id'] = n.sf_BS_Entry_UUID__c.cdata
# Is Deleted
if hasattr(n, 'sf_IsDeleted'):
d['deleted'] = bool(strtobool(n.sf_IsDeleted.cdata))
# Return parsed dict
return d
class SfGroupChart:
def parse_sf_notification(n):
d = {}
# Group UUID
if hasattr(n, 'sf_BS_Account_UUID__c'):
d['group_id'] = n.sf_BS_Account_UUID__c.cdata
# Chart UUID
if hasattr(n, 'sf_BS_Chart_UUID__c'):
d['chart_id'] = n.sf_BS_Chart_UUID__c.cdata
# Is Deleted
if hasattr(n, 'sf_IsDeleted'):
d['deleted'] = bool(strtobool(n.sf_IsDeleted.cdata))
# Return parsed dict
return d
| bsd-2-clause | -6,375,111,616,845,028,000 | 28.927891 | 116 | 0.519525 | false |
aaronn/django-rest-framework-passwordless | drfpasswordless/urls.py | 1 | 1129 | from drfpasswordless.settings import api_settings
from django.urls import path
from drfpasswordless.views import (
ObtainEmailCallbackToken,
ObtainMobileCallbackToken,
ObtainAuthTokenFromCallbackToken,
VerifyAliasFromCallbackToken,
ObtainEmailVerificationCallbackToken,
ObtainMobileVerificationCallbackToken,
)
app_name = 'drfpasswordless'
urlpatterns = [
path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'email/', ObtainEmailCallbackToken.as_view(), name='auth_email'),
path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'mobile/', ObtainMobileCallbackToken.as_view(), name='auth_mobile'),
path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'token/', ObtainAuthTokenFromCallbackToken.as_view(), name='auth_token'),
path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'email/', ObtainEmailVerificationCallbackToken.as_view(), name='verify_email'),
path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'mobile/', ObtainMobileVerificationCallbackToken.as_view(), name='verify_mobile'),
path(api_settings.PASSWORDLESS_VERIFY_PREFIX, VerifyAliasFromCallbackToken.as_view(), name='verify_token'),
]
| mit | 6,030,230,326,098,474,000 | 52.761905 | 134 | 0.772365 | false |
yuanchima/Activation-Visualization-Histogram | datasets/svhn.py | 1 | 2526 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import scipy.io
import scipy.ndimage as sn
import h5py
from util import log
# __PATH__ = os.path.abspath(os.path.dirname(__file__))
__PATH__ = './datasets/svhn'
rs = np.random.RandomState(123)
class Dataset(object):
def __init__(self, ids, name='default',
max_examples=None, is_train=True):
self._ids = list(ids)
self.name = name
self.is_train = is_train
if max_examples is not None:
self._ids = self._ids[:max_examples]
filename = 'data.hy'
file = os.path.join(__PATH__, filename)
log.info("Reading %s ...", file)
try:
self.data = h5py.File(file, 'r')
except:
raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
log.info("Reading Done: %s", file)
def get_data(self, id):
# preprocessing and data augmentation
m = self.data[id]['image'].value/255.
l = self.data[id]['label'].value.astype(np.float32)
# Data augmentation: rotate 0, 90, 180, 270
"""
rot_num = np.floor(np.random.rand(1)*4)
for i in range(rot_num):
m = np.rot90(m, axes=(0, 1))
m = m + np.random.randn(*m.shape) * 1e-2
"""
return m, l
@property
def ids(self):
return self._ids
def __len__(self):
return len(self.ids)
def __repr__(self):
return 'Dataset (%s, %d examples)' % (
self.name,
len(self)
)
def get_data_info():
return np.array([32, 32, 10, 3])
def get_conv_info():
return np.array([64, 128, 256])
def get_vis_info():
return np.array([[128, 128], [64, 128], [64, 64], [10, 16], [5, 8], [2, 5]])
def create_default_splits(is_train=True):
ids = all_ids()
n = len(ids)
num_trains = 73257
dataset_train = Dataset(ids[:num_trains], name='train', is_train=False)
dataset_test = Dataset(ids[num_trains:], name='test', is_train=False)
return dataset_train, dataset_test
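# Illustrative usage (assumes ./datasets/svhn/data.hy and id.txt have been
# downloaded; the index below is only an example):
#
#     trainset, testset = create_default_splits()
#     image, label = trainset.get_data(trainset.ids[0])  # image scaled to [0, 1]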
def all_ids():
id_filename = 'id.txt'
id_txt = os.path.join(__PATH__, id_filename)
try:
with open(id_txt, 'r') as fp:
_ids = [s.strip() for s in fp.readlines() if s]
except:
raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
rs.shuffle(_ids)
return _ids
| mit | 8,197,584,550,295,132,000 | 25.041237 | 96 | 0.563737 | false |
simplelist/python_test01 | mechine/KNY.py | 1 | 3559 | #-*- coding: utf-8 -*-
from numpy import *
import operator
# 读取数据到矩阵
def file2matrix(filename):
# 打开数据文件,读取每行内容
fr = open(filename)
arrayOLines = fr.readlines()
# 初始化矩阵
numberOfLines = len(arrayOLines)
returnMat = zeros((numberOfLines,3))
# 初始化类标签向量
classLabelVector = []
# 循环读取每一行数据
index = 0
for line in arrayOLines:
# 去掉回车符
line = line.strip()
# 提取4个数据项
listFromLine = line.split('\t')
# 将前三项数据存入矩阵
returnMat[index,:] = listFromLine[0:3]
# 将第四项数据存入向量
classLabelVector.append(int(listFromLine[-1]))
index += 1
return returnMat,classLabelVector
# 数据归一化
def autoNorm(dataSet):
# 读取矩阵中数据项的最大和最小值
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
# 获得最大和最小值间差值
ranges = maxVals - minVals
# 初始化输出结果
normDataSet = zeros(shape(dataSet))
# 获取矩阵的行数
m = dataSet.shape[0]
# 矩阵运算:实现归一化公式中的 oldValue - min 一步
normDataSet = dataSet - tile(minVals, (m,1))
# 矩阵除法:实现归一化公式中的除法
normDataSet = normDataSet/tile(ranges, (m,1))
# 返回归一化后的数据,数据范围及最小值矩阵
return normDataSet, ranges, minVals
# kNN算法实现
def classify0(inX, dataSet, labels, k):
# 获取样本数据数量
dataSetSize = dataSet.shape[0]
# 矩阵运算,计算测试数据与每个样本数据对应数据项的差值
diffMat = tile(inX, (dataSetSize,1)) - dataSet
# sqDistances 上一步骤结果平方和
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis=1)
# 取平方根,得到距离向量
distances = sqDistances**0.5
# 按照距离从低到高排序
sortedDistIndicies = distances.argsort()
classCount={}
# 依次取出最近的样本数据
for i in range(k):
# 记录该样本数据所属的类别
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
# 对类别出现的频次进行排序,从高到低
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
# 返回出现频次最高的类别
return sortedClassCount[0][0]
# 算法测试
def datingClassTest():
# 设定测试数据的比例
hoRatio = 0.10
# 读取数据
datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
# 归一化数据
normMat, ranges, minVals = autoNorm(datingDataMat)
# 数据总行数
m = normMat.shape[0]
# 测试数据行数
numTestVecs = int(m*hoRatio)
# 初始化错误率
errorCount = 0.0
# 循环读取每行测试数据
for i in range(numTestVecs):
# 对该测试人员进行分类
classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
# 打印KNN算法分类结果和真实的分类
print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i])
# 判断KNN算法结果是否准确
if (classifierResult != datingLabels[i]): errorCount += 1.0
# 打印错误率
print "the total error rate is: %f" % (errorCount/float(numTestVecs))
# 执行算法测试
datingClassTest() | lgpl-3.0 | 314,112,716,281,400,060 | 21 | 111 | 0.641604 | false |
darrencheng0817/AlgorithmLearning | Python/leetcode/uglyNumber.py | 1 | 2890 | '''
Created on 2015年12月11日
https://leetcode.com/problems/ugly-number/
https://leetcode.com/problems/ugly-number-ii/
https://leetcode.com/problems/super-ugly-number/
@author: Darren
'''
'''
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
'''
def isUlgyNumber(num):
if not num:
return False
if num==1:
return True
if num%2==0:
return isUlgyNumber(num//2)
if num%3==0:
return isUlgyNumber(num//3)
if num%5==0:
return isUlgyNumber(num//5)
return False
print(isUlgyNumber(14))
'''
Write a program to find the n-th ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.
Note that 1 is typically treated as an ugly number.
'''
def ulgyNumber(N):
if N<1:
raise Exception("Invalid Input")
if N==1:
return 1
res=[1]*N
count=[0]*3
primes=[2,3,5]
for i in range(1,N):
nextNum=min([prime*res[count[j]] for j,prime in enumerate(primes)])
for j,prime in enumerate(primes):
if nextNum==prime*res[count[j]]:
count[j]+=1
res[i]=nextNum
return res
print(ulgyNumber(10))
'''
Write a program to find the nth super ugly number.
Super ugly numbers are positive numbers whose all prime factors are in the given prime list primes of size k. For example, [1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28, 32] is the sequence of the first 12 super ugly numbers given primes = [2, 7, 13, 19] of size 4.
Note:
(1) 1 is a super ugly number for any given primes.
(2) The given numbers in primes are in ascending order.
(3) 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.
'''
def nthSuperUglyNumber(n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
if n==1:
return 1
res=[1]*n
count=[0]*len(primes)
for __index in range(1,n):
nextNum=min([prime*res[count[index]] for index,prime in enumerate(primes)])
for index,prime in enumerate(primes):
if nextNum==prime*res[count[index]]:
count[index]+=1
res[__index]=nextNum
return res[-1]
n=200000
primes=[2,3,5,13,19,29,31,41,43,53,59,73,83,89,97,103,107,109,127,137,139,149,163,173,179,193,197,199,211,223,227,229,239,241,251,257,263,269,271,281,317,331,337,347,353,359,367,373,379,389,397,409,419,421,433,449,457,461,463,479,487,509,521,523,541,547,563,569,577,593,599,601,613,619,631,641,659,673,683,701,709,719,733,739,743,757,761,769,773,809,811,829,857,859,881,919,947,953,967,971]
print(nthSuperUglyNumber(n, primes)) | mit | 3,427,185,567,020,352,500 | 32.5 | 390 | 0.651042 | false |
proggy/fns | frogs.py | 1 | 1401 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright notice
# ----------------
#
# Copyright (C) 2014 Daniel Jung
# Contact: [email protected]
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
"""Frog definitions for this package. Requires the frog module."""
__created__ = '2014-09-27'
__modified__ = '2014-09-27'
from frog import Frog
import fns
# dashify
f = Frog(inmap=dict(files='$@'),
usage='dashify [options] FILES',
optdoc=dict(alldots='do not even preserve the last dot',
verbose='be verbose',
test='list changes without actually renaming any files',
nolower='do not switch to lowercase'))
#allow_interspersed_args=False
f(fns.dashify)
| gpl-2.0 | 796,572,170,233,028,400 | 34.025 | 77 | 0.683797 | false |
kapadia/toplotly | toplotly/__init__.py | 1 | 2486 |
import os
import json
from dateutil.parser import parse
import plotly
from plotly.graph_objs import Histogram, Scatter, Scatter3d, Data, Layout, XAxis, YAxis, ZAxis, Figure
__version__ = '0.0.1'
def is_numeric(x):
try:
float(x)
return True
except ValueError:
return False
def is_date(d):
try:
parse(d)
return True
except ValueError, AttributeError:
return False
def is_string(x):
return is_numeric(x) + is_date(x) == 0
def format_data(data):
data = json.loads(''.join(data))
keys = data[0].keys()
# Check column type
sidx = [ idx for idx, key in enumerate(keys) if is_string(data[0][key]) ]
values = [ [ d.get(key) for key in keys ] for d in data ]
values = zip(*values)
if len(sidx) == 1:
text = values.pop(sidx[0])
keys.pop(sidx[0])
else:
text = None
return {
'layout': {
'axes': keys
},
'data': {
'values': values,
'text': text
}
}
def get_histogram(data):
values = data['values']
return Data([
Histogram(
x=values
)
])
def get_scatter2d(data):
values = data['values']
return Data([
Scatter(
x=values[0],
y=values[1],
mode='markers',
text=data['text']
)
])
def get_scatter3d(data):
values = data['values']
return Data([
Scatter3d(
x=values[0],
y=values[1],
z=values[2]
)
])
def post(filename, data, fileopt='new', title=None, world_readable=True):
# Get username and api key
username = os.environ.get('PLOTLY_USERNAME')
api_key = os.environ.get('PLOTLY_API_KEY')
plotly.tools.set_credentials_file(username=username, api_key=api_key)
axes = data['layout']['axes']
nAxes = len(axes)
get_data = {
1: get_histogram,
2: get_scatter2d,
3: get_scatter3d
}
axes_kwargs = ['xaxis', 'yaxis', 'zaxis']
axes_obj = [XAxis, YAxis, ZAxis]
layout_kwargs = { axes_kwargs[idx]: axes_obj[idx](title=axis) for idx, axis in enumerate(axes) }
dataobj = get_data[nAxes](data['data'])
layout = Layout(**layout_kwargs)
fig = Figure(data=dataobj, layout=layout)
r = plotly.plotly.plot(fig, filename=filename)
print r
| mit | -6,038,778,973,686,361,000 | 18.429688 | 102 | 0.530571 | false |
Liorst4/pysteamcli | pysteamcli/app_manifest.py | 1 | 1745 | #!/usr/bin/env python3
"""
Parse Steam's application manifest files.
"""
import itertools
def next_data(it):
"""
Advances an iterator until new data is found.
:param it: Character iterator.
:returns: Data found.
"""
quotation_mark = lambda c: c != '"'
data_begin = itertools.dropwhile(quotation_mark, it)
next(data_begin)
data = itertools.takewhile(quotation_mark, data_begin)
return ''.join(data)
def next_scope(it):
"""
Advances the iterator until a scope closing mark is found.
:param it: Character iterator.
:returns: The content of the scope.
"""
s_counter = 0
for i in it:
if i == '{':
s_counter += 1
elif i == '}':
if s_counter == 0:
break
else:
s_counter -= 1
yield i
def parse_acf_content(it):
"""
Parse the content of an acf file.
:param it: Character iterator.
:returns: The content of an acf file as a dictionary.
"""
result = list()
while True:
try:
key = next_data(it)
value_type = next(it)
next(it)
if value_type == '\t':
# Data
value = next_data(it)
elif value_type == '\n':
# Nested scope.
value = parse_acf_content(next_scope(it))
else:
raise Exception
except StopIteration:
break
result.append((key, value))
return dict(result)
def parse_acf_file(file_path):
"""
Parse an acf file.
"""
with open(file_path, 'r') as acf_file:
content = acf_file.read()
return parse_acf_content(iter(content))
| mit | 4,851,794,010,905,452,000 | 18.388889 | 62 | 0.525501 | false |
xaime/sneaks | modules/report.py | 1 | 31061 | # - *- coding: utf- 8 - *- .
# SNEAKS - Snooping Early Alert Knowledge Service
from datetime import timedelta
from boomslang import *
import math
import pygeoip
import socket
from ConfigParser import RawConfigParser
import html2text
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import codecs
import logging
import re
import smtplib
from operator import itemgetter
def tagreplace(tag, html, replacehtml):
"""
Reemplaza el texto contenido entre <!--tag--> y <!--/tag--> por replacehtml
"""
t1 = html.index("<!--" + tag + "-->")
t2 = html.index("<!--/" + tag + "-->")
return html[:t1] + replacehtml + html[t2 + len(tag) + 8:]
def tagdelete(tag, html):
"""
Elimina el texto contenido entre <!--tag--> y <!--/tag-->
"""
t1 = html.index("<!--" + tag + "-->")
t2 = html.index("<!--/" + tag + "-->")
return html[:t1] + html[t2 + len(tag) + 8:]
def org_report_chart(rtime_frame, rinterval, rtime, rips, people, org_alarm_threshold, plugin_dir):
"""
Genera los gráficos de footprinting de la organización y todas las personas con detecciones
y los guarda en la carpeta temp
"""
detected_persons = 0
person_index = []
pinterval = rinterval + rtime_frame
rorg_eval_data = [0] * pinterval
for rperson in people:
rperson.eval_data(rtime_frame, pinterval, rtime, rips)
if rperson.detection:
person_index.append(people.index(rperson))
detected_persons += 1
for j in range(pinterval):
rorg_eval_data[j] += rperson.detection_data[j]
datemin = rtime - timedelta(minutes=(pinterval - 1))
datelist = []
for i in range(pinterval):
datelist.append(datemin + timedelta(minutes=i))
dateidx = range(len(rorg_eval_data))
orgplot = Plot()
orgplot.yLimits = (0, max(rorg_eval_data) + 10)
orgplot.xLimits = (0, max(dateidx))
orgplot.grid.visible = True
orgplot.title = u"Nivel de footprinting sobre la organización"
orgplot.yLabel = u"Valor acumulado en los últimos " + str(rtime_frame) + " minutos"
orgplot.yLabelProperties = {"color":"#808080", "fontsize": 10}
# relleno gris claro del intervalo completo
orgline_fill = StackedLines()
orgline_fill1 = Line()
orgline_fill1.xValues = dateidx
orgline_fill1.yValues = rorg_eval_data
orgline_fill1.lineWidth = 0
points = [dateidx[datelist.index(c)] for c in datelist if c.minute in {0, 30}]
labels = [c.strftime("%H:%M") for c in datelist if c.minute in {0, 30}]
if len(points) > 24:
points = [dateidx[datelist.index(c)] for c in datelist if c.minute == 0]
labels = [c.strftime("%H:%M") for c in datelist if c.minute == 0]
orgline_fill.xTickLabelPoints = points
orgline_fill.xTickLabels = labels
orgline_fill.xTickLabelProperties = {"rotation": 45, "fontsize": 10}
orgline_fill.addLine(orgline_fill1, color="#E6E6E6")
orgplot.add(orgline_fill)
# linea intermintente del intervalo completo
orgline_p = Line()
orgline_p.xValues = dateidx
orgline_p.yValues = rorg_eval_data
orgline_p.lineStyle = "--"
orgline_p.color = "#B2B2B2"
orgplot.add(orgline_p)
# relleno rojo del intervalo analizado
orgline_fill_p = StackedLines()
orgline_fill_p1 = Line()
orgline_fill_p1.xValues = dateidx[rtime_frame:]
orgline_fill_p1.yValues = rorg_eval_data[rtime_frame:]
orgline_fill_p1.lineWidth = 0
orgline_fill_p.addLine(orgline_fill_p1, color="#FF0000")
orgplot.add(orgline_fill_p)
# Se añade la linea sólida de nivel acumulado para "rinterval"
orgline_s = Line()
orgline_s.xValues = dateidx[rtime_frame:]
orgline_s.yValues = rorg_eval_data[rtime_frame:]
orgline_s.lineWidth = 2
orgplot.add(orgline_s)
# Se añade la linea de umbral y su etiqueta
torgline = Line()
torgline.xValues = dateidx
torgline.yValues = [org_alarm_threshold]*pinterval
torgline.lineStyle = "--"
torgline.color = 'r'
orgplot.add(torgline)
tlabel = Label(len(dateidx)/12, org_alarm_threshold + ((max(rorg_eval_data) + 10)/50),
"Umbral (" + str(org_alarm_threshold) + ")")
orgplot.add(tlabel)
# relleno azul del intervalo analizado por debajo del umbral
orgline_fill_u = StackedLines()
orgline_fill_u1 = Line()
orgline_fill_u1.xValues = dateidx[rtime_frame:]
temp = rorg_eval_data[rtime_frame:]
for i in range(len(temp)):
if temp[i] > org_alarm_threshold:
temp[i] = org_alarm_threshold
orgline_fill_u1.yValues = temp
orgline_fill_u1.lineWidth = 0
orgline_fill_u.addLine(orgline_fill_u1, color="#3399FF")
orgplot.add(orgline_fill_u)
# Se añade la linea vertical que marca el intervalo analizado
vline1 = VLine()
vline1.xValues = \
[dateidx[datelist.index(c)] for c in datelist if
(c.minute == (rtime - timedelta(minutes=rinterval - 1)).minute
and c.hour == (rtime - timedelta(minutes=rinterval - 1)).hour)]
vline1.color = 'b'
vline1.lineStyle = ":"
orgplot.add(vline1)
rorg_eval_data_polar = [0]*len(plugin_dir)
for i in person_index:
for j in range(len(plugin_dir)):
rorg_eval_data_polar[j] += max(people[i].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips))
# Se dibuja la proyección de tipo radar
radarplot = Plot()
radarplot.projection = 'polar'
radarplot.title = u"Valor máximo por origen de detección"
radarplot.yLimits = (0, max(rorg_eval_data_polar) + 2)
radarplot.grid.color = "#A1A1A1"
radarplot.grid.visible = True
radarplot.grid.style = "--"
lineradar = Line()
t = len(plugin_dir)
lineradar.yValues = rorg_eval_data_polar + [rorg_eval_data_polar[0]]
lineradar.xValues = [(2*math.pi/t)*x for x in range(t)] + [2*math.pi]
lineradar.xTickLabelPoints = [(2*math.pi/t)*x for x in range(t)]
lineradar.xTickLabels = [p[8:] for p in plugin_dir]
lineradar.xTickLabelProperties = {"color": "#006600", "alpha": 0.8}
lineradar.lineWidth = 2
lineradar.color = "r"
radarscat = Scatter()
radarscat.xValues = lineradar.xValues
radarscat.yValues = lineradar.yValues
radarscat.markerSize = 25
radarscat.marker = "s"
radarplot.add(lineradar)
radarplot.add(radarscat)
orgplot.setDimensions(8, 5, dpi=75)
radarplot.setDimensions(5, 5, dpi=50)
orgplot.save("temp/imgchart_org.png")
radarplot.save("temp/imgradar_org.png")
# Ahora se comienza con el dibujo de las gráficas para cada pesona con detecciones
personplot = []
personline_fill = []
personline_fill1 = []
personline_p = []
personline_fill_p = []
personline_fill_p1 = []
personline_s = []
tpersonline = []
tplabel = []
personline_fill_u = []
personline_fill_u1 = []
vline = []
pradarplot = []
plineradar = []
pradarscat = []
for idx in person_index:
people[idx].eval_data(rtime_frame, pinterval, rtime, rips)
p_eval_data = people[idx].detection_data
personplot.append(Plot())
personplot[-1].yLimits = orgplot.yLimits
personplot[-1].xLimits = orgplot.xLimits
personplot[-1].grid.visible = True
personplot[-1].title = "Nivel de footprinting sobre " + people[idx].name
personplot[-1].yLabel = orgplot.yLabel
personplot[-1].yLabelProperties = orgplot.yLabelProperties
# relleno gris claro del intervalo completo
personline_fill.append(StackedLines())
personline_fill1.append(Line())
personline_fill1[-1].xValues = dateidx
personline_fill1[-1].yValues = p_eval_data
personline_fill1[-1].lineWidth = 0
personline_fill[-1].xTickLabelPoints = orgline_fill.xTickLabelPoints
personline_fill[-1].xTickLabels = orgline_fill.xTickLabels
personline_fill[-1].xTickLabelProperties = orgline_fill.xTickLabelProperties
personline_fill[-1].addLine(personline_fill1[-1], color="#E6E6E6")
personplot[-1].add(personline_fill[-1])
# linea intermintente del intervalo completo
personline_p.append(Line())
personline_p[-1].xValues = dateidx
personline_p[-1].yValues = p_eval_data
personline_p[-1].lineStyle = "--"
personline_p[-1].color = "#B2B2B2"
personplot[-1].add(personline_p[-1])
# relleno rojo del intervalo analizado
personline_fill_p.append(StackedLines())
personline_fill_p1.append(Line())
personline_fill_p1[-1].xValues = orgline_fill_p1.xValues
personline_fill_p1[-1].yValues = p_eval_data[rtime_frame:]
personline_fill_p1[-1].lineWidth = 0
personline_fill_p[-1].addLine(personline_fill_p1[-1], color="#FF8080")
personplot[-1].add(personline_fill_p[-1])
# Se añade la linea sólida de nivel acumulado para "rinterval"
personline_s.append(Line())
personline_s[-1].xValues = orgline_s.xValues
personline_s[-1].yValues = p_eval_data[rtime_frame:]
personline_s[-1].lineWidth = 2
personline_s[-1].color = "#666666"
personplot[-1].add(personline_s[-1])
# Se añade la linea de umbral y su etiqueta
tpersonline.append(Line())
tpersonline[-1].xValues = dateidx
tpersonline[-1].yValues = [people[idx].alarm_threshold]*pinterval
tpersonline[-1].lineStyle = "--"
tpersonline[-1].color = 'r'
personplot[-1].add(tpersonline[-1])
tplabel.append(Label(len(dateidx)/7, people[idx].alarm_threshold + ((max(rorg_eval_data) + 10)/50),
"Umbral personal (" + str(people[idx].alarm_threshold) + ")"))
personplot[-1].add(tplabel[-1])
# relleno azul del intervalo analizado por debajo del umbral
personline_fill_u.append(StackedLines())
personline_fill_u1.append(Line())
personline_fill_u1[-1].xValues = dateidx[rtime_frame:]
temp = p_eval_data[rtime_frame:]
for i in range(len(temp)):
if temp[i] > people[idx].alarm_threshold:
temp[i] = people[idx].alarm_threshold
personline_fill_u1[-1].yValues = temp
personline_fill_u1[-1].lineWidth = 0
personline_fill_u[-1].addLine(personline_fill_u1[-1], color="#85C2FF")
personplot[-1].add(personline_fill_u[-1])
# Se añade la linea vertical que marca el intervalo analizado
vline.append(VLine())
vline[-1].xValues = \
[dateidx[datelist.index(c)] for c in datelist if
(c.minute == (rtime - timedelta(minutes=rinterval - 1)).minute
and c.hour == (rtime - timedelta(minutes=rinterval - 1)).hour)]
vline[-1].color = 'b'
vline[-1].lineStyle = ":"
personplot[-1].add(vline[-1])
pplugin = [p[8:] for p in plugin_dir]
for ds in people[idx].datasources: # Se eliminan las etiquetas de plugins desactivados
if not ds.enabled:
for p in plugin_dir:
if str(ds).count(p):
pplugin.pop(pplugin.index(p[8:]))
t = len(pplugin)
p_eval_data_polar = []
for j in range(len(people[idx].datasources)):
if people[idx].datasources[j].enabled:
p_eval_data_polar.append(max(people[idx].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips)))
# Se dibuja la proyección de tipo radar
pradarplot.append(Plot())
pradarplot[-1].projection = 'polar'
pradarplot[-1].title = u"Valor máximo por origen de detección\n" + people[idx].name
pradarplot[-1].yLimits = (0, max(rorg_eval_data_polar) + 2)
pradarplot[-1].grid.color = "#A1A1A1"
pradarplot[-1].grid.visible = True
pradarplot[-1].grid.style = "--"
plineradar.append(Line())
plineradar[-1].yValues = p_eval_data_polar + [p_eval_data_polar[0]]
plineradar[-1].xValues = [(2*math.pi/t)*x for x in range(t)] + [2*math.pi]
plineradar[-1].xTickLabelPoints = [(2*math.pi/t)*x for x in range(t)]
plineradar[-1].xTickLabels = pplugin
plineradar[-1].xTickLabelProperties = {"color": "#006600", "alpha": 0.8}
plineradar[-1].lineWidth = 2
plineradar[-1].color = "r"
pradarscat.append(Scatter())
pradarscat[-1].xValues = plineradar[-1].xValues
pradarscat[-1].yValues = plineradar[-1].yValues
pradarscat[-1].markerSize = 25
pradarscat[-1].marker = "s"
pradarplot[-1].add(plineradar[-1])
pradarplot[-1].add(pradarscat[-1])
personplot[-1].setDimensions(8, 5, dpi=75)
pradarplot[-1].setDimensions(5, 5, dpi=50)
personplot[-1].save("temp/imgchart_" + people[idx].person + ".png")
pradarplot[-1].save("temp/imgradar_" + people[idx].person + ".png")
def save_org_report(rtime_frame, rinterval, rtime, rips, people, org_alarm_threshold, plugin_dir, filenamesave):
"""
Genera un informe de eventos de footprinting para la organización
"""
with open("resources/mail/orgreporttemplate.html", 'r') as f:
orghtml = f.read()
detected_persons = 0
person_index = []
rorg_eval_data = [0] * rinterval
for rperson in people:
rperson.eval_data(rtime_frame, rinterval, rtime, rips)
if rperson.detection:
person_index.append(people.index(rperson))
detected_persons += 1
for j in range(rinterval):
rorg_eval_data[j] += rperson.detection_data[j]
prev_rorg_eval_data = [0] * rinterval
for rperson in people:
rperson.eval_data(rtime_frame, rinterval, rtime - timedelta(minutes=rinterval), rips)
if rperson.detection:
for j in range(rinterval):
prev_rorg_eval_data[j] += rperson.detection_data[j]
orghtml = orghtml.replace('-ORGTHRESHOLD-', str(org_alarm_threshold))
if max(rorg_eval_data) >= org_alarm_threshold:
orghtml = orghtml.replace('-TITLE-', "Alarma de Footprinting")
orghtml = tagdelete("NOALARM", orghtml)
if max(prev_rorg_eval_data) < org_alarm_threshold: # Detección nueva
orghtml = tagdelete("ALARMUP", orghtml)
orghtml = tagdelete("ALARMDOWN", orghtml)
orghtml = tagdelete("ALARMSTABLE", orghtml)
orghtml = orghtml.replace('-CHECKINTERVAL-', str(rinterval))
orghtml = orghtml.replace('-LEVELMAX-', str(max(rorg_eval_data)))
levelmaxtime = rtime + timedelta(minutes=rorg_eval_data.index(max(rorg_eval_data)) - rinterval)
orghtml = orghtml.replace('-LEVELMAXTIME-', levelmaxtime.strftime("%H:%M"))
idxtt = 0
for data in rorg_eval_data:
if data > org_alarm_threshold:
idxtt = data
break
timethreshold = rtime + timedelta(minutes=rorg_eval_data.index(idxtt) - rinterval)
orghtml = orghtml.replace('-TIMETHRESHOLD-', timethreshold.strftime("%H:%M"))
elif rorg_eval_data[-1] >= org_alarm_threshold: # Continua la alarma
orghtml = tagdelete("NEWALARM", orghtml)
orghtml = tagdelete("ALARMDOWN", orghtml)
if rorg_eval_data[-1] > prev_rorg_eval_data[-1]:
orghtml = tagdelete("ALARMSTABLE", orghtml)
else:
orghtml = tagdelete("ALARMUP", orghtml)
orghtml = orghtml.replace('-CHECKINTERVAL-', str(rinterval))
orghtml = orghtml.replace('-LASTLEVEL-', str(rorg_eval_data[-1]))
elif rorg_eval_data[-1] < org_alarm_threshold: # Se acaba la alarma
orghtml = tagdelete("ALARMUP", orghtml)
orghtml = tagdelete("NEWALARM", orghtml)
orghtml = tagdelete("ALARMSTABLE", orghtml)
orghtml = tagdelete("RUNNINGFOOTPRINTING", orghtml)
idxtt = 0
for data in rorg_eval_data[::-1]:
if data >= org_alarm_threshold:
idxtt = data
break
leveldown = rtime + timedelta(minutes=rorg_eval_data.index(idxtt) - rinterval)
orghtml = orghtml.replace('-LEVELDOWN-', leveldown.strftime("%H:%M"))
else:
orghtml = orghtml.replace('-TITLE-', "Informe de Footprinting")
orghtml = tagdelete("ALARM", orghtml)
orghtml = orghtml.replace('-DATEMIN-', (rtime - timedelta(minutes=rinterval)).strftime("%H:%M"))
orghtml = orghtml.replace('-DATEMAX-', rtime.strftime("%H:%M"))
orghtml = orghtml.replace('-ORGCHART-', "imgchart_org.png")
orghtml = orghtml.replace('-ORGRADAR-', "imgradar_org.png")
orghtml = orghtml.replace('-ONUMPER-', str(detected_persons))
rorg_eval_data_polar = [0]*len(plugin_dir)
for i in person_index:
for j in range(len(plugin_dir)):
rorg_eval_data_polar[j] += max(people[i].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips))
oplugin = plugin_dir[rorg_eval_data_polar.index(max(rorg_eval_data_polar))]
orghtml = orghtml.replace('-OPLUGIN-', oplugin[8:])
orghtml = orghtml.replace('-ONUMIP-', str(len(rips)))
onumsem = len([a for a in rorg_eval_data_polar if a > 0])
orghtml = orghtml.replace('-ONUMSEN-', str(onumsem))
# Iteramos para cada persona
p1 = orghtml.index("<!--PERSON-->")
p2 = orghtml.index("<!--/PERSON-->")
persontemplate = orghtml[p1:p2+14]
personhtml = ''
for idx in person_index:
htmltemp = persontemplate
htmltemp = htmltemp.replace('-USERNAME-', people[idx].name.encode('ascii', 'xmlcharrefreplace'))
htmltemp = htmltemp.replace('-USERCHART-', 'imgchart_' + people[idx].person + '.png')
htmltemp = htmltemp.replace('-USERRADAR-', 'imgradar_' + people[idx].person + '.png')
pplugin = [p[8:] for p in plugin_dir]
for ds in people[idx].datasources: # Se eliminan las etiquetas de plugins desactivados
if not ds.enabled:
for p in plugin_dir:
if str(ds).count(p):
pplugin.pop(pplugin.index(p[8:]))
p_eval_data_polar = []
for j in range(len(people[idx].datasources)):
if people[idx].datasources[j].enabled:
p_eval_data_polar.append(max(people[idx].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips)))
uplugin = pplugin[p_eval_data_polar.index(max(p_eval_data_polar))]
htmltemp = htmltemp.replace('-UPLUGIN-', uplugin)
unumsem = len([a for a in p_eval_data_polar if a > 0])
htmltemp = htmltemp.replace('-UNUMSEN-', str(unumsem))
people[idx].eval_data(rtime_frame, rinterval, rtime, rips)
if people[idx].alarmed:
if not people[idx].notify:
htmltemp = tagdelete("UNOTIFY", htmltemp)
else:
htmltemp = htmltemp.replace('-UMAIL-', people[idx].email.encode('ascii', 'xmlcharrefreplace'))
else:
htmltemp = tagdelete("UALARMED", htmltemp)
pips = set([d[0] for d in people[idx].get_ips(rinterval + rtime_frame, rtime)])
if pips:
unumip = len(pips.intersection(set(rips)))
else:
unumip = 0
htmltemp = htmltemp.replace('-UNUMIP-', str(unumip))
personhtml += htmltemp
orghtml = orghtml.replace(persontemplate, personhtml)
# Generamos el texto del informe
report_data = []
for idx in person_index:
report_data += people[idx].get_report_data(rinterval + rtime_frame, rtime, rips)
report_data = sorted(report_data, key=itemgetter(0)) # Se ordena por fecha y hora
p1 = orghtml.index("<!--DATAROW-->")
p2 = orghtml.index("<!--/DATAROW-->")
htmlrow = orghtml[p1:p2+15]
p1 = orghtml.index("<!--ALTDATAROW-->")
p2 = orghtml.index("<!--/ALTDATAROW-->")
htmlaltrow = orghtml[p1:p2+18]
rawdata = pygeoip.GeoIP('resources/geoip/GeoLiteCity.dat')
htmltable = ""
noalt = True
for data in report_data:
if noalt:
datarow = htmlrow
else:
datarow = htmlaltrow
datarow = datarow.replace('-EHOUR-', data[0].strftime("%H:%M"))
try:
hostname = str(socket.gethostbyaddr(data[1])[0])
except:
hostname = data[1]
datarow = datarow.replace('-EIP-', hostname)
datarow = datarow.replace('-EDESCRIPT-', data[2].encode('ascii', 'xmlcharrefreplace'))
datarow = datarow.replace('-EPLUGIN-', data[3])
datarow = datarow.replace('-EPERSON-', data[4].encode('ascii', 'xmlcharrefreplace'))
try:
ipdata = rawdata.record_by_name(data[1])
country = ipdata['country_name']
city = ipdata['city']
iplocation = (city + ", " + country).encode('ascii', 'xmlcharrefreplace')
except:
iplocation = "Desconocida"
datarow = datarow.replace('-EGEOIP-', iplocation)
htmltable += datarow
noalt = not noalt
orghtml = tagdelete("DATAROW", orghtml)
orghtml = tagreplace("ALTDATAROW", orghtml, htmltable)
with open(filenamesave, 'w') as f:
orghtml = orghtml.decode('utf8', 'xmlcharrefreplace')
f.write(orghtml.encode('ascii', 'xmlcharrefreplace'))
def save_person_report(rtime_frame, rinterval, rtime, rips, rperson, plugin_dir, filenamesave):
"""
Genera un informe de eventos de footprinting para una persona
"""
with open("resources/mail/personreporttemplate.html", 'r') as f:
personhtml = f.read()
rperson.eval_data(rtime_frame, rinterval, rtime, rips)
person_eval_data = rperson.detection_data
rperson.eval_data(rtime_frame, rinterval, rtime - timedelta(minutes=rinterval), rips)
prev_person_eval_data = rperson.detection_data
personhtml = personhtml.replace('-ORGTHRESHOLD-', str(rperson.alarm_threshold))
personhtml = personhtml.replace('-USERNAME-', rperson.name.encode('ascii', 'xmlcharrefreplace'))
if max(person_eval_data) >= rperson.alarm_threshold:
personhtml = personhtml.replace('-TITLE-', "Alarma de Footprinting")
personhtml = tagdelete("NOALARM", personhtml)
if max(prev_person_eval_data) < rperson.alarm_threshold: # Detección nueva
personhtml = tagdelete("ALARMUP", personhtml)
personhtml = tagdelete("ALARMDOWN", personhtml)
personhtml = tagdelete("ALARMSTABLE", personhtml)
personhtml = personhtml.replace('-CHECKINTERVAL-', str(rinterval))
personhtml = personhtml.replace('-LEVELMAX-', str(max(person_eval_data)))
levelmaxtime = rtime + timedelta(minutes=person_eval_data.index(max(person_eval_data)) - rinterval)
personhtml = personhtml.replace('-LEVELMAXTIME-', levelmaxtime.strftime("%H:%M"))
idxtt = 0
for data in person_eval_data:
if data > rperson.alarm_threshold:
idxtt = data
break
timethreshold = rtime + timedelta(minutes=person_eval_data.index(idxtt) - rinterval)
personhtml = personhtml.replace('-TIMETHRESHOLD-', timethreshold.strftime("%H:%M"))
elif person_eval_data[-1] >= rperson.alarm_threshold: # Continua la alarma
personhtml = tagdelete("NEWALARM", personhtml)
personhtml = tagdelete("ALARMDOWN", personhtml)
if person_eval_data[-1] > prev_person_eval_data[-1]:
personhtml = tagdelete("ALARMSTABLE", personhtml)
else:
personhtml = tagdelete("ALARMUP", personhtml)
personhtml = personhtml.replace('-CHECKINTERVAL-', str(rinterval))
personhtml = personhtml.replace('-LASTLEVEL-', str(person_eval_data[-1]))
elif person_eval_data[-1] < rperson.alarm_threshold: # Se acaba la alarma
personhtml = tagdelete("ALARMUP", personhtml)
personhtml = tagdelete("NEWALARM", personhtml)
personhtml = tagdelete("ALARMSTABLE", personhtml)
personhtml = tagdelete("RUNNINGFOOTPRINTING", personhtml)
idxtt = 0
for data in person_eval_data[::-1]:
if data >= rperson.alarm_threshold:
idxtt = data
break
leveldown = rtime + timedelta(minutes=person_eval_data.index(idxtt) - rinterval)
personhtml = personhtml.replace('-LEVELDOWN-', leveldown.strftime("%H:%M"))
else:
personhtml = personhtml.replace('-TITLE-', "Informe de Footprinting")
personhtml = tagdelete("ALARM", personhtml)
personhtml = personhtml.replace('-DATEMIN-', (rtime - timedelta(minutes=rinterval)).strftime("%H:%M"))
personhtml = personhtml.replace('-DATEMAX-', rtime.strftime("%H:%M"))
personhtml = personhtml.replace('-USERCHART-', 'imgchart_' + rperson.person + '.png')
personhtml = personhtml.replace('-USERRADAR-', 'imgradar_' + rperson.person + '.png')
pplugin = [p[8:] for p in plugin_dir]
for ds in rperson.datasources: # Se eliminan las etiquetas de plugins desactivados
if not ds.enabled:
for p in plugin_dir:
if str(ds).count(p):
pplugin.pop(pplugin.index(p[8:]))
p_eval_data_polar = []
for j in range(len(rperson.datasources)):
if rperson.datasources[j].enabled:
p_eval_data_polar.append(max(rperson.datasources[j].eval_data(rtime_frame, rinterval, rtime, rips)))
uplugin = pplugin[p_eval_data_polar.index(max(p_eval_data_polar))]
personhtml = personhtml.replace('-UPLUGIN-', uplugin)
unumsem = len([a for a in p_eval_data_polar if a > 0])
personhtml = personhtml.replace('-UNUMSEN-', str(unumsem))
rperson.eval_data(rtime_frame, rinterval, rtime, rips)
pips = set([d[0] for d in rperson.get_ips(rinterval + rtime_frame, rtime)])
if pips:
unumip = len(pips.intersection(set(rips)))
else:
unumip = 0
personhtml = personhtml.replace('-UNUMIP-', str(unumip))
# Generamos el texto del informe
report_data = rperson.get_report_data(rinterval + rtime_frame, rtime, rips)
p1 = personhtml.index("<!--DATAROW-->")
p2 = personhtml.index("<!--/DATAROW-->")
htmlrow = personhtml[p1:p2+15]
p1 = personhtml.index("<!--ALTDATAROW-->")
p2 = personhtml.index("<!--/ALTDATAROW-->")
htmlaltrow = personhtml[p1:p2+18]
rawdata = pygeoip.GeoIP('resources/geoip/GeoLiteCity.dat')
htmltable = ""
noalt = True
for data in report_data:
if noalt:
datarow = htmlrow
else:
datarow = htmlaltrow
datarow = datarow.replace('-EHOUR-', data[0].strftime("%H:%M"))
try:
hostname = str(socket.gethostbyaddr(data[1])[0])
except:
hostname = data[1]
datarow = datarow.replace('-EIP-', hostname)
datarow = datarow.replace('-EDESCRIPT-', data[2].encode('ascii', 'xmlcharrefreplace'))
datarow = datarow.replace('-EPLUGIN-', data[3])
try:
ipdata = rawdata.record_by_name(data[1])
country = ipdata['country_name']
city = ipdata['city']
iplocation = (city + ", " + country).encode('ascii', 'xmlcharrefreplace')
except:
iplocation = "Desconocida"
datarow = datarow.replace('-EGEOIP-', iplocation)
htmltable += datarow
noalt = not noalt
personhtml = tagdelete("DATAROW", personhtml)
personhtml = tagreplace("ALTDATAROW", personhtml, htmltable)
with open(filenamesave, 'w') as f:
personhtml = personhtml.decode('utf8', 'xmlcharrefreplace')
f.write(personhtml.encode('ascii', 'xmlcharrefreplace'))
def send_report_mail(mailto, filename, subject):
"""
Envía un fichero html filename (el informe) a email. Las imágenes se incrustan en el correo (deben estar en
la misma carpeta que filename. Se genera también una versión en texto del informe para aquellos clientes de
correo que no soporten html
"""
logger = logging.getLogger('report.watched_pages')
parser = RawConfigParser()
with codecs.open('config/sneaks.conf', 'r', encoding='utf-8') as f:
parser.readfp(f)
smtp_email = ''
smtp_server = ''
smtp_port = 0
smtp_user = ''
smtp_pwd = ''
if parser.has_option('general', 'smtp_email'):
smtp_email = parser.get('general', 'smtp_email')
if not smtp_email:
logger.critical('Error en sneaks.conf: smtp_email')
exit(1)
if parser.has_option('general', 'smtp_server'):
smtp_server = parser.get('general', 'smtp_server')
if not smtp_server:
logger.critical('Error en sneaks.conf: smtp_server')
exit(1)
if parser.has_option('general', 'smtp_port'):
smtp_port = parser.getint('general', 'smtp_port')
if not smtp_port:
logger.critical('Error en sneaks.conf: smtp_port')
exit(1)
if parser.has_option('general', 'smtp_user'):
smtp_user = parser.get('general', 'smtp_user')
if not smtp_user:
logger.critical('Error en sneaks.conf: smtp_user')
exit(1)
if parser.has_option('general', 'smtp_pwd'):
smtp_pwd = parser.get('general', 'smtp_pwd')
if not smtp_pwd:
logger.critical('Error en sneaks.conf: smtp_pwd')
exit(1)
with open(filename, 'r') as f:
orghtml = f.read()
orgtxt = html2text.html2text(orghtml)
msgroot = MIMEMultipart('related')
msgroot['Subject'] = subject
msgroot['From'] = smtp_email
msgroot['To'] = mailto
msgroot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgalternative = MIMEMultipart('alternative')
msgroot.attach(msgalternative)
msgtext = MIMEText(orgtxt.encode('ascii', 'xmlcharrefreplace'))
msgalternative.attach(msgtext)
pattern = re.compile(r"img\w+.png")
images = pattern.findall(orghtml)
msgimages = []
for image in images:
orghtml = orghtml.replace(image, "cid:" + image, 1)
fp = open("temp/" + image, 'rb')
msgimages.append(MIMEImage(fp.read()))
fp.close()
for i in range(len(images)):
msgimages[i].add_header('Content-ID', "<" + images[i] + ">")
msgroot.attach(msgimages[i])
msgtext = MIMEText(orghtml, 'html')
msgalternative.attach(msgtext)
# Send the email (this example assumes SMTP authentication is required)
smtp = smtplib.SMTP(smtp_server, smtp_port)
try:
smtp.ehlo()
# If we can encrypt this session, do it
if smtp.has_extn('STARTTLS'):
smtp.starttls()
smtp.ehlo() # re-identify ourselves over TLS connection
smtp.login(smtp_user, smtp_pwd)
smtp.sendmail(smtp_email, mailto, msgroot.as_string())
finally:
smtp.quit()
| gpl-2.0 | -1,516,157,296,209,164,800 | 39.462842 | 120 | 0.619816 | false |
rcbuild-info/scrape | rcbi/rcbi/spiders/MultirotorSuperstore.py | 1 | 3983 | import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
import copy
import os
import urlparse
import urllib
MANUFACTURERS = ["Cobra", "Dinogy", "SkyRC", "DYS", "HQProp", "iPower", "Tattu", "GemFan", "SunnySky", "Emax", "ZTW", "MS", "FrSky", "RCTimer", "TBS", "VAS", "DTF UHF", "Pololu", "ImmersionRC", "Hovership", "FatShark", "Hawkeye", "Brotronics", "Argonaut", "3DR", "Tarot", "SkyZone", "Shendrones", "Revolectrix", "Flying Cinema", "Airbot", "Circular Wireless"]
CORRECT = {"GemFan": "Gemfan", "SunnySky": "Sunnysky", "Emax": "EMAX", "MS": "MultirotorSuperstore", "TBS": "Team BlackSheep", "VAS": "Video Aerial Systems", "3DR": "3DRobotics", "SkyZone": "Skyzone", "ShenDrones": "Shendrones"}
NEW_PREFIX = {}
STOCK_STATE_MAP = {"http://schema.org/InStock": "in_stock",
"http://schema.org/OutOfStock": "out_of_stock"}
class MultirotorSuperstoreSpider(CrawlSpider):
name = "multirotorsuperstore"
allowed_domains = ["multirotorsuperstore.com"]
start_urls = ["http://www.multirotorsuperstore.com/"]
rules = (
Rule(LinkExtractor(restrict_css=[".submenu", ".pages"])),
Rule(LinkExtractor(restrict_css=".category-products"), callback='parse_item'),
)
def parse_item(self, response):
item = Part()
item["site"] = self.name
product_name = response.css(".product-name")
if not product_name:
return
item["name"] = product_name[0].xpath("//h1/text()").extract()[0]
variant = {}
item["variants"] = [variant]
parsed = urlparse.urlparse(response.url)
filename = "/" + os.path.basename(parsed[2])
variant["url"] = urlparse.urlunparse((parsed[0], parsed[1], filename,
parsed[3], parsed[4], parsed[5]))
for m in MANUFACTURERS:
if item["name"].startswith(m):
item["name"] = item["name"][len(m):].strip("- ")
item["manufacturer"] = m
break
if "manufacturer" in item:
m = item["manufacturer"]
if m in NEW_PREFIX:
item["name"] = NEW_PREFIX[m] + " " + item["name"]
if m in CORRECT:
item["manufacturer"] = CORRECT[m]
superproduct = response.css("#super-product-table")
if not superproduct:
availability = response.css("[itemprop=\"availability\"]::attr(href)")
if availability and availability.extract_first() in STOCK_STATE_MAP:
variant["stock_state"] = STOCK_STATE_MAP[availability.extract_first()]
variant["stock_text"] = response.css(".availability>span::text").extract_first().strip()
elif availability:
print(availability)
price = response.css(".product-essential .regular-price .price::text")
if price:
special = response.css(".product-essential .special-price .price::text")
if special:
variant["price"] = special.extract_first().strip()
else:
variant["price"] = price.extract_first().strip()
else:
subproducts = superproduct.css("tbody>tr")
first = True
in_stock = response.css(".product-essential .in-stock")
if not in_stock:
variant["stock_state"] = "out_of_stock"
for subproduct in subproducts:
cols = subproduct.css("td")
if first:
first = False
else:
variant = copy.deepcopy(variant)
item["variants"].append(variant)
variant["description"] = cols[0].css("::text").extract_first().strip()
if in_stock:
quantity_field = cols[2].css("input")
if quantity_field:
variant["stock_state"] = "in_stock"
else:
variant["stock_state"] = "out_of_stock"
# Do price last so we can copy for tiered pricing.
price = cols[1].css(".regular-price .price::text")
if price:
variant["price"] = price.extract_first().strip()
# TODO(tannewt): Support tiered pricing.
return item
| apache-2.0 | 1,018,846,798,764,537,900 | 38.435644 | 359 | 0.614361 | false |
daviur/py-cracking-the-coding-interview | arrays-and-strings/as18.py | 1 | 1607 | # The MIT License (MIT)
#
# Copyright (c) 2016 David I Urbina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
def is_rotation(string1, string2):
if len(string1) == len(string2) and len(string1) > 0:
s = string1 + string1
return True if s.find(string2) != -1 else False
return False
@pytest.mark.parametrize('input, expected', [
(('abcdefg', 'abcdefg'), True),
(('abcdefg', 'defgabc'), True),
(('abcdefg', 'efgacdd'), False),
(('abcde', 'abcdefg'), False)
])
def test_is_rotation(input, expected):
assert is_rotation(*input) is expected
| mit | 774,038,168,573,921,400 | 40.205128 | 80 | 0.728065 | false |
edit4ever/script.module.tvh2kodi | default.py | 1 | 183136 | #!/usr/bin/env python
################################################################################
# This file is part of LibreELEC - https://libreelec.tv
# Copyright (C) 2016-2017 Team LibreELEC
# Copyright (C) 2017 Tnds82 ([email protected])
#
# LibreELEC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# LibreELEC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreELEC. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import xbmc,xbmcaddon,xbmcvfs,xbmcgui,xbmcplugin
import subprocess
from subprocess import Popen
from xbmcswift2 import Plugin
import StringIO
import os
import re
import requests
import sys
import json
import urllib2
import time
import ast
import zipfile
import datetime
import urllib
import picons
plugin = Plugin()
dialog = xbmcgui.Dialog()
try:
tvh_url_get = xbmcaddon.Addon('pvr.hts').getSetting("host")
if tvh_url_get:
tvh_url_set = xbmcaddon.Addon().setSetting(id='tvhurl', value=tvh_url_get)
else:
        tvh_url = xbmcaddon.Addon().getSetting('tvhurl')
        if not tvh_url:
            # getSetting() returns an empty string rather than raising, so test for an empty value
            xbmcaddon.Addon().setSetting(id='tvhurl', value="127.0.0.1")
tvh_port_get = xbmcaddon.Addon('pvr.hts').getSetting("http_port")
if tvh_port_get:
tvh_port_set = xbmcaddon.Addon().setSetting(id='tvhport', value=tvh_port_get)
else:
        tvh_port = xbmcaddon.Addon().getSetting('tvhport')
        if not tvh_port:
            xbmcaddon.Addon().setSetting(id='tvhport', value="9981")
except:
pass
tvh_port = xbmcaddon.Addon().getSetting('tvhport')
tvh_usern = xbmcaddon.Addon().getSetting('usern')
tvh_passw = xbmcaddon.Addon().getSetting('passw')
if tvh_usern != "" and tvh_passw != "":
tvh_url = tvh_usern + ":" + tvh_passw + "@" + xbmcaddon.Addon().getSetting('tvhurl')
else:
tvh_url = xbmcaddon.Addon().getSetting('tvhurl')
try:
check_url = 'http://' + tvh_url + ':' + tvh_port + '/api/status/connections'
check_load = requests.get(check_url)
check_status = check_load.raise_for_status()
except requests.exceptions.HTTPError as err:
dialog.ok("Tvheadend Access Error!", str(err), "", "Please check your username/password in settings.")
except requests.exceptions.RequestException as e:
dialog.ok("Tvheadend Access Error!", "Could not connect to Tvheadend server.", "Please check your Tvheadend server is running or check the IP and port configuration in the settings.")
truefalse = ['true', 'false']
enabledisable = ['Enabled', 'Disabled']
def get_icon_path(icon_name):
addon_path = xbmcaddon.Addon().getAddonInfo("path")
return os.path.join(addon_path, 'resources', 'img', icon_name+".png")
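# Return the value of a parameter (matched by id) from a Tvheadend idnode/load response.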
def find_param(d, param_id):
for param in d['entries'][0]['params']:
if param['id'] == param_id:
try:
value = param['value']
except:
value = ""
break
else:
value = "NO PARAMATER FOUND"
return value
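# Return an arbitrary attribute (item) of a parameter from an idnode/load response.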
def find_param_item(d, param_id, item):
for param in d['entries'][0]['params']:
if param['id'] == param_id:
try:
value = param[item]
except:
value = ""
break
else:
value = "NO PARAMATER FOUND"
return value
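# Resolve a parameter value via its key/val lookup list (param_id2); returns (display value, keys, vals).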
def find_param_dict(d, param_id, param_id2):
param_value = find_param(d, param_id)
for param in d['entries'][0]['params']:
if param['id'] == param_id:
param_dict = param[param_id2]
break
else:
param_dict = "NO PARAMATER FOUND"
param_key = []
param_val = []
for param_k in param_dict:
param_key.append(param_k['key'])
for param_v in param_dict:
param_val.append(param_v['val'])
try:
param_index = param_key.index(param_value)
return (param_val[param_index], param_key, param_val)
except:
return (param_value, param_key, param_val)
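# Same as find_param_dict but always uses the parameter's 'enum' list (param_id2 is not used).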
def find_param_enum(d, param_id, param_id2):
param_value = find_param(d, param_id)
for param in d['entries'][0]['params']:
if param['id'] == param_id:
param_dict = param['enum']
break
else:
param_dict = "NO PARAMATER FOUND"
param_key = []
param_val = []
for param_k in param_dict:
param_key.append(param_k['key'])
for param_v in param_dict:
param_val.append(param_v['val'])
try:
param_index = param_key.index(param_value)
return (param_val[param_index], param_key, param_val)
except:
return (param_value, param_key, param_val)
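# Return a parameter's value together with one of its raw attribute lists (e.g. 'enum').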
def find_param_list(d, param_id, param_id2):
param_value = find_param(d, param_id)
for param in d['entries'][0]['params']:
if param['id'] == param_id:
param_list = param[param_id2]
break
else:
param_list = "NO PARAMATER FOUND"
return (param_value, param_list)
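# Return only the requested attribute list of a parameter, or an empty list if the parameter is missing.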
def find_list(d, param_id, param_id2):
for param in d['entries'][0]['params']:
if param['id'] == param_id:
param_list = param[param_id2]
break
else:
param_list = []
return param_list
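# Return the default value of a property from a Tvheadend class/props response.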
def find_prop(d, param_id):
for param in d['props']:
if param['id'] == param_id:
try:
value = param['default']
except:
value = ""
break
else:
value = "NO PARAMATER FOUND"
return value
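# Return the key and val lists of a property's lookup table (param_id2) from a props response.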
def find_props_dict(d, param_id, param_id2):
for param in d['props']:
if param['id'] == param_id:
param_dict = param[param_id2]
break
else:
param_dict = "NO PARAMATER FOUND"
param_key = []
param_val = []
for param_k in param_dict:
param_key.append(param_k['key'])
for param_v in param_dict:
param_val.append(param_v['val'])
return (param_key, param_val)
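# Enable or disable a Kodi add-on via JSON-RPC, skipping the call when no change is needed.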
def dis_or_enable_addon(addon_id, enable="true"):
addon = '"%s"' % addon_id
if xbmc.getCondVisibility("System.HasAddon(%s)" % addon_id) and enable == "true":
return xbmc.log("### Skipped %s, reason = allready enabled" % addon_id)
elif not xbmc.getCondVisibility("System.HasAddon(%s)" % addon_id) and enable == "false":
return xbmc.log("### Skipped %s, reason = not installed" % addon_id)
else:
do_json = '{"jsonrpc":"2.0","id":1,"method":"Addons.SetAddonEnabled","params":{"addonid":%s,"enabled":%s}}' % (addon, enable)
query = xbmc.executeJSONRPC(do_json)
response = json.loads(query)
if enable == "true":
xbmc.log("### Enabled %s, response = %s" % (addon_id, response))
else:
xbmc.log("### Disabled %s, response = %s" % (addon_id, response))
return xbmc.executebuiltin('Container.Update(%s)' % xbmc.getInfoLabel('Container.FolderPath'))
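# Recursively zip inputDir into outputZip, preserving empty directories and storing symlinks as links.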
def ZipDir(inputDir, outputZip):
zipOut = zipfile.ZipFile(outputZip, 'w', compression=zipfile.ZIP_DEFLATED)
rootLen = len(os.path.dirname(inputDir))
def _ArchiveDirectory(parentDirectory):
contents = os.listdir(parentDirectory)
if not contents:
archiveRoot = parentDirectory[rootLen:].replace('\\', '/').lstrip('/')
zipInfo = zipfile.ZipInfo(archiveRoot+'/')
zipOut.writestr(zipInfo, '')
for item in contents:
fullPath = os.path.join(parentDirectory, item)
if os.path.isdir(fullPath) and not os.path.islink(fullPath):
_ArchiveDirectory(fullPath)
else:
archiveRoot = fullPath[rootLen:].replace('\\', '/').lstrip('/')
if os.path.islink(fullPath):
zipInfo = zipfile.ZipInfo(archiveRoot)
zipInfo.create_system = 3
zipInfo.external_attr = 2716663808L
zipOut.writestr(zipInfo, os.readlink(fullPath))
else:
zipOut.write(fullPath, archiveRoot, zipfile.ZIP_DEFLATED)
_ArchiveDirectory(inputDir)
zipOut.close()
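# Picons download menu: pick the picons source and destination folder, then download the selected archive.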
def picons_param_load():
url_latest = 'http://cvh.libreelec.tv/picons/latest2.json'
ljson = requests.get(url_latest).json()
picons_source_list = ['Custom URL']
picons_source_files = ['Custom URL']
for p in ljson['Picons']['latest']:
picons_source_list.append(p['desc'])
for n in ljson['Picons']['latest']:
picons_source_files.append(n['name'])
picons_source_value = xbmcaddon.Addon().getSetting('psource')
picons_source = picons_source_list[int(picons_source_value)]
picons_file = picons_source_files[int(picons_source_value)]
picons_dest = xbmcaddon.Addon().getSetting('pdest')
picons_url = xbmcaddon.Addon().getSetting('purl')
picons_list = ["Picons Source: " + str(picons_source), "Picons Destination: " + str(picons_dest), "DOWNLOAD PICONS"]
sel_param = dialog.select('Picons Download - Select parameter', list=picons_list)
if sel_param < 0:
return
if sel_param >= 0:
if sel_param == 0:
sel_psource = dialog.select('Select Picons Source', list=picons_source_list)
if sel_psource < 0:
picons_param_load()
else:
picons_source_set = xbmcaddon.Addon().setSetting(id='psource', value=str(sel_psource))
picons_param_load()
if sel_param == 1:
picons_dest_update = dialog.browse(3, "Select Picons Destination", "files", defaultt=picons_dest)
picons_dest_set = xbmcaddon.Addon().setSetting(id='pdest', value=picons_dest_update)
picons_param_load()
if sel_param == 2:
if picons_source_value == "0":
sel_purl = dialog.input('Enter the Picons URL to Download', defaultt=picons_url,type=xbmcgui.INPUT_ALPHANUM)
if sel_purl != "":
picons_url_set = xbmcaddon.Addon().setSetting(id='purl', value=str(sel_purl))
picons.url_external(sel_purl)
if picons_source_value > "0":
picons.compare_release(url_latest, picons_file, picons_source_value)
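# Load the DVR configuration from Tvheadend and build the list of editable settings.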
def dvr_param_load(dvr_uuid_sel):
dvr_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + dvr_uuid_sel
dvr_load = requests.get(dvr_url).json()
dvr_name = dvr_load['entries'][0]['text']
dvr_enabled = find_param(dvr_load, 'enabled')
dvr_keep, dvr_keep_key, dvr_keep_val = find_param_dict(dvr_load, 'removal-days', 'enum')
dvr_profile_value = find_param(dvr_load, 'profile')
dvr_profile_dict_url = 'http://' + tvh_url + ':' + tvh_port + '/api/profile/list'
dvr_profile_dict_load = requests.get(dvr_profile_dict_url).json()
dvr_profile_dict = dvr_profile_dict_load['entries']
dvr_profile_key = []
dvr_profile_val = []
for dvr_k in dvr_profile_dict:
dvr_profile_key.append(dvr_k['key'])
for dvr_v in dvr_profile_dict:
dvr_profile_val.append(dvr_v['val'])
dvr_profile_index = dvr_profile_key.index(dvr_profile_value)
dvr_profile = dvr_profile_val[dvr_profile_index]
dvr_clone = find_param(dvr_load, 'clone')
dvr_storage = find_param(dvr_load, 'storage')
xbmcaddon.Addon().setSetting(id='dvrstorage', value=dvr_storage)
dvr_info_list = ["Name: " + str(dvr_name), "Enabled: " + str(dvr_enabled), "Storage: " + str(dvr_storage), "Days to Keep Recordings: " + str(dvr_keep), "Duplicate Recording Timer If Error Occurs: " + str(dvr_clone), "Stream Profile: " + str(dvr_profile), "Recording File and Folder options", "Timeshift Options"]
dvr_param_edit(dvr_uuid_sel, dvr_info_list, dvr_keep_key, dvr_keep_val, dvr_name, dvr_enabled, dvr_storage, dvr_keep, dvr_clone, dvr_profile_key, dvr_profile_val, dvr_profile)
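# Show the DVR settings menu and write any changed value back via idnode/save.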
def dvr_param_edit(dvr_uuid_sel, dvr_info_list, dvr_keep_key, dvr_keep_val, dvr_name, dvr_enabled, dvr_storage, dvr_keep, dvr_clone, dvr_profile_key, dvr_profile_val, dvr_profile):
sel_param = dialog.select('DVR Configuration - Select parameter to edit', list=dvr_info_list)
if sel_param < 0:
dvr()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_dvr_name = dialog.input('Edit the DVR profile name', defaultt=dvr_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_dvr_name == "":
dvr_param_load(dvr_uuid_sel)
else:
param_update = '"name":"' + sel_dvr_name + '"'
if sel_param == 1:
sel_enabled = dialog.select('Enable or disable the DVR profile', list=enabledisable)
if sel_enabled >= 0:
dvr_enabled = truefalse[sel_enabled]
param_update = '"enabled":' + dvr_enabled
if sel_param == 2:
if tvh_url == "127.0.0.1":
plugin.open_settings()
dvr_storage_update_tvh = xbmcaddon.Addon().getSetting('dvrstorage')
param_update = '"storage":"' + str(dvr_storage_update_tvh) + '"'
else:
dialog.ok('Tvheadend backend on network location', 'Your Tvheadend backend is located on a network. Currently Kodi cannot browse network folders.', 'Please enter the DVR recording location manually.')
dvr_storage_update_tvh = dialog.input('Edit the DVR recording location', defaultt=dvr_storage,type=xbmcgui.INPUT_ALPHANUM)
xbmcaddon.Addon().setSetting(id='dvrstorage', value=dvr_storage_update_tvh)
param_update = '"storage":"' + str(dvr_storage_update_tvh) + '"'
if sel_param == 3:
sel_enabled = dialog.select('Select the number of days to keep DVR recordings', list=dvr_keep_val)
if sel_enabled >= 0:
dvr_keep = dvr_keep_key[sel_enabled]
param_update = '"removal-days":' + str(dvr_keep)
if sel_param == 4:
sel_enabled = dialog.select('Enable or disable the re-recording of a timer if an error occurs', list=enabledisable)
if sel_enabled >= 0:
dvr_clone = truefalse[sel_enabled]
param_update = '"clone":' + dvr_clone
if sel_param == 5:
sel_enabled = dialog.select('Select the stream profile for DVR playback', list=dvr_profile_val)
if sel_enabled >= 0:
          # use the selected profile key; the profile id is a string, so it must be quoted in the JSON node
          dvr_profile = dvr_profile_key[sel_enabled]
          param_update = '"profile":"' + str(dvr_profile) + '"'
if sel_param == 6:
dvr_file_param_load(dvr_uuid_sel)
if sel_param == 7:
time_param_load(dvr_uuid_sel)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + dvr_uuid_sel + '"}'
param_save = requests.get(param_url)
dvr_param_load(dvr_uuid_sel)
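# Load the DVR recording file and folder naming options and build their menu.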
def dvr_file_param_load(dvr_uuid_sel):
dvr_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + dvr_uuid_sel
dvr_load = requests.get(dvr_url).json()
dvr_day_dir = find_param(dvr_load, 'day-dir')
dvr_channel_dir = find_param(dvr_load, 'channel-dir')
dvr_title_dir = find_param(dvr_load, 'title-dir')
dvr_channel_title = find_param(dvr_load, 'channel-in-title')
dvr_date_title = find_param(dvr_load, 'date-in-title')
dvr_time_title = find_param(dvr_load, 'time-in-title')
dvr_episode_title = find_param(dvr_load, 'episode-in-title')
dvr_subtitle_title = find_param(dvr_load, 'subtitle-in-title')
dvr_omit_title = find_param(dvr_load, 'omit-title')
dvr_clean_title = find_param(dvr_load, 'clean-title')
dvr_whitespace_title = find_param(dvr_load, 'whitespace-in-title')
dvr_windows_title = find_param(dvr_load, 'windows-compatible-filenames')
dvr_file_info_list = ["Make subdirectories per day: " + str(dvr_day_dir), "Make subdirectories per channel: " + str(dvr_channel_dir), "Make subdirectories per title: " + str(dvr_title_dir), "Include channel name in filename: " + str(dvr_channel_title), "Include date in filename: " + str(dvr_date_title), "Include time in filename: " + str(dvr_time_title), "Include episode in filename: " + str(dvr_episode_title), "Include subtitle in filename: " + str(dvr_subtitle_title), "Don't include title in filename: " + str(dvr_omit_title), "Remove all unsafe characters from filename: " + str(dvr_clean_title), "Replace whitespace in title with '-': " + str(dvr_whitespace_title), "Use Windows-compatible filenames: " + str(dvr_windows_title)]
dvr_file_param_edit(dvr_uuid_sel, dvr_file_info_list, dvr_day_dir, dvr_channel_dir, dvr_title_dir, dvr_channel_title, dvr_date_title, dvr_time_title, dvr_episode_title, dvr_subtitle_title, dvr_omit_title, dvr_clean_title, dvr_whitespace_title, dvr_windows_title)
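# Show the file and folder naming options menu and save changes via idnode/save.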
def dvr_file_param_edit(dvr_uuid_sel, dvr_file_info_list, dvr_day_dir, dvr_channel_dir, dvr_title_dir, dvr_channel_title, dvr_date_title, dvr_time_title, dvr_episode_title, dvr_subtitle_title, dvr_omit_title, dvr_clean_title, dvr_whitespace_title, dvr_windows_title):
sel_param = dialog.select('DVR File and Folder Options - Select parameter to edit', list=dvr_file_info_list)
if sel_param < 0:
return
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Make subdirectories per day', list=enabledisable)
if sel_enabled >= 0:
dvr_day_dir = truefalse[sel_enabled]
param_update = '"day-dir":' + dvr_day_dir
if sel_param == 1:
sel_enabled = dialog.select('Make subdirectories per channel', list=enabledisable)
if sel_enabled >= 0:
dvr_channel_dir = truefalse[sel_enabled]
param_update = '"channel-dir":' + dvr_channel_dir
if sel_param == 2:
sel_enabled = dialog.select('Make subdirectories per title', list=enabledisable)
if sel_enabled >= 0:
dvr_title_dir = truefalse[sel_enabled]
param_update = '"title-dir":' + dvr_title_dir
if sel_param == 3:
sel_enabled = dialog.select('Include channel name in filename', list=enabledisable)
if sel_enabled >= 0:
dvr_channel_title = truefalse[sel_enabled]
param_update = '"channel-in-title":' + dvr_channel_title
if sel_param == 4:
sel_enabled = dialog.select('Include date in filename', list=enabledisable)
if sel_enabled >= 0:
dvr_date_title = truefalse[sel_enabled]
param_update = '"date-in-title":' + dvr_date_title
if sel_param == 5:
sel_enabled = dialog.select('Include time in filename', list=enabledisable)
if sel_enabled >= 0:
dvr_time_title = truefalse[sel_enabled]
param_update = '"time-in-title":' + dvr_time_title
if sel_param == 6:
sel_enabled = dialog.select('Include episode in filename', list=enabledisable)
if sel_enabled >= 0:
dvr_episode_title = truefalse[sel_enabled]
param_update = '"episode-in-title":' + dvr_episode_title
if sel_param == 7:
sel_enabled = dialog.select('Include subtitle in filename', list=enabledisable)
if sel_enabled >= 0:
dvr_subtitle_title = truefalse[sel_enabled]
param_update = '"subtitle-in-title":' + dvr_subtitle_title
if sel_param == 8:
sel_enabled = dialog.select("Don't include title in filename", list=enabledisable)
if sel_enabled >= 0:
dvr_omit_title = truefalse[sel_enabled]
param_update = '"omit-title":' + dvr_omit_title
if sel_param == 9:
sel_enabled = dialog.select('Remove all unsafe characters from filename', list=enabledisable)
if sel_enabled >= 0:
dvr_clean_title = truefalse[sel_enabled]
param_update = '"clean-title":' + dvr_clean_title
if sel_param == 10:
sel_enabled = dialog.select("Replace whitespace in title with '-'", list=enabledisable)
if sel_enabled >= 0:
dvr_whitespace_title = truefalse[sel_enabled]
param_update = '"whitespace-in-title":' + dvr_whitespace_title
if sel_param == 11:
sel_enabled = dialog.select('Use Windows-compatible filenames', list=enabledisable)
if sel_enabled >= 0:
dvr_windows_title = truefalse[sel_enabled]
param_update = '"windows-compatible-filenames":' + dvr_windows_title
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + dvr_uuid_sel + '"}'
param_save = requests.get(param_url)
dvr_file_param_load(dvr_uuid_sel)
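# Load the current timeshift configuration from the timeshift/config/load API and build the edit menu.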
def time_param_load(dvr_uuid_sel):
time_url = 'http://' + tvh_url + ':' + tvh_port + '/api/timeshift/config/load'
time_load = requests.get(time_url).json()
time_enabled = find_param(time_load, 'enabled')
time_ondemand = find_param(time_load, 'ondemand')
time_path = find_param(time_load, 'path')
time_max_period = find_param(time_load, 'max_period')
time_unlimited_period = find_param(time_load, 'unlimited_period')
time_max_size = find_param(time_load, 'max_size')
time_ram_size = find_param(time_load, 'ram_size')
time_unlimited_size = find_param(time_load, 'unlimited_size')
time_ram_only = find_param(time_load, 'ram_only')
time_ram_fit = find_param(time_load, 'ram_fit')
time_teletext = find_param(time_load, 'teletext')
time_info_list = ["Timeshift Enabled: " + str(time_enabled), "Maximum Time (mins): " + str(time_max_period), "Storage Path: " + str(time_path), "Maximum Size (MB): " + str(time_max_size), "Maximum RAM Size (MB): " + str(time_ram_size), "RAM Only: " + str(time_ram_only), "On-demand (no first rewind): " + str(time_ondemand), "Unlimited Time: " + str(time_unlimited_period), "Unlimited Size: " + str(time_unlimited_size), "Fit to RAM (cut rewind): " + str(time_ram_fit), "Include Teletext: " + str(time_teletext)]
time_param_edit(dvr_uuid_sel, time_info_list, time_enabled, time_max_period, time_path, time_max_size, time_ram_size, time_ram_only, time_ondemand, time_unlimited_period, time_unlimited_size, time_ram_fit, time_teletext)
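# Let the user edit a timeshift parameter and post the change to the timeshift/config/save API.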
def time_param_edit(dvr_uuid_sel, time_info_list, time_enabled, time_max_period, time_path, time_max_size, time_ram_size, time_ram_only, time_ondemand, time_unlimited_period, time_unlimited_size, time_ram_fit, time_teletext):
sel_param = dialog.select('Timeshift Options - Select parameter to edit', list=time_info_list)
if sel_param < 0:
return
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable/Disable the timeshift function', list=enabledisable)
if sel_enabled >= 0:
time_enabled = truefalse[sel_enabled]
param_update = '"enabled":' + time_enabled
if sel_param == 1:
sel_num = dialog.input('Set maximum time for buffering (minutes)', defaultt=str(time_max_period),type=xbmcgui.INPUT_NUMERIC)
            if sel_num != "":
time_max_period = sel_num
param_update = '"max_period":' + str(time_max_period)
if sel_param == 2:
if tvh_url == "127.0.0.1":
plugin.open_settings()
time_storage_update_tvh = xbmcaddon.Addon().getSetting('timestorage')
param_update = '"path":"' + str(time_storage_update_tvh) + '"'
else:
                dialog.ok('Tvheadend backend on network location', 'Your Tvheadend backend is located on a network. Currently Kodi cannot browse network folders.', 'Please enter the timeshift buffer path manually.')
time_storage_update_tvh = dialog.input('Edit the timeshift buffer path', defaultt=time_path,type=xbmcgui.INPUT_ALPHANUM)
xbmcaddon.Addon().setSetting(id='timestorage', value=time_storage_update_tvh)
param_update = '"path":"' + str(time_storage_update_tvh) + '"'
if sel_param == 3:
sel_num = dialog.input('Set maximum storage size for buffering (MB)', defaultt=str(time_max_size),type=xbmcgui.INPUT_NUMERIC)
            if sel_num != "":
time_max_size = sel_num
param_update = '"max_size":' + str(time_max_size)
if sel_param == 4:
sel_num = dialog.input('Set maximum RAM size for buffering (MB)', defaultt=str(time_ram_size),type=xbmcgui.INPUT_NUMERIC)
            if sel_num != "":
time_ram_size = sel_num
param_update = '"ram_size":' + str(time_ram_size)
if sel_param == 5:
sel_enabled = dialog.select('Enable/Disable to use RAM only', list=enabledisable)
if sel_enabled >= 0:
time_ram_only = truefalse[sel_enabled]
param_update = '"ram_only":' + time_ram_only
if sel_param == 6:
sel_enabled = dialog.select('Enable/Disable timeshift on-demand (no first rewind)', list=enabledisable)
if sel_enabled >= 0:
time_ondemand = truefalse[sel_enabled]
param_update = '"ondemand":' + time_ondemand
if sel_param == 7:
sel_enabled = dialog.select('Enable/Disable unlimited time (may cause slowdown)', list=enabledisable)
if sel_enabled >= 0:
time_unlimited_period = truefalse[sel_enabled]
param_update = '"unlimited_period":' + time_unlimited_period
if sel_param == 8:
sel_enabled = dialog.select('Enable/Disable unlimited size (uses all storage)', list=enabledisable)
if sel_enabled >= 0:
time_unlimited_size = truefalse[sel_enabled]
param_update = '"unlimited_size":' + time_unlimited_size
if sel_param == 9:
sel_enabled = dialog.select('Enable/Disable fit to RAM (clears oldest buffer)', list=enabledisable)
if sel_enabled >= 0:
time_ram_fit = truefalse[sel_enabled]
param_update = '"ram_fit":' + time_ram_fit
        if sel_param == 10:
sel_enabled = dialog.select('Enable/Disable to include teletext data', list=enabledisable)
if sel_enabled >= 0:
time_teletext = truefalse[sel_enabled]
param_update = '"teletext":' + time_teletext
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/timeshift/config/save?node={' + param_update + '}'
param_save = requests.get(param_url)
time_param_load(dvr_uuid_sel)
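# List the muxes of the selected network (plus a CREATE NEW MUX entry) and dispatch to the class-specific mux editor.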
def muxes_load(net_uuid_sel):
net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_uuid_sel)
net_load = requests.get(net_url).json()
net_class = net_load['entries'][0]['class']
muxes_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/mux/grid?limit=999999999&filter=[{"type":"string","value":"' + net_uuid_sel + '","field":"network_uuid"}]'
muxes = requests.get(muxes_url).json()
muxes_name = []
muxes_uuid = []
muxes_enabled = []
muxes_network = []
muxes_frequency = []
muxes_total = muxes['total']
if muxes_total > 0:
for mux_n in muxes['entries']:
muxes_name.append(mux_n['name'])
for mux_u in muxes['entries']:
muxes_uuid.append(mux_u['uuid'])
for mux_w in muxes['entries']:
muxes_network.append(" in " + mux_w['network'])
try:
for mux_f in muxes['entries']:
muxes_frequency.append(mux_f['frequency'])
except:
for mux_f in muxes['entries']:
muxes_frequency.append(mux_f['channel_number'])
muxes_full = zip(muxes_name, muxes_network,)
muxes_list = ["%s %s" % x for x in muxes_full]
muxes_frequency, muxes_list, muxes_uuid = zip(*sorted(zip(muxes_frequency, muxes_list, muxes_uuid)))
create_mux = "CREATE NEW MUX"
muxes_list = list(muxes_list)
muxes_list.insert(0,create_mux)
muxes_list = tuple(muxes_list)
muxes_frequency = list(muxes_frequency)
muxes_frequency.insert(0,create_mux)
muxes_frequency = tuple(muxes_frequency)
muxes_uuid = list(muxes_uuid)
muxes_uuid.insert(0,create_mux)
muxes_uuid = tuple(muxes_uuid)
else:
muxes_list = ['CREATE NEW MUX']
sel_mux = dialog.select('Select a mux to configure', list=muxes_list)
if sel_mux == 0:
if net_class == "iptv_network" or net_class == "iptv_auto_network":
mux_new_iptv(net_uuid_sel)
else:
mux_new()
    if sel_mux > 0:
mux_uuid_sel = muxes_uuid[sel_mux]
sel_mux_class_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + mux_uuid_sel
sel_mux_class_load = requests.get(sel_mux_class_url).json()
sel_mux_class = sel_mux_class_load['entries'][0]['class']
if sel_mux_class == "dvb_mux_atsc_t":
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
if sel_mux_class == "dvb_mux_atsc_c":
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
if sel_mux_class == "dvb_mux_dvbc":
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
if sel_mux_class == "dvb_mux_dvbt":
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_class == "dvb_mux_dvbs":
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_class == "iptv_mux":
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
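# Load the parameters of an ATSC-T mux and build the edit menu.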
def mux_param_load_atsct(mux_uuid_sel, net_uuid_sel):
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + mux_uuid_sel
mux_load = requests.get(mux_url).json()
mux_name = mux_load['entries'][0]['text']
mux_enabled, mux_enabled_key, mux_enabled_val = find_param_dict(mux_load, 'enabled', 'enum')
mux_modulation, mux_modulation_key, mux_modulation_val = find_param_dict(mux_load, 'modulation', 'enum')
mux_delsys, mux_delsys_list = find_param_list(mux_load, 'delsys', 'enum')
mux_scanstate, mux_scanstate_key, mux_scanstate_val = find_param_dict(mux_load, 'scan_state', 'enum')
mux_frequency = find_param(mux_load, 'frequency')
mux_services = find_param(mux_load, 'num_svc')
mux_channels = find_param(mux_load, 'num_chn')
mux_info_list = ["Enabled: " + str(mux_enabled), "Delivery System: " + str(mux_delsys), "Frequency: " + str(mux_frequency), "Modulation: " + str(mux_modulation), "Scan Status: " + str(mux_scanstate), "Number of Services: " + str(mux_services), "Number of Channels: " + str(mux_channels), "DELETE THE MUX"]
mux_param_edit_atsct(mux_uuid_sel, mux_info_list, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel)
def mux_param_edit_atsct(mux_uuid_sel, mux_info_list, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel):
if mux_scanstate == "ACTIVE":
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list, autoclose=4000)
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list)
if sel_param < 0:
muxes()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable or disable the mux', list=mux_enabled_val)
if sel_enabled <0:
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_enabled = mux_enabled_key[sel_enabled]
param_update = '"enabled":' + str(mux_enabled)
if sel_param == 1:
sel_enabled = dialog.select('Select the mux delivery system', list=mux_delsys_list)
if sel_enabled <0:
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_delsys = mux_delsys_list[sel_enabled]
param_update = '"delsys":"' + str(mux_delsys + '"')
if sel_param == 2:
sel_mux_frequency = dialog.input('Edit the mux frequency', defaultt=str(mux_frequency),type=xbmcgui.INPUT_NUMERIC)
param_update = '"frequency":' + sel_mux_frequency
if sel_param == 3:
sel_mux_modulation = dialog.select('Select the modulation of the mux', list=mux_modulation_val)
if sel_mux_modulation <0:
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
if sel_mux_modulation >= 0:
mux_modulation = mux_modulation_key[sel_mux_modulation]
param_update = '"modulation":"' + str(mux_modulation) + '"'
if sel_param == 4:
sel_mux_scanstate = dialog.select('Set the scan state of the mux', list=mux_scanstate_val)
if sel_mux_scanstate <0:
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
if sel_mux_scanstate >= 0:
mux_scanstate = mux_scanstate_key[sel_mux_scanstate]
param_update = '"scan_state":' + str(mux_scanstate)
if sel_param == 7:
confirm_del = dialog.yesno('Confirm mux delete', 'Are you sure want to delete the ' + mux_name + ' mux?')
if not confirm_del:
return
delete_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + mux_uuid_sel +'"]'
delete_mux = requests.get(delete_mux_url)
muxes_load(net_uuid_sel)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + mux_uuid_sel + '"}'
param_save = requests.get(param_url)
mux_param_load_atsct(mux_uuid_sel, net_uuid_sel)
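# Load the parameters of an ATSC-C/DVB-C mux and build the edit menu.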
def mux_param_load_atscc(mux_uuid_sel, net_uuid_sel):
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + mux_uuid_sel
mux_load = requests.get(mux_url).json()
mux_name = mux_load['entries'][0]['text']
mux_enabled, mux_enabled_key, mux_enabled_val = find_param_dict(mux_load, 'enabled', 'enum')
mux_modulation, mux_modulation_key, mux_modulation_val = find_param_dict(mux_load, 'constellation', 'enum')
mux_delsys, mux_delsys_list = find_param_list(mux_load, 'delsys', 'enum')
mux_scanstate, mux_scanstate_key, mux_scanstate_val = find_param_dict(mux_load, 'scan_state', 'enum')
mux_frequency = find_param(mux_load, 'frequency')
mux_symbolrate = find_param(mux_load, 'symbolrate')
mux_services = find_param(mux_load, 'num_svc')
mux_channels = find_param(mux_load, 'num_chn')
mux_info_list = ["Enabled: " + str(mux_enabled), "Delivery System: " + str(mux_delsys), "Frequency: " + str(mux_frequency), "Symbol Rate: " + str(mux_symbolrate), "Modulation: " + str(mux_modulation), "Scan Status: " + str(mux_scanstate), "Number of Services: " + str(mux_services), "Number of Channels: " + str(mux_channels), "DELETE THE MUX"]
mux_param_edit_atscc(mux_uuid_sel, mux_info_list, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_symbolrate, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel)
def mux_param_edit_atscc(mux_uuid_sel, mux_info_list, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_symbolrate, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel):
if mux_scanstate == "ACTIVE":
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list, autoclose=4000)
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list)
if sel_param < 0:
muxes()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable or disable the mux', list=mux_enabled_val)
if sel_enabled <0:
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_enabled = mux_enabled_key[sel_enabled]
param_update = '"enabled":' + str(mux_enabled)
if sel_param == 1:
sel_enabled = dialog.select('Select the mux delivery system', list=mux_delsys_list)
if sel_enabled <0:
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_delsys = mux_delsys_list[sel_enabled]
param_update = '"delsys":"' + str(mux_delsys + '"')
if sel_param == 2:
sel_mux_frequency = dialog.input('Edit the mux frequency', defaultt=str(mux_frequency),type=xbmcgui.INPUT_NUMERIC)
param_update = '"frequency":' + sel_mux_frequency
if sel_param == 3:
            sel_mux_symbolrate = dialog.input('Edit the mux symbol rate', defaultt=str(mux_symbolrate),type=xbmcgui.INPUT_NUMERIC)
            param_update = '"symbolrate":' + sel_mux_symbolrate
if sel_param == 4:
sel_mux_modulation = dialog.select('Select the modulation of the mux', list=mux_modulation_val)
if sel_mux_modulation <0:
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
if sel_mux_modulation >= 0:
mux_modulation = mux_modulation_key[sel_mux_modulation]
param_update = '"constellation":"' + str(mux_modulation) + '"'
if sel_param == 5:
sel_mux_scanstate = dialog.select('Set the scan state of the mux', list=mux_scanstate_val)
if sel_mux_scanstate <0:
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
if sel_mux_scanstate >= 0:
mux_scanstate = mux_scanstate_key[sel_mux_scanstate]
param_update = '"scan_state":' + str(mux_scanstate)
if sel_param == 8:
confirm_del = dialog.yesno('Confirm mux delete', 'Are you sure want to delete the ' + mux_name + ' mux?')
            if not confirm_del:
                mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
                return
delete_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + mux_uuid_sel +'"]'
delete_mux = requests.get(delete_mux_url)
muxes_load(net_uuid_sel)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + mux_uuid_sel + '"}'
param_save = requests.get(param_url)
mux_param_load_atscc(mux_uuid_sel, net_uuid_sel)
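# Load the parameters of a DVB-T mux and build the edit menu.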
def mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel):
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + mux_uuid_sel
mux_load = requests.get(mux_url).json()
mux_name = mux_load['entries'][0]['text']
mux_enabled, mux_enabled_key, mux_enabled_val = find_param_dict(mux_load, 'enabled', 'enum')
mux_modulation, mux_modulation_key, mux_modulation_val = find_param_dict(mux_load, 'constellation', 'enum')
mux_delsys, mux_delsys_list = find_param_list(mux_load, 'delsys', 'enum')
mux_scanstate, mux_scanstate_key, mux_scanstate_val = find_param_dict(mux_load, 'scan_state', 'enum')
mux_frequency = find_param(mux_load, 'frequency')
mux_bandwidth, mux_bandwidth_key, mux_bandwidth_val = find_param_dict(mux_load, 'bandwidth', 'enum')
mux_transmission, mux_transmission_key, mux_transmission_val = find_param_dict(mux_load, 'transmission_mode', 'enum')
mux_guard, mux_guard_key, mux_guard_val = find_param_dict(mux_load, 'guard_interval', 'enum')
mux_hierarchy, mux_hierarchy_key, mux_hierarchy_val = find_param_dict(mux_load, 'hierarchy', 'enum')
mux_fec_hi, mux_fec_hi_key, mux_fec_hi_val = find_param_dict(mux_load, 'fec_hi', 'enum')
mux_fec_lo, mux_fec_lo_key, mux_fec_lo_val = find_param_dict(mux_load, 'fec_lo', 'enum')
mux_plp_id = find_param(mux_load, 'plp_id')
mux_services = find_param(mux_load, 'num_svc')
mux_channels = find_param(mux_load, 'num_chn')
mux_info_list = ["Enabled: " + str(mux_enabled), "Delivery System: " + str(mux_delsys), "Frequency: " + str(mux_frequency), "Bandwidth: " + str(mux_bandwidth), "COFDM Modulation: " + str(mux_modulation), "Transmission Mode: " + str(mux_transmission), "Guard Interval: " + str(mux_guard), "Hierarchy: " + str(mux_hierarchy), "FEC High: " + str(mux_fec_hi), "FEC Low: " + str(mux_fec_lo), "PLP ID: " + str(mux_plp_id), "Scan Status: " + str(mux_scanstate), "Number of Services: " + str(mux_services), "Number of Channels: " + str(mux_channels), "DELETE THE MUX"]
mux_param_edit_dvbt(mux_uuid_sel, mux_info_list, mux_plp_id, mux_fec_lo, mux_fec_lo_key, mux_fec_lo_val, mux_fec_hi, mux_fec_hi_key, mux_fec_hi_val, mux_hierarchy, mux_hierarchy_key, mux_hierarchy_val, mux_guard, mux_guard_key, mux_guard_val, mux_transmission, mux_transmission_key, mux_transmission_val, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_bandwidth, mux_bandwidth_key, mux_bandwidth_val, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel)
def mux_param_edit_dvbt(mux_uuid_sel, mux_info_list, mux_plp_id, mux_fec_lo, mux_fec_lo_key, mux_fec_lo_val, mux_fec_hi, mux_fec_hi_key, mux_fec_hi_val, mux_hierarchy, mux_hierarchy_key, mux_hierarchy_val, mux_guard, mux_guard_key, mux_guard_val, mux_transmission, mux_transmission_key, mux_transmission_val, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_bandwidth, mux_bandwidth_key, mux_bandwidth_val, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel):
if mux_scanstate == "ACTIVE":
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list, autoclose=4000)
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list)
if sel_param < 0:
muxes()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable or disable the mux', list=mux_enabled_val)
if sel_enabled <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_enabled = mux_enabled_key[sel_enabled]
param_update = '"enabled":' + str(mux_enabled)
if sel_param == 1:
sel_enabled = dialog.select('Select the mux delivery system', list=mux_delsys_list)
if sel_enabled <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_delsys = mux_delsys_list[sel_enabled]
param_update = '"delsys":"' + str(mux_delsys + '"')
if sel_param == 2:
sel_mux_frequency = dialog.input('Edit the mux frequency', defaultt=str(mux_frequency),type=xbmcgui.INPUT_NUMERIC)
param_update = '"frequency":' + sel_mux_frequency
if sel_param == 3:
sel_mux_bandwidth = dialog.select('Select the mux bandwidth', list=mux_bandwidth_val)
if sel_mux_bandwidth <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_bandwidth >= 0:
mux_bandwidth = mux_bandwidth_key[sel_mux_bandwidth]
param_update = '"bandwidth":"' + str(mux_bandwidth) + '"'
if sel_param == 4:
sel_mux_modulation = dialog.select('Select the COFDM modulation of the mux', list=mux_modulation_val)
if sel_mux_modulation <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_modulation >= 0:
mux_modulation = mux_modulation_key[sel_mux_modulation]
param_update = '"modulation":"' + str(mux_modulation) + '"'
if sel_param == 5:
sel_mux_transmission = dialog.select('Select the mux transmission mode', list=mux_transmission_val)
if sel_mux_transmission <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_transmission >= 0:
mux_transmission = mux_transmission_key[sel_mux_transmission]
param_update = '"transmission_mode":"' + str(mux_transmission) + '"'
if sel_param == 6:
sel_mux_guard = dialog.select('Select the mux guard interval', list=mux_guard_val)
if sel_mux_guard <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_guard >= 0:
mux_guard = mux_guard_key[sel_mux_guard]
param_update = '"guard_interval":"' + str(mux_guard) + '"'
if sel_param == 7:
sel_mux_hierarchy = dialog.select('Select the mux hierarchy', list=mux_hierarchy_val)
if sel_mux_hierarchy <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_hierarchy >= 0:
mux_hierarchy = mux_hierarchy_key[sel_mux_hierarchy]
param_update = '"hierarchy":"' + str(mux_hierarchy) + '"'
if sel_param == 8:
sel_mux_fec_hi = dialog.select('Select the mux forward error correction high', list=mux_fec_hi_val)
if sel_mux_fec_hi <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_fec_hi >= 0:
mux_fec_hi = mux_fec_hi_key[sel_mux_fec_hi]
param_update = '"fec_hi":"' + str(mux_fec_hi) + '"'
if sel_param == 9:
sel_mux_fec_lo = dialog.select('Select the mux forward error correction low', list=mux_fec_lo_val)
if sel_mux_fec_lo <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_fec_lo >= 0:
mux_fec_lo = mux_fec_lo_key[sel_mux_fec_lo]
param_update = '"fec_lo":"' + str(mux_fec_lo) + '"'
if sel_param == 10:
sel_mux_plp_id = dialog.input('Edit the mux PLP ID', defaultt=str(mux_plp_id),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_plp_id == "":
return
else:
param_update = '"plp_id":' + sel_mux_plp_id
if sel_param == 11:
sel_mux_scanstate = dialog.select('Set the scan state of the mux', list=mux_scanstate_val)
if sel_mux_scanstate <0:
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
if sel_mux_scanstate >= 0:
mux_scanstate = mux_scanstate_key[sel_mux_scanstate]
param_update = '"scan_state":' + str(mux_scanstate)
if sel_param == 14:
confirm_del = dialog.yesno('Confirm mux delete', 'Are you sure want to delete the ' + mux_name + ' mux?')
if not confirm_del:
return
delete_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + mux_uuid_sel +'"]'
delete_mux = requests.get(delete_mux_url)
muxes_load(net_uuid_sel)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + mux_uuid_sel + '"}'
param_save = requests.get(param_url)
mux_param_load_dvbt(mux_uuid_sel, net_uuid_sel)
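# Load the parameters of a DVB-S mux and build the edit menu.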
def mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel):
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + mux_uuid_sel
mux_load = requests.get(mux_url).json()
mux_name = mux_load['entries'][0]['text']
mux_enabled, mux_enabled_key, mux_enabled_val = find_param_dict(mux_load, 'enabled', 'enum')
mux_delsys, mux_delsys_list = find_param_list(mux_load, 'delsys', 'enum')
mux_frequency = find_param(mux_load, 'frequency')
mux_symbolrate = find_param(mux_load, 'symbolrate')
mux_polarization, mux_polarization_key, mux_polarization_val = find_param_dict(mux_load, 'polarisation', 'enum')
mux_modulation, mux_modulation_key, mux_modulation_val = find_param_dict(mux_load, 'modulation', 'enum')
mux_fec, mux_fec_key, mux_fec_val = find_param_dict(mux_load, 'fec', 'enum')
mux_scanstate, mux_scanstate_key, mux_scanstate_val = find_param_dict(mux_load, 'scan_state', 'enum')
mux_rolloff, mux_rolloff_key, mux_rolloff_val = find_param_dict(mux_load, 'rolloff', 'enum')
mux_pilot, mux_pilot_key, mux_pilot_val = find_param_dict(mux_load, 'pilot', 'enum')
mux_sidfilter = find_param(mux_load, 'sid_filter')
mux_streamid = find_param(mux_load, 'stream_id')
mux_plsmode, mux_plsmode_key, mux_plsmode_val = find_param_dict(mux_load, 'pls_mode', 'enum')
mux_plscode = find_param(mux_load, 'pls_code')
mux_services = find_param(mux_load, 'num_svc')
mux_channels = find_param(mux_load, 'num_chn')
mux_info_list = ["Enabled: " + str(mux_enabled), "Delivery System: " + str(mux_delsys), "Frequency: " + str(mux_frequency), "Symbol Rate: " + str(mux_symbolrate), "Polarization: " + str(mux_polarization), "Modulation: " + str(mux_modulation), "FEC: " + str(mux_fec), "Rolloff: " + str(mux_rolloff), "Pilot: " + str(mux_pilot), "Service ID: " + str(mux_sidfilter), "ISI Stream ID: " + str(mux_streamid), "PLS Mode: " + str(mux_plsmode), "PLS Code: " + str(mux_plscode), "Scan Status: " + str(mux_scanstate), "Number of Services: " + str(mux_services), "Number of Channels: " + str(mux_channels), "DELETE THE MUX"]
mux_param_edit_dvbs(mux_uuid_sel, mux_info_list, mux_sidfilter, mux_streamid, mux_polarization, mux_polarization_key, mux_polarization_val, mux_symbolrate, mux_plscode, mux_fec, mux_fec_key, mux_fec_val, mux_plsmode, mux_plsmode_key, mux_plsmode_val, mux_pilot, mux_pilot_key, mux_pilot_val, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_rolloff, mux_rolloff_key, mux_rolloff_val, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel)
def mux_param_edit_dvbs(mux_uuid_sel, mux_info_list, mux_sidfilter, mux_streamid, mux_polarization, mux_polarization_key, mux_polarization_val, mux_symbolrate, mux_plscode, mux_fec, mux_fec_key, mux_fec_val, mux_plsmode, mux_plsmode_key, mux_plsmode_val, mux_pilot, mux_pilot_key, mux_pilot_val, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_frequency, mux_rolloff, mux_rolloff_key, mux_rolloff_val, mux_modulation, mux_modulation_key, mux_modulation_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_delsys, mux_delsys_list, mux_name, mux_services, mux_channels, net_uuid_sel):
if mux_scanstate == "ACTIVE":
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list, autoclose=4000)
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list)
if sel_param < 0:
muxes()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable or disable the mux', list=mux_enabled_val)
if sel_enabled <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_enabled = mux_enabled_key[sel_enabled]
param_update = '"enabled":' + str(mux_enabled)
if sel_param == 1:
sel_enabled = dialog.select('Select the mux delivery system', list=mux_delsys_list)
if sel_enabled <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_delsys = mux_delsys_list[sel_enabled]
param_update = '"delsys":"' + str(mux_delsys + '"')
if sel_param == 2:
sel_mux_frequency = dialog.input('Edit the mux frequency', defaultt=str(mux_frequency),type=xbmcgui.INPUT_NUMERIC)
param_update = '"frequency":' + sel_mux_frequency
if sel_param == 3:
            sel_mux_symbolrate = dialog.input('Edit the mux symbol rate', defaultt=str(mux_symbolrate),type=xbmcgui.INPUT_NUMERIC)
            param_update = '"symbolrate":' + sel_mux_symbolrate
if sel_param == 4:
sel_mux_polarization = dialog.select('Select the polarization of the mux', list=mux_polarization_val)
if sel_mux_polarization <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_polarization >= 0:
mux_polarization = mux_polarization_key[sel_mux_polarization]
param_update = '"polarisation":"' + str(mux_polarization) + '"'
if sel_param == 5:
sel_mux_modulation = dialog.select('Select the modulation of the mux', list=mux_modulation_val)
if sel_mux_modulation <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_modulation >= 0:
mux_modulation = mux_modulation_key[sel_mux_modulation]
param_update = '"modulation":"' + str(mux_modulation) + '"'
if sel_param == 6:
sel_mux_fec = dialog.select('Select the mux forward error correction', list=mux_fec_val)
if sel_mux_fec <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_fec >= 0:
mux_fec = mux_fec_key[sel_mux_fec]
param_update = '"fec":"' + str(mux_fec) + '"'
if sel_param == 7:
sel_mux_rolloff = dialog.select('Select the mux rolloff', list=mux_rolloff_val)
if sel_mux_rolloff <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_rolloff >= 0:
mux_rolloff = mux_rolloff_key[sel_mux_rolloff]
param_update = '"rolloff":"' + str(mux_rolloff) + '"'
if sel_param == 8:
sel_mux_pilot = dialog.select('Select the mux pilot', list=mux_pilot_val)
if sel_mux_pilot <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_pilot >= 0:
mux_pilot = mux_pilot_key[sel_mux_pilot]
param_update = '"pilot":"' + str(mux_pilot) + '"'
if sel_param == 9:
sel_mux_sidfilter = dialog.input('Edit the mux Service ID - filter out others', defaultt=str(mux_sidfilter),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_sidfilter == "":
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"sid_filter":' + sel_mux_sidfilter
if sel_param == 10:
sel_mux_streamid = dialog.input('Edit the mux Stream ID', defaultt=str(mux_streamid),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_streamid == "":
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"stream_id":' + sel_mux_streamid
if sel_param == 11:
            sel_mux_plsmode = dialog.select('Select the mux PLS mode', list=mux_plsmode_val)
if sel_mux_plsmode <0:
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_plsmode >= 0:
mux_plsmode = mux_plsmode_key[sel_mux_plsmode]
param_update = '"pls_mode":"' + str(mux_plsmode) + '"'
if sel_param == 12:
sel_mux_plscode = dialog.input('Edit the mux PLS Code', defaultt=str(mux_plscode),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_plscode == "":
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"pls_code":' + sel_mux_plscode
if sel_param == 13:
sel_mux_scanstate = dialog.select('Set the scan state of the mux', list=mux_scanstate_val)
if sel_mux_scanstate <0:
                mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
if sel_mux_scanstate >= 0:
mux_scanstate = mux_scanstate_key[sel_mux_scanstate]
param_update = '"scan_state":' + str(mux_scanstate)
if sel_param == 16:
confirm_del = dialog.yesno('Confirm mux delete', 'Are you sure want to delete the ' + mux_name + ' mux?')
if not confirm_del:
return
delete_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + mux_uuid_sel +'"]'
delete_mux = requests.get(delete_mux_url)
muxes_load(net_uuid_sel)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + mux_uuid_sel + '"}'
param_save = requests.get(param_url)
mux_param_load_dvbs(mux_uuid_sel, net_uuid_sel)
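# Load the parameters of an IPTV mux and build the edit menu.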
def mux_param_load_iptv(mux_uuid_sel, net_uuid_sel):
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + mux_uuid_sel
mux_load = requests.get(mux_url).json()
mux_name = mux_load['entries'][0]['text']
mux_enabled, mux_enabled_key, mux_enabled_val = find_param_dict(mux_load, 'enabled', 'enum')
mux_iptv_muxname = find_param(mux_load, 'iptv_muxname')
mux_url = find_param(mux_load, 'iptv_url')
mux_atsc = find_param(mux_load, 'iptv_atsc')
mux_chnum = find_param(mux_load, 'channel_number')
mux_channels = find_param(mux_load, 'num_chn')
    mux_sname = find_param(mux_load, 'iptv_sname')
mux_services = find_param(mux_load, 'num_svc')
mux_scanstate, mux_scanstate_key, mux_scanstate_val = find_param_dict(mux_load, 'scan_state', 'enum')
    mux_info_list = ["Enabled: " + str(mux_enabled), "URL: " + str(mux_url), "ATSC: " + str(mux_atsc), "Name: " + str(mux_iptv_muxname), "Channel Number: " + str(mux_chnum), "Service Name: " + str(mux_sname), "Scan Status: " + str(mux_scanstate), "Number of Services: " + str(mux_services), "Number of Channels: " + str(mux_channels), "DELETE THE MUX"]
mux_param_edit_iptv(mux_uuid_sel, mux_info_list, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_name, mux_services, mux_channels, mux_url, mux_atsc, mux_iptv_muxname, mux_chnum, mux_sname, net_uuid_sel)
def mux_param_edit_iptv(mux_uuid_sel, mux_info_list, mux_scanstate, mux_scanstate_key, mux_scanstate_val, mux_enabled, mux_enabled_key, mux_enabled_val, mux_name, mux_services, mux_channels, mux_url, mux_atsc, mux_iptv_muxname, mux_chnum, mux_sname, net_uuid_sel):
if mux_scanstate == "ACTIVE":
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list, autoclose=4000)
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
sel_param = dialog.select(str(mux_name) + ' - Select parameter to edit', list=mux_info_list)
if sel_param < 0:
muxes()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable or disable the mux', list=mux_enabled_val)
if sel_enabled <0:
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
if sel_enabled >= 0:
mux_enabled = mux_enabled_key[sel_enabled]
param_update = '"enabled":' + str(mux_enabled)
if sel_param == 1:
sel_mux_url = dialog.input('Edit the mux URL', defaultt=str(mux_url),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_url == "":
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"url":"' + sel_mux_url + '"'
if sel_param == 2:
sel_atsc = dialog.select('Change if IPTV mux is ATSC', list=truefalse)
if sel_atsc <0:
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
if sel_atsc >= 0:
mux_atsc = truefalse[sel_atsc]
param_update = '"iptv_atsc":' + str(mux_atsc)
if sel_param == 3:
sel_mux_name = dialog.input('Edit the mux name', defaultt=str(mux_iptv_muxname),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_name == "":
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"iptv_muxname":"' + sel_mux_name + '"'
if sel_param == 4:
sel_mux_chnum = dialog.input('Edit the mux channel number', defaultt=str(mux_chnum),type=xbmcgui.INPUT_NUMERIC)
if sel_mux_chnum == "":
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"channel_number":' + sel_mux_chnum
if sel_param == 5:
sel_mux_sname = dialog.input('Edit the mux service name', defaultt=str(mux_sname),type=xbmcgui.INPUT_ALPHANUM)
if sel_mux_sname == "":
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
else:
param_update = '"iptv_sname":"' + sel_mux_sname + '"'
if sel_param == 6:
sel_mux_scanstate = dialog.select('Set the scan state of the mux', list=mux_scanstate_val)
if sel_mux_scanstate <0:
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
if sel_mux_scanstate >= 0:
mux_scanstate = mux_scanstate_key[sel_mux_scanstate]
param_update = '"scan_state":' + str(mux_scanstate)
if sel_param == 9:
confirm_del = dialog.yesno('Confirm mux delete', 'Are you sure want to delete the ' + mux_name + ' mux?')
            if not confirm_del:
                mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
                return
delete_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + mux_uuid_sel +'"]'
delete_mux = requests.get(delete_mux_url)
muxes_load(net_uuid_sel)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + mux_uuid_sel + '"}'
param_save = requests.get(param_url)
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
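# Create a new mux on a DVB network: pick the network, ask for a frequency and create the mux with default settings.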
def mux_new():
new_mux_net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?class=mpegts_network'
new_mux_net_load = requests.get(new_mux_net_url).json()
new_mux_net_name = []
new_mux_net_uuid = []
new_mux_net_class = []
for net_n in new_mux_net_load['entries']:
new_mux_net_name.append(net_n['text'])
for net_u in new_mux_net_load['entries']:
new_mux_net_uuid.append(net_u['uuid'])
for net_c in new_mux_net_load['entries']:
new_mux_net_class.append(net_c['class'])
sel_new_mux_network = dialog.select('Select a network for new mux', list=new_mux_net_name)
if sel_new_mux_network < 0:
muxes()
if sel_new_mux_network >= 0:
new_mux_net_uuid_sel = new_mux_net_uuid[sel_new_mux_network]
new_mux_net_class_sel = new_mux_net_class[sel_new_mux_network]
new_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_class?uuid=' + new_mux_net_uuid_sel
new_mux_load = requests.get(new_mux_url).json()
sel_freq = dialog.input('Enter the frequency of the new mux', defaultt="",type=xbmcgui.INPUT_NUMERIC)
if sel_freq == "":
return
else:
if new_mux_net_class_sel == "dvb_network_atsc_t":
mux_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_create?conf={"enabled":1,"epg":1,"delsys":"ATSC-T","frequency":' + str(sel_freq) + ',"modulation":"AUTO","scan_state":0,"charset":"","tsid_zero":false,"pmt_06_ac3":0,"eit_tsid_nocheck":false,"sid_filter":0}&uuid=' + str(new_mux_net_uuid_sel)
new_mux_create = requests.get(mux_create_url).json()
mux_uuid_sel = new_mux_create['uuid']
mux_param_load_atsct(mux_uuid_sel, new_mux_net_uuid_sel)
if new_mux_net_class_sel == "dvb_network_atsc_c":
mux_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_create?conf={"enabled":1,"epg":1,"delsys":"ATSC-C","frequency":' + str(sel_freq) + ',"symbolrate":0,"constellation":"AUTO","fec":"AUTO","scan_state":0,"charset":"","tsid_zero":false,"pmt_06_ac3":0,"eit_tsid_nocheck":false,"sid_filter":0}&uuid=' + str(new_mux_net_uuid_sel)
new_mux_create = requests.get(mux_create_url).json()
mux_uuid_sel = new_mux_create['uuid']
mux_param_load_atscc(mux_uuid_sel,new_mux_net_uuid_sel)
if new_mux_net_class_sel == "dvb_network_dvbc":
mux_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_create?conf={"enabled":1,"epg":1,"delsys":"DVB-C","frequency":' + str(sel_freq) + ',"symbolrate":0,"constellation":"AUTO","fec":"AUTO","scan_state":0,"charset":"","tsid_zero":false,"pmt_06_ac3":0,"eit_tsid_nocheck":false,"sid_filter":0}&uuid=' + str(new_mux_net_uuid_sel)
new_mux_create = requests.get(mux_create_url).json()
mux_uuid_sel = new_mux_create['uuid']
mux_param_load_atscc(mux_uuid_sel,new_mux_net_uuid_sel)
if new_mux_net_class_sel == "dvb_network_dvbt":
mux_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_create?conf={"enabled":1,"epg":1,"delsys":"DVBT","frequency":' + str(sel_freq) + ',"bandwidth":"AUTO","constellation":"AUTO","transmission_mode":"AUTO","guard_interval":"AUTO","hierarchy":"AUTO","fec_hi":"AUTO","fec_lo":"AUTO","plp_id":-1,"scan_state":0,"charset":"","tsid_zero":false,"pmt_06_ac3":0,"eit_tsid_nocheck":false,"sid_filter":0}&uuid=' + str(new_mux_net_uuid_sel)
new_mux_create = requests.get(mux_create_url).json()
mux_uuid_sel = new_mux_create['uuid']
mux_param_load_dvbt(mux_uuid_sel, new_mux_net_uuid_sel)
if new_mux_net_class_sel == "dvb_network_dvbs":
dialog.ok("Not available yet!", "DVB-S configuration is not yet available in this program.")
muxes()
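# Create a new IPTV mux on the given network from a URL, name, channel number and service name.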
def mux_new_iptv(net_uuid_sel):
new_mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_class?uuid=' + net_uuid_sel
new_mux_load = requests.get(new_mux_url).json()
sel_url = dialog.input('Enter the URL of the new mux', defaultt="http://",type=xbmcgui.INPUT_ALPHANUM)
if sel_url == "":
return
    sel_atsc = 'true' if dialog.yesno('ATSC based mux', "Is this IPTV mux ATSC?") else 'false'  # convert the Kodi bool to a JSON boolean string for the API call below
sel_name = dialog.input('Enter the name of the new mux', defaultt="",type=xbmcgui.INPUT_ALPHANUM)
sel_chnum = dialog.input('Enter the channel number of the new mux', defaultt="",type=xbmcgui.INPUT_NUMERIC)
sel_service = dialog.input('Enter the service name of the new mux', defaultt=str(sel_name),type=xbmcgui.INPUT_ALPHANUM)
mux_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/mux_create?conf={"enabled":1,"epg":1,"iptv_url":"' + sel_url + '","iptv_atsc":' + str(sel_atsc) + ',"iptv_muxname":"' + str(sel_name) + '","channel_number":"' + str(sel_chnum) + '","iptv_sname":"' + str(sel_service) + '","scan_state":0,"charset":"","priority":0,"spriority":0,"iptv_substitute":false,"iptv_interface":"","iptv_epgid":"","iptv_icon":"","iptv_tags":"","iptv_satip_dvbt_freq":0,"iptv_buffer_limit":0,"tsid_zero":false,"pmt_06_ac3":0,"eit_tsid_nocheck":false,"sid_filter":0,"iptv_respawn":false,"iptv_kill":0,"iptv_kill_timeout":5,"iptv_env":"","iptv_hdr":""}&uuid=' + str(net_uuid_sel)
new_mux_create = requests.get(mux_create_url).json()
mux_uuid_sel = new_mux_create['uuid']
mux_param_load_iptv(mux_uuid_sel, net_uuid_sel)
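# Load a channel's settings (name, number, icon, EPG source) and build the edit menu.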
def ch_param_load(ch_uuid_sel):
ch_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + ch_uuid_sel
ch_load = requests.get(ch_url).json()
ch_enabled = find_param(ch_load, 'enabled')
ch_autoname = find_param(ch_load, 'autoname')
ch_name = find_param(ch_load, 'name')
ch_number = find_param(ch_load, 'number')
ch_icon = find_param(ch_load, 'icon')
ch_epg_list = find_list(ch_load, 'epggrab', 'value')
ch_epg_text = []
for ch_epg_y in ch_epg_list:
ch_epg_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + ch_epg_y
ch_epg_get = requests.get(ch_epg_url).json()
ch_epg_text.append(ch_epg_get['entries'][0]['text'])
ch_epg = ', '.join(ch_epg_text)
ch_info_list = ["Name: " + str(ch_name), "Number: " + str(ch_number), "Enabled: " + str(ch_enabled), "Autoname: " + str(ch_autoname), "Icon URL: " + str(ch_icon), "EPG Source: " + str(ch_epg), "DELETE THE CHANNEL"]
ch_param_edit(ch_uuid_sel, ch_info_list, ch_enabled, ch_autoname, ch_name, ch_number, ch_icon, ch_epg)
def ch_param_edit(ch_uuid_sel, ch_info_list, ch_enabled, ch_autoname, ch_name, ch_number, ch_icon, ch_epg):
sel_param = dialog.select('Channels Configuration - Select parameter to edit', list=ch_info_list)
if sel_param < 0:
channels()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_ch_name = dialog.input('Edit the channel name', defaultt=ch_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_ch_name == "":
ch_param_load(ch_uuid_sel)
else:
param_update = '"name":"' + sel_ch_name + '"'
if sel_param == 1:
sel_ch_number = dialog.input('Edit the channel number', defaultt=ch_number,type=xbmcgui.INPUT_NUMERIC)
param_update = '"number":"' + sel_ch_number + '"'
if sel_param == 2:
sel_enabled = dialog.select('Enable or disable channel', list=enabledisable)
if sel_enabled >= 0:
ch_enabled = truefalse[sel_enabled]
param_update = '"enabled":' + ch_enabled
if sel_param == 3:
sel_autoname = dialog.select('Select True/False to automatically name the channel with the service name', list=truefalse)
if sel_autoname >= 0:
ch_autoname = truefalse[sel_autoname]
param_update = '"autoname":' + ch_autoname
if sel_param == 4:
sel_ch_icon = dialog.input('Edit the channel icon URL', defaultt=ch_icon,type=xbmcgui.INPUT_ALPHANUM)
if sel_ch_icon == "":
ch_param_load(ch_uuid_sel)
else:
param_update = '"icon":"' + sel_ch_icon + '"'
if sel_param == 5:
epg_grid_url = 'http://' + tvh_url + ':' + tvh_port + '/api/epggrab/channel/grid?sort=names&dir=ASC&limit=999999999&all=1'
epg_grid_load = requests.get(epg_grid_url).json()
epg_list_text = [x['names'] for x in epg_grid_load['entries']]
epg_list_id = [x['id'] for x in epg_grid_load['entries']]
epg_list_uuid = [x['uuid'] for x in epg_grid_load['entries']]
epg_list_full = zip(epg_list_text, epg_list_id)
epg_list_list = ["%s - %s" % x for x in epg_list_full]
sel_epg = dialog.select('Select EPG source for channel: ' + str(ch_number) + " " + str(ch_name), list=epg_list_list)
if sel_epg < 0:
ch_param_edit(ch_uuid_sel, ch_info_list, ch_enabled, ch_autoname, ch_name, ch_number, ch_icon, ch_epg)
if sel_epg >= 0:
epg_uuid_sel = epg_list_uuid[sel_epg]
param_update = '"epggrab":["' + epg_uuid_sel + '"]'
if sel_param == 6:
confirm_del = dialog.yesno('Confirm delete channel', 'Are you sure want to delete the ' + ch_name + ' channel?')
            if not confirm_del:
                ch_param_load(ch_uuid_sel)
                return
delete_ch_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + ch_uuid_sel +'"]'
delete_ch = requests.get(delete_ch_url)
channels()
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + ch_uuid_sel + '"}'
param_save = requests.get(param_url)
ch_param_load(ch_uuid_sel)
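# Translate the grabber cron string into readable text and optionally build a new cron from the wizard selections.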
def cron_edit(epg_intcron):
cron_def_weekday_list = ['Everyday', 'Every Other Day', 'on Sundays', 'on Mondays', 'on Tuesdays', 'on Wednesdays', 'on Thursdays', 'on Fridays', 'on Saturdays']
cron_def_weekday = {'*':'Everyday', '2-30/2': 'Every Other Day', '0':'on Sundays', '1':'on Mondays', '2':'on Tuesdays', '3':'on Wednesdays', '4':'on Thursdays', '5':'on Fridays', '6':'on Saturdays'}
cron_def_split_hour_list = ['Specific Hour', '2x a Day', '3x a Day', '4x a Day', '6x a Day', '8x a Day', '12x a Day', 'every Hour']
cron_def_split_hour = {'*':'every Hour', '*/2':'12x a Day', '*/3':'8x a Day', '*/4':'6x a Day','*/6':'4x a Day', '*/8':'3x a Day', '*/12':'2x a Day'}
cron_def_hours = ['12:00AM - Midnight', '1:00AM', '2:00AM', '3:00AM', '4:00AM', '5:00AM', '6:00AM', '7:00AM', '8:00AM', '9:00AM', '10:00AM', '11:00AM', '12:00PM - Noon', '1:00PM', '2:00PM', '3:00PM', '4:00PM', '5:00PM', '6:00PM', '7:00PM', '8:00PM', '9:00PM', '10:00PM', '11:00PM']
epg_intcron_clean = re.sub('#.*\n', '', epg_intcron)
cron_current = epg_intcron_clean.split(' ')
cron_current_min = str(int(cron_current[0])).zfill(2)
if '*' in cron_current[1]:
cron_current_str = cron_current_min + ' Minutes past the hour, ' + cron_def_split_hour[cron_current[1]] + ', ' + cron_def_weekday[cron_current[2]]
else:
cron_ampm = 'AM'
if cron_current[1] == '00' or cron_current[1] == '0':
cron_current_hour = '12'
elif int(cron_current[1]) > 12:
            cron_current_hour = str(int(cron_current[1]) - 12)
cron_ampm = 'PM'
else:
cron_current_hour = cron_current[1]
cron_current_str = cron_current_hour + ':' + cron_current_min + cron_ampm + ' - ' + cron_def_weekday[cron_current[2]]
cron_edit_sel = dialog.yesno('Cron edit', 'The grabber is set to run at:', cron_current_str, 'Do you wish to edit this cron setting?')
if cron_edit_sel:
cron_sel_weekday = dialog.select('Select which day(s) to run the grabber', list=cron_def_weekday_list)
if cron_sel_weekday >= 0:
cron_new_weekday = cron_def_weekday.keys()[cron_def_weekday.values().index(cron_def_weekday_list[cron_sel_weekday])]
cron_sel_hour = dialog.select('Select which hour(s) to run the grabber', list=cron_def_split_hour_list)
if cron_sel_hour == 0:
cron_sel_hour_spec = dialog.select('Select which hour(s) to run the grabber', list=cron_def_hours)
cron_new_hour = cron_sel_hour_spec
if cron_sel_hour > 0:
cron_new_hour = cron_def_split_hour.keys()[cron_def_split_hour.values().index(cron_def_split_hour_list[cron_sel_hour])]
cron_new_min = dialog.input('Enter the minutes after the hour to run the grabber', defaultt='0', type=xbmcgui.INPUT_NUMERIC)
cron_update = str(cron_new_min) + ' ' + str(cron_new_hour) + ' ' + cron_new_weekday + ' * *'
return cron_update
else:
return epg_intcron
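# Save a change to the global EPG grabber settings (rename/renumber/re-icon, periodic save, cron strings, OTA options).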
def epg_param(sel_epg, epg_rename, epg_renumber, epg_reicon, epg_dbsave, epg_intcron, epg_otainit, epg_otacron, epg_otatime):
param_update = ""
if sel_epg == 3:
sel_epg_rename = dialog.select('Update channel name with EPG provider name', list=enabledisable)
if sel_epg_rename >= 0:
epg_rename = truefalse[sel_epg_rename]
param_update = '"channel_rename":' + epg_rename
if sel_epg == 4:
sel_epg_renumber = dialog.select('Update channel number with EPG provider number', list=enabledisable)
if sel_epg_renumber >= 0:
epg_renumber = truefalse[sel_epg_renumber]
param_update = '"channel_renumber":' + epg_renumber
if sel_epg == 5:
sel_epg_reicon = dialog.select('Update channel icon with EPG provider icon', list=enabledisable)
if sel_epg_reicon >= 0:
epg_reicon = truefalse[sel_epg_reicon]
param_update = '"channel_reicon":' + epg_reicon
if sel_epg == 6:
sel_epg_dbsave = dialog.input('Save EPG data to disk every X hours (set 0 to disable)', defaultt=str(epg_dbsave),type=xbmcgui.INPUT_NUMERIC)
if sel_epg_dbsave == "":
sel_epg_dbsave = epg_dbsave
param_update = '"epgdb_periodicsave":' + str(sel_epg_dbsave)
if sel_epg == 7:
sel_epg_intcron_type = dialog.yesno('Edit the cron for internal grabbers', 'If you are familiar with cron settings you can manually enter the cron.', '', 'Otherwise use the wizard to select the grabber run times.', 'Wizard', 'Manual')
if sel_epg_intcron_type:
sel_epg_intcron = dialog.input('Edit the cron multiline for internal grabbers', defaultt=epg_intcron,type=xbmcgui.INPUT_ALPHANUM)
if sel_epg_intcron == "":
sel_epg_intcron = epg_intcron
else:
sel_epg_intcron = cron_edit(epg_intcron)
if sel_epg_intcron == "":
sel_epg_intcron = epg_intcron
param_update = '"cron":"' + sel_epg_intcron + '"'
if sel_epg == 8:
sel_epg_otainit = dialog.select('Enable or disable initial EPG grab at startup', list=enabledisable)
if sel_epg_otainit >= 0:
epg_otainit = truefalse[sel_epg_otainit]
param_update = '"ota_initial":' + epg_otainit
if sel_epg == 9:
sel_epg_otacron_type = dialog.yesno('Edit the cron for OTA grabbers', 'If you are familiar with cron settings you can manually enter the cron.', '', 'Otherwise use the wizard to select the grabber run times.', 'Wizard', 'Manual')
if sel_epg_otacron_type:
sel_epg_otacron = dialog.input('Edit the cron multiline for over-the-air grabbers', defaultt=epg_otacron,type=xbmcgui.INPUT_ALPHANUM)
if sel_epg_otacron == "":
sel_epg_otacron = epg_otacron
else:
sel_epg_otacron = cron_edit(epg_otacron)
if sel_epg_otacron == "":
sel_epg_otacron = epg_otacron
param_update = '"ota_cron":"' + sel_epg_otacron + '"'
if sel_epg == 10:
sel_epg_otatime = dialog.input('OTA EPG scan timeout in seconds (30-7200)', defaultt=str(epg_otatime),type=xbmcgui.INPUT_NUMERIC)
if sel_epg_otatime == "":
sel_epg_otatime = epg_otatime
param_update = '"ota_timeout":' + str(sel_epg_otatime)
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/epggrab/config/save?node={' + param_update + '}'
param_save = requests.get(param_url)
epg()
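# List all EPG grabber modules with their enabled state and open the selected one for editing.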
def epgmod_list_load():
epg_modlist_url = 'http://' + tvh_url + ':' + tvh_port + '/api/epggrab/module/list'
epg_modlist_load = requests.get(epg_modlist_url).json()
epg_modlist_name = []
epg_modlist_uuid = []
epg_modlist_enabled = []
for n in epg_modlist_load['entries']:
epg_modlist_name.append(n['title'])
for u in epg_modlist_load['entries']:
epg_modlist_uuid.append(u['uuid'])
for e in epg_modlist_load['entries']:
epg_modlist_enabled.append(str(e['status']))
epg_modlist_enabled = [w.replace('epggrabmodNone', ' ** DISABLED **') for w in epg_modlist_enabled]
epg_modlist_enabled = [w.replace('epggrabmodEnabled', ' ') for w in epg_modlist_enabled]
epg_modlist_full = zip(epg_modlist_name, epg_modlist_enabled)
epg_modlist_list = ["%s %s" % x for x in epg_modlist_full]
epg_modlist_list, epg_modlist_uuid = (list(t) for t in zip(*sorted(zip(epg_modlist_list, epg_modlist_uuid))))
sel_epgmod = dialog.select('Select an EPG grabber module to configure', list=epg_modlist_list)
if sel_epgmod < 0:
epg()
if sel_epgmod >= 0:
epgmod_uuid_sel = epg_modlist_uuid[sel_epgmod]
epgmod_param_load(epgmod_uuid_sel)
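# Load an EPG grabber module's settings; the menu differs for External, Internal and Over-the-air grabbers.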
def epgmod_param_load(epgmod_uuid_sel):
epgmod_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + epgmod_uuid_sel
epgmod_load = requests.get(epgmod_url).json()
epgmod_enabled = find_param(epgmod_load, 'enabled')
epgmod_name = find_param(epgmod_load, 'text')
epgmod_priority = find_param(epgmod_load, 'priority')
epgmod_type = find_param(epgmod_load, 'type')
epgmod_dnchnum = ""
epgmod_dnchnum_key = ""
epgmod_dnchnum_val = ""
epgmod_args = ""
if epgmod_type == "External":
epgmod_dnchnum = find_param(epgmod_load, 'dn_chnum')
epgmod_args = ""
epgmod_info_list = ["Enabled: " + str(epgmod_enabled), "Priority: " + str(epgmod_priority), "Channel Numbers (heuristic): " + str(epgmod_dnchnum)]
if epgmod_type == "Internal":
epgmod_dnchnum, epgmod_dnchnum_key, epgmod_dnchnum_val = find_param_dict(epgmod_load, 'dn_chnum', 'enum')
epgmod_args = find_param(epgmod_load, 'args')
epgmod_info_list = ["Enabled: " + str(epgmod_enabled), "Priority: " + str(epgmod_priority), "Channel Numbers (heuristic): " + str(epgmod_dnchnum), "Extra Arguments: " + str(epgmod_args)]
if epgmod_type == "Over-the-air":
epgmod_info_list = ["Enabled: " + str(epgmod_enabled), "Priority: " + str(epgmod_priority)]
epgmod_param_edit(epgmod_uuid_sel, epgmod_info_list, epgmod_enabled, epgmod_name, epgmod_priority, epgmod_type, epgmod_dnchnum, epgmod_dnchnum_key, epgmod_dnchnum_val, epgmod_args)
def epgmod_param_edit(epgmod_uuid_sel, epgmod_info_list, epgmod_enabled, epgmod_name, epgmod_priority, epgmod_type, epgmod_dnchnum, epgmod_dnchnum_key, epgmod_dnchnum_val, epgmod_args):
sel_param = dialog.select('EPG Module Configuration - Select parameter to edit', list=epgmod_info_list)
if sel_param < 0:
epgmod_list_load()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_enabled = dialog.select('Enable or disable the EPG grabber module', list=enabledisable)
if sel_enabled <0:
epgmod_param_load(epgmod_uuid_sel)
if sel_enabled >= 0:
epgmod_enabled = truefalse[sel_enabled]
param_update = '"enabled":' + epgmod_enabled
if sel_param == 1:
sel_epgmod_priority = dialog.input('Edit the EPG grabber priority - higher number gets used first', defaultt=str(epgmod_priority),type=xbmcgui.INPUT_NUMERIC)
param_update = '"priority":"' + sel_epgmod_priority + '"'
if sel_param == 2 and epgmod_type == "External":
sel_epgmod_dnchnum = dialog.select('Enable or disable trying to read channel number from xml tag', list=enabledisable)
if sel_epgmod_dnchnum <0:
epgmod_param_load(epgmod_uuid_sel)
            if sel_epgmod_dnchnum >= 0:
epgmod_dnchnum = truefalse[sel_epgmod_dnchnum]
param_update = '"dn_chnum":' + epgmod_dnchnum
if sel_param == 2 and epgmod_type == "Internal":
            sel_epgmod_dnchnum = dialog.select('Select the mode for reading channel number from the displayname xml tag', list=epgmod_dnchnum_val)
if sel_epgmod_dnchnum <0:
epgmod_param_load(epgmod_uuid_sel)
if sel_epgmod_dnchnum >= 0:
epgmod_dnchnum = epgmod_dnchnum_key[sel_epgmod_dnchnum]
param_update = '"dn_chnum":"' + str(epgmod_dnchnum) + '"'
if sel_param == 3:
sel_epgmod_args = dialog.input('Additional arguments to pass to the grabber', defaultt=epgmod_args,type=xbmcgui.INPUT_ALPHANUM)
param_update = '"args":"' + sel_epgmod_args + '"'
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + epgmod_uuid_sel + '"}'
param_save = requests.get(param_url)
epgmod_param_load(epgmod_uuid_sel)
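# Load an adapter's settings; DVB-S frontends branch to the satellite-config editor,
# all other adapters get the generic editor with their assigned networks resolved to names.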
def adapt_param_load(adapter_uuid_sel):
adapt_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + adapter_uuid_sel
adapt_load = requests.get(adapt_url).json()
adapt_class = adapt_load['entries'][0]['class']
adapt_enabled = find_param(adapt_load, 'enabled')
adapt_priority = find_param(adapt_load, 'priority')
adapt_name = find_param(adapt_load, 'displayname')
adapt_otaepg = find_param(adapt_load, 'ota_epg')
adapt_init = find_param(adapt_load, 'initscan')
adapt_idle = find_param(adapt_load, 'idlescan')
if adapt_class == 'linuxdvb_frontend_dvbs':
adapt_satconf, adapt_satconf_key, adapt_satconf_val = find_param_dict(adapt_load, 'satconf', 'enum')
adapt_info_list = ["Name: " + str(adapt_name), "Enabled: " + str(adapt_enabled), "Satellite config: " + str(adapt_satconf), "EDIT SATELLITE LNB/SWITCH", "Priority: " + str(adapt_priority), "Enable OTA EPG scanning: " + str(adapt_otaepg), "Allow initial scanning on startup: " + str(adapt_init), "Allow idle scanning: " + str(adapt_idle)]
adapt_param_dvbsedit(adapter_uuid_sel, adapt_info_list, adapt_enabled, adapt_name, adapt_priority, adapt_otaepg, adapt_init, adapt_idle, adapt_satconf, adapt_satconf_key, adapt_satconf_val)
else:
adapt_network = find_param(adapt_load, 'networks')
adapt_network_uuid_list = find_list(adapt_load, 'networks', 'value')
if adapt_network == []:
adapt_network = ""
else:
adapt_network_name = []
for net_u in adapt_network_uuid_list:
adapt_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
adapt_network_load = requests.get(adapt_network_url).json()
adapt_network_name.append(adapt_network_load['entries'][0]['text'])
adapt_network = ' & '.join(str(s) for s in adapt_network_name)
adapt_info_list = ["Name: " + str(adapt_name), "Enabled: " + str(adapt_enabled), "Networks: " + str(adapt_network), "Priority: " + str(adapt_priority), "Enable OTA EPG scanning: " + str(adapt_otaepg), "Allow initial scanning on startup: " + str(adapt_init), "Allow idle scanning: " + str(adapt_idle)]
adapt_param_edit(adapter_uuid_sel, adapt_info_list, adapt_enabled, adapt_name, adapt_network, adapt_network_uuid_list, adapt_priority, adapt_otaepg, adapt_init, adapt_idle)
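# Locate the satconf node attached to the adapter and branch on its class
# (single LNB, 2-port switch or 4-port switch) to pick the matching editor.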
def lnb_param_load(adapter_uuid_sel):
lnb_find_url = 'http://' + tvh_url + ':' + tvh_port + '/api/hardware/tree?uuid=' + adapter_uuid_sel
lnb_find_load = requests.get(lnb_find_url).json()
lnb_uuid = lnb_find_load[0]['uuid']
lnb_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + lnb_uuid
lnb_load = requests.get(lnb_url).json()
lnb_class = lnb_load['entries'][0]['class']
lnb_name = lnb_load['entries'][0]['text']
lnb_early_tune = find_param(lnb_load, 'early_tune')
lnb_diseqc_repeats = find_param(lnb_load, 'diseqc_repeats')
lnb_diseqc_full = find_param(lnb_load, 'diseqc_full')
lnb_poweroff = find_param(lnb_load, 'lnb_poweroff')
if lnb_class == 'linuxdvb_satconf_lnbonly':
lnb_network = find_param(lnb_load, 'networks')
lnb_network_uuid_list = find_list(lnb_load, 'networks', 'value')
if lnb_network == []:
lnb_network = ""
else:
lnb_network_name = []
for net_u in lnb_network_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_name.append(lnb_network_load['entries'][0]['text'])
lnb_network = ' & '.join(str(s) for s in lnb_network_name)
lnb_info_list = ["Name: " + str(lnb_name), "Tune before DiseqC: " + str(lnb_early_tune), "DiseqC repeats: " + str(lnb_diseqc_repeats), "Full DiseqC: " + str(lnb_diseqc_full), "Turn off LNB when idle: " + str(lnb_poweroff), "Networks: " + str(lnb_network)]
lnb_only_param_edit(lnb_uuid, lnb_info_list, lnb_name, lnb_early_tune, lnb_diseqc_repeats, lnb_diseqc_full, lnb_poweroff, lnb_network, lnb_network_uuid_list, adapter_uuid_sel)
if lnb_class == 'linuxdvb_satconf_2port':
lnb_network_a = find_param(lnb_load, 'network_a')
lnb_network_a_uuid_list = find_list(lnb_load, 'network_a', 'value')
if lnb_network_a == []:
lnb_network_a = ""
else:
lnb_network_a_name = []
for net_u in lnb_network_a_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_a_name.append(lnb_network_load['entries'][0]['text'])
lnb_network_a = ' & '.join(str(s) for s in lnb_network_a_name)
lnb_network_b = find_param(lnb_load, 'network_b')
lnb_network_b_uuid_list = find_list(lnb_load, 'network_b', 'value')
if lnb_network_b == []:
lnb_network_b = ""
else:
lnb_network_b_name = []
for net_u in lnb_network_b_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_b_name.append(lnb_network_load['entries'][0]['text'])
lnb_network_b = ' & '.join(str(s) for s in lnb_network_b_name)
lnb_info_list = ["Name: " + str(lnb_name), "Tune before DiseqC: " + str(lnb_early_tune), "DiseqC repeats: " + str(lnb_diseqc_repeats), "Full DiseqC: " + str(lnb_diseqc_full), "Turn off LNB when idle: " + str(lnb_poweroff), "Network A: " + str(lnb_network_a), "Network B: " + str(lnb_network_b)]
lnb_2port_param_edit(lnb_uuid, lnb_info_list, lnb_name, lnb_early_tune, lnb_diseqc_repeats, lnb_diseqc_full, lnb_poweroff, lnb_network_a, lnb_network_a_uuid_list, lnb_network_b, lnb_network_b_uuid_list, adapter_uuid_sel)
if lnb_class == 'linuxdvb_satconf_4port':
lnb_network_aa = find_param(lnb_load, 'network_aa')
lnb_network_aa_uuid_list = find_list(lnb_load, 'network_aa', 'value')
if lnb_network_aa == []:
lnb_network_aa = ""
else:
lnb_network_aa_name = []
for net_u in lnb_network_aa_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_aa_name.append(lnb_network_load['entries'][0]['text'])
lnb_network_aa = ' & '.join(str(s) for s in lnb_network_aa_name)
lnb_network_ab = find_param(lnb_load, 'network_ab')
lnb_network_ab_uuid_list = find_list(lnb_load, 'network_ab', 'value')
if lnb_network_ab == []:
lnb_network_ab = ""
else:
lnb_network_ab_name = []
for net_u in lnb_network_ab_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_ab_name.append(lnb_network_load['entries'][0]['text'])
lnb_network_ab = ' & '.join(str(s) for s in lnb_network_ab_name)
lnb_network_ba = find_param(lnb_load, 'network_ba')
lnb_network_ba_uuid_list = find_list(lnb_load, 'network_ba', 'value')
if lnb_network_ba == []:
lnb_network_ba = ""
else:
lnb_network_ba_name = []
for net_u in lnb_network_ba_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_ba_name.append(lnb_network_load['entries'][0]['text'])
lnb_network_ba = ' & '.join(str(s) for s in lnb_network_ba_name)
lnb_network_bb = find_param(lnb_load, 'network_bb')
lnb_network_bb_uuid_list = find_list(lnb_load, 'network_bb', 'value')
if lnb_network_bb == []:
lnb_network_bb = ""
else:
lnb_network_bb_name = []
for net_u in lnb_network_bb_uuid_list:
lnb_network_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_u)
lnb_network_load = requests.get(lnb_network_url).json()
lnb_network_bb_name.append(lnb_network_load['entries'][0]['text'])
lnb_network_bb = ' & '.join(str(s) for s in lnb_network_bb_name)
lnb_info_list = ["Name: " + str(lnb_name), "Tune before DiseqC: " + str(lnb_early_tune), "DiseqC repeats: " + str(lnb_diseqc_repeats), "Full DiseqC: " + str(lnb_diseqc_full), "Turn off LNB when idle: " + str(lnb_poweroff), "Network AA: " + str(lnb_network_aa), "Network AB: " + str(lnb_network_ab), "Network BA: " + str(lnb_network_ba), "Network BB: " + str(lnb_network_bb)]
lnb_4port_param_edit(lnb_uuid, lnb_info_list, lnb_name, lnb_early_tune, lnb_diseqc_repeats, lnb_diseqc_full, lnb_poweroff, lnb_network_aa, lnb_network_aa_uuid_list, lnb_network_ab, lnb_network_ab_uuid_list, lnb_network_ba, lnb_network_ba_uuid_list, lnb_network_bb, lnb_network_bb_uuid_list, adapter_uuid_sel)
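# Editor for a single-LNB satellite config: DiseqC behaviour, LNB power-off and assigned networks.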
def lnb_only_param_edit(lnb_uuid, lnb_info_list, lnb_name, lnb_early_tune, lnb_diseqc_repeats, lnb_diseqc_full, lnb_poweroff, lnb_network, lnb_network_uuid_list, adapter_uuid_sel):
sel_param = dialog.select('LNB Configuration - Select parameter to edit', list=lnb_info_list)
if sel_param < 0:
adapt_param_load(adapter_uuid_sel)
if sel_param >= 0:
truefalse = ['true', 'false']
enabledisable = ['Enabled', 'Disabled']
param_update = ""
if sel_param == 1:
sel_lnb_early_tune = dialog.select('Enable or disable tune before DiseqC', list=enabledisable)
if sel_lnb_early_tune >= 0:
lnb_early_tune_sel = truefalse[sel_lnb_early_tune]
param_update = '"early_tune":' + lnb_early_tune_sel
if sel_param == 2:
sel_lnb_diseqc_repeats = dialog.input('Select the number of repeats for the DiseqC commands', defaultt=str(lnb_diseqc_repeats),type=xbmcgui.INPUT_NUMERIC)
if sel_lnb_diseqc_repeats == "":
sel_lnb_diseqc_repeats = lnb_diseqc_repeats
param_update = '"diseqc_repeat":"' + str(sel_lnb_diseqc_repeats) + '"'
if sel_param == 3:
sel_lnb_diseqc_full = dialog.select('Enable or disable to always send the whole DiseqC sequence', list=enabledisable)
if sel_lnb_diseqc_full >= 0:
lnb_diseqc_full_sel = truefalse[sel_lnb_diseqc_full]
param_update = '"diseqc_full":' + lnb_diseqc_full_sel
if sel_param == 4:
sel_lnb_poweroff = dialog.select('Enable or disable turn off LNB when idle', list=enabledisable)
if sel_lnb_poweroff >= 0:
lnb_poweroff_sel = truefalse[sel_lnb_poweroff]
param_update = '"lnb_poweroff":' + lnb_poweroff_sel
if sel_param == 5:
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/input/network_list?uuid=' + adapter_uuid_sel
networks = requests.get(networks_url).json()
net_uuid = []
if networks['entries'] == []:
if dialog.yesno("No Networks found!", "", "Would you like to setup a new Network?"):
net_uuid_sel = network_new()
param_update = '"networks":["' + net_uuid_sel + '"]'
else:
net_key = []
net_val = []
net_dict = networks['entries']
for net_k in net_dict:
net_key.append(net_k['key'])
for net_v in net_dict:
net_val.append(net_v['val'])
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_uuid_list)]
sel_network = dialog.multiselect('Select which networks to assign to this adapter', options=net_val, preselect=net_preselect)
if sel_network == [] or sel_network == None:
lnb_param_load(adapter_uuid_sel)
else:
for sel in sel_network:
net_uuid.append(net_key[sel])
net_uuid_sel = '", "'.join(str(s) for s in net_uuid)
param_update = '"networks":["' + net_uuid_sel + '"]'
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + lnb_uuid + '"}'
param_save = requests.get(param_url)
lnb_param_load(adapter_uuid_sel)
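# Editor for a 2-port DiseqC switch: the common DiseqC options plus a network per port (A/B).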
def lnb_2port_param_edit(lnb_uuid, lnb_info_list, lnb_name, lnb_early_tune, lnb_diseqc_repeats, lnb_diseqc_full, lnb_poweroff, lnb_network_a, lnb_network_a_uuid_list, lnb_network_b, lnb_network_b_uuid_list, adapter_uuid_sel):
sel_param = dialog.select('LNB Configuration - Select parameter to edit', list=lnb_info_list)
if sel_param < 0:
adapt_param_load(adapter_uuid_sel)
if sel_param >= 0:
truefalse = ['true', 'false']
enabledisable = ['Enabled', 'Disabled']
param_update = ""
if sel_param == 1:
sel_lnb_early_tune = dialog.select('Enable or disable tune before DiseqC', list=enabledisable)
if sel_lnb_early_tune >= 0:
lnb_early_tune_sel = truefalse[sel_lnb_early_tune]
param_update = '"early_tune":' + lnb_early_tune_sel
if sel_param == 2:
sel_lnb_diseqc_repeats = dialog.input('Select the number of repeats for the DiseqC commands', defaultt=str(lnb_diseqc_repeats),type=xbmcgui.INPUT_NUMERIC)
if sel_lnb_diseqc_repeats == "":
sel_lnb_diseqc_repeats = lnb_diseqc_repeats
param_update = '"diseqc_repeat":"' + str(sel_lnb_diseqc_repeats) + '"'
if sel_param == 3:
sel_lnb_diseqc_full = dialog.select('Enable or disable to always send the whole DiseqC sequence', list=enabledisable)
if sel_lnb_diseqc_full >= 0:
lnb_diseqc_full_sel = truefalse[sel_lnb_diseqc_full]
param_update = '"diseqc_full":' + lnb_diseqc_full_sel
if sel_param == 4:
sel_lnb_poweroff = dialog.select('Enable or disable turn off LNB when idle', list=enabledisable)
if sel_lnb_poweroff >= 0:
lnb_poweroff_sel = truefalse[sel_lnb_poweroff]
param_update = '"lnb_poweroff":' + lnb_poweroff_sel
if sel_param >= 5:
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/input/network_list?uuid=' + adapter_uuid_sel
networks = requests.get(networks_url).json()
net_uuid = []
if networks['entries'] == []:
if dialog.yesno("No Networks found!", "", "Would you like to setup a new Network?"):
net_uuid_sel = network_new()
param_update = '"networks":["' + net_uuid_sel + '"]'
else:
net_key = []
net_val = []
net_dict = networks['entries']
for net_k in net_dict:
net_key.append(net_k['key'])
for net_v in net_dict:
net_val.append(net_v['val'])
if sel_param == 5:
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_a_uuid_list)]
if sel_param == 6:
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_b_uuid_list)]
sel_network = dialog.multiselect('Select which networks to assign to this adapter', options=net_val, preselect=net_preselect)
if sel_network == [] or sel_network == None:
lnb_param_load(adapter_uuid_sel)
else:
for sel in sel_network:
net_uuid.append(net_key[sel])
net_uuid_sel = '", "'.join(str(s) for s in net_uuid)
if sel_param == 5:
lnb_networks = 'network_a'
if sel_param == 6:
lnb_networks = 'network_b'
param_update = '"' + lnb_networks + '":["' + net_uuid_sel + '"]'
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + lnb_uuid + '"}'
param_save = requests.get(param_url)
lnb_param_load(adapter_uuid_sel)
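# Editor for a 4-port DiseqC switch: the common DiseqC options plus a network per port (AA/AB/BA/BB).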
def lnb_4port_param_edit(lnb_uuid, lnb_info_list, lnb_name, lnb_early_tune, lnb_diseqc_repeats, lnb_diseqc_full, lnb_poweroff, lnb_network_aa, lnb_network_aa_uuid_list, lnb_network_ab, lnb_network_ab_uuid_list, lnb_network_ba, lnb_network_ba_uuid_list, lnb_network_bb, lnb_network_bb_uuid_list, adapter_uuid_sel):
sel_param = dialog.select('LNB Configuration - Select parameter to edit', list=lnb_info_list)
if sel_param < 0:
adapt_param_load(adapter_uuid_sel)
if sel_param >= 0:
truefalse = ['true', 'false']
enabledisable = ['Enabled', 'Disabled']
param_update = ""
if sel_param == 1:
sel_lnb_early_tune = dialog.select('Enable or disable tune before DiseqC', list=enabledisable)
if sel_lnb_early_tune >= 0:
lnb_early_tune_sel = truefalse[sel_lnb_early_tune]
param_update = '"early_tune":' + lnb_early_tune_sel
if sel_param == 2:
sel_lnb_diseqc_repeats = dialog.input('Select the number of repeats for the DiseqC commands', defaultt=str(lnb_diseqc_repeats),type=xbmcgui.INPUT_NUMERIC)
if sel_lnb_diseqc_repeats == "":
sel_lnb_diseqc_repeats = lnb_diseqc_repeats
param_update = '"diseqc_repeat":"' + str(sel_lnb_diseqc_repeats) + '"'
if sel_param == 3:
sel_lnb_diseqc_full = dialog.select('Enable or disable to always send the whole DiseqC sequence', list=enabledisable)
if sel_lnb_diseqc_full >= 0:
lnb_diseqc_full_sel = truefalse[sel_lnb_diseqc_full]
param_update = '"diseqc_full":' + lnb_diseqc_full_sel
if sel_param == 4:
sel_lnb_poweroff = dialog.select('Enable or disable turn off LNB when idle', list=enabledisable)
if sel_lnb_poweroff >= 0:
lnb_poweroff_sel = truefalse[sel_lnb_poweroff]
param_update = '"lnb_poweroff":' + lnb_poweroff_sel
if sel_param >= 5:
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/input/network_list?uuid=' + adapter_uuid_sel
networks = requests.get(networks_url).json()
net_uuid = []
if networks['entries'] == []:
if dialog.yesno("No Networks found!", "", "Would you like to setup a new Network?"):
net_uuid_sel = network_new()
param_update = '"networks":["' + net_uuid_sel + '"]'
else:
net_key = []
net_val = []
net_dict = networks['entries']
for net_k in net_dict:
net_key.append(net_k['key'])
for net_v in net_dict:
net_val.append(net_v['val'])
if sel_param == 5:
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_aa_uuid_list)]
if sel_param == 6:
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_ab_uuid_list)]
if sel_param == 7:
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_ba_uuid_list)]
if sel_param == 8:
net_preselect = [i for i, item in enumerate(net_key) if item in set(lnb_network_bb_uuid_list)]
sel_network = dialog.multiselect('Select which networks to assign to this adapter', options=net_val, preselect=net_preselect)
if sel_network == [] or sel_network == None:
lnb_param_load(adapter_uuid_sel)
else:
for sel in sel_network:
net_uuid.append(net_key[sel])
net_uuid_sel = '", "'.join(str(s) for s in net_uuid)
if sel_param == 5:
lnb_networks = 'network_aa'
if sel_param == 6:
lnb_networks = 'network_ab'
if sel_param == 7:
lnb_networks = 'network_ba'
if sel_param == 8:
lnb_networks = 'network_bb'
param_update = '"' + lnb_networks + '":["' + net_uuid_sel + '"]'
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + lnb_uuid + '"}'
param_save = requests.get(param_url)
lnb_param_load(adapter_uuid_sel)
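# Parameter editor for DVB-S adapters; includes the satellite config selection and
# the LNB/switch sub-editor in addition to the common adapter options.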
def adapt_param_dvbsedit(adapter_uuid_sel, adapt_info_list, adapt_enabled, adapt_name, adapt_priority, adapt_otaepg, adapt_init, adapt_idle, adapt_satconf, adapt_satconf_key, adapt_satconf_val):
sel_param = dialog.select('Adapters Configuration - Select parameter to edit', list=adapt_info_list)
if sel_param < 0:
adapters()
if sel_param >= 0:
truefalse = ['true', 'false']
enabledisable = ['Enabled', 'Disabled']
param_update = ""
if sel_param == 0:
sel_adapt_name = dialog.input('Edit the adapter name', defaultt=adapt_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_adapt_name == "":
sel_adapt_name = adapt_name
param_update = '"displayname":"' + sel_adapt_name + '"'
if sel_param == 1:
sel_adapt_enabled = dialog.select('Enable or disable the adapter', list=enabledisable)
if sel_adapt_enabled >= 0:
adapt_enabled = truefalse[sel_adapt_enabled]
param_update = '"enabled":' + adapt_enabled
if sel_param == 2:
sel_adapt_satconf = dialog.select('Select the satellite configuration to use', list=adapt_satconf_val)
if sel_adapt_satconf >= 0:
adapt_satconf_sel = adapt_satconf_key[sel_adapt_satconf]
param_update = '"satconf":"' + str(adapt_satconf_sel) + '"'
if sel_param == 3:
lnb_param_load(adapter_uuid_sel)
if sel_param == 4:
sel_adapt_priority = dialog.input('Edit the adapter priority (higher used first)', defaultt=str(adapt_priority),type=xbmcgui.INPUT_NUMERIC)
if sel_adapt_priority == "":
sel_adapt_priority = adapt_priority
param_update = '"priority":"' + str(sel_adapt_priority) + '"'
if sel_param == 5:
sel_adapt_otaepg = dialog.select('Enable or disable OTA EPG scanning', list=enabledisable)
if sel_adapt_otaepg >= 0:
adapt_otaepg = truefalse[sel_adapt_otaepg]
param_update = '"ota_epg":' + adapt_otaepg
if sel_param == 6:
sel_adapt_init = dialog.select('Enable or disable initial startup scanning', list=enabledisable)
if sel_adapt_init >= 0:
adapt_init = truefalse[sel_adapt_init]
param_update = '"initscan":' + adapt_init
if sel_param == 7:
sel_adapt_idle = dialog.select('Enable or disable idle scanning', list=enabledisable)
if sel_adapt_idle >= 0:
adapt_idle = truefalse[sel_adapt_idle]
param_update = '"idlescan":' + adapt_idle
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + adapter_uuid_sel + '"}'
param_save = requests.get(param_url)
adapt_param_load(adapter_uuid_sel)
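# Parameter editor for non-DVB-S adapters; networks are assigned directly to the adapter.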
def adapt_param_edit(adapter_uuid_sel, adapt_info_list, adapt_enabled, adapt_name, adapt_network, adapt_network_uuid_list, adapt_priority, adapt_otaepg, adapt_init, adapt_idle):
sel_param = dialog.select('Adapters Configuration - Select parameter to edit', list=adapt_info_list)
if sel_param < 0:
adapters()
if sel_param >= 0:
truefalse = ['true', 'false']
enabledisable = ['Enabled', 'Disabled']
param_update = ""
if sel_param == 0:
sel_adapt_name = dialog.input('Edit the adapter name', defaultt=adapt_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_adapt_name == "":
sel_adapt_name = adapt_name
param_update = '"displayname":"' + sel_adapt_name + '"'
if sel_param == 1:
sel_adapt_enabled = dialog.select('Enable or disable the adapter', list=enabledisable)
if sel_adapt_enabled >= 0:
adapt_enabled = truefalse[sel_adapt_enabled]
param_update = '"enabled":' + adapt_enabled
if sel_param == 2:
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/input/network_list?uuid=' + adapter_uuid_sel
networks = requests.get(networks_url).json()
net_uuid = []
if networks['entries'] == []:
if dialog.yesno("No Networks found!", "", "Would you like to setup a new Network?"):
net_uuid_sel = network_new()
param_update = '"networks":["' + net_uuid_sel + '"]'
else:
net_key = []
net_val = []
net_dict = networks['entries']
for net_k in net_dict:
net_key.append(net_k['key'])
for net_v in net_dict:
net_val.append(net_v['val'])
net_preselect = [i for i, item in enumerate(net_key) if item in set(adapt_network_uuid_list)]
sel_network = dialog.multiselect('Select which networks to assign to this adapter', options=net_val, preselect=net_preselect)
if sel_network == [] or sel_network == None:
adapt_param_load(adapter_uuid_sel)
else:
for sel in sel_network:
net_uuid.append(net_key[sel])
net_uuid_sel = '", "'.join(str(s) for s in net_uuid)
param_update = '"networks":["' + net_uuid_sel + '"]'
if sel_param == 3:
sel_adapt_priority = dialog.input('Edit the adapter priority (higher used first)', defaultt=str(adapt_priority),type=xbmcgui.INPUT_NUMERIC)
if sel_adapt_priority == "":
sel_adapt_priority = adapt_priority
param_update = '"priority":"' + str(sel_adapt_priority) + '"'
if sel_param == 4:
sel_adapt_otaepg = dialog.select('Enable or disable OTA EPG scanning', list=enabledisable)
if sel_adapt_otaepg >= 0:
adapt_otaepg = truefalse[sel_adapt_otaepg]
param_update = '"ota_epg":' + adapt_otaepg
if sel_param == 5:
sel_adapt_init = dialog.select('Enable or disable initial startup scanning', list=enabledisable)
if sel_adapt_init >= 0:
adapt_init = truefalse[sel_adapt_init]
param_update = '"initscan":' + adapt_init
if sel_param == 6:
sel_adapt_idle = dialog.select('Enable or disable idle scanning', list=enabledisable)
if sel_adapt_idle >= 0:
adapt_idle = truefalse[sel_adapt_idle]
param_update = '"idlescan":' + adapt_idle
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + adapter_uuid_sel + '"}'
param_save = requests.get(param_url)
adapt_param_load(adapter_uuid_sel)
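# Load a conditional access client and build its parameter list from the idnode metadata,
# skipping hidden parameters and showing u16/u32 values as hex.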
def cas_param_load(cas_uuid_sel):
cas_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(cas_uuid_sel)
cas_load = requests.get(cas_url).json()
cas_param_list = ['DELETE CONDITIONAL ACCESS CLIENT']
cas_param_list_id = ['']
for param in cas_load['entries'][0]['params']:
cas_label = param['caption']
cas_id = param['id']
cas_param_enum = find_param_item(cas_load, cas_id, 'enum')
cas_type = param['type']
if cas_param_enum == "NO PARAMATER FOUND" or cas_param_enum == '':
try:
if cas_type == 'u16' or cas_type == 'u32':
cas_value = hex(param['value'])
else:
cas_value = param['value']
except:
cas_value = ''
else:
cas_value, cas_k, cas_v = find_param_dict(cas_load, cas_id, 'enum')
try:
cas_hidden = param['hidden']
except:
cas_hidden = 'false'
if cas_hidden == 'false':
param_list_add = cas_label + ': ' + str(cas_value)
cas_param_list.append(param_list_add)
cas_param_list_id.append(cas_id)
sel_param_title = cas_label + ' Client Configuration - Select parameter to edit'
sel_param = dialog.select(sel_param_title, list=cas_param_list)
if sel_param == 0:
        confirm_del = dialog.yesno('Confirm delete CA client', 'Are you sure you want to delete the ' + cas_label + ' client?')
if not confirm_del:
cas_param_load(cas_uuid_sel)
else:
delete_cas_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + cas_uuid_sel +'"]'
delete_cas = requests.get(delete_cas_url)
cas()
if sel_param > 0:
cas_param_sel = cas_param_list_id[sel_param]
cas_param_type = find_param_item(cas_load, cas_param_sel, 'type')
cas_param_desc = find_param_item(cas_load, cas_param_sel, 'description')
cas_param_value = find_param_item(cas_load, cas_param_sel, 'value')
if cas_param_type == 'bool':
sel_param_edit = dialog.select(cas_param_desc, list=enabledisable)
if sel_param_edit >= 0:
param_edit_sel = truefalse[sel_param_edit]
param_update = '"' + cas_param_sel + '":' + param_edit_sel
if cas_param_type == 'int':
cas_param_enum = find_param_item(cas_load, cas_param_sel, 'enum')
if cas_param_enum == '':
sel_param_edit = dialog.input(cas_param_desc, defaultt=str(cas_param_value),type=xbmcgui.INPUT_NUMERIC)
                if sel_param_edit != "":
param_update = '"' + cas_param_sel + '":"' + str(sel_param_edit) + '"'
else:
cas_param_value, cas_param_enum_key, cas_param_enum_value = find_param_dict(cas_load, cas_param_sel, 'enum')
sel_param_edit = dialog.select(cas_param_desc, list=cas_param_enum_value)
if sel_param_edit <0:
return
if sel_param_edit >= 0:
param_edit_sel = cas_param_enum_key[sel_param_edit]
param_update = '"' + cas_param_sel + '":"' + str(param_edit_sel) + '"'
if cas_param_type == 'str':
sel_param_edit = dialog.input(cas_param_desc, defaultt=str(cas_param_value),type=xbmcgui.INPUT_ALPHANUM)
if sel_param_edit == '':
param_update = ""
else:
param_update = '"' + cas_param_sel + '":"' + sel_param_edit + '"'
if cas_param_type == 'u16' or cas_param_type == 'u32':
sel_param_edit = dialog.input(cas_param_desc, defaultt=hex(cas_param_value),type=xbmcgui.INPUT_ALPHANUM)
if sel_param_edit == '':
param_update = ""
else:
sel_param_edit_hex = int(sel_param_edit, 0)
param_update = '"' + cas_param_sel + '":"' + str(sel_param_edit_hex) + '"'
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + cas_uuid_sel + '"}'
param_save = requests.get(param_url)
cas_param_load(cas_uuid_sel)
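# Create a new conditional access client: pick a builder, seed the config with each
# property's default, ask for a name and create the client, returning its uuid.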
def cas_new():
cas_new_url = 'http://' + tvh_url + ':' + tvh_port + '/api/caclient/builders'
cas_new_load = requests.get(cas_new_url).json()
cas_new_list = []
for cas in cas_new_load['entries']:
cas_new_list.append(cas['caption'])
sel_cas_new = dialog.select('Select a conditional access client type', list=cas_new_list)
if sel_cas_new >= 0:
cas_conf_dict = {}
cas_new_class = cas_new_load['entries'][sel_cas_new]['class']
for prop in cas_new_load['entries'][sel_cas_new]['props']:
cas_new_id = prop['id']
if cas_new_id != 'name':
cas_new_default = prop['default']
try:
cas_new_hidden = prop['hidden']
except:
cas_new_hidden = 'false'
if cas_new_hidden == 'false':
cas_conf_dict[cas_new_id] = cas_new_default
cas_new_name = dialog.input('Name of the CA Client', type=xbmcgui.INPUT_ALPHANUM)
if cas_new_name != '':
cas_conf_dict['name'] = cas_new_name
else:
dialog.ok('Client Name Required!', 'You must enter a client name to create a new conditional access client.')
            return cas_new()
cas_conf = json.dumps(cas_conf_dict)
cas_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/caclient/create?class=' + cas_new_class + '&conf=' + cas_conf
cas_create_load = requests.get(cas_create_url).json()
cas_new_uuid = cas_create_load['uuid']
return (cas_new_uuid)
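# Create a new network: DVB types pick a pre-defined mux list, IPTV types take a URL
# and channel numbering, ISDB types are not supported. Returns the new network uuid.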
def network_new():
net_type_name = ["ATSC-T","ATSC-C","DVB-S","DVB-C","DVB-T","IPTV Automatic","IPTV Network","ISDB-S","ISDB-C","ISDB-T"]
net_type_class = ["dvb_network_atsc_t","dvb_network_atsc_c","dvb_network_dvbs","dvb_network_dvbc","dvb_network_dvbt","iptv_auto_network","iptv_network","dvb_network_isdb_s","dvb_network_isdb_c","dvb_network_isdb_t"]
sel_net_type = dialog.select('Select a network type to create', list=net_type_name)
if sel_net_type < 0:
net_uuid_sel = ""
return net_uuid_sel
if sel_net_type >= 0 and sel_net_type <= 4:
net_type = net_type_name[sel_net_type]
net_class = net_type_class[sel_net_type]
new_net_name = dialog.input('Name of the network', defaultt=net_type,type=xbmcgui.INPUT_ALPHANUM)
if new_net_name == "":
new_net_name = net_type
        dvb_list_url = 'http://' + tvh_url + ':' + tvh_port + '/api/dvb/scanfile/list?type=' + net_type.lower().replace("dvb-", "dvb")
dvb_list = requests.get(dvb_list_url).json()
scan_key = []
scan_val = []
for scan_k in dvb_list['entries']:
scan_key.append(scan_k['key'])
for scan_v in dvb_list['entries']:
scan_val.append(scan_v['val'])
sel_scan = dialog.select('Select a pre-defined mux list for the ' + new_net_name + " network", list=scan_val)
scan_val_sel = scan_key[sel_scan]
net_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/create?class=' + net_class + '&conf={"networkname":"' + new_net_name + '","bouquet":false,"scanfile":"' + scan_val_sel + '","pnetworkname":"","nid":0,"autodiscovery":1,"ignore_chnum":false,"satip_source":0,"charset":""}'
net_create = requests.get(net_create_url).json()
net_uuid_sel = net_create['uuid']
return net_uuid_sel
if sel_net_type == 5:
net_type = net_type_name[sel_net_type]
net_class = net_type_class[sel_net_type]
new_net_name = dialog.input('Name of the network', defaultt=net_type,type=xbmcgui.INPUT_ALPHANUM)
if new_net_name == "":
new_net_name = net_type
new_net_url = dialog.input('URL of the network', defaultt="http://",type=xbmcgui.INPUT_ALPHANUM)
new_net_channel_number = dialog.input('Start Channel Numbers From', defaultt="",type=xbmcgui.INPUT_NUMERIC)
net_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/create?class=' + net_class + '&conf={"networkname":"' + new_net_name + '","bouquet":false,"url":"' + new_net_url + '","channel_number":"' + str(new_net_channel_number) + '"}'
net_create = requests.get(net_create_url).json()
net_uuid_sel = net_create['uuid']
return net_uuid_sel
if sel_net_type == 6:
net_type = net_type_name[sel_net_type]
net_class = net_type_class[sel_net_type]
new_net_name = dialog.input('Name of the network', defaultt=net_type,type=xbmcgui.INPUT_ALPHANUM)
if new_net_name == "":
new_net_name = net_type
net_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/create?class=' + net_class + '&conf={"networkname":"' + new_net_name + '","bouquet":false}'
net_create = requests.get(net_create_url).json()
net_uuid_sel = net_create['uuid']
return net_uuid_sel
if sel_net_type >= 7:
dialog.ok("Network Not Supported!", "ISDB Networks are currently not supported in this addon.", "Please use the Tvheadend web interface to configure ISDB Networks.")
net_uuid_sel = ""
return net_uuid_sel
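# Load a network's settings, derive mux/service/channel counts and dispatch to the
# matching editor (IPTV automatic, IPTV, DVB-S or generic).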
def net_param_load(net_uuid_sel):
net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + str(net_uuid_sel)
net_load = requests.get(net_url).json()
net_name = find_param(net_load, 'networkname')
net_bouquet = find_param(net_load, 'bouquet')
net_class = net_load['entries'][0]['class']
    net_type = re.sub("dvb_network_","",net_class).replace("_","-")
net_num_mux = find_param(net_load, 'num_mux')
net_num_svc = find_param(net_load, 'num_svc')
net_num_ch = find_param(net_load, 'num_chn')
netiptv_max_streams = find_param(net_load, 'max_streams')
netiptv_max_bandwidth = find_param(net_load, 'max_bandwidth')
netiptv_url = find_param(net_load, 'url')
netiptv_channel_number = find_param(net_load, 'channel_number')
netiptv_tsid_zero = find_param(net_load, 'tsid_zero')
net_discovery = find_param(net_load, 'autodiscovery')
net_discovery_list = ['Disable', 'New muxes only', 'New muxes + changed muxes']
net_orbital = find_param(net_load, 'orbital_pos')
if net_class == 'dvb_network_dvbs':
net_orbital_url = 'http://' + tvh_url + ':' + tvh_port + '/api/dvb/orbitalpos/list'
net_orbital_load = requests.get(net_orbital_url).json()
net_orbital_fulllist = net_orbital_load['entries']
net_orbital_dict = {}
net_orbital_list = []
for item in net_orbital_fulllist:
short = item['key']
net_orbital_dict[short] = item['val']
net_orbital_list.append(item['val'])
net_orbital_long = net_orbital_dict.get(net_orbital)
if net_num_svc == 0 and net_num_mux == 0:
net_num_svc_disp = "0 - add muxes before scanning for services"
elif net_num_mux != 0 and net_num_svc == 0:
net_num_svc_disp = "0 - select to scan muxes for services"
else:
net_num_svc_disp = net_num_svc
if net_num_mux == 0:
if net_class != "iptv_auto_network" and net_class != "iptv_network":
net_num_mux_disp = "0 - select from list of pre-defined muxes"
if net_class == "iptv_auto_network" or net_class == "iptv_network":
net_num_mux_disp = "0 - select to add muxes"
else:
net_num_mux_disp = net_num_mux
if net_class == "iptv_auto_network":
net_info_list = ["Name: " + net_name, "Create bouquet: " + str(net_bouquet), "URL: " + netiptv_url, "Max number of input streams: " + str(netiptv_max_streams), "Max bandwidth (Kbps): " + str(netiptv_max_bandwidth), "Channel numbers from: " + str(netiptv_channel_number), "Accept zero value for TSID: " + str(netiptv_tsid_zero), "Number of channels: " + str(net_num_ch), "Number of muxes: " + str(net_num_mux_disp), "Number of services: " + str(net_num_svc_disp), "DELETE THE NETWORK"]
netiptvauto_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, netiptv_url, netiptv_max_streams, netiptv_max_bandwidth, netiptv_channel_number, netiptv_tsid_zero)
elif net_class == "iptv_network":
net_info_list = ["Name: " + net_name, "Create bouquet: " + str(net_bouquet), "Max number of input streams: " + str(netiptv_max_streams), "Max bandwidth (Kbps): " + str(netiptv_max_bandwidth), "Number of muxes: " + str(net_num_mux_disp), "Number of services: " + str(net_num_svc_disp), "Number of channels: " + str(net_num_ch), "DELETE THE NETWORK"]
netiptv_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, netiptv_max_streams, netiptv_max_bandwidth)
elif net_class == 'dvb_network_dvbs':
net_info_list = ["Name: " + net_name, "Create bouquet: " + str(net_bouquet), "Orbital position: " + str(net_orbital_long), "Network discovery: " + net_discovery_list[net_discovery], "Number of muxes: " + str(net_num_mux_disp), "Number of services: " + str(net_num_svc_disp), "Number of channels: " + str(net_num_ch), "DELETE THE NETWORK"]
netdvbs_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, net_discovery, net_discovery_list, net_orbital, net_orbital_list)
else:
net_info_list = ["Name: " + net_name, "Create bouquet: " + str(net_bouquet), "Network discovery: " + net_discovery_list[net_discovery], "Number of muxes: " + str(net_num_mux_disp), "Number of services: " + str(net_num_svc_disp), "Number of channels: " + str(net_num_ch), "DELETE THE NETWORK"]
net_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, net_discovery, net_discovery_list)
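# Parameter editor for DVB-S networks; adds orbital position on top of the generic options.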
def netdvbs_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, net_discovery, net_discovery_list, net_orbital, net_orbital_list):
sel_param = dialog.select('Network Configuration - Select parameter to edit', list=net_info_list)
if sel_param < 0:
networks()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_net_name = dialog.input('Edit the network name', defaultt=net_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_net_name == "":
sel_net_name = net_name
param_update = '"networkname":"' + sel_net_name + '"'
if sel_param == 1:
sel_net_bouquet = dialog.select('Enable or disable to automatically create a bouquet from all services', list=enabledisable)
if sel_net_bouquet >= 0:
net_bouquet_enabled = truefalse[sel_net_bouquet]
param_update = '"bouquet":' + net_bouquet_enabled
if sel_param == 2:
sel_net_orbital = dialog.select('Select the orbital position of the satellite for your dish', list=net_orbital_list)
if sel_net_orbital >= 0:
net_orbital_sel = net_orbital_list[sel_net_orbital]
net_orbital_sel_short = net_orbital_sel.split(' ')[0]
param_update = '"orbital_pos":"' + net_orbital_sel_short + '"'
if sel_param == 3:
sel_net_discovery = dialog.select('Select the type of network discovery for muxes', list=net_discovery_list)
if sel_net_discovery >= 0:
param_update = '"autodiscovery":' + str(sel_net_discovery)
if sel_param == 4 and net_num_mux != 0:
muxes_load(net_uuid_sel)
if sel_param == 4 and net_num_mux == 0:
dvb_list_url = 'http://' + tvh_url + ':' + tvh_port + '/api/dvb/scanfile/list?type=' + net_type
dvb_list = requests.get(dvb_list_url).json()
scan_key = []
scan_val = []
for scan_k in dvb_list['entries']:
scan_key.append(scan_k['key'])
for scan_v in dvb_list['entries']:
scan_val.append(scan_v['val'])
sel_scan = dialog.select('Select a pre-defined mux list for the ' + net_name + " network", list=scan_val)
scan_val_sel = scan_key[sel_scan]
param_update = '"scanfile":"' + scan_val_sel + '"'
if sel_param == 5 and net_num_mux != 0 and net_num_svc != 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 5 and net_num_mux == 0:
dialog.ok("No services found!", "Add muxes before scanning for services.")
if sel_param == 5 and net_num_mux != 0 and net_num_svc == 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 6 and net_num_svc != 0 and net_num_ch == 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to map services to channels?"):
services()
if sel_param == 6 and net_num_svc != 0 and net_num_ch != 0:
channels()
if sel_param == 6 and net_num_svc == 0 and net_num_mux != 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 6 and net_num_mux == 0:
dialog.ok("No muxes found!", "Add muxes before scanning for services and mapping channels.")
if sel_param == 7:
            confirm_del = dialog.yesno('Confirm delete network', 'Are you sure you want to delete the ' + net_name + ' network?')
if not confirm_del:
return
delete_net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + net_uuid_sel +'"]'
delete_net = requests.get(delete_net_url)
networks()
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + net_uuid_sel + '"}'
param_save = requests.get(param_url)
net_param_load(net_uuid_sel)
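# Generic network parameter editor for DVB-T/C and ATSC networks.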
def net_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, net_discovery, net_discovery_list):
sel_param = dialog.select('Network Configuration - Select parameter to edit', list=net_info_list)
if sel_param < 0:
networks()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_net_name = dialog.input('Edit the network name', defaultt=net_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_net_name == "":
sel_net_name = net_name
param_update = '"networkname":"' + sel_net_name + '"'
if sel_param == 1:
sel_net_bouquet = dialog.select('Enable or disable to automatically create a bouquet from all services', list=enabledisable)
if sel_net_bouquet >= 0:
net_bouquet_enabled = truefalse[sel_net_bouquet]
param_update = '"bouquet":' + net_bouquet_enabled
if sel_param == 2:
sel_net_discovery = dialog.select('Select the type of network discovery for muxes', list=net_discovery_list)
if sel_net_discovery >= 0:
param_update = '"autodiscovery":' + str(sel_net_discovery)
if sel_param == 3 and net_num_mux != 0:
muxes_load(net_uuid_sel)
if sel_param == 3 and net_num_mux == 0:
dvb_list_url = 'http://' + tvh_url + ':' + tvh_port + '/api/dvb/scanfile/list?type=' + net_type
dvb_list = requests.get(dvb_list_url).json()
scan_key = []
scan_val = []
for scan_k in dvb_list['entries']:
scan_key.append(scan_k['key'])
for scan_v in dvb_list['entries']:
scan_val.append(scan_v['val'])
sel_scan = dialog.select('Select a pre-defined mux list for the ' + net_name + " network", list=scan_val)
scan_val_sel = scan_key[sel_scan]
param_update = '"scanfile":"' + scan_val_sel + '"'
if sel_param == 4 and net_num_mux != 0 and net_num_svc != 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 4 and net_num_mux == 0:
dialog.ok("No services found!", "Add muxes before scanning for services.")
if sel_param == 4 and net_num_mux != 0 and net_num_svc == 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 5 and net_num_svc != 0 and net_num_ch == 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to map services to channels?"):
services()
if sel_param == 5 and net_num_svc != 0 and net_num_ch != 0:
channels()
if sel_param == 5 and net_num_svc == 0 and net_num_mux != 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 5 and net_num_mux == 0:
dialog.ok("No muxes found!", "Add muxes before scanning for services and mapping channels.")
if sel_param == 6:
            confirm_del = dialog.yesno('Confirm delete network', 'Are you sure you want to delete the ' + net_name + ' network?')
if not confirm_del:
return
delete_net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + net_uuid_sel +'"]'
delete_net = requests.get(delete_net_url)
networks()
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + net_uuid_sel + '"}'
param_save = requests.get(param_url)
net_param_load(net_uuid_sel)
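# Parameter editor for automatic IPTV networks: playlist URL, stream and bandwidth limits,
# channel numbering and TSID handling.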
def netiptvauto_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, netiptv_url, netiptv_max_streams, netiptv_max_bandwidth, netiptv_channel_number, netiptv_tsid_zero):
sel_param = dialog.select('Network Configuration - Select parameter to edit', list=net_info_list)
if sel_param < 0:
networks()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_net_name = dialog.input('Edit the network name', defaultt=net_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_net_name == "":
sel_net_name = net_name
param_update = '"networkname":"' + sel_net_name + '"'
if sel_param == 1:
sel_net_bouquet = dialog.select('Enable or disable to automatically create a bouquet from all services', list=enabledisable)
if sel_net_bouquet >= 0:
net_bouquet_enabled = truefalse[sel_net_bouquet]
param_update = '"bouquet":' + net_bouquet_enabled
if sel_param == 2:
sel_netiptv_url = dialog.input('Edit the network URL', defaultt=netiptv_url,type=xbmcgui.INPUT_ALPHANUM)
if sel_netiptv_url == "":
sel_netiptv_url = netiptv_url
param_update = '"url":"' + sel_netiptv_url + '"'
if sel_param == 3:
sel_netiptv_max_streams = dialog.input('Set the max number of input streams for the network', defaultt=str(netiptv_max_streams),type=xbmcgui.INPUT_NUMERIC)
if sel_netiptv_max_streams == "":
sel_netiptv_max_streams = netiptv_max_streams
param_update = '"max_streams":"' + str(sel_netiptv_max_streams) + '"'
if sel_param == 4:
sel_netiptv_max_bandwidth = dialog.input('Set the max bandwidth for the network', defaultt=str(netiptv_max_bandwidth),type=xbmcgui.INPUT_NUMERIC)
if sel_netiptv_max_bandwidth == "":
sel_netiptv_max_bandwidth = netiptv_max_bandwidth
param_update = '"max_bandwidth":"' + str(sel_netiptv_max_bandwidth) + '"'
if sel_param == 5:
sel_netiptv_channel_number = dialog.input('Set the lowest (starting) channel number', defaultt=str(netiptv_channel_number),type=xbmcgui.INPUT_NUMERIC)
if sel_netiptv_channel_number == "":
sel_netiptv_channel_number = netiptv_channel_number
param_update = '"channel_number":"' + str(sel_netiptv_channel_number) + '"'
if sel_param == 6:
sel_netiptv_tsid_zero = dialog.select('Enable or disable to accept a zero value for TSID', list=enabledisable)
if sel_netiptv_tsid_zero >= 0:
netiptv_tsid_zero_enabled = truefalse[sel_netiptv_tsid_zero]
param_update = '"tsid_zero":' + netiptv_tsid_zero_enabled
if sel_param == 7 and net_num_mux != 0:
muxes_load(net_uuid_sel)
if sel_param == 7 and net_num_mux == 0:
if dialog.yesno("No muxes found!", "Would you like to edit muxes?"):
mux_new_iptv(net_uuid_sel)
if sel_param == 8 and net_num_mux != 0 and net_num_svc != 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 8 and net_num_mux == 0:
dialog.ok("No muxes found!", "Add muxes before scanning for services.")
if sel_param == 8 and net_num_mux != 0 and net_num_svc == 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 9 and net_num_svc != 0 and net_num_ch == 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to map services to channels?"):
services()
if sel_param == 9 and net_num_svc != 0 and net_num_ch != 0:
channels()
if sel_param == 9 and net_num_svc == 0 and net_num_mux != 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 9 and net_num_mux == 0:
dialog.ok("No muxes found!", "Add muxes before scanning for services and mapping channels.")
if sel_param == 10:
            confirm_del = dialog.yesno('Confirm delete network', 'Are you sure you want to delete the ' + net_name + ' network?')
if not confirm_del:
return
delete_net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + net_uuid_sel +'"]'
delete_net = requests.get(delete_net_url)
networks()
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + net_uuid_sel + '"}'
param_save = requests.get(param_url)
net_param_load(net_uuid_sel)
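# Parameter editor for plain IPTV networks.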
def netiptv_param_edit(net_uuid_sel, net_info_list, net_name, net_bouquet, net_type, net_num_mux, net_num_svc, net_num_ch, netiptv_max_streams, netiptv_max_bandwidth):
sel_param = dialog.select('Network Configuration - Select parameter to edit', list=net_info_list)
if sel_param < 0:
networks()
if sel_param >= 0:
param_update = ""
if sel_param == 0:
sel_net_name = dialog.input('Edit the network name', defaultt=net_name,type=xbmcgui.INPUT_ALPHANUM)
if sel_net_name == "":
sel_net_name = net_name
param_update = '"networkname":"' + sel_net_name + '"'
if sel_param == 1:
sel_net_bouquet = dialog.select('Enable or disable to automatically create a bouquet from all services', list=enabledisable)
if sel_net_bouquet >= 0:
net_bouquet_enabled = truefalse[sel_net_bouquet]
param_update = '"bouquet":' + net_bouquet_enabled
if sel_param == 2:
sel_netiptv_max_streams = dialog.input('Set the max number of input streams for the network', defaultt=str(netiptv_max_streams),type=xbmcgui.INPUT_NUMERIC)
if sel_netiptv_max_streams == "":
sel_netiptv_max_streams = netiptv_max_streams
param_update = '"max_streams":"' + str(sel_netiptv_max_streams) + '"'
if sel_param == 3:
sel_netiptv_max_bandwidth = dialog.input('Set the max bandwidth for the network', defaultt=str(netiptv_max_bandwidth),type=xbmcgui.INPUT_NUMERIC)
if sel_netiptv_max_bandwidth == "":
sel_netiptv_max_bandwidth = netiptv_max_bandwidth
param_update = '"max_bandwidth":"' + str(sel_netiptv_max_bandwidth) + '"'
if sel_param == 4 and net_num_mux != 0:
muxes_load(net_uuid_sel)
if sel_param == 4 and net_num_mux == 0:
if dialog.yesno("No muxes found!", "Would you like to create a new mux?"):
mux_new_iptv(net_uuid_sel)
if sel_param == 5 and net_num_mux != 0 and net_num_svc != 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 5 and net_num_mux == 0:
dialog.ok("No muxes found!", "Add muxes before scanning for services.")
if sel_param == 5 and net_num_mux != 0 and net_num_svc == 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 6 and net_num_svc != 0 and net_num_ch == 0:
if dialog.yesno(str(net_num_svc) + " services found!", "Would you like to map services to channels?"):
services()
if sel_param == 6 and net_num_svc != 0 and net_num_ch != 0:
channels()
if sel_param == 6 and net_num_svc == 0 and net_num_mux != 0:
if dialog.yesno("No services found!", "Would you like to scan muxes for new services?"):
start_scan(net_uuid_sel)
if sel_param == 6 and net_num_mux == 0:
dialog.ok("No muxes found!", "Add muxes before scanning for services and mapping channels.")
if sel_param == 7:
            confirm_del = dialog.yesno('Confirm delete network', 'Are you sure you want to delete the ' + net_name + ' network?')
if not confirm_del:
return
delete_net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/delete?uuid=["' + net_uuid_sel +'"]'
delete_net = requests.get(delete_net_url)
networks()
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={' + param_update + ',"uuid":"' + net_uuid_sel + '"}'
param_save = requests.get(param_url)
net_param_load(net_uuid_sel)
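# Load the service mapper options and their current values before mapping services to channels.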
def services_param_load():
services_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/load'
services_load = requests.get(services_url).json()
services_opt_name = ['MAP ALL SERVICES TO CHANNELS', 'MAP SELECTED SERVICES TO CHANNELS']
services_opt_id = ['','']
services_opt_label = ['','']
services_node = {}
for param in services_load['entries'][0]['params']:
serv_id = param['id']
if serv_id != 'services':
services_opt_id.append(serv_id)
serv_label = param['caption']
services_opt_label.append(serv_label)
serv_value_orig = param['value']
if serv_value_orig == True:
serv_value = 'Enabled'
else:
serv_value = 'Disabled'
serv_list_add = serv_label + ': ' + serv_value
services_opt_name.append(serv_list_add)
services_node[serv_id] = serv_value_orig
services_param_edit(services_opt_name, services_opt_id, services_opt_label, services_node)
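# Map all services or a selected subset to channels via the service mapper, or toggle one
# of the mapper options; results are read back from the mapper status endpoint.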
def services_param_edit(services_opt_name, services_opt_id, services_opt_label, services_node):
sel_param = dialog.select('Edit Options or Select to Map Services', list=services_opt_name)
if sel_param == 0:
if dialog.yesno("Map ALL Services to Channels", "Are you sure you want to map all services to channels?"):
serv_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/service/grid?limit=999999999&sort=multiplex'
services = requests.get(serv_url).json()
serv_total = services['total']
serv_uuid = []
for serv_id in services['entries']:
if serv_id['channel'] == []:
serv_uuid.append(serv_id['uuid'])
serv_uuid_str = str(serv_uuid)
serv_uuid_str = re.sub("u\'","\"",serv_uuid_str)
serv_uuid_str = re.sub("\'","\"",serv_uuid_str)
serv_node_str = json.dumps(services_node)
serv_node_str = re.sub('{','',serv_node_str)
map_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/save?node={"services":' + serv_uuid_str + ',' + serv_node_str
map_ch = requests.get(map_url)
status_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/status'
time.sleep(3)
map_status = requests.get(status_url).json()
map_total_num = map_status['total']
map_ok_num = map_status['ok']
map_fail_num = map_status['fail']
map_ignore_num = map_status['ignore']
map_complete = (map_ok_num + map_fail_num + map_ignore_num)
map_total_perc = ((float(map_complete) / float(serv_total)) * 100)
dialog.ok("Channel mapping complete.", str(map_ok_num) + " new channels added.", str(map_ignore_num) + " services ignored.", str(map_fail_num) + " services failed.")
if sel_param == 1:
serv_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/service/grid?limit=999999999&sort=multiplex'
services = requests.get(serv_url).json()
serv_total = services['total']
services_uuid = []
services_list = []
services_chan = []
services_sel = []
for serv in services['entries']:
try:
serv_name = serv['svcname']
except:
serv_name = 'NO SERVICE NAME'
serv_uuid = serv['uuid']
serv_multiplex = serv['multiplex']
serv_network = serv['network']
serv_channel = serv['channel']
services_uuid.append(serv_uuid)
if serv_channel != []:
services_chan.append(serv_uuid)
serv_list = serv_network + ' / ' + serv_multiplex + ' / ' + serv_name + ' ** MAPPED **'
else:
serv_list = serv_network + ' / ' + serv_multiplex + ' / ' + serv_name
services_list.append(serv_list)
serv_preselect = [i for i, item in enumerate(services_uuid) if item not in set(services_chan)]
sel_service = dialog.multiselect('Select which services to map to channels', options=services_list, preselect=serv_preselect)
if sel_service == [] or sel_service == None:
services_param_load()
else:
for sel in sel_service:
services_sel.append(services_uuid[sel])
services_node['services'] = services_sel
serv_node_str = json.dumps(services_node)
serv_update_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/save?node=' + serv_node_str
serv_update_load = requests.get(serv_update_url)
status_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/status'
time.sleep(3)
map_status = requests.get(status_url).json()
map_total_num = map_status['total']
map_ok_num = map_status['ok']
map_fail_num = map_status['fail']
map_ignore_num = map_status['ignore']
map_complete = (map_ok_num + map_fail_num + map_ignore_num)
map_total_perc = ((float(map_complete) / float(serv_total)) * 100)
dialog.ok("Channel mapping complete.", str(map_ok_num) + " new channels added.", str(map_ignore_num) + " services ignored.", str(map_fail_num) + " services failed.")
if sel_param > 1:
serv_param_name = services_opt_label[sel_param]
serv_param_id = services_opt_id[sel_param]
serv_param_desc = serv_param_name + ': Select to Enable/Disable'
sel_param_edit = dialog.select(serv_param_desc, list=enabledisable)
if sel_param_edit >= 0:
if sel_param_edit == 0:
services_node[serv_param_id] = True
if sel_param_edit == 1:
services_node[serv_param_id] = False
services_node['services'] = ''
serv_node_str = json.dumps(services_node)
serv_update_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/save?node=' + serv_node_str
serv_update_load = requests.get(serv_update_url)
services_param_load()
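# Trigger a mux scan for the given network and show progress until the scan queue empties;
# cancelling resets the scan state on every mux.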
def start_scan(net_uuid_sel):
adapters_url = 'http://' + tvh_url + ':' + tvh_port + '/api/hardware/tree?uuid=root'
adapters_get = requests.get(adapters_url).json()
if adapters_get == []:
dialog.ok("No adapters found!", "Please make sure your TV adapter is connected.")
return
scan_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/scan?uuid=' + net_uuid_sel
update_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/grid'
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/mux/grid'
stream_url = 'http://' + tvh_url + ':' + tvh_port + '/api/status/inputs'
mux_list_get = requests.get(mux_url).json()
mux_list = [x['uuid'] for x in mux_list_get['entries']]
pDialog = xbmcgui.DialogProgress()
pDialog.create('Scanning muxes for new services')
scan = requests.get(scan_url).json()
time.sleep(1)
update = requests.get(update_url).json()
update_scan = [x['scanq_length'] for x in update['entries'] if x['uuid'] == net_uuid_sel]
update_scan_num = update_scan[0]
update_mux = [x['num_mux'] for x in update['entries'] if x['uuid'] == net_uuid_sel]
update_mux_num = update_mux[0]
orig_serv = [x['num_svc'] for x in update['entries'] if x['uuid'] == net_uuid_sel]
orig_serv_num = orig_serv[0]
while update_scan_num > 0:
update = requests.get(update_url).json()
update_scan = [x['scanq_length'] for x in update['entries'] if x['uuid'] == net_uuid_sel]
update_scan_num = update_scan[0]
update_serv = [x['num_svc'] for x in update['entries'] if x['uuid'] == net_uuid_sel]
update_serv_num = (update_serv[0] - orig_serv_num)
update_scan_perc = 100 - ((float(update_scan_num) / float(update_mux_num)) * 100)
update_stream = requests.get(stream_url).json()
stream_freq_list = []
stream_freq_list = [x.get('stream') for x in update_stream['entries']]
stream_freq = ' & '.join(str(s) for s in stream_freq_list)
pDialog.update(int(update_scan_perc), "Scanning: " + str(stream_freq), "New services found: " + str(update_serv_num))
time.sleep(1)
if (pDialog.iscanceled()):
mux_list_str = str(mux_list)
mux_list_str = re.sub("u\'","\"",mux_list_str)
mux_list_str = re.sub("\'","\"",mux_list_str)
mux_stop_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={"scan_state":0,"uuid":' + mux_list_str + '}'
mux_stop = requests.get(mux_stop_url)
dialog.ok('Scanning muxes cancelled', 'New services will not be mapped.')
return
pDialog.close()
if update_serv_num == 0:
dialog.ok('Scanning complete.', "New services found: " + str(update_serv_num), "There are no new services to map to channels.")
return
goto_map = dialog.yesno('Scanning complete.', "New services found: " + str(update_serv_num), "Would you like to continue and map new services to channels?")
if not goto_map:
return
services()
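

# wizard_start(): one-shot initial setup. It picks a tuner, derives the
# network class from the adapter name (ATSC-T/C, DVB-S/T/C), creates or
# reuses a matching network with a pre-defined mux list, attaches the
# network to the adapter, scans all muxes (same polling loop as start_scan)
# and finally maps every unmapped service to a channel via
# /api/service/mapper/save.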
def wizard_start():
adapters_url = 'http://' + tvh_url + ':' + tvh_port + '/api/hardware/tree?uuid=root'
adapters_get = requests.get(adapters_url).json()
if adapters_get == []:
dialog.ok("No adapters found!", "Please make sure your TV adapter is connected.")
return
adapters_uuid = []
for adapter_fe in adapters_get:
adapters_uuid.append(adapter_fe['uuid'])
adapter_uuid = []
adapter_list = []
for adapter_y in adapters_uuid:
adapter_url = 'http://' + tvh_url + ':' + tvh_port + '/api/hardware/tree?uuid=' + adapter_y
adapter_get = requests.get(adapter_url).json()
for adapter_x in adapter_get:
adapter_uuid.append(adapter_x['uuid'])
for adapter_t in adapter_get:
adapter_list.append(adapter_t['text'])
sel_adapter = dialog.select('Select which adapter you would like to setup first', list=adapter_list)
if sel_adapter >= 0:
adapter_uuid_sel = adapter_uuid[sel_adapter]
adapter_text_sel = adapter_list[sel_adapter]
adapt_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/load?uuid=' + adapter_uuid_sel
adapt_load = requests.get(adapt_url).json()
adapt_network = find_param(adapt_load, 'networks')
net_create = ""
if adapt_network == []:
if "ATSC-T" in adapter_text_sel:
net_class = "dvb_network_atsc_t"
scanfile_type = "atsc-t"
net_name_create = "ATSC-T"
if "ATSC-C" in adapter_text_sel:
net_class = "dvb_network_atsc_c"
scanfile_type = "atsc-c"
net_name_create = "ATSC-C"
if "DVB-S" in adapter_text_sel:
net_class = "dvb_network_dvbs"
scanfile_type = "dvbs"
net_name_create = "DVB-S"
if "DVB-T" in adapter_text_sel:
net_class = "dvb_network_dvbt"
scanfile_type = "dvbt"
net_name_create = "DVB-T"
if "DVB-C" in adapter_text_sel:
net_class = "dvb_network_dvbc"
scanfile_type = "dvbc"
net_name_create = "DVB-C"
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/grid'
networks = requests.get(networks_url).json()
net_name = []
net_uuid = []
for net_n in networks['entries']:
net_name.append(net_n['networkname'])
for net_u in networks['entries']:
net_uuid.append(net_u['uuid'])
if not any (net_name_create in s for s in net_name):
dvb_list_url = 'http://' + tvh_url + ':' + tvh_port + '/api/dvb/scanfile/list?type=' + scanfile_type
dvb_list = requests.get(dvb_list_url).json()
scan_key = []
scan_val = []
for scan_k in dvb_list['entries']:
scan_key.append(scan_k['key'])
for scan_v in dvb_list['entries']:
scan_val.append(scan_v['val'])
sel_scan = dialog.select('Select a pre-defined mux list for the ' + net_name_create + " network", list=scan_val)
scan_val_sel = scan_key[sel_scan]
net_create_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/create?class=' + net_class + '&conf={"networkname":"' + net_name_create + '","scanfile":"' + scan_val_sel + '"}'
net_create_new = requests.get(net_create_url).json()
net_create = net_create_new['uuid']
else:
net_create_index = (net_name.index(net_name_create) if net_name_create in net_name else None)
net_create = net_uuid[net_create_index]
else:
net_create = adapt_network[0]
adapt_net_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={"enabled":true,"networks":["' + str(net_create) + '"],"uuid":"' + str(adapter_uuid_sel) + '"}'
adapt_update = requests.get(adapt_net_url).json()
scan_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/scan?uuid=' + net_create
update_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/grid'
mux_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/mux/grid'
stream_url = 'http://' + tvh_url + ':' + tvh_port + '/api/status/inputs'
mux_list_get = requests.get(mux_url).json()
mux_list = [x['uuid'] for x in mux_list_get['entries']]
pDialog = xbmcgui.DialogProgress()
pDialog.create('Scanning muxes for new services')
scan = requests.get(scan_url).json()
time.sleep(1)
update = requests.get(update_url).json()
update_scan = [x['scanq_length'] for x in update['entries'] if x['uuid'] == net_create]
update_scan_num = update_scan[0]
update_mux = [x['num_mux'] for x in update['entries'] if x['uuid'] == net_create]
update_mux_num = update_mux[0]
orig_serv = [x['num_svc'] for x in update['entries'] if x['uuid'] == net_create]
orig_serv_num = orig_serv[0]
while update_scan_num > 0:
update = requests.get(update_url).json()
update_scan = [x['scanq_length'] for x in update['entries'] if x['uuid'] == net_create]
update_scan_num = update_scan[0]
update_serv = [x['num_svc'] for x in update['entries'] if x['uuid'] == net_create]
update_serv_num = (update_serv[0] - orig_serv_num)
update_scan_perc = 100 - ((float(update_scan_num) / float(update_mux_num)) * 100)
update_stream = requests.get(stream_url).json()
stream_freq_list = []
stream_freq_list = [x.get('stream') for x in update_stream['entries']]
stream_freq = ' & '.join(str(s) for s in stream_freq_list)
pDialog.update(int(update_scan_perc), "Scanning: " + str(stream_freq), "New services found: " + str(update_serv_num))
time.sleep(1)
if (pDialog.iscanceled()):
mux_list_str = str(mux_list)
mux_list_str = re.sub("u\'","\"",mux_list_str)
mux_list_str = re.sub("\'","\"",mux_list_str)
mux_stop_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node={"scan_state":0,"uuid":' + mux_list_str + '}'
mux_stop = requests.get(mux_stop_url)
dialog.ok('Scanning muxes cancelled', 'New services will not be mapped.')
return
pDialog.close()
if update_serv_num == 0:
dialog.ok('Scanning complete.', "New services found: " + str(update_serv_num), "There are no new services to map to channels.")
return
serv_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/service/grid?limit=999999999&sort=multiplex'
services = requests.get(serv_url).json()
serv_total = services['total']
serv_uuid = []
for serv_id in services['entries']:
if serv_id['channel'] == []:
serv_uuid.append(serv_id['uuid'])
serv_uuid_str = str(serv_uuid)
serv_uuid_str = re.sub("u\'","\"",serv_uuid_str)
serv_uuid_str = re.sub("\'","\"",serv_uuid_str)
map_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/save?node={"services":' + serv_uuid_str + ',"encrypted":false,"merge_same_name":false,"check_availability":false,"type_tags":true,"provider_tags":false,"network_tags":false}'
map_ch = requests.get(map_url)
status_url = 'http://' + tvh_url + ':' + tvh_port + '/api/service/mapper/status'
time.sleep(3)
map_status = requests.get(status_url).json()
map_total_num = map_status['total']
map_ok_num = map_status['ok']
map_fail_num = map_status['fail']
map_ignore_num = map_status['ignore']
map_complete = (map_ok_num + map_fail_num + map_ignore_num)
map_total_perc = ((float(map_complete) / float(serv_total)) * 100)
dialog.ok("Wizard complete!", str(map_ok_num) + " new channels.", str(map_ignore_num) + " services ignored. " + str(map_fail_num) + " services failed.", "You can now enable additional tuners in the adapters menu.")
@plugin.route('/adapters')
def adapters():
adapters_url = 'http://' + tvh_url + ':' + tvh_port + '/api/hardware/tree?uuid=root'
adapters_get = requests.get(adapters_url).json()
if adapters_get == []:
dialog.ok("No adapters found!", "Please make sure your TV adapter is connected.")
return
adapters_uuid = []
for adapter_fe in adapters_get:
adapters_uuid.append(adapter_fe['uuid'])
adapter_uuid = []
adapter_text = []
adapter_enabled = []
for adapter_y in adapters_uuid:
adapter_url = 'http://' + tvh_url + ':' + tvh_port + '/api/hardware/tree?uuid=' + adapter_y
adapter_get = requests.get(adapter_url).json()
for adapter_x in adapter_get:
adapter_uuid.append(adapter_x['uuid'])
for adapter_t in adapter_get:
adapter_text.append(adapter_t['text'])
for adapter_e in adapter_get:
adapter_enabled.append(str(adapter_e['params'][0]['value']))
adapter_enabled = [w.replace('False', ' ** DISABLED **') for w in adapter_enabled]
adapter_enabled = [w.replace('True', ' ') for w in adapter_enabled]
adapters_full = zip(adapter_text, adapter_enabled)
adapters_list = ["%s %s" % x for x in adapters_full]
sel_adapter = dialog.select('Select which adapter you would like to configure', list=adapters_list)
if sel_adapter >= 0:
adapter_uuid_sel = adapter_uuid[sel_adapter]
adapt_param_load(adapter_uuid_sel)
@plugin.route('/networks')
def networks():
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/grid'
networks = requests.get(networks_url).json()
net_name = ["Setup New Network"]
net_uuid = [0]
for net_n in networks['entries']:
net_name.append(net_n['networkname'])
for net_u in networks['entries']:
net_uuid.append(net_u['uuid'])
sel_network = dialog.select('Select a network to configure', list=net_name)
if sel_network == 0:
net_uuid_sel = network_new()
if net_uuid_sel == "":
return
else:
net_param_load(net_uuid_sel)
if sel_network > 0:
net_uuid_sel = net_uuid[sel_network]
net_param_load(net_uuid_sel)
@plugin.route('/muxes')
def muxes():
networks_url = 'http://' + tvh_url + ':' + tvh_port + '/api/mpegts/network/grid'
networks = requests.get(networks_url).json()
net_name = []
net_uuid = []
for net_n in networks['entries']:
net_name.append(net_n['networkname'])
for net_u in networks['entries']:
net_uuid.append(net_u['uuid'])
sel_network = dialog.select('Select a network to see list of muxes', list=net_name)
if sel_network >= 0:
net_uuid_sel = net_uuid[sel_network]
muxes_load(net_uuid_sel)
@plugin.route('/mux_scan')
def mux_scan():
api_path = 'mpegts/network/grid'
api_url = 'http://' + tvh_url + ':' + tvh_port + '/api/' + api_path
networks = requests.get(api_url).json()
net_name = []
net_uuid = []
for net_n in networks['entries']:
net_name.append(net_n['networkname'])
for net_u in networks['entries']:
net_uuid.append(net_u['uuid'])
sel = dialog.select('Select a network to scan', list=net_name)
if sel >= 0:
net_uuid_sel = net_uuid[sel]
start_scan(net_uuid_sel)
@plugin.route('/services')
def services():
services_param_load()
@plugin.route('/channels')
def channels():
channels_url = 'http://' + tvh_url + ':' + tvh_port + '/api/channel/grid?all=1&limit=999999999&sort=name'
channels = requests.get(channels_url).json()
channels_name = []
channels_uuid = []
channels_enabled = []
for ch_n in channels['entries']:
channels_name.append(ch_n['name'])
for ch_u in channels['entries']:
channels_uuid.append(ch_u['uuid'])
for ch_e in channels['entries']:
channels_enabled.append(str(ch_e['enabled']))
channels_enabled = [w.replace('False', ' ** DISABLED **') for w in channels_enabled]
channels_enabled = [w.replace('True', ' ') for w in channels_enabled]
channels_full = zip(channels_name, channels_enabled)
channels_list = ["%s %s" % x for x in channels_full]
sel_ch = dialog.select('Select a channel to configure', list=channels_list)
if sel_ch >= 0:
ch_uuid_sel = channels_uuid[sel_ch]
ch_param_load(ch_uuid_sel)
@plugin.route('/dvr')
def dvr():
dvr_config_url = 'http://' + tvh_url + ':' + tvh_port + '/api/dvr/config/grid'
dvr_config = requests.get(dvr_config_url).json()
dvr_config_name = []
dvr_config_uuid = []
    for dvr_n in dvr_config['entries']:
        if dvr_n['name'] == "":
            dvr_config_name.append("(Default profile)")
        else:
            dvr_config_name.append(dvr_n['name'])
for dvr_u in dvr_config['entries']:
dvr_config_uuid.append(dvr_u['uuid'])
sel_dvr = dialog.select('Select a DVR configuration to edit', list=dvr_config_name)
if sel_dvr >= 0:
dvr_uuid_sel = dvr_config_uuid[sel_dvr]
dvr_param_load(dvr_uuid_sel)
@plugin.route('/cas')
def cas():
cas_url = 'http://' + tvh_url + ':' + tvh_port + '/api/caclient/list'
cas = requests.get(cas_url).json()
cas_name = ["Setup New Conditional Access Client"]
cas_uuid = [0]
for cas_n in cas['entries']:
cas_name.append(cas_n['title'])
for cas_u in cas['entries']:
cas_uuid.append(cas_u['uuid'])
sel_cas = dialog.select('Select a conditional access client to configure', list=cas_name)
if sel_cas == 0:
cas_uuid_sel = cas_new()
if cas_uuid_sel == "":
return
else:
cas_param_load(cas_uuid_sel)
if sel_cas > 0:
cas_uuid_sel = cas_uuid[sel_cas]
cas_param_load(cas_uuid_sel)
@plugin.route('/epg')
def epg():
epg_url = 'http://' + tvh_url + ':' + tvh_port + '/api/epggrab/config/load'
epg_load = requests.get(epg_url).json()
epg_rename = find_param(epg_load, 'channel_rename')
epg_renumber = find_param(epg_load, 'channel_renumber')
epg_reicon = find_param(epg_load, 'channel_reicon')
epg_dbsave = find_param(epg_load, 'epgdb_periodicsave')
epg_intcron = find_param(epg_load, 'cron')
epg_otainit = find_param(epg_load, 'ota_initial')
epg_otacron = find_param(epg_load, 'ota_cron')
epg_otatime = find_param(epg_load, 'ota_timeout')
epg_info_list = ["EDIT EPG GRABBER MODULES", "TRIGGER OTA GRABBER", "RE-RUN INTERNAL GRABBER", "Update channel name: " + str(epg_rename), "Update channel number: " + str(epg_renumber), "Update channel icon: " + str(epg_reicon), "Periodically save EPG to disk (hours): " + str(epg_dbsave), "Internal Cron multi-line: " + str(epg_intcron), "Force initial OTA EPG grab at start-up: " + str(epg_otainit), "Over-the-air Cron multi-line: " + str(epg_otacron), "OTA EPG scan timeout in seconds (30-7200): " + str(epg_otatime)]
sel_epg = dialog.select('Select an EPG Grabber configuration to edit', list=epg_info_list)
if sel_epg < 0:
return
if sel_epg == 0:
epgmod_list_load()
if sel_epg == 1:
epg_run_ota_url = 'http://' + tvh_url + ':' + tvh_port + '/api/epggrab/ota/trigger?trigger=1'
epg_run_ota = requests.get(epg_run_ota_url).json()
if epg_run_ota == {}:
dialog.ok("OTA EPG grabber triggered", "You have initiated the OTA EPG grabber. Your epg should update once completed. Sometimes Kodi needs a restart in order to update the EPG display.")
if sel_epg == 2:
comet_poll_box_url = 'http://' + tvh_url + ':' + tvh_port + '/comet/poll'
comet_poll_box = requests.get(comet_poll_box_url).json()
comet_poll_box_id = comet_poll_box['boxid']
epg_run_int_url = 'http://' + tvh_url + ':' + tvh_port + '/api/epggrab/internal/rerun?rerun=1'
epg_run_int = requests.get(epg_run_int_url).json()
if epg_run_int == {}:
pDialog = xbmcgui.DialogProgress()
pDialog.create('Internal EPG grabber triggered')
comet_poll_url = 'http://' + tvh_url + ':' + tvh_port + '/comet/poll?boxid=' + comet_poll_box_id + '&immediate=0'
comet_poll = requests.get(comet_poll_url).json()
comet_poll_logtxt_list = []
for t in comet_poll['messages']:
comet_poll_logtxt_list.insert(0,t.get('logtxt', "..."))
comet_poll_logtxt = '\n'.join(comet_poll_logtxt_list)
pDialog.update(10, comet_poll_logtxt)
time.sleep(1)
if (pDialog.iscanceled()):
pDialog.close()
comet_update = False
grabber_success = False
perc_update = 10
while comet_update == False:
if (pDialog.iscanceled()):
pDialog.close()
perc_update = perc_update + int((100 - perc_update) * .1) + 1
comet_poll_logtxt_list = []
for t in comet_poll['messages']:
comet_poll_logtxt_list.insert(0,t.get('logtxt', "..."))
comet_poll_logtxt = '\n'.join(comet_poll_logtxt_list)
if "grab took" in comet_poll_logtxt:
comet_update = True
grabber_success = True
if "grab returned no data" in comet_poll_logtxt:
comet_update = True
pDialog.update(perc_update, comet_poll_logtxt)
comet_poll = requests.get(comet_poll_url).json()
time.sleep(1)
pDialog.update(100, comet_poll_logtxt)
time.sleep(2)
pDialog.close()
if grabber_success == True:
if dialog.yesno("Internal EPG grabber finished", "Your EPG has been updated.", "Sometimes Kodi needs a restart in order to update the EPG display. Or you can clear the data in the PVR & Live TV settings.", "Would you like to open the PVR & Live TV settings?"):
xbmc.executebuiltin('ActivateWindow(pvrsettings)')
else:
dialog.ok("Internal EPG Grabber Error!", "The EPG Grabber failed to return data.", "", "Please check your grabber installation for issues.")
if sel_epg > 2 :
epg_param(sel_epg, epg_rename, epg_renumber, epg_reicon, epg_dbsave, epg_intcron, epg_otainit, epg_otacron, epg_otatime)
@plugin.route('/wizard')
def wizard():
start = dialog.yesno("TVheadend Wizard - Start", "This wizard will walk you through the initial setup for TVheadend using usb tuners. Running this wizard on an already configured system could cause issues.", "Do you wish to continue?")
if not start:
return
else:
wizard_start()
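

# tvh(): base Tvheadend configuration menu. It reads /api/config/load and
# lets the user edit scanfile/icon/picon options (saved back through
# /api/config/save), reset all channel icons, download picons, and back up
# or import the addon userdata; backup/import first stop the
# service.tvheadend42 addon through Kodi's JSON-RPC Addons.SetAddonEnabled
# call and zip/unzip the addon_data directory.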
@plugin.route('/tvh')
def tvh():
tvh_config_url = 'http://' + tvh_url + ':' + tvh_port + '/api/config/load'
tvh_config_load = requests.get(tvh_config_url).json()
dvb_scan_path = find_param(tvh_config_load, 'muxconfpath')
prefer_picon = find_param(tvh_config_load, 'prefer_picon')
ch_icon_path = find_param(tvh_config_load, 'chiconpath')
ch_icon_scheme, ch_icon_scheme_key, ch_icon_scheme_val = find_param_dict(tvh_config_load, 'chiconscheme', 'enum')
picon_path = find_param(tvh_config_load, 'piconpath')
picon_scheme, picon_scheme_key, picon_scheme_val = find_param_dict(tvh_config_load, 'piconscheme', 'enum')
tvh_config_info_list = ["DVB scan path: " + str(dvb_scan_path), "Prefer picon: " + str(prefer_picon), "Channel icon path: " + str(ch_icon_path), "Channel icon scheme: " + str(ch_icon_scheme), "Picon path: " + str(picon_path), "Picon scheme: " + str(picon_scheme), "RESET ALL CHANNEL ICONS", "BACKUP TVHEADEND USERDATA", "IMPORT TVHEADEND USERDATA", "DOWNLOAD PICONS"]
param_update = ""
sel_tvh = dialog.select('Select a Tvh configuration parameter to edit', list=tvh_config_info_list)
if sel_tvh < 0:
return
if sel_tvh == 0:
sel_dvb_scan_path = dialog.input('Edit the DVB scan files path', defaultt=dvb_scan_path,type=xbmcgui.INPUT_ALPHANUM)
if sel_dvb_scan_path == "":
return
else:
param_update = '"muxconfpath":"' + sel_dvb_scan_path + '"'
if sel_tvh == 1:
sel_prefer_picon = dialog.select('Enable or disable to prefer picons over channel name', list=enabledisable)
if sel_prefer_picon <0:
return
if sel_prefer_picon >= 0:
prefer_picon = truefalse[sel_prefer_picon]
param_update = '"prefer_picon":' + prefer_picon
if sel_tvh == 2:
sel_ch_icon_path = dialog.input('Edit the channel icons path', defaultt=ch_icon_path,type=xbmcgui.INPUT_ALPHANUM)
if sel_ch_icon_path == "":
return
else:
param_update = '"chiconpath":"' + sel_ch_icon_path + '"'
if sel_tvh == 3:
sel_ch_icon_scheme = dialog.select('Select the channel icon name scheme', list=ch_icon_scheme_val)
if sel_ch_icon_scheme <0:
return
if sel_ch_icon_scheme >= 0:
ch_icon_scheme = ch_icon_scheme_key[sel_ch_icon_scheme]
param_update = '"chiconscheme":"' + str(ch_icon_scheme) + '"'
if sel_tvh == 4:
sel_picon_path = dialog.input('Edit the channel icons path', defaultt=picon_path,type=xbmcgui.INPUT_ALPHANUM)
if sel_picon_path == "":
return
else:
param_update = '"piconpath":"' + sel_picon_path + '"'
if sel_tvh == 5:
sel_picon_scheme = dialog.select('Select the channel icon name scheme', list=picon_scheme_val)
        if sel_picon_scheme < 0:
            return
        if sel_picon_scheme >= 0:
            picon_scheme = picon_scheme_key[sel_picon_scheme]
param_update = '"piconscheme":"' + str(picon_scheme) + '"'
if sel_tvh == 6:
if dialog.yesno("Channel Icons Reset", "This will reset all channel icons urls and try to match icons based on icon/picon settings.", "Are you sure you want to reset all channel icons?"):
channels_url = 'http://' + tvh_url + ':' + tvh_port + '/api/channel/grid?all=1&limit=999999999'
channels = requests.get(channels_url).json()
icon_update_list = []
for ch_u in channels['entries']:
channel_uuid = ch_u['uuid']
icon_update_list.append('{"icon":"","uuid":"' + str(ch_u['uuid']) + '"}')
icon_update = ','.join(icon_update_list)
icon_update_url = 'http://' + tvh_url + ':' + tvh_port + '/api/idnode/save?node=[' + icon_update + ']'
icon_update_save = requests.get(icon_update_url)
if sel_tvh == 7:
dialog.ok("Tvheadend Userdata Backup", "The Tvheadend service will be stopped to start the backup.", "The Tvheadend client may show a connection error during the process.")
if tvh_url == "127.0.0.1":
tvh_addon = xbmcaddon.Addon(id='service.tvheadend42')
tvh_userdata_path = xbmc.translatePath(tvh_addon.getAddonInfo('profile'))
else:
tvh_userdata_path = '//' + tvh_url + '/userdata/addon_data/service.tvheadend42/'
try:
tvh_json_url = 'http://' + tvh_url + ':8080/jsonrpc?request={"jsonrpc":"2.0","id":1,"method":"Addons.SetAddonEnabled","params":{"addonid":"service.tvheadend42","enabled":false}}'
tvh_json_load = requests.get(tvh_json_url).json()
tvh_stop = tvh_json_load['result']
except:
dialog.ok("Tvheadend Service Still Running!", "Unable to stop the Tvheadend service.", "Unable to complete backup.")
return
if tvh_stop == "OK":
output_path = dialog.browse(3, "Where would you like to save the Tvheadend Backup file?", "files")
output_name = output_path + "service.tvheadend42-backup-" + str(datetime.date.today()) + ".zip"
if dialog.yesno('Backup Tvheadend Userdata to Zip File', 'Zip file will be created in the following location:', str(output_path), 'Select YES to create backup.'):
ZipDir(tvh_userdata_path, output_name)
dialog.ok("Tvheadend Userdata Backup Complete", "Tvheadend userdata has been backed up.", "Tvheadend service will be restarted.")
try:
tvh_json_url = 'http://' + tvh_url + ':8080/jsonrpc?request={"jsonrpc":"2.0","id":1,"method":"Addons.SetAddonEnabled","params":{"addonid":"service.tvheadend42","enabled":true}}'
tvh_json_load = requests.get(tvh_json_url).json()
tvh_stop = tvh_json_load['result']
except:
dialog.ok("Unable to Restart Tvheadend Service!", "Unable to restart the Tvheadend service.", "Please enable the service in Kodi addons.")
else:
dialog.ok("Tvheadend Service Still Running!", "Unable to stop the Tvheadend service.", "Unable to complete backup.")
if sel_tvh == 8:
dialog.ok("Tvheadend Userdata Import", "The Tvheadend service will be stopped to start the import.", "The Tvheadend client may show a connection error during the process.")
try:
tvh_json_url = 'http://' + tvh_url + ':8080/jsonrpc?request={"jsonrpc":"2.0","id":1,"method":"Addons.SetAddonEnabled","params":{"addonid":"service.tvheadend42","enabled":false}}'
tvh_json_load = requests.get(tvh_json_url).json()
tvh_stop = tvh_json_load['result']
except:
dialog.ok("Tvheadend Service Still Running!", "Unable to stop the Tvheadend service.", "Unable to complete the import.")
return
if tvh_stop == "OK":
if tvh_url == "127.0.0.1":
tvh_addon = xbmcaddon.Addon(id='service.tvheadend42')
tvh_userdata_path = xbmc.translatePath(tvh_addon.getAddonInfo('profile'))
else:
tvh_userdata_path = '//' + tvh_url + '/userdata/addon_data/service.tvheadend42'
zipfile_path = dialog.browse(1, "Select your Tvheadend userdata backup zip file?", "files", ".zip")
if dialog.yesno('Import Tvheadend Userdata from Zip File', 'Your current Tvheadend userdata will be overwritten.', '', 'Select YES to start import.'):
tvh_zip = zipfile.ZipFile(zipfile_path)
tvh_zip.extractall(tvh_userdata_path)
tvh_zip.close()
dialog.ok("Tvheadend Userdata Import Complete", "Tvheadend userdata has been imported.", "Tvheadend service will be restarted.")
try:
tvh_json_url = 'http://' + tvh_url + ':8080/jsonrpc?request={"jsonrpc":"2.0","id":1,"method":"Addons.SetAddonEnabled","params":{"addonid":"service.tvheadend42","enabled":true}}'
tvh_json_load = requests.get(tvh_json_url).json()
tvh_stop = tvh_json_load['result']
except:
dialog.ok("Unable to Restart Tvheadend Service!", "Unable to restart the Tvheadend service.", "Please enable the service in Kodi addons.")
else:
dialog.ok("Tvheadend Service Still Running!", "Unable to stop the Tvheadend service.", "Unable to complete backup.")
if sel_tvh == 9:
picons_param_load()
if param_update != "":
param_url = 'http://' + tvh_url + ':' + tvh_port + '/api/config/save?node={' + param_update + '}'
param_save = requests.get(param_url)
tvh()
@plugin.route('/tvhclient')
def tvhclient():
plugin.open_settings()
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/')
def index():
items = []
items.append(
{
'label': 'Adapters Configuration',
'path': plugin.url_for(u'adapters'),
'thumbnail':get_icon_path('adapter'),
})
items.append(
{
'label': 'Networks Configuration',
'path': plugin.url_for(u'networks'),
'thumbnail':get_icon_path('antenna'),
})
items.append(
{
'label': 'Muxes Configuration',
'path': plugin.url_for(u'muxes'),
'thumbnail':get_icon_path('signal'),
})
items.append(
{
'label': 'Channels Configuration',
'path': plugin.url_for(u'channels'),
'thumbnail':get_icon_path('numlist'),
})
items.append(
{
'label': 'Scan for New Channels',
'path': plugin.url_for(u'mux_scan'),
'thumbnail':get_icon_path('frequency'),
})
items.append(
{
'label': 'Map Services to Channels',
'path': plugin.url_for(u'services'),
'thumbnail':get_icon_path('folder'),
})
items.append(
{
'label': 'EPG Grabber Configuration',
'path': plugin.url_for(u'epg'),
'thumbnail':get_icon_path('list'),
})
items.append(
{
'label': 'DVR Configuration',
'path': plugin.url_for(u'dvr'),
'thumbnail':get_icon_path('dvr'),
})
items.append(
{
'label': 'Conditional Access Clients',
'path': plugin.url_for(u'cas'),
'thumbnail':get_icon_path('cas'),
})
items.append(
{
'label': 'Tvh Base Configuration & Backup',
'path': plugin.url_for(u'tvh'),
'thumbnail':get_icon_path('settings'),
})
items.append(
{
'label': 'Start Wizard',
'path': plugin.url_for(u'wizard'),
'thumbnail':get_icon_path('wand'),
})
items.append(
{
'label': 'Tvheadend Backend: ' + tvh_url + ':' + tvh_port,
'path': plugin.url_for(u'tvhclient'),
'thumbnail':get_icon_path('server'),
})
return items
if __name__ == '__main__':
plugin.run()
| gpl-3.0 | -8,064,920,706,645,967,000 | 58.770235 | 741 | 0.587203 | false |
netkicorp/wns-api-server | netki/common/test_config.py | 1 | 3834 | __author__ = 'mdavid'
# Setup our test environment
import os
os.environ['NETKI_ENV'] = 'test'
from unittest import TestCase
from mock import patch
from StringIO import StringIO
from netki.common.config import ConfigManager
class ConfigManagerTestCase(TestCase):
def tearDown(self):
super(ConfigManagerTestCase, self).tearDown()
ConfigManager._instances.clear()
class TestConfigManagerInit(ConfigManagerTestCase):
def setUp(self):
ConfigManager._instances.clear()
self.patcher1 = patch('netki.common.config.os.listdir')
self.patcher2 = patch('netki.common.config.os.path.isfile')
self.patcher3 = patch('netki.common.config.open', create=True)
self.patcher4 = patch('netki.common.config.ConfigManager.find_config_file')
self.mockListdir = self.patcher1.start()
self.mockIsFile = self.patcher2.start()
self.mockOpen = self.patcher3.start()
self.mockFindConfigFile = self.patcher4.start()
self.mockListdir.side_effect = (['etc'], ['app.test.config'])
self.mockIsFile.return_value = True
def empty_func(*args):
pass
def return_file_data():
return StringIO('''
[section]
string_value=string
int_value=1
float_value=42.42
bool_true=true
bool_false=false
''')
mockFile = StringIO()
mockFile.__enter__ = return_file_data
mockFile.__exit__ = empty_func
self.mockOpen.return_value = mockFile
self.mockFindConfigFile.return_value = 'CONFIGFILE'
def tearDown(self):
self.patcher1.stop()
self.patcher2.stop()
self.patcher3.stop()
self.patcher4.stop()
def test_init_go_right(self):
ret_val = ConfigManager()
self.assertIsNotNone(ret_val)
self.assertEqual('string', ret_val.config_dict.section.string_value)
self.assertEqual(1, ret_val.config_dict.section.int_value)
self.assertEqual(42.42, ret_val.config_dict.section.float_value)
self.assertTrue(ret_val.config_dict.section.bool_true)
self.assertFalse(ret_val.config_dict.section.bool_false)
self.assertEqual(ret_val.config_dict, ret_val.get_config())
self.assertEqual(1, self.mockFindConfigFile.call_count)
self.assertEqual(1, self.mockIsFile.call_count)
self.assertEqual(1, self.mockOpen.call_count)
def test_no_config_file(self):
self.mockIsFile.return_value = False
try:
ConfigManager()
self.assertTrue(False)
except Exception as e:
self.assertIsNotNone(e)
self.assertEqual(1, self.mockFindConfigFile.call_count)
self.assertEqual(1, self.mockIsFile.call_count)
self.assertEqual(0, self.mockOpen.call_count)
class TestGetConfigFile(ConfigManagerTestCase):
def setUp(self):
ConfigManager._instances.clear()
self.patcher1 = patch('netki.common.config.os.listdir')
self.mockListDir = self.patcher1.start()
self.mockListDir.side_effect = ( ['etc'], ['app.test.config'])
def test_go_right(self):
ret_val = ConfigManager.find_config_file('test')
self.assertEqual('./etc/app.test.config', ret_val)
self.assertEqual(2, self.mockListDir.call_count)
def test_go_right_no_etc_but_file(self):
self.mockListDir.side_effect = None
self.mockListDir.return_value = ['app.test.config']
ret_val = ConfigManager.find_config_file('test')
self.assertEqual('./app.test.config', ret_val)
def test_not_found(self):
self.mockListDir.side_effect = None
self.mockListDir.return_value = []
ret_val = ConfigManager.find_config_file('test')
self.assertIsNone(ret_val)
if __name__ == "__main__":
import unittest
unittest.main() | bsd-3-clause | -2,711,859,175,844,189,000 | 28.5 | 83 | 0.655451 | false |
mchung94/solitaire-player | pysolvers/solvers/deck.py | 1 | 2238 | """Card and Deck definitions.
Cards are strings containing a rank character followed by a suit character,
because it's simpler than defining a class or named tuple while still being
immutable, hashable, easy to create, and human-readable.
I also want to define a deck as just a tuple of cards that contain exactly
all 52 cards in a standard deck. I think this is the simplest way with the
fewest surprises/pitfalls.
"""
import collections
RANKS = 'A23456789TJQK'
SUITS = 'cdhs'
CARDS = [f'{rank}{suit}' for suit in SUITS for rank in RANKS]
CARDS_SET = set(CARDS)
def is_card(obj):
"""Return true if the object is a card."""
return obj in CARDS_SET
def card_rank(card):
"""Return the card's rank as a character."""
return card[0]
def malformed_cards(tuple_of_objects):
"""Return a list of the objects in the tuple that aren't cards.
If is_standard_deck() returns false for a list, this function may help the
caller determine what's wrong with their deck of cards."""
return [obj for obj in tuple_of_objects if not is_card(obj)]
def missing_cards(tuple_of_cards):
"""Return a list of the standard cards that are missing from the tuple.
Return the missing cards in consistent order by suit and rank.
If is_standard_deck() returns false for a list, this function may help the
caller determine what's wrong with their deck of cards."""
cards = set(tuple_of_cards)
return [card for card in CARDS if card not in cards]
def duplicate_cards(tuple_of_cards):
"""Return a list of the cards that are duplicated in the tuple.
If a card is duplicated N times, the card should be in the result N times
so that the caller knows how many times it's been duplicated.
If is_standard_deck() returns false for a list, this function may help the
caller determine what's wrong with their deck of cards."""
c = collections.Counter(tuple_of_cards)
return [card for card in tuple_of_cards if c[card] > 1]
def is_standard_deck(tuple_of_cards):
"""Return true if the tuple of cards is a standard 52-card deck."""
if not isinstance(tuple_of_cards, tuple):
return False
return len(tuple_of_cards) == 52 and not missing_cards(tuple_of_cards)
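

# A minimal usage sketch (added for illustration, not part of the original
# module); it relies only on the helpers defined above.
if __name__ == '__main__':
    full_deck = tuple(CARDS)            # all 52 cards, ordered by suit then rank
    assert is_standard_deck(full_deck)
    assert card_rank('Ts') == 'T'       # rank is the first character
    # Removing a card shows up in missing_cards(); repeating one shows up
    # in duplicate_cards().
    assert missing_cards(full_deck[1:]) == [full_deck[0]]
    assert duplicate_cards(full_deck + full_deck[:1]) == [full_deck[0], full_deck[0]]
    print('deck helpers OK')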
| mit | -3,498,415,241,772,121,600 | 32.402985 | 78 | 0.714924 | false |
vlegoff/tsunami | src/secondaires/navigation/chantier_naval.py | 1 | 4209 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


"""File containing the ChantierNaval class, detailed below."""
from abstraits.obase import BaseObj
from secondaires.navigation.commande_chantier import *
class ChantierNaval(BaseObj):

    """Class describing a shipyard (chantier naval).

    A shipyard is a set of rooms that can be used to repair and customize
    a particular ship. A shipyard has an interaction room (named
    'salle_magasin') and occupation points that determine the location of
    its docks. If the requested ship is not in one of the shipyard's
    docks, the shipyard will not be able to work on it.

    """
enregistrer = True
_nom = "chantier_navale"
_version = 1
def __init__(self, cle):
BaseObj.__init__(self)
self.cle = cle
self.salle_magasin = None
self.etendue = None
self.points = []
self.commandes = []
self.cales_seches = []
self._construire()
def __getnewargs__(self):
return ("inconnu", )
def __repr__(self):
return "<ChantierNaval {}>".format(repr(self.cle))
def __str__(self):
return self.cle
    def ajouter_commande(self, instigateur, navire, nom_type, duree, *args):
        """Add a new command.

        The parameters to specify are:
            instigateur -- the character who ordered the command
            navire -- the ship concerned
            nom_type -- the type of the command
            duree -- the duration of the command (in minutes)
            *args -- the optional extra arguments specific to the type.

        """
commande = CommandeChantierNaval(self, instigateur, navire, nom_type,
duree, *args)
self.commandes.append(commande)
    def get_navires_possedes(self, personnage):
        """Return the ships in the shipyard owned by the character."""
navires = [n for n in importeur.navigation.navires.values() if \
n.proprietaire is personnage]
navires = [n for n in navires if n.etendue]
navires = [n for n in navires if (int(n.position.x),
int(n.position.y), int(n.position.z)) in self.points]
navires.sort(key=lambda n: n.cle)
return navires
    def executer_commandes(self):
        """Execute the commands that are ready to run."""
for commande in list(self.commandes):
if commande.a_faire:
try:
commande.executer()
except CommandeInterrompue:
pass
else:
self.commandes.remove(commande)
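
# Illustrative sketch (not from the original module; the object names and
# command type below are hypothetical): a command is queued with
# ajouter_commande() and executed later, once flagged as ready (a_faire),
# by executer_commandes():
#
#     chantier.ajouter_commande(personnage, navire, "reparation", 30)
#     ...
#     chantier.executer_commandes()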
| bsd-3-clause | 7,937,201,013,783,846,000 | 38.54717 | 79 | 0.673903 | false |
bhaveshAn/crisscross | crisscross/facades/email.py | 1 | 1397 | '''
Email
=====
The :class:`Email` provides access to public methods to use email of your
device.
.. note::
On Android `INTERNET` permission is needed.
Simple Examples
---------------
To send an e-mail::
>>> from crisscross import email
>>> recipient = '[email protected]'
>>> subject = 'Hi'
>>> text = 'This is an example.'
>>> create_chooser = False
>>> email.send(recipient=recipient, subject=subject, text=text,
create_chooser=create_chooser)
>>> # opens email interface where user can change the content.
'''
class Email(object):
'''
Email facade.
'''
def send(self, recipient=None, subject=None, text=None,
create_chooser=None):
'''
Open an email client message send window, prepopulated with the
given arguments.
:param recipient: Recipient of the message (str)
:param subject: Subject of the message (str)
:param text: Main body of the message (str)
:param create_chooser: Whether to display a program chooser to
handle the message (bool)
.. note:: create_chooser is only supported on Android
'''
self._send(recipient=recipient, subject=subject, text=text,
create_chooser=create_chooser)
# private
def _send(self, **kwargs):
raise NotImplementedError()
| mit | 5,783,431,339,681,985,000 | 24.87037 | 73 | 0.601288 | false |
sunu/oppia-test | models/parameter_test.py | 1 | 1279 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parameter models."""
__author__ = 'Sean Lip'
import test_utils
import parameter
class ParameterUnitTests(test_utils.AppEngineTestBase):
"""Test the Parameter class."""
def test_parameter_class(self):
"""Tests the Parameter class."""
model = parameter.Parameter(name='param1', values=['hello'])
# Raise an error because no obj_type is specified.
with self.assertRaises(TypeError):
model.put()
model.obj_type = 'Int'
# Raise an error because the value does not match the obj_type.
with self.assertRaises(TypeError):
model.put()
model.values = [6]
model.put()
| apache-2.0 | -8,525,175,909,755,323,000 | 31.794872 | 74 | 0.687256 | false |
jvivian/rnaseq-lib | src/rnaseq_lib/utils/expando.py | 1 | 2336 | # Taken from: https://github.com/BD2KGenomics/bd2k-python-lib
class Expando(dict):
"""
Pass inital attributes to the constructor:
>>> o = Expando(foo=42)
>>> o.foo
42
Dynamically create new attributes:
>>> o.bar = 'hi'
>>> o.bar
'hi'
Expando is a dictionary:
>>> isinstance(o,dict)
True
>>> o['foo']
42
Works great with JSON:
>>> import json
>>> s='{"foo":42}'
>>> o = json.loads(s,object_hook=Expando)
>>> o
{u'foo': 42}
>>> o.foo
42
>>> o.bar = 'hi'
>>> o
{u'foo': 42, 'bar': 'hi'}
And since Expando is a dict, it serializes back to JSON just fine:
>>> json.dumps(o)
'{"foo": 42, "bar": "hi"}'
Attributes can be deleted, too:
>>> o = Expando(foo=42)
>>> o.foo
42
>>> del o.foo
>>> o.foo
Traceback (most recent call last):
...
AttributeError: 'Expando' object has no attribute 'foo'
>>> o['foo']
Traceback (most recent call last):
...
KeyError: 'foo'
>>> del o.foo
Traceback (most recent call last):
...
AttributeError: foo
And copied:
>>> o = Expando(foo=42)
>>> p = o.copy()
>>> isinstance(p,Expando)
True
>>> o == p
True
>>> o is p
False
Same with MagicExpando ...
>>> o = MagicExpando()
>>> o.foo.bar = 42
>>> p = o.copy()
>>> isinstance(p,MagicExpando)
True
>>> o == p
True
>>> o is p
False
... but the copy is shallow:
>>> o.foo is p.foo
True
"""
def __init__(self, *args, **kwargs):
super(Expando, self).__init__(*args, **kwargs)
self.__slots__ = None
self.__dict__ = self
def copy(self):
return type(self)(self)
class MagicExpando(Expando):
"""
Use MagicExpando for chained attribute access. The first time a missing attribute is
accessed, it will be set to a new child MagicExpando.
>>> o=MagicExpando()
>>> o.foo = 42
>>> o
{'foo': 42}
>>> o.bar.hello = 'hi'
>>> o
{'foo': 42, 'bar': {'hello': 'hi'}}
"""
def __getattribute__(self, name):
try:
return super(Expando, self).__getattribute__(name)
except AttributeError:
child = self.__class__()
self[name] = child
return child
| mit | -5,991,703,706,041,165,000 | 18.305785 | 88 | 0.51113 | false |
QualiSystems/OpenStack-Shell | package/tests/test_cp/test_openstack/test_command/test_operations/test_connectivity_operation.py | 1 | 1574 | from unittest import TestCase
from mock import Mock
from cloudshell.cp.openstack.command.operations.connectivity_operation import ConnectivityOperation
class TestConnectivityOperation(TestCase):
def setUp(self):
self.connectivity_service = Mock()
self.conn_operation = ConnectivityOperation(connectivity_service=self.connectivity_service)
self.conn_operation.connectivity_service = Mock()
self.os_session = Mock()
self.cp_resource_model = Mock()
self.logger = Mock()
pass
def test_connectivity_operation_apply_connectivity(self):
connectivity_request = Mock()
mock_result = Mock()
#self.conn_operation.apply_connectivity = Mock(return_value=mock_result)
self.conn_operation.apply_connectivity(openstack_session=self.os_session,
cp_resource_model=self.cp_resource_model,
conn_request=connectivity_request,
logger=self.logger)
self.conn_operation.connectivity_service.perform_apply_connectivity.assert_called_with(
openstack_session=self.os_session,
cp_resource_model=self.cp_resource_model,
connection_request=connectivity_request,
logger=self.logger)
| isc | -8,626,905,349,689,318,000 | 49.774194 | 109 | 0.551461 | false |
gtalarico/pyrevitplus | pyRevitPlus.tab/VP Tools.panel/Levels.pulldown/Save Levels.pushbutton/script.py | 1 | 4676 | """
Save Levels
Save the view-dependent properties -
endpoint locations, level heads and leaders
of the selected building levels for re-use
Non-level elements will be skipped with a dialog,
so it's advisable to apply filtering beforehand
TESTED REVIT API: 2020
@ejs-ejs
This script is part of PyRevitPlus: Extensions for PyRevit
github.com/ejs-ejs | @ejs-ejs
--------------------------------------------------------
RevitPythonWrapper: revitpythonwrapper.readthedocs.io
pyRevit: github.com/eirannejad/pyRevit
"""
import os
import pickle
from tempfile import gettempdir
from collections import namedtuple
import rpw
from rpw import doc, uidoc, DB, UI
Point = namedtuple('Point', ['X', 'Y','Z'])
Axis = namedtuple('Axis', ['Name', 'Start', 'End','StartBubble', 'EndBubble', 'StartBubbleVisible', 'EndBubbleVisible'])
tempfile = os.path.join(gettempdir(), 'LevelPlacement')
cView = doc.ActiveView
if not(cView.ViewType == DB.ViewType.Section or cView.ViewType == DB.ViewType.Elevation):
UI.TaskDialog.Show('pyRevitPlus', 'View type \'{}\' not supported'.format(cView.ViewType))
else:
experimental = True
UI.TaskDialog.Show('pyRevitPlus', 'Support for \'{}\' view type is experimental!'.format(cView.ViewType))
selection = rpw.ui.Selection()
#if len(selection) <> 1:
# UI.TaskDialog.Show('pyRevitPlus', 'Select a single grid line!')
# exit(0);
n=0
LevelLines = dict()
for cLevel in selection:
el = cLevel.unwrap()
if isinstance(el, DB.Level):
curves=el.GetCurvesInView(DB.DatumExtentType.ViewSpecific, cView)
            if len(curves) != 1:
                UI.TaskDialog.Show('pyRevitPlus', 'The level line is defined by {} curves, unable to proceed'.format(len(curves)))
else:
cLevelLine = {'Name':'', 'Start': Point(0,0,0), 'End': Point(0,0,0), 'StartBubble': False, 'StartBubbleVisible': False, 'EndBubble': False, 'EndBubbleVisible': False}
cCurve = curves[0]
leader0 = el.GetLeader(DB.DatumEnds.End0, cView)
if leader0:
tmp = leader0.Elbow
cLevelLine['Leader0Elbow'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader0.End
cLevelLine['Leader0End'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader0.Anchor
cLevelLine['Leader0Anchor'] = Point(tmp.X, tmp.Y,tmp.Z)
leader1 = el.GetLeader(DB.DatumEnds.End1, cView)
if leader1:
tmp = leader1.Elbow
cLevelLine['Leader1Elbow'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader1.End
cLevelLine['Leader1End'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader1.Anchor
cLevelLine['Leader1Anchor'] = Point(tmp.X, tmp.Y,tmp.Z)
cLevelLine['Name'] = el.Name
tmp = cCurve.GetEndPoint(0)
cLevelLine['Start'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = cCurve.GetEndPoint(1)
cLevelLine['End'] = Point(tmp.X, tmp.Y,tmp.Z)
if el.HasBubbleInView(DB.DatumEnds.End0, cView):
cLevelLine['StartBubble']=True
if el.HasBubbleInView(DB.DatumEnds.End1, cView):
cLevelLine['EndBubble']=True
if el.IsBubbleVisibleInView(DB.DatumEnds.End0, cView):
cLevelLine['StartBubbleVisible']=True
if el.IsBubbleVisibleInView(DB.DatumEnds.End1, cView):
cLevelLine['EndBubbleVisible']=True
#if isinstance(cCurve, DB.Arc):
# tmp = cCurve.Center
# cLevelLine['Center'] = Point(tmp.X, tmp.Y,tmp.Z)
LevelLines[cLevelLine['Name']] = cLevelLine
n += 1
else:
#if isinstance(el, DB.MultiSegmentGrid):
# UI.TaskDialog.Show('pyRevitPlus', 'Skipping yet unsupported Multi-Segment grid \'{}\''.format(el.Name))
#else:
UI.TaskDialog.Show('pyRevitPlus', 'Skipping non- level element \'{}\''.format(el.Name))
    if n != 1:
msg = 'Saved {} level placements to {}'.format(n,tempfile)
else:
msg = 'Saved level \'{}\' placement to {}'.format(cLevelLine['Name'],tempfile)
if n>0:
with open(tempfile, 'wb') as fp:
pickle.dump(LevelLines, fp)
# close(fp)
UI.TaskDialog.Show('pyRevitPlus', msg)
else:
UI.TaskDialog.Show('pyRevitPlus', 'Nothing to save')
| gpl-3.0 | -4,241,314,254,409,846,300 | 36.408 | 182 | 0.565013 | false |
biocore/qiime2 | qiime2/sdk/tests/test_artifact.py | 1 | 21617 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os
import tempfile
import unittest
import uuid
import pathlib
import pandas as pd
import qiime2.plugin
import qiime2.core.type
from qiime2 import Metadata
from qiime2.sdk import Artifact
from qiime2.sdk.result import ResultMetadata
from qiime2.plugin.model import ValidationError
import qiime2.core.archive as archive
from qiime2.core.testing.type import IntSequence1, FourInts, Mapping, SingleInt
from qiime2.core.testing.util import get_dummy_plugin, ArchiveTestingMixin
class TestArtifact(unittest.TestCase, ArchiveTestingMixin):
def setUp(self):
# Ignore the returned dummy plugin object, just run this to verify the
# plugin exists as the tests rely on it being loaded.
get_dummy_plugin()
# TODO standardize temporary directories created by QIIME 2
self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-')
self.provenance_capture = archive.ImportProvenanceCapture()
def tearDown(self):
self.test_dir.cleanup()
def test_private_constructor(self):
with self.assertRaisesRegex(
NotImplementedError,
'Artifact constructor.*private.*Artifact.load'):
Artifact()
# Note on testing strategy below: many of the tests for `_from_view` and
# `load` are similar, with the exception that when `load`ing, the
# artifact's UUID is known so more specific assertions can be performed.
# While these tests appear somewhat redundant, they are important because
# they exercise the same operations on Artifact objects constructed from
# different sources, whose codepaths have very different internal behavior.
# This internal behavior could be tested explicitly but it is safer to test
# the public API behavior (e.g. as a user would interact with the object)
# in case the internals change.
def test_from_view(self):
artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
self.provenance_capture)
self.assertEqual(artifact.type, FourInts)
# We don't know what the UUID is because it's generated within
# Artifact._from_view.
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
# Can produce same view if called again.
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
def test_from_view_different_type_with_multiple_view_types(self):
artifact = Artifact._from_view(IntSequence1, [42, 42, 43, -999, 42],
list, self.provenance_capture)
self.assertEqual(artifact.type, IntSequence1)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
def test_from_view_and_save(self):
fp = os.path.join(self.test_dir.name, 'artifact.qza')
# Using four-ints data layout because it has multiple files, some of
# which are in a nested directory.
artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
self.provenance_capture)
artifact.save(fp)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp, root_dir, expected)
def test_load(self):
saved_artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
fp = os.path.join(self.test_dir.name, 'artifact.qza')
saved_artifact.save(fp)
artifact = Artifact.load(fp)
self.assertEqual(artifact.type, FourInts)
self.assertEqual(artifact.uuid, saved_artifact.uuid)
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
def test_load_different_type_with_multiple_view_types(self):
saved_artifact = Artifact.import_data(IntSequence1,
[42, 42, 43, -999, 42])
fp = os.path.join(self.test_dir.name, 'artifact.qza')
saved_artifact.save(fp)
artifact = Artifact.load(fp)
self.assertEqual(artifact.type, IntSequence1)
self.assertEqual(artifact.uuid, saved_artifact.uuid)
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
def test_load_and_save(self):
fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp1)
artifact = Artifact.load(fp1)
# Overwriting its source file works.
artifact.save(fp1)
# Saving to a new file works.
artifact.save(fp2)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp1, root_dir, expected)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp2, root_dir, expected)
def test_roundtrip(self):
fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp1)
artifact1 = Artifact.load(fp1)
artifact1.save(fp2)
artifact2 = Artifact.load(fp2)
self.assertEqual(artifact1.type, artifact2.type)
self.assertEqual(artifact1.format, artifact2.format)
self.assertEqual(artifact1.uuid, artifact2.uuid)
self.assertEqual(artifact1.view(list),
artifact2.view(list))
# double view to make sure multiple views can be taken
self.assertEqual(artifact1.view(list),
artifact2.view(list))
def test_load_with_archive_filepath_modified(self):
# Save an artifact for use in the following test case.
fp = os.path.join(self.test_dir.name, 'artifact.qza')
Artifact.import_data(FourInts, [-1, 42, 0, 43]).save(fp)
# Load the artifact from a filepath then save a different artifact to
# the same filepath. Assert that both artifacts produce the correct
# views of their data.
#
# `load` used to be lazy, only extracting data when it needed to (e.g.
# when `save` or `view` was called). This was buggy as the filepath
# could have been deleted, or worse, modified to contain a different
# .qza file. Thus, the wrong archive could be extracted on demand, or
# the archive could be missing altogether. There isn't an easy
# cross-platform compatible way to solve this problem, so Artifact.load
# is no longer lazy and always extracts its data immediately. The real
# motivation for lazy loading was for quick inspection of archives
# without extracting/copying data, so that API is now provided through
# Artifact.peek.
artifact1 = Artifact.load(fp)
Artifact.import_data(FourInts, [10, 11, 12, 13]).save(fp)
artifact2 = Artifact.load(fp)
self.assertEqual(artifact1.view(list), [-1, 42, 0, 43])
self.assertEqual(artifact2.view(list), [10, 11, 12, 13])
def test_extract(self):
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp)
root_dir = str(artifact.uuid)
# pathlib normalizes away the `.`, it doesn't matter, but this is the
# implementation we're using, so let's test against that assumption.
output_dir = pathlib.Path(self.test_dir.name) / 'artifact-extract-test'
result_dir = Artifact.extract(fp, output_dir=output_dir)
self.assertEqual(result_dir, str(output_dir / root_dir))
expected = {
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertExtractedArchiveMembers(output_dir, root_dir, expected)
def test_peek(self):
artifact = Artifact.import_data(FourInts, [0, 0, 42, 1000])
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact.save(fp)
metadata = Artifact.peek(fp)
self.assertIsInstance(metadata, ResultMetadata)
self.assertEqual(metadata.type, 'FourInts')
self.assertEqual(metadata.uuid, str(artifact.uuid))
self.assertEqual(metadata.format, 'FourIntsDirectoryFormat')
def test_import_data_invalid_type(self):
with self.assertRaisesRegex(TypeError,
'concrete semantic type.*Visualization'):
Artifact.import_data(qiime2.core.type.Visualization, self.test_dir)
with self.assertRaisesRegex(TypeError,
'concrete semantic type.*Visualization'):
Artifact.import_data('Visualization', self.test_dir)
def test_import_data_with_filepath_multi_file_data_layout(self):
fp = os.path.join(self.test_dir.name, 'test.txt')
with open(fp, 'w') as fh:
fh.write('42\n')
with self.assertRaisesRegex(qiime2.plugin.ValidationError,
"FourIntsDirectoryFormat.*directory"):
Artifact.import_data(FourInts, fp)
def test_import_data_with_wrong_number_of_files(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
error_regex = ("Missing.*MappingDirectoryFormat.*mapping.tsv")
with self.assertRaisesRegex(ValidationError, error_regex):
Artifact.import_data(Mapping, data_dir)
def test_import_data_with_unrecognized_files(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('42\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('43\n')
nested = os.path.join(data_dir, 'nested')
os.mkdir(nested)
with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
fh.write('44\n')
with open(os.path.join(nested, 'foo.txt'), 'w') as fh:
fh.write('45\n')
error_regex = ("Unrecognized.*foo.txt.*FourIntsDirectoryFormat")
with self.assertRaisesRegex(ValidationError, error_regex):
Artifact.import_data(FourInts, data_dir)
def test_import_data_with_unreachable_path(self):
with self.assertRaisesRegex(qiime2.plugin.ValidationError,
"does not exist"):
Artifact.import_data(IntSequence1,
os.path.join(self.test_dir.name, 'foo.txt'))
with self.assertRaisesRegex(qiime2.plugin.ValidationError,
"does not exist"):
Artifact.import_data(FourInts,
os.path.join(self.test_dir.name, 'bar', ''))
def test_import_data_with_invalid_format_single_file(self):
fp = os.path.join(self.test_dir.name, 'foo.txt')
with open(fp, 'w') as fh:
fh.write('42\n')
fh.write('43\n')
fh.write('abc\n')
fh.write('123\n')
error_regex = "foo.txt.*IntSequenceFormat.*\n\n.*Line 3"
with self.assertRaisesRegex(ValidationError, error_regex):
Artifact.import_data(IntSequence1, fp)
def test_import_data_with_invalid_format_multi_file(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('42\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('43\n')
nested = os.path.join(data_dir, 'nested')
os.mkdir(nested)
with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
fh.write('44\n')
with open(os.path.join(nested, 'file4.txt'), 'w') as fh:
fh.write('foo\n')
error_regex = "file4.txt.*SingleIntFormat.*\n\n.*integer"
with self.assertRaisesRegex(ValidationError, error_regex):
Artifact.import_data(FourInts, data_dir)
def test_import_data_with_good_validation_multi_files(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('1\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('1\n')
a = Artifact.import_data(SingleInt, data_dir)
self.assertEqual(1, a.view(int))
def test_import_data_with_bad_validation_multi_files(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('1\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('2\n')
error_regex = ("test.*RedundantSingleIntDirectoryFormat.*\n\n"
".*does not match")
with self.assertRaisesRegex(ValidationError, error_regex):
Artifact.import_data(SingleInt, data_dir)
def test_import_data_with_filepath(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
# Filename shouldn't matter for single-file case.
fp = os.path.join(data_dir, 'foo.txt')
with open(fp, 'w') as fh:
fh.write('42\n')
fh.write('43\n')
fh.write('42\n')
fh.write('0\n')
artifact = Artifact.import_data(IntSequence1, fp)
self.assertEqual(artifact.type, IntSequence1)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [42, 43, 42, 0])
def test_import_data_with_directory_single_file(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
fp = os.path.join(data_dir, 'ints.txt')
with open(fp, 'w') as fh:
fh.write('-1\n')
fh.write('-2\n')
fh.write('10\n')
fh.write('100\n')
artifact = Artifact.import_data(IntSequence1, data_dir)
self.assertEqual(artifact.type, IntSequence1)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [-1, -2, 10, 100])
def test_import_data_with_directory_multi_file(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('42\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('41\n')
nested = os.path.join(data_dir, 'nested')
os.mkdir(nested)
with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
fh.write('43\n')
with open(os.path.join(nested, 'file4.txt'), 'w') as fh:
fh.write('40\n')
artifact = Artifact.import_data(FourInts, data_dir)
self.assertEqual(artifact.type, FourInts)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [42, 41, 43, 40])
def test_eq_identity(self):
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
self.assertEqual(artifact, artifact)
def test_eq_same_uuid(self):
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact1.save(fp)
artifact2 = Artifact.load(fp)
self.assertEqual(artifact1, artifact2)
def test_ne_same_data_different_uuid(self):
artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact2 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
self.assertNotEqual(artifact1, artifact2)
def test_ne_different_data_different_uuid(self):
artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact2 = Artifact.import_data(FourInts, [1, 2, 3, 4])
self.assertNotEqual(artifact1, artifact2)
def test_ne_subclass_same_uuid(self):
class ArtifactSubclass(Artifact):
pass
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact1 = ArtifactSubclass.import_data(FourInts, [-1, 42, 0, 43])
artifact1.save(fp)
artifact2 = Artifact.load(fp)
self.assertNotEqual(artifact1, artifact2)
self.assertNotEqual(artifact2, artifact1)
def test_ne_different_type_same_uuid(self):
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
class Faker:
@property
def uuid(self):
return artifact.uuid
faker = Faker()
self.assertNotEqual(artifact, faker)
def test_artifact_validate_max(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
A.validate()
self.assertTrue(True) # Checkpoint assertion
A.validate(level='max')
self.assertTrue(True) # Checkpoint assertion
A = Artifact.import_data('IntSequence1', [1, 2, 3, 4, 5, 6, 7, 10])
with self.assertRaisesRegex(ValidationError, '3 more'):
A.validate('max')
def test_artifact_validate_min(self):
A = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
A.validate(level='min')
self.assertTrue(True) # Checkpoint assertion
A = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
A.validate(level='min')
self.assertTrue(True) # Checkpoint assertion
def test_artifact_validate_invalid_level(self):
A = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
with self.assertRaisesRegex(ValueError, 'peanut'):
A.validate(level='peanut')
def test_view_as_metadata(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
obs_md = A.view(Metadata)
exp_df = pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id', dtype=object),
dtype=object)
exp_md = Metadata(exp_df)
exp_md._add_artifacts([A])
self.assertEqual(obs_md, exp_md)
# This check is redundant because `Metadata.__eq__` being used above
# takes source artifacts into account. Doesn't hurt to have an explicit
# check though, since this API didn't always track source artifacts
# (this check also future-proofs the test in case `Metadata.__eq__`
# changes in the future).
self.assertEqual(obs_md.artifacts, (A,))
def test_cannot_be_viewed_as_metadata(self):
A = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
with self.assertRaisesRegex(TypeError,
'Artifact.*IntSequence1.*cannot be viewed '
'as QIIME 2 Metadata'):
A.view(Metadata)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,226,754,745,554,385,400 | 38.810313 | 79 | 0.593237 | false |
ec429/sexpy | sml.py | 1 | 4208 | #!/usr/bin/python
from sexpy import SExp, tok_STRING
class HtmlNodeType(object):
def __init__(self, name, allowed_children):
self.name = name
self.ac = allowed_children
def __call__(self, context, *args):
if self.ac is None: # not specified
return
for arg in args:
if isinstance(arg, SExp):
if isinstance(arg.tpl[0], HtmlNodeType):
if arg.tpl[0].name not in self.ac:
raise Exception(arg.tpl[0].name, "not allowed as child of", self.name)
class HtmlAttributeType(HtmlNodeType):
def __init__(self, name):
super(HtmlAttributeType, self).__init__(name, [])
def __call__(self, context, *args):
super(HtmlAttributeType, self).__call__(context, *args)
content = []
for arg in args:
if isinstance(arg, SExp):
content.append(arg.eval(context))
else:
content.append(arg)
return HtmlAttribute(self.name, content)
class HtmlElementType(HtmlNodeType):
def __call__(self, context, *args):
super(HtmlElementType, self).__call__(context, *args)
attrs = []
content = []
for arg in args:
if isinstance(arg, SExp):
val = arg.eval(context)
if isinstance(val, HtmlElement):
content.append(val)
elif isinstance(val, HtmlAttribute):
attrs.append(val)
else:
assert 0, val
else:
content.append(str(arg))
return HtmlElement(self.name, attrs, content)
class HtmlAttribute(object):
def __init__(self, name, content):
self.name = name
self.content = content
def __str__(self):
return '%s="%s"'%(self.name, ' '.join(map(str, self.content)))
class HtmlElement(object):
def __init__(self, name, attrs, content):
self.name = name
self.attrs = attrs
self.content = content
def __str__(self):
opentag = ' '.join([self.name,] + map(str, self.attrs))
if self.content:
return '<%s>'%opentag + ' '.join(map(str, self.content)) + '</%s>'%(self.name,)
else:
return '<%s/>'%opentag
HTML = {'html':HtmlElementType('html', ['head', 'body']),
'head':HtmlElementType('head', ['title', 'meta', 'script', 'style', 'link']),
'title':HtmlElementType('title', []),
'meta':HtmlElementType('meta', ['http-equiv', 'content', 'name', 'scheme', 'charset']),
'http-equiv':HtmlAttributeType('http-equiv'),
'content':HtmlAttributeType('content'),
'name':HtmlAttributeType('name'),
'scheme':HtmlAttributeType('scheme'),
'charset':HtmlAttributeType('charset'),
'script':HtmlElementType('script', ['src', 'type', 'defer']),
'src':HtmlAttributeType('src'),
'type':HtmlAttributeType('type'),
'defer':HtmlAttributeType('defer'),
'style':HtmlElementType('style', ['type']),
'link':HtmlElementType('link', ['rel', 'type', 'href']),
'rel':HtmlAttributeType('rel'),
# '':HtmlAttributeType(''),
'body':HtmlElementType('body', None),
'a':HtmlElementType('a', None),
'href':HtmlAttributeType('href'),
'p':HtmlElementType('p', None),
'h1':HtmlElementType('h1', None),
'h2':HtmlElementType('h2', None),
'h3':HtmlElementType('h3', None),
'h4':HtmlElementType('h4', None),
'h5':HtmlElementType('h5', None),
'h6':HtmlElementType('h6', None),
'br':HtmlElementType('br', []),
'blockquote':HtmlElementType('blockquote', None),
'img':HtmlElementType('img', ['src']),
# '':HtmlElementType('', None),
}
if __name__ == '__main__':
test = """(html (head (title SML generated page))
(body (h1 SML generated page)
(p A simple HTML page generated from SML. (br)
(a (href /index.htm) Index)
)
)
)"""
s = SExp.parse(test)
print s
print s.eval(HTML)
| mit | 5,023,051,822,639,744,000 | 38.327103 | 95 | 0.533745 | false |
derolf/Plex.ArgusTV.bundle | Contents/Libraries/Shared/mysql/connector/charsets.py | 1 | 12320 | # -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This file was auto-generated.
_GENERATED_ON = '2014-05-12'
_MYSQL_VERSION = (5, 7, 4)
"""This module contains the MySQL Server Character Sets"""
MYSQL_CHARACTER_SETS = [
# (character set name, collation, default)
None,
("big5", "big5_chinese_ci", True), # 1
("latin2", "latin2_czech_cs", False), # 2
("dec8", "dec8_swedish_ci", True), # 3
("cp850", "cp850_general_ci", True), # 4
("latin1", "latin1_german1_ci", False), # 5
("hp8", "hp8_english_ci", True), # 6
("koi8r", "koi8r_general_ci", True), # 7
("latin1", "latin1_swedish_ci", True), # 8
("latin2", "latin2_general_ci", True), # 9
("swe7", "swe7_swedish_ci", True), # 10
("ascii", "ascii_general_ci", True), # 11
("ujis", "ujis_japanese_ci", True), # 12
("sjis", "sjis_japanese_ci", True), # 13
("cp1251", "cp1251_bulgarian_ci", False), # 14
("latin1", "latin1_danish_ci", False), # 15
("hebrew", "hebrew_general_ci", True), # 16
None,
("tis620", "tis620_thai_ci", True), # 18
("euckr", "euckr_korean_ci", True), # 19
("latin7", "latin7_estonian_cs", False), # 20
("latin2", "latin2_hungarian_ci", False), # 21
("koi8u", "koi8u_general_ci", True), # 22
("cp1251", "cp1251_ukrainian_ci", False), # 23
("gb2312", "gb2312_chinese_ci", True), # 24
("greek", "greek_general_ci", True), # 25
("cp1250", "cp1250_general_ci", True), # 26
("latin2", "latin2_croatian_ci", False), # 27
("gbk", "gbk_chinese_ci", True), # 28
("cp1257", "cp1257_lithuanian_ci", False), # 29
("latin5", "latin5_turkish_ci", True), # 30
("latin1", "latin1_german2_ci", False), # 31
("armscii8", "armscii8_general_ci", True), # 32
("utf8", "utf8_general_ci", True), # 33
("cp1250", "cp1250_czech_cs", False), # 34
("ucs2", "ucs2_general_ci", True), # 35
("cp866", "cp866_general_ci", True), # 36
("keybcs2", "keybcs2_general_ci", True), # 37
("macce", "macce_general_ci", True), # 38
("macroman", "macroman_general_ci", True), # 39
("cp852", "cp852_general_ci", True), # 40
("latin7", "latin7_general_ci", True), # 41
("latin7", "latin7_general_cs", False), # 42
("macce", "macce_bin", False), # 43
("cp1250", "cp1250_croatian_ci", False), # 44
("utf8mb4", "utf8mb4_general_ci", True), # 45
("utf8mb4", "utf8mb4_bin", False), # 46
("latin1", "latin1_bin", False), # 47
("latin1", "latin1_general_ci", False), # 48
("latin1", "latin1_general_cs", False), # 49
("cp1251", "cp1251_bin", False), # 50
("cp1251", "cp1251_general_ci", True), # 51
("cp1251", "cp1251_general_cs", False), # 52
("macroman", "macroman_bin", False), # 53
("utf16", "utf16_general_ci", True), # 54
("utf16", "utf16_bin", False), # 55
("utf16le", "utf16le_general_ci", True), # 56
("cp1256", "cp1256_general_ci", True), # 57
("cp1257", "cp1257_bin", False), # 58
("cp1257", "cp1257_general_ci", True), # 59
("utf32", "utf32_general_ci", True), # 60
("utf32", "utf32_bin", False), # 61
("utf16le", "utf16le_bin", False), # 62
("binary", "binary", True), # 63
("armscii8", "armscii8_bin", False), # 64
("ascii", "ascii_bin", False), # 65
("cp1250", "cp1250_bin", False), # 66
("cp1256", "cp1256_bin", False), # 67
("cp866", "cp866_bin", False), # 68
("dec8", "dec8_bin", False), # 69
("greek", "greek_bin", False), # 70
("hebrew", "hebrew_bin", False), # 71
("hp8", "hp8_bin", False), # 72
("keybcs2", "keybcs2_bin", False), # 73
("koi8r", "koi8r_bin", False), # 74
("koi8u", "koi8u_bin", False), # 75
None,
("latin2", "latin2_bin", False), # 77
("latin5", "latin5_bin", False), # 78
("latin7", "latin7_bin", False), # 79
("cp850", "cp850_bin", False), # 80
("cp852", "cp852_bin", False), # 81
("swe7", "swe7_bin", False), # 82
("utf8", "utf8_bin", False), # 83
("big5", "big5_bin", False), # 84
("euckr", "euckr_bin", False), # 85
("gb2312", "gb2312_bin", False), # 86
("gbk", "gbk_bin", False), # 87
("sjis", "sjis_bin", False), # 88
("tis620", "tis620_bin", False), # 89
("ucs2", "ucs2_bin", False), # 90
("ujis", "ujis_bin", False), # 91
("geostd8", "geostd8_general_ci", True), # 92
("geostd8", "geostd8_bin", False), # 93
("latin1", "latin1_spanish_ci", False), # 94
("cp932", "cp932_japanese_ci", True), # 95
("cp932", "cp932_bin", False), # 96
("eucjpms", "eucjpms_japanese_ci", True), # 97
("eucjpms", "eucjpms_bin", False), # 98
("cp1250", "cp1250_polish_ci", False), # 99
None,
("utf16", "utf16_unicode_ci", False), # 101
("utf16", "utf16_icelandic_ci", False), # 102
("utf16", "utf16_latvian_ci", False), # 103
("utf16", "utf16_romanian_ci", False), # 104
("utf16", "utf16_slovenian_ci", False), # 105
("utf16", "utf16_polish_ci", False), # 106
("utf16", "utf16_estonian_ci", False), # 107
("utf16", "utf16_spanish_ci", False), # 108
("utf16", "utf16_swedish_ci", False), # 109
("utf16", "utf16_turkish_ci", False), # 110
("utf16", "utf16_czech_ci", False), # 111
("utf16", "utf16_danish_ci", False), # 112
("utf16", "utf16_lithuanian_ci", False), # 113
("utf16", "utf16_slovak_ci", False), # 114
("utf16", "utf16_spanish2_ci", False), # 115
("utf16", "utf16_roman_ci", False), # 116
("utf16", "utf16_persian_ci", False), # 117
("utf16", "utf16_esperanto_ci", False), # 118
("utf16", "utf16_hungarian_ci", False), # 119
("utf16", "utf16_sinhala_ci", False), # 120
("utf16", "utf16_german2_ci", False), # 121
("utf16", "utf16_croatian_ci", False), # 122
("utf16", "utf16_unicode_520_ci", False), # 123
("utf16", "utf16_vietnamese_ci", False), # 124
None,
None,
None,
("ucs2", "ucs2_unicode_ci", False), # 128
("ucs2", "ucs2_icelandic_ci", False), # 129
("ucs2", "ucs2_latvian_ci", False), # 130
("ucs2", "ucs2_romanian_ci", False), # 131
("ucs2", "ucs2_slovenian_ci", False), # 132
("ucs2", "ucs2_polish_ci", False), # 133
("ucs2", "ucs2_estonian_ci", False), # 134
("ucs2", "ucs2_spanish_ci", False), # 135
("ucs2", "ucs2_swedish_ci", False), # 136
("ucs2", "ucs2_turkish_ci", False), # 137
("ucs2", "ucs2_czech_ci", False), # 138
("ucs2", "ucs2_danish_ci", False), # 139
("ucs2", "ucs2_lithuanian_ci", False), # 140
("ucs2", "ucs2_slovak_ci", False), # 141
("ucs2", "ucs2_spanish2_ci", False), # 142
("ucs2", "ucs2_roman_ci", False), # 143
("ucs2", "ucs2_persian_ci", False), # 144
("ucs2", "ucs2_esperanto_ci", False), # 145
("ucs2", "ucs2_hungarian_ci", False), # 146
("ucs2", "ucs2_sinhala_ci", False), # 147
("ucs2", "ucs2_german2_ci", False), # 148
("ucs2", "ucs2_croatian_ci", False), # 149
("ucs2", "ucs2_unicode_520_ci", False), # 150
("ucs2", "ucs2_vietnamese_ci", False), # 151
None,
None,
None,
None,
None,
None,
None,
("ucs2", "ucs2_general_mysql500_ci", False), # 159
("utf32", "utf32_unicode_ci", False), # 160
("utf32", "utf32_icelandic_ci", False), # 161
("utf32", "utf32_latvian_ci", False), # 162
("utf32", "utf32_romanian_ci", False), # 163
("utf32", "utf32_slovenian_ci", False), # 164
("utf32", "utf32_polish_ci", False), # 165
("utf32", "utf32_estonian_ci", False), # 166
("utf32", "utf32_spanish_ci", False), # 167
("utf32", "utf32_swedish_ci", False), # 168
("utf32", "utf32_turkish_ci", False), # 169
("utf32", "utf32_czech_ci", False), # 170
("utf32", "utf32_danish_ci", False), # 171
("utf32", "utf32_lithuanian_ci", False), # 172
("utf32", "utf32_slovak_ci", False), # 173
("utf32", "utf32_spanish2_ci", False), # 174
("utf32", "utf32_roman_ci", False), # 175
("utf32", "utf32_persian_ci", False), # 176
("utf32", "utf32_esperanto_ci", False), # 177
("utf32", "utf32_hungarian_ci", False), # 178
("utf32", "utf32_sinhala_ci", False), # 179
("utf32", "utf32_german2_ci", False), # 180
("utf32", "utf32_croatian_ci", False), # 181
("utf32", "utf32_unicode_520_ci", False), # 182
("utf32", "utf32_vietnamese_ci", False), # 183
None,
None,
None,
None,
None,
None,
None,
None,
("utf8", "utf8_unicode_ci", False), # 192
("utf8", "utf8_icelandic_ci", False), # 193
("utf8", "utf8_latvian_ci", False), # 194
("utf8", "utf8_romanian_ci", False), # 195
("utf8", "utf8_slovenian_ci", False), # 196
("utf8", "utf8_polish_ci", False), # 197
("utf8", "utf8_estonian_ci", False), # 198
("utf8", "utf8_spanish_ci", False), # 199
("utf8", "utf8_swedish_ci", False), # 200
("utf8", "utf8_turkish_ci", False), # 201
("utf8", "utf8_czech_ci", False), # 202
("utf8", "utf8_danish_ci", False), # 203
("utf8", "utf8_lithuanian_ci", False), # 204
("utf8", "utf8_slovak_ci", False), # 205
("utf8", "utf8_spanish2_ci", False), # 206
("utf8", "utf8_roman_ci", False), # 207
("utf8", "utf8_persian_ci", False), # 208
("utf8", "utf8_esperanto_ci", False), # 209
("utf8", "utf8_hungarian_ci", False), # 210
("utf8", "utf8_sinhala_ci", False), # 211
("utf8", "utf8_german2_ci", False), # 212
("utf8", "utf8_croatian_ci", False), # 213
("utf8", "utf8_unicode_520_ci", False), # 214
("utf8", "utf8_vietnamese_ci", False), # 215
None,
None,
None,
None,
None,
None,
None,
("utf8", "utf8_general_mysql500_ci", False), # 223
("utf8mb4", "utf8mb4_unicode_ci", False), # 224
("utf8mb4", "utf8mb4_icelandic_ci", False), # 225
("utf8mb4", "utf8mb4_latvian_ci", False), # 226
("utf8mb4", "utf8mb4_romanian_ci", False), # 227
("utf8mb4", "utf8mb4_slovenian_ci", False), # 228
("utf8mb4", "utf8mb4_polish_ci", False), # 229
("utf8mb4", "utf8mb4_estonian_ci", False), # 230
("utf8mb4", "utf8mb4_spanish_ci", False), # 231
("utf8mb4", "utf8mb4_swedish_ci", False), # 232
("utf8mb4", "utf8mb4_turkish_ci", False), # 233
("utf8mb4", "utf8mb4_czech_ci", False), # 234
("utf8mb4", "utf8mb4_danish_ci", False), # 235
("utf8mb4", "utf8mb4_lithuanian_ci", False), # 236
("utf8mb4", "utf8mb4_slovak_ci", False), # 237
("utf8mb4", "utf8mb4_spanish2_ci", False), # 238
("utf8mb4", "utf8mb4_roman_ci", False), # 239
("utf8mb4", "utf8mb4_persian_ci", False), # 240
("utf8mb4", "utf8mb4_esperanto_ci", False), # 241
("utf8mb4", "utf8mb4_hungarian_ci", False), # 242
("utf8mb4", "utf8mb4_sinhala_ci", False), # 243
("utf8mb4", "utf8mb4_german2_ci", False), # 244
("utf8mb4", "utf8mb4_croatian_ci", False), # 245
("utf8mb4", "utf8mb4_unicode_520_ci", False), # 246
("utf8mb4", "utf8mb4_vietnamese_ci", False), # 247
("gb18030", "gb18030_chinese_ci", True), # 248
("gb18030", "gb18030_bin", False), # 249
("gb18030", "gb18030_unicode_520_ci", False), # 250
]
| gpl-2.0 | -6,391,614,368,225,597,000 | 42.076923 | 75 | 0.563799 | false |
thepizzaking/whaawmp | src/common/lists.py | 1 | 5662 | # -*- coding: utf-8 -*-
# A few useful lists.
# Copyright © 2007-2011, Jeff Bailes <[email protected]>.
# This file is part of Whaaw! Media Player (whaawmp)
#
# whaawmp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the Licence, or
# (at your option) any later version.
#
# whaawmp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The Whaaw! Media Player project hereby grants permission for non-GPL
# compatible GStreamer plugins to be used and distributed together with
# GStreamer and Whaaw! Media Player. This permission is above and beyond
# the permissions granted by the GPL licence by which Whaaw! Media Player
# is covered. (See COPYING file for more details)
## The mime type list of compatible files, for the open dialogue.
compatFiles = ['application/ogg', 'application/ram', 'application/smil',
'application/vnd.rn-realmedia', 'application/x-extension-m4a',
'application/x-extension-mp4', 'application/x-flac',
'application/x-flash-video', 'application/x-matroska',
'application/x-ogg', 'application/x-quicktime-media-link',
'application/x-quicktimeplayer', 'application/x-shockwave-flash',
'application/x-shorten', 'application/x-smil', 'application/xspf+xml',
'audio/3gpp', 'audio/ac3', 'audio/AMR', 'audio/AMR-WB', 'audio/basic',
'audio/mp4', 'audio/mpeg', 'audio/mpegurl', 'audio/vnd.rn-realaudio',
'audio/x-ape', 'audio/x-flac', 'audio/x-it', 'audio/x-m4a',
'audio/x-matroska', 'audio/x-mod', 'audio/x-mp3', 'audio/x-mpeg',
'audio/x-mpegurl', 'audio/x-ms-asf', 'audio/x-ms-asx', 'audio/x-ms-wax',
'audio/x-ms-wma', 'audio/x-musepack', 'audio/x-pn-aiff', 'audio/x-pn-au',
'audio/x-pn-realaudio', 'audio/x-pn-realaudio-plugin', 'audio/x-pn-wav',
'audio/x-pn-windows-acm', 'audio/x-realaudio', 'audio/x-real-audio',
'audio/x-scpls', 'audio/x-tta', 'audio/x-wav', 'audio/x-wav',
'audio/x-wavpack', 'image/vnd.rn-realpix', 'image/x-pict', 'misc/ultravox',
'text/google-video-pointer', 'text/x-google-video-pointer', 'video/3gpp',
'video/dv', 'video/fli', 'video/flv', 'video/mp4', 'video/mp4v-es',
'video/mpeg', 'video/msvideo', 'video/quicktime', 'video/vivo',
'video/vnd.divx', 'video/vnd.rn-realvideo', 'video/vnd.vivo', 'video/webm', 'video/x-anim',
'video/x-avi', 'video/x-flc', 'video/x-fli', 'video/x-flic', 'video/x-m4v',
'video/x-matroska', 'video/x-mpeg', 'video/x-ms-asf', 'video/x-msvideo',
'video/x-ms-wm', 'video/x-ms-wmv', 'video/x-ms-wmx', 'video/x-ms-wvx',
'video/x-nsv', 'video/x-ogm+ogg', 'video/x-theora+ogg', 'text/uri-list']
## The widgets that are normally hidden.
hiddenNormalWidgets = ['btnLeaveFullscreen']
## A list of widgets to hide on fullscreen.
hiddenFSWidgets = ['menubar', 'hboxTop', 'hboxControl', 'hboxBottom', 'btnLeaveFullscreen']
## The list of widgets to reshow when the mouse is moved (fullscreen).
fsShowWMouse = ['hboxControl', 'hboxBottom', 'btnLeaveFullscreen']
## A dictionary with all the default options.
defaultOptions = { 'video/brightness' : 0,
'video/contrast' : 1,
'video/hue' : 0,
'video/saturation' : 1,
'video/force-aspect-ratio' : True,
'video/videosink' : 'default',
'video/autosub' : False,
'video/autosubexts' : 'srt,idx,sub,ssa,ass',
'video/subfont' : 'Sans 20', # TODO: maybe tweak this.
'video/subenc' : '', # Empty means use default encoding
'gui/mousehidetimeout' : 2000,
'gui/instantseek' : False,
'gui/showtimeremaining' : False,
'gui/enablevisualisation' : False,
'gui/iconsize' : 1,
'gui/fileastitle' : True,
'gui/shownextbutton' : True,
'gui/showrestartbutton' : False,
'gui/tagsyntax' : '{artist} - {title}',
'audio/volume' : 0.75,
'audio/audiosink' : 'default',
'audio/audiodevice' : '',
'misc/onextnewfile' : 1,
'misc/disablescreensaver' : True }
## Some gstreamer lists.
## A list of gstreamer stream types (in order too!).
gstStreamType = [ 'unknown', 'audio', 'video', 'text', 'element' ]
## Available colour settings (Remember to use .lower() if lowercase is required).
colourSettings = [ 'Brightness', 'Contrast', 'Hue', 'Saturation' ]
## A dictionary for keystrokes and the signals each should emit.
keypressDict = { 'space' : ['toggle-play-pause'],
'f' : ['toggle-fullscreen'],
'F11' : ['toggle-fullscreen'],
'n' : ['play-next'],
'p' : ['restart-track'],
'r' : ['restart-track'],
'q' : ['toggle-queue'],
'a' : ['toggle-advanced-controls']}
| gpl-3.0 | 9,084,138,836,555,284,000 | 51.906542 | 106 | 0.583819 | false |
babbageclunk/gatesym | gatesym/core_ffi.py | 1 | 1467 | from cffi import FFI
import os
SO_PATH = os.path.expanduser('~/Dropbox/code/rust/gatesymcore/target/release/libgatesymcore.so')
ffi = FFI()
ffi.cdef("""
void *network_new();
void network_free(void *ptr);
size_t network_add_gate(void *ptr, uint8_t kind, uint32_t cookie);
void network_add_link(void *ptr, size_t source_index, size_t dest_index, uint8_t negate);
uint8_t network_read(void *ptr, size_t gate_index);
void network_write(void *ptr, size_t gate_index, uint8_t value);
size_t network_drain(void *ptr);
""")
lib = ffi.dlopen(SO_PATH)
TIE, SWITCH, AND, OR = range(4)
class Network(object):
def __init__(self):
self._ptr = lib.network_new()
self._cookies = []
def __del__(self):
lib.network_free(self._ptr)
def add_gate(self, type_, cookie):
self._cookies.append(cookie)
return lib.network_add_gate(self._ptr, type_, len(self._cookies))
def add_link(self, source_index, dest_index, negate=False):
assert dest_index >= 0
assert source_index >= 0
lib.network_add_link(self._ptr, source_index, dest_index, negate)
def read(self, gate_index):
assert gate_index >= 0
return bool(lib.network_read(self._ptr, gate_index))
def write(self, gate_index, value):
assert gate_index >= 0
lib.network_write(self._ptr, gate_index, value)
def drain(self):
return lib.network_drain(self._ptr)
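# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It drives the Network wrapper above: two SWITCH inputs feed an AND
# gate, values are written, pending updates are drained, and the output is
# read back. The cookie values are arbitrary labels invented for this sketch,
# and the exact propagation semantics depend on the underlying Rust core, so
# treat this as a sketch rather than a guaranteed-correct test.
if __name__ == '__main__':
    net = Network()
    a = net.add_gate(SWITCH, cookie='a')    # first input switch
    b = net.add_gate(SWITCH, cookie='b')    # second input switch
    out = net.add_gate(AND, cookie='a&b')   # AND of the two switches
    net.add_link(a, out)
    net.add_link(b, out)
    net.write(a, True)
    net.write(b, True)
    net.drain()                             # propagate queued changes
    print(net.read(out))                    # expected True if both inputs are on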
| mit | 7,358,668,042,376,080,000 | 28.938776 | 96 | 0.629857 | false |
blckshrk/Weboob | weboob/applications/traveloob/traveloob.py | 1 | 4156 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon, Julien Hébert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import sys
from datetime import datetime
import logging
from weboob.capabilities.travel import ICapTravel, RoadmapFilters
from weboob.tools.application.repl import ReplApplication, defaultcount
__all__ = ['Traveloob']
class Traveloob(ReplApplication):
APPNAME = 'traveloob'
VERSION = '0.h'
COPYRIGHT = 'Copyright(C) 2010-2011 Romain Bignon'
DESCRIPTION = "Console application allowing to search for train stations and get departure times."
SHORT_DESCRIPTION = "search for train stations and departures"
CAPS = ICapTravel
DEFAULT_FORMATTER = 'table'
def add_application_options(self, group):
group.add_option('--departure-time')
group.add_option('--arrival-time')
@defaultcount(10)
def do_stations(self, pattern):
"""
stations PATTERN
Search stations.
"""
for backend, station in self.do('iter_station_search', pattern):
self.format(station)
@defaultcount(10)
def do_departures(self, line):
"""
departures STATION [ARRIVAL]
List all departures for a given station.
"""
station, arrival = self.parse_command_args(line, 2, 1)
station_id, backend_name = self.parse_id(station)
if arrival:
arrival_id, backend_name2 = self.parse_id(arrival)
if backend_name and backend_name2 and backend_name != backend_name2:
logging.error('Departure and arrival aren\'t on the same backend')
return 1
else:
arrival_id = backend_name2 = None
if backend_name:
backends = [backend_name]
elif backend_name2:
backends = [backend_name2]
else:
backends = None
for backend, departure in self.do('iter_station_departures', station_id, arrival_id, backends=backends):
self.format(departure)
def do_roadmap(self, line):
"""
roadmap DEPARTURE ARRIVAL
Display the roadmap to travel from DEPARTURE to ARRIVAL.
Command-line parameters:
--departure-time TIME requested departure time
--arrival-time TIME requested arrival time
TIME might be in form "yyyy-mm-dd HH:MM" or "HH:MM".
Example:
> roadmap Puteaux Aulnay-sous-Bois --arrival-time 22:00
"""
departure, arrival = self.parse_command_args(line, 2, 2)
filters = RoadmapFilters()
try:
filters.departure_time = self.parse_datetime(self.options.departure_time)
filters.arrival_time = self.parse_datetime(self.options.arrival_time)
except ValueError as e:
print >>sys.stderr, 'Invalid datetime value: %s' % e
print >>sys.stderr, 'Please enter a datetime in form "yyyy-mm-dd HH:MM" or "HH:MM".'
return 1
for backend, route in self.do('iter_roadmap', departure, arrival, filters):
self.format(route)
def parse_datetime(self, text):
if text is None:
return None
try:
date = datetime.strptime(text, '%Y-%m-%d %H:%M')
except ValueError:
try:
date = datetime.strptime(text, '%H:%M')
except ValueError:
raise ValueError(text)
date = datetime.now().replace(hour=date.hour, minute=date.minute)
return date
| agpl-3.0 | -5,384,248,984,045,821,000 | 32.24 | 112 | 0.633694 | false |
clones/django-evolution | django_evolution/tests/multi_db.py | 1 | 29302 | from django_evolution.tests.utils import test_sql_mapping
tests = r"""
>>> from django.db import models
>>> from django_evolution.mutations import ChangeField
>>> from django_evolution.tests.utils import test_proj_sig_multi, execute_test_sql, register_models_multi, deregister_models
>>> from django_evolution.diff import Diff
>>> import copy
# Use Cases:
# Setting a null constraint
# -- without an initial value
# -- with a null initial value
# -- with a good initial value (constant)
# -- with a good initial value (callable)
# Removing a null constraint
# Invoking a no-op change field
# Changing the max_length of a character field
# -- increasing the max_length
# -- decreasing the max_length
# Renaming a column
# Changing the db_table of a many to many relationship
# Adding an index
# Removing an index
# Adding a unique constraint
# Removing a unique constraint
# Redundant attributes. (Some attributes have changed, while others haven't but are specified anyway.)
# Changing more than one attribute at a time (on different fields)
# Changing more than one attribute at a time (on one field)
### This one is a bit dubious because changing the primary key of a model will mean
### that all referenced foreign keys and M2M relationships need to be updated
# Adding a primary key constraint
# Removing a Primary Key (Changing the primary key column)
# Options that apply to all fields:
# DB related options
# null
# db_column
# db_index
# db_tablespace (Ignored)
# primary_key
# unique
# db_table (only for many to many relationships)
# -- CharField
# max_length
# Non-DB options
# blank
# core
# default
# editable
# help_text
# radio_admin
# unique_for_date
# unique_for_month
# unique_for_year
# validator_list
# I don't know yet
# choices
>>> class ChangeSequenceFieldInitial(object):
... def __init__(self, suffix):
... self.suffix = suffix
...
... def __call__(self):
... from django.db import connections
... qn = connections['db_multi'].ops.quote_name
... return qn('char_field')
# Now, a useful test model we can use for evaluating diffs
>>> class ChangeAnchor1(models.Model):
... value = models.IntegerField()
>>> class ChangeBaseModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
# Store the base signatures
>>> anchors = [('ChangeAnchor1', ChangeAnchor1)]
>>> test_model = ('TestModel', ChangeBaseModel)
>>> start = register_models_multi('tests', 'db_multi', *anchors)
>>> start.update(register_models_multi('tests', 'db_multi', test_model))
>>> start_sig = test_proj_sig_multi('tests', test_model, *anchors)
# Setting a null constraint without an initial value
>>> class SetNotNullChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=False)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', SetNotNullChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', SetNotNullChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field1':
Property 'null' has changed
>>> print [str(e) for e in d.evolution()['tests']] # SetNotNullChangeModel
["ChangeField('TestModel', 'char_field1', initial=<<USER VALUE REQUIRED>>, null=False)"]
# Without an initial value
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False)]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
Traceback (most recent call last):
...
SimulationFailure: Cannot change column 'char_field1' on 'tests.TestModel' without a non-null initial value.
# With a null initial value
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False, initial=None)]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
Traceback (most recent call last):
...
SimulationFailure: Cannot change column 'char_field1' on 'tests.TestModel' without a non-null initial value.
# With a good initial value (constant)
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False, initial="abc's xyz")]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # SetNotNullChangeModelWithConstant
%(SetNotNullChangeModelWithConstant)s
# With a good initial value (callable)
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False, initial=ChangeSequenceFieldInitial('SetNotNullChangeModel'))]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # SetNotNullChangeModelWithCallable
%(SetNotNullChangeModelWithCallable)s
# Removing a null constraint
>>> class SetNullChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', SetNullChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', SetNullChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field2':
Property 'null' has changed
>>> print [str(e) for e in d.evolution()['tests']] # SetNullChangeModel
["ChangeField('TestModel', 'char_field2', initial=None, null=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # SetNullChangeModel
%(SetNullChangeModel)s
# Invoking a no-op change field
>>> class NoOpChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', NoOpChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', NoOpChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
<BLANKLINE>
>>> evolution = [ChangeField('TestModel', 'char_field1', null=True)]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # NoOpChangeModel
%(NoOpChangeModel)s
# Increasing the max_length of a character field
>>> class IncreasingMaxLengthChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=45)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', IncreasingMaxLengthChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', IncreasingMaxLengthChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field':
Property 'max_length' has changed
>>> print [str(e) for e in d.evolution()['tests']] # IncreasingMaxLengthChangeModel
["ChangeField('TestModel', 'char_field', initial=None, max_length=45)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # IncreasingMaxLengthChangeModel
%(IncreasingMaxLengthChangeModel)s
# Decreasing the max_length of a character field
>>> class DecreasingMaxLengthChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=1)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', DecreasingMaxLengthChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', DecreasingMaxLengthChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field':
Property 'max_length' has changed
>>> print [str(e) for e in d.evolution()['tests']] # DecreasingMaxLengthChangeModel
["ChangeField('TestModel', 'char_field', initial=None, max_length=1)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # DecreasingMaxLengthChangeModel
%(DecreasingMaxLengthChangeModel)s
# Renaming a column
>>> class DBColumnChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='customised_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', DBColumnChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', DBColumnChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field':
Property 'db_column' has changed
>>> print [str(e) for e in d.evolution()['tests']] # DBColumnChangeModel
["ChangeField('TestModel', 'int_field', initial=None, db_column='customised_db_column')"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # DBColumnChangeModel
%(DBColumnChangeModel)s
# Changing the db_table of a many to many relationship
>>> class M2MDBTableChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='custom_m2m_db_table_name')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', M2MDBTableChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', M2MDBTableChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'm2m_field1':
Property 'db_table' has changed
>>> print [str(e) for e in d.evolution()['tests']] # M2MDBTableChangeModel
["ChangeField('TestModel', 'm2m_field1', initial=None, db_table='custom_m2m_db_table_name')"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # M2MDBTableChangeModel
%(M2MDBTableChangeModel)s
# Adding an index
>>> class AddDBIndexChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=True)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', AddDBIndexChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', AddDBIndexChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field2':
Property 'db_index' has changed
>>> print [str(e) for e in d.evolution()['tests']] # AddDBIndexChangeModel
["ChangeField('TestModel', 'int_field2', initial=None, db_index=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # AddDBIndexChangeModel
%(AddDBIndexChangeModel)s
# Removing an index
>>> class RemoveDBIndexChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=False)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', RemoveDBIndexChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', RemoveDBIndexChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field1':
Property 'db_index' has changed
>>> print [str(e) for e in d.evolution()['tests']] # RemoveDBIndexChangeModel
["ChangeField('TestModel', 'int_field1', initial=None, db_index=False)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # RemoveDBIndexChangeModel
%(RemoveDBIndexChangeModel)s
# Adding a unique constraint
>>> class AddUniqueChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=True)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', AddUniqueChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', AddUniqueChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field4':
Property 'unique' has changed
>>> print [str(e) for e in d.evolution()['tests']] # AddUniqueChangeModel
["ChangeField('TestModel', 'int_field4', initial=None, unique=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # AddUniqueChangeModel
%(AddUniqueChangeModel)s
# Removing a unique constraint
>>> class RemoveUniqueChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=False)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', RemoveUniqueChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', RemoveUniqueChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field3':
Property 'unique' has changed
>>> print [str(e) for e in d.evolution()['tests']] # RemoveUniqueChangeModel
["ChangeField('TestModel', 'int_field3', initial=None, unique=False)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # RemoveUniqueChangeModel
%(RemoveUniqueChangeModel)s
# Changing more than one attribute at a time (on different fields)
>>> class MultiAttrChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column2')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=35)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', MultiAttrChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', MultiAttrChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field2':
Property 'null' has changed
In field 'int_field':
Property 'db_column' has changed
In field 'char_field':
Property 'max_length' has changed
>>> print [str(e) for e in d.evolution()['tests']] # MultiAttrChangeModel
["ChangeField('TestModel', 'char_field2', initial=None, null=True)", "ChangeField('TestModel', 'int_field', initial=None, db_column='custom_db_column2')", "ChangeField('TestModel', 'char_field', initial=None, max_length=35)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # MultiAttrChangeModel
%(MultiAttrChangeModel)s
# Changing more than one attribute at a time (on one field)
>>> class MultiAttrSingleFieldChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=35, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', MultiAttrSingleFieldChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', MultiAttrSingleFieldChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field2':
Property 'max_length' has changed
Property 'null' has changed
>>> print [str(e) for e in d.evolution()['tests']] # MultiAttrSingleFieldChangeModel
["ChangeField('TestModel', 'char_field2', initial=None, max_length=35, null=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # MultiAttrSingleFieldChangeModel
%(MultiAttrSingleFieldChangeModel)s
# Redundant attributes. (Some attributes have changed, while others haven't but are specified anyway.)
>>> class RedundantAttrsChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column3')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=35)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', RedundantAttrsChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', RedundantAttrsChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> evolutions = [
... ChangeField("TestModel", "char_field2", initial=None, null=True, max_length=30),
... ChangeField("TestModel", "int_field", initial=None, db_column="custom_db_column3", primary_key=False, unique=False, db_index=False),
... ChangeField("TestModel", "char_field", initial=None, max_length=35),
... ]
>>> for mutation in evolutions:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # RedundantAttrsChangeModel
%(RedundantAttrsChangeModel)s
# Changing a field type to another type with the same internal_type
>>> class MyIntegerField(models.IntegerField):
... def get_internal_type(self):
... return 'IntegerField'
>>> class MinorFieldTypeChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = MyIntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', MinorFieldTypeChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', MinorFieldTypeChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> d.is_empty()
True
# Clean up after the applications that were installed
>>> deregister_models('tests')
""" % test_sql_mapping('multi_db', db_name='db_multi')
| bsd-3-clause | 8,471,837,691,026,984,000 | 41.652111 | 225 | 0.687769 | false |
StephenKinger/privaan | setup.py | 1 | 5110 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import os
# note that we import the lib,
# so make sure the import has no side effects
import privaan
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
# PUT YOUR POST-INSTALL SCRIPT HERE or CALL A FUNCTION
os.system("chmod +x /etc/init.d/privaanservice")
develop.run(self)
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
# PUT YOUR POST-INSTALL SCRIPT HERE or CALL A FUNCTION
username = raw_input("Enter the username for sender: ")
password = raw_input("Enter the password for sender: ")
sender_email = raw_input("Enter the email for the sender: ")
sender_receivers = raw_input("Enter the emails for receivers (coma separated): ")
user_max_box_api_key = raw_input("Enter the map box api key (https://www.mapbox.com): ")
os.system("rm privaan/config.py && touch privaan/config.py")
f = open('privaan/config.py', 'w')
f.write('username = \''+username+'\'\n')
f.write('password = \''+password+'\'\n')
f.write('fromaddr = \''+sender_email+'\'\n')
f.write('toaddrs = \''+sender_receivers+'\'\n')
f.write('map_box_api_key = \''+user_max_box_api_key+'\'\n')
f.close()
install.run(self)
os.system("chmod +x /etc/init.d/privaanservice")
os.system("update-rc.d privaanservice defaults")
os.system("/etc/init.d/privaanservice start")
# This is just a function call. But it is a veeeeery long one
# and it takes a lot of parameters
setup(
    # the name of your library, as it will appear on PyPI
name='privaan',
    # the version of the code
version=privaan.__version__,
    # List the packages to include in the distribution.
    # Rather than doing it by hand, we use setuptools' find_packages()
    # function, which recursively looks for all Python packages in the
    # current directory.
    # This is why everything was put in a single folder:
    # it lets us use this function easily.
packages=find_packages(),
    # your name
author="Stephen KINGER",
    # Your email, knowing it will be publicly visible, with all the risks
    # that implies.
author_email="",
    # A short description
    description="Tool to monitor apache logs and notify on connections",
    # A long description, displayed to present the lib.
    # Usually the README is dumped here.
long_description=open('README.md').read(),
    # You can add a list of dependencies for your lib
    # and even pin a version. At install time, Python will try to
    # download and install them.
    #
    # Ex: ["gunicorn", "docutils >= 0.3", "lxml==0.5a7"]
    #
    # In our case we don't need any, so I comment it out, but I leave
    # it here so you know it exists, because it is very useful.
    # install_requires= ,
    # Enable the use of the MANIFEST.in file
include_package_data=True,
    # A URL pointing to the official page of your lib
url='http://github.com/StephenKinger/privaan',
    # It is customary to put some metadata about your lib
    # so that robots can classify it easily.
    # The list of allowed classifiers is long:
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers.
    #
    # There is no real rule for the content. Everyone does it a bit
    # as they see fit. Some people put nothing at all.
classifiers=[
"Programming Language :: Python",
"Development Status :: WIP",
"License :: OSI Approved",
"Natural Language :: English",
"Operating System :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Security",
],
install_requires=['mock>=2.0.0','pygtail>=0.7.0','docopt>=0.6.2','requests>=2.12.4'],
data_files=[('/etc/init.d', ['daemon/privaanservice'])],
    # This is a plugin system, but it is used almost exclusively
    # to create commands, like "django-admin".
    # For example, if we want to create the fabulous "proclame-sm" command, we
    # point that name to the proclamer() function. The command will be
    # created automatically.
    # The syntax is "command-name-to-create = package.module:function".
entry_points = {
'console_scripts': [
'privaan = privaan:privaan_run',
],
},
    # Only to be provided if your license is not listed in "classifiers",
    # which is our case
license="MIT",
    # There are still loads of possible parameters, but with these you
    # cover 90% of the needs
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
)
| mit | -9,133,602,008,088,023,000 | 35.731884 | 96 | 0.65654 | false |
plinecom/JobManager | gui/submit/fileinfo/fileinfo.py | 1 | 1576 | from PyQt4 import QtGui, QtCore
import gui.submit.fileinfo.common
import gui.submit.fileinfo.maya1
import gui.submit.fileinfo.maya2
import gui.submit.fileinfo.maya_mentalray
import gui.submit.fileinfo.nuke
class FileInfoPanel(QtGui.QTabWidget):
def __init__(self, job_list, dispatcher_list, config_info, parent=None):
QtGui.QTabWidget.__init__(self)
self._parent = parent
self._joblist = job_list
self._dipatcherList = dispatcher_list
self._configInfo = config_info
self.update_ui()
def update_ui(self):
self.clear()
job_common_panel = gui.submit.fileinfo.common.CommonPanel(self._joblist, self._parent)
self.addTab(job_common_panel, "fileinfo")
if "Maya" in self._joblist.get_current_job().getvalue("[*].*.software")[0]:
maya_panel1 = gui.submit.fileinfo.maya1.MayaPanel(self._joblist, self._parent)
self.addTab(maya_panel1, "Maya1")
maya_panel2 = gui.submit.fileinfo.maya2.MayaPanel(self._joblist, self._parent)
self.addTab(maya_panel2, "Maya2")
if "mentalray" in self._joblist.get_current_job().getvalue("[*].*.renderer")[0]:
mentalray_panel = gui.submit.fileinfo.maya_mentalray.MentalrayPanel(self._joblist, self._parent)
self.addTab(mentalray_panel, "Mentalray")
elif "Nuke" in self._joblist.get_current_job().getvalue("[*].*.software")[0]:
nuke_panel = gui.submit.fileinfo.nuke.NukePanel(self._joblist, self._parent)
self.addTab(nuke_panel, "Nuke")
| mit | 884,643,911,518,618,900 | 40.473684 | 112 | 0.654188 | false |
aziele/alfpy | tests/test_word_distance.py | 1 | 10504 | import unittest
from alfpy import word_pattern
from alfpy import word_vector
from alfpy import word_distance
from alfpy.utils import distmatrix
from . import utils
class DistanceTest(unittest.TestCase, utils.ModulesCommonTest):
def __init__(self, *args, **kwargs):
super(DistanceTest, self).__init__(*args, **kwargs)
utils.ModulesCommonTest.set_test_data()
self.pattern = word_pattern.create(self.dna_records.seq_list, 2)
self.counts = word_vector.Counts(self.dna_records.length_list,
self.pattern)
self.freqs = word_vector.Freqs(self.dna_records.length_list,
self.pattern)
def test_angle_cos_diss_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'angle_cos_diss')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.2797355 0.1500672',
'seq2 0.2797355 0.0000000 0.1261027',
'seq3 0.1500672 0.1261027 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_angle_cos_evol_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'angle_cos_evol')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.3281368 0.1625980',
'seq2 0.3281368 0.0000000 0.1347925',
'seq3 0.1625980 0.1347925 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_add_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_add')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0810458 0.0507937',
'seq2 0.0810458 0.0000000 0.0526611',
'seq3 0.0507937 0.0526611 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult1_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_mult1')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0621975 0.0501075',
'seq2 0.0621975 0.0000000 0.0955847',
'seq3 0.0501075 0.0955847 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult2_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_mult2')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0621975 0.0404611',
'seq2 0.0621975 0.0000000 0.0531478',
'seq3 0.0404611 0.0531478 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_seqlen1_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_seqlen1')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0065879 0.0032065',
'seq2 0.0065879 0.0000000 0.0041065',
'seq3 0.0032065 0.0041065 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_seqlen2_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_seqlen2')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0072101 0.0038263',
'seq2 0.0072101 0.0000000 0.0039866',
'seq3 0.0038263 0.0039866 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_manhattan_freqs(self):
dist = word_distance.Distance(self.freqs, 'manhattan')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 1.2156863 0.7619048",
"seq2 1.2156863 0.0000000 0.7899160",
"seq3 0.7619048 0.7899160 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_chebyshev_freqs(self):
dist = word_distance.Distance(self.freqs, 'chebyshev')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.1936275 0.1250000",
"seq2 0.1936275 0.0000000 0.1428571",
"seq3 0.1250000 0.1428571 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_braycurtis_freqs(self):
dist = word_distance.Distance(self.freqs, 'braycurtis')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.6078431 0.3809524",
"seq2 0.6078431 0.0000000 0.3949580",
"seq3 0.3809524 0.3949580 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult_freqs(self):
dist = word_distance.Distance(self.freqs, 'diff_abs_mult')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.0621975 0.0404611",
"seq2 0.0621975 0.0000000 0.0531478",
"seq3 0.0404611 0.0531478 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_kld_freqs(self):
dist = word_distance.Distance(self.freqs, 'kld')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.0932800 0.0435210",
"seq2 0.0932800 0.0000000 0.0447391",
"seq3 0.0435210 0.0447391 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_lcc_freqs(self):
dist = word_distance.Distance(self.freqs, 'lcc')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.6205496 0.4017554",
"seq2 0.6205496 0.0000000 0.2550506",
"seq3 0.4017554 0.2550506 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_canberra_freqs(self):
dist = word_distance.Distance(self.freqs, 'canberra')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 10.3372258 7.1836838",
"seq2 10.3372258 0.0000000 6.6280959",
"seq3 7.1836838 6.6280959 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_minkowski_freqs(self):
dist = word_distance.Distance(self.freqs, 'minkowski')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.3763512 0.2532387",
"seq2 0.3763512 0.0000000 0.2603008",
"seq3 0.2532387 0.2603008 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_minkowski_throws_exception(self):
dist = word_distance.Distance(self.freqs, 'minkowski')
with self.assertRaises(Exception) as context:
dist.pwdist_minkowski(0, 1, 0.2)
self.assertIn('p must be at least 1', str(context.exception))
def test_jsd_freqs(self):
dist = word_distance.Distance(self.freqs, 'jsd')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.4608882 0.2550278",
"seq2 0.4608882 0.0000000 0.2457790",
"seq3 0.2550278 0.2457790 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_squared_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_squared')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.1416402 0.0641298',
'seq2 0.1416402 0.0000000 0.0677565',
'seq3 0.0641298 0.0677565 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_norm_counts(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.counts, 'euclid_norm')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 7.5498344 5.4772256',
'seq2 7.5498344 0.0000000 4.3588989',
'seq3 5.4772256 4.3588989 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_norm_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_norm')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.3763512 0.2532387',
'seq2 0.3763512 0.0000000 0.2603008',
'seq3 0.2532387 0.2603008 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_google_freqs(self):
dist = word_distance.Distance(self.freqs, 'google')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.6078431 0.3809524',
'seq2 0.6078431 0.0000000 0.3949580',
'seq3 0.3809524 0.3949580 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
if __name__ == '__main__':
unittest.main()
| mit | 4,758,581,940,622,395,000 | 43.134454 | 77 | 0.564261 | false |
mitsuhiko/celery | celery/contrib/batches.py | 1 | 1489 | from itertools import count
from collections import deque, defaultdict
from celery.task.base import Task
class Batches(Task):
abstract = True
flush_every = 10
def __init__(self):
self._buffer = deque()
self._count = count().next
def execute(self, wrapper, pool, loglevel, logfile):
self._buffer.append((wrapper, pool, loglevel, logfile))
if not self._count() % self.flush_every:
self.flush(self._buffer)
self._buffer.clear()
def flush(self, tasks):
for wrapper, pool, loglevel, logfile in tasks:
wrapper.execute_using_pool(pool, loglevel, logfile)
class Counter(Task):
abstract = True
flush_every = 10
def __init__(self):
self._buffer = deque()
self._count = count().next
def execute(self, wrapper, pool, loglevel, logfile):
self._buffer.append((wrapper.args, wrapper.kwargs))
if not self._count() % self.flush_every:
self.flush(self._buffer)
self._buffer.clear()
def flush(self, buffer):
raise NotImplementedError("Counters must implement 'flush'")
class ClickCounter(Task):
flush_every = 1000
def flush(self, buffer):
urlcount = defaultdict(lambda: 0)
for args, kwargs in buffer:
urlcount[kwargs["url"]] += 1
for url, count in urlcount.items():
print(">>> Clicks: %s -> %s" % (url, count))
# increment_in_db(url, n=count)
| bsd-3-clause | -4,240,570,643,636,673,000 | 25.589286 | 68 | 0.599731 | false |
dgfree/Minesweeper-Clone | screen.py | 1 | 6335 | # -*- coding: utf-8 -*-
"""
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
date: Mon Jan 19 14:52:04 2015
@author: daniel
"""
import pygame
from block import Block
from constants import *
class Screen:
FLAG_LOCATION = "img/flag.png"
MINE_LOCATION = "img/mine.png"
RED_X_LOCATION = "img/red_x.png"
BLOCK_COLOR = GRAY
CLICKED_BLOCK_COLOR = DARKGRAY
BACKGROUND_COLOR = BLACK
MINE_COLOR = RED
# Bottom portion of screen to display counters, time, etc.
INFO_HEIGHT = 75
def __init__(self, grid):
# Screen elements
self.screen_size = self._get_screen_size(grid)
self.screen = pygame.display.set_mode(self.screen_size)
#TODO: Grid class's/block classes initialization change
self.block_font_size = int(0.7 * Block.WIDTH)
self.display_font_size = int(grid.row_count * 1.2 - grid.col_count*.2)
pygame.display.set_caption("Minesweeper Alpha")
self.word_font = pygame.font.SysFont('Arial',
self.display_font_size,
True, False)
self.block_font = pygame.font.SysFont('Courier',
self.block_font_size,
True, False)
self.flag_image = pygame.image.load(self.FLAG_LOCATION)
self.mine_image = pygame.image.load(self.MINE_LOCATION)
self.red_x_image = pygame.image.load(self.RED_X_LOCATION)
self.initial_draw(grid)
def initial_draw(self, grid):
self.screen.fill(self.BACKGROUND_COLOR)
for row in range(grid.row_count):
for col in range(grid.col_count):
self._draw_empty_block(row, col, self.BLOCK_COLOR)
self._display_flag_counter(0)
self._display_mine_counter(grid.mine_count)
self._display_time_counter(0)
def draw_grid(self, grid):
for row in grid.blocks:
for block in row:
self._draw_block(block)
def game_over(self, grid):
self._draw_empty_block(grid.last_clicked_block.row,
grid.last_clicked_block.col, self.MINE_COLOR)
grid.reveal_mines_and_flags()
self.draw_grid(grid)
self._display_text("You lose!", 10)
self._display_text("Left click to restart.", 30)
self._display_text("Right click to quit.", 50)
def victory_screen(self, grid):
grid.reveal_mines_and_flags()
self.draw_grid(grid)
self._display_text("You win!", 10)
self._display_text("Left click to restart.", 30)
self._display_text("Right click to quit.", 50)
def _get_screen_size(self, grid):
screen_height = grid.row_count * (Block.HEIGHT + Block.MARGIN) + \
Block.MARGIN + self.INFO_HEIGHT
screen_width = grid.col_count * (Block.WIDTH + Block.MARGIN) + \
Block.MARGIN
return (screen_width, screen_height)
def _draw_empty_block(self, row, col, color):
# TODO: Fix this. Since the blocks aren't generated until after
# the user clicks, we have to do it like this for now. Perhaps
# we can find a different way to initialize blocks.
pygame.draw.rect(self.screen, color,
(col * Block.WIDTH + (col + 1) *
Block.MARGIN,
row * Block.HEIGHT + (row + 1) *
Block.MARGIN, Block.WIDTH, Block.HEIGHT))
def _draw_block(self, block):
if block.is_revealed:
if not block.is_mine and not block.flagged:
self._draw_empty_block(block.row, block.col,
self.CLICKED_BLOCK_COLOR)
if block.mine_neighbor_count > 0:
self._draw_block_number(block)
elif block.is_mine and not block.flagged:
self._draw_mine(block)
elif block.flagged and not block.is_mine:
self._draw_mine(block)
self._draw_image(self.red_x_image, block)
else:
if block.flagged:
self._draw_image(self.flag_image, block)
elif not block.flagged:
self._draw_empty_block(block.row, block.col, self.BLOCK_COLOR)
def _draw_block_number(self, block):
text = self.block_font.render(str(block.mine_neighbor_count),
True, block.color)
self.screen.blit(text, [block.x + 7, block.y + 3])
def _draw_mine(self, block):
self._draw_image(self.mine_image, block)
def _draw_image(self, image, block):
self.screen.blit(image, (block.x, block.y, block.WIDTH, block.HEIGHT))
def _display_text(self, string, y_offset):
y0 = self.screen_size[1] - self.INFO_HEIGHT + y_offset
text = self.word_font.render(string, True, WHITE)
text_loc = self._get_centered_text(string, y0)
pygame.draw.rect(self.screen, BLACK, text_loc)
self.screen.blit(text, text_loc)
def _get_centered_text(self, string, y):
text = self.word_font.render(string, True, WHITE)
textpos = text.get_rect()
textpos.centerx = self.screen_size[0] // 2
textpos.centery = y
return textpos
def _display_time_counter(self, time):
y_offset = 40
self._display_counter("TIME: ", time, y_offset)
def _display_mine_counter(self, mine_count):
y_offset = 20
self._display_counter("MINES: ", mine_count, y_offset)
def _display_flag_counter(self, flag_count):
y_offset = 0
self._display_counter("FLAGS: ", flag_count, y_offset)
def _display_counter(self, prestring, count, y_offset):
x0 = 0
y0 = self.screen_size[1] - self.INFO_HEIGHT + y_offset
string = prestring + str(count)
text = self.word_font.render(string, True, WHITE)
text_size = self.word_font.size(string)
pygame.draw.rect(self.screen, self.BACKGROUND_COLOR,
(x0, y0, text_size[0], text_size[1]))
self.screen.blit(text, [x0, y0, text_size[0], text_size[1]])
| mpl-2.0 | 1,783,015,336,399,862,800 | 35.408046 | 78 | 0.576006 | false |
googlegenomics/gcp-variant-transforms | gcp_variant_transforms/libs/partitioning.py | 1 | 15496 | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to create integer range partitioned BigQuery tables."""
import json
import logging
import math
import os
import time
from google.cloud import bigquery
from gcp_variant_transforms.libs import bigquery_util
_GET_COLUMN_NAMES_QUERY = (
'SELECT column_name '
'FROM `{PROJECT_ID}`.{DATASET_ID}.INFORMATION_SCHEMA.COLUMNS '
'WHERE table_name = "{TABLE_ID}"')
_GET_CALL_SUB_FIELDS_QUERY = (
'SELECT field_path '
'FROM `{PROJECT_ID}`.{DATASET_ID}.INFORMATION_SCHEMA.COLUMN_FIELD_PATHS '
'WHERE table_name = "{TABLE_ID}" AND column_name="{CALL_COLUMN}"')
_MAIN_TABLE_ALIAS = 'main_table'
_CALL_TABLE_ALIAS = 'call_table'
_COLUMN_AS = '{TABLE_ALIAS}.{COL} AS `{COL_NAME}`'
_FLATTEN_CALL_QUERY = (
'SELECT {SELECT_COLUMNS} '
'FROM `{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}` as {MAIN_TABLE_ALIAS}, '
'UNNEST({CALL_COLUMN}) as {CALL_TABLE_ALIAS}')
MAX_RANGE_END = pow(2, 63) - 1
_MAX_BQ_NUM_PARTITIONS = 4000
_RANGE_END_SIG_DIGITS = 4
_RANGE_INTERVAL_SIG_DIGITS = 1
_TOTAL_BASE_PAIRS_SIG_DIGITS = 4
_PARTITION_SIZE_SIG_DIGITS = 1
_BQ_CREATE_TABLE_COMMAND = (
'bq mk --table {FULL_TABLE_ID} {SCHEMA_FILE_PATH}')
_BQ_CREATE_PARTITIONED_TABLE_COMMAND = (
'bq mk --table --range_partitioning='
'{PARTITION_COLUMN},0,{RANGE_END},{RANGE_INTERVAL} '
'--clustering_fields=start_position,end_position '
'{FULL_TABLE_ID} {SCHEMA_FILE_PATH}')
class FlattenCallColumn():
"""Flattens call column to convert variant opt tables to sample opt tables."""
def __init__(self, base_table_id, suffixes, append):
    # type: (str, List[str]) -> None
"""Initialize `FlattenCallColumn` object.
    In preparation for converting variant lookup optimized tables to sample
    lookup optimized tables, this class is initialized with the base table name
    of the variant opt tables (set using the --output_table flag) and the list
    of suffixes (extracted from the sharding config file).
Args:
base_table_id: Base name of variant opt outputs (set by --output_table).
suffixes: List of suffixes (extracted from sharding config file).
append: Whether or not we are appending to the destination tables.
"""
(self._project_id,
self._dataset_id,
self._base_table) = bigquery_util.parse_table_reference(base_table_id)
assert suffixes
self._suffixes = suffixes[:]
self._column_names = []
self._sub_fields = []
job_config = bigquery.job.QueryJobConfig(
write_disposition='WRITE_TRUNCATE' if append else 'WRITE_EMPTY')
self._client = bigquery.Client(project=self._project_id,
default_query_job_config=job_config)
self._find_one_non_empty_table()
def _find_one_non_empty_table(self):
# Any non empty input table can be used as the source for schema extraction.
for suffix in self._suffixes:
table_id = bigquery_util.compose_table_name(self._base_table, suffix)
if not bigquery_util.table_empty(
self._project_id, self._dataset_id, table_id):
self._schema_table_id = table_id
return
raise ValueError('All of the variant optimized tables are empty!')
def _run_query(self, query):
query_job = self._client.query(query)
num_retries = 0
while True:
try:
iterator = query_job.result(timeout=300)
except TimeoutError as e:
logging.warning('Time out waiting for query: %s', query)
if num_retries < bigquery_util.BQ_NUM_RETRIES:
num_retries += 1
time.sleep(90)
else:
raise e
else:
break
result = []
for i in iterator:
result.append(str(list(i.values())[0]))
return result
def _get_column_names(self):
if not self._column_names:
query = _GET_COLUMN_NAMES_QUERY.format(PROJECT_ID=self._project_id,
DATASET_ID=self._dataset_id,
TABLE_ID=self._schema_table_id)
self._column_names = self._run_query(query)[:]
assert self._column_names
return self._column_names
def _get_call_sub_fields(self):
if not self._sub_fields:
query = _GET_CALL_SUB_FIELDS_QUERY.format(
PROJECT_ID=self._project_id,
DATASET_ID=self._dataset_id,
TABLE_ID=self._schema_table_id,
CALL_COLUMN=bigquery_util.ColumnKeyConstants.CALLS)
# returned list is [call, call.name, call.genotype, call.phaseset, ...]
result = self._run_query(query)[1:] # Drop the first element
self._sub_fields = [sub_field.split('.')[1] for sub_field in result]
assert self._sub_fields
return self._sub_fields
def _get_flatten_column_names(self):
column_names = self._get_column_names()
sub_fields = self._get_call_sub_fields()
select_list = []
for column in column_names:
if column != bigquery_util.ColumnKeyConstants.CALLS:
select_list.append(
_COLUMN_AS.format(TABLE_ALIAS=_MAIN_TABLE_ALIAS, COL=column,
COL_NAME=column))
else:
sub_list = []
for s_f in sub_fields:
sub_list.append(
_COLUMN_AS.format(TABLE_ALIAS=_CALL_TABLE_ALIAS, COL=s_f,
COL_NAME=s_f))
if s_f == bigquery_util.ColumnKeyConstants.CALLS_SAMPLE_ID:
select_list.append(sub_list[-1])
call_column = ('STRUCT(' + ', '.join(sub_list) + ') AS ' +
bigquery_util.ColumnKeyConstants.CALLS)
select_list.append(call_column)
return ', '.join(select_list)
def _copy_to_flatten_table(self, output_table_id, cp_query):
job_config = bigquery.job.QueryJobConfig(destination=output_table_id)
query_job = self._client.query(cp_query, job_config=job_config)
num_retries = 0
while True:
try:
_ = query_job.result(timeout=600)
except TimeoutError as e:
logging.warning('Time out waiting for query: %s', cp_query)
if num_retries < bigquery_util.BQ_NUM_RETRIES:
num_retries += 1
time.sleep(90)
else:
logging.error('Copy to table query failed: %s', output_table_id)
raise e
else:
break
logging.info('Copy to table query was successful: %s', output_table_id)
def _convert_variant_schema_to_sample_schema(self, variant_schema):
schema_json = []
for schema_field in variant_schema:
schema_item = schema_field.to_api_repr()
if schema_item.get('name') == bigquery_util.ColumnKeyConstants.CALLS:
# (1) Modify its type from REPEATED to NULLABLE
call_mode = schema_item.get('mode')
if call_mode != bigquery_util.TableFieldConstants.MODE_REPEATED:
logging.error('Expected REPEATED mode for column `call` but got: %s',
call_mode)
raise ValueError('Wrong mode for column `call`: {}'.format(call_mode))
schema_item['mode'] = bigquery_util.TableFieldConstants.MODE_NULLABLE
# (2) Duplicate sample_id as an independent column to the table
sub_items = schema_item.get('fields')
sample_id_found = False
for sub_item in sub_items:
if (sub_item.get('name') ==
bigquery_util.ColumnKeyConstants.CALLS_SAMPLE_ID):
schema_json.append(sub_item)
sample_id_found = True
break
if not sample_id_found:
logging.info('`sample_id` column under `call` column was not found.')
raise ValueError(
'`sample_id` column under `call` column was not found.')
schema_json.append(schema_item)
return schema_json
def get_flatten_table_schema(self, schema_file_path):
# type: (str) -> bool
"""Write the flatten table's schema to the given json file.
This method basically performs the following tasks:
* Extract variant table schema using BigQuery API.
* Copy all columns without any change except `call` column:
* Modify mode from REPEATED TO NULLABLE
* Duplicate call.sample_id column as sample_id column (for partitioning)
Args:
schema_file_path: The json schema will be written to this file.
    Returns:
A bool value indicating if the schema was successfully extracted.
"""
full_table_id = '{}.{}.{}'.format(
self._project_id, self._dataset_id, self._schema_table_id)
try:
variant_table = self._client.get_table(full_table_id)
except TimeoutError as e:
logging.error('Failed to get table using its id: "%s"', full_table_id)
raise e
variant_schema = variant_table.schema
sample_schema = self._convert_variant_schema_to_sample_schema(
variant_schema)
with open(schema_file_path, 'w') as outfile:
json.dump(sample_schema, outfile, sort_keys=True, indent=2)
logging.info('Successfully extracted the schema of flatten table.')
return True
def copy_to_flatten_table(self, output_base_table_id):
# type: (str) -> None
"""Copies data from variant lookup optimized tables to sample lookup tables.
Copies rows from _base_table_id__* to output_base_table_id__* for each value
in _suffixes. Here we assume destination tables are already created and are
partitioned based on call_sample_id column. The copying process is done via
a flattening query similar to the one used in get_flatten_table_schema().
Note that if source tables have repeated sample_ids then output table will
have more rows than input table. Essentially:
Number of output rows = Number of input rows * Number of repeated sample_ids
Args:
output_base_table_id: Base table name of output tables.
"""
# Here we assume all output_table_base + suffices[:] are already created.
(output_project_id, output_dataset_id, output_base_table) = (
bigquery_util.parse_table_reference(output_base_table_id))
select_columns = self._get_flatten_column_names()
for suffix in self._suffixes:
input_table_id = bigquery_util.compose_table_name(self._base_table,
suffix)
output_table_id = bigquery_util.compose_table_name(output_base_table,
suffix)
full_output_table_id = '{}.{}.{}'.format(
output_project_id, output_dataset_id, output_table_id)
cp_query = _FLATTEN_CALL_QUERY.format(
SELECT_COLUMNS=select_columns, PROJECT_ID=self._project_id,
DATASET_ID=self._dataset_id, TABLE_ID=input_table_id,
MAIN_TABLE_ALIAS=_MAIN_TABLE_ALIAS,
CALL_COLUMN=bigquery_util.ColumnKeyConstants.CALLS,
CALL_TABLE_ALIAS=_CALL_TABLE_ALIAS)
self._copy_to_flatten_table(full_output_table_id, cp_query)
logging.info('Flatten table is fully loaded: %s', full_output_table_id)
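# A minimal usage sketch (the table ids, suffixes and file path below are
# hypothetical, for illustration only): flatten variant-optimized shards into
# sample-optimized tables.
#
#   flattener = FlattenCallColumn('my-project:my_dataset.variants',
#                                 ['chr1', 'chr2'], append=False)
#   flattener.get_flatten_table_schema('/tmp/flatten_schema.json')
#   # ... create the partitioned output tables from that schema, then:
#   flattener.copy_to_flatten_table('my-project:my_dataset.variants_by_sample')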
def calculate_optimal_range_interval(range_end):
# type: (int) -> Tuple[int, int]
"""Calculates the optimal range interval given range end value.
BQ allows up to 4000 integer range partitions. This method divides
[0, range_end] range into 3999 partitions. Every value outside of this
range will fall into the 4000th partition. Note this partitioning method
assumes variants are distributed uniformly.
Since given range_end might be a lower estimate, we add a little extra
buffer to the given value to avoid a situation where too many rows fall
into the 4000th partition. The size of added buffer is controlled by the
value of two consts:
* _RANGE_END_SIG_DIGITS is set to 4 which adds [10^4, 2 * 10^4)
* _RANGE_INTERVAL_SIG_DIGITS is set to 1 which adds [0, 10^1 * 3999)
In total we add [10^4, 10 * 3999 + 2 * 10^4) buffer to range_end.
range_end must be capped at MAX_RANGE_END = pow(2, 63) - 1 which is required
by BigQuery integer range partitioning.
Args:
range_end: the maximum value of the column subject to partitioning
Returns:
A tuple (partition size, partition size * 3999).
"""
if range_end >= MAX_RANGE_END:
return(int(MAX_RANGE_END / float(_MAX_BQ_NUM_PARTITIONS)),
MAX_RANGE_END)
# These two operations add [10^4, 2 * 10^4) buffer to range_end.
range_end += math.pow(10, _RANGE_END_SIG_DIGITS)
range_end = (
math.ceil(range_end / math.pow(10, _RANGE_END_SIG_DIGITS)) *
math.pow(10, _RANGE_END_SIG_DIGITS))
# We use 4000 - 1 = 3999 partitions just to avoid hitting the BQ limits.
range_interval = range_end / (_MAX_BQ_NUM_PARTITIONS - 1)
# This operation adds another [0, 10 * 3999) buffer to the range_end.
range_interval_round_up = int(
math.ceil(range_interval / pow(10, _RANGE_INTERVAL_SIG_DIGITS)) *
math.pow(10, _RANGE_INTERVAL_SIG_DIGITS))
range_end_round_up = range_interval_round_up * (_MAX_BQ_NUM_PARTITIONS - 1)
if range_end_round_up < MAX_RANGE_END:
return (range_interval_round_up, range_end_round_up)
else:
return(int(MAX_RANGE_END / float(_MAX_BQ_NUM_PARTITIONS)),
MAX_RANGE_END)
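# Illustrative arithmetic (example value, not from the original module): for a
# chr1-sized range_end of 249,250,621 the buffering above yields
#   range_end -> 249,270,000 after adding 10^4 and rounding up to 10^4,
#   range_interval -> ceil(249,270,000 / 3999 / 10) * 10 = 62,340,
# so the function returns (62340, 62340 * 3999) = (62340, 249297660).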
def create_bq_table(full_table_id, schema_file_path, partition_column=None,
range_end=0):
"""Creates an integer range partitioned table using `bq mk table...` command.
Since beam.io.BigQuerySink is unable to create an integer range partition
we use `bq mk table...` to achieve this goal. Note that this command runs on
the worker that monitors the Dataflow job.
Args:
full_table_id: for example: projet:dataset.table_base_name__chr1
schema_file_path: a json file that contains the schema of the table
partition_column: name of the column intended for integer range partitioning
range_end: the maximum value of the column subject to partitioning
"""
if not schema_file_path:
raise ValueError('Missing `schema_file_path` while calling create_bq_table')
if not partition_column and range_end != 0:
raise ValueError(
'When `partition_column` is set to None `range_end` must be 0')
if partition_column:
(range_interval, range_end_enlarged) = (
calculate_optimal_range_interval(range_end))
bq_command = _BQ_CREATE_PARTITIONED_TABLE_COMMAND.format(
PARTITION_COLUMN=partition_column,
RANGE_END=range_end_enlarged,
RANGE_INTERVAL=range_interval,
FULL_TABLE_ID=full_table_id,
SCHEMA_FILE_PATH=schema_file_path)
else:
bq_command = _BQ_CREATE_TABLE_COMMAND.format(
FULL_TABLE_ID=full_table_id,
SCHEMA_FILE_PATH=schema_file_path)
_run_table_creation_command(bq_command)
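# Hypothetical invocation (table id, schema path and range_end are made up):
#   create_bq_table('my-project:my_dataset.samples__chr1', '/tmp/schema.json',
#                   partition_column='sample_id', range_end=4000000000)
# which runs a `bq mk --table --range_partitioning=sample_id,0,...` command on
# the worker that monitors the Dataflow job.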
def _run_table_creation_command(bq_command):
result = os.system(bq_command)
if result != 0:
time.sleep(30) # In our integration tests sometime we overwhelm BQ server.
result_second_attempt = os.system(bq_command)
if result_second_attempt != 0:
raise ValueError(
'Failed to create a BigQuery table using "{}" command.'.format(
bq_command))
| apache-2.0 | 1,038,884,103,644,525,200 | 40.212766 | 80 | 0.657137 | false |
datadreamer/research-chronology-revisited | cgi-bin/pydeliciouslibs/feedparser/feedparser.py | 2 | 121876 | #!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.0.2"# + "$Revision: 1.88 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2005, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
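# Illustrative overrides (application name and URL are placeholders), typically
# set by the embedding application after importing this module:
#   import feedparser
#   feedparser.USER_AGENT = 'MyApp/1.0 +http://example.com/myapp/'
#   feedparser.ACCEPT_HEADER = None   # don't send an Accept header
#   feedparser.TIDY_MARKUP = 1        # requires mxTidy or utidylib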
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# timeoutsocket allows feedparser to time out rather than hang forever on ultra-slow servers.
# Python 2.3 now has this functionality available in the standard socket library, so under
# 2.3 or later you don't need to install anything. In fact, under Python 2.4, timeoutsocket
# write all sorts of crazy errors to stderr while running my unit tests, so it's probably
# outlived its usefulness.
import socket
if hasattr(socket, 'setdefaulttimeout'):
socket.setdefaulttimeout(20)
else:
try:
import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
timeoutsocket.setDefaultSocketTimeout(20)
except ImportError:
pass
import urllib, urllib2
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
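# A small sketch of the alias behavior above (values are illustrative only):
#   d = FeedParserDict()
#   d['description'] = 'An example feed'  # stored under 'subtitle' via keymap
#   d['description'] == d['subtitle'] == d.subtitle   # all resolve to the same value
#   d['url'] = 'http://example.org/'      # stored under 'href'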
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
if baselang:
self.feeddata['language'] = baselang
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
tag = tag.split(':')[-1]
return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def name2cp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith('&#') and k.endswith(';'):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: name2cp(ref)
except KeyError: text = '&%s;' % ref
else: text = unichr(name2cp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
# resolve relative URIs within embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
# sanitize embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding)
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
self.inimage = 1
self.push('image', 0)
context = self._getContext()
context.setdefault('image', FeedParserDict())
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['textinput']['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
elif self.inimage:
context = self._getContext()
context['image']['href'] = value
elif self.intextinput:
context = self._getContext()
context['textinput']['link'] = value
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author = context.get(key)
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
if not emailmatch: return
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
context.setdefault('%s_detail' % key, FeedParserDict())
context['%s_detail' % key]['name'] = author
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
self.push('license', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('license')
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
def _end_creativecommons_license(self):
self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
attrsD.setdefault('type', 'text/html')
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context = self._getContext()
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD['rel'] == 'enclosure':
self._start_enclosure(attrsD)
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
if self.intextinput:
context['textinput']['link'] = value
if self.inimage:
context['image']['link'] = value
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_title(self, attrsD):
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
value = self.popContent('title')
context = self._getContext()
if self.intextinput:
context['textinput']['title'] = value
elif self.inimage:
context['image']['title'] = value
_end_dc_title = _end_title
_end_media_title = _end_title
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
context = self._getContext()
if self.intextinput:
context['textinput']['description'] = value
elif self.inimage:
context['image']['description'] = value
self._summaryKey = None
_end_abstract = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href:
context = self._getContext()
if not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding):
self.encoding = encoding
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
for key, value in attrs:
if type(value) != type(u''):
value = unicode(value, self.encoding)
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
self.pieces.append('&%(ref)s;' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding):
_BaseHTMLProcessor.__init__(self, encoding)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri)
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding)
p.feed(htmlSource)
return p.output()
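# Illustrative sketch (comment only, not executed): _resolveRelativeURIs rewrites
# relative URIs in embedded markup against the entry's base URI, e.g.
#
#   >>> _resolveRelativeURIs('<a href="/about">about</a>', 'http://example.org/feed', 'utf-8')
#   '<a href="http://example.org/about">about</a>'
#
# The exact markup reconstruction follows _BaseHTMLProcessor's rules above.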
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
p = _HTMLSanitizer(encoding)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = type(data) == type(u'')
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
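# Illustrative sketch (comment only, not executed): elements and attributes outside
# the whitelists above are dropped, and script/applet content is suppressed, e.g.
#
#   >>> _sanitizeHTML('<p onclick="evil()">hi<script>alert(1)</script></p>', 'utf-8')
#   '<p>hi</p>'
#
# When a Tidy interface is installed (TIDY_MARKUP), the output is additionally
# cleaned up, so whitespace details may differ.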
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version.split()[0] >= '2.3.3'
assert base64 != None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it must be a tuple of 9 integers
as returned by gmtime() in the standard Python time module. This MUST
be in GMT (Greenwich Mean Time). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
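# Illustrative usage (comment only, not executed; assumes a reachable URL):
#
#   >>> f = _open_resource('http://example.org/atom.xml', None, None, USER_AGENT, None, [])
#   >>> raw = f.read()
#   >>> f.close()
#
# A local filename, an open file-like object, or a raw string is handled the same
# way, which is what lets parse() accept any of them.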
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
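# Illustrative sketch of a custom handler (hypothetical format, not one of the
# handlers shipped below):
#
#   def _parse_date_epoch(dateString):
#       '''Parse a date given as integer seconds since the epoch, e.g. '1136073600' '''
#       if not dateString.isdigit(): return
#       return time.gmtime(int(dateString))
#   registerDateHandler(_parse_date_epoch)
#
# Handlers are tried most-recently-registered first and must return a 9-tuple in GMT.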
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params.keys():
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(params.get('second', 0))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
# daylight savings is complex, but not needed for feedparser's purposes
# as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent,
    # and most implementations have DST bugs
daylight_savings_flag = 0
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
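# Illustrative expectation (comment only, not executed): compact and extended
# ISO-8601 forms normalize to the same value, e.g.
#
#   >>> _parse_date_iso8601('20040105') == _parse_date_iso8601('2004-01-05')
#   True
#
# subject to the local-time round trip through time.mktime() noted above.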
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
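# Illustrative expectation (comment only, not executed):
#
#   >>> _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')[:6]
#   (2004, 1, 1, 19, 48, 21)
#
# Zone names known to rfc822.py, plus the extras added above, are honored.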
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
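# Illustrative usage (comment only, not executed): handlers are tried in turn and
# the first well-formed 9-tuple wins.
#
#   >>> _parse_date('2004-01-05T12:00:00Z') is not None
#   True
#   >>> _parse_date('not a date') is None
#   True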
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
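# Illustrative behaviour of _stripDoctype (the feed snippet below is made up):
# a Netscape RSS 0.91 doctype is recognized and the declaration is removed
# from the returned data.
#
#     version, data = _stripDoctype(
#         '<!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN">'
#         '<rss version="0.91">...</rss>')
#     # version == 'rss091n'; data == '<rss version="0.91">...</rss>'
#
# <!ENTITY ...> declarations are stripped as well; documents without a DOCTYPE
# come back with version None and their data otherwise unchanged.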
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding, 'utf-8', 'windows-1252'):
if proposed_encoding in tried_encodings: continue
if not proposed_encoding: continue
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = 1
use_strict_parser = 1
break
except:
pass
tried_encodings.append(proposed_encoding)
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, and windows-1252 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
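# A minimal usage sketch for parse() (assuming this module is importable as
# `feedparser`; the URL is illustrative):
#
#     import feedparser
#     d = feedparser.parse('http://example.org/atom.xml')
#     print d['bozo'], d['version'], len(d['entries'])
#     for entry in d['entries']:
#         print entry.get('title')
#
# On later runs, pass the previously returned etag/modified values back in to
# benefit from conditional GET, and use the `handlers` argument to supply extra
# urllib2 handlers (e.g. proxy or authentication handlers).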
if __name__ == '__main__':
if not sys.argv[1:]:
print __doc__
sys.exit(0)
else:
urls = sys.argv[1:]
zopeCompatibilityHack()
from pprint import pprint
for url in urls:
print url
print
result = parse(url)
pprint(result)
print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
#  (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
#  links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
| mit | -7,598,199,181,045,882,000 | 42.02012 | 214 | 0.587925 | false |
MrSenko/Nitrate | tcms/settings/test.py | 1 | 1204 | from tcms.settings.devel import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
LISTENING_MODEL_SIGNAL = False
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | -5,055,384,822,150,060,000 | 22.607843 | 95 | 0.442691 | false |
jlesquembre/jlle | jlle/scaffold/templates/setup.py | 1 | 1102 | from setuptools import setup
import os
version = '0.0.1.dev0'
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), 'rt') as f:
README = f.read()
setup(name='{{project}}',
version=version,
author='José Luis Lafuente',
author_email='[email protected]',
description='blabla',
long_description=README,
license='{{license}}',
url='http://jlesquembre.github.io/{{project}}',
packages=['{{project}}'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'Intended Audience :: Developers',
'License :: OSI Approved :: {{license}}',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
],
keywords=['python'],
#entry_points = {
# 'console_scripts': [
# 'doc2git = doc2git.cmdline:main',
# 'd2g = doc2git.cmdline:main',
# ],
# },
#install_requires=['sarge']
)
| gpl-3.0 | 8,452,121,628,583,304,000 | 27.230769 | 55 | 0.559491 | false |
jhmatthews/cobra | source/plotspec.py | 1 | 2652 | #! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
'''
Generic plotting script for PYTHONRT
'''
import read_output as rd
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import cobra_sub as sub
rd.setpars()
def strip(character, string):
''' strip a character from a string'''
new_string = ""
for s in string:
if s != character:
new_string += s
return new_string
def plot_spec (filename, lmin, lmax, smooth = 1, nobs = 0, use = [], \
savename = "fig", yscale = "linear", xscale = "linear" , \
sources = False, Fnu = False):
'''
Function for plotting a spec file outputted from the radiative transfer code PYTHONRT
:INPUT:
filename string
name of file
lmin, lmax float
wavelength range in ANGSTROMS
nobs int
		number of observers
smooth int
smoothing factor
use array
which observations to use
savename string
yscale, xscale string
lin or log scale
sources Bool
Plot sources or not
Fnu Bool
Is it an Fnu plot?
:OUTPUT:
Creates plot and opens in preview
'''
# default savename is filename
if savename == "fig":
savename = filename + ".png"
# create spec class from spec file
spec = rd.read_spec_file(filename)
if nobs == 0:
nobs = len(spec.spec)
# strip filenames of funny characters that TeX complains about
savename = strip("_", savename)
filename = strip("_", filename)
# default argument is to plot all observations
if len(use) == 0:
use = np.arange(nobs)
nuse = int(len(use))
# work out the dimensions of the plot
if nuse < 3:
ny = nuse
nx = 1
else:
nx = 2
ny = (len(use) + 1) / 2
# do we want to smooth? if so, do it!
if smooth > 1:
for i in use:
sub.smooth_spectrum( spec, smooth )
# now create figure
fig=plt.figure(figsize=(8.3,11.7),dpi=80)
fig.suptitle(filename,fontsize=24,fontweight='bold')
fig.subplots_adjust(hspace=0.3,wspace=0.2)
for i in range(nuse):
		ax = fig.add_subplot( ny, nx, i + 1)  # subplot indices are 1-based
if Fnu:
ax.plot(spec.freq, spec.spec[use[i]])
else:
ax.plot(spec.wavelength, spec.spec[use[i]])
ax.set_yscale(yscale)
ax.set_xscale(xscale)
plt.xlim(lmin, lmax)
plt.savefig(savename)
command = "open -a preview %s" % savename
os.system(command)
if sources:
fig=plt.figure(figsize=(8.3,11.7),dpi=80)
fig.suptitle(filename,fontsize=24,fontweight='bold')
fig.subplots_adjust(hspace=0.3,wspace=0.2)
return 0
filename = sys.argv[1]
nobs = int(sys.argv[2])
plot_spec(filename, 3000, 7000, smooth = 20, yscale = "log", nobs = nobs)
| gpl-2.0 | -2,380,292,950,757,573,600 | 16 | 95 | 0.647436 | false |
newcastlemakerspace/mkrspc_web | data_migration.py | 1 | 2090 | import redis
import datetime
from site_config import REDIS_DB
import site_utils
import uuid
def _was_migration_applied(redis_conn, seq):
value = redis_conn.get('migration_%d' % seq)
if value is not None:
print "migration_%d - exists" % seq
return True
print "migration_%d - executing" % seq
return False
def _flag_migration_applied(redis_conn, seq):
print "migration_%d - done" % seq
# migration_201410241041
d = datetime.datetime
redis_conn.set('migration_%d' % seq, d.now().isoformat())
def migration_201410241041(redis_conn):
seq = 201410241041
if _was_migration_applied(redis_conn, seq):
return
print " - clear old auth cookies"
key_prefix_search = 'User_Auth_Cookie_*'
keys = redis_conn.keys(key_prefix_search)
for k in keys:
redis_conn.delete(k)
_flag_migration_applied(redis_conn, seq)
def migration_201411130948(redis_conn):
# wiki categories
seq = 201411130948
if _was_migration_applied(redis_conn, seq):
return
print " - re-init wiki"
su = site_utils.SiteUtils(redis_conn)
su.wu.create_wiki_root_category()
root_cat_id = su.wu.wiki_root_category()
misc_cat_id = su.wu.create_wiki_category(root_cat_id, "Misc.")
article_keys = redis_conn.keys('wiki_article_*')
for k in article_keys:
print k, len(k)
print k[13:]
if len(k) == 49:
uuid_sstr = k[13:]
art_id = uuid.UUID(uuid_sstr)
assert isinstance(art_id, uuid.UUID)
print " article: ", misc_cat_id, art_id
cat_articles_key = "wiki_category_articles_%s" % misc_cat_id
            redis_conn.rpush(cat_articles_key, str(art_id))
else:
print " (not an article)"
print '-----------------------------'
_flag_migration_applied(redis_conn, seq)
if __name__ == '__main__':
print "Ruinning migrations for DB #%d" % REDIS_DB
r = redis.Redis(db=REDIS_DB)
assert isinstance(r, redis.Redis)
migration_201410241041(r)
migration_201411130948(r)
| gpl-3.0 | 2,701,100,883,988,187,000 | 23.022989 | 72 | 0.606699 | false |
RyanBalfanz/reservoir-sampling-cli | sampler/command_line.py | 1 | 1294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import operator
import random
import sys
def get_parser():
    parser = argparse.ArgumentParser(description="Randomly sample k items from an input S containing n items.")
parser.add_argument("infile", nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("outfile", nargs='?', type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument("-k", "--num-items", type=int, help="An integer number giving the size of the reservoir")
parser.add_argument("--preserve-order", action="store_true", help="Preserve input ordering")
return parser
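# A short usage sketch (file names are illustrative; with no positional
# arguments the tool reads stdin and writes stdout):
#
#     from sampler.command_line import main
#     main(["-k", "10", "--preserve-order", "access.log", "sampled.log"])
#
# The same parser backs the installed console script, so the equivalent shell
# invocation simply passes the identical flags to that script.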
def main(argv=None):
parser = get_parser()
args = parser.parse_args(argv)
N = args.num_items
reservoir = []
reservoir_ordered = []
for l, line in enumerate(args.infile):
if l < N:
reservoir.append(line)
reservoir_ordered.append((l, line))
elif l >= N and random.random() < N/float(l+1):
replace = random.randint(0, len(reservoir)-1)
reservoir[replace] = line
reservoir_ordered[replace] = (l, line)
if args.preserve_order:
        for item in sorted(reservoir_ordered, key=operator.itemgetter(0)):  # sort by original input position
args.outfile.write(item[1])
else:
for item in reservoir:
args.outfile.write(item)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| mit | 1,964,912,365,201,834,000 | 28.409091 | 110 | 0.693972 | false |
aldryn/aldryn-redirects | aldryn_redirects/admin.py | 1 | 8613 | from __future__ import unicode_literals
from tablib import Dataset
from django.conf import settings
from django.contrib import admin, messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _, ugettext
from parler.admin import TranslatableAdmin
from aldryn_translation_tools.admin import AllTranslationsMixin
from .forms import RedirectsImportForm, StaticRedirectsImportForm
from .models import Redirect, StaticRedirect, StaticRedirectInboundRouteQueryParam
class DeletionMixin(object):
actions = ['delete_selected']
def delete_selected(self, request, queryset):
max_items_deletion = getattr(settings, 'DATA_UPLOAD_MAX_NUMBER_FIELDS', 1000) # COMPAT: Django<1.10
if queryset.count() > max_items_deletion:
msg = _('Too many items for deletion. Only first {qty} items were deleted.').format(qty=max_items_deletion)
self.message_user(request, msg, level=messages.WARNING)
# <Queryset>.delete() can not be used with sliced querysets
inner_qs = queryset.all()[:max_items_deletion]
queryset = queryset.filter(id__in=inner_qs)
deleted_qty = queryset.all().delete()[1]['aldryn_redirects.{}'.format(self.opts.model.__name__)]
object_label = self.opts.verbose_name_plural if deleted_qty > 1 else self.opts.verbose_name
msg = _('Successfully deleted {qty} {object_label}.').format(qty=deleted_qty, object_label=object_label)
self.message_user(request, msg)
delete_selected.short_description = _('Delete selected objects')
class RedirectAdmin(DeletionMixin, AllTranslationsMixin, TranslatableAdmin):
list_display = ('old_path',)
list_filter = ('site',)
search_fields = ('old_path', 'translations__new_path')
radio_fields = {'site': admin.VERTICAL}
export_filename = 'redirects-%Y-%m-%d.csv'
export_headers = ['Domain', 'Old', 'New', 'Language']
def get_urls(self):
from django.conf.urls import url
def pattern(regex, fn, name):
args = [regex, self.admin_site.admin_view(fn)]
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, name)
return url(*args, name=url_name)
url_patterns = [
pattern(r'export/$', self.export_view, 'export'),
pattern(r'import/$', self.import_view, 'import'),
]
return url_patterns + super(RedirectAdmin, self).get_urls()
def get_form(self, request, obj=None, **kwargs):
form = super(RedirectAdmin, self).get_form(request, obj=None, **kwargs)
site_field = form.base_fields['site']
# the add and change links don't work anyway with admin.VERTICAL radio
# fields
site_field.widget.can_add_related = False
site_field.widget.can_change_related = False
# if there is only one site, select it by default
if site_field.queryset.all().count() == 1:
site_field.initial = [site_field.queryset.get(), ]
return form
def export_view(self, request):
dataset = Dataset(headers=self.export_headers)
filename = timezone.now().date().strftime(self.export_filename)
redirects = self.get_queryset(request).prefetch_related('translations')
for r in redirects:
rows = []
for translation in r.translations.all():
rows.append([
r.site.domain,
r.old_path,
translation.new_path,
translation.language_code,
])
dataset.extend(rows)
response = HttpResponse(dataset.csv, content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
return response
def import_view(self, request):
form = RedirectsImportForm(
data=request.POST or None,
files=request.FILES or None,
)
opts = self.model._meta
if form.is_valid():
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, 'changelist')
success_url = 'admin:{}'.format(url_name)
form.do_import()
self.message_user(request, _('Redirects imported successfully.'))
return redirect(success_url)
context = {
'adminform': form,
'has_change_permission': True,
'media': self.media + form.media,
'opts': opts,
'root_path': reverse('admin:index'),
'current_app': self.admin_site.name,
'app_label': opts.app_label,
'title': ugettext('Import redirects'),
'original': ugettext('Import redirects'),
'errors': form.errors,
}
return render(request, 'admin/aldryn_redirects/redirect/import_form.html', context)
class StaticRedirectInboundRouteQueryParamInline(admin.TabularInline):
model = StaticRedirectInboundRouteQueryParam
verbose_name = _('Query Param')
verbose_name_plural = _('Query Params')
extra = 1
class StaticRedirectAdmin(DeletionMixin, admin.ModelAdmin):
inlines = [StaticRedirectInboundRouteQueryParamInline]
filter_horizontal = ('sites',)
list_filter = ('sites',)
list_display = ('inbound_route', 'outbound_route')
search_fields = list_display
# Custom attributes
export_filename = 'static-redirects-%Y-%m-%d.csv'
export_headers = ['domain', 'inbound_route', 'outbound_route']
def get_urls(self):
from django.conf.urls import url
def pattern(regex, fn, name):
args = [regex, self.admin_site.admin_view(fn)]
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, name)
return url(*args, name=url_name)
url_patterns = [
pattern(r'export/$', self.export_view, 'export'),
pattern(r'import/$', self.import_view, 'import'),
]
return url_patterns + super(StaticRedirectAdmin, self).get_urls()
def get_form(self, request, obj=None, **kwargs):
form = super(StaticRedirectAdmin, self).get_form(request, obj=None, **kwargs)
sites_field = form.base_fields['sites']
# the add and change links don't work anyway with admin.VERTICAL radio
# fields
sites_field.widget.can_add_related = False
sites_field.widget.can_change_related = False
# if there is only one site, select it by default
if sites_field.queryset.all().count() == 1:
sites_field.initial = [sites_field.queryset.get(), ]
return form
def export_view(self, request):
dataset = Dataset(headers=self.export_headers)
filename = timezone.now().date().strftime(self.export_filename)
for r in self.get_queryset(request):
rows = []
for site in r.sites.all():
rows.append([
site.domain,
r.get_full_inbound_route(),
r.outbound_route,
])
dataset.extend(rows)
response = HttpResponse(dataset.csv, content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
return response
def import_view(self, request):
form = StaticRedirectsImportForm(
data=request.POST or None,
files=request.FILES or None,
)
opts = self.model._meta
if form.is_valid():
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, 'changelist')
success_url = 'admin:{}'.format(url_name)
form.do_import()
self.message_user(request, _('Redirects imported successfully.'))
return redirect(success_url)
context = {
'adminform': form,
'has_change_permission': True,
'media': self.media + form.media,
'opts': opts,
'root_path': reverse('admin:index'),
'current_app': self.admin_site.name,
'app_label': opts.app_label,
'title': ugettext('Import redirects'),
'original': ugettext('Import redirects'),
'errors': form.errors,
}
return render(request, 'admin/aldryn_redirects/staticredirect/import_form.html', context)
admin.site.register(Redirect, RedirectAdmin)
admin.site.register(StaticRedirect, StaticRedirectAdmin)
| bsd-3-clause | 7,284,398,277,118,702,000 | 37.797297 | 119 | 0.615929 | false |
ai-se/XTREE | src/Planners/XTREE/smote.py | 1 | 3196 | #! /Users/rkrsn/anaconda/bin/python
from pdb import set_trace
from os import environ, getcwd
from os import walk
from os.path import expanduser
from pdb import set_trace
import sys
# Update PYTHONPATH
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from scipy.spatial.distance import euclidean
from random import choice, seed as rseed, uniform as rand
import pandas as pd
from tools.axe.table import *
def SMOTE(data=None, k=5, atleast=100, atmost=100, bugIndx=2, resample=False):
def Bugs(tbl):
cells = [i.cells[-bugIndx] for i in tbl._rows]
return cells
def minority(data):
unique = list(set(sorted(Bugs(data))))
counts = len(unique) * [0]
# set_trace()
for n in xrange(len(unique)):
for d in Bugs(data):
if unique[n] == d:
counts[n] += 1
return unique, counts
def knn(one, two):
pdistVect = []
# set_trace()
for ind, n in enumerate(two):
pdistVect.append([ind, euclidean(one.cells[:-1], n.cells[:-1])])
indices = sorted(pdistVect, key=lambda F: F[1])
return [two[n[0]] for n in indices]
def extrapolate(one, two):
new = one
# set_trace()
if bugIndx == 2:
new.cells[3:-1] = [max(min(a, b),
min(min(a, b) + rand() * (abs(a - b)),
max(a, b))) for a, b in zip(one.cells[3:-1],
two.cells[3:-1])]
new.cells[-2] = int(new.cells[-2])
else:
new.cells[3:] = [min(a, b) + rand() * (abs(a - b)) for
a, b in zip(one.cells[3:], two.cells[3:])]
new.cells[-1] = int(new.cells[-1])
return new
def populate(data):
newData = []
# reps = (len(data) - atleast)
for _ in xrange(atleast):
for one in data:
neigh = knn(one, data)[1:k + 1]
# If you're thinking the following try/catch statement is bad coding
        # etiquette... it is.
try:
two = choice(neigh)
except IndexError:
two = one
newData.append(extrapolate(one, two))
# data.extend(newData)
return newData
def depopulate(data):
if resample:
newer = []
for _ in xrange(atmost):
orig = choice(data)
newer.append(extrapolate(orig, knn(orig, data)[1]))
return newer
else:
return [choice(data) for _ in xrange(atmost)]
newCells = []
rseed(1)
unique, counts = minority(data)
rows = data._rows
for u, n in zip(unique, counts):
if n < atleast:
newCells.extend(populate([r for r in rows if r.cells[-2] == u]))
if n > atmost:
newCells.extend(depopulate([r for r in rows if r.cells[-2] == u]))
else:
newCells.extend([r for r in rows if r.cells[-2] == u])
return clone(data, rows=[k.cells for k in newCells])
def test_smote():
dir = '../Data/camel/camel-1.6.csv'
Tbl = createTbl([dir], _smote=False)
newTbl = createTbl([dir], _smote=True)
print(len(Tbl._rows), len(newTbl._rows))
# for r in newTbl._rows:
# print r.cells
if __name__ == '__main__':
test_smote()
| mit | -2,068,732,287,555,262,000 | 27.792793 | 78 | 0.572904 | false |
MatthewWilkes/mw4068-packaging | src/melange/src/soc/models/timeline.py | 1 | 1544 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Timeline Model.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import linkable
class Timeline(linkable.Linkable):
"""The Timeline Model, representing the timeline for a Program.
"""
program_start = db.DateTimeProperty(
verbose_name=ugettext('Program Start date'))
program_end = db.DateTimeProperty(
verbose_name=ugettext('Program End date'))
org_signup_start = db.DateTimeProperty(
verbose_name=ugettext('Organization Signup Start date'))
org_signup_end = db.DateTimeProperty(
verbose_name=ugettext('Organization Signup End date'))
student_signup_start = db.DateTimeProperty(
verbose_name=ugettext('Student Signup Start date'))
student_signup_end = db.DateTimeProperty(
verbose_name=ugettext('Student Signup End date'))
| apache-2.0 | 4,690,625,934,440,829,000 | 28.692308 | 74 | 0.738342 | false |
Nydareld/IaGameServer | Server/PlayerThread.py | 1 | 4282 | from Server.Game import *
from threading import Thread
from CodIa.tuto.models import User
from CodIa.tuto.app import db
import threading
import time
import random
class PlayerThread(Thread):
def __init__(self, GameThread, username, ia):
Thread.__init__(self)
self.GameThread = GameThread
self.username = username
GameThread.barrierTours._parties += 1
self.ia = ia
self.joueur = Player(ia,username,GameThread.game.gamesize)
#GameThread.game.joueurs[username]=Player(ia,username,GameThread.game.gamesize)
GameThread.joueursAAdd.append(self.joueur)
GameThread.nbth += 1
def run(self):
while True:
            # wait for the start of the turn
# print("Barriere debut de tours "+str(threading.current_thread().name))
# print(self.GameThread.barrierTours.parties)
self.GameThread.barrierTours.wait()
            # execute the AI's code
self.executeIa()
#print(self.GameThread.barrierEtape.parties)
self.GameThread.barrierEtape.wait()
self.calculePos()
self.GameThread.barrierEtape.wait()
agraille = self.join()
#print("avant acquire")
self.GameThread.barrierEtape.wait()
self.GameThread.lockmanger.acquire()
self.GameThread.aManger.append(agraille)
#print("pendant")
self.GameThread.lockmanger.release()
#print("après release")
self.GameThread.barrierManger.wait()
if self.joueur.poidTotal<=0 and not self.joueur.end:
self.joueur.end = True
print("\033[91m Le Joueur "+self.joueur.username +" à perdu \033[0m")
user = User.query.filter_by(pseudo=self.joueur.username).first()
if user is not None:
# print("\033[91m Zbra \033[0m")
user.score += self.joueur.score
db.session.commit()
# time.sleep(1/60)
# self.GameThread.nbth-=1
# self.GameThread.barrierTours._parties -= 1
def executeIa(self):
pass
def calculePos(self):
# print("\033[91m caca \033[0m")
# print(str(self.joueur.spheres[0].normeVitesse()) +" "+ str(self.joueur.spheres[0].normeVitesseMax()))
res=0
for sphere in self.joueur.spheres:
sphere.vectVitesse = sphere.vitesseNextTick()
if sphere.normeVitesse() > sphere.normeVitesseMax():
# print("\033[91m caca \033[0m")
sphere.vectVitesse[0] *= 0.9
sphere.vectVitesse[1] *= 0.9
# else :
# print("\033[92m non caca \033[0m")
sphere.vectPos = sphere.posNextTick()
rand = random.randint(1,300)
if sphere.taille > 50000 and rand==1:
sphere.split(self.joueur)
somme = 0
#print("=======================================================")
for joueur in self.GameThread.game.joueurs.values():
somme += joueur.poidTotal
#print("somme2"+str(somme))
#print("taille sphere max: "+str((sphere.taille)))
#pass
self.joueur.updateScore()
def join(self):
try:
listjoueur = dict()
for sphere in self.joueur.spheres:
for joueur2 in self.GameThread.game.joueurs.values():
for sphere2 in joueur2.spheres:
res = sphere.join(sphere2,joueur2)
if(res != None):
# if(not (listjoueur[res[0].username] in locals)):
# listjoueur[res[0].username] = []
try:
listjoueur[res[0].username].append(res[1])
except KeyError:
listjoueur[res[0].username] = []
listjoueur[res[0].username].append(res[1])
except RuntimeError:
print("\033[91m Nb de Thread :"+str(self.GameThread.barrierManger._parties)+", "+str(self.GameThread.nbth)+" \033[0m")
return listjoueur
| gpl-3.0 | 7,055,425,905,077,996,000 | 35.57265 | 131 | 0.535172 | false |
open-o/nfvo | drivers/vnfm/gvnfm/gvnfmadapter/driver/urls.py | 1 | 1060 | # Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from driver.pub.config.config import REG_TO_MSB_WHEN_START, REG_TO_MSB_REG_URL, REG_TO_MSB_REG_PARAM
from django.conf.urls import include, url
urlpatterns = [
url(r'^', include('driver.interfaces.urls')),
url(r'^', include('driver.swagger.urls')),
]
# register with MSB at startup
if REG_TO_MSB_WHEN_START:
import json
from driver.pub.utils.restcall import req_by_msb
req_by_msb(REG_TO_MSB_REG_URL, "POST", json.JSONEncoder().encode(REG_TO_MSB_REG_PARAM))
| apache-2.0 | 7,444,818,097,996,803,000 | 39.769231 | 100 | 0.734906 | false |
atmega/ipkg-utils | ipkg.py | 1 | 11947 | #!/usr/bin/env python
# Copyright (C) 2001 Alexander S. Guy <[email protected]>
# Andern Research Labs
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA. */
#
# Copyright 2001, Russell Nelson <[email protected]>
# Added reading in of packages.
# Added missing package information fields.
# Changed render_control() to __repr__().
#
# Current Issues:
# The API doesn't validate package information fields. It should be
# throwing exceptions in the right places.
# Executions of tar could silently fail.
# Executions of tar *do* fail, and loudly, because you have to specify a full filename,
# and tar complains if any files are missing, and the ipkg spec doesn't require
# people to say "./control.tar.gz" or "./control" when they package files.
# It would be much better to require ./control or disallow ./control (either)
# rather than letting people pick. Some freedoms aren't worth their cost.
import tempfile
import os
import sys
import glob
import md5
import re
import string
import commands
from stat import ST_SIZE
class Package:
"""A class for creating objects to manipulate (e.g. create) ipkg
packages."""
def __init__(self, fn=None):
self.package = None
self.version = None
self.architecture = None
self.maintainer = None
self.source = None
self.description = None
self.depends = None
self.provides = None
self.replaces = None
self.conflicts = None
self.recommends = None
self.suggests = None
self.section = None
self.filename_header = None
self.file_list = []
self.md5 = None
self.size = None
self.installed_size = None
self.filename = None
self.isdeb = 0
if fn:
# see if it is deb format
f = open(fn, "r")
magic = f.read(4)
f.close()
if (magic == "!<ar"):
self.isdeb = 1
# compute the MD5.
f = open(fn, "r")
sum = md5.new()
while 1:
data = f.read(1024)
if not data: break
sum.update(data)
f.close()
if sys.version[:1] > '2':
# when using Python 2.0 or newer
self.md5 = sum.hexdigest()
else:
self.md5 = string.join(map((lambda x:"%02x" % ord(x)),sum.digest()),'')
stat = os.stat(fn)
self.size = stat[ST_SIZE]
self.filename = os.path.basename(fn)
## sys.stderr.write(" extracting control.tar.gz from %s\n"% (fn,))
if self.isdeb:
control = os.popen("ar p "+fn+" control.tar.gz | tar xfzO - --wildcards '*control'","r")
else:
control = os.popen("tar xfzO "+fn+" --wildcards '*control.tar.gz' | tar xfzO - --wildcards '*control'","r")
line = control.readline()
while 1:
if not line: break
line = string.rstrip(line)
lineparts = re.match(r'([\w-]*?):\s*(.*)', line)
if lineparts:
name = string.lower(lineparts.group(1))
value = lineparts.group(2)
while 1:
line = control.readline()
if not line: break
if line[0] != ' ': break
line = string.rstrip(line)
value = value + '\n' + line
# don't allow package to override its own filename
if name == "filename":
self.filename_header = value
else:
if self.__dict__.has_key(name):
self.__dict__[name] = value
else:
line = control.readline()
control.close()
if self.isdeb:
data = os.popen("ar p "+fn+" data.tar.gz | tar tfz -","r")
else:
data = os.popen("tar xfzO "+fn+" --wildcards '*data.tar.gz' | tar tfz -","r")
while 1:
line = data.readline()
if not line: break
self.file_list.append(string.rstrip(line))
data.close()
self.scratch_dir = None
self.file_dir = None
self.meta_dir = None
def read_control(self, control):
import os
line = control.readline()
while 1:
if not line: break
line = string.rstrip(line)
lineparts = re.match(r'([\w-]*?):\s*(.*)', line)
if lineparts:
name = string.lower(lineparts.group(1))
value = lineparts.group(2)
while 1:
line = control.readline()
if not line: break
if line[0] != ' ': break
value = value + '\n' + line
if name == 'size':
self.size = int(value)
elif self.__dict__.has_key(name):
self.__dict__[name] = value
if line[0] == '\n':
                return # consumes one blank line at end of package description
else:
line = control.readline()
pass
return
def _setup_scratch_area(self):
self.scratch_dir = "%s/%sipkg" % (tempfile.gettempdir(),
tempfile.gettempprefix())
self.file_dir = "%s/files" % (self.scratch_dir)
self.meta_dir = "%s/meta" % (self.scratch_dir)
os.mkdir(self.scratch_dir)
os.mkdir(self.file_dir)
os.mkdir(self.meta_dir)
def set_package(self, package):
self.package = package
def get_package(self):
return self.package
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def set_architecture(self, architecture):
self.architecture = architecture
def get_architecture(self):
return self.architecture
def set_maintainer(self, maintainer):
self.maintainer = maintainer
def get_maintainer(self):
return self.maintainer
def set_source(self, source):
self.source = source
def get_source(self):
return self.source
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_depends(self, depends):
self.depends = depends
def get_depends(self, depends):
return self.depends
def set_provides(self, provides):
self.provides = provides
def get_provides(self, provides):
return self.provides
def set_replaces(self, replaces):
self.replaces = replaces
def get_replaces(self, replaces):
return self.replaces
def set_conflicts(self, conflicts):
self.conflicts = conflicts
def get_conflicts(self, conflicts):
return self.conflicts
def set_suggests(self, suggests):
self.suggests = suggests
def get_suggests(self, suggests):
return self.suggests
def set_section(self, section):
self.section = section
def get_section(self, section):
return self.section
def get_file_list(self):
return self.file_list
def write_package(self, dirname):
buf = self.render_control()
file = open("%s/control" % self.meta_dir, 'w')
file.write(buf)
self._setup_scratch_area()
cmd = "cd %s ; tar cvfz %s/control.tar.gz control" % (self.meta_dir,
self.scratch_dir)
cmd_out, cmd_in, cmd_err = os.popen3(cmd)
while cmd_err.readline() != "":
pass
cmd_out.close()
cmd_in.close()
cmd_err.close()
bits = "control.tar.gz"
if self.file_list:
cmd = "cd %s ; tar cvfz %s/data.tar.gz" % (self.file_dir,
self.scratch_dir)
cmd_out, cmd_in, cmd_err = os.popen3(cmd)
while cmd_err.readline() != "":
pass
cmd_out.close()
cmd_in.close()
cmd_err.close()
bits = bits + " data.tar.gz"
file = "%s_%s_%s.ipk" % (self.package, self.version, self.architecture)
cmd = "cd %s ; tar cvfz %s/%s %s" % (self.scratch_dir,
dirname,
file,
bits)
cmd_out, cmd_in, cmd_err = os.popen3(cmd)
while cmd_err.readline() != "":
pass
cmd_out.close()
cmd_in.close()
cmd_err.close()
def __repr__(self):
out = ""
# XXX - Some checks need to be made, and some exceptions
# need to be thrown. -- a7r
if self.package: out = out + "Package: %s\n" % (self.package)
if self.version: out = out + "Version: %s\n" % (self.version)
if self.depends: out = out + "Depends: %s\n" % (self.depends)
if self.provides: out = out + "Provides: %s\n" % (self.provides)
if self.replaces: out = out + "Replaces: %s\n" % (self.replaces)
if self.conflicts: out = out + "Conflicts: %s\n" % (self.conflicts)
if self.suggests: out = out + "Suggests: %s\n" % (self.suggests)
if self.recommends: out = out + "Recommends: %s\n" % (self.recommends)
if self.section: out = out + "Section: %s\n" % (self.section)
if self.architecture: out = out + "Architecture: %s\n" % (self.architecture)
if self.maintainer: out = out + "Maintainer: %s\n" % (self.maintainer)
if self.md5: out = out + "MD5Sum: %s\n" % (self.md5)
if self.size: out = out + "Size: %d\n" % int(self.size)
if self.installed_size: out = out + "InstalledSize: %d\n" % int(self.installed_size)
if self.filename: out = out + "Filename: %s\n" % (self.filename)
if self.source: out = out + "Source: %s\n" % (self.source)
if self.description: out = out + "Description: %s\n" % (self.description)
out = out + "\n"
return out
def __del__(self):
# XXX - Why is the `os' module being yanked out before Package objects
# are being destroyed? -- a7r
pass
class Packages:
"""A currently unimplemented wrapper around the ipkg utility."""
def __init__(self):
self.packages = {}
return
def add_package(self, pkg):
package = pkg.package
arch = pkg.architecture
name = ("%s:%s" % (package, arch))
if (not self.packages.has_key(name)):
self.packages[name] = pkg
(s, outtext) = commands.getstatusoutput("ipkg-compare-versions %s '>' %s" % (pkg.version, self.packages[name].version))
if (s == 0):
self.packages[name] = pkg
return 0
else:
return 1
def read_packages_file(self, fn):
f = open(fn, "r")
while 1:
pkg = Package()
pkg.read_control(f)
if pkg.get_package():
self.add_package(pkg)
else:
break
f.close()
return
def write_packages_file(self, fn):
f = open(fn, "w")
names = self.packages.keys()
names.sort()
for name in names:
f.write(self.packages[name].__repr__())
return
def keys(self):
return self.packages.keys()
def __getitem__(self, key):
return self.packages[key]
if __name__ == "__main__":
package = Package()
package.set_package("FooBar")
package.set_version("0.1-fam1")
package.set_architecture("arm")
package.set_maintainer("Testing <[email protected]>")
package.set_depends("libc")
package.set_description("A test of the APIs.")
print "<"
    sys.stdout.write(str(package))
print ">"
package.write_package("/tmp")
| mit | -7,228,900,134,880,194,000 | 29.633333 | 131 | 0.572612 | false |
epsy/napper | napper/tests/test_restspec.py | 1 | 15960 | # napper -- A REST Client for Python
# Copyright (C) 2016 by Yann Kaiser and contributors.
# See AUTHORS and COPYING for details.
import io
import json
import re
from .. import restspec
from ..errors import UnknownParameters
from .util import Tests
class ConfigTests(Tests):
def make_spec(self, **obj):
obj.setdefault('base_address', 'http://www.example.org')
return restspec.RestSpec.from_file(io.StringIO(json.dumps(obj)))
def test_unknown_params(self):
with self.assertWarns(UnknownParameters):
self.make_spec(
base_address="http://some.address", invalidoption=0)
def test_address(self):
spec = self.make_spec(base_address="http://an.address.com")
self.assertEqual(spec.address, "http://an.address.com")
def test_address_trailing(self):
spec = self.make_spec(base_address="http://an.address.com/")
self.assertEqual(spec.address, "http://an.address.com")
def test_permalink_attr_suffix(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"}, {"matches": {"suffix": "_url"}}])
self.assertTrue(
spec.is_permalink_attr("https://...", {"attribute": "abcd_url"}))
self.assertFalse(
spec.is_permalink_attr("https://...", {"attribute": "abcd"}))
def test_permalink_attr_prefix(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"}, {"matches": {"prefix": "link_"}}])
self.assertTrue(
spec.is_permalink_attr("https://...", {"attribute": "link_abcd"}))
self.assertFalse(
spec.is_permalink_attr("https://...", {"attribute": "abcd"}))
def test_permalink_attr_prefix_suffix(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"}, {"matches": {"prefix": "link_",
"suffix": "_url"}}])
self.assertTrue(spec.is_permalink_attr(
"https://...", {"attribute": "link_abcd_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "link_abcd"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "abcd_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "abcd"}))
def test_permalink_attr_pattern(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"},
{"matches": {"pattern": "^link_[0-9]+_url$"}}])
self.assertTrue(spec.is_permalink_attr(
"https://...", {"attribute": "link_4_url"}))
self.assertTrue(spec.is_permalink_attr(
"https://...", {"attribute": "link_123456_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "link_abcd_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "1234567"}))
class FetcherTests(Tests):
def f(self, obj):
obj = json.loads(json.dumps(obj), object_hook=restspec.WarnOnUnusedKeys)
return restspec.Fetcher.from_restspec(obj)
def nv(self):
return self.assertRaises(restspec.NoValue)
def test_none(self):
f = self.f(None)
with self.nv():
f({})
with self.nv():
f("abc")
with self.nv():
f({"spam": "ham"})
r = {"spam": "ham"}
with self.nv():
f("ham", {"parent": r, "key": "spam", "root": r})
def test_missing_action(self):
with self.assertRaises(ValueError):
self.f({})
def test_multiple_actions(self):
with self.assertRaises(ValueError):
self.f({'attr': 'abc', 'value': 42})
def test_implicit_value(self):
self.assertEqual(None, self.f([None])({}))
self.assertEqual(0, self.f(0)({}))
self.assertEqual(42, self.f(42)({}))
self.assertEqual('ham', self.f('ham')({}))
self.assertEqual(['item1', 'item2'], self.f([['item1', 'item2']])({}))
def test_value(self):
self.assertEqual(None, self.f({'value': None})({}))
self.assertEqual(0, self.f({'value': 0})({}))
self.assertEqual('ham', self.f({'value': 'ham'})({}))
self.assertEqual({'a': 0}, self.f({'value': {'a': 0}})({}))
self.assertEqual('always', self.f('always')({}))
self.assertEqual('never', self.f('never')({}))
def test_attribute(self):
f = self.f({'attr': 'spam'})
self.assertEqual('ham', f({'spam': 'ham', 'eggs': '42'}))
with self.nv():
f({'eggs': '42'})
with self.nv():
f('str doesnt have attrs')
def test_attribute_indirection(self):
f = self.f({'attr': {'attr': 'eggs'}})
self.assertEqual('spam', f({'eggs': 'ham', 'ham': 'spam'}))
with self.nv():
f({'ham': 'spam'})
with self.nv():
f({'eggs': 'ham'})
def test_deep_attribute(self):
f = self.f([{'attr': 'spam'}, {'attr': 'ham'}])
self.assertEqual('eggs', f({'spam': {'ham': 'eggs'}}))
with self.nv():
f('str doesnt have attrs')
def test_item(self):
fixt = ['spam', 'ham', 'eggs']
self.assertEqual('spam', self.f({'item': 0})(fixt))
self.assertEqual('ham', self.f({'item': 1})(fixt))
self.assertEqual('eggs', self.f({'item': 2})(fixt))
self.assertEqual('spam', self.f({'item': -3})(fixt))
self.assertEqual('ham', self.f({'item': -2})(fixt))
self.assertEqual('eggs', self.f({'item': -1})(fixt))
with self.nv():
self.f({'item': 3})(fixt)
with self.nv():
self.f({'item': -4})(fixt)
def test_format(self):
f = self.f({'format': ['John']})
self.assertEqual('Hello John!', f('Hello {}!'))
self.assertEqual('Goodbye John!', f('Goodbye {}!'))
def test_root(self):
f = self.f([{'attr': 'ham'}, {'context': 'root'}, {'attr': 'spam'}])
self.assertEqual('sausages', f({'ham': 'eggs', 'spam': 'sausages'}))
f = self.f(['Hello {}!', {'format': [[{'context': 'root'}, {'attr': 'name'}]]}])
self.assertEqual('Hello John!', f({'name': 'John'}))
def test_ifelse(self):
f = self.f({'if': {'is_eq': 23}, 'then': 'abc', 'else': 'def'})
self.assertEqual(f(23), 'abc')
self.assertEqual(f(24), 'def')
class ConditionalTests(Tests):
def c(self, obj):
obj = json.loads(json.dumps(obj), object_hook=restspec.WarnOnUnusedKeys)
return restspec.Conditional.from_restspec(obj)
def test_missing(self):
with self.assertRaises(ValueError):
self.c({})
def test_always_false(self):
c = self.c("never")
self.assertFalse(c({}))
self.assertFalse(c("abc"))
self.assertFalse(c({"spam": "ham"}))
r = {"spam": "ham"}
self.assertFalse(c("ham", {"parent": r, "key": "spam", "root": r}))
def test_none(self):
c = self.c(None)
self.assertFalse(c({}))
self.assertFalse(c("abc"))
self.assertFalse(c({"spam": "ham"}))
r = {"spam": "ham"}
self.assertFalse(c("ham", {"parent": r, "key": "spam", "root": r}))
def test_always_true(self):
c = self.c("always")
self.assertTrue(c({}))
self.assertTrue(c("abc"))
self.assertTrue(c({"spam": "ham"}))
r = {"spam": "ham"}
self.assertTrue(c("ham", {"parent": r, "key": "spam", "root": r}))
def test_attr_exists(self):
c = self.c({'attr_exists': 'attr'})
self.assertTrue(c({'attr': 'ham'}))
r = {'spam': 'ham'}
self.assertFalse(c(r, context={"root": r}))
r2 = {"attr": r}
self.assertFalse(
c(r, {"attribute": "attr", "parent": r2, "root": r2}))
def test_eq_value(self):
c = self.c({'is_eq': 42})
self.assertTrue(c(42))
self.assertFalse(c(43))
c = self.c({'eq': [{"context": "value"}, 42]})
self.assertTrue(c(42))
self.assertFalse(c(43))
def test_eq(self):
c = self.c({'eq': [42, {"attr": "spam"}]})
self.assertTrue(c({"spam": 42}))
self.assertFalse(c({"spam": 43}))
def test_attr_name_is(self):
c = self.c({'eq': ["permalink", [{"context": "attribute"}]]})
r = {"permalink": "abc", "spam": "def"}
self.assertTrue(
c(r["permalink"], {"attribute": "permalink", "parent": r}))
self.assertFalse(
c(r["spam"], {"attribute": "spam", "parent": r}))
def test_not(self):
c = self.c({'not': {'is_eq': "apples"}})
self.assertFalse(c("apples"))
self.assertTrue(c("oranges"))
def test_any(self):
c = self.c({'any': [{'is_eq': 'pear'}, {'is_eq': 'apple'}]})
self.assertTrue(c("pear"))
self.assertTrue(c("apple"))
self.assertFalse(c("orange"))
def test_any_recover(self):
c = self.c({'any': [{'eq': ['ham', {'context': 'attribute'}]},
{'is_eq': 42}]})
self.assertTrue(c(42))
self.assertFalse(c(43))
def test_all(self):
c = self.c({'all': [
{'is_eq': 'spam'},
{'eq': ['ham', {'context': 'attribute'}]}
]})
self.assertTrue(c("spam", context={'attribute': 'ham'}))
self.assertFalse(c("spam", context={'attribute': 'eggs'}))
self.assertFalse(c("spam", context={}))
self.assertFalse(c("orange", context={'attribute': 'ham'}))
def test_not_conditional(self):
with self.assertRaises(ValueError):
self.c(42)
with self.assertRaises(ValueError):
self.c({"value": ['abc']})
def test_raw_value(self):
c = self.c(True)
self.assertTrue(c({}))
c = self.c(False)
self.assertFalse(c({}))
def test_implicit_and(self):
c = self.c({'attr_exists': 'abc', 'eq': [{'attr': 'spam'}, 'ham']})
self.assertTrue(c({'abc': 0, 'spam': 'ham'}))
self.assertFalse(c({'abc': 0, 'spam': 'eggs'}))
self.assertFalse(c({'abc': 0}))
self.assertFalse(c({'spam': 'ham'}))
def test_mixed(self):
with self.assertRaises(ValueError):
self.c({'attr_exists': 'abc', 'value': 'True'})
def test_match(self):
c = self.c({'matches': {'prefix': 'link_', 'suffix': '_url'}})
self.assertTrue(c('link_stuff_url'))
self.assertFalse(c('link_stuff'))
self.assertFalse(c('stuff_url'))
self.assertFalse(c('link_url'))
c = self.c({'matches': {'pattern': 'link_.*_url'}})
self.assertTrue(c('link_stuff_url'))
self.assertFalse(c('link_stuff'))
self.assertFalse(c('stuff_url'))
self.assertFalse(c('link_url'))
def test_hint(self):
c = self.c([{'context': 'attribute'}, {'matches': {'suffix': '_url'}}])
self.assertEqual(c.attr_name_hint('abc'), 'abc_url')
self.assertEqual(c.attr_name_hint('xyz'), 'xyz_url')
def test_nohint(self):
cs = [
self.c(True),
self.c([{'attr': 'abc'}, {'attr': 'def'}, {'is_eq': 'ghi'}]),
self.c([{'attr': 'abc'}, {'is_eq': 123}]),
self.c([{'context': [{'attr': 'abc'}, {'attr': 'def'}]},
{'is_eq': 'ghi'}]),
self.c([{'context': {'attr': 'abc'}}, {'is_eq': 123}]),
self.c([{'context': 'value'}, {'is_eq': 123}]),
self.c([{'context': 'attribute'}, {'is_eq': 123}]),
]
for c in cs:
with self.assertRaises(restspec.NoValue):
c.attr_name_hint("test")
class MatcherTests(Tests):
def m(self, spec):
return restspec.Matcher.from_restspec(self.to_config_dict(spec))
def test_false(self):
m = self.m(None)
self.assertFalse(m('abcdef'))
self.assertFalse(m(''))
self.assertEqual(m.pattern, None)
def test_true(self):
m = self.m("any")
self.assertTrue(m('abcdef'))
self.assertTrue(m(''))
self.assertEqual(m.pattern, re.compile(''))
def test_pattern(self):
m = self.m({'pattern': 'abc.*def'})
self.assertTrue(m('abcdef'))
self.assertTrue(m('abcxyzdef'))
self.assertTrue(m('abc123def'))
self.assertFalse(m('abc'))
self.assertFalse(m('abcxyz'))
self.assertFalse(m('xyzdef'))
self.assertFalse(m('def'))
self.assertFalse(m('xyz'))
self.assertFalse(m(''))
self.assertEqual(m.pattern, re.compile('abc.*def'))
def test_prefix(self):
m = self.m({'prefix': 'abc'})
self.assertTrue(m('abc'))
self.assertTrue(m('abcdef'))
self.assertTrue(m('abc123'))
self.assertFalse(m(''))
self.assertFalse(m('def'))
self.assertFalse(m('123'))
self.assertFalse(m('defabc'))
self.assertEqual(m.pattern, re.compile('^abc.*$'))
def test_suffix(self):
m = self.m({'suffix': 'xyz'})
self.assertTrue(m('xyz'))
self.assertTrue(m('abcdefxyz'))
self.assertTrue(m('123xyz'))
self.assertFalse(m('xyzabc'))
self.assertFalse(m(''))
self.assertFalse(m('abc'))
self.assertFalse(m('123'))
self.assertEqual(m.pattern, re.compile('^.*xyz$'))
def test_prefix_suffix(self):
m = self.m({'prefix': 'abc', 'suffix': 'xyz'})
self.assertTrue(m('abcxyz'))
self.assertTrue(m('abcdefxyz'))
self.assertTrue(m('abc123xyz'))
self.assertFalse(m('xyzabc'))
self.assertFalse(m(''))
self.assertFalse(m('abc'))
self.assertFalse(m('123'))
self.assertFalse(m('xyz'))
self.assertFalse(m('abcxyz123'))
self.assertFalse(m('123abcxyz'))
self.assertEqual(m.pattern, re.compile('^abc.*xyz$'))
def test_prefix_suffix_escape(self):
m = self.m({'prefix': '$', 'suffix': '$'})
self.assertTrue(m('$abcdef$'))
self.assertTrue(m('$$'))
self.assertTrue(m('$123$'))
self.assertFalse(m('abc$'))
self.assertFalse(m('$abc'))
self.assertFalse(m('$'))
self.assertEqual(m.pattern, re.compile(r'^\$.*\$$'))
def test_nospec(self):
with self.assertRaises(ValueError):
self.m({})
def test_pat_nohint(self):
m = self.m({'pattern': 'abc.*'})
with self.assertRaises(restspec.NoValue):
m.hint('test')
def test_pat_expl_hint(self):
m = self.m({'pattern': 'abc.*', 'hint': 'abc{}def'})
self.assertEqual(m.hint('test'), 'abctestdef')
self.assertEqual(m.hint('abc'), 'abcabcdef')
self.assertEqual(m.hint(''), 'abcdef')
def test_prefix_hint(self):
m = self.m({'prefix': 'abc'})
self.assertEqual(m.hint('test'), 'abctest')
self.assertEqual(m.hint(''), 'abc')
self.assertEqual(m.hint('abc'), 'abcabc')
def test_suffix_hint(self):
m = self.m({'suffix': 'abc'})
self.assertEqual(m.hint('test'), 'testabc')
self.assertEqual(m.hint(''), 'abc')
self.assertEqual(m.hint('abc'), 'abcabc')
def test_prefix_suffix_hint(self):
m = self.m({'prefix': 'abc', 'suffix': 'xyz'})
self.assertEqual(m.hint('test'), 'abctestxyz')
self.assertEqual(m.hint(''), 'abcxyz')
self.assertEqual(m.hint('abc'), 'abcabcxyz')
def test_prefix_expl_hint(self):
m = self.m({'prefix': 'abc', 'hint': 'abc{}123'})
self.assertEqual(m.hint("xyz"), "abcxyz123")
def test_suffix_expl_hint(self):
m = self.m({'suffix': 'abc', 'hint': '123{}abc'})
self.assertEqual(m.hint("xyz"), "123xyzabc")
def test_prefix_suffix_expl_hint(self):
m = self.m({'prefix': 'abc', 'suffix': 'xyz', 'hint': 'abcxyz{}abcxyz'})
self.assertEqual(m.hint("123"), "abcxyz123abcxyz")
| mit | 8,510,429,901,137,347,000 | 35.774194 | 88 | 0.527444 | false |
Nanguage/BioInfoCollections | others/bedgraph2bed.py | 1 | 1815 | import pandas as pd
import click
def skip_lines(path):
n = 0
with open(path) as f:
for line in f:
if line.startswith("track"):
n += 1
else:
break
return n
def read_bed(path):
n_skip = skip_lines(path)
df = pd.read_table(path, sep="\t", header=None, skiprows=n_skip)
base_cols = ['chr', 'start', 'end']
n_col = len(df.columns)
if n_col == 4:
columns = base_cols + ['value']
else:
columns = base_cols
if n_col >= 6:
columns += ['name', 'score', 'strand']
if n_col >= 9:
columns += ['thickStart', 'thickEnd', 'itemRgb']
if n_col == 12:
columns += ['blockCount', 'blockSizes', 'blockStarts']
df.columns = columns
return df
def region_str(df):
ser = df.chr + '_' + df.start.map(str) + '_' + df.end.map(str)
return ser
@click.command()
@click.argument("bedgraph")
@click.argument("output")
@click.option("--ref-bed", "-r",
help="reference BED file.")
def bedgraph2bed(bedgraph, output, ref_bed):
"""
Expand bedGraph to BED.
Default expand to BED6, if set reference BED,
substitude the value section with bedgraph value.
"""
bg = read_bed(bedgraph)
if ref_bed:
ref_bed = read_bed(ref_bed)
outbed = ref_bed
bg.index = region_str(bg)
outbed.index = region_str(outbed)
outbed = outbed.loc[bg.index]
outbed.score = bg.value
else:
outbed = bg
outbed['name'] = '.'
outbed['score'] = bg.value
outbed['strand'] = '.'
outbed = outbed[['chr', 'start', 'end', 'name', 'score', 'strand']]
outbed.to_csv(output, header=False, sep="\t", index=False)
if __name__ == "__main__":
eval("bedgraph2bed()")
| gpl-3.0 | -4,500,874,446,086,020,600 | 24.928571 | 75 | 0.539394 | false |
zhy0216/random-read | utils/image_crawler.py | 1 | 2041 | import os.path
import shutil
from urlparse import urlparse, urljoin
import base64
import requests
from bs4 import BeautifulSoup
from blue.settings import DOWNLOAD_IMAGE_FOLDER, IMAGE_PREFIX
''' download the image from the article '''
IMAGE_DOWNLOAD_FOLDER = DOWNLOAD_IMAGE_FOLDER
def get_absolute_url(article_url, image_url):
urlcomponent = urlparse(article_url)
host = urlcomponent.netloc
image_url = image_url.strip()
if image_url.startswith("http://") \
or image_url.startswith("https://"):
return image_url
if image_url.startswith("//"):
return "http:" + image_url
if image_url.startswith("/"):
return host + image_url
return urljoin(article_url, image_url)
def get_name(url):
name = base64.b64encode(url)
dot_index = url.rfind('.')
if dot_index < 0:
return None
question_mark_index = url.rfind('?')
if(question_mark_index > dot_index):
return name + url[dot_index:question_mark_index]
return name + url[dot_index:]
## this is export to use
def change_image(article):
soup = BeautifulSoup(article.content)
## ''.join(soup.body.contents)
for img in soup.find_all('img'):
src = img.get('src', None)
if src:
absolute_url = get_absolute_url(article.original_url, src)
name = get_name(absolute_url)
if name is None:
continue
img['src'] = IMAGE_PREFIX + name
# download image
# its better to use another worker
download_image(absolute_url, name)
## catch the image can be caught
article.content = ''.join(map(str, soup.body.contents))
article.save()
def download_image(image_url, new_name):
filename = IMAGE_DOWNLOAD_FOLDER + new_name
if os.path.isfile(filename):
return None
response = requests.get(image_url, stream=True)
with open(filename, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
| mit | 6,987,141,486,954,231,000 | 24.835443 | 70 | 0.627634 | false |
carthach/essentia | test/src/unittests/sfx/test_tctototal.py | 1 | 2182 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from math import exp
class TestTCToTotal(TestCase):
def testEmpty(self):
self.assertComputeFails(TCToTotal(), [])
def testOne(self):
self.assertComputeFails(TCToTotal(), [])
def testImpulseBeginning(self):
self.assertAlmostEqual(TCToTotal()([1,0]), 0)
def testImpulseMiddle(self):
self.assertAlmostEqual(TCToTotal()([0,1,0]), 0.5)
def testTriangle(self):
size = 100
envelope = zeros(size)
for i in range(int(size/2)):
envelope[i] = i
for i in range(int(size/2), size):
envelope[i] = size - i
TCToTotal()(envelope)
self.assertAlmostEqual(TCToTotal()(envelope), 0.5*size/float(size-1))
def testImpulseEnd(self):
self.assertAlmostEqual(TCToTotal()([0,1]), 1)
def testFlat(self):
self.assertAlmostEqual(TCToTotal()([1]*100), 0.5)
def testZero(self):
self.assertComputeFails(TCToTotal(), [0]*100)
def testGaussian(self):
data = [x/100. for x in range(-50, 50)]
envelope = [exp(-(x**2)/2) for x in data]
self.assertAlmostEqual(TCToTotal()(envelope), 0.5, 1e-3)
def testAlternating(self):
self.assertComputeFails(TCToTotal(), [1,-1,1,-1,1,-1,1,-1])
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -6,654,946,886,523,032,000 | 29.732394 | 79 | 0.666361 | false |
dreadatour/Cactus | cactus/tests/deployment/test_bucket_name.py | 1 | 1110 | # coding:utf-8
from cactus.tests.deployment import DummyUI, DummySite, DummyDeploymentEngine, BaseDeploymentTestCase
class BucketNameTestCase(BaseDeploymentTestCase):
def setUp(self):
super(BucketNameTestCase, self).setUp()
self.ui = DummyUI(create_bucket=False)
self.site = DummySite(self.test_dir, self.ui)
self.engine = DummyDeploymentEngine(self.site)
def test_not_configured(self):
"""
Test that we prompt the bucket name in case it's not configured
"""
self.assertEqual(0, self.ui.asked_name)
self.engine.configure()
self.assertEqual(1, self.ui.asked_name)
self.assertEqual("test-bucket", self.engine.bucket_name)
def test_configured(self):
"""
Test that we don't prompt the bucket name in case it's configured
"""
self.site.config.set("test-conf-entry", "test-bucket")
self.assertEqual(0, self.ui.asked_name)
self.engine.configure()
self.assertEqual(0, self.ui.asked_name)
self.assertEqual("test-bucket", self.engine.bucket_name)
| bsd-3-clause | -5,000,288,575,577,189,000 | 31.647059 | 101 | 0.659459 | false |
DrDos0016/z2 | museum_site/migrations/0011_auto_20170112_0208.py | 1 | 2385 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-12 02:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0010_auto_20160826_2152'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'ordering': ['title']},
),
migrations.AlterModelOptions(
name='detail',
options={'ordering': ['detail']},
),
migrations.AlterModelOptions(
name='file',
options={'ordering': ['title']},
),
migrations.AlterModelOptions(
name='review',
options={'ordering': ['id']},
),
migrations.AddField(
model_name='file',
name='parent',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='company',
field=models.CharField(blank=True, default='', max_length=80, null=True),
),
migrations.AlterField(
model_name='file',
name='description',
field=models.TextField(blank=True, default='', null=True),
),
migrations.AlterField(
model_name='file',
name='details',
field=models.ManyToManyField(blank=True, default=None, to='museum_site.Detail'),
),
migrations.AlterField(
model_name='file',
name='release_date',
field=models.DateField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='review',
name='author',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='review',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='review',
name='ip',
field=models.GenericIPAddressField(blank=True, null=True),
),
]
| mit | -992,943,723,689,504,300 | 30.381579 | 93 | 0.534172 | false |
ericawright/bedrock | bedrock/firefox/views.py | 1 | 32602 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import hmac
import re
from collections import OrderedDict
from urllib.parse import urlparse
import basket
import querystringsafe_base64
from django.conf import settings
from django.http import (
HttpResponsePermanentRedirect,
JsonResponse,
)
from django.utils.cache import patch_response_headers
from django.utils.encoding import force_text
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET, require_POST
from django.views.generic.base import TemplateView
from lib import l10n_utils
from lib.l10n_utils import L10nTemplateView
from lib.l10n_utils.dotlang import lang_file_is_active
from lib.l10n_utils.fluent import ftl, ftl_file_is_active
from product_details.version_compare import Version
from bedrock.base.urlresolvers import reverse
from bedrock.base.waffle import switch
from bedrock.contentcards.models import get_page_content_cards
from bedrock.firefox.firefox_details import firefox_android, firefox_desktop
from bedrock.firefox.forms import SendToDeviceWidgetForm
from bedrock.newsletter.forms import NewsletterFooterForm
from bedrock.releasenotes import version_re
from bedrock.wordpress.views import BlogPostsView
from bedrock.base.views import GeoRedirectView
UA_REGEXP = re.compile(r"Firefox/(%s)" % version_re)
INSTALLER_CHANNElS = [
'release',
'beta',
'alpha',
'nightly',
'aurora', # deprecated name for dev edition
]
SEND_TO_DEVICE_MESSAGE_SETS = settings.SEND_TO_DEVICE_MESSAGE_SETS
STUB_VALUE_NAMES = [
# name, default value
('utm_source', '(not set)'),
('utm_medium', '(direct)'),
('utm_campaign', '(not set)'),
('utm_content', '(not set)'),
('experiment', '(not set)'),
('variation', '(not set)'),
('ua', '(not set)'),
]
STUB_VALUE_RE = re.compile(r'^[a-z0-9-.%():_]+$', flags=re.IGNORECASE)
class InstallerHelpView(L10nTemplateView):
ftl_files_map = {
'firefox/installer-help-redesign.html': ['firefox/installer-help']
}
def get_context_data(self, **kwargs):
ctx = super(InstallerHelpView, self).get_context_data(**kwargs)
installer_lang = self.request.GET.get('installer_lang', None)
installer_channel = self.request.GET.get('channel', None)
ctx['installer_lang'] = None
ctx['installer_channel'] = None
if installer_lang and installer_lang in firefox_desktop.languages:
ctx['installer_lang'] = installer_lang
if installer_channel and installer_channel in INSTALLER_CHANNElS:
if installer_channel == 'aurora':
ctx['installer_channel'] = 'alpha'
else:
ctx['installer_channel'] = installer_channel
return ctx
def get_template_names(self):
if ftl_file_is_active('firefox/installer-help'):
template_name = 'firefox/installer-help-redesign.html'
else:
template_name = 'firefox/installer-help.html'
return [template_name]
@require_GET
def stub_attribution_code(request):
"""Return a JSON response containing the HMAC signed stub attribution value"""
if not request.is_ajax():
return JsonResponse({'error': 'Resource only available via XHR'}, status=400)
response = None
if not settings.STUB_ATTRIBUTION_RATE:
# return as though it was rate limited, since it was
response = JsonResponse({'error': 'rate limited'}, status=429)
elif not settings.STUB_ATTRIBUTION_HMAC_KEY:
response = JsonResponse({'error': 'service not configured'}, status=403)
if response:
patch_response_headers(response, 300) # 5 min
return response
data = request.GET
codes = OrderedDict()
has_value = False
for name, default_value in STUB_VALUE_NAMES:
val = data.get(name, '')
# remove utm_
if name.startswith('utm_'):
name = name[4:]
if val and STUB_VALUE_RE.match(val):
codes[name] = val
has_value = True
else:
codes[name] = default_value
if codes['source'] == '(not set)' and 'referrer' in data:
try:
domain = urlparse(data['referrer']).netloc
if domain and STUB_VALUE_RE.match(domain):
codes['source'] = domain
codes['medium'] = 'referral'
has_value = True
except Exception:
# any problems and we should just ignore it
pass
if not has_value:
codes['source'] = 'www.mozilla.org'
codes['medium'] = '(none)'
code_data = sign_attribution_codes(codes)
if code_data:
response = JsonResponse(code_data)
else:
response = JsonResponse({'error': 'Invalid code'}, status=400)
patch_response_headers(response, 300) # 5 min
return response
def get_attrribution_code(codes):
"""
Take the attribution codes and return the URL encoded string
respecting max length.
"""
code = '&'.join('='.join(attr) for attr in codes.items())
if len(codes['campaign']) > 5 and len(code) > settings.STUB_ATTRIBUTION_MAX_LEN:
# remove 5 char at a time
codes['campaign'] = codes['campaign'][:-5] + '_'
code = get_attrribution_code(codes)
return code
def sign_attribution_codes(codes):
"""
Take the attribution codes and return the base64 encoded string
respecting max length and HMAC signature.
"""
key = settings.STUB_ATTRIBUTION_HMAC_KEY
code = get_attrribution_code(codes)
if len(code) > settings.STUB_ATTRIBUTION_MAX_LEN:
return None
code = querystringsafe_base64.encode(code.encode())
sig = hmac.new(key.encode(), code, hashlib.sha256).hexdigest()
return {'attribution_code': code.decode(), 'attribution_sig': sig}
@require_POST
@csrf_exempt
def send_to_device_ajax(request):
locale = l10n_utils.get_locale(request)
phone_or_email = request.POST.get('phone-or-email')
# ensure a value was entered in phone or email field
if not phone_or_email:
return JsonResponse({'success': False, 'errors': ['phone-or-email']})
# pull message set from POST (not part of form, so wont be in cleaned_data)
message_set = request.POST.get('message-set', 'default')
# begin collecting data to pass to form constructor
data = {'platform': request.POST.get('platform')}
# determine if email or phone number was submitted
data_type = 'email' if '@' in phone_or_email else 'number'
# populate data type in form data dict
data[data_type] = phone_or_email
# instantiate the form with processed POST data
form = SendToDeviceWidgetForm(data)
if form.is_valid():
phone_or_email = form.cleaned_data.get(data_type)
platform = form.cleaned_data.get('platform')
# if no platform specified, default to 'all'
if not platform:
platform = 'all'
# ensure we have a valid message set. if not, fall back to default
if message_set not in SEND_TO_DEVICE_MESSAGE_SETS:
MESSAGES = SEND_TO_DEVICE_MESSAGE_SETS['default']
else:
MESSAGES = SEND_TO_DEVICE_MESSAGE_SETS[message_set]
if data_type == 'number':
# for testing purposes return success
if phone_or_email == '5555555555':
return JsonResponse({'success': True})
if platform in MESSAGES['sms']:
data = {
'mobile_number': phone_or_email,
'msg_name': MESSAGES['sms'][platform],
'lang': locale,
}
country = request.POST.get('country')
if country and re.match(r'^[a-z]{2}$', country, flags=re.I):
data['country'] = country
try:
basket.request('post', 'subscribe_sms', data=data)
except basket.BasketException as e:
if e.desc == 'mobile_number is invalid':
return JsonResponse({'success': False, 'errors': ['number']})
else:
return JsonResponse(
{'success': False, 'errors': ['system']}, status=400
)
else:
return JsonResponse({'success': False, 'errors': ['platform']})
else: # email
if platform in MESSAGES['email']:
try:
basket.subscribe(
phone_or_email,
MESSAGES['email'][platform],
source_url=request.POST.get('source-url'),
lang=locale,
)
except basket.BasketException:
return JsonResponse(
{'success': False, 'errors': ['system']}, status=400
)
else:
return JsonResponse({'success': False, 'errors': ['platform']})
resp_data = {'success': True}
else:
resp_data = {'success': False, 'errors': list(form.errors)}
return JsonResponse(resp_data)
def firefox_all(request):
ftl_files = 'firefox/all'
product_android = firefox_android
product_desktop = firefox_desktop
# Human-readable product labels
products = OrderedDict(
[
('desktop_release', ftl('firefox-all-product-firefox', ftl_files=ftl_files)),
('desktop_beta', ftl('firefox-all-product-firefox-beta', ftl_files=ftl_files)),
('desktop_developer', ftl('firefox-all-product-firefox-developer', ftl_files=ftl_files)),
('desktop_nightly', ftl('firefox-all-product-firefox-nightly', ftl_files=ftl_files)),
('desktop_esr', ftl('firefox-all-product-firefox-esr', ftl_files=ftl_files)),
('android_release', ftl('firefox-all-product-firefox-android', ftl_files=ftl_files)),
('android_beta', ftl('firefox-all-product-firefox-android-beta', ftl_files=ftl_files)),
('android_nightly', ftl('firefox-all-product-firefox-android-nightly', ftl_files=ftl_files)),
]
)
channel_release = 'release'
channel_beta = 'beta'
channel_dev = 'devedition'
channel_nightly = 'nightly'
channel_esr = 'esr'
channel_esr_next = 'esr_next'
latest_release_version_desktop = product_desktop.latest_version(channel_release)
latest_beta_version_desktop = product_desktop.latest_version(channel_beta)
latest_developer_version_desktop = product_desktop.latest_version(channel_dev)
latest_nightly_version_desktop = product_desktop.latest_version(channel_nightly)
latest_esr_version_desktop = product_desktop.latest_version(channel_esr)
latest_esr_next_version_desktop = product_desktop.latest_version(channel_esr_next)
latest_release_version_android = product_android.latest_version(channel_release)
latest_beta_version_android = product_android.latest_version(channel_beta)
latest_nightly_version_android = product_android.latest_version(channel_nightly)
context = {
'products': products.items(),
'desktop_release_platforms': product_desktop.platforms(channel_release),
'desktop_release_full_builds': product_desktop.get_filtered_full_builds(
channel_release, latest_release_version_desktop
),
'desktop_release_channel_label': product_desktop.channel_labels.get(
channel_release, 'Firefox'
),
'desktop_release_latest_version': latest_release_version_desktop,
'desktop_beta_platforms': product_desktop.platforms(channel_beta),
'desktop_beta_full_builds': product_desktop.get_filtered_full_builds(
channel_beta, latest_beta_version_desktop
),
'desktop_beta_channel_label': product_desktop.channel_labels.get(
channel_beta, 'Firefox'
),
'desktop_beta_latest_version': latest_beta_version_desktop,
'desktop_developer_platforms': product_desktop.platforms(channel_dev),
'desktop_developer_full_builds': product_desktop.get_filtered_full_builds(
channel_dev, latest_developer_version_desktop
),
'desktop_developer_channel_label': product_desktop.channel_labels.get(
channel_dev, 'Firefox'
),
'desktop_developer_latest_version': latest_developer_version_desktop,
'desktop_nightly_platforms': product_desktop.platforms(channel_nightly),
'desktop_nightly_full_builds': product_desktop.get_filtered_full_builds(
channel_nightly, latest_nightly_version_desktop
),
'desktop_nightly_channel_label': product_desktop.channel_labels.get(
channel_nightly, 'Firefox'
),
'desktop_nightly_latest_version': latest_nightly_version_desktop,
'desktop_esr_platforms': product_desktop.platforms(channel_esr),
'desktop_esr_full_builds': product_desktop.get_filtered_full_builds(
channel_esr, latest_esr_version_desktop
),
'desktop_esr_channel_label': product_desktop.channel_labels.get(
channel_esr, 'Firefox'
),
'desktop_esr_latest_version': latest_esr_version_desktop,
'android_release_platforms': product_android.platforms(channel_release),
'android_release_full_builds': product_android.get_filtered_full_builds(
channel_release, latest_release_version_android
),
'android_release_channel_label': product_android.channel_labels.get(
channel_release, 'Firefox'
),
'android_release_latest_version': latest_release_version_android,
'android_beta_platforms': product_android.platforms(channel_beta),
'android_beta_full_builds': product_android.get_filtered_full_builds(
channel_beta, latest_beta_version_android
),
'android_beta_channel_label': product_android.channel_labels.get(
channel_beta, 'Firefox'
),
'android_beta_latest_version': latest_beta_version_android,
'android_nightly_platforms': product_android.platforms(channel_nightly),
'android_nightly_full_builds': product_android.get_filtered_full_builds(
channel_nightly, latest_nightly_version_android
),
'android_nightly_channel_label': product_android.channel_labels.get(
channel_nightly, 'Firefox'
),
'android_nightly_latest_version': latest_nightly_version_android,
}
if latest_esr_next_version_desktop:
context['desktop_esr_platforms_next'] = product_desktop.platforms(
channel_esr_next, True
)
context[
'desktop_esr_full_builds_next'
] = product_desktop.get_filtered_full_builds(
channel_esr_next, latest_esr_next_version_desktop
)
context['desktop_esr_channel_label_next'] = (
product_desktop.channel_labels.get(channel_esr_next, 'Firefox'),
)
context['desktop_esr_next_version'] = latest_esr_next_version_desktop
return l10n_utils.render(request, 'firefox/all-unified.html',
context, ftl_files=ftl_files)
def detect_channel(version):
match = re.match(r'\d{1,2}', version)
if match:
num_version = int(match.group(0))
if num_version >= 35:
if version.endswith('a1'):
return 'nightly'
if version.endswith('a2'):
return 'developer'
if version.endswith('beta'):
return 'beta'
return 'unknown'
def show_38_0_5_firstrun(version):
try:
version = Version(version)
except ValueError:
return False
return version >= Version('38.0.5')
def show_57_dev_whatsnew(version):
version = version[:-2]
try:
version = Version(version)
except ValueError:
return False
return version >= Version('57.0')
def show_62_firstrun(version):
try:
version = Version(version)
except ValueError:
return False
return version >= Version('62.0')
def show_57_firstrun(version):
try:
version = Version(version)
except ValueError:
return False
return version >= Version('57.0')
def show_57_dev_firstrun(version):
version = version[:-2]
try:
version = Version(version)
except ValueError:
return False
return version >= Version('57.0')
def show_70_0_2_whatsnew(oldversion):
try:
oldversion = Version(oldversion)
except ValueError:
return False
return oldversion >= Version('70.0')
def redirect_old_firstrun(version):
try:
version = Version(version)
except ValueError:
return False
return version < Version('40.0')
def show_default_account_whatsnew(version):
try:
version = Version(version)
except ValueError:
return False
return version >= Version('60.0')
class FirstrunView(l10n_utils.LangFilesMixin, TemplateView):
def get(self, *args, **kwargs):
version = self.kwargs.get('version') or ''
# redirect legacy /firstrun URLs to /firefox/new/
if redirect_old_firstrun(version):
return HttpResponsePermanentRedirect(reverse('firefox.new'))
else:
return super(FirstrunView, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(FirstrunView, self).get_context_data(**kwargs)
# add version to context for use in templates
ctx['version'] = self.kwargs.get('version') or ''
return ctx
def get_template_names(self):
version = self.kwargs.get('version') or ''
if detect_channel(version) == 'developer':
if show_57_dev_firstrun(version):
template = 'firefox/developer/firstrun.html'
else:
template = 'firefox/firstrun/firstrun.html'
else:
template = 'firefox/firstrun/firstrun.html'
# return a list to conform with original intention
return [template]
class WhatsNewRedirectorView(GeoRedirectView):
geo_urls = {
'IN': 'firefox.whatsnew.india'
}
default_url = 'firefox.whatsnew.all'
def get_redirect_url(self, *args, **kwargs):
if 'version' in kwargs and kwargs['version'] is None:
del kwargs['version']
return super().get_redirect_url(*args, **kwargs)
class WhatsnewView(L10nTemplateView):
ftl_files_map = {
'firefox/whatsnew/index-account.html': ['firefox/whatsnew/whatsnew-account', 'firefox/whatsnew/whatsnew']
}
def get_context_data(self, **kwargs):
ctx = super(WhatsnewView, self).get_context_data(**kwargs)
# add version to context for use in templates
version = self.kwargs.get('version') or ''
match = re.match(r'\d{1,2}', version)
num_version = int(match.group(0)) if match else ''
ctx['version'] = version
ctx['num_version'] = num_version
# add analytics parameters to context for use in templates
channel = detect_channel(version)
if channel not in ['nightly', 'developer', 'beta']:
channel = ''
analytics_version = str(num_version) + channel
entrypoint = 'mozilla.org-whatsnew' + analytics_version
campaign = 'whatsnew' + analytics_version
ctx['analytics_version'] = analytics_version
ctx['entrypoint'] = entrypoint
ctx['campaign'] = campaign
ctx['utm_params'] = 'utm_source={0}&utm_medium=referral&utm_campaign={1}&entrypoint={2}'.format(
entrypoint, campaign, entrypoint)
return ctx
def get_template_names(self):
variation = self.request.GET.get('v', None)
locale = l10n_utils.get_locale(self.request)
version = self.kwargs.get('version') or ''
oldversion = self.request.GET.get('oldversion', '')
# old versions of Firefox sent a prefixed version
if oldversion.startswith('rv:'):
oldversion = oldversion[3:]
channel = detect_channel(version)
if channel == 'nightly':
template = 'firefox/nightly_whatsnew.html'
elif channel == 'developer':
if show_57_dev_whatsnew(version):
template = 'firefox/developer/whatsnew.html'
else:
template = 'firefox/whatsnew/index.html'
elif channel == 'beta':
if version.startswith('74.'):
if locale in ['en-US', 'en-CA', 'en-GB']:
template = 'firefox/whatsnew/whatsnew-fx70-en.html'
elif locale == 'de':
template = 'firefox/whatsnew/whatsnew-fx70-de.html'
elif locale == 'fr':
template = 'firefox/whatsnew/whatsnew-fx70-fr.html'
else:
template = 'firefox/whatsnew/index.html'
else:
template = 'firefox/whatsnew/index.html'
elif locale == 'id':
template = 'firefox/whatsnew/index-lite.id.html'
elif version.startswith('78.'):
variations = ['2', '3', '4', '5']
locales = ['en-US', 'en-CA', 'en-GB', 'de', 'fr']
if variation in variations and locale in locales:
locale = locale.split('-')[0]
template = 'firefox/whatsnew/whatsnew-fx78-{0}-{1}.html'.format(variation, locale)
else:
template = 'firefox/whatsnew/whatsnew-fx78.html'
elif version.startswith('77.') and lang_file_is_active('firefox/whatsnew_77', locale):
# YouTube is blocked in China so zh-CN gets an alternative, self-hosted video.
# If we run into bandwidth trouble we can turn the video off and zh-CN falls back to the 76 page.
if locale == 'zh-CN' and not switch('firefox-whatsnew77-video-zhCN'):
template = 'firefox/whatsnew/whatsnew-fx76.html'
else:
template = 'firefox/whatsnew/whatsnew-fx77.html'
elif version.startswith('76.') and lang_file_is_active('firefox/whatsnew_76', locale):
template = 'firefox/whatsnew/whatsnew-fx76.html'
elif version.startswith('75.') and lang_file_is_active('firefox/whatsnew_75', locale):
template = 'firefox/whatsnew/whatsnew-fx75.html'
elif version.startswith('74.'):
# Facebook isn't used in China so zh-CN should fall back to more relevant content
if locale != 'zh-CN' and lang_file_is_active('firefox/whatsnew_74', locale):
template = 'firefox/whatsnew/whatsnew-fx74.html'
elif lang_file_is_active('firefox/whatsnew_73', locale):
template = 'firefox/whatsnew/whatsnew-fx73.html'
else:
template = 'firefox/whatsnew/index.html'
elif version.startswith('73.') and lang_file_is_active('firefox/whatsnew_73', locale):
template = 'firefox/whatsnew/whatsnew-fx73.html'
elif version.startswith('72.') and lang_file_is_active('firefox/whatsnew_71', locale):
template = 'firefox/whatsnew/whatsnew-fx71.html'
elif version.startswith('71.') and lang_file_is_active('firefox/whatsnew_71', locale):
template = 'firefox/whatsnew/whatsnew-fx71.html'
elif version.startswith('70.'):
if locale in ['en-US', 'en-CA', 'en-GB']:
template = 'firefox/whatsnew/whatsnew-fx70-en.html'
elif locale == 'de':
template = 'firefox/whatsnew/whatsnew-fx70-de.html'
elif locale == 'fr':
template = 'firefox/whatsnew/whatsnew-fx70-fr.html'
else:
template = 'firefox/whatsnew/index.html'
else:
if show_default_account_whatsnew(version) and ftl_file_is_active('firefox/whatsnew/whatsnew-account'):
template = 'firefox/whatsnew/index-account.html'
else:
template = 'firefox/whatsnew/index.html'
# return a list to conform with original intention
return [template]
class WhatsNewIndiaView(WhatsnewView):
def get_template_names(self):
locale = l10n_utils.get_locale(self.request)
version = self.kwargs.get('version') or ''
channel = detect_channel(version)
if locale.startswith('en-') and channel not in ['nightly', 'alpha', 'beta']:
# return a list to conform with original intention
template = ['firefox/whatsnew/index-lite.html']
else:
template = super().get_template_names()
return template
class DownloadThanksView(L10nTemplateView):
ftl_files_map = {
'firefox/new/trailhead/thanks.html': ['firefox/new/download'],
'firefox/new/trailhead/thanks-b.html': ['firefox/new/download'],
}
# place expected ?v= values in this list
variations = []
def get_context_data(self, **kwargs):
ctx = super(DownloadThanksView, self).get_context_data(**kwargs)
variant = self.request.GET.get('v', None)
# ensure variant matches pre-defined value
if variant not in self.variations:
variant = None
ctx['variant'] = variant
return ctx
def get_template_names(self):
variant = self.request.GET.get('v', None)
# ensure variant matches pre-defined value
if variant not in self.variations:
variant = None
if ftl_file_is_active('firefox/new/download'):
template = 'firefox/new/trailhead/thanks.html'
else:
template = 'firefox/new/protocol/thanks.html'
return [template]
class NewView(L10nTemplateView):
ftl_files_map = {
'firefox/new/trailhead/download.html': ['firefox/new/download', 'banners/firefox-mobile'],
'firefox/new/trailhead/download-yandex.html': ['firefox/new/download', 'banners/firefox-mobile'],
}
# place expected ?v= values in this list
variations = ['a', 'b']
def get(self, *args, **kwargs):
# Remove legacy query parameters (Bug 1236791)
if self.request.GET.get('product', None) or self.request.GET.get('os', None):
return HttpResponsePermanentRedirect(reverse('firefox.new'))
scene = self.request.GET.get('scene', None)
if scene == '2':
# send to new permanent scene2 URL (bug 1438302)
thanks_url = reverse('firefox.download.thanks')
query_string = self.request.META.get('QUERY_STRING', '')
if query_string:
thanks_url = '?'.join(
[thanks_url, force_text(query_string, errors='ignore')]
)
return HttpResponsePermanentRedirect(thanks_url)
return super(NewView, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(NewView, self).get_context_data(**kwargs)
# note: v and xv params only allow a-z, A-Z, 0-9, -, and _ characters
experience = self.request.GET.get('xv', None)
variant = self.request.GET.get('v', None)
# ensure variant matches pre-defined value
if variant not in self.variations:
variant = None
ctx['experience'] = experience
ctx['variant'] = variant
return ctx
def get_template_names(self):
locale = l10n_utils.get_locale(self.request)
variant = self.request.GET.get('v', None)
experience = self.request.GET.get('xv', None)
# ensure variant matches pre-defined value
if variant not in self.variations:
variant = None
if locale == 'ru' and switch('firefox-yandex'):
template = 'firefox/new/trailhead/download-yandex.html'
elif locale == 'en-US' and experience == 'desktop':
template = 'firefox/new/desktop/download.html'
elif locale == 'en-US' and switch('experiment-new-redesign') and variant == 'b':
template = 'firefox/new/desktop/download.html'
elif ftl_file_is_active('firefox/new/download'):
template = 'firefox/new/trailhead/download.html'
else:
template = 'firefox/new/protocol/download.html'
return [template]
def ios_testflight(request):
# no country field, so no need to send locale
newsletter_form = NewsletterFooterForm('ios-beta-test-flight', '')
return l10n_utils.render(
request, 'firefox/testflight.html', {'newsletter_form': newsletter_form}
)
def ad_blocker(request):
return l10n_utils.render(request, 'firefox/features/adblocker.html')
class FeaturesBookmarksView(BlogPostsView):
blog_posts_limit = 3
blog_posts_template_variable = 'articles'
blog_slugs = 'firefox'
blog_tags = ['modern', 'privacy', 'featured']
template_name = 'firefox/features/bookmarks.html'
class FeaturesFastView(BlogPostsView):
blog_posts_limit = 3
blog_posts_template_variable = 'articles'
blog_slugs = 'firefox'
blog_tags = ['fastest', 'featured']
template_name = 'firefox/features/fast.html'
class FeaturesIndependentView(BlogPostsView):
blog_posts_limit = 3
blog_posts_template_variable = 'articles'
blog_slugs = 'firefox'
blog_tags = ['browser', 'featured']
template_name = 'firefox/features/independent.html'
class FeaturesMemoryView(BlogPostsView):
blog_posts_limit = 3
blog_posts_template_variable = 'articles'
blog_slugs = 'firefox'
blog_tags = ['memory', 'featured']
template_name = 'firefox/features/memory.html'
class FeaturesPasswordManagerView(BlogPostsView):
blog_posts_limit = 3
blog_posts_template_variable = 'articles'
blog_slugs = 'firefox'
blog_tags = ['modern', 'privacy', 'featured']
template_name = 'firefox/features/password-manager.html'
class FeaturesPrivateBrowsingView(BlogPostsView):
blog_posts_limit = 3
blog_posts_template_variable = 'articles'
blog_slugs = 'firefox'
blog_tags = ['privacy', 'security', 'featured']
template_name = 'firefox/features/private-browsing.html'
class FirefoxHomeView(L10nTemplateView):
ftl_files_map = {
'firefox/home/index-master.html': ['firefox/home']
}
def get_template_names(self):
if ftl_file_is_active('firefox/home'):
template_name = 'firefox/home/index-master.html'
else:
template_name = 'firefox/home/index-quantum.html'
return [template_name]
def election_with_cards(request):
locale = l10n_utils.get_locale(request)
ctx = {
'page_content_cards': get_page_content_cards('election-en', locale),
'active_locales': ['de', 'fr', 'en-US'],
}
if locale == 'de':
template_name = 'firefox/election/index-de.html'
ctx['page_content_cards'] = get_page_content_cards('election-de', 'de')
elif locale == 'fr':
template_name = 'firefox/election/index-fr.html'
ctx['page_content_cards'] = get_page_content_cards('election-fr', 'fr')
else:
template_name = 'firefox/election/index.html'
ctx['page_content_cards'] = get_page_content_cards('election-en', 'en-US')
return l10n_utils.render(request, template_name, ctx)
BREACH_TIPS_URLS = {
'de': 'https://blog.mozilla.org/firefox/de/was-macht-man-nach-einem-datenleck/',
'fr': 'https://blog.mozilla.org/firefox/fr/que-faire-en-cas-de-fuite-de-donnees/',
'en-CA': 'https://blog.mozilla.org/firefox/what-to-do-after-a-data-breach/',
'en-GB': 'https://blog.mozilla.org/firefox/what-to-do-after-a-data-breach/',
'en-US': 'https://blog.mozilla.org/firefox/what-to-do-after-a-data-breach/',
}
def firefox_welcome_page1(request):
locale = l10n_utils.get_locale(request)
# get localized blog post URL for 2019 page
breach_tips_query = (
'?utm_source=mozilla.org-firefox-welcome-1&utm_medium=referral'
'&utm_campaign=welcome-1-monitor&entrypoint=mozilla.org-firefox-welcome-1'
)
breach_tips_url = BREACH_TIPS_URLS.get(locale, BREACH_TIPS_URLS['en-US'])
context = {'breach_tips_url': breach_tips_url + breach_tips_query}
template_name = 'firefox/welcome/page1.html'
return l10n_utils.render(request, template_name, context,
ftl_files='firefox/welcome/page1')
| mpl-2.0 | -329,211,271,012,326,140 | 35.921857 | 114 | 0.623765 | false |
jaeilepp/eggie | mne/coreg.py | 1 | 40760 | """Coregistration between different coordinate frames"""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from .externals.six.moves import configparser
import fnmatch
from glob import glob, iglob
import os
import re
import shutil
from warnings import warn
import numpy as np
from numpy import dot
from scipy.optimize import leastsq
from scipy.spatial.distance import cdist
from scipy.linalg import norm
from .io.meas_info import read_fiducials, write_fiducials
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
from .surface import (read_surface, write_surface, read_bem_surfaces,
write_bem_surface)
from .transforms import rotation, rotation3d, scaling, translation
from .utils import get_config, get_subjects_dir, logger, pformat
from functools import reduce
from .externals.six.moves import zip
# some path templates
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
def create_default_subject(mne_root=None, fs_home=None, update=False,
subjects_dir=None):
"""Create an average brain subject for subjects without structural MRI
Create a copy of fsaverage from the Freesurfer directory in subjects_dir
and add auxiliary files from the mne package.
Parameters
----------
mne_root : None | str
        The mne root directory (only needed if MNE_ROOT is not specified as
        an environment variable).
fs_home : None | str
        The freesurfer home directory (only needed if FREESURFER_HOME is not
        specified as an environment variable).
update : bool
In cases where a copy of the fsaverage brain already exists in the
subjects_dir, this option allows to only copy files that don't already
exist in the fsaverage directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(os.environ['SUBJECTS_DIR']) as destination for the new subject.
Notes
-----
When no structural MRI is available for a subject, an average brain can be
substituted. Freesurfer comes with such an average brain model, and MNE
comes with some auxiliary files which make coregistration easier.
:py:func:`create_default_subject` copies the relevant files from Freesurfer
into the current subjects_dir, and also adds the auxiliary files provided
by MNE.
The files provided by MNE are listed below and can be found under
``share/mne/mne_analyze/fsaverage`` in the MNE directory (see MNE manual
section 7.19 Working with the average brain):
fsaverage_head.fif:
The approximate head surface triangulation for fsaverage.
fsaverage_inner_skull-bem.fif:
The approximate inner skull surface for fsaverage.
fsaverage-fiducials.fif:
The locations of the fiducial points (LPA, RPA, and nasion).
fsaverage-trans.fif:
Contains a default MEG-MRI coordinate transformation suitable for
fsaverage.
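
    Examples
    --------
    A minimal sketch of a typical call; the paths below are illustrative and
    must point to existing MNE and Freesurfer installations:

    >>> create_default_subject(mne_root='/opt/MNE-2.7.3',  # illustrative paths
    ...                        fs_home='/usr/local/freesurfer',
    ...                        subjects_dir='/data/subjects')  # doctest: +SKIP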
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if fs_home is None:
fs_home = get_config('FREESURFER_HOME', fs_home)
if fs_home is None:
err = ("FREESURFER_HOME environment variable not found. Please "
"specify the fs_home parameter in your call to "
"create_default_subject().")
raise ValueError(err)
if mne_root is None:
mne_root = get_config('MNE_ROOT', mne_root)
if mne_root is None:
err = ("MNE_ROOT environment variable not found. Please "
"specify the mne_root parameter in your call to "
"create_default_subject().")
raise ValueError(err)
# make sure freesurfer files exist
fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
if not os.path.exists(fs_src):
err = ('fsaverage not found at %r. Is fs_home specified '
'correctly?' % fs_src)
raise IOError(err)
for name in ('label', 'mri', 'surf'):
dirname = os.path.join(fs_src, name)
if not os.path.isdir(dirname):
err = ("Freesurfer fsaverage seems to be incomplete: No directory "
"named %s found in %s" % (name, fs_src))
raise IOError(err)
# make sure destination does not already exist
dest = os.path.join(subjects_dir, 'fsaverage')
if dest == fs_src:
err = ("Your subjects_dir points to the freesurfer subjects_dir (%r). "
"The default subject can not be created in the freesurfer "
"installation directory; please specify a different "
"subjects_dir." % subjects_dir)
raise IOError(err)
elif (not update) and os.path.exists(dest):
err = ("Can not create fsaverage because %r already exists in "
"subjects_dir %r. Delete or rename the existing fsaverage "
"subject folder." % ('fsaverage', subjects_dir))
raise IOError(err)
# make sure mne files exist
mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
'fsaverage', 'fsaverage-%s.fif')
mne_files = ('fiducials', 'head', 'inner_skull-bem', 'trans')
for name in mne_files:
fname = mne_fname % name
if not os.path.isfile(fname):
err = ("MNE fsaverage incomplete: %s file not found at "
"%s" % (name, fname))
raise IOError(err)
# copy fsaverage from freesurfer
logger.info("Copying fsaverage subject from freesurfer directory...")
if (not update) or not os.path.exists(dest):
shutil.copytree(fs_src, dest)
# add files from mne
dest_bem = os.path.join(dest, 'bem')
if not os.path.exists(dest_bem):
os.mkdir(dest_bem)
logger.info("Copying auxiliary fsaverage files from mne directory...")
dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
for name in mne_files:
if not os.path.exists(dest_fname % name):
shutil.copy(mne_fname % name, dest_bem)
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape = (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
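
    Examples
    --------
    A small sketch with arbitrary coordinates; the first two points fall into
    the same voxel at the default resolution, so only one of them is kept:

    >>> import numpy as np
    >>> pts = np.array([[1., 1., 1.], [2., 2., 2.], [50., 50., 50.]])  # arbitrary
    >>> _decimate_points(pts, res=10).shape
    (2, 3)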
"""
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
# for each voxel, select one point
X, Y, Z = pts.T
out = np.empty((np.sum(H > 0), 3))
for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
x = xax[xbin]
y = yax[ybin]
z = zax[zbin]
xi = np.logical_and(X >= x, X < x + res)
yi = np.logical_and(Y >= y, Y < y + res)
zi = np.logical_and(Z >= z, Z < z + res)
idx = np.logical_and(zi, np.logical_and(yi, xi))
ipts = pts[idx]
mid = np.array([x, y, z]) + res / 2.
dist = cdist(ipts, [mid])
i_min = np.argmin(dist)
ipt = ipts[i_min]
out[i] = ipt
return out
def _trans_from_params(param_info, params):
"""Convert transformation parameters into a transformation matrix
Parameters
----------
param_info : tuple, len = 3
        Tuple describing the parameters in x (do_rotate, do_translate,
        do_scale).
params : tuple
The transformation parameters.
Returns
-------
trans : array, shape = (4, 4)
Transformation matrix.
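
    Notes
    -----
    The individual matrices are combined as ``translation * rotation *
    scaling`` (matrix product), so when the result is applied to a set of
    points, scaling acts first and translation last.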
"""
do_rotate, do_translate, do_scale = param_info
i = 0
trans = []
if do_rotate:
x, y, z = params[:3]
trans.append(rotation(x, y, z))
i += 3
if do_translate:
x, y, z = params[i:i + 3]
trans.insert(0, translation(x, y, z))
i += 3
if do_scale == 1:
s = params[i]
trans.append(scaling(s, s, s))
elif do_scale == 3:
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
trans = reduce(dot, trans)
return trans
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
scale=False, tol=None, x0=None, out='trans'):
"""Find a transform that minimizes the squared distance between two
matching sets of points.
    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
    a combination of rotation, translation, and scaling (when applied to the
    points, scaling acts first and translation last).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (n, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
    scale : bool
        Allow scaling of the ``src_pts``. With False, points are not scaled.
        With True, points are scaled by the same factor along all axes.
tol : scalar | None
The error tolerance. If the distance between any of the matched points
exceeds this value in the solution, a RuntimeError is raised. With
None, no error check is performed.
x0 : None | tuple
Initial values for the fit parameters.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
One of the following, depending on the ``out`` parameter:
trans : array, shape = (4, 4)
Transformation that, if applied to src_pts, minimizes the squared
distance to tgt_pts.
params : array, shape = (n_params, )
A single tuple containing the translation, rotation and scaling
parameters in that order.
"""
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
if src_pts.shape != tgt_pts.shape:
err = ("src_pts and tgt_pts must have same shape "
"(got {0}, {1})".format(src_pts.shape, tgt_pts.shape))
raise ValueError(err)
rotate = bool(rotate)
translate = bool(translate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0)
elif param_info == (True, False, 1):
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0, 1)
elif param_info == (True, True, 0):
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0)
elif param_info == (True, True, 1):
def error(x):
rx, ry, rz, tx, ty, tz, s = x
trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
scaling(s, s, s)))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1)
else:
err = ("The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
raise NotImplementedError(err)
x, _, _, _, _ = leastsq(error, x0, full_output=True)
# re-create the final transformation matrix
if (tol is not None) or (out == 'trans'):
trans = _trans_from_params(param_info, x)
# assess the error of the solution
if tol is not None:
if not translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
est_pts = dot(src_pts, trans.T)[:, :3]
err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
if np.any(err > tol):
raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
if out == 'params':
return x
elif out == 'trans':
return trans
else:
err = ("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
raise ValueError(err)
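# Editor's note: a small self-contained check of `fit_matched_points`, added
# for illustration only. The source points and the "true" transform below are
# invented; the helper name `_demo_fit_matched_points` is not part of mne.
def _demo_fit_matched_points():
    rng = np.random.RandomState(42)
    src = rng.uniform(-0.1, 0.1, (8, 3))
    true_trans = dot(translation(0.03, 0.02, 0.01), rotation(0.5, -0.3, 0.2))
    src_h = np.hstack((src, np.ones((len(src), 1))))
    tgt = dot(src_h, true_trans.T)[:, :3]
    est = fit_matched_points(src, tgt, rotate=True, translate=True,
                             out='trans')
    # for noise-free matched points the estimate should be very close
    return np.allclose(est, true_trans, atol=1e-5)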
def get_ras_to_neuromag_trans(nasion, lpa, rpa):
"""Construct a transformation matrix to the MNE head coordinate system
Construct a transformation matrix from an arbitrary RAS coordinate system
to the MNE head coordinate system, in which the x axis passes through the
two preauricular points, and the y axis passes through the nasion and is
normal to the x axis. (see mne manual, pg. 97)
Parameters
----------
nasion : array_like, shape = (3,)
Nasion point coordinate.
lpa : array_like, shape = (3,)
Left peri-auricular point coordinate.
rpa : array_like, shape = (3,)
Right peri-auricular point coordinate.
Returns
-------
trans : numpy.array, shape = (4, 4)
Transformation matrix to MNE head space.
"""
# check input args
nasion = np.asarray(nasion)
lpa = np.asarray(lpa)
rpa = np.asarray(rpa)
for pt in (nasion, lpa, rpa):
if pt.ndim != 1 or len(pt) != 3:
err = ("Points have to be provided as one dimensional arrays of "
"length 3.")
raise ValueError(err)
right = rpa - lpa
right_unit = right / norm(right)
origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit
anterior = nasion - origin
anterior_unit = anterior / norm(anterior)
superior_unit = np.cross(right_unit, anterior_unit)
x, y, z = -origin
origin_trans = translation(x, y, z)
trans_l = np.vstack((right_unit, anterior_unit, superior_unit, [0, 0, 0]))
trans_r = np.reshape([0, 0, 0, 1], (4, 1))
rot_trans = np.hstack((trans_l, trans_r))
trans = np.dot(rot_trans, origin_trans)
return trans
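# Editor's note: illustrative example (not in the original file) of the head
# coordinate frame built above. The fiducial coordinates are invented;
# applying the returned transform should put LPA/RPA on the x axis and the
# nasion on the +y axis.
def _demo_ras_to_neuromag():
    nasion, lpa, rpa = [0., 10., 4.], [-7., 0., 4.], [7., 0., 4.]
    trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
    pts = np.array([nasion, lpa, rpa])
    pts_h = np.hstack((pts, np.ones((3, 1))))
    # rows come back as (0, 10, 0), (-7, 0, 0) and (7, 0, 0)
    return dot(pts_h, trans.T)[:, :3]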
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_pts : array, shape = (m, 3)
Target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
Y = cdist(src_pts, tgt_pts, 'euclidean')
dist = Y.min(axis=1)
return dist
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel()
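# Editor's note: small comparison sketch (not original code) showing that the
# plain and the BallTree-based error functions agree; the point sets are made
# up, and sklearn is only used when it is importable.
def _demo_point_cloud_error():
    rng = np.random.RandomState(1)
    src = rng.uniform(-1, 1, (10, 3))
    tgt = rng.uniform(-1, 1, (200, 3))
    dist = _point_cloud_error(src, tgt)
    try:
        from sklearn.neighbors import BallTree
        dist_bt = _point_cloud_error_balltree(src, BallTree(tgt))
        return np.allclose(dist, dist_bt)
    except ImportError:
        return dist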
def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
scale=0, x0=None, leastsq_args={}, out='params'):
"""Find a transform that minimizes the squared distance from each source
point to its closest target point
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (m, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : 0 | 1 | 3
Number of scaling parameters. With 0, points are not scaled. With 1,
points are scaled by the same factor along all axes. With 3, points are
scaled by a separate factor along each axis.
x0 : None | tuple
Initial values for the fit parameters.
leastsq_args : dict
Additional parameters to submit to :func:`scipy.optimize.leastsq`.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
x : array, shape = (n_params, )
Estimated parameters for the transformation.
Notes
-----
Assumes that the target points form a dense enough point cloud so that
the distance of each src_pt to the closest tgt_pt can be used as an
estimate of the distance of src_pt to tgt_pts.
"""
kwargs = {'epsfcn': 0.01}
kwargs.update(leastsq_args)
# assert correct argument types
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
translate = bool(translate)
rotate = bool(rotate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
try:
from sklearn.neighbors import BallTree
tgt_pts = BallTree(tgt_pts)
errfunc = _point_cloud_error_balltree
except ImportError:
warn("Sklearn could not be imported. Fitting points will be slower. "
"To improve performance, install the sklearn module.")
errfunc = _point_cloud_error
# for efficiency, define parameter specific error function
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
x0 = x0 or (0, 0, 0)
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 1):
x0 = x0 or (0, 0, 0, 1)
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 3):
x0 = x0 or (0, 0, 0, 1, 1, 1)
def error(x):
rx, ry, rz, sx, sy, sz = x
trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, True, 0):
x0 = x0 or (0, 0, 0, 0, 0, 0)
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
err = errfunc(est[:, :3], tgt_pts)
return err
else:
err = ("The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
raise NotImplementedError(err)
est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
msg)
if out == 'params':
return est
elif out == 'trans':
return _trans_from_params(param_info, est)
else:
err = ("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
raise ValueError(err)
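# Editor's note: illustrative usage of `fit_point_cloud`, added by the editor.
# A sparse set of "digitizer" points is fitted to a denser cloud sampled from
# the same surface; all coordinates are synthetic and the helper name is
# hypothetical.
def _demo_fit_point_cloud():
    rng = np.random.RandomState(3)
    # dense target cloud on an anisotropic ellipsoid surface
    tgt = rng.normal(size=(2000, 3))
    tgt /= np.sqrt(np.sum(tgt ** 2, 1))[:, np.newaxis]
    tgt *= [2., 1., 0.5]
    # sparse source points from the same surface, rotated away from it
    src = rng.normal(size=(100, 3))
    src /= np.sqrt(np.sum(src ** 2, 1))[:, np.newaxis]
    src *= [2., 1., 0.5]
    src = dot(src, rotation3d(0.2, 0.1, -0.1).T)
    params = fit_point_cloud(src, tgt, rotate=True, translate=False,
                             scale=0, out='params')
    return params  # should roughly undo the applied rotation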
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
"""Find paths to label files in a subject's label directory
Parameters
----------
subject : str
Name of the mri subject.
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "aparc/*.label" will find all labels
in the "subject/label/aparc" directory). With None, find all labels.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
    -------
paths : list
List of paths relative to the subject's label directory
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = os.path.join(subjects_dir, subject)
lbl_dir = os.path.join(subject_dir, 'label')
if pattern is None:
paths = []
for dirpath, _, filenames in os.walk(lbl_dir):
rel_dir = os.path.relpath(dirpath, lbl_dir)
for filename in fnmatch.filter(filenames, '*.label'):
path = os.path.join(rel_dir, filename)
paths.append(path)
else:
paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
return paths
def _find_mri_paths(subject='fsaverage', subjects_dir=None):
"""Find all files of an mri relevant for source transformation
Parameters
----------
subject : str
Name of the mri subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
    paths : dict
Dictionary whose keys are relevant file type names (str), and whose
values are lists of paths.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = {}
# directories to create
paths['dirs'] = [bem_dirname, surf_dirname]
# surf/ files
paths['surf'] = surf = []
surf_fname = os.path.join(surf_dirname, '{name}')
surf_names = ('orig', 'orig_avg',
'inflated', 'inflated_avg', 'inflated_pre',
'pial', 'pial_avg',
'smoothwm',
'white', 'white_avg',
'sphere', 'sphere.reg', 'sphere.reg.avg')
for name in surf_names:
for hemi in ('lh.', 'rh.'):
fname = pformat(surf_fname, name=hemi + name)
surf.append(fname)
# BEM files
paths['bem'] = bem = []
path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
bem.append('head')
bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
subject=subject, name='*-bem')
re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
name='(.+)')
for path in iglob(bem_pattern):
match = re.match(re_pattern, path)
name = match.group(1)
bem.append(name)
# fiducials
paths['fid'] = [fid_fname]
# duplicate curvature files
paths['duplicate'] = dup = []
path = os.path.join(surf_dirname, '{name}')
for name in ['lh.curv', 'rh.curv']:
fname = pformat(path, name=name)
dup.append(fname)
# check presence of required files
for ftype in ['surf', 'fid', 'duplicate']:
for fname in paths[ftype]:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
path = os.path.realpath(path)
if not os.path.exists(path):
raise IOError("Required file not found: %r" % path)
# find source space files
paths['src'] = src = []
bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
prefix = subject + '-'
for fname in fnames:
if fname.startswith(prefix):
fname = "{subject}-%s" % fname[len(prefix):]
path = os.path.join(bem_dirname, fname)
src.append(path)
return paths
def _is_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is an mri subject directory
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_mri_subject : bool
Whether ``subject`` is an mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if not os.path.exists(fname):
return False
return True
def _mri_subject_has_bem(subject, subjects_dir=None):
"""Check whether an mri subject has a file matching the bem pattern
Parameters
----------
subject : str
Name of the subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
has_bem_file : bool
Whether ``subject`` has a bem file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
name='*-bem')
fnames = glob(pattern)
return bool(len(fnames))
def read_elp(fname):
"""Read point coordinates from a text file
Parameters
----------
fname : str
Absolute path to laser point file (*.txt).
Returns
-------
elp_points : array, [n_points x 3]
Point coordinates.
"""
pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
with open(fname) as fid:
elp_points = pattern.findall(fid.read())
elp_points = np.array(elp_points, dtype=float)
if elp_points.shape[1] != 3:
err = ("File %r does not contain 3 columns as required; got shape "
"%s." % (fname, elp_points.shape))
raise ValueError(err)
return elp_points
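# Editor's note: minimal round-trip example for `read_elp` (not part of the
# original module). A throw-away text file with three invented points is
# written to a temporary directory and parsed back.
def _demo_read_elp():
    import tempfile
    content = ("1.0 2.0 3.0\n"
               "-4.5 0.25 7.75\n"
               "0.0 -1.0 2.5\n")
    fname = os.path.join(tempfile.mkdtemp(), 'points.txt')
    with open(fname, 'w') as fid:
        fid.write(content)
    return read_elp(fname)  # -> array of shape (3, 3)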
def read_mri_cfg(subject, subjects_dir=None):
"""Read information from the cfg file of a scaled MRI brain
Parameters
----------
subject : str
Name of the scaled MRI subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
cfg : dict
Dictionary with entries from the MRI's cfg file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
if not os.path.exists(fname):
err = ("%r does not seem to be a scaled mri subject: %r does not "
"exist." % (subject, fname))
raise IOError(err)
logger.info("Reading MRI cfg file %s" % fname)
config = configparser.RawConfigParser()
config.read(fname)
n_params = config.getint("MRI Scaling", 'n_params')
if n_params == 1:
scale = config.getfloat("MRI Scaling", 'scale')
elif n_params == 3:
scale_str = config.get("MRI Scaling", 'scale')
scale = np.array([float(s) for s in scale_str.split()])
else:
raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
'n_params': n_params, 'scale': scale}
return out
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
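# Editor's note: illustrative round trip between `_write_mri_config` and
# `read_mri_cfg`, added by the editor. The subject names and scale factor are
# invented, everything happens in a temporary directory, and it assumes
# get_subjects_dir simply accepts the explicit path passed to it.
def _demo_mri_cfg_roundtrip():
    import tempfile
    subjects_dir = tempfile.mkdtemp()
    os.makedirs(os.path.join(subjects_dir, 'fsaverage_scaled'))
    fname = os.path.join(subjects_dir, 'fsaverage_scaled',
                         'MRI scaling parameters.cfg')
    _write_mri_config(fname, 'fsaverage', 'fsaverage_scaled', 0.95)
    cfg = read_mri_cfg('fsaverage_scaled', subjects_dir)
    return cfg  # {'subject_from': 'fsaverage', 'n_params': 1, 'scale': 0.95}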
def _scale_params(subject_to, subject_from, scale, subjects_dir):
subjects_dir = get_subjects_dir(subjects_dir, True)
if (subject_from is None) != (scale is None):
err = ("Need to provide either both subject_from and scale "
"parameters, or neither.")
raise TypeError(err)
if subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
subject_from = cfg['subject_from']
n_params = cfg['n_params']
scale = cfg['scale']
else:
scale = np.asarray(scale)
if scale.ndim == 0:
n_params = 1
elif scale.shape == (3,):
n_params = 3
else:
err = ("Invalid shape for scale parameer. Need scalar or array of "
"length 3. Got %s." % str(scale))
raise ValueError(err)
return subjects_dir, subject_from, n_params, scale
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
subjects_dir=None):
"""Scale a bem file
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
bem_name : str
Name of the bem file. For example, to scale
``fsaverage-inner_skull-bem.fif``, the bem_name would be
"inner_skull-bem".
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
Scaling factor. Has to be specified if subjects_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
subjects_dir, subject_from, _, scale = _scale_params(subject_to,
subject_from, scale,
subjects_dir)
src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
name=bem_name)
dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
name=bem_name)
if os.path.exists(dst):
raise IOError("File alredy exists: %s" % dst)
surfs = read_bem_surfaces(src)
if len(surfs) != 1:
err = ("BEM file with more than one surface: %r" % src)
raise NotImplementedError(err)
surf0 = surfs[0]
surf0['rr'] = surf0['rr'] * scale
write_bem_surface(dst, surf0)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
scale=None, subjects_dir=None):
"""Scale labels to match a brain that was previously created by scaling
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination brain).
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "lh.BA3a.label" will scale
"fsaverage/label/lh.BA3a.label"; "aparc/*.label" will find all labels
in the "fsaverage/label/aparc" directory). With None, scale all labels.
overwrite : bool
Overwrite any label file that already exists for subject_to (otherwise
        existing labels are skipped).
subject_from : None | str
Name of the original MRI subject (the brain that was scaled to create
subject_to). If None, the value is read from subject_to's cfg file.
scale : None | float | array_like, shape = (3,)
Scaling parameter. If None, the value is read from subject_to's cfg
file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
# read parameters from cfg
if scale is None or subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
if subject_from is None:
subject_from = cfg['subject_from']
if scale is None:
scale = cfg['scale']
# find labels
paths = _find_label_paths(subject_from, pattern, subjects_dir)
if not paths:
return
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_root = os.path.join(subjects_dir, subject_from, 'label')
dst_root = os.path.join(subjects_dir, subject_to, 'label')
# scale labels
for fname in paths:
dst = os.path.join(dst_root, fname)
if not overwrite and os.path.exists(dst):
continue
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
src = os.path.join(src_root, fname)
l_old = read_label(src)
pos = l_old.pos * scale
l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
l_old.comment, subject=subject_to)
l_new.save(dst)
def scale_mri(subject_from, subject_to, scale, overwrite=False,
subjects_dir=None):
"""Create a scaled copy of an MRI subject
Parameters
----------
subject_from : str
Name of the subject providing the MRI.
subject_to : str
New subject name for which to save the scaled MRI.
scale : float | array_like, shape = (3,)
The scaling factor (one or 3 parameters).
overwrite : bool
If an MRI already exists for subject_to, overwrite it.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
See Also
--------
scale_labels : add labels to a scaled MRI
scale_source_space : add a source space to a scaled MRI
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = _find_mri_paths(subject_from, subjects_dir=subjects_dir)
scale = np.asarray(scale)
# make sure we have an empty target directory
dest = subject_dirname.format(subject=subject_to,
subjects_dir=subjects_dir)
if os.path.exists(dest):
if overwrite:
shutil.rmtree(dest)
else:
err = ("Subject directory for %s already exists: "
"%r" % (subject_to, dest))
raise IOError(err)
for dirname in paths['dirs']:
dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
os.makedirs(dir_)
# save MRI scaling parameters
fname = os.path.join(dest, 'MRI scaling parameters.cfg')
_write_mri_config(fname, subject_from, subject_to, scale)
# surf files [in mm]
for fname in paths['surf']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
pts, tri = read_surface(src)
write_surface(dest, pts * scale, tri)
# BEM files [in m]
for bem_name in paths['bem']:
scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
# fiducials [in m]
for fname in paths['fid']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
pts, cframe = read_fiducials(src)
for pt in pts:
pt['r'] = pt['r'] * scale
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
write_fiducials(dest, pts, cframe)
# duplicate files
for fname in paths['duplicate']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
shutil.copyfile(src, dest)
# source spaces
for fname in paths['src']:
src_name = os.path.basename(fname)
scale_source_space(subject_to, src_name, subject_from, scale,
subjects_dir)
# labels [in m]
scale_labels(subject_to, subject_from=subject_from, scale=scale,
subjects_dir=subjects_dir)
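# Editor's note: typical invocation of `scale_mri` (left commented out because
# it needs a real FreeSurfer subject on disk); the subject names, scale factor
# and path below are placeholders, not values from the original code.
#   scale_mri('fsaverage', 'fsaverage_small', scale=0.95, overwrite=True,
#             subjects_dir='/path/to/subjects')
#   scale_source_space('fsaverage_small', 'oct-6')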
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
subjects_dir=None, n_jobs=1):
"""Scale a source space for an mri created with scale_mri()
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
src_name : str
Source space name. Can be a spacing parameter (e.g., ``'7'``,
``'ico4'``, ``'oct6'``) or a file name of a source space file relative
to the bem directory; if the file name contains the subject name, it
should be indicated as "{subject}" in ``src_name`` (e.g.,
``"{subject}-my_source_space-src.fif"``).
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
Scaling factor. Has to be specified if subjects_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
n_jobs : int
        Number of jobs to run in parallel if recomputing distances (only
applies if scale is an array of length 3, and will not use more cores
than there are source spaces).
"""
subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
subject_from,
scale,
subjects_dir)
# find the source space file names
if src_name.isdigit():
spacing = src_name # spacing in mm
src_pattern = src_fname
else:
match = re.match("(oct|ico)-?(\d+)$", src_name)
if match:
spacing = '-'.join(match.groups())
src_pattern = src_fname
else:
spacing = None
src_pattern = os.path.join(bem_dirname, src_name)
src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
spacing=spacing)
dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
spacing=spacing)
# prepare scaling parameters
if n_params == 1:
norm_scale = None
elif n_params == 3:
norm_scale = 1. / scale
else:
err = ("Invalid n_params entry in MRI cfg file: %s" % str(n_params))
raise RuntimeError(err)
# read and scale the source space [in m]
sss = read_source_spaces(src)
logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
subject_to)
logger.info("Scale factor: %s", scale)
add_dist = False
for ss in sss:
ss['subject_his_id'] = subject_to
ss['rr'] *= scale
# distances and patch info
if norm_scale is None:
if ss['dist'] is not None:
ss['dist'] *= scale
ss['nearest_dist'] *= scale
ss['dist_limit'] *= scale
else:
nn = ss['nn']
nn *= norm_scale
norm = np.sqrt(np.sum(nn ** 2, 1))
nn /= norm[:, np.newaxis]
if ss['dist'] is not None:
add_dist = True
if add_dist:
logger.info("Recomputing distances, this might take a while")
dist_limit = np.asscalar(sss[0]['dist_limit'])
add_source_space_distances(sss, dist_limit, n_jobs)
write_source_spaces(dst, sss)
| bsd-2-clause | -7,231,553,313,191,468,000 | 34.259516 | 79 | 0.5934 | false |
kustomzone/Fuzium | core/src/Ui/UiRequest.py | 1 | 25682 | import time
import re
import os
import mimetypes
import json
import cgi
from Config import config
from Site import SiteManager
from User import UserManager
from Plugin import PluginManager
from Ui.UiWebsocket import UiWebsocket
from Crypt import CryptHash
status_texts = {
200: "200 OK",
206: "206 Partial Content",
400: "400 Bad Request",
403: "403 Forbidden",
404: "404 Not Found",
500: "500 Internal Server Error",
}
@PluginManager.acceptPlugins
class UiRequest(object):
def __init__(self, server, get, env, start_response):
if server:
self.server = server
self.log = server.log
self.get = get # Get parameters
self.env = env # Enviroment settings
# ['CONTENT_LENGTH', 'CONTENT_TYPE', 'GATEWAY_INTERFACE', 'HTTP_ACCEPT', 'HTTP_ACCEPT_ENCODING', 'HTTP_ACCEPT_LANGUAGE',
# 'HTTP_COOKIE', 'HTTP_CACHE_CONTROL', 'HTTP_HOST', 'HTTP_HTTPS', 'HTTP_ORIGIN', 'HTTP_PROXY_CONNECTION', 'HTTP_REFERER',
# 'HTTP_USER_AGENT', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REMOTE_PORT', 'REQUEST_METHOD', 'SCRIPT_NAME',
# 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', 'SERVER_SOFTWARE', 'werkzeug.request', 'wsgi.errors',
# 'wsgi.input', 'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once', 'wsgi.url_scheme', 'wsgi.version']
self.start_response = start_response # Start response function
self.user = None
# Call the request handler function base on path
def route(self, path):
if config.ui_restrict and self.env['REMOTE_ADDR'] not in config.ui_restrict: # Restict Ui access by ip
return self.error403(details=False)
path = re.sub("^http://zero[/]+", "/", path) # Remove begining http://zero/ for chrome extension
path = re.sub("^http://", "/", path) # Remove begining http for chrome extension .bit access
if self.env["REQUEST_METHOD"] == "OPTIONS":
if "/" not in path.strip("/"):
content_type = self.getContentType("index.html")
else:
content_type = self.getContentType(path)
self.sendHeader(content_type=content_type)
return ""
if path == "/":
return self.actionIndex()
elif path == "/favicon.ico":
return self.actionFile("src/Ui/media/img/favicon.ico")
# Media
elif path.startswith("/uimedia/"):
return self.actionUiMedia(path)
elif "/uimedia/" in path:
# uimedia within site dir (for chrome extension)
path = re.sub(".*?/uimedia/", "/uimedia/", path)
return self.actionUiMedia(path)
# Websocket
elif path == "/Websocket":
return self.actionWebsocket()
# Debug
elif path == "/Debug" and config.debug:
return self.actionDebug()
elif path == "/Console" and config.debug:
return self.actionConsole()
# Site media wrapper
else:
if self.get.get("wrapper_nonce"):
return self.actionSiteMedia("/media" + path) # Only serve html files with frame
else:
body = self.actionWrapper(path)
if body:
return body
else:
func = getattr(self, "action" + path.lstrip("/"), None) # Check if we have action+request_path function
if func:
return func()
else:
return self.error404(path)
# The request is proxied by chrome extension
def isProxyRequest(self):
return self.env["PATH_INFO"].startswith("http://")
def isWebSocketRequest(self):
return self.env.get("HTTP_UPGRADE") == "websocket"
def isAjaxRequest(self):
return self.env.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest"
# Get mime by filename
def getContentType(self, file_name):
content_type = mimetypes.guess_type(file_name)[0]
if file_name.endswith(".css"): # Force correct css content type
content_type = "text/css"
if not content_type:
if file_name.endswith(".json"): # Correct json header
content_type = "application/json"
else:
content_type = "application/octet-stream"
return content_type
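    # Editor's note (illustration only, not original code): typical results of
    # getContentType -- "style.css" -> "text/css", "data.json" ->
    # "application/json", "image.png" -> "image/png"; unknown extensions fall
    # back to "application/octet-stream".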
# Return: <dict> Posted variables
def getPosted(self):
if self.env['REQUEST_METHOD'] == "POST":
return dict(cgi.parse_qsl(
self.env['wsgi.input'].readline().decode()
))
else:
return {}
# Return: <dict> Cookies based on self.env
def getCookies(self):
raw_cookies = self.env.get('HTTP_COOKIE')
if raw_cookies:
cookies = cgi.parse_qsl(raw_cookies)
return {key.strip(): val for key, val in cookies}
else:
return {}
def getCurrentUser(self):
if self.user:
return self.user # Cache
self.user = UserManager.user_manager.get() # Get user
if not self.user:
self.user = UserManager.user_manager.create()
return self.user
# Send response headers
def sendHeader(self, status=200, content_type="text/html", extra_headers=[]):
headers = []
headers.append(("Version", "HTTP/1.1"))
headers.append(("Connection", "Keep-Alive"))
headers.append(("Keep-Alive", "max=25, timeout=30"))
if content_type != "text/html":
headers.append(("Access-Control-Allow-Origin", "*")) # Allow json access on non-html files
headers.append(("X-Frame-Options", "SAMEORIGIN"))
# headers.append(("Content-Security-Policy", "default-src 'self' data: 'unsafe-inline' ws://127.0.0.1:* http://127.0.0.1:* wss://tracker.webtorrent.io; sandbox allow-same-origin allow-top-navigation allow-scripts")) # Only local connections
if self.env["REQUEST_METHOD"] == "OPTIONS":
# Allow json access
headers.append(("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Cookie"))
headers.append(("Access-Control-Allow-Credentials", "true"))
if content_type == "text/html":
content_type = "text/html; charset=utf-8"
cacheable_type = (
content_type == "text/css" or content_type.startswith("image") or content_type.startswith("video") or
self.env["REQUEST_METHOD"] == "OPTIONS" or content_type == "application/javascript"
)
if status in (200, 206) and cacheable_type: # Cache Css, Js, Image files for 10min
headers.append(("Cache-Control", "public, max-age=600")) # Cache 10 min
else:
headers.append(("Cache-Control", "no-cache, no-store, private, must-revalidate, max-age=0")) # No caching at all
headers.append(("Content-Type", content_type))
for extra_header in extra_headers:
headers.append(extra_header)
return self.start_response(status_texts[status], headers)
# Renders a template
def render(self, template_path, *args, **kwargs):
template = open(template_path).read().decode("utf8")
return template.format(**kwargs).encode("utf8")
# - Actions -
# Redirect to an url
def actionRedirect(self, url):
self.start_response('301 Redirect', [('Location', url)])
yield "Location changed: %s" % url
def actionIndex(self):
return self.actionRedirect("/" + config.homepage)
# Render a file from media with iframe site wrapper
def actionWrapper(self, path, extra_headers=None):
if not extra_headers:
extra_headers = []
match = re.match("/(?P<address>[A-Za-z0-9\._-]+)(?P<inner_path>/.*|$)", path)
if match:
address = match.group("address")
inner_path = match.group("inner_path").lstrip("/")
if "." in inner_path and not inner_path.endswith(".html"):
return self.actionSiteMedia("/media" + path) # Only serve html files with frame
if self.isAjaxRequest():
return self.error403("Ajax request not allowed to load wrapper") # No ajax allowed on wrapper
if self.isWebSocketRequest():
return self.error403("WebSocket request not allowed to load wrapper") # No websocket
if "text/html" not in self.env.get("HTTP_ACCEPT", ""):
return self.error403("Invalid Accept header to load wrapper")
if "prefetch" in self.env.get("HTTP_X_MOZ", "") or "prefetch" in self.env.get("HTTP_PURPOSE", ""):
return self.error403("Prefetch not allowed to load wrapper")
site = SiteManager.site_manager.get(address)
if (
site and site.content_manager.contents.get("content.json") and
(not site.getReachableBadFiles() or site.settings["own"])
): # Its downloaded or own
title = site.content_manager.contents["content.json"]["title"]
else:
title = "Loading %s..." % address
site = SiteManager.site_manager.need(address) # Start download site
if not site:
return False
self.sendHeader(extra_headers=extra_headers[:])
return iter([self.renderWrapper(site, path, inner_path, title, extra_headers)])
# Dont know why wrapping with iter necessary, but without it around 100x slower
else: # Bad url
return False
def renderWrapper(self, site, path, inner_path, title, extra_headers):
file_inner_path = inner_path
if not file_inner_path:
file_inner_path = "index.html" # If inner path defaults to index.html
if file_inner_path.endswith("/"):
file_inner_path = file_inner_path + "index.html"
address = re.sub("/.*", "", path.lstrip("/"))
if self.isProxyRequest() and (not path or "/" in path[1:]):
file_url = re.sub(".*/", "", inner_path)
if self.env["HTTP_HOST"] == "zero":
root_url = "/" + address + "/"
else:
root_url = "/"
else:
file_url = "/" + address + "/" + inner_path
root_url = "/" + address + "/"
# Wrapper variable inits
query_string = ""
body_style = ""
meta_tags = ""
postmessage_nonce_security = "false"
wrapper_nonce = self.getWrapperNonce()
if self.env.get("QUERY_STRING"):
query_string = "?%s&wrapper_nonce=%s" % (self.env["QUERY_STRING"], wrapper_nonce)
else:
query_string = "?wrapper_nonce=%s" % wrapper_nonce
if self.isProxyRequest(): # Its a remote proxy request
if self.env["REMOTE_ADDR"] == "127.0.0.1": # Local client, the server address also should be 127.0.0.1
server_url = "http://127.0.0.1:%s" % self.env["SERVER_PORT"]
else: # Remote client, use SERVER_NAME as server's real address
server_url = "http://%s:%s" % (self.env["SERVER_NAME"], self.env["SERVER_PORT"])
homepage = "http://zero/" + config.homepage
else: # Use relative path
server_url = ""
homepage = "/" + config.homepage
if site.content_manager.contents.get("content.json"): # Got content.json
content = site.content_manager.contents["content.json"]
if content.get("background-color"):
body_style += "background-color: %s;" % \
cgi.escape(site.content_manager.contents["content.json"]["background-color"], True)
if content.get("viewport"):
meta_tags += '<meta name="viewport" id="viewport" content="%s">' % cgi.escape(content["viewport"], True)
if content.get("favicon"):
meta_tags += '<link rel="icon" href="%s%s">' % (root_url, cgi.escape(content["favicon"], True))
if content.get("postmessage_nonce_security"):
postmessage_nonce_security = "true"
if site.settings.get("own"):
sandbox_permissions = "allow-modals" # For coffeescript compile errors
else:
sandbox_permissions = ""
return self.render(
"src/Ui/template/wrapper.html",
server_url=server_url,
inner_path=inner_path,
file_url=re.escape(file_url),
file_inner_path=re.escape(file_inner_path),
address=site.address,
title=cgi.escape(title, True),
body_style=body_style,
meta_tags=meta_tags,
query_string=re.escape(query_string),
wrapper_key=site.settings["wrapper_key"],
wrapper_nonce=wrapper_nonce,
postmessage_nonce_security=postmessage_nonce_security,
permissions=json.dumps(site.settings["permissions"]),
show_loadingscreen=json.dumps(not site.storage.isFile(file_inner_path)),
sandbox_permissions=sandbox_permissions,
rev=config.rev,
lang=config.language,
homepage=homepage
)
# Create a new wrapper nonce that allows to get one html file without the wrapper
def getWrapperNonce(self):
wrapper_nonce = CryptHash.random()
self.server.wrapper_nonces.append(wrapper_nonce)
return wrapper_nonce
# Returns if media request allowed from that referer
def isMediaRequestAllowed(self, site_address, referer):
if not re.sub("^http[s]{0,1}://", "", referer).startswith(self.env["HTTP_HOST"]):
return False
referer_path = re.sub("http[s]{0,1}://.*?/", "/", referer).replace("/media", "") # Remove site address
return referer_path.startswith("/" + site_address)
# Return {address: 1Site.., inner_path: /data/users.json} from url path
def parsePath(self, path):
path = path.replace("/index.html/", "/") # Base Backward compatibility fix
if path.endswith("/"):
path = path + "index.html"
match = re.match("/media/(?P<address>[A-Za-z0-9\._-]+)/(?P<inner_path>.*)", path)
if match:
path_parts = match.groupdict()
path_parts["request_address"] = path_parts["address"] # Original request address (for Merger sites)
return path_parts
else:
return None
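    # Editor's note (illustration only): for a request such as
    # "/media/1EU1tbG9oC1A8jCmXKLBp7FnM4Ho6ZT7ZP/css/all.css" parsePath returns
    # {'address': '1EU1tbG9oC1A8jCmXKLBp7FnM4Ho6ZT7ZP',
    #  'inner_path': 'css/all.css',
    #  'request_address': '1EU1tbG9oC1A8jCmXKLBp7FnM4Ho6ZT7ZP'},
    # while non-media paths yield None. The example address is made up.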
# Serve a media for site
def actionSiteMedia(self, path, header_length=True):
path_parts = self.parsePath(path)
# Check wrapper nonce
content_type = self.getContentType(path_parts["inner_path"])
if "htm" in content_type: # Valid nonce must present to render html files
wrapper_nonce = self.get.get("wrapper_nonce")
if wrapper_nonce not in self.server.wrapper_nonces:
return self.error403("Wrapper nonce error. Please reload the page.")
self.server.wrapper_nonces.remove(self.get["wrapper_nonce"])
referer = self.env.get("HTTP_REFERER")
if referer and path_parts: # Only allow same site to receive media
if not self.isMediaRequestAllowed(path_parts["request_address"], referer):
self.log.error("Media referrer error: %s not allowed from %s" % (path_parts["address"], referer))
return self.error403("Media referrer error") # Referrer not starts same address as requested path
if path_parts: # Looks like a valid path
address = path_parts["address"]
file_path = "%s/%s/%s" % (config.data_dir, address, path_parts["inner_path"])
if ".." in path_parts["inner_path"]: # File not in allowed path
return self.error403("Invalid file path")
else:
if config.debug and file_path.split("/")[-1].startswith("all."):
# If debugging merge *.css to all.css and *.js to all.js
site = self.server.sites.get(address)
if site.settings["own"]:
from Debug import DebugMedia
DebugMedia.merge(file_path)
if os.path.isfile(file_path): # File exists
return self.actionFile(file_path, header_length=header_length)
elif os.path.isdir(file_path): # If this is actually a folder, add "/" and redirect
return self.actionRedirect("./{0}/".format(path_parts["inner_path"].split("/")[-1]))
else: # File not exists, try to download
if address not in SiteManager.site_manager.sites: # Only in case if site already started downloading
return self.error404(path_parts["inner_path"])
site = SiteManager.site_manager.need(address)
if path_parts["inner_path"].endswith("favicon.ico"): # Default favicon for all sites
return self.actionFile("src/Ui/media/img/favicon.ico")
result = site.needFile(path_parts["inner_path"], priority=5) # Wait until file downloads
if result:
return self.actionFile(file_path, header_length=header_length)
else:
self.log.debug("File not found: %s" % path_parts["inner_path"])
# Site larger than allowed, re-add wrapper nonce to allow reload
if site.settings.get("size", 0) > site.getSizeLimit() * 1024 * 1024:
self.server.wrapper_nonces.append(self.get.get("wrapper_nonce"))
return self.error404(path_parts["inner_path"])
else: # Bad url
return self.error404(path)
# Serve a media for ui
def actionUiMedia(self, path):
match = re.match("/uimedia/(?P<inner_path>.*)", path)
if match: # Looks like a valid path
file_path = "src/Ui/media/%s" % match.group("inner_path")
allowed_dir = os.path.abspath("src/Ui/media") # Only files within data/sitehash allowed
if ".." in file_path or not os.path.dirname(os.path.abspath(file_path)).startswith(allowed_dir):
# File not in allowed path
return self.error403()
else:
if config.debug and match.group("inner_path").startswith("all."):
# If debugging merge *.css to all.css and *.js to all.js
from Debug import DebugMedia
DebugMedia.merge(file_path)
                return self.actionFile(file_path, header_length=False) # Don't send length so plugins can append content
else: # Bad url
return self.error400()
# Stream a file to client
def actionFile(self, file_path, block_size=64 * 1024, send_header=True, header_length=True):
if os.path.isfile(file_path):
# Try to figure out content type by extension
content_type = self.getContentType(file_path)
# TODO: Dont allow external access: extra_headers=
# [("Content-Security-Policy", "default-src 'unsafe-inline' data: http://localhost:43110 ws://localhost:43110")]
range = self.env.get("HTTP_RANGE")
range_start = None
if send_header:
extra_headers = {}
file_size = os.path.getsize(file_path)
extra_headers["Accept-Ranges"] = "bytes"
if header_length:
extra_headers["Content-Length"] = str(file_size)
if range:
range_start = int(re.match(".*?([0-9]+)", range).group(1))
if re.match(".*?-([0-9]+)", range):
range_end = int(re.match(".*?-([0-9]+)", range).group(1)) + 1
else:
range_end = file_size
extra_headers["Content-Length"] = str(range_end - range_start)
extra_headers["Content-Range"] = "bytes %s-%s/%s" % (range_start, range_end - 1, file_size)
if range:
status = 206
else:
status = 200
self.sendHeader(status, content_type=content_type, extra_headers=extra_headers.items())
if self.env["REQUEST_METHOD"] != "OPTIONS":
file = open(file_path, "rb")
if range_start:
file.seek(range_start)
while 1:
try:
block = file.read(block_size)
if block:
yield block
else:
raise StopIteration
except StopIteration:
file.close()
break
else: # File not exists
yield self.error404(file_path)
# On websocket connection
def actionWebsocket(self):
ws = self.env.get("wsgi.websocket")
if ws:
wrapper_key = self.get["wrapper_key"]
# Find site by wrapper_key
site = None
for site_check in self.server.sites.values():
if site_check.settings["wrapper_key"] == wrapper_key:
site = site_check
if site: # Correct wrapper key
user = self.getCurrentUser()
if not user:
self.log.error("No user found")
return self.error403()
ui_websocket = UiWebsocket(ws, site, self.server, user, self)
site.websockets.append(ui_websocket) # Add to site websockets to allow notify on events
ui_websocket.start()
for site_check in self.server.sites.values():
# Remove websocket from every site (admin sites allowed to join other sites event channels)
if ui_websocket in site_check.websockets:
site_check.websockets.remove(ui_websocket)
return "Bye."
else: # No site found by wrapper key
self.log.error("Wrapper key not found: %s" % wrapper_key)
return self.error403()
else:
self.start_response("400 Bad Request", [])
return "Not a websocket!"
# Debug last error
def actionDebug(self):
# Raise last error from DebugHook
import sys
last_error = sys.modules["main"].DebugHook.last_error
if last_error:
raise last_error[0], last_error[1], last_error[2]
else:
self.sendHeader()
return "No error! :)"
# Just raise an error to get console
def actionConsole(self):
import sys
sites = self.server.sites
main = sys.modules["main"]
raise Exception("Here is your console")
# - Tests -
def actionTestStream(self):
self.sendHeader()
yield " " * 1080 # Overflow browser's buffer
yield "He"
time.sleep(1)
yield "llo!"
# yield "Running websockets: %s" % len(self.server.websockets)
# self.server.sendMessage("Hello!")
# - Errors -
# Send bad request error
def error400(self, message=""):
self.sendHeader(400)
return self.formatError("Bad Request", message)
# You are not allowed to access this
def error403(self, message="", details=True):
self.sendHeader(403)
self.log.debug("Error 403: %s" % message)
return self.formatError("Forbidden", message, details=details)
# Send file not found error
def error404(self, path=""):
self.sendHeader(404)
return self.formatError("Not Found", cgi.escape(path.encode("utf8")), details=False)
# Internal server error
def error500(self, message=":("):
self.sendHeader(500)
return self.formatError("Server error", cgi.escape(message))
def formatError(self, title, message, details=True):
import sys
import gevent
if details:
details = {key: val for key, val in self.env.items() if hasattr(val, "endswith") and "COOKIE" not in key}
details["version_zeronet"] = "%s r%s" % (config.version, config.rev)
details["version_python"] = sys.version
details["version_gevent"] = gevent.__version__
details["plugins"] = PluginManager.plugin_manager.plugin_names
arguments = {key: val for key, val in vars(config.arguments).items() if "password" not in key}
details["arguments"] = arguments
return """
<style>
* { font-family: Consolas, Monospace; color: #333 }
pre { padding: 10px; background-color: #EEE }
</style>
<h1>%s</h1>
            <h2>%s</h2>
            <h3>Please <a href="https://github.com/HelloZeroNet/ZeroNet/issues" target="_blank">report it</a> if you think this is an error.</h3>
<h4>Details:</h4>
<pre>%s</pre>
""" % (title, message, json.dumps(details, indent=4, sort_keys=True))
else:
return """
<h1>%s</h1>
            <h2>%s</h2>
""" % (title, message)
    # - Reload for easier developing -
# def reload():
# import imp, sys
# global UiWebsocket
# UiWebsocket = imp.load_source("UiWebsocket", "src/Ui/UiWebsocket.py").UiWebsocket
# reload(sys.modules["User.UserManager"])
# UserManager.reloadModule()
# self.user = UserManager.user_manager.getCurrent()
| mit | -3,974,623,801,601,027,000 | 43.203098 | 249 | 0.566545 | false |
nthien/flaskup | flaskup/console.py | 2 | 1462 | # -*- coding: utf-8 -*-
import os
import argparse
from datetime import date
from flaskup.models import SharedFile
from flaskup.filters import filesizeformat
def action_clean(quiet):
today = date.today()
count = 0
deleted_files = []
for f in SharedFile.find_all():
if f.expire_date < today:
f.delete(notify=False)
count += 1
deleted_files.append(f)
if not quiet and count > 0:
print u'Files deleted: {0}'.format(count)
for info in deleted_files:
print u" - '{0}' - {1}".format(os.path.join(info.path, info.filename),
filesizeformat(info.size, True))
def list_actions():
from flaskup import console
attributes = dir(console)
actions = []
for attribute in attributes:
if attribute.startswith('action_'):
actions.append(attribute[7:])
return actions
def main():
# parse arguments
parser = argparse.ArgumentParser(description='Flaskup! command line tool.')
parser.add_argument('-q', '--quiet',
action='store_true',
help='quiet, print only errors')
choices = list_actions()
parser.add_argument('action', choices=choices)
args = parser.parse_args()
# quiet?
quiet = args.quiet
# call function
from flaskup import console
action = getattr(console, 'action_' + args.action)
action(quiet)
| bsd-3-clause | -4,315,482,644,957,705,700 | 25.581818 | 82 | 0.596443 | false |
jimmy201602/webterminal | permission/commons.py | 1 | 3118 | from django.contrib.auth.models import Permission
from django.utils.translation import ugettext_lazy as _
def parse_permission_tree():
permission_tree = {}
permission_tree_list = []
queryset = Permission.objects.filter(content_type__app_label__in=[
'common', 'permission'], codename__contains='can_')
for i in ['common', 'permission']:
for p in queryset.filter(content_type__app_label=i):
if 'text' in permission_tree.keys():
if p.content_type.model not in [i['model'] for i in permission_tree['children']]:
permission_tree['children'].append({
"text": _(p.content_type.model),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'two',
'children': [{
"text": _(p.name),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"id": p.id,
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'three'
}]
})
else:
for i in permission_tree['children']:
if i['model'] == p.content_type.model:
permission_tree['children'][permission_tree['children'].index(i)]['children'].append({
"text": _(p.name),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"id": p.id,
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'three'
})
else:
permission_tree['text'] = i
permission_tree['level'] = 'one'
permission_tree['children'] = []
permission_tree['children'].append({
"text": _(p.content_type.model),
"icon": "fa fa-folder",
"app_label": p.content_type.app_label,
"model": p.content_type.model,
"state": {"selected": "!0"},
'level': 'two',
'children': [{
"text": _(p.name),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"id": p.id,
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'three'
}]
})
permission_tree_list.append(permission_tree)
permission_tree = {}
return permission_tree_list
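# Editor's note: the literal below is an invented, minimal example of the tree
# shape produced by parse_permission_tree (one app, one model, one permission),
# added purely to document the structure; real ids, model names and permission
# texts will differ.
def _example_permission_tree():
    return [{
        'text': 'common',
        'level': 'one',
        'children': [{
            'text': 'shellcommand',
            'icon': 'fa fa-folder',
            'state': {'selected': '!0'},
            'app_label': 'common',
            'model': 'shellcommand',
            'level': 'two',
            'children': [{
                'text': 'Can view shell command',
                'icon': 'fa fa-folder',
                'state': {'selected': '!0'},
                'id': 1,
                'app_label': 'common',
                'model': 'shellcommand',
                'level': 'three',
            }],
        }],
    }]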
| gpl-3.0 | -1,413,970,611,561,117,400 | 46.242424 | 114 | 0.395125 | false |
revesansparole/oacontainer | src/openalea/container/property_graph.py | 1 | 9223 | # -*- python -*-
#
# OpenAlea.Core
#
# Copyright 2006-2009 INRIA - CIRAD - INRA
#
# File author(s): Fred Boudon <[email protected]>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite: http://openalea.gforge.inria.fr
#
################################################################################
"""This module provide a set of concepts to add properties to graph elements.
TODO: stupid implementation that do not ensure that ids in properties are valid
graph elements.
"""
from graph import Graph, InvalidVertex, InvalidEdge
class InvalidProperty(Exception):
"""Exception used when a property is missing."""
pass
class PropertyGraph(Graph):
"""Simple implementation of PropertyGraph using
dict as properties and two dictionaries to
maintain these properties
"""
def __init__(self, graph=None, **kwds):
self._vertex_property = {}
self._edge_property = {}
self._graph_property = {}
Graph.__init__(self, graph, **kwds)
def vertex_property_names(self):
"""Names of properties associated to vertices.
return:
- (iter of str)
"""
return self._vertex_property.iterkeys()
def vertex_properties(self):
"""Iterate on all properties associated to vertices.
return:
- (iter of dict of (vid, any))
"""
return self._vertex_property.items()
def vertex_property(self, property_name):
"""Return a map between vid and data for all vertices where
property_name is defined
args:
- property_name (str): name identifier of the property
return:
- (dict of (vid, any))
"""
try:
return self._vertex_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on vertices"
% property_name)
def edge_property_names(self):
"""Names of properties associated to edges.
return:
- (iter of str)
"""
return self._edge_property.iterkeys()
def edge_properties(self):
"""Iterate on all properties associated to edges.
return:
- (iter of dict of (eid, any))
"""
return self._edge_property.items()
def edge_property(self, property_name):
"""Return a map between eid and data for all edges where
property_name is defined
args:
- property_name (str): name identifier of the property
return:
- (dict of (eid, any))
"""
try:
return self._edge_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on edges"
% property_name)
def graph_property_names(self):
"""Names of properties associated to the graph.
return:
- (iter of str)
"""
return self._graph_property.iterkeys()
def graph_properties(self):
"""Iterate on all properties associated to the graph.
return:
- (iter of (str, any))
"""
return self._graph_property.iteritems()
def graph_property(self, property_name):
"""Return the value of a property associated to the graph.
args:
- property_name (str): name identifier of the property
return:
- (any)
"""
try:
return self._graph_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on graph"
% property_name)
###########################################################
#
# mutable property concept
#
###########################################################
def add_vertex_property(self, property_name, values=None):
"""Add a new map between vid and a data.
args:
- property_name (str): name identifier for this property
        - values (dict of (vid, any)): preset values for some vertices.
          If None (default), property will be empty.
"""
if property_name in self._vertex_property:
raise InvalidProperty("property %s is already defined on vertices"
% property_name)
if values is None:
values = {}
self._vertex_property[property_name] = values
def remove_vertex_property(self, property_name):
"""Remove a given property.
args:
- property_name (str): name identifier for this property
"""
try:
del self._vertex_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on vertices"
% property_name)
def add_edge_property(self, property_name, values=None):
"""Add a new map between eid and a data.
args:
- property_name (str): name identifier for this property
        - values (dict of (eid, any)): preset values for some edges.
          If None (default), property will be empty.
"""
if property_name in self._edge_property:
raise InvalidProperty("property %s is already defined on edges"
% property_name)
if values is None:
values = {}
self._edge_property[property_name] = values
def remove_edge_property(self, property_name):
"""Remove a given property.
args:
- property_name (str): name identifier for this property
"""
try:
del self._edge_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on edges"
% property_name)
def add_graph_property(self, property_name, value=None):
"""Add a new property to the graph.
args:
- property_name (str): name identifier for the property
        - value (any): value (default None) associated with this property
"""
if property_name in self._graph_property:
raise InvalidProperty("property %s is already defined on graph"
% property_name)
self._graph_property[property_name] = value
def remove_graph_property(self, property_name):
"""Remove a given property.
args:
- property_name (str): name identifier for this property
"""
try:
del self._graph_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on graph"
% property_name)
###########################################################
#
# mutable property concept
#
###########################################################
def remove_vertex(self, vid):
for prop in self._vertex_property.itervalues():
prop.pop(vid, None)
Graph.remove_vertex(self, vid)
# remove_vertex.__doc__ = Graph.remove_vertex.__doc__
def remove_edge(self, eid):
for prop in self._edge_property.itervalues():
prop.pop(eid, None)
Graph.remove_edge(self, eid)
# remove_edge.__doc__ = Graph.remove_edge.__doc__
def clear(self):
for prop in self._vertex_property.itervalues():
prop.clear()
for prop in self._edge_property.itervalues():
prop.clear()
self._graph_property.clear()
Graph.clear(self)
# clear.__doc__ = Graph.clear.__doc__
def clear_edges(self):
for prop in self._edge_property.itervalues():
prop.clear()
Graph.clear_edges(self)
# clear_edges.__doc__ = Graph.clear_edges.__doc__
def extend(self, graph):
# add and translate the vertex and edge ids of the second graph
trans_vid, trans_eid = Graph.extend(self, graph)
if isinstance(graph, PropertyGraph):
# update graph properties
for name, prop in graph.vertex_properties():
if name not in self.vertex_property_names():
self.add_vertex_property(name)
self_prop = self.vertex_property(name)
for vid, data in prop.items():
self_prop[trans_vid[vid]] = data
# update edge properties
for name, prop in graph.edge_properties():
if name not in self.edge_property_names():
self.add_edge_property(name)
self_prop = self.edge_property(name)
for eid, data in prop.items():
self_prop[trans_eid[eid]] = data
# update graph properties
for name, data in graph.graph_properties():
if name not in self.graph_properties():
self.add_graph_property(name, data)
return trans_vid, trans_eid
# extend.__doc__ = Graph.extend.__doc__
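# Editor's note: minimal usage sketch added for illustration; it only uses the
# property-management API defined above and assumes the default Graph
# constructor accepts no arguments. The vertex/edge ids 1, 2 and 10 are
# arbitrary and are not checked against the underlying graph, as the TODO at
# the top of this module points out.
def _example_property_graph():
    g = PropertyGraph()
    g.add_vertex_property('label', {1: 'soma', 2: 'axon'})
    g.add_edge_property('length')
    g.edge_property('length')[10] = 4.2
    g.add_graph_property('name', 'demo')
    return (g.vertex_property('label'), g.edge_property('length'),
            g.graph_property('name'))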
| mit | -70,432,156,574,563,224 | 31.475352 | 80 | 0.545809 | false |
jds2001/sos | sos/plugins/mongodb.py | 1 | 1619 | # Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class MongoDb(Plugin, DebianPlugin, UbuntuPlugin):
"""MongoDB document database
"""
plugin_name = 'mongodb'
profiles = ('services',)
packages = ('mongodb-server',)
files = ('/etc/mongodb.conf',)
def setup(self):
self.add_copy_spec([
"/etc/mongodb.conf",
"/var/log/mongodb/mongodb.log",
"/var/log/containers/mongodb/mongodb.log"
])
def postproc(self):
self.do_file_sub(
"/etc/mongodb.conf",
r"(mms-token\s*=\s*.*)",
r"mms-token = ********"
)
class RedHatMongoDb(MongoDb, RedHatPlugin):
def setup(self):
super(RedHatMongoDb, self).setup()
self.add_copy_spec("/etc/sysconfig/mongodb")
# vim: set et ts=4 sw=4 :
| gpl-2.0 | 880,026,662,020,041,000 | 30.745098 | 73 | 0.662137 | false |
wasade/american-gut-web | amgut/handlers/auth_handlers.py | 1 | 4373 | #!/usr/bin/env python
from tornado.web import authenticated
from tornado.escape import json_encode
from amgut.util import AG_DATA_ACCESS
from amgut.lib.mail import send_email
from amgut.handlers.base_handlers import BaseHandler
from amgut import media_locale, text_locale
# login code modified from https://gist.github.com/guillaumevincent/4771570
class AuthRegisterHandoutHandler(BaseHandler):
"""User Creation"""
@authenticated
def get(self):
latlong_db = AG_DATA_ACCESS.getMapMarkers()
self.render("register_user.html", skid=self.current_user,
latlongs_db=latlong_db, loginerror='')
@authenticated
def post(self):
skid = self.current_user
tl=text_locale['handlers']
info = {}
for info_column in ("email", "participantname", "address", "city",
"state", "zip", "country"):
# Make sure that all fields were entered
info[info_column] = self.get_argument(info_column, None)
# create the user if needed
ag_login_id = AG_DATA_ACCESS.addAGLogin(
info['email'], info['participantname'], info['address'],
info['city'], info['state'], info['zip'], info['country'])
# Create the kit and add the kit to the user
kitinfo = AG_DATA_ACCESS.getAGHandoutKitDetails(skid)
printresults = AG_DATA_ACCESS.checkPrintResults(skid)
if printresults is None:
printresults = 'n'
success = AG_DATA_ACCESS.addAGKit(
ag_login_id, skid, kitinfo['password'],
kitinfo['swabs_per_kit'], kitinfo['verification_code'],
printresults)
if success == -1:
self.redirect(media_locale['SITEBASE'] + '/db_error/?err=regkit')
return
# Add the barcodes
kitinfo = AG_DATA_ACCESS.getAGKitDetails(skid)
ag_kit_id = kitinfo['ag_kit_id']
results = AG_DATA_ACCESS.get_barcodes_from_handout_kit(skid)
for row in results:
barcode = row[0]
success = AG_DATA_ACCESS.addAGBarcode(ag_kit_id, barcode)
if success == -1:
self.redirect(media_locale['SITEBASE'] + '/db_error/?err=regbarcode')
return
# Email the verification code
subject = tl['AUTH_SUBJECT']
addendum = ''
if skid.startswith('PGP_'):
addendum = tl['AUTH_REGISTER_PGP']
body = tl['AUTH_REGISTER_BODY'].format(
kitinfo['kit_verification_code'], addendum)
result = tl['KIT_REG_SUCCESS']
try:
send_email(body, subject, recipient=info['email'],
sender=media_locale['HELP_EMAIL'])
except:
result = media_locale['EMAIL_ERROR']
self.render('help_request.html', skid=skid, result=result)
self.redirect(media_locale['SITEBASE'] + '/authed/portal/')
class AuthLoginHandler(BaseHandler):
"""user login, no page necessary"""
def post(self):
skid = self.get_argument("skid", "").strip()
password = self.get_argument("password", "")
tl = text_locale['handlers']
login = AG_DATA_ACCESS.authenticateWebAppUser(skid, password)
if login:
# everything good so log in
self.set_current_user(skid)
self.redirect(media_locale['SITEBASE'] + "/authed/portal/")
return
else:
is_handout = AG_DATA_ACCESS.handoutCheck(skid, password)
if is_handout == 'y':
# login user but have them register themselves
self.set_current_user(skid)
self.redirect(media_locale['SITEBASE'] + '/auth/register/')
return
else:
msg = tl['INVALID_KITID']
latlongs_db = AG_DATA_ACCESS.getMapMarkers()
self.render("index.html", user=None, loginerror=msg,
latlongs_db=latlongs_db)
return
def set_current_user(self, user):
if user:
self.set_secure_cookie("skid", json_encode(user))
else:
self.clear_cookie("skid")
class AuthLogoutHandler(BaseHandler):
"""Logout handler, no page necessary"""
def get(self):
self.clear_cookie("skid")
self.redirect(media_locale['SITEBASE'] + "/")
| bsd-3-clause | -3,059,562,258,704,055,000 | 36.059322 | 85 | 0.584038 | false |
Kynarth/pyqtcli | tests/test_addqres.py | 1 | 6428 | import os
import shutil
from click.testing import CliRunner
from pyqtcli.cli import pyqtcli
from pyqtcli.qrc import read_qrc
from pyqtcli.test.qrc import QRCTestFile
from pyqtcli.test.verbose import format_msg
from pyqtcli import verbose as v
def test_simple_addqres(config, test_resources):
runner = CliRunner()
# Generate a qrc file named res and update config file
runner.invoke(pyqtcli, ["new", "qrc"])
# Test addqres with default option
result = runner.invoke(pyqtcli, ["addqres", "res.qrc", "resources"])
assert result.exit_code == 0
# Parse qrc file
qrcfile = read_qrc("res.qrc")
# Check qresource has been added
qrcfile.get_qresource("/resources")
# Check file subelements in qresource
resources = qrcfile.list_resources("/resources")
for root, dirs, files in os.walk("resources"):
for f in files:
assert os.path.join(root, f) in resources
assert len(resources) == test_resources
# Check res_folder has been added to dirs variable of config file
config.read()
config.get_dirs("res.qrc") == ["resources"]
def test_complex_addqres(config, test_resources):
runner = CliRunner()
# Make a new dir to complicate path between resources folder and qrc file
os.mkdir("test")
shutil.move("resources", "test")
# Generate a qrc file named res and update config file
runner.invoke(pyqtcli, ["new", "qrc", "../res.qrc"])
result = runner.invoke(
pyqtcli, ["addqres", "-a", "-v", "../res.qrc", "test/resources"]
)
assert result.exit_code == 0
# Get in res.qrc directory
os.chdir("..")
# Parse qrc file
qrcfile = read_qrc("res.qrc")
# Check qresource has been added
qrcfile.get_qresource("/resources")
# Check file subelements in qresource
resources = qrcfile.list_resources("/resources")
for root, dirs, files in os.walk("test/resources"):
for f in files:
assert os.path.join(root, f) in resources
assert len(resources) == test_resources
# Check res_folder has been added to dirs variable of config file
config.read()
assert config.get_dirs("res.qrc") == ["test/resources"]
# Check resources' alias
files = qrcfile.list_files("/resources")
for resource in files:
assert os.path.basename(resource.text) == resource.attrib["alias"]
def test_addqres_two_times(config, test_resources):
runner = CliRunner()
# Copy resources dir to make another resource folder in another directory
os.mkdir("test")
shutil.copytree("resources", "test/other_res")
# Generate a qrc file named res and update config file
runner.invoke(pyqtcli, ["new", "qrc", "res.qrc"])
# Create to qresources in res.qrc
runner.invoke(pyqtcli, ["addqres", "res.qrc", "resources"])
runner.invoke(pyqtcli, ["addqres", "-a", "res.qrc", "test/other_res"])
# Parse qrc file
qrcfile = read_qrc("res.qrc")
# Check qresources has been added
qrcfile.get_qresource("/resources")
qrcfile.get_qresource("/other_res")
# Check file subelements in qresource "/resources"
resources = qrcfile.list_resources("/resources")
for root, dirs, files in os.walk("resources"):
for f in files:
assert os.path.join(root, f) in resources
assert len(resources) == test_resources
# Check file subelements in qresource "/other_res"
resources = qrcfile.list_resources("/other_res")
for root, dirs, files in os.walk("test/other_res"):
for f in files:
assert os.path.join(root, f) in resources
assert len(resources) == test_resources
# Check resources' alias in other_res qresource
files = qrcfile.list_files("/other_res")
for resource in files:
assert os.path.basename(resource.text) == resource.attrib["alias"]
# Check that the two res folders have been added to dirs variable of
# config file
config.read()
assert sorted(config.get_dirs("res.qrc")) == sorted([
"resources", "test/other_res"])
def test_addqres_with_two_res_folders(config, test_resources):
runner = CliRunner()
# Copy resources dir to make another resource folder in another directory
os.mkdir("test")
shutil.copytree("resources", "test/other_res")
# Generate a qrc file named res and update config file
runner.invoke(pyqtcli, ["new", "qrc", "res.qrc"])
# Create to qresources in res.qrc
runner.invoke(
pyqtcli, ["addqres", "res.qrc", "resources", "test/other_res"])
# Parse qrc file
qrcfile = read_qrc("res.qrc")
# Check qresources has been added
qrcfile.get_qresource("/resources")
qrcfile.get_qresource("/other_res")
# Check file subelements in qresource "/resources"
resources = qrcfile.list_resources("/resources")
for root, dirs, files in os.walk("resources"):
for f in files:
assert os.path.join(root, f) in resources
assert len(resources) == test_resources
# Check file subelements in qresource "/other_res"
resources = qrcfile.list_resources("/other_res")
for root, dirs, files in os.walk("test/other_res"):
for f in files:
assert os.path.join(root, f) in resources
assert len(resources) == test_resources
# Check that the two res folders have been added to dirs variable of
# config file
config.read()
assert sorted(config.get_dirs("res.qrc")) == sorted([
"resources", "test/other_res"])
# noinspection PyUnusedLocal
def test_addqres_in_non_project_qrc(config, test_resources):
runner = CliRunner()
QRCTestFile("res").add_qresource("/").add_file("test.txt").build()
result = runner.invoke(pyqtcli, ["addqres", "res.qrc", "resources"])
assert format_msg(result.output) == v.error(
"res.qrc isn't part of the project.\nAborted!\n")
# noinspection PyUnusedLocal
def test_addqres_duplication(config, test_resources):
runner = CliRunner()
# Generate a qrc file named res and update config file
runner.invoke(pyqtcli, ["new", "qrc"])
# Add qresources corresponding to resources folder
runner.invoke(pyqtcli, ["addqres", "res.qrc", "resources"])
# Add the same qresource
result = runner.invoke(pyqtcli, ["addqres", "res.qrc", "resources"])
assert format_msg(result.output) == v.warning(
"You have already added \'resources\' to res.qrc.\n")
| mit | -3,913,761,367,511,627,000 | 29.609524 | 77 | 0.660703 | false |
felipenaselva/repo.felipe | plugin.video.uwc/favorites.py | 1 | 2571 | '''
Ultimate Whitecream
Copyright (C) 2015 mortael
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urllib2, re, cookielib, os.path, sys, socket
import xbmc, xbmcplugin, xbmcgui, xbmcaddon, sqlite3
import utils
from chaturbate import clean_database as cleanchat
from cam4 import clean_database as cleancam4
dialog = utils.dialog
favoritesdb = utils.favoritesdb
conn = sqlite3.connect(favoritesdb)
c = conn.cursor()
try:
c.executescript("CREATE TABLE IF NOT EXISTS favorites (name, url, mode, image);")
c.executescript("CREATE TABLE IF NOT EXISTS keywords (keyword);")
except:
pass
conn.close()
def List():
if utils.addon.getSetting("chaturbate") == "true":
cleanchat()
cleancam4()
conn = sqlite3.connect(favoritesdb)
conn.text_factory = str
c = conn.cursor()
try:
c.execute("SELECT * FROM favorites")
for (name, url, mode, img) in c.fetchall():
utils.addDownLink(name, url, int(mode), img, '', '', 'del')
conn.close()
xbmcplugin.endOfDirectory(utils.addon_handle)
except:
conn.close()
utils.notify('No Favorites','No Favorites found')
return
def Favorites(fav,mode,name,url,img):
if fav == "add":
delFav(url)
addFav(mode, name, url, img)
utils.notify('Favorite added','Video added to the favorites')
elif fav == "del":
delFav(url)
utils.notify('Favorite deleted','Video removed from the list')
xbmc.executebuiltin('Container.Refresh')
def addFav(mode,name,url,img):
conn = sqlite3.connect(favoritesdb)
conn.text_factory = str
c = conn.cursor()
c.execute("INSERT INTO favorites VALUES (?,?,?,?)", (name, url, mode, img))
conn.commit()
conn.close()
def delFav(url):
conn = sqlite3.connect(favoritesdb)
c = conn.cursor()
c.execute("DELETE FROM favorites WHERE url = '%s'" % url)
conn.commit()
conn.close()
| gpl-2.0 | -3,299,589,569,647,617,000 | 28.215909 | 85 | 0.668222 | false |
getefesto/efesto | tests/unit/handlers/BaseHandler.py | 1 | 1059 | # -*- coding: utf-8 -*-
from efesto.handlers import BaseHandler
from pytest import fixture
@fixture
def handler(magic):
handler = BaseHandler(magic())
handler.q = magic()
return handler
def test_basehandler_init(magic):
model = magic()
handler = BaseHandler(model)
assert handler.model == model
assert handler._order == model.id
def test_basehandler_embeds(handler, magic):
model = magic(one=magic(spec_set=['rel_model']))
handler.model = model
result = handler.embeds({'_embeds': 'one'})
handler.model.q.join.assert_called_with(model.one.rel_model, on=False)
assert result == ['one']
def test_basehandler_embeds_reverse(handler):
"""
Verifies that embeds work with backrefs.
"""
result = handler.embeds({'_embeds': 'one'})
model = handler.model
model.one.field = 'field'
handler.model.q.join.assert_called_with(model, on=False)
assert result == ['one']
def test_basehandler_embeds_none(handler):
result = handler.embeds({'_embeds': None})
assert result == []
| gpl-3.0 | 1,938,557,776,759,132,400 | 24.214286 | 74 | 0.663834 | false |
qinjunjerry/PyKeyBox | keybox.py | 1 | 15148 | #!/usr/bin/env python
"""A mini key/password manager written in python using the AES encryption algorithm."""
import os
import sys
import time
import os.path
import random
import sqlite3
import hashlib
import getpass
import argparse
import Crypto.Cipher.AES
class KeyBox(object):
TABLE_NAME = "keybox"
MASTER_KEY_TITLE = "<MASTER>"
def __init__(self, a_file):
# the AES key of the master password, to encrypt key content
self.aes_key = None
self.conn = sqlite3.connect(a_file)
# Use 8-bit string instead of unicode string, in order to read/write
# international characters like Chinese
self.conn.text_factory = str
# The following line would use unicode string
# self.conn.text_factory = lambda x: unicode(x, 'utf-8', 'ignore')
self.cursor = self.conn.cursor()
self.cursor.execute('CREATE TABLE IF NOT EXISTS %s (title TEXT PRIMARY KEY, time LONG, content BLOB)' %
KeyBox.TABLE_NAME)
self.conn.commit()
def list(self):
title_time_list = []
self.cursor.execute('SELECT title,time FROM %s ORDER BY time DESC' % KeyBox.TABLE_NAME)
for row in self.cursor:
if row[0] != KeyBox.MASTER_KEY_TITLE:
title_time_list.append((row[0], row[1]))
return title_time_list
def search(self, keywords):
keywords_lower = {keyword.lower() for keyword in keywords}
matching_title_time_list = []
for title, mod_time in self.list():
title_lower = title.lower()
match = True
for keyword in keywords_lower:
if title_lower.find(keyword) == -1:
match = False
break
if match:
matching_title_time_list.append((title, mod_time))
return matching_title_time_list
def exists(self, title):
self.cursor.execute("SELECT time FROM %s WHERE title=?" % KeyBox.TABLE_NAME, (title,))
return self.cursor.fetchone() is not None
def init_master_password(self, table=TABLE_NAME):
password = input_password("Create a new master password: ")
if password == input_password("Confirm the master password: "):
self.aes_key = hashlib.sha256(password).digest()
# the hash of the AES key, stored in db for master password verification
key_hash = hashlib.sha256(self.aes_key).hexdigest()
self.cursor.execute("INSERT OR REPLACE INTO %s VALUES (?,?,?)" % table,
(KeyBox.MASTER_KEY_TITLE, time.time(), key_hash))
self.conn.commit()
else:
exit_with_error("Error: password not match, please retry")
def verify_master_password(self):
# get the stored key hash
self.cursor.execute("SELECT content FROM %s WHERE title=?"
% KeyBox.TABLE_NAME, (KeyBox.MASTER_KEY_TITLE,))
stored_key_hash = self.cursor.fetchone()[0]
# input master password
password = input_password("Master password: ")
self.aes_key = hashlib.sha256(password).digest()
# compare key hash
if hashlib.sha256(self.aes_key).hexdigest() != stored_key_hash:
exit_with_error("Error: incorrect master password, please retry")
def view(self, title):
self.cursor.execute("SELECT time, content FROM %s WHERE title=?"
% KeyBox.TABLE_NAME, (title,))
mod_time, encrypted = self.cursor.fetchone()
return mod_time, decrypt(encrypted, self.aes_key)
def set(self, title, plain, mod_time=time.time(), table=TABLE_NAME):
# for better print effect
if plain[-1] != "\n": plain += "\n"
encrypted = encrypt(plain, self.aes_key)
self.cursor.execute("INSERT OR REPLACE INTO %s VALUES (?,?,?)" % table,
(title, mod_time, sqlite3.Binary(encrypted)))
self.conn.commit()
def delete(self, title):
mod_time, plain = self.view(title)
self.cursor.execute("DELETE FROM %s WHERE title=?" % KeyBox.TABLE_NAME, (title,))
self.conn.commit()
return mod_time, plain
def reset(self):
tmp_table = "_tmp_"
self.cursor.execute('DROP TABLE IF EXISTS %s' % tmp_table)
self.cursor.execute('CREATE TABLE %s (title TEXT PRIMARY KEY, time LONG, content BLOB)' % tmp_table)
keys = []
for title, mod_time in self.list():
content = self.view(title)
keys.append((title, mod_time, content))
self.init_master_password(table=tmp_table)
for title, mod_time, content in keys:
self.set(title, content, mod_time=mod_time, table=tmp_table)
self.cursor.execute("DROP TABLE %s" % KeyBox.TABLE_NAME)
self.cursor.execute("ALTER TABLE %s RENAME TO %s" % (tmp_table, KeyBox.TABLE_NAME))
self.conn.commit()
def input_content(title):
sys.stdout.write("Input content of '%s', enter an empty line to finish:\n" % title)
lines = []
while True:
line = raw_input()
if line:
lines.append(line)
else:
break
return '\n'.join(lines)
def input_password(text):
password = getpass.getpass(text)
if password == "":
exit_with_error("Error: password not given")
return password
def encrypt(plain, aes_key):
iv = ''.join(chr(random.randint(0, 0xFF)) for _ in range(Crypto.Cipher.AES.block_size))
cipher = Crypto.Cipher.AES.AESCipher(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
return iv + cipher.encrypt(plain)
def decrypt(encrypted, aes_key):
iv = encrypted[0:Crypto.Cipher.AES.block_size]
cipher = Crypto.Cipher.AES.AESCipher(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
return cipher.decrypt(encrypted[Crypto.Cipher.AES.block_size:])
def read_keys(a_file):
"""
Supported text file format is as follows:
KEY: key title1
MOD: 1540820240.0
key content line 11
key content line 12
...
KEY: key title2
key content line 21
key content line 22
...
"""
keys = []
with open(a_file, 'r') as fd:
title = ''
mod_time = -1
content_lines = []
for line in fd:
line = line.strip()
if line.startswith("KEY: "): # title line
if title != '' and content_lines != []:
# remove the empty lines at the end
while len(content_lines) > 0 and content_lines[-1] == "\n":
content_lines = content_lines[:-1]
# add to keys for return
if mod_time < 0: mod_time = time.time()
keys.append((title, mod_time, '\n'.join([aLine for aLine in content_lines])))
# set next key title, and clear content
title = line[5:]
content_lines = []
elif line.startswith("MOD: "):
mod_time = float(line[5:])
elif title != "":
content_lines.append(line)
else:
sys.stderr.write("Warn: line '%s' ignored: title missing\n" % line)
# process the last key
if title != '' and content_lines != []:
# remove the empty lines at the end
while len(content_lines) > 0 and content_lines[-1] == "\n":
content_lines = content_lines[:-1]
# add to keys for return
if mod_time < 0: mod_time = time.time()
keys.append((title, mod_time, '\n'.join([aLine for aLine in content_lines])))
return keys
def exit_with_error(err_msg, err_code=-1):
sys.stderr.write(err_msg + "\n")
sys.exit(err_code)
def get_default_db_file():
keybox_file = "%s/.keybox" % os.environ['HOME']
if not os.path.exists(keybox_file):
return "%s/%s.keybox" % (os.environ['HOME'], os.environ['USER'])
with open(keybox_file, 'r') as fd:
for line in fd:
return line
def set_default_db_file(a_file):
keybox_file = "%s/.keybox" % os.environ['HOME']
with open(keybox_file, 'w') as fd:
fd.write(os.path.abspath(a_file))
def main():
# parse command line arguments
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-d', '--database',
help=('the sqlite database file to store keys. ' +
'Default: the previously used database file (see its location in %s/.keybox), ' +
'or %s/%s.keybox') % (os.environ["HOME"], os.environ["HOME"], os.environ['USER']))
subparsers = parser.add_subparsers(title="sub_commands", dest="action",
metavar='help|list|view|add|mod|del|import|export|reset')
subparsers.add_parser("help", help="show this help message and exit")
subparsers.add_parser("list", help="list all key titles (this is default)")
sub_parser = subparsers.add_parser("add", help="add a new key title and content")
sub_parser.add_argument("title", help="a key title")
sub_parser = subparsers.add_parser("view", help="view the content for the key title matching the given keywords")
sub_parser.add_argument("keyword", nargs="+", help="a keyword")
sub_parser = subparsers.add_parser("mod", help="modify the content for the key title matching the given keywords")
sub_parser.add_argument("keyword", nargs="+", help="a keyword")
sub_parser = subparsers.add_parser("del",
help="delete an existing key title matching the given keywords and the key " +
"content")
sub_parser.add_argument("keyword", nargs="+", help="a keyword")
sub_parser = subparsers.add_parser("import", help="import all key titles and contents from a text file")
sub_parser.add_argument("file", help="a text file containing key titles and contents to import")
sub_parser = subparsers.add_parser("export", help="export all key titles and contents to stdout or a file")
sub_parser.add_argument("file", nargs='?', help="a text file to export the key titles and contents")
subparsers.add_parser("reset", help="reset the master password")
# 'list' if no sub-command is given
if len(sys.argv) == 1:
sys.argv.append('list')
args = parser.parse_args()
if args.action == 'help':
parser.print_help()
sys.exit(0)
if args.database is None:
args.database = get_default_db_file() # type: str
else:
set_default_db_file(args.database)
keybox = KeyBox(args.database)
if args.action == 'list':
title_time_array = keybox.list()
if len(title_time_array) == 0:
sys.stdout.write("No item found\n")
else:
for title, mod_time in title_time_array:
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time)) + " - " + title
sys.exit(0)
# check errors before init or verify master password
if args.action == 'add':
if keybox.exists(args.title):
exit_with_error("Error: '%s' exists, try to view it or add with another title" % args.title)
if args.action in ['view', 'mod', 'del']:
matches = keybox.search(args.keyword)
if len(matches) == 0:
exit_with_error(
"Error: no title matching the given keywords, try to list all titles or change to another title")
else:
sys.stdout.write("Found the following titles:\n")
for index, (title, mod_time) in enumerate(matches):
mod_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time))
print "[%d] %s - %s" % (index, mod_str, title)
index = 0
if len(matches) > 1:
index = -1
while index < 0 or index >= len(matches):
index = raw_input("Select: [0] ").strip()
if len(index) == 0:
index = 0
break
else:
try:
index = int(index)
except ValueError:
pass
args.title = matches[index][0]
elif args.action == "import":
if not os.path.exists(args.file):
exit_with_error("Error: file '%s' not found." % args.file)
elif args.action == "export":
fd = sys.stdout
if args.file is not None:
if os.path.exists(args.file):
exit_with_error("Error: file exists, please choose a different file to export")
else:
fd = open(args.file, 'w')
elif args.action == "reset":
if not keybox.exists(KeyBox.MASTER_KEY_TITLE):
exit_with_error("Error: master password is not set yet")
if not keybox.exists(KeyBox.MASTER_KEY_TITLE):
keybox.init_master_password()
else:
keybox.verify_master_password()
if args.action == 'add':
plain = input_content(args.title)
keybox.set(args.title, plain)
elif args.action == "view":
mod_time, plain = keybox.view(args.title)
mod_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time))
sys.stdout.write("---\nKEY: %s\nMOD: %s\n%s---\n" % (args.title, mod_str, plain))
elif args.action == "mod":
sys.stdout.write("---\n%s---\n" % keybox.view(args.title)[1])
plain = input_content(args.title)
keybox.set(args.title, plain)
elif args.action == "del":
mod_time, plain = keybox.view(args.title)
mod_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time))
sys.stdout.write("---\nKEY: %s:\nMOD: %s\n%s---\n" % (args.title, mod_str, plain))
confirm = raw_input("Confirm to delete key '%s' [yes/no]? " % args.title)
while confirm not in ['yes', 'no']:
confirm = raw_input("Confirm to delete key '%s' [yes/no]? " % args.title)
if confirm == 'yes':
keybox.delete(args.title)
sys.stdout.write("Deleted.\n")
elif args.action == "import":
for title, mod_time, content in read_keys(args.file):
if keybox.exists(title):
sys.stdout.write("skipped %s: exists in database\n" % title)
else:
keybox.set(title, content, mod_time=mod_time)
sys.stdout.write("imported %s\n" % title)
elif args.action == "export":
if fd == sys.stdout: fd.write("---\n")
for title, mod_time in keybox.list():
fd.write("KEY: %s\n" % title)
fd.write("MOD: %s\n" % mod_time)
fd.write("%s" % keybox.view(title)[1])
if fd == sys.stdout:
fd.write("---\n")
else:
fd.write("\n")
if fd != sys.stdout:
sys.stdout.write("Exported to file %s\n" % args.file)
elif args.action == "reset":
keybox.reset()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stdout.write("\nUser aborted.\n")
| apache-2.0 | 3,663,396,818,684,847,600 | 38.75853 | 118 | 0.57057 | false |
hpcugent/vsc-processcontrol | test/processcontrol.py | 1 | 4021 | #
#
# Copyright 2013-2013 Ghent University
#
# This file is part of vsc-processcontrol,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/vsc-processcontrol
#
# vsc-processcontrol is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2 of
# the License, or (at your option) any later version.
#
# vsc-processcontrol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-processcontrol. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Stijn De Weirdt (Ghent University)
"""
from unittest import TestCase, TestLoader
from vsc.processcontrol.cpusett import CpuSetT
from vsc.processcontrol.algorithm import BasicCore
TOTAL_CORES = len([x for x in open('/proc/cpuinfo').readlines() if x.lower().startswith('processor')])
class TestCpuSetT(CpuSetT):
DEFAULT_CPUSETSIZE = 16 # set high enough
DEFAULT_NCPUBITS = 8 # default c_ulong is 8 bytes, this is just for test
class ProcesscontrolTest(TestCase):
"""Tests for vsc.processcontrol"""
def test_cpusett(self):
"""Test CpuSetT class"""
cs = TestCpuSetT()
cs.convert_hr_bits('2,5-8,10,11-11')
self.assertEqual(cs.nbitmask, 2)
self.assertEqual(cs.cpusetsize, 16)
self.assertEqual(cs.cpus[:cs.DEFAULT_CPUSETSIZE], [0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0])
self.assertEqual(cs.bits, [228, 13])
cs.set_bits([1, 0, 0, 1, 1, 1, 0, 0, 0, 1])
self.assertEqual(len(cs.cpus), cs.cpusetsize)
self.assertEqual(cs.bits, [57, 2])
self.assertEqual(cs.convert_bits_hr(), "0,3-5,9")
def test_basiccore(self):
bc = BasicCore()
# 4 total cpus, 4 processes to place
bc.create(4, 4)
self.assertEqual(bc.proc_placement, [[0], [1], [2], [3]])
# 8 total cpus, 4 processes to place
bc.create(8, 4)
self.assertEqual(bc.proc_placement, [[0], [0], [1], [1], [2], [2], [3], [3]])
# 4 total cpus, 8 processes to place
bc.create(4, 8)
self.assertEqual(bc.proc_placement, [[0, 1], [2, 3], [4, 5], [6, 7]])
# 6 total cpus, 4 processes to place
bc.create(6, 4)
self.assertEqual(bc.proc_placement, [[0], [0], [1], [2], [2], [3]])
# 6 total cpus, 8 processes to place
bc.create(6, 8)
self.assertEqual(bc.proc_placement, [[0], [1], [2, 3], [4], [5], [6, 7]])
def suite():
""" returns all the testcases in this module """
return TestLoader().loadTestsFromTestCase(ProcesscontrolTest)
if __name__ == '__main__':
"""Use this __main__ block to help write and test unittests
just uncomment the parts you need
"""
# cs = TestCpuSetT()
# cs.convert_hr_bits('2,5-8,10,11-11')
# print cs.nbitmask
# print cs.cpusetsize
# print cs.cpus[:cs.DEFAULT_CPUSETSIZE]
# print cs.bits
bc = BasicCore()
# 4 total cpus, 4 processes to place
bc.create(4, 4)
print bc.proc_placement
# 8 total cpus, 4 processes to place
bc.create(8, 4)
print bc.proc_placement
# 4 total cpus, 8 processes to place
bc.create(4, 8)
print bc.proc_placement
# 6 total cpus, 4 processes to place
bc.create(6, 4)
print bc.proc_placement
# 6 total cpus, 8 processes to place
bc.create(6, 8)
print bc.proc_placement
| lgpl-2.1 | -997,042,249,967,279,200 | 33.367521 | 107 | 0.648844 | false |
edgarli/proj8 | main.py | 1 | 14406 | import flask
from flask import render_template
from flask import request
from flask import url_for
import uuid
import json
import logging
# Date handling
import arrow # Replacement for datetime, based on moment.js
import datetime # But we still need time
from dateutil import tz # For interpreting local times
# OAuth2 - Google library implementation for convenience
from oauth2client import client
import httplib2 # used in oauth2 flow
# Google API for services
from apiclient import discovery
###
# Globals
###
import CONFIG
app = flask.Flask(__name__)
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = CONFIG.GOOGLE_LICENSE_KEY ## You'll need this
APPLICATION_NAME = 'MeetMe class project'
#############################
#
# Pages (routed from URLs)
#
#############################
from db import add_proposal, list_proposal, delete_proposal, get_proposal
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Entering index")
if 'daterange' not in flask.session:
init_session_values()
proposals = list_proposal()
return render_template('index.html', proposals=proposals)
@app.route("/create", methods=['POST'])
def create():
app.logger.debug("Create metting")
daterange = request.form.get('daterange')
begin_time = request.form.get('begin_time')
end_time = request.form.get('end_time')
arr = daterange.split()
begin = '%s' % (arrow.get('%sT%s' % (arr[0], begin_time),
'MM/DD/YYYYTHH:mm').replace(tzinfo=tz.tzlocal()))
end = '%s' % (arrow.get('%sT%s' % (arr[2], end_time),
'MM/DD/YYYYTHH:mm').replace(tzinfo=tz.tzlocal()))
add_proposal(begin, end)
return flask.redirect(flask.url_for('index'))
@app.route("/delete")
def delete():
key = request.args.get('key')
delete_proposal(key)
return flask.redirect(flask.url_for('index'))
@app.route("/proposal")
def proposal():
## We'll need authorization to list calendars
## I wanted to put what follows into a function, but had
## to pull it back here because the redirect has to be a
## 'return'
if request.args.get('key'):
flask.session['key'] = request.args.get('key')
key = flask.session.get('key')
if not key:
return flask.redirect(flask.url_for('index'))
proposal = get_proposal(key)
if not proposal:
return flask.redirect(flask.url_for('index'))
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
events = list_events(gcal_service, proposal['begin'], proposal['end'])
proposal['begin'] = '%s' % arrow.get(proposal['begin']).format('MM/DD/YYYYTHH:mm')
proposal['end'] = '%s' % arrow.get(proposal['end']).format('MM/DD/YYYYTHH:mm')
proposal['id'] = '%s' % proposal['_id']
daterange = {
'begin': proposal['begin'],
'end': proposal['begin'],
}
free_times = split_times(daterange, events)
return render_template('proposal.html', proposal=proposal, free_times=free_times, events=events)
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh serivce object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('proposal'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/set', methods=['POST'])
def set():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering set")
# set calid and datetime
calid = request.form.get('calid')
daterange = request.form.get('daterange')
begin_time = request.form.get('begin_time')
end_time = request.form.get('end_time')
flask.session['calid'] = calid
flask.session['daterange'] = daterange
flask.session["begin_time"] = begin_time
flask.session["end_time"] = end_time
cal_timerange()
app.logger.debug("Set calid=%s, daterange=%s, begin_time=%s', end_time=%s",
calid, daterange, begin_time, end_time)
return flask.redirect(flask.url_for("choose"))
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local')
tomorrow = now.replace(days=+1)
nextweek = now.replace(days=+7)
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 8 to 5
#flask.session["begin_time"] = interpret_time("9am")
#flask.session["end_time"] = interpret_time("5pm")
flask.session["begin_time"] = "09:00"
flask.session["end_time"] = "17:00"
cal_timerange()
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow.isoformat()
def next_day(isotext):
"""
ISO date + 1 day (used in query to Google calendar)
"""
as_arrow = arrow.get(isotext)
return as_arrow.replace(days=+1).isoformat()
####
#
# Functions (NOT pages) that return some information
#
####
def list_events(service, begin, end):
"""
Given a google 'service' object, return a list of
calendars. Each calendar is represented by a dict, so that
it can be stored in the session object and converted to
json for cookies. The returned list is sorted to have
the primary calendar first, and selected (that is, displayed in
Google Calendars web app) calendars before unselected calendars.
"""
app.logger.debug("Entering list_calendars")
calendar_list = service.calendarList().list().execute()["items"]
result = []
for cal in calendar_list:
id = cal["id"]
events = service.events().list(calendarId=id, timeMin=begin,
timeMax=end).execute()['items']
for event in events:
if not event.get('start') or not event['start'].get('dateTime'):
continue
result.append({
"summary": event["summary"],
"start": '%s' % arrow.get(event["start"]["dateTime"]).format('MM/DD/YYYYTHH:mm'),
"end": '%s' % arrow.get(event["end"]["dateTime"]).format('MM/DD/YYYYTHH:mm'),
})
result.sort(key=lambda x: x["start"])
return result
def cal_timerange():
daterange = flask.session['daterange']
begin_time = flask.session['begin_time']
end_time = flask.session['end_time']
arr = daterange.split()
flask.session['begin'] = '%s' % (arrow.get('%sT%s' % (arr[0], begin_time),
'MM/DD/YYYYTHH:mm').replace(tzinfo=tz.tzlocal()))
flask.session['end'] = '%s' % (arrow.get('%sT%s' % (arr[2], end_time),
'MM/DD/YYYYTHH:mm').replace(tzinfo=tz.tzlocal()))
def split_times(daterange, events):
"""
split daterange with events
"""
begin = arrow.get(daterange['begin'], 'MM/DD/YYYYTHH:mm')
end = arrow.get(daterange['end'], 'MM/DD/YYYYTHH:mm')
free_times = []
for event in events:
st = arrow.get(event["start"], 'MM/DD/YYYYTHH:mm')
et = arrow.get(event["end"], 'MM/DD/YYYYTHH:mm')
if begin < et:
if begin < st:
free_times.append('%s - %s' %(begin.format('MM/DD/YYYYTHH:mm'),
st.format('MM/DD/YYYYTHH:mm')))
begin = et
if begin < end:
free_times.append('%s - %s' %(begin.format('MM/DD/YYYYTHH:mm'),
end.format('MM/DD/YYYYTHH:mm')))
return free_times
def cal_sort_key( cal ):
"""
Sort key for the list of calendars: primary calendar first,
then other selected calendars, then unselected calendars.
(" " sorts before "X", and tuples are compared piecewise)
"""
if cal["selected"]:
selected_key = " "
else:
selected_key = "X"
if cal["primary"]:
primary_key = " "
else:
primary_key = "X"
return (primary_key, selected_key, cal["summary"])
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'fmtdate' )
def format_arrow_date( date ):
try:
normal = arrow.get( date )
return normal.format("ddd MM/DD/YYYY")
except:
return "(bad date)"
@app.template_filter( 'fmttime' )
def format_arrow_time( time ):
try:
normal = arrow.get( time )
return normal.format("HH:mm")
except:
return "(bad time)"
#############
if __name__ == "__main__":
# App is created above so that it will
# exist whether this is 'main' or not
# (e.g., if we are running in a CGI script)
app.secret_key = str(uuid.uuid4())
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
# We run on localhost only if debugging,
# otherwise accessible to world
if CONFIG.DEBUG:
# Reachable only from the same computer
app.run(port=CONFIG.PORT,host="0.0.0.0")
else:
# Reachable from anywhere
app.run(port=CONFIG.PORT,host="0.0.0.0")
| artistic-2.0 | 5,545,990,509,238,872,000 | 32.737705 | 100 | 0.64376 | false |
DigitalSlideArchive/large_image | large_image/constants.py | 1 | 1924 | #############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
class SourcePriority:
NAMED = 0 # Explicitly requested
PREFERRED = 1
HIGHER = 2
HIGH = 3
MEDIUM = 4
LOW = 5
LOWER = 6
FALLBACK = 7
MANUAL = 8 # Will never be selected automatically
TILE_FORMAT_IMAGE = 'image'
TILE_FORMAT_PIL = 'PIL'
TILE_FORMAT_NUMPY = 'numpy'
TileOutputMimeTypes = {
# JFIF forces conversion to JPEG through PIL to ensure the image is in a
# common colorspace. JPEG colorspace is complex: see
# https://docs.oracle.com/javase/8/docs/api/javax/imageio/metadata/
# doc-files/jpeg_metadata.html
'JFIF': 'image/jpeg',
'JPEG': 'image/jpeg',
'PNG': 'image/png',
'TIFF': 'image/tiff',
}
TileOutputPILFormat = {
'JFIF': 'JPEG'
}
TileInputUnits = {
None: 'base_pixels',
'base': 'base_pixels',
'base_pixel': 'base_pixels',
'base_pixels': 'base_pixels',
'pixel': 'mag_pixels',
'pixels': 'mag_pixels',
'mag_pixel': 'mag_pixels',
'mag_pixels': 'mag_pixels',
'magnification_pixel': 'mag_pixels',
'magnification_pixels': 'mag_pixels',
'mm': 'mm',
'millimeter': 'mm',
'millimeters': 'mm',
'fraction': 'fraction',
}
| apache-2.0 | -6,192,587,494,464,616,000 | 28.6 | 77 | 0.591476 | false |
pelotoncycle/shared_memory_bloomfilter | tests/test_bloomfilter_error_rate.py | 1 | 1471 | from tempfile import NamedTemporaryFile
from unittest import TestCase
from peloton_bloomfilters import BloomFilter, ThreadSafeBloomFilter, SharedMemoryBloomFilter
class Case(object):
def test(self):
self.assert_p_error(0.2, 1340)
self.assert_p_error(0.15, 870)
self.assert_p_error(0.1, 653)
self.assert_p_error(0.05, 312)
self.assert_p_error(0.01, 75)
self.assert_p_error(0.001, 8)
self.assert_p_error(0.0000001,0)
class TestSharedMemoryErrorRate(TestCase, Case):
def assert_p_error(self, p, errors, count=10000):
with NamedTemporaryFile() as f:
bf = SharedMemoryBloomFilter(f.name, count + 1, p)
for v in xrange(count):
bf.add(v)
self.assertEquals(
sum(v in bf for v in xrange(count, count*2)),
errors)
class TestThreadSafeErrorRate(TestCase, Case):
def assert_p_error(self, p, errors, count=10000):
bf = ThreadSafeBloomFilter(count + 1, p)
for v in xrange(count):
bf.add(v)
self.assertEquals(
sum(v in bf for v in xrange(count, count*2)),
errors)
class TestErrorRate(TestCase, Case):
def assert_p_error(self, p, errors, count=10000):
bf = BloomFilter(count + 1, p)
for v in xrange(count):
bf.add(v)
self.assertEquals(
sum(v in bf for v in xrange(count, count*2)),
errors)
| gpl-3.0 | -8,275,922,267,959,018,000 | 30.978261 | 92 | 0.602311 | false |
facebookresearch/ParlAI | parlai/tasks/taskntalk/agents.py | 1 | 3737 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import Teacher
from parlai.utils.io import PathManager
from .build import build
import json
import os
import random
def _path(opt, task_size='small'):
"""Return path to json file of dataset - it can be train/valid file
of small/large dataset. Validation data is used for test as well,
because labels are inferred from the image and task itself.
"""
dt = opt['datatype'].split(':')[0]
# ensure data is built
build(opt)
if dt == 'train':
file_name = 'train.json'
elif dt == 'valid' or dt == 'test':
file_name = 'valid.json'
else:
raise RuntimeError('Not valid datatype.')
data_path = os.path.join(opt['datapath'], 'taskntalk', task_size, file_name)
return data_path
class AbstractTaskNTalk(Teacher):
"""
TaskNTalk basic teacher, it picks a random image and associates a random task with
it.
Metric updates and observation are to be implemented.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.id = 'taskntalk'
if not shared:
self._setup_data(self.opt['datafile'])
else:
self.data = shared['data']
self.task_defn = shared['task_defn']
self.task_index = shared['task_index']
def _setup_data(self, data_path):
"""
Read the json file and store images and task definitions.
"""
print('loading: ' + data_path)
with PathManager.open(data_path) as data_file:
json_data = json.load(data_file)
self.data = json_data['data']
self.task_defn = json_data['task_defn']
# images are [color, shape, style] lists (example: ['red', 'square', 'dotted'])
self.task_index = {'color': 0, 'shape': 1, 'style': 2}
random.shuffle(self.data)
def share(self):
"""
Share images and task definitions with other teachers.
"""
shared = super().share()
shared['data'] = self.data
shared['task_defn'] = self.task_defn
shared['task_index'] = self.task_index
return shared
def __len__(self):
return len(self.data)
def observe(self, observation):
"""
Process observation for metrics.
"""
self.observation = observation
# TODO(kd): update metrics
return observation
def act(self):
"""
Select random image and associate random task with it.
"""
image = random.choice(self.data)
task = random.choice(self.task_defn)
labels = [image[self.task_index[attr]] for attr in task]
action = {
'image': ' '.join(image),
'text': ' '.join(task),
'labels': [' '.join(labels)],
'episode_done': True,
}
# TODO(kd): fetch all data for valid/test
return action
class SmallTeacher(AbstractTaskNTalk):
"""
Teacher for small dataset, invoked by ``taskntalk:small``.
"""
def __init__(self, opt, shared=None):
opt['datafile'] = _path(opt, 'small')
super().__init__(opt, shared)
class LargeTeacher(AbstractTaskNTalk):
"""
Teacher for large dataset, invoked by ``taskntalk:large``.
"""
def __init__(self, opt, shared=None):
opt['datafile'] = _path(opt, 'large')
super().__init__(opt, shared)
class DefaultTeacher(SmallTeacher):
"""
Default teacher for small dataset, invoked by ``taskntalk``.
"""
pass
| mit | 5,102,321,009,451,953,000 | 27.968992 | 87 | 0.587905 | false |
Brazelton-Lab/bio_utils | bio_utils/iterators/fasta.py | 1 | 5590 | #! /usr/bin/env python3
"""Faster, simpler, Screed-esque iterator for FASTA files
Copyright:
fasta.py iterate over and return entries of a FASTA file
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
__author__ = 'Alex Hyer'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '3.0.1'
class FastaEntry:
"""A simple class to store data from FASTA entries and write them
Attributes:
id (str): FASTA ID (everything between the '>' and the first space
of header line)
description (str): FASTA description (everything after the first
space of the header line)
sequence (str): FASTA sequence
"""
def __init__(self):
"""Initialize attributes to store FASTA entry data"""
self.id = None
self.description = None
self.sequence = None
def write(self):
"""Return FASTA formatted string
Returns:
str: FASTA formatted string containing entire FASTA entry
"""
if self.description:
return '>{0} {1}{3}{2}{3}'.format(self.id,
self.description,
self.sequence,
os.linesep)
else:
return '>{0}{2}{1}{2}'.format(self.id,
self.sequence,
os.linesep)
def fasta_iter(handle, header=None):
"""Iterate over FASTA file and return FASTA entries
Args:
handle (file): FASTA file handle, can be any iterator so long as it
it returns subsequent "lines" of a FASTA entry
header (str): Header line of next FASTA entry, if 'handle' has been
partially read and you want to start iterating at the next entry,
read the next FASTA header and pass it to this variable when
calling fasta_iter. See 'Examples.'
Yields:
FastaEntry: class containing all FASTA data
Raises:
IOError: If FASTA entry doesn't start with '>'
Examples:
The following two examples demonstrate how to use fasta_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in fasta_iter(open('test.fasta')):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
>>> fasta_handle = open('test.fasta')
>>> next(fasta_handle) # Skip first entry header
>>> next(fasta_handle) # Skip first entry sequence
>>> first_line = next(fasta_handle) # Read second entry header
>>> for entry in fasta_iter(fasta_handle, header=first_line):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
"""
# Speed tricks: reduces function calls
append = list.append
join = str.join
strip = str.strip
next_line = next
if header is None:
header = next(handle) # Read first FASTQ entry header
# Check if input is text or bytestream
if (isinstance(header, bytes)):
def next_line(i):
return next(i).decode('utf-8')
header = strip(header.decode('utf-8'))
else:
header = strip(header)
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
line = strip(next_line(handle))
data = FastaEntry()
try:
if not header[0] == '>':
raise IOError('Bad FASTA format: no ">" at beginning of line')
except IndexError:
raise IOError('Bad FASTA format: file contains blank lines')
try:
data.id, data.description = header[1:].split(' ', 1)
except ValueError: # No description
data.id = header[1:]
data.description = ''
# Obtain sequence
sequence_list = []
while line and not line[0] == '>':
append(sequence_list, line)
line = strip(next_line(handle)) # Raises StopIteration at EOF
header = line # Store current line so it's not lost next iteration
data.sequence = join('', sequence_list)
yield data
except StopIteration: # Yield last FASTA entry
data.sequence = ''.join(sequence_list)
yield data
| gpl-3.0 | -4,799,499,336,410,846,000 | 33.294479 | 82 | 0.584258 | false |
berrak/cookiecutter-py3starter | {{cookiecutter.github_repo_name}}/{{cookiecutter.package_name}}/cli.py | 1 | 1728 | #!/usr/bin/env python3
#
# Copyright {{ cookiecutter.author_name }}, {{ cookiecutter.initial_year_to_release }}
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
{{ cookiecutter.package_name }}.cli
-----------------------------------
Entry for the `{{ cookiecutter.package_name }}` CLI.
"""
import sys
import argparse
from {{ cookiecutter.package_name }} import __version__
from {{ cookiecutter.package_name }}.utils.environment import python_version
from {{ cookiecutter.package_name }}.api.greetings import Greetings
from {{ cookiecutter.package_name }}.api.greetings import howdy_greeting
def main(argv=sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"-V, --version", help="show the version and exit", action="version",
version="%(prog)s: version {version} (Python {pyversion})".format(version=__version__, pyversion=python_version()))
parser.add_argument(
"-c, --cowboy", help="cowboy greeting",
action="store_true", dest="iscowboy",
default=False)
args = parser.parse_args()
# Do some meaningful ...
if args.iscowboy:
print(howdy_greeting())
else:
greetings = Greetings()
print(greetings)
return 0
| apache-2.0 | -237,356,212,394,522,000 | 29.315789 | 123 | 0.681713 | false |
roiser/WLCG | ssbCvmfsStatus/wlcg-cvmfs4ssb.py | 1 | 10226 | #/bin/env python
import urllib, json, datetime
from xml.parsers import expat
class c4s :
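  """Collect CVMFS probe results per site from the WLCG monitoring dashboard
  and evaluate them into colour-coded metrics for the LHCb SSB."""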
def __init__(self):
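    # static configuration: monitored VO, dashboard column ids, topology and
    # monitoring URLs, and the list of metrics published to the SSB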
self.cvmfsBaseVersionFile = 'cvmfsVersion.txt'
self.requestedVersion = ''
self.myVO = 'LHCb'
self.cvmfsColumnNo = 202
self.wlcgTopoColumnNo = 144
self.topoDict = {'WLCG':{}, self.myVO:{}}
self.ssbTimePat = '%Y-%m-%dT%H:%M:%S'
self.dontpanic = 'http://www.adluge.com/wp-content/uploads/2013/09/homer-simpson-doh.gif'
self.topologyURL = 'http://lhcb-web-dirac.cern.ch/topology/lhcb_topology.xml'
self.wlcgBaseUrl = 'http://wlcg-mon.cern.ch/dashboard/request.py/'
self.wlcgGetUrl = self.wlcgBaseUrl+'getplotdata?columnid=%d&time=24&sites=all&batch=1'
self.wlcgSiteBaseLink = 'http://lhcb-web-dirac.cern.ch/DIRAC/LHCb-Production/undefined/grid/SiteStatus/display?name='
self.ssbMetrics = ['CvmfsVersion','CvmfsRepoRevision','CvmfsMountPoint','CvmfsCondDBMountPoint', 'CvmfsProbeTime', 'CvmfsStratumOnes', 'CvmfsNumSquids', 'CvmfsProbeNoInfo', 'CvmfsProbeLink']
self.ssbData = {}
for k in self.ssbMetrics : self.ssbData[k] = {}
### start probe functions ###
### eval functions ###
def evalCvmfsProbeLink(self, val, site):
return (val, 'green')
def evalCvmfsProbeNoInfo(self, val, site) :
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
if self.ssbData['CvmfsVersion'][site] == 'not installed' : return ('n/a (not installed)', 'grey')
we = val.split(':')[0]
if we == 'WARNING' : return (val, 'orange')
if we == 'ERROR' : return (val, 'red')
return (val, 'green')
def evalCvmfsVersion(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
if val == 'nfs' : return (val, 'green')
if val in ('n/a', 'not installed') : return (val, 'red')
x = 2
maxDiff = range(x+1)
deplV = map(lambda x: int(x), val.split('.'))
reqV = map(lambda x: int(x), self.requestedVersion.split('.'))
if deplV[1] == reqV[1] and deplV[0] == reqV[0] :
if (reqV[2] - deplV[2]) in maxDiff : return (val, 'green')
else : return (val, 'orange')
else : return (val, 'red')
def evalCvmfsRepoRevision(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('nfs', 'not installed') : return ('n/a (%s)'%vers, 'grey')
return (val, 'green')
def evalCvmfsMountPoint(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
    if vers in ('not installed',) : return ('n/a (%s)'%vers, 'grey')
if val and val == '/cvmfs/lhcb.cern.ch' : return (val, 'green')
else : return (val, 'orange')
def evalCvmfsCondDBMountPoint(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
if self.ssbData['CvmfsVersion'][site] == 'not installed' : return ('n/a (not installed)', 'grey')
if val == 'yes' : return (val, 'orange')
else : return (val, 'green')
def evalCvmfsProbeTime(self, val, site):
if val == 'no probe' : return (val, 'red')
pTime = datetime.datetime.strptime(val,self.ssbTimePat)
curTime = datetime.datetime.now()
delta = (curTime - pTime).seconds
if delta < 21600 : return (val, 'green')
elif delta < 43200 : return (val, 'orange')
else : return (val, 'red')
def evalCvmfsStratumOnes(self, val, site) :
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('nfs', 'not installed') : return ('n/a (%s)'%vers, 'grey')
if val : return (val, 'green')
else: return ('none', 'red')
def evalCvmfsNumSquids(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('nfs', 'not installed') : return ('n/a (%s)'%vers, 'grey')
if val :
if int(val) > 1 : return (val, 'green')
else : return (val, 'orange')
else: return (val , 'red')
### retrieval functions ###
def getValCvmfsProbeLink(self, site, probe, metric):
self.ssbData['CvmfsProbeLink'][site]=metric['URL']
def getValCvmfsProbeNoInfo(self, site, probe, metric):
val = 'none'
pat = 'INFO: Mandatory tests exectuted successfully, now continuing with testing optional repositories'
for line in probe :
we = line.split(':')[0]
if line[:len(pat)] == pat : break
elif we == 'WARNING' and val.split(':')[0] != 'ERROR' : val = line
elif we == 'ERROR' : val = line
self.ssbData['CvmfsProbeNoInfo'][site] = val
def getValCvmfsVersion(self, site, probe, metric):
pat1 = 'INFO: CVMFS version installed '
pat2 = 'INFO: Mandatory mount point /cvmfs/lhcb.cern.ch is nfs mount point'
pat3 = 'INFO: No cvmfs rpms found on WN, checking if this WN uses nfs mounting of CVMFS repositories'
ver = 'n/a'
noCVMFS = False
cvmfsViaNFS = False
for line in probe :
if line[:len(pat1)] == pat1 :
ver = line[len(pat1):]
elif line[:len(pat2)] == pat2 :
ver = 'nfs'
cvmfsViaNFS = True
elif line[:len(pat3)] == pat3 :
noCVMFS = True
if noCVMFS and not cvmfsViaNFS : ver = 'not installed'
self.ssbData['CvmfsVersion'][site] = ver
def getValCvmfsRepoRevision(self, site, probe, metric):
pat = 'INFO: repository revision '
rev = 'n/a'
for line in probe :
if line[:len(pat)] == pat :
rev = line[len(pat):]
break
self.ssbData['CvmfsRepoRevision'][site] = rev
def getValCvmfsMountPoint(self, site, probe, metric):
pat1 = 'INFO: Variable VO_LHCB_SW_DIR points to CVMFS mount point '
pat2 = 'INFO: Mandatory mount point /cvmfs/lhcb.cern.ch is nfs mount point'
mp = 'n/a'
for line in probe :
if line[:len(pat1)] == pat1 :
mp = line[len(pat1):]
elif line[:len(pat2)] == pat2 :
mp = '/cvmfs/lhcb.cern.ch'
self.ssbData['CvmfsMountPoint'][site] = mp
def getValCvmfsCondDBMountPoint(self, site, probe, metric):
pat = 'INFO: repository /cvmfs/lhcb-conddb.cern.ch available'
cm = 'no'
for line in probe :
if line[:len(pat)] == pat :
cm = 'yes'
self.ssbData['CvmfsCondDBMountPoint'][site] = cm
def getValCvmfsProbeTime(self, site, probe, metric):
self.ssbData['CvmfsProbeTime'][site] = metric['URL'].split('&')[1].split('=')[1][:-1]
# self.ssbData['CvmfsProbeTime'][site] = metric['EndTime']
def getValCvmfsStratumOnes(self, site, probe, metric) :
strats = []
pat = 'INFO: Servers: '
for line in probe :
if line[:len(pat)] == pat :
stratumL = line[len(pat):]
for serv in stratumL.split() :
strats.append('.'.join(serv.split('/')[2].split(':')[0].split('.')[-2:]))
break
self.ssbData['CvmfsStratumOnes'][site] = ' '.join(strats)
def getValCvmfsNumSquids(self, site, probe, metric) :
numSq = 0
pat = 'INFO: Proxies: '
for line in probe :
if line[:len(pat)] == pat :
numSq = len(line[len(pat):].split())
break
self.ssbData['CvmfsNumSquids'][site] = numSq
### end probe functions ####
def xmlStartElement(self, name, attrs):
if name == 'atp_site' : self.currWLCGSite = attrs['name']
if name == 'group' and attrs['type'] == 'LHCb_Site' :
self.topoDict['WLCG'][attrs['name']] = self.currWLCGSite
def bootstrap(self):
# get WLCG Mon mapping VO site name <-> site ID
topo = json.loads(urllib.urlopen(self.wlcgGetUrl%self.wlcgTopoColumnNo).read())
for ent in topo['csvdata'] : self.topoDict[self.myVO][ent['SiteId']] = ent['Status']
# read CVMFS base line version number
f = open(self.cvmfsBaseVersionFile, 'r')
self.requestedVersion = f.read()
f.close()
# read topology file and create mapping VO site name <-> WLCG site name
topo = urllib.urlopen(self.topologyURL).read()
p = expat.ParserCreate()
p.StartElementHandler = self.xmlStartElement
p.Parse(topo)
def clearSsbData(self, site):
for metric in self.ssbMetrics :
self.ssbData[metric][site] = ''
def collectInfo(self):
info = json.loads(urllib.urlopen(self.wlcgGetUrl%self.cvmfsColumnNo).read())
for metricInf in info['csvdata'] :
site = self.topoDict[self.myVO][metricInf['SiteId']]
tTime = datetime.datetime.strptime(metricInf['Time'], self.ssbTimePat)
dTime = self.ssbData['CvmfsProbeTime'].get(site)
if ( not dTime ) or ( datetime.datetime.strptime(dTime, self.ssbTimePat) < tTime ) :
if dTime : self.clearSsbData(site)
tl = urllib.urlopen(self.wlcgBaseUrl+metricInf['URL']).read().split('\n')
for metr in self.ssbMetrics : eval('self.getVal'+metr)(site, tl, metricInf)
for site in self.topoDict['WLCG'].keys() :
if not self.ssbData['CvmfsProbeTime'].get(site) :
for metric in self.ssbMetrics : self.ssbData[metric][site] = ''
self.ssbData['CvmfsProbeTime'][site] = 'no probe'
def writeSSBColumns(self):
for k in self.ssbMetrics :
fun = 'self.eval'+k
colData = self.ssbData[k]
f = open(k+'.ssb.txt', 'w')
for site in colData.keys() :
now = str(datetime.datetime.now())
(val, color) = eval(fun)(colData[site], site)
url = self.dontpanic
if self.ssbData['CvmfsProbeLink'].get(site): url = self.wlcgBaseUrl+self.ssbData['CvmfsProbeLink'][site]
f.write('%s\t%s\t%s\t%s\t%s\n' % (now, site, val, color, url))
f.close()
def createWLCGLHCbMapping(self):
f = open('WLCGSiteMapping.ssb.txt','w')
for site in self.topoDict['WLCG'].keys() :
now = str(datetime.datetime.now())
val = self.topoDict['WLCG'][site]
color = 'white'
url = self.wlcgSiteBaseLink+site
f.write('%s\t%s\t%s\t%s\t%s\n' % (now, site, val, color, url))
def run(self):
self.bootstrap()
self.collectInfo()
self.writeSSBColumns()
self.createWLCGLHCbMapping()
if __name__ == '__main__' :
c4s().run()
| mit | 7,568,485,905,248,002,000 | 39.741036 | 194 | 0.622531 | false |
arkabytes/abc | ABC/migrations/0005_auto_20171023_0929.py | 1 | 7572 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-23 09:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ABC', '0004_auto_20171023_0924'),
]
operations = [
migrations.AddField(
model_name='company',
name='email',
field=models.EmailField(default=None, max_length=254),
),
migrations.AddField(
model_name='company',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='company',
name='web',
field=models.URLField(default='http://'),
),
migrations.AddField(
model_name='customer',
name='email',
field=models.EmailField(default=None, max_length=254),
),
migrations.AddField(
model_name='customer',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='customer',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='customer',
name='web',
field=models.URLField(default='http://'),
),
migrations.AddField(
model_name='deliverytype',
name='cost',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='deliverytype',
name='days',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AddField(
model_name='event',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoice',
name='amount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoice',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoice',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='invoice',
name='tax_base',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoice',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoicedetails',
name='discount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoicedetails',
name='price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='quantity',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AddField(
model_name='invoicedetails',
name='subtotal',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='item',
name='cost_price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='item',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='item',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='item',
name='retail_price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='item',
name='stock',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='order',
name='amount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='delivery_cost',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='finished',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='order',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='order',
name='payment_cost',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='tax_base',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='orderdetails',
name='discount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='orderdetails',
name='price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='quantity',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AddField(
model_name='orderdetails',
name='subtotal',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='provider',
name='email',
field=models.EmailField(default=None, max_length=254),
),
migrations.AddField(
model_name='provider',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='provider',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='provider',
name='web',
field=models.URLField(default='http://'),
),
migrations.AddField(
model_name='task',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='task',
name='notice',
field=models.TextField(default=None),
),
]
| gpl-3.0 | 5,220,348,303,067,717,000 | 29.288 | 66 | 0.509377 | false |
Brickstertwo/git-commands | tests/unit/test_abandon.py | 1 | 7149 | import mock
import unittest
import testutils
from bin.commands import abandon
class TestAbandon(unittest.TestCase):
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_abandon(self, mock_info, mock_call, mock_checkoutput):
# setup
existing_stashes = '1\n2\n3\n4'
stash1 = 'stash1\n'
stash2 = 'stash2\n'
mock_checkoutput.side_effect = [existing_stashes, stash1, stash2]
# when
start = 1
end = 3
abandon.abandon(start, end)
# then
mock_checkoutput.assert_has_calls([
mock.call(['git', 'stash', 'list']),
mock.call(['git', 'rev-parse', 'stash@{1}']),
mock.call(['git', 'rev-parse', 'stash@{1}'])
])
mock_call.assert_called_with('git stash drop --quiet stash@{{{}}}'.format(start).split())
self.assertEqual(mock_call.call_count, 2)
mock_info.assert_has_calls([
mock.call('Dropped refs/stash@{{{}}} ({})'.format(1, stash1.strip()), False),
mock.call('Dropped refs/stash@{{{}}} ({})'.format(2, stash2.strip()), False)
])
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_abandon_quiet(self, mock_info, mock_call, mock_checkoutput):
# setup
existing_stashes = '1\n2\n3\n4'
stash1 = 'stash1\n'
stash2 = 'stash2\n'
mock_checkoutput.side_effect = [existing_stashes, stash1, stash2]
# when
start = 1
end = 3
quiet = True
abandon.abandon(start, end, quiet=quiet)
# then
mock_checkoutput.assert_has_calls([
mock.call(['git', 'stash', 'list']),
mock.call(['git', 'rev-parse', 'stash@{1}']),
mock.call(['git', 'rev-parse', 'stash@{1}'])
])
mock_call.assert_called_with('git stash drop --quiet stash@{{{}}}'.format(start).split())
self.assertEqual(mock_call.call_count, 2)
mock_info.assert_has_calls([
mock.call('Dropped refs/stash@{{{}}} ({})'.format(1, stash1.strip()), quiet),
mock.call('Dropped refs/stash@{{{}}} ({})'.format(2, stash2.strip()), quiet)
])
@mock.patch('bin.commands.utils.execute.check_output', return_value='1\n2\n3\n')
@mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
def test_abandon_endLessThanZero(self, mock_error, mock_checkoutput):
# when
try:
abandon.abandon(0, -1)
self.fail('expected to exit but did not') # pragma: no cover
except SystemExit:
pass
mock_error.assert_called_once_with('end cannot be negative')
@mock.patch('bin.commands.utils.execute.check_output', return_value='1\n2\n3\n')
@mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
def test_abandon_endBeforeStart(self, mock_error, mock_checkoutput):
# when
try:
abandon.abandon(10, 2)
self.fail('expected to exit but did not') # pragma: no cover
except SystemExit:
pass
mock_error.assert_called_once_with('end of range cannot come before the start')
@mock.patch('bin.commands.utils.execute.check_output', return_value='one\ntwo')
@mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
def test_abandon_startGreaterThanStashCount(self, mock_error, mock_checkoutput):
# when
try:
abandon.abandon(10, 11)
self.fail('expected to exit but did not') # pragma: no cover
except SystemExit:
pass
# then
mock_checkoutput.assert_called_once_with('git stash list'.split())
mock_error.assert_has_calls([
mock.call('start too high', exit_=False),
mock.call('only 2 stashes exist')
])
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.execute.call')
@mock.patch('bin.commands.utils.messages.info')
def test_abandon_endGreaterThanStashCount(self, mock_info, mock_call, mock_checkoutput):
# setup
existing_stashes = '1\n2\n'
stash1 = 'stash1\n'
stash2 = 'stash2\n'
mock_checkoutput.side_effect = [existing_stashes, stash1, stash2]
# when
start = 0
end = 200
abandon.abandon(start, end)
# then
mock_checkoutput.assert_has_calls([
mock.call(['git', 'stash', 'list']),
mock.call(['git', 'rev-parse', 'stash@{0}']),
mock.call(['git', 'rev-parse', 'stash@{0}'])
])
mock_call.assert_called_with('git stash drop --quiet stash@{{{}}}'.format(start).split())
self.assertEqual(mock_call.call_count, 2)
mock_info.assert_has_calls([
mock.call('Dropped refs/stash@{{{}}} ({})'.format(0, stash1.strip()), False),
mock.call('Dropped refs/stash@{{{}}} ({})'.format(1, stash2.strip()), False)
])
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.messages.info')
def test_abandon_dryRun(self, mock_info, mock_checkoutput):
# setup
existing_stashes = '1\n2\n3\n4'
stash1 = 'stash1\n'
stash2 = 'stash2\n'
mock_checkoutput.side_effect = [existing_stashes, stash1, stash2]
# when
start = 1
end = 3
abandon.abandon(start, end, dry_run=True)
# then
mock_checkoutput.assert_has_calls([
mock.call(['git', 'stash', 'list']),
mock.call(['git', 'rev-parse', 'stash@{1}']),
mock.call(['git', 'rev-parse', 'stash@{2}'])
])
mock_info.assert_has_calls([
mock.call('Would drop refs/stash@{{{}}} ({})'.format(1, stash1.strip())),
mock.call('Would drop refs/stash@{{{}}} ({})'.format(2, stash2.strip()))
])
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.messages.info')
def test_abandon_dryRun_quiet(self, mock_info, mock_checkoutput):
"""Same as test_abandon_dryRun since a quiet dry run isn't useful."""
# setup
existing_stashes = '1\n2\n3\n4'
stash1 = 'stash1\n'
stash2 = 'stash2\n'
mock_checkoutput.side_effect = [existing_stashes, stash1, stash2]
# when
start = 1
end = 3
abandon.abandon(start, end, dry_run=True, quiet=True)
# then
mock_checkoutput.assert_has_calls([
mock.call(['git', 'stash', 'list']),
mock.call(['git', 'rev-parse', 'stash@{1}']),
mock.call(['git', 'rev-parse', 'stash@{2}'])
])
mock_info.assert_has_calls([
mock.call('Would drop refs/stash@{{{}}} ({})'.format(1, stash1.strip())),
mock.call('Would drop refs/stash@{{{}}} ({})'.format(2, stash2.strip()))
])
| mit | 7,174,117,415,107,765,000 | 36.041451 | 97 | 0.575465 | false |
tomhur/domoticz-scripts | python/script_time_verisure.py | 1 | 3621 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import domoticz as d
import sys
import time
sys.path.insert(0, '/opt/python-verisure/')
import verisure
import pickle
import pytz
import urllib3
import certifi
from datetime import datetime
from tzlocal import get_localzone
debug = False
try:
execfile("/etc/domoticz/scripts.conf")
except:
exec(open("/etc/domoticz/scripts.conf").read())
d.log("Getting status from Verisure...")
if int(time.time()) % frequency < 60 :
#Login
try:
f = open(mypagesSession, 'rb')
myPages = pickle.load(f)
f.close()
except:
myPages = verisure.Session(email, verisurepass)
myPages.login()
f = open(mypagesSession, 'wb')
pickle.dump(myPages, f)
f.close()
if debug:
d.log("Loading file failed.")
#Get overview
try:
overview = myPages.get_overview()
except:
myPages = verisure.Session(email, verisurepass)
myPages.login()
f = open(mypagesSession, 'wb')
pickle.dump(myPages, f)
f.close()
overview = myPages.get_overview()
if debug:
d.log("Session was timed out")
#Alarm
status = overview['armState']['statusType']
if debug:
d.log("Verisure Alarm status: ", status )
device = d.devices[atHome]
if status == "DISARMED" or status == "ARMED_HOME":
device.on()
else:
device.off()
#Smartplugs
for i in overview['controlPlugs']:
if debug:
d.log("Verisure Smartplug status for " + i['area'].encode("utf-8","ignore") + ": ", i['currentState'] )
device = d.devices[i['area'].encode("utf-8","ignore")]
if i['currentState'] == "ON":
device.on()
else:
device.off()
#Climate
for i in overview['climateValues']:
device = d.devices[i['deviceArea'].encode("utf-8","ignore")]
domlastupdate = datetime.strptime(device.last_update_string, '%Y-%m-%d %H:%M:%S')
verilastupdate = datetime.strptime(i['time'][:-5], '%Y-%m-%dT%H:%M:%S')
verilastupdate = verilastupdate.replace(tzinfo=pytz.UTC)
verilastupdate = verilastupdate.astimezone(get_localzone())
verilastupdate = verilastupdate.replace(tzinfo=None)
if debug:
d.log("Domoticz last update of " + device.name + ": " + str(domlastupdate))
d.log("Verisure last update of " + device.name + ": " + str(verilastupdate))
if verilastupdate > domlastupdate:
if debug:
d.log("update domoticz climate device " + device.name)
if debug:
d.log("time: " + i['time'] )
d.log("location: " + i['deviceArea'].encode("utf-8","ignore") )
d.log("serial: " + i['deviceLabel'] )
d.log("temperature: " + str(i['temperature']))
if 'humidity' in i:
if debug:
d.log("humidity: " + str(i['humidity']))
if i['humidity'] < 20:
comf = 2
if i['humidity'] >= 20 and i['humidity'] <= 35:
comf = 0
if i['humidity'] > 35 and i['humidity'] <= 75:
comf = 1
if i['humidity'] > 75:
comf = 3
url = baseurl + "type=command¶m=udevice&idx=" + climate[i['deviceArea'].encode("utf-8","ignore")] + "&nvalue=0&svalue=" + str(i['temperature']) + ";" + str(i['humidity']) + ";" + str(comf)
else:
url = baseurl + "type=command¶m=udevice&idx=" + climate[i['deviceArea'].encode("utf-8","ignore")] + "&nvalue=0&svalue=" + str(i['temperature'])
if debug:
d.log('URL: ' + url)
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
r = http.request('GET', url, timeout=2.5)
if debug:
d.log("Status code: " + str(r.status) + "\n" + r.data)
if r.status != 200:
d.log("Error updating temp in Domoticz. HTTP code: " + str(r.status) + " " + r.data)
else:
if debug:
d.log("Only runs every " + str(frequency/60) + " min.")
d.log("done getting status from Verisure")
| mit | 8,858,491,424,379,706,000 | 28.680328 | 196 | 0.638498 | false |
AFMD/smallProjects | nanowire-network-simulations/manningp3plotedit6.py | 1 | 20377 | """
Created on Mon Jun 15 15:42:23 2020
@author: sturdzal
"""
#@title Imports
from shapely.geometry import LineString, MultiLineString, MultiPoint, Point
from shapely.ops import cascaded_union
from scipy.special import comb
from itertools import product
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
import numpy as np
from itertools import islice
from cvxopt import matrix, solvers
from cvxopt.base import sparse
from cvxopt.base import matrix as m
from cvxopt.lapack import *
from cvxopt.blas import *
import cvxopt.misc as misc
#from pykrylov.symmlq import symmlq
#from pykrylov.symmlq import *
#from symmlq import *
#import symmlq
import networkx as nx
from itertools import islice, combinations
from collections import Counter, defaultdict
#from pykrylov.linop import PysparseLinearOperator
#from pykrylov.symmlq import *
import scipy
from scipy.sparse.linalg import *
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import minres
import os.path
import time
import os
import matplotlib.pyplot as plt
import random
from statistics import mean
#------------------Parameter--------------------
R_junc = 1.0 # 100100
#R_junc_list = [1000, 10000, 100000, 10000000, 10000000]
rho0 = 0.314 #0.0790 #0.8 #0.0790 #0.0226
#rho0_list = [0.000314, 0.00314, 0.0314, 0.314, 3.14, 31.4, 314]
wire_diameter = 2 #30.0
wire_length= 1.0 #6.0
extinction_coeff = 4 #0.2
box_length = 5 #15.0 5x wire length gives good results independent of tol for e-9 to e-15
samples = 1
elec_length = box_length
box_y = box_length
lead_sep = box_length
n_min = 0.16411
nstep = 10*n_min
n_initial = 40*n_min #1.90079+30*nstep #0.16411
n_final = 80*n_min #1.90079+31*nstep
percentage_chance = 0.0
distl = False
lower_l = 2.2
upper_l = np.inf
sigmal = 2.0
lmean = wire_length
A0 = math.pi*((wire_diameter*0.001)/2)**2
# End ---------- Parameters block -------------
# ---------- Parameters for symmlq routine -------------
tol=1e-10
show=False
maxit=None
#----------- Parameters for Calculation time display --------
start_time = time.process_time()
# ---------- Output file -------------
res_file = "output2.txt"
if os.path.exists(res_file)==False:
open(res_file, "w").write("Density AF Transmittance Average_resistance resStdev Junct_density R_junc rho0 wire_diameter wire_length box_length samples nstep n_initial n_final tolerance_minres distl lower_l upper_l sigmal junctions_removal calctime\n")
#res_dist = open(res_file,"a")
# ---------- Auxiliary lists for ensemble calculation -------------
res_list=[]
short_sep_list=[]
junc_dens=[]
dens_temp=[]
avg_res_temp=[]
st_dev_temp=[]
resistancelist=[]
transmittancelist=[]
diameterlist=[]
d_counter=0
for wire_diameter in np.arange(1, 4, 2):
transmittancelist.append([])
resistancelist.append([])
diameterlist.append(wire_diameter)
for density in np.arange(n_initial,n_final,nstep):
for sample in range(samples):
while True:
try:
area = box_length**2 # box area (in um^2)
box_x = box_length # box width (in um)
box_y = box_length # box length (in um)
num_junc = 0 # junction counter
nwires = area*density # total number of nanowires
# Start ---------- Creation of random stick coordinates and electrodes -------------
# a single wire is represented by a set of initial and final coordinates as [(x1,y1),(x2,y2)].
x1 = np.random.rand(int(nwires))*box_x
y1 = np.random.rand(int(nwires))*box_y
length_array = np.zeros(int(nwires))
if distl == True:
lengths = stats.truncnorm((lower_l - lmean) / sigmal, (upper_l - lmean) / sigmal, loc=lmean, scale=sigmal)
length_array = lengths.rvs(size=nwires)
else:
length_array.fill(wire_length)
# Sorting the angles that define the wire orientation (in radians from 0 to 2 *pi).
theta1 = np.random.rand(int(nwires))*2.0*math.pi
x2 = length_array * np.cos(theta1) + x1
y2 = length_array * np.sin(theta1) + y1
# Adding to the coordinate list (x1,y1) the points corresponding to the contact leads.
x1 = np.insert(x1, 0, 0.0)
x1 = np.insert(x1, 0,0)
# Adding to the coordinate list (x2,y2) the points corresponding to the contact leads.
x2 = np.insert(x2, 0, 0.0)
x2 = np.insert(x2, 0,0)
ypostop = box_y/2 + elec_length/2
yposbot = box_y/2 - elec_length/2
y1 = np.insert(y1, 0,ypostop)
y1 = np.insert(y1, 0,ypostop)
y2 = np.insert(y2, 0,yposbot)
y2 = np.insert(y2, 0, yposbot)
xposleft = box_x/2-lead_sep/2
xposright = box_x/2+lead_sep/2
x1[0]= xposleft
x2[0] = xposleft
x1[1] = xposright
x2[1] = xposright
# Merging [(x1,y1),(x2,y2)] in accordance to shapely format.
# coords1 = zip(x1,y1)
# coords2 = zip(x2,y2)
# coords = zip(coords1,coords2)
coords1 = list(zip(x1,y1))
coords2 = list(zip(x2,y2))
coords = list(zip(coords1,coords2))
mlines = MultiLineString(coords)
nwires_plus_leads = int(nwires+2)
# End ---------- Creation of random stick coordinates and electrodes -------------
# Start ---------- Identifying intersections between wires -------------
# all pair wire combination
lines_comb = combinations(mlines, 2)
# list storing True or False for pair intersection
intersection_check = [pair[0].intersects(pair[1]) for pair in lines_comb]
# list storing the indexes of intersection_check where the intersection between two wires is TRUE
intersections = [i for i, x in enumerate(intersection_check) if x and random.random() > percentage_chance]
# full list containing all non-repeated combinations of wires
combination_index = list((i,j) for ((i,_),(j,_)) in combinations(enumerate(mlines), 2))
# list storing the connection (wire_i, wire_j)
intersection_index = [combination_index[intersections[i]] for i in range(len(intersections))]
# checking the coordinates for interesection points
inter_point_coll = [pair[0].intersection(pair[1]) for pair in combinations(mlines, 2)]
# eliminating empty shapely points from the previous list
no_empty_inter_point_coll = [inter_point_coll[intersections[i]] for i in range(len(intersections))]
# total number of intersections
nintersections = len(intersection_index)
# End ---------- Identifying intersections between wires -------------
# Start ---------- MNR nodal mapping -------------
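                # MNR = multi-nodal representation: each wire is split into nodes at its junctions,
                # so the network contains both junction resistors (R_junc) and internal wire-segment
                # resistors (rho0 * segment_length / A0), rather than treating a whole wire as one node.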
# dictionary containing wire index: [list of wires connected to a given wire]
wire_touch_list = defaultdict(list)
for k, v in intersection_index:
wire_touch_list[k].append(v)
wire_touch_list[v].append(k)
# dictionary containing wire index: [label nodes following MNR mapping]
wire_touch_label_list = defaultdict(list)
each_wire_inter_point_storage = defaultdict(list)
label = 2
# Assigning new node labelling according to MNR mapping
for i in iter(wire_touch_list.items()):
for j in range(len(i[1])):
cpoint = mlines[i[0]].intersection(mlines[i[1][j]])
npoint = (cpoint.x,cpoint.y)
each_wire_inter_point_storage[i[0]].append(npoint)
if i[0] > 1:
wire_touch_label_list[i[0]].append(label)
label += 1
else:
wire_touch_label_list[i[0]].append(i[0])
maxl = label # dimension of the resistance matrix
# flattening intersection_index for counting the amount of occurances of wire i
flat = list(sum(intersection_index, ()))
conn_per_wire = Counter(flat)
# checking for isolated wires
complete_list = range(nwires_plus_leads)
isolated_wires = [x for x in complete_list if not x in flat]
# list containing the length segments of each wire (if it has a junction)
each_wire_length_storage = [[] for _ in range(nwires_plus_leads)]
# Routine that obtains the segment lengths on each wire
for i in each_wire_inter_point_storage:
point_ini = Point(mlines[i].coords[0])
point_fin = Point(mlines[i].coords[1])
wlength = point_ini.distance(point_fin)
wire_points = each_wire_inter_point_storage[i]
dist = [0.0]*(len(wire_points)+1)
for j in range(len(wire_points)):
point = Point(wire_points[j])
dist[j] = point_ini.distance(point)
dist[-1] = wlength
dist.sort()
dist_sep = [0.0]*len(dist)
dist_sep[0] = dist[0]
dist_sep[1:len(dist)] = [dist[k]-dist[k-1] for k in range(1,len(dist))]
each_wire_length_storage[i].append(dist_sep)
# End ---------- MNR nodal mapping -------------
# The MNR mapping associated to the NWN is also converted into a mathematical graph given by G.
# G contains 2*nintersections nodes and we conventioned that left and right electrodes are labelled as node 0 and 1, respectively.
G = nx.Graph()
G.add_nodes_from(range(2*nintersections))
mr_matrix_plus = np.zeros((2*nintersections,2*nintersections))
inner_count = 0
inter_count = 0
#nx.draw(G)
#nx.draw_random(G)
#nx.draw_circular(G)
nx.draw_spectral(G, node_size= 10)
##nx.draw_networkx_nodes(G)
plt.show()
# Start ---------- Building resistance matrix -------------
for iwire in range(nwires_plus_leads):
if each_wire_inter_point_storage[iwire]:
for j, pointj in enumerate(each_wire_inter_point_storage[iwire]):
point = Point(pointj)
for i, pointw in enumerate(each_wire_inter_point_storage[iwire]):
comp_pointw = Point(pointw)
inter_dist = point.distance(comp_pointw)
round_inter_dist = round(inter_dist, 4)
for il in each_wire_length_storage[iwire][0]:
value = float(il)
value = round(value,4)
if value == round_inter_dist and value != 0:
inner_resis = (float(value) * rho0 / A0)
if iwire != 0 and iwire != 1 and mr_matrix_plus[wire_touch_label_list[iwire][i], wire_touch_label_list[iwire][j]] == 0.0:
mr_matrix_plus[wire_touch_label_list[iwire][i], wire_touch_label_list[iwire][j]] = -1.0/inner_resis
mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[iwire][i]] = -1.0/inner_resis
G.add_edge(wire_touch_label_list[iwire][i],wire_touch_label_list[iwire][j])
inner_count += 1
for k, label in enumerate(wire_touch_list[iwire]):
for kk, pointk in enumerate(each_wire_inter_point_storage[label]):
pointk = Point(pointk)
inter_dist = point.distance(pointk)
round_inter_dist = round(inter_dist, 4)
if round_inter_dist == 0 and mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[label][kk]] == 0:
G.add_edge(wire_touch_label_list[label][kk],wire_touch_label_list[iwire][j])
r0 = -1/R_junc
mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[label][kk]] = r0
mr_matrix_plus[wire_touch_label_list[label][kk], wire_touch_label_list[iwire][j]] = r0
sum_rows_mr_plus = mr_matrix_plus.sum(1)
np.fill_diagonal(mr_matrix_plus, abs(sum_rows_mr_plus))
mr_nozero_rows_plus = mr_matrix_plus[~(mr_matrix_plus==0).all(1),:]
# nonconnected wires are eliminated from the resistance matrix
mr_nonconnected_plus = mr_nozero_rows_plus[:,~(mr_nozero_rows_plus==0).all(0)]
# End ---------- Building resistance matrix -------------
# input current vector
i0 = 1.0 # absolute value of the current (in Amp)
ic = np.zeros(mr_nonconnected_plus.shape[0])
ic[0] = +i0
ic[1] = -i0
Imatrix = m(ic)
# Solving Ohm's law in matrix form, R^(-1)V = I. Resulting voltages are in Volts.
#Amatrix = m(mr_nonconnected_plus)
#Amatrix = np.array(mr_nonconnected_plus)
#ks = Symmlq(Imatrix)
#elec_pot_mr = ks.solve(Gfun)
#print Gfun
#print Imatrix
#or
#ks = Symmlq(Gfun)
#print Amatrix
#elec_pot_mr = ks.solve(Imatrix)
Amatrix = csc_matrix(mr_nonconnected_plus)
elec_pot_mr = minres(Amatrix, Imatrix, tol=tol)
#elec_pot_mr = Symmlq(Imatrix, Gfun, show=show, rtol=tol, maxit=maxit)
#elec_pot_mr = minres(Imatrix, Amatrix)
# Sheet resistance
resistance = ((elec_pot_mr[0][0] - elec_pot_mr[0][1]))/i0
# Checking if there is a path connecting electrodes at nodes 0 and 1
if nx.has_path(G,0,1):
separation_short = nx.shortest_path_length(G,0,1)
res_list.append(resistance)
short_sep_list.append(separation_short)
junc_dens.append(float(nintersections)/area)
except IndexError:
continue
break
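        # Area fill fraction AF = density * diameter * length (the 0.001 factor converts the
        # diameter from nm to um); the optical transmittance is then modelled as T = exp(-extinction_coeff * AF).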
AF = density*wire_diameter*wire_length*0.001
transmittance = round(math.exp(-AF*extinction_coeff), 4)
junc_avg = np.mean(junc_dens)
resAvg = np.mean(res_list)
resStd = np.std(res_list)
short = np.mean(short_sep_list)
dens_temp.append(junc_avg)
avg_res_temp.append(resAvg)
st_dev_temp.append(resStd)
open(res_file,"a").write("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n" %(density,AF,transmittance,resAvg,resStd,junc_avg,R_junc,rho0,wire_diameter,wire_length,box_length,samples,nstep,n_initial,n_final,tol,distl,lower_l,upper_l,sigmal,percentage_chance,round(time.process_time() - start_time, 5)))
print("Density: %s, Transmittance: %s, Average resistance: %s, Standard deviation: %s, Junction density: %s, Junctions removed: %s" %(density,transmittance,round(resAvg, 6),round(resStd, 4),round(junc_avg, 4), percentage_chance))
print("runtime was", round(time.process_time() - start_time, 5), "seconds")
#remove 'nan' data points from arrays to avoid curve fit errors
        if not (np.isnan(transmittance) or np.isnan(resAvg)):
transmittancelist[d_counter].append(transmittance)
resistancelist[d_counter].append(resAvg)
res_list=[]
short_sep_list=[]
junc_dens=[]
d_counter=d_counter+1
print(transmittancelist)
print(resistancelist)
print(diameterlist)
for j in np.arange(0,d_counter,1):
transmittancelist[j]=np.array(transmittancelist[j], dtype=np.float64)
resistancelist[j]=np.array(resistancelist[j], dtype=np.float64)
#T vs Rs plot and fit
from scipy.optimize import curve_fit
Z0=377
def T_perc_func(r, p, n):
return (1+(1/p)*((Z0/r)**(1/(1+n))))**(-2)
def T_bulk_func(r,sratio):
return (1+(sratio*Z0/(2*r)))**(-2)
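# Both fit functions are the usual transparent-conductor figure-of-merit expressions,
# with Z0 = 377 Ohm the impedance of free space:
#   percolative regime: T = (1 + (1/Pi) * (Z0/Rs)**(1/(n+1)))**(-2)
#   bulk regime:        T = (1 + (Z0/(2*Rs)) * sigma_ratio)**(-2)
# where sigma_ratio plays the role of the optical-to-DC conductivity ratio.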
#may need to adjust if further colors necessary
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
for i in np.arange(0,d_counter,1):
popt_perc, pcov_perc = curve_fit(T_perc_func, resistancelist[i], transmittancelist[i])
popt_perc
popt_bulk, pcov_bulk = curve_fit(T_bulk_func, resistancelist[i], transmittancelist[i])
popt_bulk
resistancelist_srt=np.sort(resistancelist[i])
#print(resistancelist_srt)
reslistlength=len(resistancelist_srt)
res_start=resistancelist_srt[0]
res_end=resistancelist_srt[reslistlength-1]
res_step= (res_end - res_start)/25
print(res_start, res_end, res_step)
resfitlist=[]
for j in np.arange(res_start,res_end + (res_step/2),res_step):
resfitlist.append(j)
#print(resfitlist)
resfitlist=np.array(resfitlist, dtype=np.float64)
plotcolor=colors[i]
plt.plot(resfitlist, T_perc_func(resfitlist, *popt_perc), plotcolor, linestyle='-', label='Percolative fit: \u03A0 =%5.3f, n=%5.3f' % tuple(popt_perc))
plt.plot(resfitlist, T_bulk_func(resfitlist, *popt_bulk), plotcolor, linestyle='--', label='Bulk fit: \u03C3 ratio=%5.3f' % tuple(popt_bulk))
plt.plot(resistancelist[i], transmittancelist[i], plotcolor, marker='o', linestyle='None', label='diameter=%s nm' %(diameterlist[i]))
plt.title('T vs Rs')
plt.ylabel('T')
plt.xlabel('Rs (Ohm/sq) - Log scale')
plt.xscale('log')
leg = plt.legend(loc='best', ncol=2, mode="expand", shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.show()
#Convert T to T^(-1/2)-1 and take log of both arrays
for j in np.arange(0,d_counter,1):
transmittancelist[j]=np.log((transmittancelist[j]**(-1/2))-1)
resistancelist[j]=np.log(resistancelist[j])
#print(transmittancelist)
#print(resistancelist)
def best_fit_slope_and_intercept(xs,ys):
m = (((mean(xs)*mean(ys)) - mean(xs*ys)) /
((mean(xs)*mean(xs)) - mean(xs*xs)))
b = mean(ys) - m*mean(xs)
return m, b
#line fit and plot data on log scale
for i in np.arange(0,d_counter,1):
m, b = best_fit_slope_and_intercept(resistancelist[i],transmittancelist[i])
print(m,b)
#plot best fit line on graph
regression_line = [(m*x)+b for x in resistancelist[i]]
plotcolor=colors[i]
plt.plot(resistancelist[i],transmittancelist[i], plotcolor, marker= 'o', linestyle='None', label='diameter=%s nm' %(diameterlist[i]))
plt.plot(resistancelist[i], regression_line, plotcolor, linestyle='-', label='Line Fit y = %s x + %s' %(round(m,3),round(b,3)))
plt.title('Log(T^(-1/2)-1) vs Log(Rs) with Line Fit')
plt.ylabel('Log(T^(-1/2)-1)')
plt.xlabel('Log(Rs)')
leg = plt.legend(loc='best', ncol=2, mode="expand", shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.show()
open(res_file,"a").close()
duration = 0.1
freq = 1100
| gpl-2.0 | -3,190,882,586,409,944,000 | 40.416667 | 327 | 0.551112 | false |
rbuffat/pyidf | tests/test_zonemixing.py | 1 | 5372 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_airflow import ZoneMixing
log = logging.getLogger(__name__)
class TestZoneMixing(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_zonemixing(self):
pyidf.validation_level = ValidationLevel.error
obj = ZoneMixing()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# alpha
var_design_flow_rate_calculation_method = "Flow/Zone"
obj.design_flow_rate_calculation_method = var_design_flow_rate_calculation_method
# real
var_design_flow_rate = 0.0
obj.design_flow_rate = var_design_flow_rate
# real
var_flow_rate_per_zone_floor_area = 0.0
obj.flow_rate_per_zone_floor_area = var_flow_rate_per_zone_floor_area
# real
var_flow_rate_per_person = 0.0
obj.flow_rate_per_person = var_flow_rate_per_person
# real
var_air_changes_per_hour = 0.0
obj.air_changes_per_hour = var_air_changes_per_hour
# object-list
var_source_zone_name = "object-list|Source Zone Name"
obj.source_zone_name = var_source_zone_name
# real
var_delta_temperature = 10.1
obj.delta_temperature = var_delta_temperature
# object-list
var_delta_temperature_schedule_name = "object-list|Delta Temperature Schedule Name"
obj.delta_temperature_schedule_name = var_delta_temperature_schedule_name
# object-list
var_minimum_zone_temperature_schedule_name = "object-list|Minimum Zone Temperature Schedule Name"
obj.minimum_zone_temperature_schedule_name = var_minimum_zone_temperature_schedule_name
# object-list
var_maximum_zone_temperature_schedule_name = "object-list|Maximum Zone Temperature Schedule Name"
obj.maximum_zone_temperature_schedule_name = var_maximum_zone_temperature_schedule_name
# object-list
var_minimum_source_zone_temperature_schedule_name = "object-list|Minimum Source Zone Temperature Schedule Name"
obj.minimum_source_zone_temperature_schedule_name = var_minimum_source_zone_temperature_schedule_name
# object-list
var_maximum_source_zone_temperature_schedule_name = "object-list|Maximum Source Zone Temperature Schedule Name"
obj.maximum_source_zone_temperature_schedule_name = var_maximum_source_zone_temperature_schedule_name
# object-list
var_minimum_outdoor_temperature_schedule_name = "object-list|Minimum Outdoor Temperature Schedule Name"
obj.minimum_outdoor_temperature_schedule_name = var_minimum_outdoor_temperature_schedule_name
# object-list
var_maximum_outdoor_temperature_schedule_name = "object-list|Maximum Outdoor Temperature Schedule Name"
obj.maximum_outdoor_temperature_schedule_name = var_maximum_outdoor_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.zonemixings[0].name, var_name)
self.assertEqual(idf2.zonemixings[0].zone_name, var_zone_name)
self.assertEqual(idf2.zonemixings[0].schedule_name, var_schedule_name)
self.assertEqual(idf2.zonemixings[0].design_flow_rate_calculation_method, var_design_flow_rate_calculation_method)
self.assertAlmostEqual(idf2.zonemixings[0].design_flow_rate, var_design_flow_rate)
self.assertAlmostEqual(idf2.zonemixings[0].flow_rate_per_zone_floor_area, var_flow_rate_per_zone_floor_area)
self.assertAlmostEqual(idf2.zonemixings[0].flow_rate_per_person, var_flow_rate_per_person)
self.assertAlmostEqual(idf2.zonemixings[0].air_changes_per_hour, var_air_changes_per_hour)
self.assertEqual(idf2.zonemixings[0].source_zone_name, var_source_zone_name)
self.assertAlmostEqual(idf2.zonemixings[0].delta_temperature, var_delta_temperature)
self.assertEqual(idf2.zonemixings[0].delta_temperature_schedule_name, var_delta_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].minimum_zone_temperature_schedule_name, var_minimum_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].maximum_zone_temperature_schedule_name, var_maximum_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].minimum_source_zone_temperature_schedule_name, var_minimum_source_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].maximum_source_zone_temperature_schedule_name, var_maximum_source_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].minimum_outdoor_temperature_schedule_name, var_minimum_outdoor_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].maximum_outdoor_temperature_schedule_name, var_maximum_outdoor_temperature_schedule_name) | apache-2.0 | 8,416,077,038,472,639,000 | 51.676471 | 142 | 0.706627 | false |
applied-mixnetworks/txmix | txmix/udp_transport.py | 1 | 1452 |
from __future__ import print_function
import attr
from zope.interface import implementer
from twisted.internet.interfaces import IReactorUDP
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import defer
from txmix import IMixTransport
@implementer(IMixTransport)
@attr.s()
class UDPTransport(DatagramProtocol, object):
"""
implements the IMixTransport interface
"""
name = "udp"
reactor = attr.ib(validator=attr.validators.provides(IReactorUDP))
addr = attr.ib(validator=attr.validators.instance_of(tuple))
def register_protocol(self, protocol):
# XXX todo: assert that protocol provides the appropriate interface
self.protocol = protocol
def start(self):
"""
make this transport begin listening on the specified interface and UDP port
interface must be an IP address
"""
interface, port = self.addr
self.reactor.listenUDP(port, self, interface=interface)
return defer.succeed(None)
def send(self, addr, message):
"""
send message to addr
where addr is a 2-tuple of type: (ip address, UDP port)
"""
self.transport.write(message, addr)
return defer.succeed(None)
def datagramReceived(self, datagram, addr):
"""
i am called by the twisted reactor when our transport receives a UDP packet
"""
self.protocol.received(datagram)
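# Minimal usage sketch (assumes `my_protocol` is an object exposing a
# `received(datagram)` method, as required by register_protocol above):
#
#   from twisted.internet import reactor
#   transport = UDPTransport(reactor=reactor, addr=("127.0.0.1", 6789))
#   transport.register_protocol(my_protocol)
#   transport.start()
#   transport.send(("10.0.0.2", 6789), b"packet bytes")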
| gpl-3.0 | 8,444,479,411,809,278,000 | 29.25 | 83 | 0.683196 | false |
oscarfonts/sensor-widgets | doc/en/conf.py | 1 | 9203 | # -*- coding: utf-8 -*-
#
# Sensor Widgets documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 4 12:48:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sensor Widgets'
copyright = u'2015, Oscar Fonts'
author = u'Oscar Fonts'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SensorWidgetsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SensorWidgets.tex', u'Sensor Widgets Documentation',
u'Oscar Fonts', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sensorwidgets', u'Sensor Widgets Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SensorWidgets', u'Sensor Widgets Documentation',
author, 'SensorWidgets', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -4,561,226,566,972,050,400 | 31.40493 | 79 | 0.708356 | false |
catapult-project/catapult | telemetry/telemetry/internal/platform/network_controller_backend.py | 3 | 7521 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
from telemetry.internal.util import webpagereplay_go_server
from telemetry.internal.util import ts_proxy_server
from telemetry.util import wpr_modes
class ArchiveDoesNotExistError(Exception):
"""Raised when the archive path does not exist for replay mode."""
pass
class ReplayAndBrowserPortsError(Exception):
"""Raised an existing browser would get different remote replay ports."""
pass
class NetworkControllerBackend(object):
"""Control network settings and servers to simulate the Web.
Network changes include forwarding device ports to host platform ports.
Web Page Replay is used to record and replay HTTP/HTTPS responses.
"""
def __init__(self, platform_backend):
self._platform_backend = platform_backend
# Controller options --- bracketed by Open/Close
self._wpr_mode = None
# Replay options --- bracketed by StartReplay/StopReplay
self._archive_path = None
self._make_javascript_deterministic = None
self._extra_wpr_args = None
# Network control services
self._ts_proxy_server = None
self._forwarder = None
self._wpr_server = None
def Open(self, wpr_mode):
"""Get the target platform ready for network control.
This will both start a TsProxy server and set up a forwarder to it.
If options are compatible and the controller is already open, it will
try to re-use the existing server and forwarder.
After network interactions are over, clients should call the Close method.
Args:
wpr_mode: a mode for web page replay; available modes are
          wpr_modes.WPR_OFF, wpr_modes.WPR_APPEND, wpr_modes.WPR_REPLAY, or
wpr_modes.WPR_RECORD. Setting wpr_modes.WPR_OFF configures the
network controller to use live traffic.
"""
if self.is_open:
use_live_traffic = wpr_mode == wpr_modes.WPR_OFF
if self.use_live_traffic != use_live_traffic:
self.Close() # Need to restart the current TsProxy and forwarder.
else:
if self._wpr_mode != wpr_mode:
self.StopReplay() # Need to restart the WPR server, if any.
self._wpr_mode = wpr_mode
return
self._wpr_mode = wpr_mode
try:
local_port = self._StartTsProxyServer()
self._forwarder = self._platform_backend.forwarder_factory.Create(
local_port=local_port, remote_port=None)
except Exception:
self.Close()
raise
@property
def is_open(self):
return self._ts_proxy_server is not None
@property
def use_live_traffic(self):
return self._wpr_mode == wpr_modes.WPR_OFF
@property
def host_ip(self):
return self._platform_backend.forwarder_factory.host_ip
def Close(self):
"""Undo changes in the target platform used for network control.
Implicitly stops replay if currently active.
"""
self.StopReplay()
self._StopForwarder()
self._StopTsProxyServer()
self._wpr_mode = None
def StartReplay(self, archive_path, make_javascript_deterministic,
extra_wpr_args):
"""Start web page replay from a given replay archive.
Starts as needed, and reuses if possible, the replay server on the host.
Implementation details
----------------------
    The local host is where Telemetry is run. The remote host is where
the target application is run. The local and remote hosts may be
the same (e.g., testing a desktop browser) or different (e.g., testing
an android browser).
A replay server is started on the local host using the local ports, while
a forwarder ties the local to the remote ports.
Both local and remote ports may be zero. In that case they are determined
by the replay server and the forwarder respectively. Setting dns to None
disables DNS traffic.
Args:
archive_path: a path to a specific WPR archive.
make_javascript_deterministic: True if replay should inject a script
to make JavaScript behave deterministically (e.g., override Date()).
extra_wpr_args: a tuple with any extra args to send to the WPR server.
"""
assert self.is_open, 'Network controller is not open'
if self.use_live_traffic:
return
if not archive_path:
# TODO(slamm, tonyg): Ideally, replay mode should be stopped when there is
# no archive path. However, if the replay server already started, and
# a file URL is tested with the
# telemetry.core.local_server.LocalServerController, then the
# replay server forwards requests to it. (Chrome is configured to use
      # fixed ports for all HTTP/HTTPS requests.)
return
if (self._wpr_mode == wpr_modes.WPR_REPLAY and
not os.path.exists(archive_path)):
raise ArchiveDoesNotExistError(
'Archive path does not exist: %s' % archive_path)
if (self._wpr_server is not None and
self._archive_path == archive_path and
self._make_javascript_deterministic == make_javascript_deterministic and
self._extra_wpr_args == extra_wpr_args):
return # We may reuse the existing replay server.
self._archive_path = archive_path
self._make_javascript_deterministic = make_javascript_deterministic
self._extra_wpr_args = extra_wpr_args
local_ports = self._StartReplayServer()
self._ts_proxy_server.UpdateOutboundPorts(
http_port=local_ports['http'], https_port=local_ports['https'])
def StopReplay(self):
"""Stop web page replay.
Stops the replay server if currently active.
"""
self._StopReplayServer()
self._archive_path = None
self._make_javascript_deterministic = None
self._extra_wpr_args = None
def _StartReplayServer(self):
"""Start the replay server and return the started local_ports."""
self._StopReplayServer() # In case it was already running.
self._wpr_server = webpagereplay_go_server.ReplayServer(
self._archive_path,
self.host_ip,
http_port=0,
https_port=0,
replay_options=self._ReplayCommandLineArgs())
return self._wpr_server.StartServer()
def _StopReplayServer(self):
"""Stop the replay server only."""
if self._wpr_server:
self._wpr_server.StopServer()
self._wpr_server = None
def _StopForwarder(self):
if self._forwarder:
self._forwarder.Close()
self._forwarder = None
def _StopTsProxyServer(self):
"""Stop the replay server only."""
if self._ts_proxy_server:
self._ts_proxy_server.StopServer()
self._ts_proxy_server = None
def _ReplayCommandLineArgs(self):
wpr_args = list(self._extra_wpr_args)
if self._wpr_mode == wpr_modes.WPR_APPEND:
wpr_args.append('--append')
elif self._wpr_mode == wpr_modes.WPR_RECORD:
wpr_args.append('--record')
if not self._make_javascript_deterministic:
wpr_args.append('--inject_scripts=')
return wpr_args
def _StartTsProxyServer(self):
assert not self._ts_proxy_server, 'ts_proxy_server is already started'
host_ip = None if self.use_live_traffic else self.host_ip
self._ts_proxy_server = ts_proxy_server.TsProxyServer(host_ip=host_ip)
self._ts_proxy_server.StartServer()
return self._ts_proxy_server.port
@property
def forwarder(self):
return self._forwarder
@property
def ts_proxy_server(self):
return self._ts_proxy_server
| bsd-3-clause | -3,696,970,511,087,998,500 | 33.819444 | 80 | 0.68568 | false |
felipenaselva/felipe.repository | script.module.placenta/lib/resources/lib/sources/fr/filmenstreaminghd.py | 1 | 6831 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re, urllib, urlparse, base64, json, unicodedata
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.priority = 1
self.language = ['fr']
self.domains = ['filmenstreaminghd.co']
#http://dpstreaming.tv/?s=max+steel
#http://dpstreaming.tv/max-steel-vostfr-streaming-telecharger/']
self.base_link = 'http://www.filmenstreaminghd.co'
self.key_link = '?'
self.moviesearch_link = 's=%s'
self.tvsearch_link = 's=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
print '------------------------------- -------------------------------'
sources = []
print url
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
season = data['season'] if 'season' in data else False
episode = data['episode'] if 'episode' in data else False
print season, episode
if season and episode:
print 'TV'
self.search_link = 'query=%s&submit=Submit+Query'
aTitle = data['tvshowtitle']
else:
self.search_link = 'query=%s&submit=Submit+Query'
aTitle = data['title']
post = self.search_link % (urllib.quote_plus(cleantitle.query(aTitle)))
url = 'http://www.filmenstreaminghd.com/recherche/'
t = cleantitle.get(aTitle)
r = client.request(url, XHR=True, referer=url, post=post)
r = client.parseDOM(r, 'div', attrs={'class': 'film-k kutu-icerik kat'})
if season and episode:
t = t + 'saison0' + season
r = client.parseDOM(r, 'div', attrs={'class': 'play fa fa-play-circle'})
r = sorted(set(r))
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1])][0]
#r = sorted(set(r))
url0 = '%s%s' % ('http://www.filmenstreaminghd.com' , r)
print url0
url = client.replaceHTMLCodes(url0)
            url = url.encode('utf-8')
r = client.request(url, XHR=True, referer=url)
r = re.sub('(\n|\t)', '', r)
langue = re.compile('<b class=\"fa fa-cc\"></b><span>(.+?)</span>', re.MULTILINE | re.DOTALL).findall(r)[0]
if langue == 'VF':
langue = 'FR'
quality2 = re.compile('<div class=\"kalite\">(.+?)</div>', re.MULTILINE | re.DOTALL).findall(r)[0]
quality2 = re.sub('-', '', quality2)
if season and episode:
unLien0a = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri'})[0]
r = re.compile('Saison\s+0%s\s+\-\s+Episode\s+0%s(.+?)class=\"dropit-trigger\">' % (season, episode), re.MULTILINE | re.DOTALL).findall(unLien0a)[0]
unLien0b = client.parseDOM(r, 'li', ret='id')
else:
r = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri film'})
unLien0b = client.parseDOM(r, 'span', ret='id')
counter = 0
for unLienUrl in unLien0b:
if 'gf-' in unLienUrl:
continue
dataUrl = urllib.urlencode({'pid': unLienUrl[1:]})
dataUrl = client.request(url0, post=dataUrl, XHR=True, referer=url0)
try:
url = client.parseDOM(dataUrl, 'iframe', ret='src')[1]
except:
url = client.parseDOM(dataUrl, 'iframe', ret='src')[0]
if url.startswith('//'):
url = url.replace('//', '', 1)
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: continue
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
url = url.encode('utf-8')
if '1080p' in quality2:
quality = '1080p'
elif '720p' in quality2 or 'bdrip' in quality2 or 'hdrip' in quality2:
quality = 'HD'
else:
quality = 'SD'
if 'dvdscr' in quality2 or 'r5' in quality2 or 'r6' in quality2:
quality2 = 'SCR'
elif 'camrip' in quality2 or 'tsrip' in quality2 or 'hdcam' in quality2 or 'hdts' in quality2 or 'dvdcam' in quality2 or 'dvdts' in quality2 or 'cam' in quality2 or 'telesync' in quality2 or 'ts' in quality2:
quality2 = 'CAM'
sources.append({'source': host, 'quality': quality, 'language': langue, 'url': url, 'direct': False, 'debridonly': False})
print sources
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | 2,041,355,159,829,254,000 | 36.740331 | 224 | 0.495389 | false |
jasonbaldridge/try-tf | softmax-python3.py | 1 | 5020 | import tensorflow.python.platform
import numpy as np
import tensorflow as tf
import plot_boundary_on_data
# Global variables.
NUM_LABELS = 2 # The number of labels.
BATCH_SIZE = 100 # The number of training examples to use per training step.
# Define the flags useable from the command line.
tf.app.flags.DEFINE_string('train', None,
'File containing the training data (labels & features).')
tf.app.flags.DEFINE_string('test', None,
'File containing the test data (labels & features).')
tf.app.flags.DEFINE_integer('num_epochs', 1,
                            'Number of passes over the training data.')
tf.app.flags.DEFINE_boolean('verbose', False, 'Produce verbose output.')
tf.app.flags.DEFINE_boolean('plot', True, 'Plot the final decision boundary on the data.')
FLAGS = tf.app.flags.FLAGS
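# Example invocation (a sketch; the CSV paths are placeholders for files whose
# rows look like "label,feat_0,...,feat_n" as described below):
#   python softmax-python3.py --train train.csv --test test.csv --num_epochs 5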
# Extract numpy representations of the labels and features given rows consisting of:
# label, feat_0, feat_1, ..., feat_n
def extract_data(filename):
# Arrays to hold the labels and feature vectors.
labels = []
fvecs = []
# Iterate over the rows, splitting the label from the features. Convert labels
# to integers and features to floats.
for line in open(filename):
row = line.split(",")
labels.append(int(row[0]))
fvecs.append([float(x) for x in row[1:]])
# Convert the array of float arrays into a numpy float matrix.
fvecs_np = np.matrix(fvecs).astype(np.float32)
# Convert the array of int labels into a numpy array.
labels_np = np.array(labels).astype(dtype=np.uint8)
# Convert the int numpy array into a one-hot matrix.
labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
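    # Worked example of the broadcasting trick above: with NUM_LABELS = 2 and
    # labels_np = [0, 1, 1], comparing against np.arange(2) yields
    # [[1., 0.], [0., 1.], [0., 1.]].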
# Return a pair of the feature matrix and the one-hot label matrix.
return fvecs_np,labels_onehot
def main(argv=None):
# Be verbose?
verbose = FLAGS.verbose
# Plot?
plot = FLAGS.plot
# Get the data.
train_data_filename = FLAGS.train
test_data_filename = FLAGS.test
# Extract it into numpy matrices.
train_data,train_labels = extract_data(train_data_filename)
test_data, test_labels = extract_data(test_data_filename)
# Get the shape of the training data.
train_size,num_features = train_data.shape
# Get the number of epochs for training.
num_epochs = FLAGS.num_epochs
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
x = tf.placeholder("float", shape=[None, num_features])
y_ = tf.placeholder("float", shape=[None, NUM_LABELS])
# These are the weights that inform how much each feature contributes to
# the classification.
W = tf.Variable(tf.zeros([num_features,NUM_LABELS]))
b = tf.Variable(tf.zeros([NUM_LABELS]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
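    # Shape check: x is [batch, num_features], W is [num_features, NUM_LABELS],
    # so y is [batch, NUM_LABELS] and each row is a probability distribution
    # over the labels.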
# Optimization.
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# For the test data, hold the entire dataset in one constant node.
test_data_node = tf.constant(test_data)
# Evaluation.
    predicted_class = tf.argmax(y,1)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Create a local session to run this computation.
with tf.Session() as s:
# Run all the initializers to prepare the trainable parameters.
tf.global_variables_initializer().run()
# Iterate and train.
for step in range(num_epochs * train_size // BATCH_SIZE):
offset = (step * BATCH_SIZE) % train_size
# get a batch of data
batch_data = train_data[offset:(offset + BATCH_SIZE), :]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# feed data into the model
train_step.run(feed_dict={x: batch_data, y_: batch_labels})
# Give very detailed output.
if verbose:
            print()
print('Weight matrix.')
print (s.run(W))
            print()
print ('Bias vector.')
print (s.run(b))
            print()
print ("Applying model to first test instance.")
first = test_data[:1]
print ("Point =", first)
print ("Wx+b = ", s.run(tf.matmul(first,W)+b))
print ("softmax(Wx+b) = ", s.run(tf.nn.softmax(tf.matmul(first,W)+b)))
            print()
print ("Accuracy:", accuracy.eval(feed_dict={x: test_data, y_: test_labels}))
if plot:
            eval_fun = lambda X: predicted_class.eval(feed_dict={x:X})
plot_boundary_on_data.plot(test_data, test_labels, eval_fun)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | 6,272,947,537,748,210,000 | 34.857143 | 90 | 0.6251 | false |
vamdt/spider | douban/pics.py | 1 | 1638 | # coding=utf-8
import re
import urllib
import json
import os, random
BASE_DOWN_DIR = './download'
BASE_DOWN_POSTS_DIR = BASE_DOWN_DIR + '/posts'
BASE_URL = 'http://www.douban.com/photos/photo/2230938262/'
class AppURLopener(urllib.FancyURLopener):
version = "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.4 Safari/537.36"
urllib._urlopener = AppURLopener()
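# Assigning urllib._urlopener makes the urllib.urlopen() calls below send the
# browser-like User-Agent defined above (Python 2 urllib behaviour).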
def main():
    i = 0
    url = BASE_URL
    while i < 3:
        i = i + 1
        url = play(url, i)
def play(url, index):
f = urllib.urlopen(url)
html = f.read()
print html
pattern = re.compile(u'<a href="(http://www.douban.com/photos/photo/\d+/#image)" title=".+" id="next_photo">.+</a>',re.DOTALL)
url = pattern.findall(html)[0]
p2 = re.compile(u'<a class="mainphoto" href="\S+" title="\S+">\s+<img src="(http://img.+\.douban\.com/view/photo/photo/public/.+\.jpg)" />\s+</a>', re.DOTALL)
img_url = p2.findall(html)[0]
print img_url
create_dirs(BASE_DOWN_POSTS_DIR)
save_posts(img_url, index)
return url
def get_html(url):
return urllib.urlopen(url).read()
def create_dirs(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def save_posts(url, index):
html = get_html(url)
file_name = BASE_DOWN_POSTS_DIR + '/' + str(index) + '.jpg'
save( html, file_name)
def save(obj, name):
file = open(name, 'w')
file.write(str(obj))
    file.close()
def save_as_json(obj, name):
json_data = json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
save(json_data, name)
if __name__ == '__main__':
main() | mit | 6,337,651,235,404,347,000 | 25.015873 | 162 | 0.616606 | false |
ewindisch/nova | nova/compute/manager.py | 1 | 251375 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import socket
import sys
import time
import traceback
import uuid
import eventlet.event
from eventlet import greenthread
import eventlet.timeout
from oslo.config import cfg
from oslo import messaging
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import external_event as external_event_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_group as instance_group_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
compute_opts = [
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.StrOpt('default_access_ip_network_name',
help='Name of network to use to set access IPs for instances'),
cfg.BoolOpt('defer_iptables_apply',
default=False,
help='Whether to batch up the application of IPTables rules'
' during a host restart and apply all at the end of the'
' init phase'),
cfg.StrOpt('instances_path',
default=paths.state_path_def('instances'),
help='Where instances are stored on disk'),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists"
" notifications"),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help='Whether to start guests that were running before the '
'host rebooted'),
cfg.IntOpt('network_allocate_retries',
default=0,
help="Number of times to retry network allocation on failures"),
]
interval_opts = [
cfg.IntOpt('bandwidth_poll_interval',
default=600,
help='Interval to pull network bandwidth usage info. Not '
'supported on all hypervisors. Set to 0 to disable.'),
cfg.IntOpt('sync_power_state_interval',
default=600,
help='Interval to sync power states between '
'the database and the hypervisor'),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance info_cache self "
"healing updates"),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
cfg.IntOpt('volume_usage_poll_interval',
default=0,
help='Interval in seconds for gathering volume usages'),
cfg.IntOpt('shelved_poll_interval',
default=3600,
help='Interval in seconds for polling shelved instances to '
'offload'),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
'when shelved'),
cfg.IntOpt('instance_delete_interval',
default=300,
help=('Interval in seconds for retrying failed instance file '
'deletes'))
]
timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status."
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
]
running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
help="Action to take if a running deleted instance is detected."
"Valid options are 'noop', 'log', 'shutdown', or 'reap'. "
"Set to 'noop' to take no action."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
help="Number of seconds to wait between runs of the cleanup "
"task."),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
help=('The number of times to attempt to reap an instance\'s '
'files.')),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
# Find migration argument. The argument cannot be
# defined by position because the wrapped functions
# do not have the same signature.
for arg in args:
if not isinstance(arg, migration_obj.Migration):
continue
status = arg.status
if status not in ['migrating', 'post-migrating']:
continue
arg.status = 'error'
try:
arg.save(context.elevated())
except Exception:
LOG.debug(_('Error setting migration status '
'for instance %s.') %
arg.instance_uuid, exc_info=True)
break
return decorated_function
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info(_("Task possibly preempted: %s") % e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
try:
self._instance_update(context,
kwargs['instance']['uuid'],
task_state=None)
except Exception:
pass
return decorated_function
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
# exception. So, we update kwargs with the values from args first.
# then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, kwargs['instance'],
e, sys.exc_info())
return decorated_function
def wrap_instance_event(function):
"""Wraps a method to log the event taken on the instance, and result.
This decorator wraps a method to log the start and result of an event, as
part of an action taken on an instance.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context, *args,
**kwargs)
instance_uuid = keyed_args['instance']['uuid']
event_name = 'compute_{0}'.format(function.func_name)
with compute_utils.EventReporter(context, self.conductor_api,
event_name, instance_uuid):
function(self, context, *args, **kwargs)
return decorated_function
def delete_image_on_error(function):
"""Used for snapshot related method to ensure the image created in
compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug(_("Cleaning up image %s") % image_id,
exc_info=True, instance=instance)
try:
image_service = glance.get_default_image_service()
image_service.delete(context, image_id)
except Exception:
LOG.exception(_("Error while trying to clean up image %s")
% image_id, instance=instance)
return decorated_function
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
metas = ['metadata', 'system_metadata']
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = migration_obj.Migration._from_db_object(
context.elevated(), migration_obj.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
"""Wraps a method that expects a new-world aggregate."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
aggregate = kwargs.get('aggregate')
if isinstance(aggregate, dict):
aggregate = aggregate_obj.Aggregate._from_db_object(
context.elevated(), aggregate_obj.Aggregate(),
aggregate)
kwargs['aggregate'] = aggregate
return function(self, context, *args, **kwargs)
return decorated_function
def _get_image_meta(context, image_ref):
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
class InstanceEvents(object):
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, event_name):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param event_name: the name of the event we're expecting
:returns: an event object that should be wait()'d on
"""
@utils.synchronized(self._lock_name)
def _create_or_get_event():
if instance.uuid not in self._events:
self._events.setdefault(instance.uuid, {})
return self._events[instance.uuid].setdefault(
event_name, eventlet.event.Event())
LOG.debug(_('Preparing to wait for external event %(event)s '
'for instance %(uuid)s'), {'event': event_name,
'uuid': instance.uuid})
return _create_or_get_event()
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
@utils.synchronized(self._lock_name)
def _pop_event():
events = self._events.get(instance.uuid)
if not events:
return None
_event = events.pop(event.key, None)
if not events:
del self._events[instance.uuid]
return _event
return _pop_event()
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name)
def _clear_events():
return self._events.pop(instance.uuid, {})
return _clear_events()
class ComputeVirtAPI(virtapi.VirtAPI):
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def instance_update(self, context, instance_uuid, updates):
return self._compute._instance_update(context,
instance_uuid,
**updates)
def provider_fw_rule_get_all(self, context):
return self._compute.conductor_api.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return self._compute.conductor_api.agent_build_get_by_triple(
context, hypervisor, os, architecture)
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param:instance: The instance for which an event is expected
:param:event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
:param:deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
        :param:error_callback: A function to be called if an event arrives
                               with a status other than 'completed' (see
                               above for how its return value is handled)
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
if isinstance(event_name, tuple):
name, tag = event_name
event_name = external_event_obj.InstanceExternalEvent.make_key(
name, tag)
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, event_name))
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.23')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = volume.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_vol_usage_poll = 0
self._last_info_cache_heal = 0
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self._resource_tracker_dict = {}
self.instance_events = InstanceEvents()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nodename)
self._resource_tracker_dict[nodename] = rt
return rt
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
instance_ref = self.conductor_api.instance_update(context,
instance_uuid,
**kwargs)
if (instance_ref['host'] == self.host and
self.driver.node_is_available(instance_ref['node'])):
rt = self._get_resource_tracker(instance_ref.get('node'))
rt.update_usage(context, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance_uuid):
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug(_('Instance has been destroyed from under us while '
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
def _set_instance_obj_error_state(self, context, instance):
try:
instance.vm_state = vm_states.ERROR
instance.save()
except exception.InstanceNotFound:
LOG.debug(_('Instance has been destroyed from under us while '
'trying to set it to ERROR'),
instance_uuid=instance.uuid)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
filters['uuid'] = driver_uuids
local_instances = instance_obj.InstanceList.get_by_filters(
context, filters)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
instances = instance_obj.InstanceList.get_by_filters(context, filters)
name_map = dict((instance.name, instance) for instance in instances)
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them.
"""
our_host = self.host
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
if instance.host != our_host:
LOG.info(_('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
{'instance_host': instance.host,
'our_host': our_host}, instance=instance)
destroy_disks = False
try:
network_info = self._get_instance_nw_info(context,
instance)
bdi = self._get_instance_volume_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info(_('Instance has been marked deleted already, '
'removing it from the hypervisor.'),
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
def _is_instance_storage_shared(self, context, instance):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
obj_base.obj_to_primitive(instance),
data))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage'),
instance=instance)
shared_storage = False
except Exception:
            LOG.exception(_('Failed to check if instance storage is shared'),
instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
instance.destroy()
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = quotas_obj.Quotas()
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
quotas.reserve(context, project_id=project_id, user_id=user_id,
instances=-1, cores=-instance.vcpus,
ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
instance.system_metadata)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if CONF.vnc_enabled or CONF.spice.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(context,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance.uuid)
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
# Instances that are shut down, or in an error state can not be
# initialized and are not attempted to be recovered. The exception
# to this are instances that are in RESIZE_MIGRATING or DELETING,
# which are dealt with further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug(_("Instance is in %s state."),
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
# we don't want that an exception blocks the init_host
msg = _('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
finally:
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
# NOTE(dave-mcnally) compute stopped before instance was fully
# spawned so set to ERROR state. This is safe to do as the state
# may be set by the api but the host is not so if we get here the
# instance has already been scheduled to this particular host.
LOG.debug(_("Instance failed to spawn correctly, "
"setting to ERROR state"), instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug(_("Instance in transitional state %s at start-up "
"clearing task state"),
instance['task_state'], instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info(_('Service started deleting the instance during '
'the previous run, but did not finish. Restarting '
'the deletion now.'), instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
self._delete_instance(context, instance, bdms)
except Exception:
# we don't want that an exception blocks the init_host
msg = _('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
finally:
return
net_info = compute_utils.get_nw_info_for_instance(instance)
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception as e:
LOG.exception(_('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_('Instance found in migrating state during '
'startup. Resetting task_state'),
instance=instance)
instance.task_state = None
instance.save()
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug(_('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.'),
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
LOG.info(_('Rebooting instance after nova-compute restart.'),
instance=instance)
block_device_info = \
self._get_instance_volume_block_device_info(
context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'resume guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning(_('Failed to resume instance'), instance=instance)
self._set_instance_error_state(context, instance.uuid)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'firewall rules'), instance=instance)
def handle_lifecycle_event(self, event):
LOG.info(_("Lifecycle event %(state)d on VM %(uuid)s") %
{'state': event.get_transition(),
'uuid': event.get_instance_uuid()})
context = nova.context.get_admin_context()
instance = instance_obj.Instance.get_by_uuid(
context, event.get_instance_uuid())
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
else:
LOG.warning(_("Unexpected power state %d") %
event.get_transition())
if vm_power_state is not None:
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug(_("Event %s arrived for non-existent instance. The "
"instance was probably deleted.") % event)
else:
LOG.debug(_("Ignoring event %s") % event)
def init_virt_events(self):
self.driver.register_event_listener(self.handle_events)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = instance_obj.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug(_('Checking state'), instance=instance)
try:
return self.driver.get_info(instance)["state"]
except exception.NotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
#TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@wrap_exception()
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronise the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance['uuid'])
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'security groups.'), instance=instance)
return _sync_refresh()
@wrap_exception()
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance, use_slave=False):
"""Get a list of dictionaries of network data of an instance."""
if (not hasattr(instance, 'system_metadata') or
len(instance['system_metadata']) == 0):
# NOTE(danms): Several places in the code look up instances without
# pulling system_metadata for performance, and call this function.
# If we get an instance without it, re-fetch so that the call
# to network_api (which requires it for instance_type) will
# succeed.
instance = instance_obj.Instance.get_by_uuid(context,
instance['uuid'],
use_slave=use_slave)
network_info = self.network_api.get_instance_nw_info(context,
instance)
return network_info
def _await_block_device_map_created(self, context, vol_id, max_tries=60,
wait_between=1):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
# TODO(harlowja): make the max_tries configurable or dynamic?
attempts = 0
start = time.time()
while attempts < max_tries:
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status != 'available':
LOG.warn(_("Volume id: %s finished being created but was"
" not set as 'available'"), vol_id)
# NOTE(harlowja): return how many attempts were tried
return attempts + 1
greenthread.sleep(wait_between)
attempts += 1
# NOTE(harlowja): Should only happen if we ran out of attempts
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempts)
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec):
"""Launch a new instance with specified options."""
extra_usage_info = {}
def notify(status, msg="", fault=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
info['message'] = unicode(msg)
self._notify_about_instance_usage(context, instance, type_,
extra_usage_info=info, fault=fault, **kwargs)
try:
self._prebuild_instance(context, instance)
if request_spec and request_spec.get('image'):
image_meta = request_spec['image']
else:
image_meta = {}
extra_usage_info = {"image_name": image_meta.get('name', '')}
notify("start") # notify that build is starting
instance, network_info = self._build_instance(context,
request_spec, filter_properties, requested_networks,
injected_files, admin_password, is_first_time, node,
instance, image_meta, legacy_bdm_in_spec)
notify("end", msg=_("Success"), network_info=network_info)
except exception.RescheduledException as e:
# Instance build encountered an error, and has been rescheduled.
notify("error", fault=e)
except exception.BuildAbortException as e:
# Instance build aborted due to a non-failure
LOG.info(e)
notify("end", msg=unicode(e)) # notify that build is done
except Exception as e:
# Instance build encountered a non-recoverable error:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance['uuid'])
notify("error", fault=e) # notify that build failed
def _prebuild_instance(self, context, instance):
self._check_instance_exists(context, instance)
try:
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
raise exception.BuildAbortException(instance_uuid=instance['uuid'],
reason=msg)
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# anti-affinity. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. This is a validation step to
# make sure that starting the instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_uuid = scheduler_hints.get('group')
if not group_uuid:
return
@utils.synchronized(group_uuid)
def _do_validation(context, instance, group_uuid):
group = instance_group_obj.InstanceGroup.get_by_uuid(context,
group_uuid)
if 'anti-affinity' not in group.policies:
return
group_hosts = group.get_hosts(context, exclude=[instance['uuid']])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance['uuid'],
reason=msg)
_do_validation(context, instance, group_uuid)
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
context = context.elevated()
# If neutron security groups pass requested security
# groups to allocate_for_instance()
if request_spec and self.is_neutron_security_groups:
security_groups = request_spec.get('security_group')
else:
security_groups = []
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug(_("No node specified, defaulting to %s"), node)
network_info = None
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
# b64 decode the files to inject:
injected_files_orig = injected_files
injected_files = self._decode_files(injected_files)
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups,
dhcp_options)
self._instance_update(
context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image_meta,
bdms)
block_device_info = self._prep_block_device(
context, instance, bdms)
set_access_ip = (is_first_time and
not instance['access_ip_v4'] and
not instance['access_ip_v6'])
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password,
set_access_ip=set_access_ip)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the spawn
# Make sure the async call finishes
msg = _("Instance disappeared during build")
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _('Failed to dealloc network '
'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
instance_uuid=instance['uuid'],
reason=msg)
except exception.UnexpectedTaskStateError as e:
# Don't try to reschedule, just log and reraise.
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except Exception:
exc_info = sys.exc_info()
# try to re-schedule instance:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
rescheduled = self._reschedule_or_error(context, instance,
exc_info, requested_networks, admin_password,
injected_files_orig, is_first_time, request_spec,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
# log the original build error
self._log_original_error(exc_info, instance['uuid'])
raise exception.RescheduledException(
instance_uuid=instance['uuid'],
reason=unicode(exc_info[1]))
else:
# not re-scheduling, go to error:
raise exc_info[0], exc_info[1], exc_info[2]
# spawn success
return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_('Error: %s') % exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule_or_error(self, context, instance, exc_info,
requested_networks, admin_password, injected_files, is_first_time,
request_spec, filter_properties, bdms=None,
legacy_bdm_in_spec=True):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
instance_uuid = instance['uuid']
rescheduled = False
compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'instance.create.error', fault=exc_info[1])
try:
LOG.debug(_("Clean up resource before rescheduling."),
instance=instance)
if bdms is None:
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
self._shutdown_instance(context, instance,
bdms, requested_networks)
self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception:
# do not attempt retry if clean up failed:
with excutils.save_and_reraise_exception():
self._log_original_error(exc_info, instance_uuid)
try:
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
legacy_bdm_in_spec)
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance['uuid'],
self.scheduler_rpcapi.run_instance, method_args,
task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
instance_uuid=instance_uuid)
return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
instance_uuid, scheduler_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug(_("Retry info not present, will not reschedule"),
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug(_("No request spec, will not reschedule"),
instance_uuid=instance_uuid)
return
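        # Narrow the request spec to just this instance for the reschedule.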
request_spec['instance_uuids'] = [instance_uuid]
LOG.debug(_("Re-scheduling %(method)s: attempt %(num)d") %
{'method': scheduler_method.func_name,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception(*exc_info)
scheduler_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = instance_obj.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
self._set_instance_error_state(context, instance['uuid'])
LOG.warn(_("Instance build timed out. Set to error state."),
instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance['name']):
raise exception.InstanceExists(name=instance['name'])
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
LOG.debug(_("Allocating IP information in the background."),
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warn(_("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
attempts = retries > 1 and retries + 1 or 1
retry_time = 1
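        # Retry the allocation with exponential backoff (1s, 2s, 4s, ...)
        # capped at 30 seconds between attempts; the final failure is
        # re-raised to the caller.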
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug(_('Instance network_info: |%s|'), nwinfo,
instance=instance)
# NOTE(alaski): This can be done more cleanly once we're sure
# we'll receive an object.
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
sys_meta['network_allocated'] = 'True'
self._instance_update(context, instance['uuid'],
system_metadata=sys_meta)
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warn(_('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
return self._get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups, dhcp_options)
if not instance.access_ip_v4 and not instance.access_ip_v6:
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
network_name = CONF.default_access_ip_network_name
if not network_name:
return network_info
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
instance.save()
break
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups, dhcp_options):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=[None])
is_vpn = pipelib.is_vpn_image(instance['image_ref'])
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _default_block_device_names(self, context, instance,
image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
It also ensures that there is a root_device_name and is set to the
first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_instance = False
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance['root_device_name'] = root_device_name
update_instance = True
elif instance['root_device_name']:
root_device_name = instance['root_device_name']
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance['root_device_name'] = root_device_name
root_bdm.device_name = root_device_name
update_instance = update_root_bdm = True
if update_instance:
self._instance_update(context, instance['uuid'],
root_device_name=root_device_name)
if update_root_bdm:
root_bdm.save()
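        # Partition the remaining devices into ephemerals, swap and
        # image/volume/snapshot mappings so each group can be given default
        # device names.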
def _is_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot') and
driver_block_device.is_implemented(bdm))
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
block_device_mapping = filter(_is_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
try:
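            # Volume-backed mappings can be attached directly; snapshot- and
            # image-backed mappings are first materialized as new volumes, so
            # those attach paths wait on _await_block_device_map_created.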
block_device_info = {
'root_device_name': instance['root_device_name'],
'swap': driver_block_device.convert_swap(bdms),
'ephemerals': driver_block_device.convert_ephemerals(bdms),
'block_device_mapping': (
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
self.driver) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created))
}
if self.use_legacy_block_device_info:
for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
block_device_info[bdm_type] = \
driver_block_device.legacy_block_devices(
block_device_info[bdm_type])
# Get swap out of the list
block_device_info['swap'] = driver_block_device.get_swap(
block_device_info['swap'])
return block_device_info
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed block device setup'),
instance=instance)
@object_compat
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password,
set_access_ip=False):
"""Spawn an instance with error logging and update its power state."""
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
def _set_access_ip_values():
"""Add access ip values for a given instance.
If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose
from, an arbitrary one will be chosen.
"""
network_name = CONF.default_access_ip_network_name
if not network_name:
return
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
return
if set_access_ip:
_set_access_ip_values()
if network_info is not None:
network_info.wait(do_raise=True)
instance.save(expected_task_state=task_states.SPAWNING)
return instance
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug(_('Deallocating network for instance'), instance=instance)
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
def _get_instance_volume_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform volumes to the driver block_device format."""
if not bdms:
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance['uuid']))
block_device_mapping = (
driver_block_device.convert_volumes(bdms) +
driver_block_device.convert_snapshots(bdms) +
driver_block_device.convert_images(bdms))
if not refresh_conn_info:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
block_device_mapping = [
bdm for bdm in block_device_mapping
if bdm.get('connection_info')]
else:
block_device_mapping = driver_block_device.refresh_conn_infos(
block_device_mapping, context, instance, self.volume_api,
self.driver)
if self.use_legacy_block_device_info:
block_device_mapping = driver_block_device.legacy_block_devices(
block_device_mapping)
return {'block_device_mapping': block_device_mapping}
# NOTE(mikal): No object_compat wrapper on this method because its
# callers all pass objects already
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
@utils.synchronized(instance.uuid)
def do_build_and_run_instance(context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None):
try:
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = _('Instance disappeared before build.')
LOG.debug(msg, instance=instance)
return
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug(_('No node specified, defaulting to %s'), node)
try:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits)
except exception.RescheduledException as e:
LOG.debug(e.format_message(), instance=instance)
# dhcp_options are per host, so if they're set we need to
# deallocate the networks and reallocate on the next host.
if self.driver.dhcp_options_for_instance(instance):
self._cleanup_allocated_networks(context, instance,
requested_networks)
instance.task_state = task_states.SCHEDULING
instance.save()
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping)
except exception.InstanceNotFound:
msg = _('Instance disappeared during build.')
LOG.debug(msg, instance=instance)
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._set_instance_error_state(context, instance.uuid)
except exception.UnexpectedDeletingTaskStateError as e:
# The instance is deleting, so clean up but don't error.
LOG.debug(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
except Exception:
# Should not reach here.
msg = _('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._set_instance_error_state(context, instance.uuid)
do_build_and_run_instance(context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node, limits)
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits):
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
try:
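            # instance_claim() reserves resources on this node for the build
            # and aborts the claim automatically if anything inside it raises.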
rt = self._get_resource_tracker(node)
with rt.instance_claim(context, instance, limits):
with self._build_resources(context, instance,
requested_networks, security_groups, image,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
self.driver.spawn(context, instance, image,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
self._notify_about_instance_usage(context, instance,
'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException,
exception.FixedIpLimitExceeded,
exception.NoMoreNetworks) as e:
LOG.exception(_('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=str(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.SPAWNING)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image, block_device_mapping):
resources = {}
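        # Allocate networking and block devices up front; if the caller's
        # spawn fails, only the block devices are cleaned up here so that
        # networks can be reused on a reschedule.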
try:
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
            LOG.exception(_('Failed to allocate network(s)'),
                          instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image,
block_device_mapping)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_('Failure prepping block device'),
instance=instance)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
LOG.exception(_('Instance failed to spawn'), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._cleanup_build_resources(context, instance,
block_device_mapping)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
' not rescheduling')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
try:
self._deallocate_network(context, instance, requested_networks)
instance.system_metadata['network_allocated'] = 'False'
instance.save()
except Exception:
msg = _('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
def _cleanup_build_resources(self, context, instance,
block_device_mapping):
# Don't clean up networks here in case we reschedule
try:
self._cleanup_volumes(context, instance.uuid,
block_device_mapping)
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Failed to cleanup volumes for failed build,'
' not rescheduling')
LOG.exception(msg, instance=instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def run_instance(self, context, instance, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node, legacy_bdm_in_spec):
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance['uuid'])
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec)
do_run_instance()
def _try_deallocate_network(self, context, instance,
requested_networks=None):
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to deallocate network for instance.'),
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True):
"""Shutdown an instance on this host."""
context = context.elevated()
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
# get network info before tearing down
try:
network_info = self._get_instance_nw_info(context, instance)
except (exception.NetworkNotFound, exception.NoMoreFixedIps,
exception.InstanceInfoCacheNotFound):
network_info = network_model.NetworkInfo()
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
self._try_deallocate_network(context, instance,
requested_networks)
self._try_deallocate_network(context, instance, requested_networks)
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.warn(_('Ignoring VolumeNotFound: %s') % exc,
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms):
for bdm in bdms:
LOG.debug(_("terminating bdm %s") % bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
# NOTE(vish): bdms will be deleted on instance destroy
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms,
reservations=None):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
"""
instance_uuid = instance['uuid']
image = instance['image_ref']
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
if was_soft_deleted:
# Instances in SOFT_DELETED vm_state have already had quotas
# decremented.
try:
self._quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
except Exception:
pass
reservations = None
try:
events = self.instance_events.clear_events_for_instance(instance)
if events:
                LOG.debug(_('Events pending at deletion: %(events)s'),
                          {'events': ','.join(events.keys())},
                          instance=instance)
db_inst = obj_base.obj_to_primitive(instance)
instance.info_cache.delete()
self._notify_about_instance_usage(context, instance,
"delete.start")
self._shutdown_instance(context, db_inst, bdms)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
try:
self._cleanup_volumes(context, instance_uuid, bdms)
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
# if a delete task succeed, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
system_meta = utils.instance_sys_meta(instance)
db_inst = self.conductor_api.instance_destroy(
context, obj_base.obj_to_primitive(instance))
instance = instance_obj.Instance._from_db_object(context, instance,
db_inst)
except Exception:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
quotas = quotas_obj.Quotas.from_reservations(context,
reservations,
instance=instance)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host."""
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this when we bump the RPC major version to 4.0
if (bdms and
any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
for bdm in bdms)):
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
@utils.synchronized(instance['uuid'])
def do_terminate_instance(instance, bdms):
try:
self._delete_instance(context, instance, bdms,
reservations=reservations)
except exception.InstanceNotFound:
LOG.info(_("Instance disappeared during terminate"),
instance=instance)
except Exception as error:
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception(_('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance):
"""Stopping an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=task_states.POWERING_OFF)
self._notify_about_instance_usage(context, instance, "power_off.end")
def _power_on(self, context, instance):
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self._power_on(context, instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host."""
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
try:
self._notify_about_instance_usage(context, instance,
"soft_delete.start")
try:
self.driver.soft_delete(instance)
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
self._quota_commit(context, reservations, project_id=project_id,
user_id=user_id)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
try:
self.driver.restore(instance)
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
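        # Detach block devices, destroy the old guest (skipped on a
        # recreate/evacuate, where the source hypervisor is unavailable),
        # then re-attach the devices and spawn from scratch.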
detach_block_devices(context, bdms)
if not recreate:
self.driver.destroy(context, instance, network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
@object_compat
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
context = context.elevated()
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this on the next major RPC version bump
if (bdms and
any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
for bdm in bdms)):
bdms = None
orig_vm_state = instance.vm_state
with self._error_out_instance_on_exception(context, instance.uuid):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
                # To cover the case where the admin expects the instance
                # files to be on shared storage but they are not accessible,
                # and vice versa
if on_shared_storage != self.driver.instance_on_disk(instance):
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_("disk not on shared storagerebuilding from:"
" '%s'") % str(image_ref))
# NOTE(mriedem): On a recreate (evacuate), we need to update
                # the instance's host and node properties to reflect its
# destination node for the recreate.
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node['hypervisor_hostname']
except exception.NotFound:
LOG.exception(_('Failed to get compute_info for %s') %
self.host)
finally:
instance.host = self.host
instance.node = node_name
instance.save()
if image_ref:
image_meta = _get_image_meta(context, image_ref)
else:
image_meta = {}
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
self.conductor_api.notify_usage_exists(context,
obj_base.obj_to_primitive(instance),
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
self.network_api.setup_networks_on_host(
context, instance, self.host)
network_info = self._get_instance_nw_info(context, instance)
if bdms is None:
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
block_device_info = \
self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self.volume_api.detach(context, bdm.volume_id)
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral)
try:
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
if orig_vm_state == vm_states.STOPPED:
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_("Detaching from volume api: %s") % volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
context = context.elevated()
LOG.audit(_("Rebooting instance"), context=context, instance=instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.save()
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
context=context, instance=instance)
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)
try:
# Don't change it out of rescue mode
if instance['vm_state'] == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_('Reboot failed but instance is running'),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
ctxt.reraise = False
else:
LOG.error(_('Cannot reboot instance: %s'), error,
context=context, instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warn(_("Instance disappeared during reboot"),
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
if rotation < 0:
raise exception.RotationRequiredForBackup()
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
self._rotate_backups(context, instance, backup_type, rotation)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.
:param context: security context
:param instance: an Instance dict
:param image_id: glance.db.sqlalchemy.models.Image.Id
"""
# NOTE(dave-mcnally) the task state will already be set by the api
# but if the compute manager has crashed/been restarted prior to the
# request getting here the task state may have been cleared so we set
# it again and things continue normally
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
# possibility instance no longer exists, no point in continuing
LOG.debug(_("Instance not found, could not set state %s "
"for instance."),
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug(_("Instance being deleted, snapshot cannot continue"),
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
try:
instance.power_state = current_power_state
instance.save()
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
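            # The driver advances the task state through this callback while
            # it captures and uploads the snapshot image.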
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = _("Instance disappeared during snapshot")
LOG.debug(msg, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _("Image not found during snapshot")
LOG.warn(msg, instance=instance)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
image_service = glance.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = image_service.detail(context, filters=filters,
sort_key='created_at', sort_dir='desc')
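        # Results are sorted newest-first, so popping from the end of the
        # list below removes the oldest backups.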
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"),
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups"), excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %s"), image_id,
instance=instance)
image_service.delete(context, image_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('Failed to set admin password. Instance %s is not'
' running') % instance["uuid"]
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
else:
try:
self.driver.set_admin_password(instance, new_pass)
LOG.audit(_("Root password set"), instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
_msg = _('set_admin_password is not implemented '
'by this driver or guest instance.')
LOG.warn(_msg, instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_msg)
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception as e:
# Catch all here because this could be anything.
LOG.exception(_('set_admin_password failed: %s') % e,
instance=instance)
self._set_instance_error_state(context,
instance['uuid'])
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
            LOG.warn(_('trying to inject a file into a non-running instance:'
                       ' (state: %(current_state)s'
                       ' expected: %(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.audit(_('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance):
"""Determine what image should be used to boot the rescue VM."""
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
# 1. First try to use base image associated with instance's current
# image.
#
# The idea here is to provide the customer with a rescue environment
# which they are familiar with. So, if they built their instance off of
# a Debian image, their rescue VM will also be Debian.
if not rescue_image_ref:
# 2. As a last resort, use instance's current image
LOG.warn(_('Unable to find a different image to use for rescue VM,'
' using instance\'s current image'))
rescue_image_ref = instance['image_ref']
image_service, image_id = glance.get_remote_image_service(
context, rescue_image_ref)
image_meta = compute_utils.get_image_metadata(context, image_service,
rescue_image_ref,
instance)
# NOTE(belliott) bug #1227350 - xenapi needs the actual image id
image_meta['id'] = rescue_image_ref
return image_meta
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
def rescue_instance(self, context, instance, rescue_password):
"""Rescue an instance on this host.
:param rescue_password: password to set on rescue instance
"""
context = context.elevated()
LOG.audit(_('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self._get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance)
extra_usage_info = {'rescue_image_name':
rescue_image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
try:
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception(_("Error trying to Rescue Instance"),
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance['uuid'],
reason=_("Driver Error: %s") % unicode(e))
self.conductor_api.notify_usage_exists(context, instance,
current_period=True)
current_power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = current_power_state
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unrescue_instance(self, context, instance):
"""Rescue an instance on this host."""
context = context.elevated()
LOG.audit(_('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.unrescue(instance,
network_info)
current_power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = current_power_state
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug(_("Changing instance metadata according to %r"),
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, migration, instance,
restore_old=False):
"""Clean up "old" and "new" instance_type information stored in
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
Returns the updated system_metadata as a dict, as well as the
post-cleanup current instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = flavors.extract_flavor(instance, 'old_')
sys_meta = flavors.save_flavor_info(sys_meta, instance_type)
else:
instance_type = flavors.extract_flavor(instance)
flavors.delete_flavor_info(sys_meta, 'old_')
flavors.delete_flavor_info(sys_meta, 'new_')
return sys_meta, instance_type
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration):
@utils.synchronized(instance['uuid'])
def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug(_("Going to confirm migration %s") % migration_id,
context=context, instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = migration_obj.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_("Migration %s is not found during confirmation") %
migration_id, context=context, instance=instance)
return
if migration.status == 'confirmed':
LOG.info(_("Migration %s is already confirmed") %
migration_id, context=context, instance=instance)
return
elif migration.status not in ('finished', 'confirming'):
LOG.warn(_("Unexpected confirmation status '%(status)s' of "
"migration %(id)s, exit confirmation process") %
{"status": migration.status, "id": migration_id},
context=context, instance=instance)
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata']
try:
instance = instance_obj.Instance.get_by_uuid(context,
instance.uuid, expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_("Instance is not found during confirmation"),
context=context, instance=instance)
return
self._confirm_resize(context, instance, reservations=reservations,
migration=migration)
do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, reservations=None,
migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(danms): delete stashed migration information
sys_meta, instance_type = self._cleanup_stored_instance_types(
migration, instance)
sys_meta.pop('old_vm_state', None)
instance.system_metadata = sys_meta
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration, instance,
network_info)
migration.status = 'confirmed'
migration.save(context.elevated())
rt = self._get_resource_tracker(migration.source_node)
rt.drop_resize_claim(instance, prefix='old_')
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug(_("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'."), vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
self._quota_commit(context, reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def revert_resize(self, context, instance, migration, reservations):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
instance_p = obj_base.obj_to_primitive(instance)
migration_p = obj_base.obj_to_primitive(migration)
self.conductor_api.network_migrate_instance_start(context,
instance_p,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
self.driver.destroy(context, instance, network_info,
block_device_info)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
migration.save(context.elevated())
rt = self._get_resource_tracker(instance.node)
rt.drop_resize_claim(instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,
reservations=reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations, migration):
"""Finishes the second half of reverting a resize.
Bring the original source instance state back (active/shutoff) and
revert the resized attributes in the database.
"""
with self._error_out_instance_on_exception(context, instance.uuid,
reservations):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
sys_meta, instance_type = self._cleanup_stored_instance_types(
migration, instance, True)
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)
instance.system_metadata = sys_meta
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.instance_type_id = instance_type['id']
instance.host = migration['source_compute']
instance.node = migration['source_node']
instance.save()
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
instance_p = obj_base.obj_to_primitive(instance)
migration_p = obj_base.obj_to_primitive(migration)
self.conductor_api.network_migrate_instance_finish(context,
instance_p,
migration_p)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info(_("Updating instance to original state: '%s'") %
old_vm_state)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
self._quota_commit(context, reservations)
def _quota_commit(self, context, reservations, project_id=None,
user_id=None):
if reservations:
self.conductor_api.quota_commit(context, reservations,
project_id=project_id,
user_id=user_id)
def _quota_rollback(self, context, reservations, project_id=None,
user_id=None):
if reservations:
self.conductor_api.quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
def _prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
if not filter_properties:
filter_properties = {}
if not instance['host']:
self._set_instance_error_state(context, instance['uuid'])
msg = _('Instance has no source host')
raise exception.MigrationError(msg)
same_host = instance['host'] == self.host
if same_host and not CONF.allow_resize_to_same_host:
self._set_instance_error_state(context, instance['uuid'])
msg = _('destination same as source!')
raise exception.MigrationError(msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
sys_meta = instance.system_metadata
flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance['vm_state']
LOG.debug(_('Stashing vm_state: %s'), vm_state, instance=instance)
sys_meta['old_vm_state'] = vm_state
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type,
limits=limits) as claim:
LOG.audit(_('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(context, instance,
claim.migration, image, instance_type, reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug(_("No node specified, defaulting to %s"), node)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
self._prep_resize(context, image, instance,
instance_type, reservations,
request_spec, filter_properties,
node)
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, reservations, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, reservations, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance['uuid']
try:
# NOTE(comstud): remove the scheduler RPCAPI method when
# this is adjusted to send to conductor... and then
# deprecate the scheduler manager method.
scheduler_method = self.scheduler_rpcapi.prep_resize
instance_p = obj_base.obj_to_primitive(instance)
method_args = (instance_p, instance_type, image, request_spec,
filter_properties, reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, exc_info[1],
exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
else:
# not re-scheduling
raise exc_info[0], exc_info[1], exc_info[2]
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations, migration, instance_type):
"""Starts the migration of a running instance to another host."""
with self._error_out_instance_on_exception(context, instance.uuid,
reservations):
if not instance_type:
instance_type = flavor_obj.Flavor.get_by_id(
context, migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
migration.status = 'migrating'
migration.save(context.elevated())
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info)
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
instance_p = obj_base.obj_to_primitive(instance)
self.conductor_api.network_migrate_instance_start(context,
instance_p,
migration_p)
migration.status = 'post-migrating'
migration.save(context.elevated())
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
def _finish_resize(self, context, instance, migration, disk_info,
image):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = flavors.extract_flavor(instance)
sys_meta = instance.system_metadata
# NOTE(mriedem): Get the old_vm_state so we know if we should
        # power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = sys_meta.get('old_vm_state', vm_states.ACTIVE)
flavors.save_flavor_info(sys_meta,
old_instance_type,
prefix='old_')
if old_instance_type_id != new_instance_type_id:
instance_type = flavors.extract_flavor(instance, prefix='new_')
flavors.save_flavor_info(sys_meta, instance_type)
instance.instance_type_id = instance_type['id']
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.system_metadata = sys_meta
instance.save()
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
instance_p = obj_base.obj_to_primitive(instance)
migration_p = obj_base.obj_to_primitive(migration)
self.conductor_api.network_migrate_instance_finish(context,
instance_p,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.system_metadata = sys_meta
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
block_device_info = self._get_instance_volume_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image, resize_instance,
block_device_info, power_on)
migration.status = 'finished'
migration.save(context.elevated())
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
try:
self._finish_resize(context, instance, migration,
disk_info, image)
self._quota_commit(context, reservations)
except Exception as error:
LOG.exception(_('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
self._quota_rollback(context, reservations)
except Exception as qr_error:
LOG.exception(_("Failed to rollback quota for failed "
"finish_resize: %s"),
qr_error, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
self.network_api.add_fixed_ip_to_instance(context, instance,
network_id)
network_info = self._inject_network_info(context, instance)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
self.network_api.remove_fixed_ip_from_instance(context, instance,
address)
network_info = self._inject_network_info(context, instance)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.audit(_('Pausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
self.driver.pause(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.audit(_('Unpausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
self.driver.unpause(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
# TODO(russellb) Remove the unused host parameter from the driver API
return self.driver.host_power_action(None, action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(russellb) Remove the unused host parameter from the driver API
return self.driver.set_host_enabled(None, enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime(self.host)
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.audit(_("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
with self._error_out_instance_on_exception(context, instance['uuid'],
instance_state=instance['vm_state']):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.audit(_('Resuming'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
"""
self.conductor_api.notify_usage_exists(
context, obj_base.obj_to_primitive(instance),
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.strtime()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = current_power_state
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.shelve_offload_instance(context, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
to slower unshelve times for this instance. This method is used by
volume backed instances since restoring them doesn't involve the
potentially large download of an image.
:param context: request context
:param instance: an Instance dict
"""
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.destroy(context, instance, network_info,
block_device_info)
instance.power_state = current_power_state
instance.host = None
instance.node = None
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties=None, node=None):
"""Unshelve the instance.
:param context: request context
:param instance: an Instance dict
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance['uuid'])
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
instance.task_state = task_states.SPAWNING
instance.save()
network_info = self._get_instance_nw_info(context, instance)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug(_('No node specified, defaulting to %s'), node)
rt = self._get_resource_tracker(node)
limits = filter_properties.get('limits', {})
try:
with rt.instance_claim(context, instance, limits):
self.driver.spawn(context, instance, image, injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)
if image:
image_service = glance.get_default_image_service()
image_service.delete(context, image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.SPAWNING)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
@reverts_task_state
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug(_('Reset network'), context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance):
"""Inject network info for the given instance."""
LOG.debug(_('Inject network info'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_('network_info to inject: |%s|'), network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
return network_info
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
self._inject_network_info(context, instance)
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance)
context = context.elevated()
LOG.audit(_("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(context, instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
return '\n'.join(log.split('\n')[-int(length):])
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug(_("Getting vnc console"), instance=instance)
token = str(uuid.uuid4())
if not CONF.vnc_enabled:
raise exception.ConsoleTypeInvalid(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_vnc_console(context, instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
except exception.InstanceNotFound:
if instance['vm_state'] != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug(_("Getting spice console"), instance=instance)
token = str(uuid.uuid4())
if not CONF.spice.enabled:
raise exception.ConsoleTypeInvalid(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_spice_console(context, instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
except exception.InstanceNotFound:
if instance['vm_state'] != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug(_("Getting RDP console"), instance=instance)
token = str(uuid.uuid4())
if not CONF.rdp.enabled:
raise exception.ConsoleTypeInvalid(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_rdp_console(context, instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
except exception.InstanceNotFound:
if instance['vm_state'] != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@object_compat
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info['port'] == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus=None, device_type=None):
# NOTE(ndipanov): disk_bus and device_type will be set to None if not
# passed (by older clients) and defaulted by the virt driver. Remove
# default values on the next major RPC version bump.
@utils.synchronized(instance['uuid'])
def do_reserve():
bdms = (
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
device_name = compute_utils.get_device_name_for_instance(
context, instance, bdms, device)
# NOTE(vish): create bdm here to avoid race condition
bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid,
volume_id=volume_id or 'reserved',
device_name=device_name,
disk_bus=disk_bus, device_type=device_type)
bdm.create(context)
return device_name
return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
instance, bdm=None):
"""Attach a volume to an instance."""
if not bdm:
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy(context)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm.device_name
volume_id = bdm.volume_id
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
connection_info = jsonutils.loads(bdm.connection_info)
# NOTE(vish): We currently don't use the serial when disconnecting,
# but added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
if not self.driver.instance_exists(instance['name']):
LOG.warn(_('Detaching volume from unknown instance'),
context=context, instance=instance)
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
self.driver.detach_volume(connection_info,
instance,
mp,
encryption=encryption)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_('Failed to detach volume %(volume_id)s '
'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
if CONF.volume_usage_poll_interval > 0:
vol_stats = []
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
if '/dev/' in mp:
mp = mp[5:]
try:
vol_stats = self.driver.block_stats(instance['name'], mp)
except NotImplementedError:
pass
if vol_stats:
LOG.debug(_("Updating volume usage cache with totals"))
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
self.conductor_api.vol_usage_update(context, volume_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance,
update_totals=True)
self._detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
self.volume_api.detach(context.elevated(), volume_id)
bdm.destroy()
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
new_cinfo['serial'] = old_cinfo['serial']
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
new_volume_id):
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint)
except Exception: # pylint: disable=W0702
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
msg = _("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg % {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
msg = _("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg % {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# If Cinder initiated the swap, it will keep
# the original ID
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
self.volume_api.attach(context,
new_volume_id,
instance['uuid'],
mountpoint)
# Remove old connection
self.volume_api.detach(context.elevated(), old_volume_id)
return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
"""Swap volume for an instance."""
context = context.elevated()
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, old_volume_id, instance_uuid=instance.uuid)
connector = self.driver.get_volume_connector(instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
old_volume_id,
new_volume_id)
save_volume_id = comp_ret['save_volume_id']
# Update bdm
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'delete_on_termination': False,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'volume_size': None,
'no_device': None}
bdm.update(values)
bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
self._detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@object_compat
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip)
if len(network_info) != 1:
LOG.error(_('allocate_port_for_instance returned %(ports)s ports')
% dict(ports=len(network_info)))
raise exception.InterfaceAttachFailed(instance=instance)
image_ref = instance.get('image_ref')
image_service, image_id = glance.get_remote_image_service(
context, image_ref)
image_meta = compute_utils.get_image_metadata(
context, image_service, image_ref, instance)
self.driver.attach_interface(instance, image_meta, network_info[0])
return network_info[0]
@object_compat
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
# FIXME(comstud): Why does this need elevated context?
network_info = self._get_instance_nw_info(context.elevated(),
instance)
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
self.network_api.deallocate_port_for_instance(context, instance,
port_id)
self.driver.detach_interface(instance, condemned)
def _get_compute_info(self, context, host):
compute_node_ref = self.conductor_api.service_get_by_compute_host(
context, host)
try:
return compute_node_ref['compute_node'][0]
except IndexError:
raise exception.NotFound(_("Host %s not found") % host)
@wrap_exception()
@wrap_instance_fault
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param context: security context
:param data: result of driver.check_instance_shared_storage_local
        Returns True if the instance disks are located on shared storage and
        False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info
"""
src_compute_info = self._get_compute_info(ctxt, instance.host)
dst_compute_info = self._get_compute_info(ctxt, CONF.host)
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
migrate_data = {}
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if 'migrate_data' in dest_check_data:
migrate_data.update(dest_check_data['migrate_data'])
return migrate_data
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance)
dest_check_data['is_volume_backed'] = is_volume_backed
return self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
@object_compat
@wrap_exception()
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data : if not None, it is a dict which holds data
required for live migration without shared storage.
"""
block_device_info = self._get_instance_volume_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
pre_live_migration_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
        # Create filters on the hypervisor and firewall. An example is the
        # nova-instance-instance-xxx nwfilter written to libvirt.xml (check
        # "virsh nwfilter-list"); this nwfilter is necessary on the
        # destination host. In addition, this method creates the filtering
        # rules on the destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
return pre_live_migration_data
@wrap_exception()
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migrate_data):
"""Executing live migration.
:param context: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: implementation specific params
"""
# Create a local copy since we'll be modifying the dictionary
migrate_data = dict(migrate_data or {})
try:
if block_migration:
disk = self.driver.get_instance_disk_info(instance['name'])
else:
disk = None
pre_migration_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
migrate_data['pre_live_migration_result'] = pre_migration_data
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Pre live migration failed at %s'),
dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
# Executing live migration
        # live_migration might raise exceptions, but
# nothing must be recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance_ref,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
This method is called from live_migration
and mainly updating database record.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
LOG.info(_('_post_live_migration() is started..'),
instance=instance_ref)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance_ref['uuid'])
# Cleanup source host post live-migration
block_device_info = self._get_instance_volume_block_device_info(
ctxt, instance_ref, bdms)
self.driver.post_live_migration(ctxt, instance_ref, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance_ref)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance_ref)
self._notify_about_instance_usage(ctxt, instance_ref,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.conductor_api.network_migrate_instance_start(ctxt,
instance_ref,
migration)
        # Define the domain at the destination host; without doing it,
        # pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance_ref, block_migration, dest)
        # No instance is booting at the source host anymore, but the instance
        # dir must be deleted to prepare for the next block migration or the
        # next live migration without shared storage.
is_shared_storage = True
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
if block_migration or not is_shared_storage:
self.driver.cleanup(ctxt, instance_ref, network_info)
else:
# self.driver.destroy() usually performs vif unplugging
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
try:
self.driver.unplug_vifs(instance_ref, network_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance_ref)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance_ref,
self.host, teardown=True)
self.instance_events.clear_events_for_instance(instance_ref)
self._notify_about_instance_usage(ctxt, instance_ref,
"live_migration._post.end",
network_info=network_info)
LOG.info(_('Migrating instance to %s finished successfully.'),
dest, instance=instance_ref)
LOG.info(_("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance_ref)
if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
instance_ref['uuid'])
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
instance_ref['uuid'])
@object_compat
@wrap_exception()
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
block_migration):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance['host'],
'dest_compute': self.host, }
self.conductor_api.network_migrate_instance_finish(context,
instance,
migration)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.post_live_migration_at_destination(context, instance,
network_info,
block_migration, block_device_info)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node['hypervisor_hostname']
except exception.NotFound:
LOG.exception(_('Failed to get compute_info for %s') % self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.node = node_name
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
:param migrate_data:
if not none, contains implementation specific data.
"""
host = instance['host']
instance = self._instance_update(context, instance['uuid'],
host=host, vm_state=vm_states.ACTIVE,
task_state=None, expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
for bdm in (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance['uuid'])):
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(context, instance,
bdm.volume_id, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
# any empty images has to be deleted.
# Also Volume backed live migration w/o shared storage needs to delete
# newly created instance-xxx dir on the destination as a part of its
# rollback process
is_volume_backed = False
is_shared_storage = True
if migrate_data:
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_shared_storage = migrate_data.get('is_shared_storage', True)
if block_migration or (is_volume_backed and not is_shared_storage):
self.compute_rpcapi.rollback_live_migration_at_destination(context,
instance, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
@wrap_exception()
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance):
"""Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: an Instance dict sent over rpc
"""
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.rollback_live_migration_at_destination(context, instance,
network_info, block_device_info)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
        If anything errors, don't fail, as it's possible the instance
        has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
curr_time = time.time()
if self._last_info_cache_heal + heal_interval > curr_time:
return
self._last_info_cache_heal = curr_time
instance_uuids = getattr(self, '_instance_uuids_to_heal', None)
instance = None
while not instance or instance['host'] != self.host:
if instance_uuids:
try:
instance = instance_obj.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
db_instances = instance_obj.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
if not db_instances:
# None.. just return.
return
instance = db_instances[0]
instance_uuids = [inst['uuid'] for inst in db_instances[1:]]
self._instance_uuids_to_heal = instance_uuids
# We have an instance now and it's ours
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self._get_instance_nw_info(context, instance, use_slave=True)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
LOG.debug(_("An error occurred"), exc_info=True)
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state': task_states.REBOOTING,
'host': self.host}
rebooting = instance_obj.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance['updated_at'],
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = self.conductor_api.instance_get_all_by_filters(
context, filters, columns_to_join=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance['launched_at'],
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.conductor_api.compute_unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
mig_list_cls = migration_obj.MigrationList
migrations = mig_list_cls.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warn(_("Setting migration %(migration_id)s to error: "
"%(reason)s"),
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
migration.save(context.elevated())
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info(_("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = instance_obj.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance['vm_state'] == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.error(_("Error auto-confirming resize: %s. "
"Will retry later.") % e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'host': self.host}
shelved_instances = instance_obj.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save()
self.shelve_offload_instance(context, instance)
except Exception:
LOG.exception(_('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
if compute_utils.has_audit_been_run(context,
self.conductor_api,
self.host):
return
begin, end = utils.last_completed_audit_period()
capi = self.conductor_api
instances = capi.instance_get_active_by_window_joined(
context, begin, end, host=self.host)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances."),
dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, num_instances)
for instance in instances:
try:
self.conductor_api.notify_usage_exists(
context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_('Failed to generate usage '
'audit for instance '
'on host %s') % self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if (CONF.bandwidth_poll_interval <= 0 or not self._bw_usage_supported):
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = instance_obj.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that its not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.warning(_("Bandwidth usage not supported by hypervisor."))
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
# TODO(geekinutah): Once bw_usage_cache object is created
# need to revisit this and slaveify.
usage = self.conductor_api.bw_usage_get(context,
bw_ctr['uuid'],
start_time,
bw_ctr['mac_address'])
if usage:
bw_in = usage['bw_in']
bw_out = usage['bw_out']
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
else:
# TODO(geekinutah): Same here, pls slaveify
usage = self.conductor_api.bw_usage_get(
context, bw_ctr['uuid'], prev_time,
bw_ctr['mac_address'])
if usage:
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
self.conductor_api.bw_usage_update(context,
bw_ctr['uuid'],
bw_ctr['mac_address'],
start_time,
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
last_refreshed=refreshed,
update_cells=update_cells)
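    # Illustrative note on the rollover branch above (an assumption about the
    # hypervisor counter semantics, not extra logic): if the counter reset
    # since the last sample, e.g. last_ctr_in=900 and bw_ctr['bw_in']=100,
    # only the post-reset value (100) is accumulated; otherwise the delta
    # bw_ctr['bw_in'] - last_ctr_in is added to the cached usage.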
def _get_host_volume_bdms(self, context):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = instance_obj.InstanceList.get_by_host(context, self.host)
for instance in instances:
instance_bdms = [bdm for bdm in
(block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.conductor_api.vol_usage_update(context, usage['volume'],
usage['rd_req'],
usage['rd_bytes'],
usage['wr_req'],
usage['wr_bytes'],
usage['instance'])
@periodic_task.periodic_task
def _poll_volume_usage(self, context, start_time=None):
if CONF.volume_usage_poll_interval == 0:
return
if not start_time:
start_time = utils.last_completed_audit_period()[1]
curr_time = time.time()
if (curr_time - self._last_vol_usage_poll) < \
CONF.volume_usage_poll_interval:
return
self._last_vol_usage_poll = curr_time
compute_host_bdms = self._get_host_volume_bdms(context)
if not compute_host_bdms:
return
LOG.debug(_("Updating volume usage cache"))
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database.
"""
db_instances = instance_obj.InstanceList.get_by_host(context,
self.host,
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warn(_("Found %(num_db_instances)s in the database and "
"%(num_vm_instances)s on the hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
for db_instance in db_instances:
if db_instance['task_state'] is not None:
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
continue
# No pending tasks. Now try to figure out the real vm_power_state.
try:
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore and move on to next instance.
continue
except Exception:
LOG.exception(_("Periodic sync_power_state task had an error "
"while processing an instance."),
instance=db_instance)
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s") %
{'src': self.host,
'dst': db_instance.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
# on the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident
            # but the actual VM has not shown up on the hypervisor
# yet. In this case, let's allow the loop to continue
# and run the state sync in a later round
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
return
if vm_power_state != db_power_state:
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.PAUSED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warn(_("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
                # because of a user request via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warn(_("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warn(_("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance is not stopped. Calling "
"the stop API."), instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a stopped
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warn(_("Instance is not (soft-)deleted."),
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
return
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = instance_obj.InstanceList.get_by_filters(
context, filters,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
LOG.info(_('Reclaiming deleted instance'), instance=instance)
# NOTE(comstud): Quotas were already accounted for when
# the instance was soft deleted, so there's no need to
# pass reservations here.
try:
self._delete_instance(context, instance, bdms)
except Exception as e:
LOG.warning(_("Periodic reclaim failed to delete "
"instance: %s"),
unicode(e), instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
"""See driver.get_available_resource()
        Periodic process that keeps the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
"""
new_resource_tracker_dict = {}
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
rt.update_available_resource(context)
new_resource_tracker_dict[nodename] = rt
# Delete orphan compute node not reported by driver but still in db
compute_nodes_in_db = self._get_compute_nodes_in_db(context)
for cn in compute_nodes_in_db:
if cn.get('hypervisor_hostname') not in nodenames:
LOG.audit(_("Deleting orphan compute node %s") % cn['id'])
self.conductor_api.compute_node_delete(context, cn)
self._resource_tracker_dict = new_resource_tracker_dict
def _get_compute_nodes_in_db(self, context):
service_ref = self.conductor_api.service_get_by_compute_host(
context, self.host)
if not service_ref:
LOG.error(_("No service record for host %s"), self.host)
return []
return service_ref['compute_node']
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
if action == "log":
LOG.warning(_("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance['name'], instance=instance)
elif action == 'shutdown':
LOG.info(_("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance['name'], instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.warn(_("set_bootable is not implemented for "
"the current driver"))
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _("Failed to power off instance")
LOG.warn(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance['name'], instance=instance)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception as e:
LOG.warning(_("Periodic cleanup failed to delete "
"instance: %s"),
unicode(e), instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance['deleted_at']
if isinstance(instance, obj_base.NovaObject) and deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance_uuid,
reservations=None,
instance_state=vm_states.ACTIVE):
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations)
LOG.info(_("Setting instance back to %(state)s after: "
"%(error)s") %
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
self._quota_rollback(context, reservations)
LOG.info(_("Setting instance back to ACTIVE after: %s"),
error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception as error:
LOG.exception(_('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations)
self._set_instance_error_state(context, instance_uuid)
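    # Illustrative usage sketch (the wrapped call is hypothetical, not from the
    # original code): callers wrap risky work in this context manager so that
    # failures roll back quota reservations and put the instance into ERROR:
    #
    #   with self._error_out_instance_on_exception(context, instance['uuid'],
    #                                              reservations):
    #       self._do_risky_operation(context, instance)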
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug(_('Hypervisor driver does not support '
'add_aggregate_host'))
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
self.conductor_api.aggregate_host_delete,
aggregate, host)
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug(_('Hypervisor driver does not support '
'remove_aggregate_host'))
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
self.conductor_api.aggregate_host_add,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug(_('Processing event %(event)s'),
{'event': event.key, 'instance': instance})
_event.send(event)
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
if event.name == 'network-changed':
self.network_api.get_instance_nw_info(context, instance)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
if CONF.image_cache_manager_interval == 0:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = instance_obj.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
if CONF.instance_delete_interval == 0:
return
LOG.debug(_('Cleaning up deleted instances'))
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = instance_obj.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs)
LOG.debug(_('There are %d instances to clean'), len(instances))
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug(_('Instance has had %(attempts)s of %(max)s '
'cleanup attempts'),
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save(context)
| apache-2.0 | -2,143,329,436,891,058,400 | 44.235739 | 79 | 0.561126 | false |
agx/git-buildpackage | setup.py | 1 | 3370 | #!/usr/bin/python3
# vim: set fileencoding=utf-8 :
# Copyright (C) 2006-2011 Guido Günther <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
# END OF COPYRIGHT #
import os
import re
from setuptools import setup, find_packages
VERSION_PY_PATH = 'gbp/version.py'
def _parse_changelog():
"""Get version from debian changelog and write it to gbp/version.py"""
with open("debian/changelog", encoding="utf-8") as f:
line = f.readline()
# Parse version from changelog without external tooling so it can work
# on non Debian systems.
m = re.match(".* \\(([0-9a-zA-Z.~\\-:+]+)\\) ", line)
if m:
return m.group(1)
raise ValueError('Could not parse version from debian/changelog')
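# Illustrative example (a made-up changelog entry, not shipped with this file):
# a debian/changelog whose first line is
#   git-buildpackage (0.9.22) unstable; urgency=medium
# makes the regex above return the version string "0.9.22".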
def _save_version_py(version):
with open(VERSION_PY_PATH, 'w') as f:
f.write('"The current gbp version number"\n')
f.write('gbp_version = "%s"\n' % version)
def _load_version():
with open(VERSION_PY_PATH, 'r') as f:
version_py = f.read()
version_py_globals = {}
exec(version_py, version_py_globals)
return version_py_globals['gbp_version']
def parse_and_fetch_version():
if os.path.exists('debian/changelog'):
version = _parse_changelog()
_save_version_py(version)
# we could return with the version here, but instead we check that
# the file has been properly written and it can be loaded back
version = _load_version()
return version
def readme():
with open('README.md') as file:
return file.read()
def setup_requires():
if os.getenv('WITHOUT_NOSETESTS'):
return []
else:
return ['nose>=0.11.1', 'coverage>=2.85', 'nosexcover>=1.0.7']
setup(name="gbp",
version=parse_and_fetch_version(),
author=u'Guido Günther',
author_email='[email protected]',
url='https://honk.sigxcpu.org/piki/projects/git-buildpackage/',
description='Suite to help with Debian packages in Git repositories',
license='GPLv2+',
long_description=readme(),
classifiers=[
'Environment :: Console',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Version Control',
'Operating System :: POSIX :: Linux',
],
scripts=['bin/git-pbuilder',
'bin/gbp-builder-mock'],
packages=find_packages(exclude=['tests', 'tests.*']),
data_files=[("share/git-buildpackage/", ["gbp.conf"]), ],
requires=['dateutil'],
install_requires=[
'python-dateutil',
],
setup_requires=setup_requires(),
python_requires='>=3.5',
entry_points={
'console_scripts': ['gbp=gbp.scripts.supercommand:supercommand'],
},
)
| gpl-2.0 | 564,592,606,133,849,100 | 31.07619 | 75 | 0.635986 | false |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/apps/ban/migrations/0001_initial.py | 1 | 2978 | # Generated by Django 3.0.2 on 2020-01-23 13:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserBan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Model was created at this time.', verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Model was updated at this time.', verbose_name='Updated at')),
('end_date', models.DateTimeField(blank=True, help_text='The end date tells, until the ban is valid. If the end_date is empty, the ban is infinit.', null=True, verbose_name='End date of the ban')),
('banned_user', models.ForeignKey(help_text='This is the banned user or the receiver of the ban.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='banned_user', to=settings.AUTH_USER_MODEL, verbose_name='User')),
('creator', models.ForeignKey(help_text='This is the creator of the ban. If the creator is empty, the ban was created by the system.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'User Ban',
'verbose_name_plural': 'User Bans',
},
),
migrations.CreateModel(
name='IPBan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Model was created at this time.', verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Model was updated at this time.', verbose_name='Updated at')),
('end_date', models.DateTimeField(blank=True, help_text='The end date tells, until the ban is valid. If the end_date is empty, the ban is infinit.', null=True, verbose_name='End date of the ban')),
('ip', models.GenericIPAddressField(help_text='This is the banned ip. Every request from this IP will result in 403.', null=True, verbose_name='IP')),
('creator', models.ForeignKey(help_text='This is the creator of the ban. If the creator is empty, the ban was created by the system.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'IP Ban',
'verbose_name_plural': 'IP Bans',
},
),
]
| mit | -282,668,870,110,881,380 | 62.361702 | 258 | 0.629953 | false |
cuckoobox/cuckoo | stuff/distributed/cluster-test.py | 1 | 3084 | # Copyright (C) 2017-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import click
import requests
import time
class Script(object):
def __init__(self):
self.name = self.__class__.__name__.lower()
self.filename = "%s.py" % self.name
self.source = self.__doc__
def check(self):
pass
class Internet(Script):
r"""
import socket
s = socket.create_connection(("google.com", 80))
s.send("GET / HTTP/1.0\r\nHost: google.com\r\n\r\n")
s.recv(0x10000)
"""
def check(self, report):
for dns in report.get("network", {}).get("dns", []):
if dns["request"] == "google.com" and dns["answers"]:
return True
return False
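# Illustrative sketch (an assumed extension, not part of the original script):
# further checks follow the same pattern - the class docstring is the payload
# executed in the guest and check() inspects the resulting report, e.g.:
#
#   class Dns(Script):
#       r"""
#       import socket
#       socket.gethostbyname("example.com")
#       """
#       def check(self, report):
#           return bool(report.get("network", {}).get("dns"))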
@click.command()
@click.argument("host")
@click.argument("port", default=9003, required=False)
@click.option("-s", "--script", default="internet")
def main(host, port, script):
for cls in Script.__subclasses__():
if cls().name == script:
script = cls()
break
else:
print "Unknown script:", script
exit(1)
owner = "cluster.test.%d" % int(time.time())
url = "http://%s:%s" % (host, port)
r = requests.get("%s/api/node" % url).json()
machines = []
for name, info in r["nodes"].items():
if not info["enabled"]:
continue
print "indexing..", name
info = requests.post("%s/api/node/%s/refresh" % (url, name)).json()
for vm in info["machines"]:
machines.append((name, vm["name"]))
tasks = {}
for node, vmname in machines:
r = requests.post("%s/api/task" % url, files={
"file": (script.filename, script.source),
}, data={
"node": node,
"machine": vmname,
"options": "json.calls=0",
"priority": 5,
"owner": owner,
})
tasks[r.json()["task_id"]] = node, vmname
print "submitted..", node, vmname, r.json()["task_id"]
status = []
while tasks:
r = requests.get("%s/api/task" % url, params={
"status": "finished",
"owner": owner,
})
assert r.status_code == 200
for task in r.json()["tasks"].values():
r = requests.get("%s/api/report/%d" % (url, task["id"]))
if task["id"] not in tasks:
continue
node, vmname = tasks.pop(task["id"])
ret = script.check(r.json())
status.append((node, vmname, task["id"], ret))
print "finished..", status[-1], "report.length=%d" % len(r.text)
if not ret:
print "^-- incorrect return value!"
else:
requests.delete("%s/api/task/%d" % (url, task["id"]))
counts = {}
for node, _ in tasks.values():
counts[node] = counts.get(node, 0) + 1
print "left:", " ".join("%s=%s" % (k, v) for k, v in counts.items())
time.sleep(3)
if __name__ == "__main__":
main()
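# Illustrative invocation (host name is an assumption):
#   python cluster-test.py distributed.example.org 9003 --script internet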
| mit | -8,013,044,425,276,418,000 | 29.534653 | 76 | 0.519455 | false |
Illumina/HapMix | scripts/haplobams/split_by_haplotype_lib.py | 1 | 11911 | import sys
sys.path.append('/opt/rh/python27/root/usr/lib64/python2.7/site-packages/pysam')
sys.path.append('/home/ptedder/local/lib/python2.7/site-packages')
sys.path.append('/home/ptedder/local/lib64/python2.7/site-packages')
print sys.path
import random,pysam,re,subprocess,HTSeq,pdb,argparse
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument('-s','--sample_name',help="please give sample name e.g. NA12878",required=True)
parser.add_argument('-c','--chr_no',help="please give chr_no",required=True)
parser.add_argument('-b','--bam_file',help="please specify bam file",required=True)
parser.add_argument('-r','--ref_file',help="please specify reference directory",required=True)
parser.add_argument('-n','--snp_file',help="please specify tabixed haplotyped SNP file",required=True)
parser.add_argument('-i','--indel_file',help="please specify tabixed haplotyped indel file",required=True)
parser.add_argument('-x','--chr_prefix',help="does the chromsome need a prefix eg chr",required=False)
args = parser.parse_args()
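# Illustrative invocation (file names and chromosome are assumptions, not taken
# from this repository):
#   python split_by_haplotype_lib.py -s NA12878 -c 21 -b NA12878.bam \
#       -r chr21.fa -n phased_snps.vcf.gz -i phased_indels.vcf.gz -x chr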
sample_name=args.sample_name
chr_no=args.chr_no
bam_file=args.bam_file
ref_file=args.ref_file
print "chr_no ", chr_no
snp_file=args.snp_file
indel_file=args.indel_file
if (args.chr_prefix):
chr= args.chr_prefix+str(chr_no)
else:
chr=str(chr_no)
sequence={}
for s in HTSeq.FastaReader(ref_file):
sequence[s.name]=s
reference_seq=sequence["chr"+str(chr_no)]
pos_ref=0
samfile = pysam.Samfile(bam_file,"rb")
haplotyped_snp_file=subprocess.Popen(['tabix',snp_file,chr_no ],stdout=subprocess.PIPE)
haplotyped_indel_file=subprocess.Popen(['tabix',indel_file,chr_no ],stdout=subprocess.PIPE)
#d={'hc':0,'hd':0,'bt':0,'ot':0,'rf':0,'fr':0}
haplotypeC_bam= pysam.Samfile("haplotypeC_"+chr +".bam", "wb", template=samfile)
haplotypeD_bam= pysam.Samfile("haplotypeD_"+chr+".bam", "wb", template=samfile)
haplotype_count={}
def main():
read_variant_dict={}
paired_read={}
(haplotype_dict_snvs,haplotype_dict_snvs_pos)=read_in_vcf(haplotyped_snp_file)
(haplotype_dict_indels,haplotype_dict_indels_pos)=read_in_vcf(haplotyped_indel_file)
chr_variant_dict={}
chr_variant_dict['haplotypeC']=dict(haplotype_dict_snvs['haplotypeC'].items()+haplotype_dict_indels['haplotypeC'].items())
chr_variant_dict['haplotypeD']=dict(haplotype_dict_snvs['haplotypeD'].items()+haplotype_dict_indels['haplotypeD'].items())
haplotype_dict_pos=dict(haplotype_dict_snvs_pos.items()+haplotype_dict_indels_pos.items())
for read_line in samfile.fetch(chr):
if read_line.cigar == None:
continue #SKIPPING READ AS UNMAPPED
if not read_line.qname in read_variant_dict:
read_variant_dict[read_line.qname]={}
rvd=variant_count(read_line,haplotype_dict_pos)
read_variant_dict[read_line.qname].update(rvd) #HYPOTHETICAL BUG IF INDEL AND SNP AT SAME POS
if not read_line.qname in haplotype_count:
haplotype_count[read_line.qname]={'other':{},'C':{},'D':{}}
#COUNT NUMBER OF MUTATIONS FOR EACH READ WHICH CAN BE ASSIGNED TO EACH HAPLOTYPE
for variant_pos in read_variant_dict[read_line.qname].keys():
if variant_pos in chr_variant_dict['haplotypeC'] and variant_pos in chr_variant_dict['haplotypeD'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeC'][variant_pos]['alt'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeD'][variant_pos]['alt']: #check hom/het and call:
haplotype_count[read_line.qname]['C'][variant_pos]={}
haplotype_count[read_line.qname]['D'][variant_pos]={}
elif variant_pos in chr_variant_dict['haplotypeC'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeC'][variant_pos]['alt']:
haplotype_count[read_line.qname]['C'][variant_pos]={'call':read_variant_dict[read_line.qname][variant_pos]['call']}
elif variant_pos in chr_variant_dict['haplotypeD'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeD'][variant_pos]['alt']:
haplotype_count[read_line.qname]['D'][variant_pos]={}
else:
haplotype_count[read_line.qname]['other'][variant_pos]={}
# IS IT THE SECOND/ORPHAN READ? CAN THE READ BE ASSIGNED UNIQUELY TO EITHER OF THE HAPLOTYPES?
if not read_line.is_proper_pair or (read_line.pnext in paired_read and read_line.qname in paired_read[read_line.pnext]) :
haplotype=assign_to_haplotype(haplotype_count,paired_read,read_line)
write_to_bam_file(haplotype,paired_read,read_line)
haplotype_count.pop(read_line.qname, None)
read_variant_dict.pop(read_line.qname, None)
# IS IT THE FIRST READ? ADD TO DICT
if read_line.is_proper_pair and not read_line.pnext in paired_read:
if not read_line.pos in paired_read:
paired_read[read_line.pos]={}
if not read_line.qname in paired_read[read_line.pos]:
paired_read[read_line.pos][read_line.qname]=read_line
#FLUSH DICTIONARIES EVERY 10k bp
if not read_line.pos % 1e4:
tmpkeys=paired_read.keys()
for pos in tmpkeys:
if pos<read_line.pos:
paired_read.pop(pos, None)
def read_in_vcf(vcf_file):
cd={'haplotypeC':{},'haplotypeD':{}}
csdl={}
for line in vcf_file.stdout:
if re.match('#',line):
continue
if not re.search('bwa',line) and not re.search('isaac',line): # ONLY TRUST ISAAC & BWA BASED CALLS
continue
else:
(chrom,pos,id,ref,alt,qual,filter,info,format,NA12877,NA12878,NA12879,NA12880,NA12881,NA12882,NA12883,NA12884,NA12885,NA12886,NA12887,NA12888,NA12889,NA12890,NA12891,NA12892,NA12893)=line.strip().split('\t')
if re.match('chr',chr) and not re.match('chr',chrom):
chrom='chr'+chrom
if chrom != chr:
continue
pos=int(float(pos))
format_columns=format.split(':') #JUST GENOTYPE AND EDIT DISTANCE
format_columns_data=eval(sample_name).split(':')
f_dict={}
for i,k in enumerate(format_columns):
f_dict[k]=format_columns_data[i]
if 'GT' in f_dict:
if re.search("/",f_dict['GT']):
continue
(ploidyC,ploidyD)=f_dict['GT'].split('|')
(ploidyC,ploidyD)=(int(ploidyC),int(ploidyD))
ploidyC_base_call=''
ploidyD_base_call=''
if ploidyC ==0 and ploidyD ==0:
continue # not haplotyped so skip
if ploidyC ==0:
ploidyC_base_call=ref
elif ploidyC ==1:
ploidyC_base_call=alt
if ploidyD ==0:
ploidyD_base_call=ref
elif ploidyD ==1:
ploidyD_base_call=alt
if len(ref)==len(alt)==1:
type='S'
if len(ref)==len(alt)!=1:
type='SUB'
if len(ref)>len(alt):
type='D'
if len(ref)<len(alt):
type='I'
cd['haplotypeC'][pos]={'pos':pos,'alt':ploidyC_base_call}
cd['haplotypeD'][pos]={'pos':pos,'alt':ploidyD_base_call}
csdl[pos]={'ref':ref,'alt':alt,'type':type}
else:
sys.exit("no genotyping on line")
return(cd,csdl)
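# Illustrative note (standard VCF phasing convention, assumed rather than
# stated in this file): a phased genotype such as "0|1" puts the REF allele on
# haplotype C and the ALT allele on haplotype D, which is what the
# ploidyC/ploidyD branches above implement; unphased "0/1" calls are skipped.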
def variant_count(read_line,haplotype_dict_pos):
pos_in_read=0
pos_ref=read_line.pos
read_variant_dict={}
for cigar_operations in read_line.cigar:
(type_cigar_op,length_cigar_op)=cigar_operations
if type_cigar_op==0 or type_cigar_op==7: #MATCH
ref_pos=pos_ref
for ii in range(0,length_cigar_op):
chr='chr'+str(read_line.tid)
ref_base=reference_seq.seq[ref_pos].upper()
pos_ref+=1
if pos_ref in haplotype_dict_pos: # IF ITS A HAPLOTYPED READ
if haplotype_dict_pos[pos_ref]['type']=='S':
read_variant_dict[pos_ref]={'type':haplotype_dict_pos[pos_ref]['type'],'call':read_line.seq[pos_in_read],'ref':ref_base}
if haplotype_dict_pos[pos_ref]['type']=='D':
ref_del=reference_seq.seq[pos_ref-1:pos_ref+length_cigar_op].upper()
read_variant_dict[pos_ref]={'type':'D','alt':haplotype_dict_pos[pos_ref]['alt'],'call':haplotype_dict_pos[pos_ref]['ref'],'ln':len(haplotype_dict_pos[pos_ref]['alt'])} # deletions vcf ref will be longer than alt
if haplotype_dict_pos[pos_ref]['type']=='I':
read_variant_dict[pos_ref]={'type':'I','alt':haplotype_dict_pos[pos_ref]['alt'],'call':haplotype_dict_pos[pos_ref]['ref']} # for indels this has to be base before as well
pos_in_read+=1
elif type_cigar_op==3 : #N
pos_in_read+=length_cigar_op
pos_ref+=length_cigar_op
elif type_cigar_op==4: # SOFT CLIP
pos_in_read+=length_cigar_op #BAM FILE START POS IS AFTER SOFT CLIPPING
elif type_cigar_op==1 :# INSERTION
if pos_ref in haplotype_dict_pos:
read_variant_dict[pos_ref]={'type':'I','call':read_line.seq[pos_in_read-1:pos_in_read+length_cigar_op],'ref':read_line.seq[pos_in_read-1]} # for indels this has to be base before as well
pos_in_read+=length_cigar_op
pos_ref+=1
elif type_cigar_op==2 :# DELETION
if pos_ref in haplotype_dict_pos:
ref_del=reference_seq.seq[pos_ref-1:pos_ref+length_cigar_op].upper()
read_variant_dict[pos_ref]={'type':'D','call':read_line.seq[pos_in_read-1],'alt':read_line.seq[pos_in_read-1],'ref':ref_del,'ln':length_cigar_op} # deletions vcf ref will be longer than alt
pos_ref+=length_cigar_op
return read_variant_dict
def write_to_bam_file(haplotype,paired_read,read_line):
if haplotype =='haplotypeC':
haplotypeC_bam.write(read_line)
elif haplotype =='haplotypeD':
haplotypeD_bam.write(read_line)
if read_line.is_proper_pair:
other_read=paired_read[read_line.pnext][read_line.qname]
if haplotype =='haplotypeC':
haplotypeC_bam.write(other_read)
elif haplotype =='haplotypeD':
haplotypeD_bam.write(other_read)
def assign_to_haplotype(haplotype_count,paired_read,read_line):
if len(haplotype_count[read_line.qname]['C']) != 0 and len(haplotype_count[read_line.qname]['D']) == 0 :
haplotype='haplotypeC'
if len(haplotype_count[read_line.qname]['C']) == 0 and len(haplotype_count[read_line.qname]['D']) != 0 :
haplotype='haplotypeD'
elif len(haplotype_count[read_line.qname]['C']) != 0 and len(haplotype_count[read_line.qname]['D']) != 0 :
if random.random()<0.5:
haplotype='haplotypeC'
else:
haplotype='haplotypeD'
elif len(haplotype_count[read_line.qname]['C']) == 0 and len(haplotype_count[read_line.qname]['D']) == 0 and len(haplotype_count[read_line.qname]['other']) != 0:
if random.random()<0.5:
haplotype='haplotypeC'
else:
haplotype='haplotypeD'
elif len(haplotype_count[read_line.qname]['C']) == 0 and len(haplotype_count[read_line.qname]['D']) == 0 and len(haplotype_count[read_line.qname]['other']) == 0:
if random.random() <0.5:
haplotype='haplotypeC'
else:
haplotype='haplotypeD'
return haplotype
if __name__ == "__main__":
main()
| gpl-3.0 | -1,688,665,998,435,795,000 | 43.114815 | 363 | 0.608513 | false |
Runscope/pysaml2 | src/saml2/pack.py | 1 | 8806 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""Contains classes and functions that are necessary to implement
different bindings.
Bindings normally consists of three parts:
- rules about what to send
- how to package the information
- which protocol to use
"""
import urlparse
import saml2
import base64
import urllib
from saml2.s_utils import deflate_and_base64_encode
from saml2.s_utils import Unsupported
import logging
from saml2.sigver import REQ_ORDER
from saml2.sigver import RESP_ORDER
from saml2.sigver import SIGNER_ALGS
logger = logging.getLogger(__name__)
try:
from xml.etree import cElementTree as ElementTree
if ElementTree.VERSION < '1.3.0':
# cElementTree has no support for register_namespace
# neither _namespace_map, thus we sacrify performance
# for correctness
from xml.etree import ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
NAMESPACE = "http://schemas.xmlsoap.org/soap/envelope/"
FORM_SPEC = """<form method="post" action="%s">
<input type="hidden" name="%s" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
<input type="submit" value="Submit" />
</form>"""
def http_form_post_message(message, location, relay_state="",
typ="SAMLRequest"):
"""The HTTP POST binding defines a mechanism by which SAML protocol
    messages may be transmitted within the base64-encoded content of an
HTML form control.
:param message: The message
:param location: Where the form should be posted to
:param relay_state: for preserving and conveying state information
:return: A tuple containing header information and a HTML message.
"""
response = ["<head>", """<title>SAML 2.0 POST</title>""", "</head><body>"]
if not isinstance(message, basestring):
message = "%s" % (message,)
if typ == "SAMLRequest" or typ == "SAMLResponse":
_msg = base64.b64encode(message)
else:
_msg = message
response.append(FORM_SPEC % (location, typ, _msg, relay_state))
response.append("""<script type="text/javascript">""")
response.append(" window.onload = function ()")
response.append(" { document.forms[0].submit(); }")
response.append("""</script>""")
response.append("</body>")
return {"headers": [("Content-type", "text/html")], "data": response}
def http_redirect_message(message, location, relay_state="", typ="SAMLRequest",
sigalg=None, key=None):
"""The HTTP Redirect binding defines a mechanism by which SAML protocol
messages can be transmitted within URL parameters.
Messages are encoded for use with this binding using a URL encoding
technique, and transmitted using the HTTP GET method.
The DEFLATE Encoding is used in this function.
:param message: The message
:param location: Where the message should be posted to
:param relay_state: for preserving and conveying state information
:param typ: What type of message it is SAMLRequest/SAMLResponse/SAMLart
:param sigalg: The signature algorithm to use.
:param key: Key to use for signing
:return: A tuple containing header information and a HTML message.
"""
if not isinstance(message, basestring):
message = "%s" % (message,)
_order = None
if typ in ["SAMLRequest", "SAMLResponse"]:
if typ == "SAMLRequest":
_order = REQ_ORDER
else:
_order = RESP_ORDER
args = {typ: deflate_and_base64_encode(message)}
elif typ == "SAMLart":
args = {typ: message}
else:
raise Exception("Unknown message type: %s" % typ)
if relay_state:
args["RelayState"] = relay_state
if sigalg:
# sigalgs
# http://www.w3.org/2000/09/xmldsig#dsa-sha1
# http://www.w3.org/2000/09/xmldsig#rsa-sha1
args["SigAlg"] = sigalg
try:
signer = SIGNER_ALGS[sigalg]
except:
raise Unsupported("Signing algorithm")
else:
string = "&".join([urllib.urlencode({k: args[k]}) for k in _order if k in args])
args["Signature"] = base64.b64encode(signer.sign(string, key))
string = urllib.urlencode(args)
else:
string = urllib.urlencode(args)
glue_char = "&" if urlparse.urlparse(location).query else "?"
login_url = glue_char.join([location, string])
headers = [('Location', str(login_url))]
body = []
return {"headers": headers, "data": body}
DUMMY_NAMESPACE = "http://example.org/"
PREFIX = '<?xml version="1.0" encoding="UTF-8"?>'
def make_soap_enveloped_saml_thingy(thingy, header_parts=None):
""" Returns a soap envelope containing a SAML request
as a text string.
:param thingy: The SAML thingy
:return: The SOAP envelope as a string
"""
envelope = ElementTree.Element('')
envelope.tag = '{%s}Envelope' % NAMESPACE
if header_parts:
header = ElementTree.Element('')
header.tag = '{%s}Header' % NAMESPACE
envelope.append(header)
for part in header_parts:
# This doesn't work if the headers are signed
part.become_child_element_of(header)
body = ElementTree.Element('')
body.tag = '{%s}Body' % NAMESPACE
envelope.append(body)
if isinstance(thingy, basestring):
# remove the first XML version/encoding line
logger.debug("thingy0: %s" % thingy)
_part = thingy.split("\n")
thingy = "".join(_part[1:])
thingy = thingy.replace(PREFIX, "")
logger.debug("thingy: %s" % thingy)
_child = ElementTree.Element('')
_child.tag = '{%s}FuddleMuddle' % DUMMY_NAMESPACE
body.append(_child)
_str = ElementTree.tostring(envelope, encoding="UTF-8")
logger.debug("SOAP precursor: %s" % _str)
        # find and remove the namespace definition
i = _str.find(DUMMY_NAMESPACE)
j = _str.rfind("xmlns:", 0, i)
cut1 = _str[j:i + len(DUMMY_NAMESPACE) + 1]
_str = _str.replace(cut1, "")
first = _str.find("<%s:FuddleMuddle" % (cut1[6:9],))
last = _str.find(">", first + 14)
cut2 = _str[first:last + 1]
return _str.replace(cut2, thingy)
else:
thingy.become_child_element_of(body)
return ElementTree.tostring(envelope, encoding="UTF-8")
def http_soap_message(message):
return {"headers": [("Content-type", "application/soap+xml")],
"data": make_soap_enveloped_saml_thingy(message)}
def http_paos(message, extra=None):
return {"headers": [("Content-type", "application/soap+xml")],
"data": make_soap_enveloped_saml_thingy(message, extra)}
def parse_soap_enveloped_saml(text, body_class, header_class=None):
"""Parses a SOAP enveloped SAML thing and returns header parts and body
:param text: The SOAP object as XML
:return: header parts and body as saml.samlbase instances
"""
envelope = ElementTree.fromstring(text)
assert envelope.tag == '{%s}Envelope' % NAMESPACE
#print len(envelope)
body = None
header = {}
for part in envelope:
#print ">",part.tag
if part.tag == '{%s}Body' % NAMESPACE:
for sub in part:
try:
body = saml2.create_class_from_element_tree(body_class, sub)
except Exception:
raise Exception(
"Wrong body type (%s) in SOAP envelope" % sub.tag)
elif part.tag == '{%s}Header' % NAMESPACE:
if not header_class:
raise Exception("Header where I didn't expect one")
#print "--- HEADER ---"
for sub in part:
#print ">>",sub.tag
for klass in header_class:
#print "?{%s}%s" % (klass.c_namespace,klass.c_tag)
if sub.tag == "{%s}%s" % (klass.c_namespace, klass.c_tag):
header[sub.tag] = \
saml2.create_class_from_element_tree(klass, sub)
break
return body, header
# -----------------------------------------------------------------------------
PACKING = {
saml2.BINDING_HTTP_REDIRECT: http_redirect_message,
saml2.BINDING_HTTP_POST: http_form_post_message,
}
def packager(identifier):
try:
return PACKING[identifier]
except KeyError:
raise Exception("Unkown binding type: %s" % identifier)
def factory(binding, message, location, relay_state="", typ="SAMLRequest"):
return PACKING[binding](message, location, relay_state, typ)
| bsd-2-clause | -1,560,756,759,072,883,200 | 33.533333 | 92 | 0.607086 | false |
mosra/magnum-examples | src/python/magnum-primitives-scenegraph.py | 1 | 4843 | #!/usr/bin/env python3
#
# This file is part of Magnum.
#
# Original authors — credit is appreciated but not required:
#
# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021
# — Vladimír Vondruš <[email protected]>
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or distribute
# this software, either in source code form or as a compiled binary, for any
# purpose, commercial or non-commercial, and by any means.
#
# In jurisdictions that recognize copyright laws, the author or authors of
# this software dedicate any and all copyright interest in the software to
# the public domain. We make this dedication for the benefit of the public
# at large and to the detriment of our heirs and successors. We intend this
# dedication to be an overt act of relinquishment in perpetuity of all
# present and future rights to this software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from magnum import *
from magnum import gl, meshtools, primitives, scenegraph, shaders
from magnum.platform.sdl2 import Application
from magnum.scenegraph.matrix import Scene3D, Object3D
class CubeDrawable(scenegraph.Drawable3D):
def __init__(self, object: Object3D, drawables: scenegraph.DrawableGroup3D,
mesh: gl.Mesh, shader: shaders.PhongGL, color: Color4):
scenegraph.Drawable3D.__init__(self, object, drawables)
self._mesh = mesh
self._shader = shader
self.color = color # Settable from outside
def draw(self, transformation_matrix: Matrix4, camera: scenegraph.Camera3D):
self._shader.light_positions = [
Vector4(camera.camera_matrix.transform_point((-3.0, 5.0, 10.0)), 0.0)
]
self._shader.light_colors = [Color3(1.0)]
self._shader.diffuse_color = self.color
self._shader.ambient_color = Color3.from_hsv(self.color.hue(), 1.0, 0.3)
self._shader.transformation_matrix = transformation_matrix
self._shader.normal_matrix = transformation_matrix.rotation_scaling()
self._shader.projection_matrix = camera.projection_matrix
self._shader.draw(self._mesh)
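# Illustrative note (behavioural assumption drawn from the code below, not an
# addition to the example): constructing CubeDrawable with a DrawableGroup3D
# registers it in that group, so self._camera.draw(self._drawables) in
# draw_event() ends up calling CubeDrawable.draw() every frame.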
class PrimitivesSceneGraphExample(Application):
def __init__(self):
configuration = self.Configuration()
configuration.title = "Magnum Python Primitives + SceneGraph Example"
Application.__init__(self, configuration)
gl.Renderer.enable(gl.Renderer.Feature.DEPTH_TEST)
gl.Renderer.enable(gl.Renderer.Feature.FACE_CULLING)
# Scene and drawables
self._scene = Scene3D()
self._drawables = scenegraph.DrawableGroup3D()
# Camera setup
camera_object = Object3D(parent=self._scene)
camera_object.translate(Vector3.z_axis(10.0))
self._camera = scenegraph.Camera3D(camera_object)
self._camera.projection_matrix = Matrix4.perspective_projection(
fov=Deg(35.0), aspect_ratio=1.33333, near=0.01, far=100.0)
# Cube object and drawable
self._cube = Object3D(parent=self._scene)
self._cube.rotate_y(Deg(40.0))
self._cube.rotate_x(Deg(30.0))
self._cube_drawable = CubeDrawable(self._cube, self._drawables,
meshtools.compile(primitives.cube_solid()), shaders.PhongGL(),
Color3.from_hsv(Deg(35.0), 1.0, 1.0))
self._previous_mouse_position = Vector2i()
def draw_event(self):
gl.default_framebuffer.clear(gl.FramebufferClear.COLOR|
gl.FramebufferClear.DEPTH)
self._camera.draw(self._drawables)
self.swap_buffers()
def mouse_release_event(self, event: Application.MouseEvent):
self._cube_drawable.color = Color3.from_hsv(
self._cube_drawable.color.hue() + Deg(50.0), 1.0, 1.0)
self.redraw()
def mouse_move_event(self, event: Application.MouseMoveEvent):
if event.buttons & self.MouseMoveEvent.Buttons.LEFT:
delta = 1.0*(
Vector2(event.position - self._previous_mouse_position)/
Vector2(self.window_size))
self._cube.rotate_y_local(Rad(delta.x))
self._cube.rotate_x(Rad(delta.y))
self.redraw()
self._previous_mouse_position = event.position
exit(PrimitivesSceneGraphExample().exec())
| unlicense | 7,908,722,276,422,597,000 | 42.576577 | 81 | 0.674178 | false |
jeremiahyan/odoo | addons/account/models/chart_template.py | 1 | 74310 |
# -*- coding: utf-8 -*-
from odoo.exceptions import AccessError
from odoo import api, fields, models, _
from odoo import SUPERUSER_ID
from odoo.exceptions import UserError, ValidationError
from odoo.http import request
from odoo.addons.account.models.account_tax import TYPE_TAX_USE
import logging
_logger = logging.getLogger(__name__)
def migrate_set_tags_and_taxes_updatable(cr, registry, module):
''' This is a utility function used to manually set the flag noupdate to False on tags and account tax templates on localization modules
that need migration (for example in case of VAT report improvements)
'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_record_ids = env['ir.model.data'].search([
('model', 'in', ['account.tax.template', 'account.account.tag']),
('module', 'like', module)
]).ids
if xml_record_ids:
cr.execute("update ir_model_data set noupdate = 'f' where id in %s", (tuple(xml_record_ids),))
def preserve_existing_tags_on_taxes(cr, registry, module):
''' This is a utility function used to preserve existing previous tags during upgrade of the module.'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_records = env['ir.model.data'].search([('model', '=', 'account.account.tag'), ('module', 'like', module)])
if xml_records:
cr.execute("update ir_model_data set noupdate = 't' where id in %s", [tuple(xml_records.ids)])
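# Illustrative usage sketch (not part of the original module): localization modules
# typically call these helpers from a post_init_hook or a migration script. The module
# name 'l10n_xx' below is purely hypothetical.
#
#     def post_init_hook(cr, registry):
#         preserve_existing_tags_on_taxes(cr, registry, 'l10n_xx')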
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class AccountGroupTemplate(models.Model):
_name = "account.group.template"
_description = 'Template for Account Groups'
_order = 'code_prefix_start'
parent_id = fields.Many2one('account.group.template', index=True, ondelete='cascade')
name = fields.Char(required=True)
code_prefix_start = fields.Char()
code_prefix_end = fields.Char()
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
class AccountAccountTemplate(models.Model):
_name = "account.account.template"
_inherit = ['mail.thread']
_description = 'Templates for Accounts'
_order = "code"
name = fields.Char(required=True, index=True)
currency_id = fields.Many2one('res.currency', string='Account Currency', help="Forces all moves for this account to have this secondary currency.")
code = fields.Char(size=64, required=True, index=True)
user_type_id = fields.Many2one('account.account.type', string='Type', required=True,
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities.")
reconcile = fields.Boolean(string='Allow Invoices & payments Matching', default=False,
help="Check this option if you want the user to reconcile entries in this account.")
note = fields.Text()
tax_ids = fields.Many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', string='Default Taxes')
nocreate = fields.Boolean(string='Optional Create', default=False,
help="If checked, the new chart of accounts will not contain this by default.")
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template',
help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you "
"to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times).")
tag_ids = fields.Many2many('account.account.tag', 'account_account_template_account_tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
@api.depends('name', 'code')
def name_get(self):
res = []
for record in self:
name = record.name
if record.code:
name = record.code + ' ' + name
res.append((record.id, name))
return res
class AccountChartTemplate(models.Model):
_name = "account.chart.template"
_description = "Account Chart Template"
name = fields.Char(required=True)
parent_id = fields.Many2one('account.chart.template', string='Parent Chart Template')
code_digits = fields.Integer(string='# of Digits', required=True, default=6, help="No. of Digits to use for account code")
visible = fields.Boolean(string='Can be Visible?', default=True,
help="Set this to False if you don't want this template to be used actively in the wizard that generate Chart of Accounts from "
"templates, this is useful when you want to generate accounts of this template only when loading its child template.")
currency_id = fields.Many2one('res.currency', string='Currency', required=True)
use_anglo_saxon = fields.Boolean(string="Use Anglo-Saxon accounting", default=False)
complete_tax_set = fields.Boolean(string='Complete Set of Taxes', default=True,
help="This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list "
"of taxes. This last choice assumes that the set of tax defined on this template is complete")
account_ids = fields.One2many('account.account.template', 'chart_template_id', string='Associated Account Templates')
tax_template_ids = fields.One2many('account.tax.template', 'chart_template_id', string='Tax Template List',
help='List of all the taxes that have to be installed by the wizard')
bank_account_code_prefix = fields.Char(string='Prefix of the bank accounts', required=True)
cash_account_code_prefix = fields.Char(string='Prefix of the main cash accounts', required=True)
transfer_account_code_prefix = fields.Char(string='Prefix of the main transfer accounts', required=True)
income_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Gain Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
expense_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Loss Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
country_id = fields.Many2one(string="Country", comodel_name='res.country', help="The country this chart of accounts belongs to. None if it's generic.")
account_journal_suspense_account_id = fields.Many2one('account.account.template', string='Journal Suspense Account')
account_journal_payment_debit_account_id = fields.Many2one('account.account.template', string='Journal Outstanding Receipts Account')
account_journal_payment_credit_account_id = fields.Many2one('account.account.template', string='Journal Outstanding Payments Account')
default_cash_difference_income_account_id = fields.Many2one('account.account.template', string="Cash Difference Income Account")
default_cash_difference_expense_account_id = fields.Many2one('account.account.template', string="Cash Difference Expense Account")
default_pos_receivable_account_id = fields.Many2one('account.account.template', string="PoS receivable account")
property_account_receivable_id = fields.Many2one('account.account.template', string='Receivable Account')
property_account_payable_id = fields.Many2one('account.account.template', string='Payable Account')
property_account_expense_categ_id = fields.Many2one('account.account.template', string='Category of Expense Account')
property_account_income_categ_id = fields.Many2one('account.account.template', string='Category of Income Account')
property_account_expense_id = fields.Many2one('account.account.template', string='Expense Account on Product Template')
property_account_income_id = fields.Many2one('account.account.template', string='Income Account on Product Template')
property_stock_account_input_categ_id = fields.Many2one('account.account.template', string="Input Account for Stock Valuation")
property_stock_account_output_categ_id = fields.Many2one('account.account.template', string="Output Account for Stock Valuation")
property_stock_valuation_account_id = fields.Many2one('account.account.template', string="Account Template for Stock Valuation")
property_tax_payable_account_id = fields.Many2one('account.account.template', string="Tax current account (payable)")
property_tax_receivable_account_id = fields.Many2one('account.account.template', string="Tax current account (receivable)")
property_advance_tax_payment_account_id = fields.Many2one('account.account.template', string="Advance tax payment account")
property_cash_basis_base_account_id = fields.Many2one(
comodel_name='account.account.template',
domain=[('deprecated', '=', False)],
string="Base Tax Received Account",
help="Account that will be set on lines created in cash basis journal entry and used to keep track of the "
"tax base amount.")
@api.model
def _prepare_transfer_account_template(self, prefix=None):
''' Prepare values to create the transfer account that is an intermediary account used when moving money
from a liquidity account to another.
:return: A dictionary of values to create a new account.account.
'''
digits = self.code_digits
prefix = prefix or self.transfer_account_code_prefix or ''
# Flatten the hierarchy of chart templates.
chart_template = self
chart_templates = self
while chart_template.parent_id:
chart_templates += chart_template.parent_id
chart_template = chart_template.parent_id
new_code = ''
for num in range(1, 100):
new_code = str(prefix.ljust(digits - 1, '0')) + str(num)
rec = self.env['account.account.template'].search(
[('code', '=', new_code), ('chart_template_id', 'in', chart_templates.ids)], limit=1)
if not rec:
break
else:
raise UserError(_('Cannot generate an unused account code.'))
current_assets_type = self.env.ref('account.data_account_type_current_assets', raise_if_not_found=False)
return {
'name': _('Liquidity Transfer'),
'code': new_code,
'user_type_id': current_assets_type and current_assets_type.id or False,
'reconcile': True,
'chart_template_id': self.id,
}
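    # Illustrative example (hypothetical values, not part of the original module): with
    # transfer_account_code_prefix = '101' and code_digits = 6, the first candidate code
    # tried above is the prefix left-padded with zeros to 5 characters plus the counter,
    # i.e. '101001', and the returned dict looks like
    # {'name': 'Liquidity Transfer', 'code': '101001', 'user_type_id': ..., 'reconcile': True, 'chart_template_id': ...}.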
@api.model
def _create_liquidity_journal_suspense_account(self, company, code_digits):
return self.env['account.account'].create({
'name': _("Bank Suspense Account"),
'code': self.env['account.account']._search_new_account_code(company, code_digits, company.bank_account_code_prefix or ''),
'user_type_id': self.env.ref('account.data_account_type_current_liabilities').id,
'company_id': company.id,
})
def try_loading(self, company=False, install_demo=True):
""" Installs this chart of accounts for the current company if not chart
of accounts had been created for it yet.
:param company (Model<res.company>): the company we try to load the chart template on.
If not provided, it is retrieved from the context.
:param install_demo (bool): whether or not we should load demo data right after loading the
chart template.
"""
# do not use `request.env` here, it can cause deadlocks
if not company:
if request and hasattr(request, 'allowed_company_ids'):
company = self.env['res.company'].browse(request.allowed_company_ids[0])
else:
company = self.env.company
# If we don't have any chart of account on this company, install this chart of account
if not company.chart_template_id and not self.existing_accounting(company):
for template in self:
template.with_context(default_company_id=company.id)._load(15.0, 15.0, company)
# Install the demo data when the first localization is instanciated on the company
if install_demo and self.env.ref('base.module_account').demo:
self.with_context(
default_company_id=company.id,
allowed_company_ids=[company.id],
)._create_demo_data()
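    # Illustrative usage sketch (the xml id below is an assumption, not part of this module):
    #     template = env.ref('l10n_generic_coa.configurable_chart_template')
    #     template.try_loading(company=env.company, install_demo=False)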
def _create_demo_data(self):
try:
with self.env.cr.savepoint():
demo_data = self._get_demo_data()
for model, data in demo_data:
created = self.env[model]._load_records([{
'xml_id': "account.%s" % xml_id if '.' not in xml_id else xml_id,
'values': record,
'noupdate': True,
} for xml_id, record in data.items()])
self._post_create_demo_data(created)
except Exception:
# Do not rollback installation of CoA if demo data failed
_logger.exception('Error while loading accounting demo data')
def _load(self, sale_tax_rate, purchase_tax_rate, company):
""" Installs this chart of accounts on the current company, replacing
        the existing one if it already had one defined. If some accounting entries
had already been made, this function fails instead, triggering a UserError.
Also, note that this function can only be run by someone with administration
rights.
"""
self.ensure_one()
# do not use `request.env` here, it can cause deadlocks
# Ensure everything is translated to the company's language, not the user's one.
self = self.with_context(lang=company.partner_id.lang).with_company(company)
if not self.env.is_admin():
raise AccessError(_("Only administrators can load a chart of accounts"))
existing_accounts = self.env['account.account'].search([('company_id', '=', company.id)])
if existing_accounts:
            # we tolerate switching the accounting package (localization module) as long as there aren't yet any accounting
            # entries created for the company.
if self.existing_accounting(company):
raise UserError(_('Could not install new chart of account as there are already accounting entries existing.'))
# delete accounting properties
prop_values = ['account.account,%s' % (account_id,) for account_id in existing_accounts.ids]
existing_journals = self.env['account.journal'].search([('company_id', '=', company.id)])
if existing_journals:
prop_values.extend(['account.journal,%s' % (journal_id,) for journal_id in existing_journals.ids])
self.env['ir.property'].sudo().search(
[('value_reference', 'in', prop_values)]
).unlink()
# delete account, journal, tax, fiscal position and reconciliation model
models_to_delete = ['account.reconcile.model', 'account.fiscal.position', 'account.move.line', 'account.move', 'account.journal', 'account.tax', 'account.group']
for model in models_to_delete:
res = self.env[model].sudo().search([('company_id', '=', company.id)])
if len(res):
res.with_context(force_delete=True).unlink()
existing_accounts.unlink()
company.write({'currency_id': self.currency_id.id,
'anglo_saxon_accounting': self.use_anglo_saxon,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
'transfer_account_code_prefix': self.transfer_account_code_prefix,
'chart_template_id': self.id
})
#set the coa currency to active
self.currency_id.write({'active': True})
# When we install the CoA of first company, set the currency to price types and pricelists
if company.id == 1:
for reference in ['product.list_price', 'product.standard_price', 'product.list0']:
try:
tmp2 = self.env.ref(reference).write({'currency_id': self.currency_id.id})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(company.id, sale_tax_rate, purchase_tax_rate)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref = self._install_template(company, code_digits=self.code_digits)
# Set default cash difference account on company
if not company.account_journal_suspense_account_id:
company.account_journal_suspense_account_id = self._create_liquidity_journal_suspense_account(company, self.code_digits)
account_type_current_assets = self.env.ref('account.data_account_type_current_assets')
if not company.account_journal_payment_debit_account_id:
company.account_journal_payment_debit_account_id = self.env['account.account'].create({
'name': _("Outstanding Receipts"),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, company.bank_account_code_prefix or ''),
'reconcile': True,
'user_type_id': account_type_current_assets.id,
'company_id': company.id,
})
if not company.account_journal_payment_credit_account_id:
company.account_journal_payment_credit_account_id = self.env['account.account'].create({
'name': _("Outstanding Payments"),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, company.bank_account_code_prefix or ''),
'reconcile': True,
'user_type_id': account_type_current_assets.id,
'company_id': company.id,
})
if not company.default_cash_difference_expense_account_id:
company.default_cash_difference_expense_account_id = self.env['account.account'].create({
'name': _('Cash Difference Loss'),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, '999'),
'user_type_id': self.env.ref('account.data_account_type_expenses').id,
'tag_ids': [(6, 0, self.env.ref('account.account_tag_investing').ids)],
'company_id': company.id,
})
if not company.default_cash_difference_income_account_id:
company.default_cash_difference_income_account_id = self.env['account.account'].create({
'name': _('Cash Difference Gain'),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, '999'),
'user_type_id': self.env.ref('account.data_account_type_revenue').id,
'tag_ids': [(6, 0, self.env.ref('account.account_tag_investing').ids)],
'company_id': company.id,
})
# Set the transfer account on the company
company.transfer_account_id = self.env['account.account'].search([
('code', '=like', self.transfer_account_code_prefix + '%'), ('company_id', '=', company.id)], limit=1)
# Create Bank journals
self._create_bank_journals(company, acc_template_ref)
# Create the current year earning account if it wasn't present in the CoA
company.get_unaffected_earnings_account()
# set the default taxes on the company
company.account_sale_tax_id = self.env['account.tax'].search([('type_tax_use', 'in', ('sale', 'all')), ('company_id', '=', company.id)], limit=1).id
company.account_purchase_tax_id = self.env['account.tax'].search([('type_tax_use', 'in', ('purchase', 'all')), ('company_id', '=', company.id)], limit=1).id
if self.country_id:
# If this CoA is made for only one country, set it as the fiscal country of the company.
company.account_fiscal_country_id = self.country_id
return {}
@api.model
def existing_accounting(self, company_id):
""" Returns True iff some accounting entries have already been made for
the provided company (meaning hence that its chart of accounts cannot
be changed anymore).
"""
model_to_check = ['account.payment', 'account.bank.statement']
for model in model_to_check:
if self.env[model].sudo().search([('company_id', '=', company_id.id)], limit=1):
return True
if self.env['account.move'].sudo().search([('company_id', '=', company_id.id), ('state', '!=', 'draft')], limit=1):
return True
return False
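    # Illustrative usage sketch (not part of the original module): a caller can guard a
    # chart change the same way _load() does above, e.g.
    #     if env['account.chart.template'].existing_accounting(company):
    #         raise UserError("Accounting entries already exist for this company.")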
def _create_tax_templates_from_rates(self, company_id, sale_tax_rate, purchase_tax_rate):
'''
This function checks if this chart template is configured as containing a full set of taxes, and if
        it's not the case, it creates the templates for the account.tax object according to the provided sale/purchase rates.
Then it saves the new tax templates as default taxes to use for this chart template.
:param company_id: id of the company for which the wizard is running
:param sale_tax_rate: the rate to use for created sales tax
:param purchase_tax_rate: the rate to use for created purchase tax
:return: True
'''
self.ensure_one()
obj_tax_temp = self.env['account.tax.template']
all_parents = self._get_chart_parent_ids()
# create tax templates from purchase_tax_rate and sale_tax_rate fields
if not self.complete_tax_set:
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'sale'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': sale_tax_rate, 'name': _('Tax %.2f%%') % sale_tax_rate, 'description': '%.2f%%' % sale_tax_rate})
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'purchase'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': purchase_tax_rate, 'name': _('Tax %.2f%%') % purchase_tax_rate, 'description': '%.2f%%' % purchase_tax_rate})
return True
def _get_chart_parent_ids(self):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:return: the IDS of all ancestor charts, including the chart itself.
"""
chart_template = self
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
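    # Illustrative example (hypothetical ids, not part of the original module): for a child
    # template with id 12 whose parent has id 3 and no grandparent, this returns [12, 3],
    # which is why the searches above use ('chart_template_id', 'in', all_parents).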
def _create_bank_journals(self, company, acc_template_ref):
'''
        This function creates a bank journal, with its account, for each line of
        data returned by the function _get_default_bank_journals_data.
:param company: the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
'''
self.ensure_one()
bank_journals = self.env['account.journal']
# Create the journals that will trigger the account.account creation
for acc in self._get_default_bank_journals_data():
bank_journals += self.env['account.journal'].create({
'name': acc['acc_name'],
'type': acc['account_type'],
'company_id': company.id,
'currency_id': acc.get('currency_id', self.env['res.currency']).id,
'sequence': 10,
})
return bank_journals
@api.model
def _get_default_bank_journals_data(self):
""" Returns the data needed to create the default bank journals when
installing this chart of accounts, in the form of a list of dictionaries.
The allowed keys in these dictionaries are:
- acc_name: string (mandatory)
- account_type: 'cash' or 'bank' (mandatory)
- currency_id (optional, only to be specified if != company.currency_id)
"""
return [{'acc_name': _('Cash'), 'account_type': 'cash'}, {'acc_name': _('Bank'), 'account_type': 'bank'}]
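    # Illustrative override sketch (hypothetical, not part of the original module): a
    # localization could add a foreign-currency bank journal by extending this hook.
    #     def _get_default_bank_journals_data(self):
    #         journals = super()._get_default_bank_journals_data()
    #         journals.append({'acc_name': 'Bank USD', 'account_type': 'bank',
    #                          'currency_id': self.env.ref('base.USD')})
    #         return journals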
def open_select_template_wizard(self):
# Add action to open wizard to select between several templates
if not self.company_id.chart_template_id:
todo = self.env['ir.actions.todo']
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_wizard_multi_chart')
if action_rec:
todo.create({'action_id': action_rec.id, 'name': _('Choose Accounting Template')})
return True
@api.model
def _prepare_transfer_account_for_direct_creation(self, name, company):
""" Prepare values to create a transfer account directly, based on the
method _prepare_transfer_account_template().
        This is needed when dealing with the installation of payment modules
        that require the creation of their own transfer account.
:param name: The transfer account name.
:param company: The company owning this account.
:return: A dictionary of values to create a new account.account.
"""
vals = self._prepare_transfer_account_template()
digits = self.code_digits or 6
prefix = self.transfer_account_code_prefix or ''
vals.update({
'code': self.env['account.account']._search_new_account_code(company, digits, prefix),
'name': name,
'company_id': company.id,
})
del(vals['chart_template_id'])
return vals
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
"""
This method is used for creating journals.
:param acc_template_ref: Account templates reference.
        :param company: company to generate journals for.
:returns: True
"""
JournalObj = self.env['account.journal']
for vals_journal in self._prepare_all_journals(acc_template_ref, company, journals_dict=journals_dict):
journal = JournalObj.create(vals_journal)
if vals_journal['type'] == 'general' and vals_journal['code'] == _('EXCH'):
company.write({'currency_exchange_journal_id': journal.id})
if vals_journal['type'] == 'general' and vals_journal['code'] == _('CABA'):
company.write({'tax_cash_basis_journal_id': journal.id})
return True
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
def _get_default_account(journal_vals, type='debit'):
# Get the default accounts
default_account = False
if journal['type'] == 'sale':
default_account = acc_template_ref.get(self.property_account_income_categ_id.id)
elif journal['type'] == 'purchase':
default_account = acc_template_ref.get(self.property_account_expense_categ_id.id)
return default_account
journals = [{'name': _('Customer Invoices'), 'type': 'sale', 'code': _('INV'), 'favorite': True, 'color': 11, 'sequence': 5},
{'name': _('Vendor Bills'), 'type': 'purchase', 'code': _('BILL'), 'favorite': True, 'color': 11, 'sequence': 6},
{'name': _('Miscellaneous Operations'), 'type': 'general', 'code': _('MISC'), 'favorite': True, 'sequence': 7},
{'name': _('Exchange Difference'), 'type': 'general', 'code': _('EXCH'), 'favorite': False, 'sequence': 9},
{'name': _('Cash Basis Taxes'), 'type': 'general', 'code': _('CABA'), 'favorite': False, 'sequence': 10}]
        if journals_dict is not None:
journals.extend(journals_dict)
self.ensure_one()
journal_data = []
for journal in journals:
vals = {
'type': journal['type'],
'name': journal['name'],
'code': journal['code'],
'company_id': company.id,
'default_account_id': _get_default_account(journal),
'show_on_dashboard': journal['favorite'],
'color': journal.get('color', False),
'sequence': journal['sequence']
}
journal_data.append(vals)
return journal_data
def generate_properties(self, acc_template_ref, company):
"""
        This method is used for creating properties.
        :param acc_template_ref: Mapping between ids of account templates and real accounts created from them
        :param company: company to generate properties for.
:returns: True
"""
self.ensure_one()
PropertyObj = self.env['ir.property']
todo_list = [
('property_account_receivable_id', 'res.partner'),
('property_account_payable_id', 'res.partner'),
('property_account_expense_categ_id', 'product.category'),
('property_account_income_categ_id', 'product.category'),
('property_account_expense_id', 'product.template'),
('property_account_income_id', 'product.template'),
('property_tax_payable_account_id', 'account.tax.group'),
('property_tax_receivable_account_id', 'account.tax.group'),
('property_advance_tax_payment_account_id', 'account.tax.group'),
]
for field, model in todo_list:
account = self[field]
value = acc_template_ref[account.id] if account else False
if value:
PropertyObj._set_default(field, model, value, company=company)
stock_properties = [
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for stock_property in stock_properties:
account = getattr(self, stock_property)
value = account and acc_template_ref[account.id] or False
if value:
company.write({stock_property: value})
return True
def _install_template(self, company, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None):
""" Recursively load the template objects and create the real objects from them.
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
        :returns: tuple containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
        :rtype: tuple(dict, dict)
"""
self.ensure_one()
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if self.parent_id:
tmp1, tmp2 = self.parent_id._install_template(company, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
# Ensure, even if individually, that everything is translated according to the company's language.
tmp1, tmp2 = self.with_context(lang=company.partner_id.lang)._load_template(company, code_digits=code_digits, account_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
return acc_ref, taxes_ref
def _load_template(self, company, code_digits=None, account_ref=None, taxes_ref=None):
""" Generate all the objects from the templates
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
        :param account_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
        :returns: tuple containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
        :rtype: tuple(dict, dict)
"""
self.ensure_one()
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if not code_digits:
code_digits = self.code_digits
AccountTaxObj = self.env['account.tax']
# Generate taxes from templates.
generated_tax_res = self.with_context(active_test=False).tax_template_ids._generate_tax(company)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = self.generate_account(taxes_ref, account_ref, code_digits, company)
account_ref.update(account_template_ref)
# Generate account groups, from template
self.generate_account_groups(company)
# writing account values after creation of accounts
for key, value in generated_tax_res['account_dict']['account.tax'].items():
if value['cash_basis_transition_account_id']:
AccountTaxObj.browse(key).write({
'cash_basis_transition_account_id': account_ref.get(value['cash_basis_transition_account_id'], False),
})
AccountTaxRepartitionLineObj = self.env['account.tax.repartition.line']
for key, value in generated_tax_res['account_dict']['account.tax.repartition.line'].items():
if value['account_id']:
AccountTaxRepartitionLineObj.browse(key).write({
'account_id': account_ref.get(value['account_id']),
})
# Set the company accounts
self._load_company_accounts(account_ref, company)
# Create Journals - Only done for root chart template
if not self.parent_id:
self.generate_journals(account_ref, company)
# generate properties function
self.generate_properties(account_ref, company)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
self.generate_fiscal_position(taxes_ref, account_ref, company)
# Generate account operation template templates
self.generate_account_reconcile_model(taxes_ref, account_ref, company)
return account_ref, taxes_ref
def _load_company_accounts(self, account_ref, company):
# Set the default accounts on the company
accounts = {
'default_cash_difference_income_account_id': self.default_cash_difference_income_account_id.id,
'default_cash_difference_expense_account_id': self.default_cash_difference_expense_account_id.id,
'account_journal_suspense_account_id': self.account_journal_suspense_account_id.id,
'account_journal_payment_debit_account_id': self.account_journal_payment_debit_account_id.id,
'account_journal_payment_credit_account_id': self.account_journal_payment_credit_account_id.id,
'account_cash_basis_base_account_id': self.property_cash_basis_base_account_id.id,
'account_default_pos_receivable_account_id': self.default_pos_receivable_account_id.id,
'income_currency_exchange_account_id': self.income_currency_exchange_account_id.id,
'expense_currency_exchange_account_id': self.expense_currency_exchange_account_id.id,
}
values = {}
# The loop is to avoid writing when we have no values, thus avoiding erasing the account from the parent
for key, account in accounts.items():
if account_ref.get(account):
values[key] = account_ref.get(account)
company.write(values)
def create_record_with_xmlid(self, company, template, model, vals):
return self._create_records_with_xmlid(model, [(template, vals)], company).id
def _create_records_with_xmlid(self, model, template_vals, company):
""" Create records for the given model name with the given vals, and
create xml ids based on each record's template and company id.
"""
if not template_vals:
return self.env[model]
template_model = template_vals[0][0]
template_ids = [template.id for template, vals in template_vals]
template_xmlids = template_model.browse(template_ids).get_external_id()
data_list = []
for template, vals in template_vals:
module, name = template_xmlids[template.id].split('.', 1)
xml_id = "%s.%s_%s" % (module, company.id, name)
data_list.append(dict(xml_id=xml_id, values=vals, noupdate=True))
return self.env[model]._load_records(data_list)
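    # Illustrative example (hypothetical ids, not part of the original module): a template
    # with xml id 'l10n_xx.tax_template_sale_20' instantiated for company id 3 produces a
    # record whose xml id is 'l10n_xx.3_tax_template_sale_20', following the pattern above.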
@api.model
def _load_records(self, data_list, update=False):
        # When creating a chart template, also create, for the liquidity transfer account:
        # - an account.account.template: this allows defining account.reconcile.model.template objects referring to that liquidity transfer
        #   account even though it does not exist in any xml file
        # - an entry in ir_model_data: this allows still using the method create_record_with_xmlid() and making no difference between
        #   regular accounts and that liquidity transfer account
records = super(AccountChartTemplate, self)._load_records(data_list, update)
account_data_list = []
for data, record in zip(data_list, records):
# Create the transfer account only for leaf chart template in the hierarchy.
if record.parent_id:
continue
if data.get('xml_id'):
account_xml_id = data['xml_id'] + '_liquidity_transfer'
if not self.env.ref(account_xml_id, raise_if_not_found=False):
account_vals = record._prepare_transfer_account_template()
account_data_list.append(dict(
xml_id=account_xml_id,
values=account_vals,
noupdate=data.get('noupdate'),
))
self.env['account.account.template']._load_records(account_data_list, update)
return records
def _get_account_vals(self, company, account_template, code_acc, tax_template_ref):
""" This method generates a dictionary of all the values for the account that will be created.
"""
self.ensure_one()
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
val = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'user_type_id': account_template.user_type_id and account_template.user_type_id.id or False,
'reconcile': account_template.reconcile,
'note': account_template.note,
'tax_ids': [(6, 0, tax_ids)],
'company_id': company.id,
'tag_ids': [(6, 0, [t.id for t in account_template.tag_ids])],
}
return val
def generate_account(self, tax_template_ref, acc_template_ref, code_digits, company):
""" This method generates accounts from account templates.
        :param tax_template_ref: Taxes templates reference for writing taxes_id in account.account.
        :param acc_template_ref: dictionary containing the mapping between the account templates and generated accounts (will be populated)
        :param code_digits: number of digits to use for account code.
        :param company: company to generate accounts for.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_tmpl_obj = self.env['account.account.template']
acc_template = account_tmpl_obj.search([('nocreate', '!=', True), ('chart_template_id', '=', self.id)], order='id')
template_vals = []
for account_template in acc_template:
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
if code_main > 0 and code_main <= code_digits:
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
vals = self._get_account_vals(company, account_template, code_acc, tax_template_ref)
template_vals.append((account_template, vals))
accounts = self._create_records_with_xmlid('account.account', template_vals, company)
for template, account in zip(acc_template, accounts):
acc_template_ref[template.id] = account.id
return acc_template_ref
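    # Illustrative example (hypothetical values, not part of the original module): with
    # code_digits = 6, a template code '4011' is right-padded to '401100' by the loop above
    # before the account is created.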
def generate_account_groups(self, company):
""" This method generates account groups from account groups templates.
:param company: company to generate the account groups for
"""
self.ensure_one()
group_templates = self.env['account.group.template'].search([('chart_template_id', '=', self.id)])
template_vals = []
for group_template in group_templates:
vals = {
'name': group_template.name,
'code_prefix_start': group_template.code_prefix_start,
'code_prefix_end': group_template.code_prefix_end,
'company_id': company.id,
}
template_vals.append((group_template, vals))
groups = self._create_records_with_xmlid('account.group', template_vals, company)
def _prepare_reconcile_model_vals(self, company, account_reconcile_model, acc_template_ref, tax_template_ref):
""" This method generates a dictionary of all the values for the account.reconcile.model that will be created.
"""
self.ensure_one()
account_reconcile_model_lines = self.env['account.reconcile.model.line.template'].search([
('model_id', '=', account_reconcile_model.id)
])
return {
'name': account_reconcile_model.name,
'sequence': account_reconcile_model.sequence,
'company_id': company.id,
'rule_type': account_reconcile_model.rule_type,
'auto_reconcile': account_reconcile_model.auto_reconcile,
'to_check': account_reconcile_model.to_check,
'match_journal_ids': [(6, None, account_reconcile_model.match_journal_ids.ids)],
'match_nature': account_reconcile_model.match_nature,
'match_amount': account_reconcile_model.match_amount,
'match_amount_min': account_reconcile_model.match_amount_min,
'match_amount_max': account_reconcile_model.match_amount_max,
'match_label': account_reconcile_model.match_label,
'match_label_param': account_reconcile_model.match_label_param,
'match_note': account_reconcile_model.match_note,
'match_note_param': account_reconcile_model.match_note_param,
'match_transaction_type': account_reconcile_model.match_transaction_type,
'match_transaction_type_param': account_reconcile_model.match_transaction_type_param,
'match_same_currency': account_reconcile_model.match_same_currency,
'match_total_amount': account_reconcile_model.match_total_amount,
'match_total_amount_param': account_reconcile_model.match_total_amount_param,
'match_partner': account_reconcile_model.match_partner,
'match_partner_ids': [(6, None, account_reconcile_model.match_partner_ids.ids)],
'match_partner_category_ids': [(6, None, account_reconcile_model.match_partner_category_ids.ids)],
'line_ids': [(0, 0, {
'account_id': acc_template_ref[line.account_id.id],
'label': line.label,
'amount_type': line.amount_type,
'force_tax_included': line.force_tax_included,
'amount_string': line.amount_string,
'tax_ids': [[4, tax_template_ref[tax.id], 0] for tax in line.tax_ids],
}) for line in account_reconcile_model_lines],
}
def generate_account_reconcile_model(self, tax_template_ref, acc_template_ref, company):
""" This method creates account reconcile models
        :param tax_template_ref: Taxes templates reference for writing taxes_id in account.account.
        :param acc_template_ref: dictionary with the mapping between the account templates and the real accounts.
        :param company: company to create models for
:returns: return new_account_reconcile_model for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_reconcile_models = self.env['account.reconcile.model.template'].search([
('chart_template_id', '=', self.id)
])
for account_reconcile_model in account_reconcile_models:
vals = self._prepare_reconcile_model_vals(company, account_reconcile_model, acc_template_ref, tax_template_ref)
self.create_record_with_xmlid(company, account_reconcile_model, 'account.reconcile.model', vals)
# Create a default rule for the reconciliation widget matching invoices automatically.
self.env['account.reconcile.model'].sudo().create({
"name": _('Invoices Matching Rule'),
"sequence": '1',
"rule_type": 'invoice_matching',
"auto_reconcile": False,
"match_nature": 'both',
"match_same_currency": True,
"match_total_amount": True,
"match_total_amount_param": 100,
"match_partner": True,
"company_id": company.id,
})
return True
def _get_fp_vals(self, company, position):
return {
'company_id': company.id,
'sequence': position.sequence,
'name': position.name,
'note': position.note,
'auto_apply': position.auto_apply,
'vat_required': position.vat_required,
'country_id': position.country_id.id,
'country_group_id': position.country_group_id.id,
'state_ids': position.state_ids and [(6,0, position.state_ids.ids)] or [],
'zip_from': position.zip_from,
'zip_to': position.zip_to,
}
def generate_fiscal_position(self, tax_template_ref, acc_template_ref, company):
""" This method generates Fiscal Position, Fiscal Position Accounts
and Fiscal Position Taxes from templates.
        :param tax_template_ref: Taxes templates reference for generating account.fiscal.position.tax.
        :param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
        :param company: the company to generate fiscal position data for
:returns: True
"""
self.ensure_one()
positions = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)])
# first create fiscal positions in batch
template_vals = []
for position in positions:
fp_vals = self._get_fp_vals(company, position)
template_vals.append((position, fp_vals))
fps = self._create_records_with_xmlid('account.fiscal.position', template_vals, company)
# then create fiscal position taxes and accounts
tax_template_vals = []
account_template_vals = []
for position, fp in zip(positions, fps):
for tax in position.tax_ids:
tax_template_vals.append((tax, {
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': fp.id,
}))
for acc in position.account_ids:
account_template_vals.append((acc, {
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': fp.id,
}))
self._create_records_with_xmlid('account.fiscal.position.tax', tax_template_vals, company)
self._create_records_with_xmlid('account.fiscal.position.account', account_template_vals, company)
return True
class AccountTaxTemplate(models.Model):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_order = 'id'
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Tax Name', required=True)
type_tax_use = fields.Selection(TYPE_TAX_USE, string='Tax Type', required=True, default="sale",
help="Determines where the tax is selectable. Note : 'None' means a tax can't be used by itself, however it can still be used in a group.")
tax_scope = fields.Selection([('service', 'Service'), ('consu', 'Consumable')], help="Restrict the use of taxes to a type of product.")
amount_type = fields.Selection(default='percent', string="Tax Computation", required=True,
selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included')])
active = fields.Boolean(default=True, help="Set active to false to hide the tax without removing it.")
children_tax_ids = fields.Many2many('account.tax.template', 'account_tax_template_filiation_rel', 'parent_tax', 'child_tax', string='Children Taxes')
sequence = fields.Integer(required=True, default=1,
help="The sequence field is used to define order in which the tax lines are applied.")
amount = fields.Float(required=True, digits=(16, 4), default=0)
description = fields.Char(string='Display on Invoices')
price_include = fields.Boolean(string='Included in Price', default=False,
help="Check this if the price you use on the product and invoices includes this tax.")
include_base_amount = fields.Boolean(string='Affect Subsequent Taxes', default=False,
help="If set, taxes with a higher sequence than this one will be affected by it, provided they accept it.")
is_base_affected = fields.Boolean(
string="Base Affected by Previous Taxes",
default=True,
help="If set, taxes with a lower sequence might affect this one, provided they try to do it.")
analytic = fields.Boolean(string="Analytic Cost", help="If set, the amount computed by this tax will be assigned to the same analytic account as the invoice line (if any)")
invoice_repartition_line_ids = fields.One2many(string="Repartition for Invoices", comodel_name="account.tax.repartition.line.template", inverse_name="invoice_tax_id", copy=True, help="Repartition when the tax is used on an invoice")
refund_repartition_line_ids = fields.One2many(string="Repartition for Refund Invoices", comodel_name="account.tax.repartition.line.template", inverse_name="refund_tax_id", copy=True, help="Repartition when the tax is used on a refund")
tax_group_id = fields.Many2one('account.tax.group', string="Tax Group")
tax_exigibility = fields.Selection(
[('on_invoice', 'Based on Invoice'),
('on_payment', 'Based on Payment'),
], string='Tax Due', default='on_invoice',
help="Based on Invoice: the tax is due as soon as the invoice is validated.\n"
"Based on Payment: the tax is due as soon as the payment of the invoice is received.")
cash_basis_transition_account_id = fields.Many2one(
comodel_name='account.account.template',
string="Cash Basis Transition Account",
domain=[('deprecated', '=', False)],
help="Account used to transition the tax amount for cash basis taxes. It will contain the tax amount as long as the original invoice has not been reconciled ; at reconciliation, this amount cancelled on this account and put on the regular tax account.")
_sql_constraints = [
('name_company_uniq', 'unique(name, type_tax_use, tax_scope, chart_template_id)', 'Tax names must be unique !'),
]
@api.depends('name', 'description')
def name_get(self):
res = []
for record in self:
name = record.description and record.description or record.name
res.append((record.id, name))
return res
def _get_tax_vals(self, company, tax_template_to_tax):
""" This method generates a dictionary of all the values for the tax that will be created.
"""
# Compute children tax ids
children_ids = []
for child_tax in self.children_tax_ids:
if tax_template_to_tax.get(child_tax.id):
children_ids.append(tax_template_to_tax[child_tax.id])
self.ensure_one()
val = {
'name': self.name,
'type_tax_use': self.type_tax_use,
'tax_scope': self.tax_scope,
'amount_type': self.amount_type,
'active': self.active,
'company_id': company.id,
'sequence': self.sequence,
'amount': self.amount,
'description': self.description,
'price_include': self.price_include,
'include_base_amount': self.include_base_amount,
'is_base_affected': self.is_base_affected,
'analytic': self.analytic,
'children_tax_ids': [(6, 0, children_ids)],
'tax_exigibility': self.tax_exigibility,
}
# We add repartition lines if there are some, so that if there are none,
# default_get is called and creates the default ones properly.
if self.invoice_repartition_line_ids:
val['invoice_repartition_line_ids'] = self.invoice_repartition_line_ids.get_repartition_line_create_vals(company)
if self.refund_repartition_line_ids:
val['refund_repartition_line_ids'] = self.refund_repartition_line_ids.get_repartition_line_create_vals(company)
if self.tax_group_id:
val['tax_group_id'] = self.tax_group_id.id
return val
def _generate_tax(self, company):
""" This method generate taxes from templates.
:param company: the company for which the taxes should be created from templates in self
:returns: {
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
# default_company_id is needed in context to allow creation of default
# repartition lines on taxes
ChartTemplate = self.env['account.chart.template'].with_context(default_company_id=company.id)
todo_dict = {'account.tax': {}, 'account.tax.repartition.line': {}}
tax_template_to_tax = {}
templates_todo = list(self)
while templates_todo:
templates = templates_todo
templates_todo = []
# create taxes in batch
tax_template_vals = []
for template in templates:
if all(child.id in tax_template_to_tax for child in template.children_tax_ids):
vals = template._get_tax_vals(company, tax_template_to_tax)
if self.chart_template_id.country_id:
vals['country_id'] = self.chart_template_id.country_id.id
elif company.account_fiscal_country_id:
vals['country_id'] = company.account_fiscal_country_id.id
else:
# Will happen for generic CoAs such as syscohada (they are available for multiple countries, and don't have any country_id)
raise UserError(_("Please first define a fiscal country for company %s.", company.name))
tax_template_vals.append((template, vals))
else:
# defer the creation of this tax to the next batch
templates_todo.append(template)
taxes = ChartTemplate._create_records_with_xmlid('account.tax', tax_template_vals, company)
# fill in tax_template_to_tax and todo_dict
for tax, (template, vals) in zip(taxes, tax_template_vals):
tax_template_to_tax[template.id] = tax.id
# Since the accounts have not been created yet, we have to wait before filling these fields
todo_dict['account.tax'][tax.id] = {
'cash_basis_transition_account_id': template.cash_basis_transition_account_id.id,
}
# We also have to delay the assignation of accounts to repartition lines
# The below code assigns the account_id to the repartition lines according
# to the corresponding repartition line in the template, based on the order.
# As we just created the repartition lines, tax.invoice_repartition_line_ids is not well sorted.
# But we can force the sort by calling sort()
all_tax_rep_lines = tax.invoice_repartition_line_ids.sorted() + tax.refund_repartition_line_ids.sorted()
all_template_rep_lines = template.invoice_repartition_line_ids + template.refund_repartition_line_ids
for i in range(0, len(all_template_rep_lines)):
# We assume template and tax repartition lines are in the same order
template_account = all_template_rep_lines[i].account_id
if template_account:
todo_dict['account.tax.repartition.line'][all_tax_rep_lines[i].id] = {
'account_id': template_account.id,
}
if any(template.tax_exigibility == 'on_payment' for template in self):
# When a CoA is being installed automatically and if it is creating account tax(es) whose field `Use Cash Basis`(tax_exigibility) is set to True by default
# (example of such CoA's are l10n_fr and l10n_mx) then in the `Accounting Settings` the option `Cash Basis` should be checked by default.
company.tax_exigibility = True
return {
'tax_template_to_tax': tax_template_to_tax,
'account_dict': todo_dict
}
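    # Illustrative shape of the returned value (hypothetical ids, not part of the original module):
    #     {
    #         'tax_template_to_tax': {<template_id>: <tax_id>, ...},
    #         'account_dict': {
    #             'account.tax': {<tax_id>: {'cash_basis_transition_account_id': <template_account_id>}},
    #             'account.tax.repartition.line': {<line_id>: {'account_id': <template_account_id>}},
    #         },
    #     }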
# Tax Repartition Line Template
class AccountTaxRepartitionLineTemplate(models.Model):
_name = "account.tax.repartition.line.template"
_description = "Tax Repartition Line Template"
factor_percent = fields.Float(string="%", required=True, help="Factor to apply on the account move lines generated from this distribution line, in percents")
repartition_type = fields.Selection(string="Based On", selection=[('base', 'Base'), ('tax', 'of tax')], required=True, default='tax', help="Base on which the factor will be applied.")
account_id = fields.Many2one(string="Account", comodel_name='account.account.template', help="Account on which to post the tax amount")
invoice_tax_id = fields.Many2one(comodel_name='account.tax.template', help="The tax set to apply this distribution on invoices. Mutually exclusive with refund_tax_id")
refund_tax_id = fields.Many2one(comodel_name='account.tax.template', help="The tax set to apply this distribution on refund invoices. Mutually exclusive with invoice_tax_id")
tag_ids = fields.Many2many(string="Financial Tags", relation='account_tax_repartition_financial_tags', comodel_name='account.account.tag', copy=True, help="Additional tags that will be assigned by this repartition line for use in financial reports")
use_in_tax_closing = fields.Boolean(string="Tax Closing Entry")
# These last two fields are helpers used to ease the declaration of account.account.tag objects in XML.
# They are directly linked to account.tax.report.line objects, which create corresponding + and - tags
# at creation. This way, we avoid declaring + and - separately every time.
plus_report_line_ids = fields.Many2many(string="Plus Tax Report Lines", relation='account_tax_repartition_plus_report_line', comodel_name='account.tax.report.line', copy=True, help="Tax report lines whose '+' tag will be assigned to move lines by this repartition line")
minus_report_line_ids = fields.Many2many(string="Minus Report Lines", relation='account_tax_repartition_minus_report_line', comodel_name='account.tax.report.line', copy=True, help="Tax report lines whose '-' tag will be assigned to move lines by this repartition line")
@api.model
def create(self, vals):
if vals.get('plus_report_line_ids'):
vals['plus_report_line_ids'] = self._convert_tag_syntax_to_orm(vals['plus_report_line_ids'])
if vals.get('minus_report_line_ids'):
vals['minus_report_line_ids'] = self._convert_tag_syntax_to_orm(vals['minus_report_line_ids'])
if vals.get('tag_ids'):
vals['tag_ids'] = self._convert_tag_syntax_to_orm(vals['tag_ids'])
if vals.get('use_in_tax_closing') is None:
if not vals.get('account_id'):
vals['use_in_tax_closing'] = False
else:
internal_group = self.env['account.account.template'].browse(vals.get('account_id')).user_type_id.internal_group
vals['use_in_tax_closing'] = not (internal_group == 'income' or internal_group == 'expense')
return super(AccountTaxRepartitionLineTemplate, self).create(vals)
@api.model
def _convert_tag_syntax_to_orm(self, tags_list):
""" Repartition lines give the possibility to directly give
a list of ids to create for tags instead of a list of ORM commands.
This function checks that tags_list uses this syntactic sugar and returns
an ORM-compliant version of it if it does.
"""
if tags_list and all(isinstance(elem, int) for elem in tags_list):
return [(6, False, tags_list)]
return tags_list
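    # Illustrative example (not part of the original module): a plain list such as [1, 2, 3]
    # is converted to [(6, False, [1, 2, 3])], while a list already containing ORM commands
    # is returned unchanged.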
@api.constrains('invoice_tax_id', 'refund_tax_id')
def validate_tax_template_link(self):
for record in self:
if record.invoice_tax_id and record.refund_tax_id:
raise ValidationError(_("Tax distribution line templates should apply to either invoices or refunds, not both at the same time. invoice_tax_id and refund_tax_id should not be set together."))
@api.constrains('plus_report_line_ids', 'minus_report_line_ids')
def validate_tags(self):
all_tax_rep_lines = self.mapped('plus_report_line_ids') + self.mapped('minus_report_line_ids')
lines_without_tag = all_tax_rep_lines.filtered(lambda x: not x.tag_name)
if lines_without_tag:
raise ValidationError(_("The following tax report lines are used in some tax distribution template though they don't generate any tag: %s . This probably means you forgot to set a tag_name on these lines.", str(lines_without_tag.mapped('name'))))
def get_repartition_line_create_vals(self, company):
rslt = [(5, 0, 0)]
for record in self:
tags_to_add = self.env['account.account.tag']
tags_to_add += record.plus_report_line_ids.mapped('tag_ids').filtered(lambda x: not x.tax_negate)
tags_to_add += record.minus_report_line_ids.mapped('tag_ids').filtered(lambda x: x.tax_negate)
tags_to_add += record.tag_ids
rslt.append((0, 0, {
'factor_percent': record.factor_percent,
'repartition_type': record.repartition_type,
'tag_ids': [(6, 0, tags_to_add.ids)],
'company_id': company.id,
'use_in_tax_closing': record.use_in_tax_closing
}))
return rslt
# Fiscal Position Templates
class AccountFiscalPositionTemplate(models.Model):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
sequence = fields.Integer()
name = fields.Char(string='Fiscal Position Template', required=True)
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
account_ids = fields.One2many('account.fiscal.position.account.template', 'position_id', string='Account Mapping')
tax_ids = fields.One2many('account.fiscal.position.tax.template', 'position_id', string='Tax Mapping')
note = fields.Text(string='Notes')
auto_apply = fields.Boolean(string='Detect Automatically', help="Apply automatically this fiscal position.")
vat_required = fields.Boolean(string='VAT required', help="Apply only if partner has a VAT number.")
country_id = fields.Many2one('res.country', string='Country',
help="Apply only if delivery country matches.")
country_group_id = fields.Many2one('res.country.group', string='Country Group',
help="Apply only if delivery country matches the group.")
state_ids = fields.Many2many('res.country.state', string='Federal States')
zip_from = fields.Char(string='Zip Range From')
zip_to = fields.Char(string='Zip Range To')
class AccountFiscalPositionTaxTemplate(models.Model):
_name = 'account.fiscal.position.tax.template'
_description = 'Tax Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Position', required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax.template', string='Tax Source', required=True)
tax_dest_id = fields.Many2one('account.tax.template', string='Replacement Tax')
class AccountFiscalPositionAccountTemplate(models.Model):
_name = 'account.fiscal.position.account.template'
_description = 'Accounts Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Mapping', required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account.template', string='Account Source', required=True)
account_dest_id = fields.Many2one('account.account.template', string='Account Destination', required=True)
class AccountReconcileModelTemplate(models.Model):
_name = "account.reconcile.model.template"
_description = 'Reconcile Model Template'
# Base fields.
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Button Label', required=True)
sequence = fields.Integer(required=True, default=10)
rule_type = fields.Selection(selection=[
('writeoff_button', 'Manually create a write-off on clicked button'),
('writeoff_suggestion', 'Suggest a write-off'),
('invoice_matching', 'Match existing invoices/bills')
], string='Type', default='writeoff_button', required=True)
auto_reconcile = fields.Boolean(string='Auto-validate',
help='Validate the statement line automatically (reconciliation based on your rule).')
to_check = fields.Boolean(string='To Check', default=False, help='This matching rule is used when the user is not certain of all the information of the counterpart.')
matching_order = fields.Selection(
selection=[
('old_first', 'Oldest first'),
('new_first', 'Newest first'),
]
)
# ===== Conditions =====
match_text_location_label = fields.Boolean(
default=True,
help="Search in the Statement's Label to find the Invoice/Payment's reference",
)
match_text_location_note = fields.Boolean(
default=False,
help="Search in the Statement's Note to find the Invoice/Payment's reference",
)
match_text_location_reference = fields.Boolean(
default=False,
help="Search in the Statement's Reference to find the Invoice/Payment's reference",
)
match_journal_ids = fields.Many2many('account.journal', string='Journals',
domain="[('type', 'in', ('bank', 'cash'))]",
help='The reconciliation model will only be available from the selected journals.')
match_nature = fields.Selection(selection=[
('amount_received', 'Amount Received'),
('amount_paid', 'Amount Paid'),
('both', 'Amount Paid/Received')
], string='Amount Nature', required=True, default='both',
help='''The reconciliation model will only be applied to the selected transaction type:
* Amount Received: Only applied when receiving an amount.
* Amount Paid: Only applied when paying an amount.
* Amount Paid/Received: Applied in both cases.''')
match_amount = fields.Selection(selection=[
('lower', 'Is Lower Than'),
('greater', 'Is Greater Than'),
('between', 'Is Between'),
], string='Amount',
help='The reconciliation model will only be applied when the amount is lower than, greater than, or between the specified amount(s).')
match_amount_min = fields.Float(string='Amount Min Parameter')
match_amount_max = fields.Float(string='Amount Max Parameter')
match_label = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Label', help='''The reconciliation model will only be applied when the label:
* Contains: The proposition label must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_label_param = fields.Char(string='Label Parameter')
match_note = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Note', help='''The reconciliation model will only be applied when the note:
* Contains: The proposition note must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_note_param = fields.Char(string='Note Parameter')
match_transaction_type = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Transaction Type', help='''The reconciliation model will only be applied when the transaction type:
* Contains: The proposition transaction type must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_transaction_type_param = fields.Char(string='Transaction Type Parameter')
match_same_currency = fields.Boolean(string='Same Currency Matching', default=True,
help='Restrict to propositions having the same currency as the statement line.')
match_total_amount = fields.Boolean(string='Amount Matching', default=True,
help='The sum of total residual amount propositions matches the statement line amount.')
match_total_amount_param = fields.Float(string='Amount Matching %', default=100,
help='The sum of total residual amount propositions matches the statement line amount under this percentage.')
match_partner = fields.Boolean(string='Partner Is Set',
help='The reconciliation model will only be applied when a customer/vendor is set.')
match_partner_ids = fields.Many2many('res.partner', string='Restrict Partners to',
help='The reconciliation model will only be applied to the selected customers/vendors.')
match_partner_category_ids = fields.Many2many('res.partner.category', string='Restrict Partner Categories to',
help='The reconciliation model will only be applied to the selected customer/vendor categories.')
line_ids = fields.One2many('account.reconcile.model.line.template', 'model_id')
decimal_separator = fields.Char(help="Every character that is neither a digit nor this separator will be removed from the matching string")
class AccountReconcileModelLineTemplate(models.Model):
_name = "account.reconcile.model.line.template"
_description = 'Reconcile Model Line Template'
model_id = fields.Many2one('account.reconcile.model.template')
sequence = fields.Integer(required=True, default=10)
account_id = fields.Many2one('account.account.template', string='Account', ondelete='cascade', domain=[('deprecated', '=', False)])
label = fields.Char(string='Journal Item Label')
amount_type = fields.Selection([
('fixed', 'Fixed'),
('percentage', 'Percentage of balance'),
('regex', 'From label'),
], required=True, default='percentage')
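# Depending on amount_type, amount_string holds a fixed amount, a percentage
# of the balance, or a regular expression used to extract the amount from the label.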
amount_string = fields.Char(string="Amount")
force_tax_included = fields.Boolean(string='Tax Included in Price', help='Force the tax to be managed as a price included tax.')
tax_ids = fields.Many2many('account.tax.template', string='Taxes', ondelete='restrict')
| gpl-3.0 | -6,534,800,096,511,794,000 | 55.595583 | 274 | 0.642282 | false |
vpol/gitinspector | gitinspector/version.py | 1 | 1300 | # coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from gitinspector import localization
localization.init()
__version__ = "0.3.2"
__doc__ = _("""Copyright © 2012-2014 Ejwa Software. All rights reserved.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Written by Adam Waldenberg.""")
def output():
print("gitinspector {0}\n".format(__version__) + __doc__)
| gpl-3.0 | 7,889,733,652,444,446,000 | 33.157895 | 78 | 0.74114 | false |
Brocade-OpenSource/OpenStack-DNRM-Neutron | neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py | 1 | 2121 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ext_gw_mode
Revision ID: 128e042a2b68
Revises: 32b517556ec9
Create Date: 2013-03-27 00:35:17.323280
"""
# revision identifiers, used by Alembic.
revision = '128e042a2b68'
down_revision = '32b517556ec9'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin',
'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
'neutron.plugins.nec.nec_plugin.NECPluginV2',
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
'neutron.plugins.niblick.interceptor_plugin.Interceptor',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
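# enable_snat lets a router disable source NAT on its external gateway;
# the column defaults to True so pre-existing routers keep the old always-SNAT behaviour.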
op.add_column('routers', sa.Column('enable_snat', sa.Boolean(),
nullable=False, default=True))
# Set enable_snat to True for existing routers
op.execute("UPDATE routers SET enable_snat=True")
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_column('routers', 'enable_snat')
| apache-2.0 | -8,302,952,998,911,602,000 | 32.140625 | 78 | 0.728901 | false |