repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
Notxor/Neuronal | neuronal/neuromotor.py | 1 | 3081 | # -*- coding: utf-8 -*-
# Neuronal - Framework for Neural Networks and Artificial Intelligence
#
# Copyright (C) 2012 dddddd <[email protected]>
# Copyright (C) 2012 Notxor <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from glioblasto import Glioblasto
class _Motor(Glioblasto):
"""Elemento de salida neuromotora."""
def __init__(self, neuromotor):
Glioblasto.__init__(self)
self.vias_aferentes = set()
self.neuromotor = neuromotor
class NeuroMotor(object):
"""Conjunto de sensores. Proporciona datos de entrada a una red."""
def __init__(self, cantidad_de_motores, red = None):
"""
        The motor neurons are kept in an immutable list (tuple), so they do
        not change over the lifetime of the neuromotor.
"""
self.motoras = tuple(
_Motor(self) for i in xrange(cantidad_de_motores)
)
self._red = None
if red is not None:
self._conectar_a_red_aferente(red)
def _conectar_a_red_aferente(self, red):
"""
        Creates as many output neurons in 'red' as there are motor elements
        in this neuromotor, and connects them (via output->motor synapses).
        It is convenient for those neurons to be the last ones in the
        nucleus's neuron list. The goal is for them to fire at the end of
        the 'cycle', reducing the number of passes the network will need.
        Therefore, ideally this function is called as the last step of
        network creation.
"""
n_conexiones = len(self.motoras)
        # Create neurons in the network to act as emitters.
nuevas_salidas = red.crear_neuronas_de_salida(n_conexiones)
        # Connect the new outputs (via synapses) to
        # ... the motor elements of this neuromotor.
for i in xrange(n_conexiones):
nuevas_salidas[i].crear_sinapsis_saliente(self.motoras[i])
        # Keep a reference to the network.
self._red = red
def _conectar_motorizacion(self, funciones):
"""
        Receives a list of functions and assigns each one to a motor neuron
        of the network, so that if an output is activated by the network,
        the code contained in the associated function is executed.
"""
        if len(funciones) != len(self.motoras):
            raise ValueError("The number of neurons does not match the number of actions.")
| agpl-3.0 | -3,130,294,366,610,631,700 | 40.486486 | 80 | 0.669381 | false |
dlenski/stravacli | stravacli/QueryGrabber.py | 1 | 1046 | from http.server import HTTPServer, BaseHTTPRequestHandler
import socket
from urllib.parse import parse_qs
class handler(BaseHTTPRequestHandler):
def do_GET(self):
self.server.received = parse_qs(self.path.split('?',1)[1])
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.response.encode())
class QueryGrabber(HTTPServer):
def __init__(self, response='', address=None):
self.received = None
self.response = response
        if address is not None:
            HTTPServer.__init__(self, address, handler)
else:
for port in range(1024,65536):
try:
HTTPServer.__init__(self, ('localhost', port), handler)
except socket.error as e:
                    if e.errno != 98:  # EADDRINUSE ("Address already in use")
raise
else:
break
            else:
                # Python 3 unbinds the except-clause name after the block, so
                # re-raising 'e' here would be a NameError; report the failure.
                raise RuntimeError('no free port available in range 1024-65535')
def root_uri(self):
return 'http://{}:{:d}'.format(*self.server_address)
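# Usage sketch (hypothetical OAuth-style flow): hand root_uri() to a provider
# as the redirect URI, then serve exactly one callback request.
#
#     server = QueryGrabber(response='You may close this window.')
#     print('listening on', server.root_uri())
#     server.handle_request()   # inherited HTTPServer method; blocks for one request
#     print('query parameters:', server.received)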
| gpl-3.0 | -4,080,119,067,574,555,600 | 33.866667 | 75 | 0.554493 | false |
hanlind/nova | nova/tests/functional/db/test_aggregate.py | 1 | 28761 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import mock
from oslo_db import exception as db_exc
from oslo_utils import timeutils
from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
import nova.objects.aggregate as aggregate_obj
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit.objects.test_objects import compare_obj as base_compare
from nova.tests import uuidsentinel
SUBS = {'metadata': 'metadetails'}
NOW = timeutils.utcnow().replace(microsecond=0)
def _get_fake_aggregate(db_id, in_api=True, result=True):
agg_map = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'id': db_id,
'uuid': getattr(uuidsentinel, str(db_id)),
'name': 'name_' + str(db_id),
}
if not in_api:
agg_map['deleted'] = False
if result:
agg_map['hosts'] = _get_fake_hosts(db_id)
agg_map['metadetails'] = _get_fake_metadata(db_id)
return agg_map
def _get_fake_hosts(db_id):
return ['constant_host', 'unique_host_' + str(db_id)]
def _get_fake_metadata(db_id):
return {'constant_key': 'constant_value',
'unique_key': 'unique_value_' + str(db_id)}
@db_api.api_context_manager.writer
def _create_aggregate(context, values=_get_fake_aggregate(1, result=False),
metadata=_get_fake_metadata(1)):
aggregate = api_models.Aggregate()
aggregate.update(values)
aggregate.save(context.session)
if metadata:
for key, value in metadata.items():
aggregate_metadata = api_models.AggregateMetadata()
aggregate_metadata.update({'key': key,
'value': value,
'aggregate_id': aggregate['id']})
aggregate_metadata.save(context.session)
return aggregate
@db_api.api_context_manager.writer
def _create_aggregate_with_hosts(context,
values=_get_fake_aggregate(1, result=False),
metadata=_get_fake_metadata(1),
hosts=_get_fake_hosts(1)):
aggregate = _create_aggregate(context, values, metadata)
for host in hosts:
host_model = api_models.AggregateHost()
host_model.update({'host': host,
'aggregate_id': aggregate.id})
host_model.save(context.session)
return aggregate
@db_api.api_context_manager.reader
def _aggregate_host_get_all(context, aggregate_id):
return context.session.query(api_models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
@db_api.api_context_manager.reader
def _aggregate_metadata_get_all(context, aggregate_id):
results = context.session.query(api_models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
metadata = {}
for r in results:
metadata[r['key']] = r['value']
return metadata
class AggregateObjectDbTestCase(test.TestCase):
def setUp(self):
super(AggregateObjectDbTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
def test_in_api(self):
ca1 = _create_aggregate(self.context, values={'name': 'fake_agg_1',
'id': 1,
'uuid': 'nonce'})
ca2 = db.aggregate_create(self.context, {'name': 'fake_agg_2',
'id': 2,
'uuid': 'nonce'})
api_db_agg = aggregate_obj.Aggregate.get_by_id(self.context, ca1['id'])
cell_db_agg = aggregate_obj.Aggregate.get_by_id(
self.context, ca2['id'])
self.assertTrue(api_db_agg.in_api)
self.assertFalse(cell_db_agg.in_api)
def test_aggregate_get_from_db(self):
result = _create_aggregate_with_hosts(self.context)
expected = aggregate_obj._aggregate_get_from_db(self.context,
result['id'])
self.assertEqual(_get_fake_hosts(1), expected.hosts)
self.assertEqual(_get_fake_metadata(1), expected['metadetails'])
def test_aggregate_get_from_db_by_uuid(self):
result = _create_aggregate_with_hosts(self.context)
expected = aggregate_obj._aggregate_get_from_db_by_uuid(
self.context, result['uuid'])
self.assertEqual(result.uuid, expected.uuid)
self.assertEqual(_get_fake_hosts(1), expected.hosts)
self.assertEqual(_get_fake_metadata(1), expected['metadetails'])
def test_aggregate_get_from_db_raise_not_found(self):
aggregate_id = 5
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_get_from_db,
self.context, aggregate_id)
def test_aggregate_get_all_from_db(self):
for c in range(3):
_create_aggregate(self.context,
values={'name': 'fake_aggregate_%d' % c})
results = aggregate_obj._get_all_from_db(self.context)
self.assertEqual(len(results), 3)
def test_aggregate_get_by_host_from_db(self):
_create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_1'},
hosts=['host.1.openstack.org'])
_create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_2'},
hosts=['host.1.openstack.org'])
_create_aggregate(self.context,
values={'name': 'no_host_aggregate'})
rh1 = aggregate_obj._get_all_from_db(self.context)
rh2 = aggregate_obj._get_by_host_from_db(self.context,
'host.1.openstack.org')
self.assertEqual(3, len(rh1))
self.assertEqual(2, len(rh2))
def test_aggregate_get_by_host_with_key_from_db(self):
ah1 = _create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_1'},
metadata={'goodkey': 'good'},
hosts=['host.1.openstack.org'])
_create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_2'},
hosts=['host.1.openstack.org'])
rh1 = aggregate_obj._get_by_host_from_db(self.context,
'host.1.openstack.org',
key='goodkey')
self.assertEqual(1, len(rh1))
self.assertEqual(ah1['id'], rh1[0]['id'])
def test_aggregate_get_by_metadata_key_from_db(self):
_create_aggregate(self.context,
values={'name': 'aggregate_1'},
metadata={'goodkey': 'good'})
_create_aggregate(self.context,
values={'name': 'aggregate_2'},
metadata={'goodkey': 'bad'})
_create_aggregate(self.context,
values={'name': 'aggregate_3'},
metadata={'badkey': 'good'})
rl1 = aggregate_obj._get_by_metadata_key_from_db(self.context,
key='goodkey')
self.assertEqual(2, len(rl1))
def test_aggregate_create_in_db(self):
fake_create_aggregate = {
'name': 'fake-aggregate',
}
agg = aggregate_obj._aggregate_create_in_db(self.context,
fake_create_aggregate)
result = aggregate_obj._aggregate_get_from_db(self.context,
agg.id)
self.assertEqual(result.name, fake_create_aggregate['name'])
def test_aggregate_create_in_db_with_metadata(self):
fake_create_aggregate = {
'name': 'fake-aggregate',
}
agg = aggregate_obj._aggregate_create_in_db(self.context,
fake_create_aggregate,
metadata={'goodkey': 'good'})
result = aggregate_obj._aggregate_get_from_db(self.context,
agg.id)
md = aggregate_obj._get_by_metadata_key_from_db(self.context,
key='goodkey')
self.assertEqual(len(md), 1)
self.assertEqual(md[0]['id'], agg.id)
self.assertEqual(result.name, fake_create_aggregate['name'])
def test_aggregate_create_raise_exist_exc(self):
fake_create_aggregate = {
'name': 'fake-aggregate',
}
aggregate_obj._aggregate_create_in_db(self.context,
fake_create_aggregate)
self.assertRaises(exception.AggregateNameExists,
aggregate_obj._aggregate_create_in_db,
self.context,
fake_create_aggregate,
metadata=None)
def test_aggregate_delete(self):
result = _create_aggregate(self.context, metadata=None)
aggregate_obj._aggregate_delete_from_db(self.context, result['id'])
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_get_from_db,
self.context, result['id'])
def test_aggregate_delete_raise_not_found(self):
# this does not exist!
aggregate_id = 45
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_delete_from_db,
self.context, aggregate_id)
def test_aggregate_delete_with_metadata(self):
result = _create_aggregate(self.context,
metadata={'availability_zone': 'fake_avail_zone'})
aggregate_obj._aggregate_delete_from_db(self.context, result['id'])
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_get_from_db,
self.context, result['id'])
def test_aggregate_update(self):
created = _create_aggregate(self.context,
metadata={'availability_zone': 'fake_avail_zone'})
result = aggregate_obj._aggregate_get_from_db(self.context,
created['id'])
self.assertEqual('fake_avail_zone', result['availability_zone'])
new_values = deepcopy(_get_fake_aggregate(1, result=False))
new_values['availability_zone'] = 'different_avail_zone'
updated = aggregate_obj._aggregate_update_to_db(self.context,
result['id'], new_values)
self.assertEqual('different_avail_zone', updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
result = _create_aggregate(self.context, metadata=None)
values = deepcopy(_get_fake_aggregate(1, result=False))
values['metadata'] = deepcopy(_get_fake_metadata(1))
values['availability_zone'] = 'different_avail_zone'
expected_metadata = deepcopy(values['metadata'])
expected_metadata['availability_zone'] = values['availability_zone']
aggregate_obj._aggregate_update_to_db(self.context, result['id'],
values)
metadata = _aggregate_metadata_get_all(self.context, result['id'])
updated = aggregate_obj._aggregate_get_from_db(self.context,
result['id'])
self.assertThat(metadata,
matchers.DictMatches(expected_metadata))
self.assertEqual('different_avail_zone', updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
result = _create_aggregate(self.context)
values = deepcopy(_get_fake_aggregate(1, result=False))
values['metadata'] = deepcopy(_get_fake_metadata(1))
values['metadata']['fake_key1'] = 'foo'
expected_metadata = deepcopy(values['metadata'])
aggregate_obj._aggregate_update_to_db(self.context, result['id'],
values)
metadata = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected_metadata))
def test_aggregate_update_zone_with_existing_metadata(self):
result = _create_aggregate(self.context)
new_zone = {'availability_zone': 'fake_avail_zone_2'}
metadata = deepcopy(_get_fake_metadata(1))
metadata.update(new_zone)
aggregate_obj._aggregate_update_to_db(self.context, result['id'],
new_zone)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
# this does not exist!
aggregate_id = 2
new_values = deepcopy(_get_fake_aggregate(1, result=False))
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_update_to_db,
self.context, aggregate_id, new_values)
def test_aggregate_update_raise_name_exist(self):
_create_aggregate(self.context, values={'name': 'test1'},
metadata={'availability_zone': 'fake_avail_zone'})
_create_aggregate(self.context, values={'name': 'test2'},
metadata={'availability_zone': 'fake_avail_zone'})
aggregate_id = 1
new_values = {'name': 'test2'}
self.assertRaises(exception.AggregateNameExists,
aggregate_obj._aggregate_update_to_db,
self.context, aggregate_id, new_values)
def test_aggregate_host_add_to_db(self):
result = _create_aggregate(self.context, metadata=None)
host = _get_fake_hosts(1)[0]
aggregate_obj._host_add_to_db(self.context, result['id'], host)
expected = aggregate_obj._aggregate_get_from_db(self.context,
result['id'])
self.assertEqual([_get_fake_hosts(1)[0]], expected.hosts)
def test_aggregate_host_re_add_to_db(self):
result = _create_aggregate_with_hosts(self.context,
metadata=None)
host = _get_fake_hosts(1)[0]
aggregate_obj._host_delete_from_db(self.context, result['id'], host)
aggregate_obj._host_add_to_db(self.context, result['id'], host)
expected = _aggregate_host_get_all(self.context, result['id'])
self.assertEqual(len(expected), 2)
def test_aggregate_host_add_to_db_duplicate_works(self):
r1 = _create_aggregate_with_hosts(self.context,
metadata=None)
r2 = _create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate2'},
metadata={'availability_zone': 'fake_avail_zone2'})
h1 = _aggregate_host_get_all(self.context, r1['id'])
self.assertEqual(len(h1), 2)
self.assertEqual(r1['id'], h1[0]['aggregate_id'])
h2 = _aggregate_host_get_all(self.context, r2['id'])
self.assertEqual(len(h2), 2)
self.assertEqual(r2['id'], h2[0]['aggregate_id'])
def test_aggregate_host_add_to_db_duplicate_raise_exist_exc(self):
result = _create_aggregate_with_hosts(self.context,
metadata=None)
self.assertRaises(exception.AggregateHostExists,
aggregate_obj._host_add_to_db,
self.context, result['id'],
_get_fake_hosts(1)[0])
def test_aggregate_host_add_to_db_raise_not_found(self):
# this does not exist!
aggregate_id = 1
host = _get_fake_hosts(1)[0]
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._host_add_to_db,
self.context, aggregate_id, host)
def test_aggregate_host_delete_from_db(self):
result = _create_aggregate_with_hosts(self.context,
metadata=None)
aggregate_obj._host_delete_from_db(self.context, result['id'],
_get_fake_hosts(1)[0])
expected = _aggregate_host_get_all(self.context, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_delete_from_db_raise_not_found(self):
result = _create_aggregate(self.context)
self.assertRaises(exception.AggregateHostNotFound,
aggregate_obj._host_delete_from_db,
self.context, result['id'],
_get_fake_hosts(1)[0])
def test_aggregate_metadata_add(self):
result = _create_aggregate(self.context, metadata=None)
metadata = deepcopy(_get_fake_metadata(1))
aggregate_obj._metadata_add_to_db(self.context, result['id'], metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_empty_metadata(self):
result = _create_aggregate(self.context, metadata=None)
metadata = {}
aggregate_obj._metadata_add_to_db(self.context, result['id'], metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_and_update(self):
result = _create_aggregate(self.context)
metadata = deepcopy(_get_fake_metadata(1))
key = list(metadata.keys())[0]
new_metadata = {key: 'foo',
'fake_new_key': 'fake_new_value'}
metadata.update(new_metadata)
aggregate_obj._metadata_add_to_db(self.context,
result['id'], new_metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_retry(self):
result = _create_aggregate(self.context, metadata=None)
with mock.patch('nova.db.sqlalchemy.api_models.'
'AggregateMetadata.__table__.insert') as insert_mock:
insert_mock.side_effect = db_exc.DBDuplicateEntry
self.assertRaises(db_exc.DBDuplicateEntry,
aggregate_obj._metadata_add_to_db,
self.context,
result['id'],
{'fake_key2': 'fake_value2'},
max_retries=5)
def test_aggregate_metadata_update(self):
result = _create_aggregate(self.context)
metadata = deepcopy(_get_fake_metadata(1))
key = list(metadata.keys())[0]
aggregate_obj._metadata_delete_from_db(self.context, result['id'], key)
new_metadata = {key: 'foo'}
aggregate_obj._metadata_add_to_db(self.context,
result['id'], new_metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
metadata[key] = 'foo'
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
result = _create_aggregate(self.context, metadata=None)
metadata = deepcopy(_get_fake_metadata(1))
aggregate_obj._metadata_add_to_db(self.context, result['id'], metadata)
aggregate_obj._metadata_delete_from_db(self.context, result['id'],
list(metadata.keys())[0])
expected = _aggregate_metadata_get_all(self.context, result['id'])
del metadata[list(metadata.keys())[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_remove_availability_zone(self):
result = _create_aggregate(self.context, metadata={'availability_zone':
'fake_avail_zone'})
aggregate_obj._metadata_delete_from_db(self.context,
result['id'],
'availability_zone')
aggr = aggregate_obj._aggregate_get_from_db(self.context, result['id'])
self.assertIsNone(aggr['availability_zone'])
def test_aggregate_metadata_delete_raise_not_found(self):
result = _create_aggregate(self.context)
self.assertRaises(exception.AggregateMetadataNotFound,
aggregate_obj._metadata_delete_from_db,
self.context, result['id'], 'foo_key')
def create_aggregate(context, db_id, in_api=True):
if in_api:
fake_aggregate = _get_fake_aggregate(db_id, in_api=False, result=False)
aggregate_obj._aggregate_create_in_db(context, fake_aggregate,
metadata=_get_fake_metadata(db_id))
for host in _get_fake_hosts(db_id):
aggregate_obj._host_add_to_db(context, fake_aggregate['id'], host)
else:
fake_aggregate = _get_fake_aggregate(db_id, in_api=False, result=False)
db.aggregate_create(context, fake_aggregate,
metadata=_get_fake_metadata(db_id))
for host in _get_fake_hosts(db_id):
db.aggregate_host_add(context, fake_aggregate['id'], host)
def compare_obj(test, result, source):
source['deleted'] = False
def updated_at_comparator(result, source):
return True
return base_compare(test, result, source, subs=SUBS,
comparators={'updated_at': updated_at_comparator})
class AggregateObjectCellTestCase(test.TestCase):
"""Tests for the case where all aggregate data is in Cell DB"""
def setUp(self):
super(AggregateObjectCellTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self._seed_data()
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i, in_api=False)
def test_get_by_id(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, _get_fake_aggregate(i))
def test_save(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['name'] = 'new-name' + str(i)
agg.name = 'new-name' + str(i)
agg.save()
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_update_metadata(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['metadetails'] = {'constant_key': 'constant_value'}
agg.update_metadata({'unique_key': None})
agg.save()
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_destroy(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
agg.destroy()
aggs = aggregate_obj.AggregateList.get_all(self.context)
self.assertEqual(len(aggs), 0)
def test_add_host(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['hosts'].append('barbar')
agg.add_host('barbar')
agg.save()
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_delete_host(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['hosts'].remove('constant_host')
agg.delete_host('constant_host')
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
class AggregateObjectApiTestCase(AggregateObjectCellTestCase):
"""Tests the aggregate in the case where all data is in the API DB"""
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
new_agg.create()
result = aggregate_obj.Aggregate.get_by_id(self.context, new_agg.id)
self.assertEqual(new_agg.name, result.name)
class AggregateObjectMixedTestCase(AggregateObjectCellTestCase):
"""Tests the aggregate in the case where data is in both databases"""
def _seed_data(self):
for i in range(1, 6):
create_aggregate(self.context, i)
for i in range(6, 10):
create_aggregate(self.context, i, in_api=False)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
self.assertRaises(exception.ObjectActionError,
new_agg.create)
class AggregateObjectMigrationTestCase(AggregateObjectCellTestCase):
"""Tests the aggregate in the case where data is migrated to the API db"""
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i, in_api=False)
aggregate_obj.migrate_aggregates(self.context, 50)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
new_agg.create()
result = aggregate_obj.Aggregate.get_by_id(self.context, new_agg.id)
self.assertEqual(new_agg.name, result.name)
class AggregateMigrationTestCase(test.TestCase):
def setUp(self):
super(AggregateMigrationTestCase, self).setUp()
self.context = context.get_admin_context()
def test_migration(self):
db.aggregate_create(self.context, {'name': 'foo'})
main_aggregates_len = len(db.aggregate_get_all(self.context))
match, done = aggregate_obj.migrate_aggregates(self.context, 50)
self.assertEqual(1, main_aggregates_len)
self.assertEqual(main_aggregates_len, match)
self.assertEqual(main_aggregates_len, done)
self.assertEqual(0, len(db.aggregate_get_all(self.context)))
self.assertEqual(main_aggregates_len,
len(aggregate_obj.AggregateList.get_all(
self.context)))
def test_migrate_aggregate_reset_autoincrement(self):
agg = aggregate_obj.Aggregate(self.context, name='foo')
agg.create()
match, done = aggregate_obj.migrate_aggregate_reset_autoincrement(
self.context, 0)
self.assertEqual(0, match)
self.assertEqual(0, done)
def test_migrate_aggregate_reset_autoincrement_no_aggregates(self):
# NOTE(danms): This validates the "or 0" default if there are no
# aggregates (and thus no max id).
match, done = aggregate_obj.migrate_aggregate_reset_autoincrement(
self.context, 0)
self.assertEqual(0, match)
self.assertEqual(0, done)
| apache-2.0 | -7,799,909,132,526,883,000 | 44.150706 | 79 | 0.575015 | false |
failys/CAIRIS | cairis/tools/PseudoClasses.py | 1 | 16629 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
from flask_restful import fields
from cairis.core.ReferenceSynopsis import ReferenceSynopsis
from cairis.core.ReferenceContribution import ReferenceContribution
__author__ = 'Robin Quetin, Shamal Faily'
obj_id_field = '__python_obj__'
def gen_class_metadata(class_ref):
return {"enum": [class_ref.__module__+'.'+class_ref.__name__]}
class CharacteristicReferenceSynopsis(object):
resource_fields = {
obj_id_field: fields.String,
"theSynopsis": fields.String,
"theDimension": fields.String,
"theActorType": fields.String,
"theActor": fields.String,
"theInitialSatisfaction" : fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, rsName='', rsDim='', rsActorType='', rsActor='', gSat=''):
self.theSynopsis = rsName
self.theDimension = rsDim
self.theActorType = rsActorType
self.theActor= rsActor
self.theInitialSatisfaction = gSat
def __getitem__(self,varName):
if (varName == 'theSynopsis'): return self.theSynopsis
elif (varName == 'theDimension'): return self.theDimension
elif (varName == 'theActorType'): return self.theActorType
elif (varName == 'theActor'): return self.theActor
elif (varName == 'theInitialSatisfaction'): return self.theInitialSatisfaction
else: return None
class CharacteristicReferenceContribution(object):
resource_fields = {
obj_id_field: fields.String,
"theMeansEnd": fields.String,
"theContribution": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, rcMe='', rcCont=''):
self.theMeansEnd = rcMe
self.theContribution = rcCont
def __getitem__(self,varName):
if (varName == 'theMeansEnd'): return self.theMeansEnd
elif (varName == 'theContribution'): return self.theContribution
else: return None
class CharacteristicReference(object):
resource_fields = {
obj_id_field: fields.String,
'theReferenceName' : fields.String,
'theCharacteristicType' : fields.String,
'theReferenceDescription' : fields.String,
'theDimensionName' : fields.String,
'theReferenceSynopsis' : fields.Nested(CharacteristicReferenceSynopsis.resource_fields),
'theReferenceContribution' : fields.Nested(CharacteristicReferenceContribution.resource_fields)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, refName=None, crTypeName='grounds', refDesc=None, dimName='document',rSyn=None,rCont=None):
"""
:type refName: str
:type crTypeName: str
:type refDesc: str
:type dimName: str
"""
self.theReferenceName = refName
self.theCharacteristicType = crTypeName
self.theReferenceDescription = refDesc
self.theDimensionName = dimName
self.theReferenceSynopsis = rSyn
self.theReferenceContribution = rCont
class Definition(object):
resource_fields = {
obj_id_field: fields.String,
'name': fields.String,
'value': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class Contributor(object):
resource_fields = {
obj_id_field: fields.String,
'firstName': fields.String,
'surname': fields.String,
'affiliation': fields.String,
'role': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, first_name=None, surname=None, affiliation=None, role=None, tuple_form=None):
"""
:type first_name: str
:type surname: str
:type affiliation: str
:type role: str
:type tuple_form: tuple
"""
if tuple_form is None:
self.firstName = first_name or ''
self.surname = surname or ''
self.affiliation = affiliation or ''
self.role = role or ''
else:
attrs = ['firstName', 'surname', 'affiliation', 'role']
for idx in range(0, len(tuple_form)):
self.__setattr__(attrs[idx], tuple_form[idx] or '')
class EnvironmentTensionModel(object):
resource_fields = {
obj_id_field: fields.String,
"base_attr_id": fields.Integer,
"attr_id": fields.Integer,
"value": fields.Integer,
"rationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
attr_dictionary = {
'Confidentiality': 0,
'Integrity': 1,
'Availability': 2,
'Accountability': 3,
'Anonymity': 4,
'Pseudonymity': 5,
'Unlinkability': 6,
'Unobservability': 7
}
attr_dictionary = OrderedDict(sorted(list(attr_dictionary.items()), key=lambda t: t[1]))
# endregion
base_attr_values = list(range(-1,4))
attr_values = list(range(4,8))
attr_values.append(-1)
def __init__(self, base_attr_id=-1, attr_id=-1, value=0, rationale='None', key=None):
"""
:type base_attr_id: int
:type attr_id: int
:type value: int|tuple
:type rationale: str
:type key: tuple
"""
if key is not None:
base_attr_id = key[0]
attr_id = key[1]
rationale = value[1]
value = value[0]
if base_attr_id not in self.base_attr_values or attr_id not in self.attr_values:
raise ValueError('Base attribute or subattribute value is incorrect.')
self.base_attr_id = base_attr_id
self.attr_id = attr_id
self.value = value
self.rationale = rationale
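# Example construction from a (base_attr, attr) key tuple, mirroring how
# __init__ above unpacks 'key' and 'value' (the ids and rationale here are
# illustrative only):
#
#     tension = EnvironmentTensionModel(key=(0, 4), value=(2, 'C/Anon tension'))
#     assert tension.base_attr_id == 0 and tension.attr_id == 4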
class Revision(object):
resource_fields = {
obj_id_field: fields.String,
'id': fields.Integer,
'date': fields.String,
'description': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, id=None, date=None, description=None, tuple_form=None):
"""
:type id: int
:type date: str
:type description: str
:type tuple_form: tuple
"""
if tuple_form is None:
self.id = id
self.date = date
self.description = description
else:
attrs = ['id', 'date', 'description']
for idx in range(0, len(tuple_form)):
self.__setattr__(attrs[idx], tuple_form[idx] or '')
class ProjectSettings(object):
resource_fields = {
obj_id_field: fields.String,
'projectName': fields.String,
'richPicture': fields.String,
'projectScope': fields.String,
'definitions': fields.List(fields.Nested(Definition.resource_fields)),
'projectGoals': fields.String,
'contributions': fields.List(fields.Nested(Contributor.resource_fields)),
'projectBackground': fields.String,
'revisions': fields.List(fields.Nested(Revision.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
req_p_settings_keys = ['Project Background', 'Project Goals', 'Project Name', 'Project Scope', 'Rich Picture']
def __init__(self, pSettings=None, pDict=None, contributors=None, revisions=None):
logger = logging.getLogger('cairisd')
project_settings = pSettings or {}
self.projectBackground = project_settings.get("Project Background", "")
self.projectGoals = project_settings.get("Project Goals", "")
self.projectName = project_settings.get("Project Name", "")
self.projectScope = project_settings.get("Project Scope", "")
self.richPicture = project_settings.get("Rich Picture", "")
self.definitions = pDict or []
self.contributions = []
for contributor in contributors or []:
if isinstance(contributor, tuple):
new_contr = Contributor(tuple_form=contributor)
self.contributions.append(new_contr)
else:
        logger.warning('Item does not meet typical contributor structure. Skipping this one.')
self.revisions = []
for revision in revisions or []:
if isinstance(revision, tuple):
new_rev = Revision(tuple_form=revision)
self.revisions.append(new_rev)
else:
        logger.warning('Item does not meet typical revision structure. Skipping this one.')
class RiskScore(object):
resource_fields = {
obj_id_field: fields.String,
'responseName': fields.String,
'unmitScore': fields.Integer,
'mitScore': fields.Integer,
'details': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, response_name, unmit_score, mit_score, details):
"""
:type response_name: str
:type unmit_score: int
:type mit_score: int
:type details: str
"""
self.responseName = response_name
self.unmitScore = unmit_score or -1
self.mitScore = mit_score or -1
self.details = details
class RiskRating(object):
resource_fields = {
obj_id_field: fields.String,
'rating': fields.String,
'threat': fields.String,
'vulnerability': fields.String,
'environment': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, threat, vulnerability, environment, rating=None):
self.threat = threat
self.vulnerability = vulnerability
self.environment = environment
self.rating = rating
class CountermeasureTarget(object):
def __init__(self, tName=None, tEffectiveness=None, tRat=None):
"""
:type tName: str
:type tEffectiveness: str
:type tRat: str
"""
self.theName = tName
self.theEffectiveness = tEffectiveness
self.theRationale = tRat
resource_fields = {
"__python_obj__": fields.String,
"theName": fields.String,
"theEffectiveness": fields.String,
"theRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def name(self): return self.theName
def effectiveness(self): return self.theEffectiveness
def rationale(self): return self.theRationale
class PersonaTaskCharacteristics(object):
def __init__(self, pName, pDur, pFreq, pDemands, pGoalConflict):
"""
:type pName: str
:type pDur: str
:type pFreq: str
:type pDemands: str
:type pGoalConflict: str
"""
self.thePersona = pName
self.theDuration = pDur
self.theFrequency = pFreq
self.theDemands = pDemands
self.theGoalConflict = pGoalConflict
resource_fields = {
"__python_obj__": fields.String,
"thePersona": fields.String,
"theDuration": fields.String,
"theFrequency": fields.String,
"theDemands": fields.String,
"theGoalConflict": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def persona(self): return self.thePersona
def duration(self): return self.theDuration
def frequency(self): return self.theFrequency
def demands(self): return self.theDemands
def goalConflict(self): return self.theGoalConflict
class CountermeasureTaskCharacteristics(object):
def __init__(self, pTask, pName, pDur, pFreq, pDemands, pGoalConflict):
"""
:type pTask: str
:type pName: str
:type pDur: str
:type pFreq: str
:type pDemands: str
:type pGoalConflict: str
"""
self.theTask = pTask
self.thePersona = pName
self.theDuration = pDur
self.theFrequency = pFreq
self.theDemands = pDemands
self.theGoalConflict = pGoalConflict
resource_fields = {
"__python_obj__": fields.String,
"theTask": fields.String,
"thePersona": fields.String,
"theDuration": fields.String,
"theFrequency": fields.String,
"theDemands": fields.String,
"theGoalConflict": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def task(self): return self.theTask
def persona(self): return self.thePersona
def duration(self): return self.theDuration
def frequency(self): return self.theFrequency
def demands(self): return self.theDemands
def goalConflict(self): return self.theGoalConflict
class SecurityAttribute(object):
def __init__(self, name=None, value=None, rationale=None):
"""
:type name: str
:type value: str
:type rationale: str
"""
self.name = name
self.value = value
self.rationale = rationale
resource_fields = {
"__python_obj__": fields.String,
"name": fields.String,
"value": fields.String,
"rationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def get_attr_value(self, enum_obj):
"""
Gets the database value for the security attribute
:type enum_obj: list|tuple
"""
value = 0
if self.value is not None:
found = False
idx = 0
while not found and idx < len(enum_obj):
if enum_obj[idx] == self.value:
value = idx
found = True
else:
idx += 1
return value
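# Example (hypothetical value list; get_attr_value returns the index of the
# matching entry, or 0 when no value is set):
#
#     attr = SecurityAttribute(name='Confidentiality', value='Medium')
#     attr.get_attr_value(('None', 'Low', 'Medium', 'High'))  # -> 2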
class ValuedRole(object):
resource_fields = {
obj_id_field: fields.String,
'roleName': fields.String,
'cost': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, role_name, cost):
self.roleName = role_name
self.cost = cost
class ExceptionAttributes(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theDimensionType': fields.String,
'theDimensionValue': fields.String,
'theCategoryName': fields.String,
'theDescription': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,excName,dimType,dimValue,catName,excDesc):
self.theName = excName
self.theDimensionType = dimType
self.theDimensionValue = dimValue
self.theCategoryName = catName
self.theDescription = excDesc
class StepAttributes(object):
resource_fields = {
obj_id_field: fields.String,
'theStepText': fields.String,
'theSynopsis': fields.String,
'theActor': fields.String,
'theActorType': fields.String,
'theExceptions': fields.List(fields.Nested(ExceptionAttributes.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,stepTxt,stepSyn,stepActor,stepActorType,stepExceptions):
self.theStepText = stepTxt
self.theSynopsis = stepSyn
self.theActor = stepActor
self.theActorType = stepActorType
self.theExceptions = stepExceptions
def synopsis(self): return self.theSynopsis
def actor(self): return self.theActor
def actorType(self): return self.theActorType
def tags(self): return self.theTags
def setSynopsis(self,s): self.theSynopsis = s
def setActor(self,a): self.theActor = a
class StepsAttributes(object):
resource_fields = {
obj_id_field: fields.String,
'theSteps': fields.List(fields.Nested(StepAttributes.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self):
self.theSteps = []
def __getitem__(self,stepNo):
return self.theSteps[stepNo]
def __setitem__(self,stepNo,s):
self.theSteps[stepNo] = s
def size(self):
return len(self.theSteps)
def append(self,s):
self.theSteps.append(s)
def remove(self,stepNo):
self.theSteps.pop(stepNo)
def insert(self,pos,s):
self.theSteps.insert(pos,s)
class ObjectDependency(object):
resource_fields = {
obj_id_field: fields.String,
'theDimensionName': fields.String,
'theObjectName': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,dimension_name,object_name):
self.theDimensionName = dimension_name
self.theObjectName = object_name
class TaskGoalContribution(object):
resource_fields = {
obj_id_field: fields.String,
'theSource': fields.String,
'theDestination': fields.String,
'theEnvironment': fields.String,
'theContribution': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,src,dest,env,cont):
self.theSource = src
self.theDestination = dest
self.theEnvironment = env
self.theContribution = cont
| apache-2.0 | -6,147,163,597,620,550,000 | 29.234545 | 112 | 0.682783 | false |
ZoranPavlovic/kombu | kombu/utils/objects.py | 1 | 1945 | """Object Utilities."""
class cached_property:
"""Cached property descriptor.
Caches the return value of the get method on first call.
Examples:
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError('Connection must be a connection')
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
                print('Connection {0!r} deleted'.format(value))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj, _sentinel=object()):
if obj is None:
return self
value = obj.__dict__.pop(self.__name__, _sentinel)
if self.__del is not None and value is not _sentinel:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
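# Behavior sketch (the Client class here is hypothetical): the first access
# computes the value and stores it in the instance __dict__; later accesses
# hit the cache until the attribute is deleted.
#
#     class Client:
#         @cached_property
#         def connection(self):
#             print('connecting once')
#             return object()
#
#     c = Client()
#     c.connection      # prints 'connecting once'
#     c.connection      # cached; no recomputation
#     del c.connection  # drops the cached value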
| bsd-3-clause | -138,779,550,533,933,420 | 29.390625 | 70 | 0.515681 | false |
Fuzen-py/BladeAndSoulAPI | BladeAndSoul/bns.py | 1 | 21211 | import asyncio
import json
import aiohttp
from bs4 import BeautifulSoup
from .errors import (CharacterNotFound, FailedToParse, InvalidData,
ServiceUnavialable)
try:
import lxml
parser = 'lxml'
except ImportError:
parser = 'html.parser'
# types of weapons in game
VALID_WEAPONS = ['dagger', 'sword', 'staff', 'razor', 'axe', 'bangle', 'gauntlet', 'lynblade', 'bracer']
# types of accessories in game
VALID_ACCESSORIES = ['necklace', 'earring', 'bracelet', 'ring', 'belt', 'energy', 'soul']
PROFILE_URL = 'http://na-bns.ncsoft.com/ingame/bs/character/profile' # ?c=Char
SEARCH_URL = 'http://na-bns.ncsoft.com/ingame/bs/character/search/info' # ?c=Char
SUGGEST_URL = 'http://na-search.ncsoft.com/openapi/suggest.jsp' # ?site=bns&display=10&collection=bnsusersuggest&query=char
MARKET_API_ENDPOINT = 'http://na.bnsbazaar.com/api/market' # ITEM NAME
ITEM_NAME_SUGGEST = 'http://na-search.ncsoft.com/openapi/bnsmarketsuggest.jsp' #?site=bns&display=1&collection=bnsitemsuggest&lang=en&callback=suggestKeyword&query=items
BASE_ITEM_IMAGE_URL = 'http://static.ncsoft.com/bns_resource/ui_resource'
def _float(var):
"""
    Attempts to convert an entry to a float (normally works for this).
"""
if var in [None, False]:
return 0
if var is True:
return 1
if isinstance(var, float):
return var
if isinstance(var, int):
return float(var)
assert isinstance(var, str)
assert any(x.isnumeric() for x in var)
var = var.split()[-1]
while len(var) > 0 and not var[-1].isnumeric():
var = var[:-1]
while len(var) > 0 and not var[0].isnumeric():
var = var[1:]
return float(var)
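# Examples of the coercion above (inputs illustrative): _float('Critical 54.3%')
# keeps the last token and strips the trailing '%' to give 54.3; _float(None)
# and _float(False) give 0; _float(True) gives 1.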
def _subtract(var1, var2, string=True, percent=False):
"""
    Visually show the subtraction of two stat values (optionally as percentages).
"""
if string:
if percent:
return '{}% - {}% = {}%'.format(var1, var2, var1-var2)
return '{} - {} = {}'.format(var1, var2, var1-var2)
if percent:
return str(var1) + '%', str(var2) + '%', str(var1-var2) + '%'
return var1, var2, var1-var2
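# Example: _subtract(10, 4) -> '10 - 4 = 6'; with string=False, percent=True
# it returns the tuple ('10%', '4%', '6%').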
def get_name(gear_item):
"""
A helper function for extracting names
"""
try:
gear_item = gear_item.find('div', class_='name')
if not gear_item:
return None
if gear_item.find('span', class_='empty') is not None:
return None
return gear_item.span.text
except AttributeError:
return None
def set_bonus(set_) -> tuple:
"""
returns the set bonus for a user as a generator
"""
return (':\n'.join(('\n'.join((t.strip() for t in z.text.strip().split('\n') if t.strip() != '')) for z in x)) for x
in dict(zip(set_.find_all('p', class_='discription'), set_.find_all('p', class_='setEffect'))).items())
async def fetch_url(url, params={}):
"""
Fetch a url and return soup
"""
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as re:
return BeautifulSoup(await re.text(), parser)
async def search_user(user, suggest=True, max_count=3) -> list:
soup = await fetch_url(SEARCH_URL, params={'c': user})
search = soup.find('div', class_='searchList')
if suggest:
return [(x.dl.dt.a.text, [b.text for b in x.dl.find('dd', class_='other').dd.find_all('li')]) for x in
search.find_all('li') if x.dt is not None][:max_count]
return (search.li.dl.dt.a.text,
[x.text for x in search.li.dl.find('dd', class_='other').dd.find_all('li') if x is not None])
async def fetch_profile(user) -> dict:
"""
Fetches a user and returns the data as a dict
Dictionary Keys:
Account Name - The display name for their account (str).
Character Name - The Name of the given character (str).
Level - Character's level (str).
HM Level - Character's HM level (str).
Server - Server the character is on (str).
Faction - The Faction the character is in (str).
Picture - Link to the character's profile picture (str).
Stats - A dictionary object with stats (each stat is also a dict).
    Gear - The gear of the given character (dict).
    SoulSheild - Soul Shield stats, a list of strings (list).
    Set Bonus - Set bonus effects, joined into a single string (str).
Outfit - The outfit of the character (dict).
Other Characters - A list of the other characters on that user's account (list).
Region - The region the user is from.
:parm user: The name of the character you wish to fetch data for
"""
CharacterName, other_chars = await search_user(user, suggest=False)
soup = await fetch_url(PROFILE_URL, params={'c': CharacterName})
    if len(soup.find_all('div', class_='pCharacter error', id='container')):
raise ServiceUnavialable('Cannot Access BNS At this time')
# INFORMATION
Name = soup.find('a', href='#').text
CharacterName = soup.find('dt').span.text[1:-1]
Class, Level, Server, *Faction = [x.text.strip().replace('\xa0', ' ') for x in
soup.find('dd', class_='desc').find_all('li')]
if len(Faction) == 0:
Clan = Rank = Faction = None
elif len(Faction) == 1:
Faction = Faction[0].split()
Rank = ' '.join(Faction[2:])
Faction = ' '.join(Faction[:2])
Clan = None
else:
Clan = Faction[1]
Faction = Faction[0].split()
Rank = ' '.join(Faction[2:])
Faction = ' '.join(Faction[:2])
Level = Level.split()
if len(Level) > 2:
HM = int(Level[-1])
else:
HM = 0
Level = int(Level[1])
# ATTACK
ATK = soup.find('div', class_='attack').dl
sub = [z for z in (dict(zip((z.text for z in x.find_all('span', class_='title')),
(z.text for z in x.find_all('span', class_='stat-point')))) for x in ATK.find_all('dd')) if len(z)][:-2]
temp = ATK.find_all('dt')[:-2]
ATK = dict(
zip([t.find('span', class_='title').text for t in temp], [t.find('span', 'stat-point').text for t in temp]))
del ATK['Mastery']
[ATK.update({x: {'Total': ATK.get(x)}}) for x in ATK.keys()]
ATK['Attack Power'].update(sub[0])
ATK['Piercing'].update(sub[2])
ATK['Accuracy'].update(sub[3])
ATK['Critical Hit'].update(sub[5])
ATK['Critical Damage'].update(sub[6])
# DEFENSE
Defense = soup.find('div', class_='defense')
temp = Defense.dl.find_all('dt')
sub = [z for z in (dict(zip((z.text for z in x.find_all('span', class_='title')),
(z.text for z in x.find_all('span', class_='stat-point')))) for x in Defense.find_all('dd')) if len(z)]
Defense = dict(
zip([t.find('span', class_='title').text for t in temp], [t.find('span', 'stat-point').text for t in temp]))
[Defense.update({x: {'Total': Defense.get(x)}}) for x in Defense.keys()]
del Defense['Debuff Defense']
Defense['Defense'].update(sub[1])
Defense['Evolved Defense'].update(sub[2])
Defense['Evasion'].update(sub[3])
Defense['Block'].update(sub[4])
Defense['Critical Defense'].update(sub[5])
Defense['Health Regen'].update(sub[7])
Defense['Recovery'].update(sub[8])
# GEAR
Weapon = get_name(soup.find('div', class_='wrapWeapon'))
Necklace = get_name(soup.find('div', class_='wrapAccessory necklace'))
Earring = get_name(soup.find('div', class_='wrapAccessory earring'))
Ring = get_name(soup.find('div', class_='wrapAccessory ring'))
Bracelet = get_name(soup.find('div', class_='wrapAccessory bracelet'))
Belt = get_name(soup.find('div', class_='wrapAccessory belt'))
Soul = get_name(soup.find('div', class_='wrapAccessory soul'))
# SoulSheild
SS = soup.find('div', class_='wrapGem')
BONUS = ()
Stats = ()
if any(x.img is not None for x in SS.find_all('span')):
BONUS = set_bonus(SS.find('div', class_='lyCharmEffect'))
Stats = ([': '.join([tr.th.text, tr.td.text]) for tr in SS.table.find_all('tr')])
# OUTFIT
Clothes = get_name(soup.find('div', class_='wrapAccessory clothes'))
Head = get_name(soup.find('div', class_='wrapAccessory tire'))
Face = get_name(soup.find('div', class_='wrapAccessory faceDecoration'))
Adornment = get_name(soup.find('div', class_='wrapAccessory clothesDecoration'))
# PROFILEPICTURE
Picture = soup.find('section').div.div.img.get('src')
del soup, temp, sub
r = {'Account Name': Name,
'Character Name': CharacterName,
'Class': Class,
'Level': Level,
'HM Level': HM,
'Server': Server,
'Faction': Faction,
'Clan': Clan,
'Faction Rank': Rank,
'Picture': Picture,
'Stats': {},
'Gear': {
'Weapon': Weapon,
'Necklace': Necklace,
'Earring': Earring,
'Ring': Ring,
'Bracelet': Bracelet,
'Belt': Belt,
'Soul': Soul},
'SoulSheild': Stats,
'Set Bonus': '\n\n'.join(BONUS),
'Outfit': {'Clothes': Clothes,
'Head': Head,
'Face': Face,
'Adornment': Adornment},
'Other Characters': other_chars,
'Region': 'NA'}
r['Stats'].update(ATK)
r['Stats'].update(Defense)
return r
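# Usage sketch (character name is hypothetical; requires a running event loop):
#
#     profile = asyncio.get_event_loop().run_until_complete(
#         fetch_profile('SomeCharacter'))
#     print(profile['Character Name'], profile['Level'], profile['Server'])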
async def get_item_name_suggestions(item, display, session):
async with session.get(ITEM_NAME_SUGGEST, params={'site': 'bns', 'display': display, 'collection': 'bnsitemsuggest', 'callback': 'suggestKeyword', 'query': item}) as re:
data: dict = json.loads((await re.text())[17:-4])
if data['result'] != "0":
raise ServiceUnavialable
return data
async def search_item(item, display:int=1):
def price_parse(html):
soup = BeautifulSoup(html, parser)
        return [int(x.text.split()[0]) if x != 0 else 0 for x in [soup.find(name='span', attrs={'class': c}) or 0 for c in ('gold', 'silver', 'bronze')]]
async def get_item_data(titem, session):
async with session.get(f'{MARKET_API_ENDPOINT}/{titem}/true') as re:
data = await re.json()
if (not isinstance(data, list)) or len(data) == 0:
raise InvalidData("Market Returned Invalid Data")
return {'icon': ''.join([BASE_ITEM_IMAGE_URL, data[0]['iconImg']]),
'prices': [(price_parse(e['price_html']), int(e['sale_data']['amount'])) for e in data],
'name': titem}
    async with aiohttp.ClientSession() as session:
data = await get_item_name_suggestions(item, display, session)
suggestions = [x[0] for x in data["front"] if len(x) == 2 and x[1] == 0 and isinstance(x[0], str)]
return [await get_item_data(item, session) for item in suggestions]
class Character(object):
"""
Character Object
    pretty_profile - Return a prettified profile overview as a string.
    pretty_gear - Return a prettified gear overview as a string.
    pretty_stats - Return a prettified stats overview as a string.
    pretty_outfit - Return a prettified outfit overview as a string.
    Notice: the following items can be accessed as self.item with spaces replaced by "_"; access is not case sensitive.
    Notice: the following items can also be accessed as self[item]; lookup is likewise case-insensitive and "_" is treated as a space.
Account Name - The display name for their account (str).
Character Name - The Name of the given character (str).
Level - Character's level (str).
HM Level - Character's HM level (str).
Server - Server the character is on (str).
Faction - The Faction the character is in (str).
Picture - Link to the character's profile picture (str).
Stats - A dictionary object with stats (each stat is also a dict).
    Gear - The gear of the given character (dict).
    SoulSheild - Soul Shield stats, a list of strings (list).
    Set Bonus - Set bonus effects, joined into a single string (str).
Outfit - The outfit of the character (dict).
Other Characters - A list of the other characters on that user's account (list).
Region - The region the user is from.
"""
def __init__(self, data: dict):
data = data.copy()
self.name = data['Character Name']
self.__data = data
self.items = self.__data.items
self.keys = self.__data.keys
self.account = data['Account Name']
async def refresh(self):
self.__data = await fetch_profile(self.name)
self.items = self.__data.items
self.keys = self.__data.keys
def __call__(self):
"""returns an awaitable to refresh"""
return self.refresh()
def __getattr__(self, item):
return self[str(item)]
def __getitem__(self, item):
item = str(item).replace('_', ' ')
k = list(self.__data.keys())
k = dict(zip([z.lower() for z in k], k))
try:
return self.__data[k.get(item.lower())]
except KeyError:
return self.__data[k.get(item.lower().replace(' ', '_'))]
def pretty_profile(self):
"""Return A prettyfied profile Overview as a string"""
if self['HM Level']:
temp = 'Level {} Hongmoon Level {}'.format(self['Level'], self['HM Level'])
else:
temp = 'Level {}'.format(self['Level'])
text = ['**Display Name:** {}'.format(self['Account Name']),
'**Character**: {} {}'.format(self['Character Name'], temp),
'**Weapon**: {}'.format(self['Gear']['Weapon']),
'**Server:** {}'.format(self['Server'])]
if self['Faction']:
if self['Faction'] == 'Cerulean Order':
text.append('**Faction:** Cerulean Order :blue_heart:')
else:
text.append('**Faction"** Crimson Legion :heart:')
text.append('**Faction Rank:** {}'.format(self['Faction Rank']))
if self['Clan']:
text.append('**Clan:** {}'.format(self['Clan']))
if len(self['Other Characters']):
temp = ['[', ']']
temp.insert(1, ', '.join(self['Other Characters']))
text.append('**Other Characters:**\n {}'.format(''.join(temp)))
text.append(self['Picture'])
return '\n'.join(text).strip()
def pretty_gear(self):
"""Return a prettyfied Gear Overview as a string"""
temp = [self['Character Name'], '[' + self['Class'],'Level', str(self['Level'])]
if self['HM Level']:
temp += ['Hongmoon Level', str(self['HM Level'])]
temp = ' '.join(temp) + ']'
divider = '─'*len(temp)
stats = self['Stats']
send_this = ['```', temp, divider, 'Total HP {} Attack Power {}'.format(stats['HP']['Total'], stats['Attack Power']['Total']),
divider, 'Soul Shield Attributes (Base + Fused + Set)', '\n'.join(self['SoulSheild']),
''.join(self['Set Bonus']), '']
gear = self['Gear']
temp = list(gear.keys())
temp.sort()
for k in temp:
send_this.append('{}: {}'.format(k, gear[k]))
send_this.append(divider)
send_this.append('```')
return '\n'.join(send_this).strip()
def pretty_stats(self):
"""Return a prettyfied Outfit Overview as a string"""
temp = [self['Character Name'], '[' + self['Class'] + ',','Level', str(self['Level'])]
if self['HM Level']:
temp += ['Hongmoon Level', str(self['HM Level'])]
temp = ' '.join(temp) + ']'
divider = '─'*len(temp)
stats = self['Stats']
send_this = ['```ruby', temp, divider, 'HP: {}'.format(stats['HP']['Total']),
'Attack Power: {}'.format(stats['Attack Power']['Total']),
'Piercing: {}'.format(stats['Piercing']['Total']),
'+Defense Piercing: {}'.format(stats['Piercing']['Defense Piercing']),
'+Block Piercing: {}'.format(stats['Piercing']['Block Piercing']),
'Accuracy: {0[Total]} ({0[Hit Rate]})'.format(stats['Accuracy']),
'Critical Hit: {0[Total]} ({0[Critical Rate]})'.format(stats['Critical Hit']),
'Critical Damage: {0[Total]} ({0[Increase Damage]})'.format(stats['Critical Damage']), divider,
'Defense: {0[Total]} ({0[Damage Reduction]})'.format(stats['Defense']),
'Evasion: {}'.format(stats['Evasion']['Total']),
'+Evasion Rate: {}'.format(stats['Evasion']['Evasion Rate']),
'+Counter Bonus: {}'.format(stats['Evasion']['Counter Bonus']),
('Block: {0[Total]}\n'
'+Damage Reduction: {0[Damage Reduction]}\n'
'+Block Bonus: {0[Block Bonus]}\n'
'+Block Rate: {0[Block Rate]}').format(stats['Block']),
'Health Regen (IN/OUT): {0[In Combat]}/{0[Out of Combat]}'.format(stats['Health Regen']),
'Recovery Rate: {}'.format(stats['Recovery']['Total']),
'```']
return '\n'.join(send_this)
def pretty_outfit(self):
"""Return a prettyfied Outfit Overview as a string"""
outfit = self['Outfit']
o = list(outfit.keys())
o.sort()
return '\n'.join(['```'] + ['{}\'s Outfit:'.format(self['Character Name'])] +
['{}: {}'.format(k, outfit[k]) for k in o] + ['```'])
def avg_dmg(self):
stats = self['Stats']
return avg_dmg(stats['Attack Power']['Total'],
stats['Critical Hit']['Critical Rate'],
stats['Critical Damage']['Total'],
elemental_bonus='100%')
async def get_character(user: str) -> Character:
"""
Return a Character Object for the given user.
:param user: The user to create an object for
:return: Returns A Character Object for the given user
"""
if not isinstance(user, str):
raise InvalidData('Expected type str for user, found {} instead'.format(type(user).__name__))
try:
return Character(await fetch_profile(user))
except AttributeError:
raise CharacterNotFound('Failed to find character "{}"'.format(user))
except Exception as e:
print('[!] Error:', e)
raise Exception(e)
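# Example usage of get_character (a sketch; assumes a running asyncio event
# loop and that the hypothetical character name actually exists):
#
#     char = await get_character('SomeCharacter')
#     print(char.pretty_profile())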
async def compare(user1: Character, user2: Character, update=False):
"""A WIP compare fucntion."""
assert isinstance(user1, Character) and isinstance(user2, Character)
if update:
await user1.refresh()
await user2.refresh()
temp = '{} - {}'.format(user1['Character Name'], user2['Character Name'])
divider = '─'*len(temp)
user1 = user1['Stats']
user2 = user2['Stats']
for k,v in user1.items():
for k2,v2 in v.items():
v[k2] = _float(v2)
user1[k] = v
for k,v in user2.items():
for k2,v2 in v.items():
v[k2] = _float(v2)
user2[k] = v
send_this = [temp, divider, 'HP: {}'.format(_subtract(user1['HP']['Total'], user2['HP']['Total'])),
'Attack Power: {}'.format(_subtract(user1['Attack Power']['Total'],
user2['Attack Power']['Total'])),
'Piercing: {}'.format(_subtract(user1['Piercing']['Total'], user2['Piercing']['Total'])),
'+Defense Piercing: {}'.format(_subtract(user1['Piercing']['Defense Piercing'],
user2['Piercing']['Defense Piercing'],
percent=True)),
'+Block Piercing: {}'.format(_subtract(user1['Piercing']['Block Piercing'],
user2['Piercing']['Block Piercing'],
percent=True)),
'Accuracy: {}'.format(_subtract(user1['Accuracy']['Total'],
user2['Accuracy']['Total'])),
'+']
return '\n'.join(send_this)
def avg_dmg(attack_power: str, critical_rate: str, critical_damage: str, elemental_bonus: str='100%'):
"""
AVG Damage
Calculates The Average Damage
:param attack_power: Attack Power (Total)
    :param critical_rate: Critical Hit -> Critical Rate
    :param critical_damage: Critical Damage (Total)
    :param elemental_bonus: total elemental bonus as a percentage string, e.g. '100%'
"""
attack_power = float(attack_power)
crit_rate = float(critical_rate.strip(' %'))
crit_damage = float(critical_damage)
elemental_bonus = float(elemental_bonus.strip(' %'))
# Result is No Blue Buff
# Result 2 is with Blue Buff
result = attack_power * (1 - (crit_rate * 0.01) + (crit_rate * crit_damage * 0.0001))
if (crit_rate < 60):
result2 = attack_power * (1 - ((crit_rate + 50) * 0.01) + (crit_rate + 50) * (crit_damage + 40) * .0001)
    else:
        result2 = attack_power * ((crit_damage + 40) * .01)
    if elemental_bonus in [0, 100]:
        return round(result, 2), round(result2, 2)
result *= (elemental_bonus * 0.01)
result2 *= (elemental_bonus * 0.01)
return round(result, 2), round(result2, 2)
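# Illustrative call to avg_dmg() with made-up stat values (not taken from a
# real profile); it returns (average damage without buff, with blue buff):
#
#     avg_dmg('1200', '55%', '210', elemental_bonus='100%')
#     # -> (1926.0, 3090.0)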
| mit | 8,555,198,700,855,818,000 | 42.542094 | 173 | 0.564395 | false |
Llamatech/sis-fibo | model/vos/cuenta.py | 1 | 2245 | #-*- coding:iso-8859-1 -*-
"""
Clase que modela la información de una cuenta en el sistema
"""
# NUMERO.SALDO.TIPO_CUENTA,CERRADA,CLIENTE,OFICINA
class Cuenta(object):
def __init__(self, numero, saldo, tipo_cuenta, cerrada, cliente, oficina):
self.numero = numero
self.saldo = saldo
self.tipo_cuenta = tipo_cuenta
self.cerrada = cerrada
self.cliente = cliente
self.oficina = oficina
def __repr__(self):
args = [self.numero, self.saldo, self.tipo_cuenta, self.cerrada, self.cliente, self.oficina]
args = map(str, args)
return "numero: %s; saldo: %s; tipo_cuenta:%s; cerrada:%s; cliente: %s; oficina: %s" % tuple(args)
def __str__(self):
return self.__repr__()
class CuentaR(object):
def __init__(self, numero, fecha_creacion, saldo, tipo, cerrada, id_cliente, nom_cliente, ap_cliente, id_of, of_nombre, fecha_umov):
self.numero = numero
if fecha_creacion is not None:
self.fecha_creacion = fecha_creacion.strftime('%d/%m/%Y')
else:
self.fecha_creacion = None
self.saldo = saldo
self.tipo = tipo
self.cerrada = cerrada
self.id_cliente = id_cliente
self.nom_cliente = nom_cliente
self.ap_cliente = ap_cliente
self.id_of = id_of
self.of_nombre = of_nombre
if fecha_umov is not None:
self.fecha_umov = fecha_umov.strftime('%d/%m/%Y')
else:
self.fecha_umov = fecha_umov
def dict_repr(self):
if self.cerrada == 'N':
url = '/cuentas?numero='+str(self.numero)
else:
url = None
d = {
'numero':self.numero,
'fecha_creacion':self.fecha_creacion,
'saldo':self.saldo,
'tipo':self.tipo,
'cerrada':self.cerrada,
'id_cliente':self.id_cliente,
'nom_cliente':self.nom_cliente,
'ap_cliente':self.ap_cliente,
'id_of':self.id_of,
'of_nombre':self.of_nombre,
'fecha_umov':self.fecha_umov,
'delete':url
}
return d
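# Illustrative output of CuentaR.dict_repr() (hypothetical values): open
# accounts (cerrada == 'N') carry a delete URL, closed ones carry None, e.g.
#     {'numero': 123, 'cerrada': 'N', 'delete': '/cuentas?numero=123', ...}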
| gpl-2.0 | 7,610,641,571,238,857,000 | 33.619048 | 136 | 0.536988 | false |
99cloud/keystone_register | openstack_dashboard/dashboards/project/access_and_security/security_groups/forms.py | 1 | 12028 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import validators
from django.core.urlresolvers import reverse
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.validators import validate_port_range
from horizon.utils import fields
from openstack_dashboard import api
class CreateGroup(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.nova.security_group_create(request,
data['name'],
data['description'])
messages.success(request,
_('Successfully created security group: %s')
% data['name'])
return sg
except:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to create security group.'),
redirect=redirect)
class AddRule(forms.SelfHandlingForm):
id = forms.IntegerField(widget=forms.HiddenInput())
ip_protocol = forms.ChoiceField(label=_('IP Protocol'),
choices=[('tcp', _('TCP')),
('udp', _('UDP')),
('icmp', _('ICMP'))],
help_text=_("The protocol which this "
"rule should be applied to."),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'protocol'}))
port_or_range = forms.ChoiceField(label=_('Open'),
choices=[('port', _('Port')),
('range', _('Port Range'))],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'range',
'data-switch-on': 'protocol',
'data-protocol-tcp': _('Open'),
'data-protocol-udp': _('Open')}))
port = forms.IntegerField(label=_("Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-port': _('Port')}),
validators=[validate_port_range])
from_port = forms.IntegerField(label=_("From Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('From Port')}),
validators=[validate_port_range])
to_port = forms.IntegerField(label=_("To Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('To Port')}),
validators=[validate_port_range])
icmp_type = forms.IntegerField(label=_("Type"),
required=False,
help_text=_("Enter a value for ICMP type "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'protocol',
'data-protocol-icmp': _('Type')}),
validators=[validate_port_range])
icmp_code = forms.IntegerField(label=_("Code"),
required=False,
help_text=_("Enter a value for ICMP code "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'protocol',
'data-protocol-icmp': _('Code')}),
validators=[validate_port_range])
source = forms.ChoiceField(label=_('Source'),
choices=[('cidr', _('CIDR')),
('sg', _('Security Group'))],
help_text=_('To specify an allowed IP '
'range, select "CIDR". To '
'allow access from all '
'members of another security '
'group select "Security '
'Group".'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
cidr = fields.IPField(label=_("CIDR"),
required=False,
initial="0.0.0.0/0",
help_text=_("Classless Inter-Domain Routing "
"(e.g. 192.168.0.0/24)"),
version=fields.IPv4 | fields.IPv6,
mask=True,
widget=forms.TextInput(
attrs={'class': 'switched',
'data-switch-on': 'source',
'data-source-cidr': _('CIDR')}))
security_group = forms.ChoiceField(label=_('Security Group'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-sg': _('Security '
'Group')}))
def __init__(self, *args, **kwargs):
sg_list = kwargs.pop('sg_list', [])
super(AddRule, self).__init__(*args, **kwargs)
# Determine if there are security groups available for the
# source group option; add the choices and enable the option if so.
if sg_list:
security_groups_choices = sg_list
else:
security_groups_choices = [("", _("No security groups available"))]
self.fields['security_group'].choices = security_groups_choices
def clean(self):
cleaned_data = super(AddRule, self).clean()
ip_proto = cleaned_data.get('ip_protocol')
port_or_range = cleaned_data.get("port_or_range")
source = cleaned_data.get("source")
icmp_type = cleaned_data.get("icmp_type", None)
icmp_code = cleaned_data.get("icmp_code", None)
from_port = cleaned_data.get("from_port", None)
to_port = cleaned_data.get("to_port", None)
port = cleaned_data.get("port", None)
if ip_proto == 'icmp':
if icmp_type is None:
msg = _('The ICMP type is invalid.')
raise ValidationError(msg)
if icmp_code is None:
msg = _('The ICMP code is invalid.')
raise ValidationError(msg)
if icmp_type not in xrange(-1, 256):
                msg = _('The ICMP type is not in range (-1, 255)')
raise ValidationError(msg)
if icmp_code not in xrange(-1, 256):
                msg = _('The ICMP code is not in range (-1, 255)')
raise ValidationError(msg)
cleaned_data['from_port'] = icmp_type
cleaned_data['to_port'] = icmp_code
else:
if port_or_range == "port":
cleaned_data["from_port"] = port
cleaned_data["to_port"] = port
if port is None:
msg = _('The specified port is invalid.')
raise ValidationError(msg)
else:
if from_port is None:
msg = _('The "from" port number is invalid.')
raise ValidationError(msg)
if to_port is None:
msg = _('The "to" port number is invalid.')
raise ValidationError(msg)
if to_port < from_port:
msg = _('The "to" port number must be greater than '
'or equal to the "from" port number.')
raise ValidationError(msg)
if source == "cidr":
cleaned_data['security_group'] = None
else:
cleaned_data['cidr'] = None
return cleaned_data
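    # Illustrative effect of the normalisation in clean() above (hypothetical
    # input values):
    #     ip_protocol='tcp', port_or_range='port', port=22 -> from_port=22, to_port=22
    #     ip_protocol='icmp', icmp_type=8, icmp_code=0     -> from_port=8, to_port=0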
def handle(self, request, data):
try:
rule = api.nova.security_group_rule_create(
request,
data['id'],
data['ip_protocol'],
data['from_port'],
data['to_port'],
data['cidr'],
data['security_group'])
messages.success(request,
_('Successfully added rule: %s') % unicode(rule))
return rule
except:
redirect = reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[data['id']])
exceptions.handle(request,
_('Unable to add rule to security group.'),
redirect=redirect)
| apache-2.0 | -5,296,865,002,722,064,000 | 47.305221 | 79 | 0.427835 | false |
tvalacarta/tvalacarta | python/main-classic/lib/youtube_dl/extractor/vimeo.py | 1 | 46584 | # coding: utf-8
from __future__ import unicode_literals
import base64
import functools
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
dict_get,
ExtractorError,
js_to_json,
int_or_none,
merge_dicts,
OnDemandPagedList,
parse_filesize,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
str_or_none,
try_get,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
username, password = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
webpage = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = {
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
}
self._set_vimeo_cookie('vuid', vuid)
try:
self._download_webpage(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': self._LOGIN_URL,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 418:
raise ExtractorError(
'Unable to log in: bad username or password',
expected=True)
raise ExtractorError('Unable to log in')
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
})
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
password_request = sanitized_Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
self._set_vimeo_cookie('vuid', vuid)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
vimeo_config = self._search_regex(
r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
if vimeo_config:
return self._parse_json(vimeo_config, video_id)
def _set_vimeo_cookie(self, name, value):
self._set_cookie('vimeo.com', name, value)
def _vimeo_sort_formats(self, formats):
# Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
# at the same time without actual units specified. This lead to wrong sorting.
self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))
def _parse_config(self, config, video_id):
video_data = config['video']
video_title = video_data['title']
live_event = video_data.get('live_event') or {}
is_live = live_event.get('status') == 'started'
formats = []
config_files = video_data.get('files') or config['request'].get('files', {})
for f in config_files.get('progressive', []):
video_url = f.get('url')
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': 'http-%s' % f.get('quality'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'fps': int_or_none(f.get('fps')),
'tbr': int_or_none(f.get('bitrate')),
})
# TODO: fix handling of 308 status code returned for live archive manifest requests
for files_type in ('hls', 'dash'):
for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
manifest_url = cdn_data.get('url')
if not manifest_url:
continue
format_id = '%s-%s' % (files_type, cdn_name)
if files_type == 'hls':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8' if is_live else 'm3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % cdn_name,
fatal=False))
elif files_type == 'dash':
mpd_pattern = r'/%s/(?:sep/)?video/' % video_id
mpd_manifest_urls = []
if re.search(mpd_pattern, manifest_url):
for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
mpd_manifest_urls.append((format_id + suffix, re.sub(
mpd_pattern, '/%s/%s/' % (video_id, repl), manifest_url)))
else:
mpd_manifest_urls = [(format_id, manifest_url)]
for f_id, m_url in mpd_manifest_urls:
if 'json=1' in m_url:
real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
if real_m_url:
m_url = real_m_url
mpd_formats = self._extract_mpd_formats(
m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
'Downloading %s MPD information' % cdn_name,
fatal=False)
for f in mpd_formats:
if f.get('vcodec') == 'none':
f['preference'] = -50
elif f.get('acodec') == 'none':
f['preference'] = -40
formats.extend(mpd_formats)
live_archive = live_event.get('archive') or {}
live_archive_source_url = live_archive.get('source_url')
if live_archive_source_url and live_archive.get('status') == 'done':
formats.append({
'format_id': 'live-archive-source',
'url': live_archive_source_url,
'preference': 1,
})
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if text_tracks:
for tt in text_tracks:
subtitles[tt['lang']] = [{
'ext': 'vtt',
'url': 'https://vimeo.com' + tt['url'],
}]
thumbnails = []
if not is_live:
for key, thumb in video_data.get('thumbs', {}).items():
thumbnails.append({
'id': key,
'width': int_or_none(key),
'url': thumb,
})
thumbnail = video_data.get('thumbnail')
if thumbnail:
thumbnails.append({
'url': thumbnail,
})
owner = video_data.get('owner') or {}
video_uploader_url = owner.get('url')
return {
'id': str_or_none(video_data.get('id')) or video_id,
'title': self._live_title(video_title) if is_live else video_title,
'uploader': owner.get('name'),
'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None,
'uploader_url': video_uploader_url,
'thumbnails': thumbnails,
'duration': int_or_none(video_data.get('duration')),
'formats': formats,
'subtitles': subtitles,
'is_live': is_live,
}
def _extract_original_format(self, url, video_id):
download_data = self._download_json(
url, video_id, fatal=False,
query={'action': 'load_download_config'},
headers={'X-Requested-With': 'XMLHttpRequest'})
if download_data:
source_file = download_data.get('source_file')
if isinstance(source_file, dict):
download_url = source_file.get('download_url')
if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
source_name = source_file.get('public_name', 'Original')
if self._is_valid_url(download_url, video_id, '%s video' % source_name):
ext = (try_get(
source_file, lambda x: x['extension'],
compat_str) or determine_ext(
download_url, None) or 'mp4').lower()
return {
'url': download_url,
'ext': ext,
'width': int_or_none(source_file.get('width')),
'height': int_or_none(source_file.get('height')),
'filesize': parse_filesize(source_file.get('size')),
'format_id': source_name,
'preference': 1,
}
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:
(?:
www|
player
)
\.
)?
vimeo(?:pro)?\.com/
(?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
(?:.*?/)?
(?:
(?:
play_redirect_hls|
moogaloop\.swf)\?clip_id=
)?
(?:videos?/)?
(?P<id>[0-9]+)
(?:/[\da-f]+)?
/?(?:[?&].*)?(?:[#].*)?$
'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'timestamp': 1355990239,
'upload_date': '20121220',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434',
'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
'license': 'by-sa',
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:2c362968038d4499f4d79f88458590c1',
'duration': 1595,
'upload_date': '20130610',
'timestamp': 1370893156,
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
'params': {
'format': 'best[protocol=https]',
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'timestamp': 1371200155,
'upload_date': '20130614',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'md5:dca3ea23adb29ee387127bc4ddfce63f',
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/atencio',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'channel_id': 'keypeele',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/keypeele',
'timestamp': 1380339469,
'upload_date': '20130928',
'duration': 187,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'timestamp': 1381846109,
'upload_date': '20131015',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/staff',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user28849593',
'uploader_id': 'user28849593',
},
},
{
# contains original format
'url': 'https://vimeo.com/33951933',
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
'info_dict': {
'id': '33951933',
'ext': 'mp4',
'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
'uploader': 'The DMCI',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/dmci',
'uploader_id': 'dmci',
'timestamp': 1324343742,
'upload_date': '20111220',
'description': 'md5:ae23671e82d05415868f7ad1aec21147',
},
},
{
# only available via https://vimeo.com/channels/tributes/6213729 and
# not via https://vimeo.com/6213729
'url': 'https://vimeo.com/channels/tributes/6213729',
'info_dict': {
'id': '6213729',
'ext': 'mp4',
'title': 'Vimeo Tribute: The Shining',
'uploader': 'Casey Donahue',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/caseydonahue',
'uploader_id': 'caseydonahue',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/tributes',
'channel_id': 'tributes',
'timestamp': 1250886430,
'upload_date': '20090821',
'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
# redirects to ondemand extractor and should be passed through it
# for successful extraction
'url': 'https://vimeo.com/73445910',
'info_dict': {
'id': '73445910',
'ext': 'mp4',
'title': 'The Reluctant Revolutionary',
'uploader': '10Ft Films',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/tenfootfilms',
'uploader_id': 'tenfootfilms',
'description': 'md5:0fa704e05b04f91f40b7f3ca2e801384',
'upload_date': '20130830',
'timestamp': 1377853339,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://player.vimeo.com/video/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
'only_matching': True,
},
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'only_matching': True,
},
{
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
{
'url': 'https://vimeo.com/album/2632481/video/79010983',
'only_matching': True,
},
{
# source file returns 403: Forbidden
'url': 'https://vimeo.com/7809605',
'only_matching': True,
},
{
'url': 'https://vimeo.com/160743502/abd0e13fb4',
'only_matching': True,
}
# https://gettingthingsdone.com/workflowmap/
# vimeo embed with check-password page protected by Referer header
]
@staticmethod
def _smuggle_referrer(url, referrer_url):
return smuggle_url(url, {'http_headers': {'Referer': referrer_url}})
@staticmethod
def _extract_urls(url, webpage):
urls = []
# Look for embedded (iframe) Vimeo player
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/\d+.*?)\1',
webpage):
urls.append(VimeoIE._smuggle_referrer(unescapeHTML(mobj.group('url')), url))
PLAIN_EMBED_RE = (
# Look for embedded (swf embed) Vimeo player
r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1',
# Look more for non-standard embedded Vimeo player
r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1',
)
for embed_re in PLAIN_EMBED_RE:
for mobj in re.finditer(embed_re, webpage):
urls.append(mobj.group('url'))
return urls
@staticmethod
def _extract_url(url, webpage):
urls = VimeoIE._extract_urls(url, webpage)
return urls[0] if urls else None
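    # Sketch of how the helpers above are used by embedding extractors
    # (hypothetical page URL); the embedding page is smuggled into the URL so
    # it can later be replayed as the Referer header when fetching the player:
    #
    #     embed_url = VimeoIE._extract_url('https://example.com/page', webpage)
    #     # e.g. 'https://player.vimeo.com/video/76979871#__youtubedl_smuggle=...'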
def _verify_player_video_password(self, url, video_id, headers):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
data = urlencode_postdata({
'password': base64.b64encode(password.encode()),
})
headers = merge_dicts(headers, {
'Content-Type': 'application/x-www-form-urlencoded',
})
checked = self._download_json(
url + '/check-password', video_id,
'Verifying the password', data=data, headers=headers)
if checked is False:
raise ExtractorError('Wrong video password', expected=True)
return checked
def _real_initialize(self):
self._login()
def _real_extract(self, url):
url, data = unsmuggle_url(url, {})
headers = std_headers.copy()
if 'http_headers' in data:
headers.update(data['http_headers'])
if 'Referer' not in headers:
headers['Referer'] = url
channel_id = self._search_regex(
r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
# Extract ID from URL
video_id = self._match_id(url)
orig_url = url
is_pro = 'vimeopro.com/' in url
is_player = '://player.vimeo.com/video/' in url
if is_pro:
# some videos require portfolio_id to be present in player url
# https://github.com/ytdl-org/youtube-dl/issues/20070
url = self._extract_url(url, self._download_webpage(url, video_id))
if not url:
url = 'https://vimeo.com/' + video_id
elif is_player:
url = 'https://player.vimeo.com/video/' + video_id
elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
url = 'https://vimeo.com/' + video_id
try:
# Retrieve video webpage to extract further information
webpage, urlh = self._download_webpage_handle(
url, video_id, headers=headers)
redirect_url = compat_str(urlh.geturl())
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
errmsg = ee.cause.read()
if b'Because of its privacy settings, this video cannot be played here' in errmsg:
raise ExtractorError(
'Cannot download embed-only video without embedding '
'URL. Please call youtube-dl with the URL of the page '
'that embeds this video.',
expected=True)
raise
# Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
self.report_extraction(video_id)
vimeo_config = self._extract_vimeo_config(webpage, video_id, default=None)
if vimeo_config:
seed_status = vimeo_config.get('seed_status', {})
if seed_status.get('state') == 'failed':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, seed_status['title']),
expected=True)
cc_license = None
timestamp = None
video_description = None
# Extract the config JSON
try:
try:
config_url = self._html_search_regex(
r' data-config-url="(.+?)"', webpage,
'config URL', default=None)
if not config_url:
# Sometimes new react-based page is served instead of old one that require
# different config URL extraction approach (see
# https://github.com/ytdl-org/youtube-dl/pull/7209)
page_config = self._parse_json(self._search_regex(
r'vimeo\.(?:clip|vod_title)_page_config\s*=\s*({.+?});',
webpage, 'page config'), video_id)
config_url = page_config['player']['config_url']
cc_license = page_config.get('cc_license')
timestamp = try_get(
page_config, lambda x: x['clip']['uploaded_on'],
compat_str)
video_description = clean_html(dict_get(
page_config, ('description', 'description_html_escaped')))
config = self._download_json(config_url, video_id)
except RegexNotFoundError:
# For pro videos or player.vimeo.com urls
                # We try to find out which variable the config dict is assigned to
m_variable_name = re.search(r'(\w)\.video\.id', webpage)
if m_variable_name is not None:
config_re = [r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))]
else:
config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
config_re.append(r'\bvar\s+r\s*=\s*({.+?})\s*;')
config_re.append(r'\bconfig\s*=\s*({.+?})\s*;')
config = self._search_regex(config_re, webpage, 'info section',
flags=re.DOTALL)
config = json.loads(config)
except Exception as e:
if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
if '_video_password_verified' in data:
raise ExtractorError('video password verification failed!')
self._verify_video_password(redirect_url, video_id, webpage)
return self._real_extract(
smuggle_url(redirect_url, {'_video_password_verified': 'verified'}))
else:
raise ExtractorError('Unable to extract info section',
cause=e)
else:
if config.get('view') == 4:
config = self._verify_player_video_password(redirect_url, video_id, headers)
vod = config.get('video', {}).get('vod', {})
def is_rented():
if '>You rented this title.<' in webpage:
return True
if config.get('user', {}).get('purchased'):
return True
for purchase_option in vod.get('purchase_options', []):
if purchase_option.get('purchased'):
return True
label = purchase_option.get('label_string')
if label and (label.startswith('You rented this') or label.endswith(' remaining')):
return True
return False
if is_rented() and vod.get('is_trailer'):
feature_id = vod.get('feature_id')
if feature_id and not data.get('force_feature_id', False):
return self.url_result(smuggle_url(
'https://player.vimeo.com/player/%s' % feature_id,
{'force_feature_id': True}), 'Vimeo')
# Extract video description
if not video_description:
video_description = self._html_search_regex(
r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
webpage, 'description', default=None)
if not video_description:
video_description = self._html_search_meta(
'description', webpage, default=None)
if not video_description and is_pro:
orig_webpage = self._download_webpage(
orig_url, video_id,
note='Downloading webpage for description',
fatal=False)
if orig_webpage:
video_description = self._html_search_meta(
'description', orig_webpage, default=None)
if not video_description and not is_player:
self._downloader.report_warning('Cannot find video description')
# Extract upload date
if not timestamp:
timestamp = self._search_regex(
r'<time[^>]+datetime="([^"]+)"', webpage,
'timestamp', default=None)
try:
view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
except RegexNotFoundError:
# This info is only available in vimeo.com/{id} urls
view_count = None
like_count = None
comment_count = None
formats = []
source_format = self._extract_original_format(
'https://vimeo.com/' + video_id, video_id)
if source_format:
formats.append(source_format)
info_dict_config = self._parse_config(config, video_id)
formats.extend(info_dict_config['formats'])
self._vimeo_sort_formats(formats)
json_ld = self._search_json_ld(webpage, video_id, default={})
if not cc_license:
cc_license = self._search_regex(
r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
webpage, 'license', default=None, group='license')
channel_url = 'https://vimeo.com/channels/%s' % channel_id if channel_id else None
info_dict = {
'formats': formats,
'timestamp': unified_timestamp(timestamp),
'description': video_description,
'webpage_url': url,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'license': cc_license,
'channel_id': channel_id,
'channel_url': channel_url,
}
info_dict = merge_dicts(info_dict, info_dict_config, json_ld)
return info_dict
class VimeoOndemandIE(VimeoIE):
IE_NAME = 'vimeo:ondemand'
_VALID_URL = r'https?://(?:www\.)?vimeo\.com/ondemand/([^/]+/)?(?P<id>[^/?#&]+)'
_TESTS = [{
# ondemand video not available via https://vimeo.com/id
'url': 'https://vimeo.com/ondemand/20704',
'md5': 'c424deda8c7f73c1dfb3edd7630e2f35',
'info_dict': {
'id': '105442900',
'ext': 'mp4',
'title': 'המעבדה - במאי יותם פלדמן',
'uploader': 'גם סרטים',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/gumfilms',
'uploader_id': 'gumfilms',
'description': 'md5:4c027c965e439de4baab621e48b60791',
'upload_date': '20140906',
'timestamp': 1410032453,
},
'params': {
'format': 'best[protocol=https]',
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
# requires Referer to be passed along with og:video:url
'url': 'https://vimeo.com/ondemand/36938/126682985',
'info_dict': {
'id': '126584684',
'ext': 'mp4',
'title': 'Rävlock, rätt läte på rätt plats',
'uploader': 'Lindroth & Norin',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/lindrothnorin',
'uploader_id': 'lindrothnorin',
'description': 'md5:c3c46a90529612c8279fb6af803fc0df',
'upload_date': '20150502',
'timestamp': 1430586422,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'url': 'https://vimeo.com/ondemand/nazmaalik',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/141692381',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/thelastcolony/150274832',
'only_matching': True,
}]
class VimeoChannelIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:channel'
_VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
_TITLE = None
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
_TESTS = [{
'url': 'https://vimeo.com/channels/tributes',
'info_dict': {
'id': 'tributes',
'title': 'Vimeo Tributes',
},
'playlist_mincount': 25,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/channels/%s'
def _page_url(self, base_url, pagenum):
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
return self._TITLE or self._html_search_regex(
self._TITLE_RE, webpage, 'list title', fatal=False)
def _login_list_password(self, page_url, list_id, webpage):
login_form = self._search_regex(
r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
webpage, 'login form', default=None)
if not login_form:
return webpage
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
fields = self._hidden_inputs(login_form)
token, vuid = self._extract_xsrft_and_vuid(webpage)
fields['token'] = token
fields['password'] = password
post = urlencode_postdata(fields)
password_path = self._search_regex(
r'action="([^"]+)"', login_form, 'password URL')
password_url = compat_urlparse.urljoin(page_url, password_path)
password_request = sanitized_Request(password_url, post)
password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
self._set_vimeo_cookie('vuid', vuid)
self._set_vimeo_cookie('xsrft', token)
return self._download_webpage(
password_request, list_id,
'Verifying the password', 'Wrong password')
def _title_and_entries(self, list_id, base_url):
for pagenum in itertools.count(1):
page_url = self._page_url(base_url, pagenum)
webpage = self._download_webpage(
page_url, list_id,
'Downloading page %s' % pagenum)
if pagenum == 1:
webpage = self._login_list_password(page_url, list_id, webpage)
yield self._extract_list_title(webpage)
# Try extracting href first since not all videos are available via
# short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
clips = re.findall(
r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)(?:[^>]+\btitle="([^"]+)")?', webpage)
if clips:
for video_id, video_url, video_title in clips:
yield self.url_result(
compat_urlparse.urljoin(base_url, video_url),
VimeoIE.ie_key(), video_id=video_id, video_title=video_title)
# More relaxed fallback
else:
for video_id in re.findall(r'id=["\']clip_(\d+)', webpage):
yield self.url_result(
'https://vimeo.com/%s' % video_id,
VimeoIE.ie_key(), video_id=video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
break
def _extract_videos(self, list_id, base_url):
title_and_entries = self._title_and_entries(list_id, base_url)
list_title = next(title_and_entries)
return self.playlist_result(title_and_entries, list_id, list_title)
def _real_extract(self, url):
channel_id = self._match_id(url)
return self._extract_videos(channel_id, self._BASE_URL_TEMPL % channel_id)
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
_VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<id>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
_TESTS = [{
'url': 'https://vimeo.com/nkistudio/videos',
'info_dict': {
'title': 'Nki',
'id': 'nkistudio',
},
'playlist_mincount': 66,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/%s'
class VimeoAlbumIE(VimeoChannelIE):
IE_NAME = 'vimeo:album'
_VALID_URL = r'https://vimeo\.com/(?:album|showcase)/(?P<id>\d+)(?:$|[?#]|/(?!video))'
_TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
_TESTS = [{
'url': 'https://vimeo.com/album/2632481',
'info_dict': {
'id': '2632481',
'title': 'Staff Favorites: November 2013',
},
'playlist_mincount': 13,
}, {
'note': 'Password-protected album',
'url': 'https://vimeo.com/album/3253534',
'info_dict': {
'title': 'test',
'id': '3253534',
},
'playlist_count': 1,
'params': {
'videopassword': 'youtube-dl',
}
}]
_PAGE_SIZE = 100
    def _fetch_page(self, album_id, authorization, hashed_pass, page):
api_page = page + 1
query = {
'fields': 'link,uri',
'page': api_page,
'per_page': self._PAGE_SIZE,
}
if hashed_pass:
query['_hashed_pass'] = hashed_pass
videos = self._download_json(
'https://api.vimeo.com/albums/%s/videos' % album_id,
album_id, 'Downloading page %d' % api_page, query=query, headers={
                'Authorization': 'jwt ' + authorization,
})['data']
for video in videos:
link = video.get('link')
if not link:
continue
uri = video.get('uri')
video_id = self._search_regex(r'/videos/(\d+)', uri, 'video_id', default=None) if uri else None
yield self.url_result(link, VimeoIE.ie_key(), video_id)
def _real_extract(self, url):
album_id = self._match_id(url)
webpage = self._download_webpage(url, album_id)
webpage = self._login_list_password(url, album_id, webpage)
api_config = self._extract_vimeo_config(webpage, album_id)['api']
entries = OnDemandPagedList(functools.partial(
self._fetch_page, album_id, api_config['jwt'],
api_config.get('hashed_pass')), self._PAGE_SIZE)
return self.playlist_result(entries, album_id, self._html_search_regex(
r'<title>\s*(.+?)(?:\s+on Vimeo)?</title>', webpage, 'title', fatal=False))
class VimeoGroupsIE(VimeoChannelIE):
IE_NAME = 'vimeo:group'
_VALID_URL = r'https://vimeo\.com/groups/(?P<id>[^/]+)(?:/(?!videos?/\d+)|$)'
_TESTS = [{
'url': 'https://vimeo.com/groups/kattykay',
'info_dict': {
'id': 'kattykay',
'title': 'Katty Kay',
},
'playlist_mincount': 27,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/groups/%s'
class VimeoReviewIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:review'
IE_DESC = 'Review pages on vimeo'
_VALID_URL = r'(?P<url>https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)/[0-9a-f]{10})'
_TESTS = [{
'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
'md5': 'c507a72f780cacc12b2248bb4006d253',
'info_dict': {
'id': '75524534',
'ext': 'mp4',
'title': "DICK HARDWICK 'Comedian'",
'uploader': 'Richard Hardwick',
'uploader_id': 'user21297594',
'description': "Comedian Dick Hardwick's five minute demo filmed in front of a live theater audience.\nEdit by Doug Mattocks",
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'note': 'video player needs Referer',
'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
'md5': '6295fdab8f4bf6a002d058b2c6dce276',
'info_dict': {
'id': '91613211',
'ext': 'mp4',
'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
'uploader': 'DevWeek Events',
'duration': 2773,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader_id': 'user22258446',
},
'skip': 'video gone',
}, {
'note': 'Password protected',
'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
'info_dict': {
'id': '138823582',
'ext': 'mp4',
'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
'uploader': 'TMB',
'uploader_id': 'user37284429',
},
'params': {
'videopassword': 'holygrail',
},
'skip': 'video gone',
}]
def _real_initialize(self):
self._login()
def _real_extract(self, url):
page_url, video_id = re.match(self._VALID_URL, url).groups()
clip_data = self._download_json(
page_url.replace('/review/', '/review/data/'),
video_id)['clipData']
config_url = clip_data['configUrl']
config = self._download_json(config_url, video_id)
info_dict = self._parse_config(config, video_id)
source_format = self._extract_original_format(
page_url + '/action', video_id)
if source_format:
info_dict['formats'].append(source_format)
self._vimeo_sort_formats(info_dict['formats'])
info_dict['description'] = clean_html(clip_data.get('description'))
return info_dict
class VimeoWatchLaterIE(VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
_VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
_TITLE = 'Watch Later'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': 'https://vimeo.com/watchlater',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _page_url(self, base_url, pagenum):
url = '%s/page:%d/' % (base_url, pagenum)
request = sanitized_Request(url)
# Set the header to get a partial html page with the ids,
# the normal page doesn't contain them.
request.add_header('X-Requested-With', 'XMLHttpRequest')
return request
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(VimeoChannelIE):
_VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
IE_NAME = 'vimeo:likes'
IE_DESC = 'Vimeo user likes'
_TESTS = [{
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
'info_dict': {
'id': 'user755559',
'title': 'urza’s Likes',
},
}, {
'url': 'https://vimeo.com/stormlapse/likes',
'only_matching': True,
}]
def _page_url(self, base_url, pagenum):
return '%s/page:%d/' % (base_url, pagenum)
def _real_extract(self, url):
user_id = self._match_id(url)
return self._extract_videos(user_id, 'https://vimeo.com/%s/likes' % user_id)
class VHXEmbedIE(VimeoBaseInfoExtractor):
IE_NAME = 'vhx:embed'
_VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
config_url = self._parse_json(self._search_regex(
r'window\.OTTData\s*=\s*({.+})', webpage,
'ott data'), video_id, js_to_json)['config_url']
config = self._download_json(config_url, video_id)
info = self._parse_config(config, video_id)
self._vimeo_sort_formats(info['formats'])
return info
| gpl-3.0 | -5,558,823,295,967,461,000 | 40.264184 | 138 | 0.508637 | false |
IanDoarn/zbsmsa | tests/loans.py | 1 | 15622 | """
loans.py
Final working version of the mutation automation
Mutates items from special excel file from inventory type
CI - Centralized Inventory to type
ZDI - Zimmer Distributor Inventory
Written by: Ian Doarn
Maintained by: Ian Doarn
"""
from zbsmsa.site import Site
from zbsmsa.inventory.stock import Stock, ProductChooser
from zbsmsa.utils.exceptions import InvalidRange, ItemAddError
import xlrd
import ctypes
import sys
import time
import logging
import os
from datetime import datetime
# TODO: Comment this file
__author__ = "Ian Doarn"
__maintainer__ = "Ian Doarn"
__current_date__ = '{:%m-%d-%Y}'.format(datetime.now())
PATH = os.path.dirname(os.path.realpath(__file__))
LOG_FILE_NAME = 'mutation_loans_{}.log'.format(__current_date__)
LOG_FILE_PATH = os.path.join(PATH, LOG_FILE_NAME)
MB_OK = 0x0
MB_HELP = 0x4000
ICON_EXLAIM = 0x30
ICON_INFO = 0x40
ICON_STOP = 0x10
# if os.path.isfile(os.path.join(PATH, LOG_FILE_NAME)):
#     os.remove(os.path.join(PATH, LOG_FILE_NAME))
# elif os.path.isfile(LOG_FILE_PATH):
# os.remove(LOG_FILE_PATH)
# else:
if not os.path.isfile(LOG_FILE_PATH):
with open(LOG_FILE_PATH, 'w')as l_file:
l_file.close()
logger = logging.getLogger()
handler = logging.FileHandler(LOG_FILE_PATH)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def message_box(title, text, style, log=True):
    """Show a Windows message box via ctypes, optionally logging the message."""
if log:
logger.info("[{}]: {}".format(title, text))
ctypes.windll.user32.MessageBoxW(0, text, title, style)
def load_workbook(file):
    """Read the mutation workbook into a list of per-sheet dicts of row data."""
workbook = xlrd.open_workbook(file)
sheets = []
for sheet in workbook.sheets():
try:
headers = sheet.row_values(0)
rows = []
for y in range(1, sheet.nrows):
row_dict = dict(zip(headers, sheet.row_values(y)))
if row_dict['serial_number'] == '' and row_dict['lot_number'] == '':
logger.warning("Missing lot and serial at sheet:{} row:{} item:{} lot/serial:{}".format(
sheet.name, str(y + 1), row_dict['product_number'], 'NULL'
))
else:
row_dict['row'] = str(y + 1)
rows.append(row_dict)
sheets.append({'sheet_name': sheet.name,
'data': rows,
'headers': headers,
'total_rows': sheet.nrows})
del rows
del headers
except Exception:
sheets.append({'sheet_name': sheet.name,
'data': None,
'headers': None,
'total_rows': None})
return sheets
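# Illustrative shape of load_workbook()'s return value (hypothetical data):
#     [{'sheet_name': 'Loans Transferred',
#       'headers': ['product_number', 'lot_number', 'serial_number', ...],
#       'total_rows': 120,
#       'data': [{'product_number': '47-2110-012', 'lot_number': '63352211',
#                 'serial_number': '', 'case_number': '123456', 'row': '2'}]}]
# Sheets that fail to parse keep their name but carry None for the other keys.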
def mutate_loans(site, stock, product_chooser, data, note="CI-ZDI Mutation, Loaners Late Debit Policy"):
    """Mutate each loan row in ``data`` from CI to ZDI, matching on lot/serial and case."""
time.sleep(3)
site.ribbon("InventoryTab")
stock.stock_tab()
for row in data:
current_site = site.get_site_name()
logger.info('logged into [{}]'.format(site.get_site_name()))
current_item_info = "Item: {} Lot: {} Row: {}".format(
row['product_number'],
row['lot_number'],
row['row'])
if row['distributor'] != current_site:
site.change_site(row['distributor'])
logger.info("site changed to [{}]".format(site.get_site_name()))
site.ribbon("InventoryTab")
stock.stock_tab()
stock.stock_tab_product_chooser()
product_chooser.search(row['product_number'])
time.sleep(3)
try:
product_chooser.add(row['product_number'])
except ItemAddError as iae:
message_box('Error',
'Unable to add [{}] to the product chooser. '
'Please add this manually and press ok to continue!'.format(current_item_info),
MB_OK | ICON_INFO)
product_chooser.finish()
stock.stock_tab_search()
time.sleep(3)
stock.iterate_search_table()
try:
for table_row in stock.current_table_data:
if row['lot_number'] != '' and row['serial_number'] == '':
tr_text = table_row['text']
if tr_text.__contains__(row['lot_number']) and tr_text.__contains__('CASE-'+row['case_number']):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation",
"Item was not mutated!"
"\nItem:{} Lot:{} Row:{}".format(row['product_number'],
row['lot_number'],
row['row']),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
elif str(int(row['serial_number'])) != '':
tr_text = table_row['text']
if tr_text.__contains__(row['case_number']) and tr_text.__contains__(
"Serial " + row['serial_number']):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation", "Item was not mutated! {}".format(current_item_info),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
except ValueError as val_error:
message_box('Error', "No Records found. [{}] {}".format(val_error, current_item_info), MB_OK | ICON_INFO)
except InvalidRange as ivr_error:
message_box('Error', ivr_error.message + " {}".format(current_item_info), MB_OK | ICON_INFO)
except Exception as other_error:
message_box('Error', str(other_error) + " {}".format(current_item_info), MB_OK | ICON_INFO)
else:
continue
def mutate_bins(site, stock, product_chooser, data, note="CI-ZDI Mutation, Loaners Late Debit Policy"):
    """Mutate each bin row in ``data``, skipping items in the CI MISSING ZONE bin."""
time.sleep(3)
site.ribbon("InventoryTab")
stock.stock_tab()
for row in data:
current_item_info = "Item: {} Lot: {} Row: {}".format(
row['product_number'],
row['lot_number'],
row['row'])
if row['bin'] == 'CI MISSING ZONE-0-0':
message_box('Error', 'Can not mutate item in bin [{}]: {}'.format(row['bin'], current_item_info),
MB_OK | ICON_EXLAIM, log=False)
logger.warning('Can not mutate item in bin [{}]: {}'.format(row['bin'], current_item_info))
else:
current_site = site.get_site_name()
logger.info('logged into [{}]'.format(site.get_site_name()))
if row['name'] != current_site:
site.change_site(row['name'])
logger.info("site changed to [{}]".format(site.get_site_name()))
site.ribbon("InventoryTab")
stock.stock_tab()
stock.stock_tab_product_chooser()
product_chooser.search(row['product_number'])
time.sleep(3)
try:
product_chooser.add(row['product_number'])
except ItemAddError as iae:
message_box('Error',
'Unable to add [{}] to the product chooser. '
'Please add this manually and press ok to continue!'.format(current_item_info),
MB_OK | ICON_INFO)
product_chooser.finish()
stock.stock_tab_search()
time.sleep(3)
stock.iterate_search_table()
try:
for table_row in stock.current_table_data:
if row['lot_number'] != '' and row['serial_number'] == '':
tr_text = table_row['text']
bin_num = ''
if row['bin'] == 'location_bin':
if tr_text.__contains__('Location Bin'):
bin_num = 'Location Bin'
if tr_text.__contains__('Consigned'):
bin_num = 'Consigned'
else:
bin_num = row['bin']
if tr_text.__contains__(row['lot_number']) and tr_text.__contains__(bin_num):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation",
"Item was not mutated!"
"\nItem:{} Lot:{} Row:{}".format(row['product_number'],
row['lot_number'],
row['row']),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
elif row['serial_number'] != '':
tr_text = table_row['text']
bin_num = ''
if row['bin'] == 'location_bin':
if tr_text.__contains__('Location Bin'):
bin_num = 'Location Bin'
if tr_text.__contains__('Consigned'):
bin_num = 'Consigned'
else:
bin_num = row['bin']
if tr_text.__contains__(bin_num) and tr_text.__contains__("Serial " + row['serial_number']):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation", "Item was not mutated! {}".format(current_item_info),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
except ValueError as val_error:
message_box('Error', "No Records found. [{}] {}".format(val_error, current_item_info),
MB_OK | ICON_INFO)
except InvalidRange as ivr_error:
message_box('Error', ivr_error.message + " {}".format(current_item_info),
MB_OK | ICON_INFO)
except Exception as other_error:
message_box('Error', str(other_error) + " {}".format(current_item_info),
MB_OK | ICON_INFO)
else:
continue
def main(username, password, file, driver):
    """Launch the site, log in, and run loan/bin mutations from the workbook."""
site = Site(username, password, driver, launch=False)
mutation_data = load_workbook(file)
site.launch(maximize=False)
# Log launch information
logger.debug("USER[{}]".format(username))
logger.debug("DRIVER[{}]".format(driver))
logger.debug("FILE[{}]".format(file))
logger.debug("URI[{}]".format(site.driver_uri))
logger.debug("SESSIONID[{}]".format(site.driver_session_id))
stock = Stock(site)
pc = ProductChooser(stock)
message_box("Mutation", "Please press ok when the site has fully loaded",
MB_OK | ICON_INFO, log=False)
site.login()
for sheet in mutation_data:
if sheet['sheet_name'] == 'Loans Transferred':
mutate_loans(site, stock, pc, sheet['data'])
if sheet['sheet_name'] == 'Bin Transferred':
mutate_bins(site, stock, pc, sheet['data'])
site.close()
if __name__ == '__main__':
usage = "loans.py [username] [password] [driver location] [file location]"
if len(sys.argv[1:]) not in [4, 5]:
print(usage)
else:
_user = sys.argv[1]
_pass = sys.argv[2]
_drive_loc = sys.argv[3]
_file_loc = sys.argv[4]
try:
logger.info("Begin program execution at main()")
main(_user, _pass, _file_loc, _drive_loc)
except KeyboardInterrupt as ki_error:
message_box("Fatal Error", "[FATAL]::Fatal error caused program to fail.\nERROR:{}".format(ki_error),
MB_OK | ICON_STOP, log=False)
logger.critical("[FATAL]:: Fatal error caused program to fail. ERROR:{}".format(ki_error))
except Exception as fatal_error:
message_box("Fatal Error", "[FATAL]::Fatal error caused program to fail.\nERROR:{}".format(fatal_error),
MB_OK | ICON_STOP, log=False)
logger.critical("[FATAL]:: Fatal error caused program to fail. ERROR:{}".format(fatal_error))
else:
pass
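# Illustrative invocation (hypothetical credentials and paths, matching the
# usage string above):
#   python loans.py jdoe secret /path/to/chromedriver mutations.xlsx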
| apache-2.0 | 6,084,375,937,478,261,000 | 44.150289 | 117 | 0.475355 | false |
LAIRLAB/qr_trees | src/python/run_ilqr_diffdrive.py | 1 | 2328 | #!/usr/bin/env python
#
# Arun Venkatraman ([email protected])
# December 2016
#
# If we are not running from the build directory, then add lib to path from
# build assuming we are running from the python folder
import os
full_path = os.path.realpath(__file__)
if full_path.count("src/python") > 0:
import sys
to_add = os.path.abspath(os.path.join(os.path.split(full_path)[0], "../../build/"))
sys.path.append(to_add)
from IPython import embed
import lib.ilqr_diffdrive as ilqr
import visualize_circle_world as vis
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
obs_prior = [0.5, 0.5]
world_dims = [-30, 30, -30, 30]
w1 = ilqr.CircleWorld(world_dims)
w2 = ilqr.CircleWorld(world_dims)
obs_pos_1 = [-2, 0.0]
obs_pos_2 = [2, 0.0]
obs_radius = 10.0
obstacle_1 = ilqr.Circle(obs_radius, obs_pos_1);
obstacle_2 = ilqr.Circle(obs_radius, obs_pos_2);
# add obstacle to world 1
w1.add_obstacle(obstacle_1);
# add obstacle to world 2
w2.add_obstacle(obstacle_2);
cost, states_true_1, obs_fname_1 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w1, w2, obs_prior, "true1", "true1")
cost, states_true_2, obs_fname_2 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w2, w1, obs_prior, "true2", "true2")
cost, states_weighted_1, obs_fname_3 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w1, w2, obs_prior, "weight3", "weight3")
cost, states_weighted_2, obs_fname_4 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w2, w1, obs_prior, "weight4", "weight4")
cost, states_hind_1, obs_fname_5 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w1, w2, obs_prior, "hind3", "hind3")
cost, states_hind_2, obs_fname_6 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w2, w1, obs_prior, "hind4", "hind4")
print("Drawing world 1")
ax1 = vis.parse_draw_files([states_true_1, states_weighted_1, states_hind_1], obs_fname_1,
show=False)
plt.title('World 1')
print("Drawing world 2")
ax2 = vis.parse_draw_files([states_true_2, states_weighted_2, states_hind_2],
obs_fname_2, show=False)
plt.title('World 2')
plt.show()
embed()
| bsd-3-clause | 9,018,473,560,120,826,000 | 28.468354 | 94 | 0.614691 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/gdk/__init__/DisplayManager.py | 1 | 1186 | # encoding: utf-8
# module gtk.gdk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/pynotify/_pynotify.so
# by generator 1.135
# no doc
# imports
from exceptions import Warning
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
import pango as __pango
import pangocairo as __pangocairo
class DisplayManager(__gobject__gobject.GObject):
"""
Object GdkDisplayManager
Signals from GdkDisplayManager:
display-opened (GdkDisplay)
Properties from GdkDisplayManager:
default-display -> GdkDisplay: Default Display
The default display for GDK
Signals from GObject:
notify (GParam)
"""
@classmethod
def do_display_opened(cls, *args, **kwargs): # real signature unknown
pass
def get_default_display(self, *args, **kwargs): # real signature unknown
pass
def list_displays(self, *args, **kwargs): # real signature unknown
pass
def set_default_display(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__gtype__ = None # (!) real value is ''
| gpl-2.0 | -982,081,804,465,363,600 | 23.204082 | 76 | 0.661889 | false |
mcvidomi/poim2motif | run_svm_real.py | 1 | 1483 | '''
Created on 08.06.2015
@author: marinavidovic
'''
import os
import pdb
import utils_svm
import pickle
import numpy as np
import copy
import genQ
import makePOIM
import view
import matplotlib
matplotlib.use('Agg')
if __name__ == '__main__':
read_data = 1
datapath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data.txt"
savepath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data0.pkl"
lines=1000
if read_data:
x,y=utils_svm.extractRealData(datapath,savepath,lines)
else:
fobj=open(savepath,'rb')
x,y=pickle.load(fobj)
fobj.close()
num_pos = 100
num_neg = 4*num_pos
print "reduce samples"
x_red,y_red = utils_svm.reduce_samples(copy.deepcopy(x),copy.deepcopy(y),num_pos,num_neg)
nploci_letters,nploci_positions = utils_svm.non_polymorphic_loci(x_red)
#read data
experiment_name = "real1"
if not os.path.exists(experiment_name):
os.makedirs(experiment_name)
poimpath=experiment_name+"/poim.pkl"
tally=30
positives=25
sequenceno=100
mutation_prob=0.0
motif="ATTTT"
mu=13
x,y = makePOIM.gensequences(tally,positives,sequenceno,mutation_prob,motif,mu)
#compute POIM
poim_degree = 6
kernel_degree = 8
print "start poim computation"
poims = makePOIM.computePOIM(x,y,poim_degree,kernel_degree,poimpath)
Q2 = poims[0][1]
#view.test()
view.figurepoimsimple(Q2, "poim_pic", 0)
| mit | -5,112,159,067,973,785,000 | 25.017544 | 93 | 0.665543 | false |
SevereOverfl0w/MCDirectory | migrations/versions/10723b632a87_.py | 1 | 1036 | """empty message
Revision ID: 10723b632a87
Revises: 3d7ce850941c
Create Date: 2013-11-12 22:18:26.482191
"""
# revision identifiers, used by Alembic.
revision = '10723b632a87'
down_revision = '3d7ce850941c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('commenter_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=False),
sa.Column('comment', sa.Text(), nullable=False),
sa.Column('stars', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['commenter_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
### end Alembic commands ###
| mit | 5,601,540,713,512,676,000 | 27.777778 | 63 | 0.664093 | false |
klmitch/python-keystoneclient | keystoneclient/tests/unit/v3/test_users.py | 2 | 11616 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from keystoneclient import exceptions
from keystoneclient.tests.unit.v3 import utils
from keystoneclient.v3 import users
class UserTests(utils.ClientTestCase, utils.CrudTests):
def setUp(self):
super(UserTests, self).setUp()
self.key = 'user'
self.collection_key = 'users'
self.model = users.User
self.manager = self.client.users
def new_ref(self, **kwargs):
kwargs = super(UserTests, self).new_ref(**kwargs)
kwargs.setdefault('description', uuid.uuid4().hex)
kwargs.setdefault('domain_id', uuid.uuid4().hex)
kwargs.setdefault('enabled', True)
kwargs.setdefault('name', uuid.uuid4().hex)
kwargs.setdefault('default_project_id', uuid.uuid4().hex)
return kwargs
def test_add_user_to_group(self):
group_id = uuid.uuid4().hex
ref = self.new_ref()
self.stub_url('PUT',
['groups', group_id, self.collection_key, ref['id']],
status_code=204)
self.manager.add_to_group(user=ref['id'], group=group_id)
self.assertRaises(exceptions.ValidationError,
self.manager.remove_from_group,
user=ref['id'],
group=None)
def test_list_users_in_group(self):
group_id = uuid.uuid4().hex
ref_list = [self.new_ref(), self.new_ref()]
self.stub_entity('GET',
['groups', group_id, self.collection_key],
entity=ref_list)
returned_list = self.manager.list(group=group_id)
self.assertEqual(len(ref_list), len(returned_list))
[self.assertIsInstance(r, self.model) for r in returned_list]
def test_check_user_in_group(self):
group_id = uuid.uuid4().hex
ref = self.new_ref()
self.stub_url('HEAD',
['groups', group_id, self.collection_key, ref['id']],
status_code=204)
self.manager.check_in_group(user=ref['id'], group=group_id)
self.assertRaises(exceptions.ValidationError,
self.manager.check_in_group,
user=ref['id'],
group=None)
def test_remove_user_from_group(self):
group_id = uuid.uuid4().hex
ref = self.new_ref()
self.stub_url('DELETE',
['groups', group_id, self.collection_key, ref['id']],
status_code=204)
self.manager.remove_from_group(user=ref['id'], group=group_id)
self.assertRaises(exceptions.ValidationError,
self.manager.remove_from_group,
user=ref['id'],
group=None)
def test_create_doesnt_log_password(self):
password = uuid.uuid4().hex
ref = self.new_ref()
self.stub_entity('POST', [self.collection_key],
status_code=201, entity=ref)
req_ref = ref.copy()
req_ref.pop('id')
param_ref = req_ref.copy()
param_ref['password'] = password
params = utils.parameterize(param_ref)
self.manager.create(**params)
self.assertNotIn(password, self.logger.output)
def test_create_with_project(self):
# Can create a user with the deprecated project option rather than
# default_project_id.
self.deprecations.expect_deprecations()
ref = self.new_ref()
self.stub_entity('POST', [self.collection_key],
status_code=201, entity=ref)
req_ref = ref.copy()
req_ref.pop('id')
param_ref = req_ref.copy()
# Use deprecated project_id rather than new default_project_id.
param_ref['project_id'] = param_ref.pop('default_project_id')
params = utils.parameterize(param_ref)
returned = self.manager.create(**params)
self.assertIsInstance(returned, self.model)
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
self.assertEntityRequestBodyIs(req_ref)
def test_create_with_project_and_default_project(self):
# Can create a user with the deprecated project and default_project_id.
# The backend call should only pass the default_project_id.
self.deprecations.expect_deprecations()
ref = self.new_ref()
self.stub_entity('POST',
[self.collection_key],
status_code=201, entity=ref)
req_ref = ref.copy()
req_ref.pop('id')
param_ref = req_ref.copy()
# Add the deprecated project_id in the call, the value will be ignored.
param_ref['project_id'] = 'project'
params = utils.parameterize(param_ref)
returned = self.manager.create(**params)
self.assertIsInstance(returned, self.model)
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
self.assertEntityRequestBodyIs(req_ref)
def test_update_doesnt_log_password(self):
password = uuid.uuid4().hex
ref = self.new_ref()
req_ref = ref.copy()
req_ref.pop('id')
param_ref = req_ref.copy()
self.stub_entity('PATCH',
[self.collection_key, ref['id']],
status_code=200, entity=ref)
param_ref['password'] = password
params = utils.parameterize(param_ref)
self.manager.update(ref['id'], **params)
self.assertNotIn(password, self.logger.output)
def test_update_with_project(self):
# Can update a user with the deprecated project option rather than
# default_project_id.
self.deprecations.expect_deprecations()
ref = self.new_ref()
req_ref = ref.copy()
req_ref.pop('id')
param_ref = req_ref.copy()
self.stub_entity('PATCH',
[self.collection_key, ref['id']],
status_code=200, entity=ref)
# Use deprecated project_id rather than new default_project_id.
param_ref['project_id'] = param_ref.pop('default_project_id')
params = utils.parameterize(param_ref)
returned = self.manager.update(ref['id'], **params)
self.assertIsInstance(returned, self.model)
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
self.assertEntityRequestBodyIs(req_ref)
def test_update_with_project_and_default_project(self, ref=None):
self.deprecations.expect_deprecations()
ref = self.new_ref()
req_ref = ref.copy()
req_ref.pop('id')
param_ref = req_ref.copy()
self.stub_entity('PATCH',
[self.collection_key, ref['id']],
status_code=200, entity=ref)
# Add the deprecated project_id in the call, the value will be ignored.
param_ref['project_id'] = 'project'
params = utils.parameterize(param_ref)
returned = self.manager.update(ref['id'], **params)
self.assertIsInstance(returned, self.model)
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
self.assertEntityRequestBodyIs(req_ref)
def test_update_password(self):
old_password = uuid.uuid4().hex
new_password = uuid.uuid4().hex
self.stub_url('POST',
[self.collection_key, self.TEST_USER_ID, 'password'])
self.client.user_id = self.TEST_USER_ID
self.manager.update_password(old_password, new_password)
exp_req_body = {
'user': {
'password': new_password, 'original_password': old_password
}
}
self.assertEqual(
'%s/users/%s/password' % (self.TEST_URL, self.TEST_USER_ID),
self.requests_mock.last_request.url)
self.assertRequestBodyIs(json=exp_req_body)
self.assertNotIn(old_password, self.logger.output)
self.assertNotIn(new_password, self.logger.output)
def test_update_password_with_no_hardcoded_endpoint_filter(self):
# test to ensure the 'endpoint_filter' parameter is not being
# passed from the manager. Endpoint filtering should be done at
# the Session, not the individual managers.
old_password = uuid.uuid4().hex
new_password = uuid.uuid4().hex
expected_params = {'user': {'password': new_password,
'original_password': old_password}}
user_password_update_path = '/users/%s/password' % self.TEST_USER_ID
self.client.user_id = self.TEST_USER_ID
        # NOTE(gyee): the user manager subclasses keystoneclient.base.Manager
        # and utilizes the _update() method in the base class to interface
        # with the client session to perform the update. In this case, we
# just need to make sure the 'endpoint_filter' parameter is not
# there.
with mock.patch('keystoneclient.base.Manager._update') as m:
self.manager.update_password(old_password, new_password)
m.assert_called_with(user_password_update_path, expected_params,
method='POST', log=False)
def test_update_password_with_bad_inputs(self):
old_password = uuid.uuid4().hex
new_password = uuid.uuid4().hex
# users can't unset their password
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
old_password, None)
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
old_password, '')
# users can't start with empty passwords
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
None, new_password)
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
'', new_password)
# this wouldn't result in any change anyway
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
None, None)
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
'', '')
password = uuid.uuid4().hex
self.assertRaises(exceptions.ValidationError,
self.manager.update_password,
password, password)
| apache-2.0 | -8,940,769,469,299,318,000 | 36.592233 | 79 | 0.577135 | false |
oculusstorystudio/kraken | Python/kraken/ui/GraphView/pyflowgraph/node.py | 1 | 13604 |
#
# Copyright 2015-2017 Eric Thivierge
#
import math
import json
from kraken.ui.Qt import QtWidgets, QtGui, QtCore
from port import InputPort, OutputPort, IOPort
class NodeTitle(QtWidgets.QGraphicsWidget):
__color = QtGui.QColor(25, 25, 25)
__font = QtGui.QFont('Roboto', 14)
__font.setLetterSpacing(QtGui.QFont.PercentageSpacing, 115)
__labelBottomSpacing = 12
def __init__(self, text, parent=None):
super(NodeTitle, self).__init__(parent)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed))
self.__textItem = QtWidgets.QGraphicsTextItem(text, self)
self.__textItem.setDefaultTextColor(self.__color)
self.__textItem.setFont(self.__font)
self.__textItem.setPos(0, -2)
option = self.__textItem.document().defaultTextOption()
option.setWrapMode(QtGui.QTextOption.NoWrap)
self.__textItem.document().setDefaultTextOption(option)
self.__textItem.adjustSize()
self.setPreferredSize(self.textSize())
def setText(self, text):
self.__textItem.setPlainText(text)
self.__textItem.adjustSize()
self.setPreferredSize(self.textSize())
def textSize(self):
return QtCore.QSizeF(
self.__textItem.textWidth(),
self.__font.pointSizeF() + self.__labelBottomSpacing
)
# def paint(self, painter, option, widget):
# super(NodeTitle, self).paint(painter, option, widget)
# painter.setPen(QtGui.QPen(QtGui.QColor(0, 255, 0)))
# painter.drawRect(self.windowFrameRect())
class NodeHeader(QtWidgets.QGraphicsWidget):
def __init__(self, text, parent=None):
super(NodeHeader, self).__init__(parent)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
layout = QtWidgets.QGraphicsLinearLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(3)
layout.setOrientation(QtCore.Qt.Horizontal)
self.setLayout(layout)
self._titleWidget = NodeTitle(text, self)
layout.addItem(self._titleWidget)
layout.setAlignment(self._titleWidget, QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop)
def setText(self, text):
self._titleWidget.setText(text)
# def paint(self, painter, option, widget):
# super(NodeHeader, self).paint(painter, option, widget)
# painter.setPen(QtGui.QPen(QtGui.QColor(0, 255, 100)))
# painter.drawRect(self.windowFrameRect())
class PortList(QtWidgets.QGraphicsWidget):
def __init__(self, parent):
super(PortList, self).__init__(parent)
layout = QtWidgets.QGraphicsLinearLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(4)
layout.setOrientation(QtCore.Qt.Vertical)
self.setLayout(layout)
def addPort(self, port, alignment):
layout = self.layout()
layout.addItem(port)
layout.setAlignment(port, alignment)
self.adjustSize()
return port
# def paint(self, painter, option, widget):
# super(PortList, self).paint(painter, option, widget)
# painter.setPen(QtGui.QPen(QtGui.QColor(255, 255, 0)))
# painter.drawRect(self.windowFrameRect())
class Node(QtWidgets.QGraphicsWidget):
nameChanged = QtCore.Signal(str, str)
__defaultColor = QtGui.QColor(154, 205, 50, 255)
__defaultUnselectedColor = QtGui.QColor(25, 25, 25)
__defaultSelectedColor = QtGui.QColor(255, 255, 255, 255)
__defaultUnselectedPen = QtGui.QPen(__defaultUnselectedColor, 1.6)
__defaultSelectedPen = QtGui.QPen(__defaultSelectedColor, 1.6)
__defaultLinePen = QtGui.QPen(QtGui.QColor(25, 25, 25, 255), 1.25)
def __init__(self, graph, name):
super(Node, self).__init__()
self.__name = name
self.__graph = graph
self.__color = self.__defaultColor
self.__unselectedColor = self.__defaultUnselectedColor
self.__selectedColor = self.__defaultSelectedColor
self.__unselectedPen = QtGui.QPen(self.__defaultUnselectedPen)
self.__selectedPen = QtGui.QPen(self.__defaultSelectedPen)
self.__linePen = QtGui.QPen(self.__defaultLinePen)
self.setMinimumWidth(60)
self.setMinimumHeight(20)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
layout = QtWidgets.QGraphicsLinearLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.setOrientation(QtCore.Qt.Vertical)
self.setLayout(layout)
self.__headerItem = NodeHeader(self.__name, self)
layout.addItem(self.__headerItem)
layout.setAlignment(self.__headerItem, QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop)
self.__ports = []
self.__ioPortsHolder = PortList(self)
self.__inputPortsHolder = PortList(self)
self.__outputPortsHolder = PortList(self)
self.__outputPortsHolder.layout().setContentsMargins(0, 0, 0, 10)
layout.addItem(self.__ioPortsHolder)
layout.addItem(self.__inputPortsHolder)
layout.addItem(self.__outputPortsHolder)
self.__selected = False
self.__dragging = False
# =====
# Name
# =====
def getName(self):
return self.__name
def setName(self, name):
if name != self.__name:
origName = self.__name
self.__name = name
self.__headerItem.setText(self.__name)
            # Emit an event, so that the graph can update itself.
self.nameChanged.emit(origName, name)
# Update the node so that the size is computed.
self.adjustSize()
# =======
# Colors
# =======
def getColor(self):
return self.__color
def setColor(self, color):
self.__color = color
self.update()
def getUnselectedColor(self):
return self.__unselectedColor
def setUnselectedColor(self, color):
self.__unselectedColor = color
self.__unselectedPen.setColor(self.__unselectedColor)
self.update()
def getSelectedColor(self):
return self.__selectedColor
def setSelectedColor(self, color):
self.__selectedColor = color
self.__selectedPen.setColor(self.__selectedColor)
self.update()
# =============
# Misc Methods
# =============
def getGraph(self):
return self.__graph
def getHeader(self):
return self.__headerItem
# ==========
# Selection
# ==========
def isSelected(self):
return self.__selected
def setSelected(self, selected=True):
self.__selected = selected
self.setZValue(20.0)
self.update()
#########################
## Graph Pos
def getGraphPos(self):
transform = self.transform()
size = self.size()
return QtCore.QPointF(transform.dx()+(size.width()*0.5), transform.dy()+(size.height()*0.5))
def setGraphPos(self, graphPos):
self.prepareConnectionGeometryChange()
size = self.size()
self.setTransform(QtGui.QTransform.fromTranslate(graphPos.x(), graphPos.y()), False)
def translate(self, x, y):
self.prepareConnectionGeometryChange()
super(Node, self).moveBy(x, y)
# Prior to moving the node, we need to tell the connections to prepare for a geometry change.
    # This method must be called prior to moving a node.
def prepareConnectionGeometryChange(self):
for port in self.__ports:
if port.inCircle():
for connection in port.inCircle().getConnections():
connection.prepareGeometryChange()
if port.outCircle():
for connection in port.outCircle().getConnections():
connection.prepareGeometryChange()
#########################
## Ports
def addPort(self, port):
if isinstance(port, InputPort):
self.__inputPortsHolder.addPort(port, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
elif isinstance(port, OutputPort):
self.__outputPortsHolder.addPort(port, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
else:
self.__ioPortsHolder.addPort(port, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.__ports.append(port)
self.adjustSize()
return port
def getPort(self, name):
for port in self.__ports:
if port.getName() == name:
return port
return None
def getInputPort(self, name):
for port in self.__ports:
if port.getName() == name and isinstance(port, (InputPort, IOPort)):
return port
return None
def getOutputPort(self, name):
for port in self.__ports:
if port.getName() == name and isinstance(port, (OutputPort, IOPort)):
return port
return None
def paint(self, painter, option, widget):
rect = self.windowFrameRect()
painter.setBrush(self.__color)
painter.setPen(QtGui.QPen(QtGui.QColor(0, 0, 0, 0), 0))
roundingY = 8
roundingX = 8
painter.drawRoundedRect(rect, roundingX, roundingY, mode=QtCore.Qt.AbsoluteSize)
# Title BG
titleHeight = self.__headerItem.size().height() - 3
painter.setBrush(self.__color.darker(125))
roundingY = rect.width() * roundingX / titleHeight
painter.drawRoundedRect(0, 0, rect.width(), titleHeight, roundingX, roundingY, mode=QtCore.Qt.AbsoluteSize)
painter.drawRect(0, titleHeight * 0.5 + 2, rect.width(), titleHeight * 0.5)
painter.setBrush(QtGui.QColor(0, 0, 0, 0))
if self.__selected:
painter.setPen(self.__selectedPen)
else:
painter.setPen(self.__unselectedPen)
roundingY = 8
roundingX = 8
painter.drawRoundedRect(rect, roundingX, roundingY, mode=QtCore.Qt.AbsoluteSize)
#########################
## Events
def mousePressEvent(self, event):
if event.button() is QtCore.Qt.MouseButton.LeftButton:
modifiers = event.modifiers()
if modifiers == QtCore.Qt.ControlModifier:
if not self.isSelected():
self.__graph.selectNode(self, clearSelection=False)
else:
self.__graph.deselectNode(self)
elif modifiers == QtCore.Qt.ShiftModifier:
if not self.isSelected():
self.__graph.selectNode(self, clearSelection=False)
else:
if not self.isSelected():
self.__graph.selectNode(self, clearSelection=True)
# Push all nodes back 1 level in z depth to bring selected
# node to front
for node in [x for x in self.__graph.getNodes().values()]:
if node == self:
continue
if node.zValue() != 0.0:
node.setZValue(node.zValue() - 1)
self.__dragging = True
self._mouseDownPoint = self.mapToScene(event.pos())
self._mouseDelta = self._mouseDownPoint - self.getGraphPos()
self._lastDragPoint = self._mouseDownPoint
self._nodesMoved = False
else:
super(Node, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if self.__dragging:
newPos = self.mapToScene(event.pos())
graph = self.getGraph()
if graph.getSnapToGrid() is True:
gridSize = graph.getGridSize()
newNodePos = newPos - self._mouseDelta
snapPosX = math.floor(newNodePos.x() / gridSize) * gridSize
snapPosY = math.floor(newNodePos.y() / gridSize) * gridSize
snapPos = QtCore.QPointF(snapPosX, snapPosY)
newPosOffset = snapPos - newNodePos
newPos = newPos + newPosOffset
delta = newPos - self._lastDragPoint
self.__graph.moveSelectedNodes(delta)
self._lastDragPoint = newPos
self._nodesMoved = True
else:
super(Node, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
if self.__dragging:
if self._nodesMoved:
newPos = self.mapToScene(event.pos())
delta = newPos - self._mouseDownPoint
self.__graph.endMoveSelectedNodes(delta)
self.setCursor(QtCore.Qt.ArrowCursor)
self.__dragging = False
else:
super(Node, self).mouseReleaseEvent(event)
#########################
## shut down
def disconnectAllPorts(self):
# gather all the connections into a list, and then remove them from the graph.
# This is because we can't remove connections from ports while
# iterating over the set.
connections = []
for port in self.__ports:
if port.inCircle():
for connection in port.inCircle().getConnections():
connections.append(connection)
if port.outCircle():
for connection in port.outCircle().getConnections():
connections.append(connection)
for connection in connections:
self.__graph.removeConnection(connection) | bsd-3-clause | -1,098,339,499,066,316,300 | 31.625899 | 115 | 0.600485 | false |
odahoda/noisicaa | noisidev/runvmtests.py | 1 | 17693 | #!/usr/bin/env python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import asyncio
import argparse
import datetime
import glob
import logging
import os
import os.path
import shutil
import subprocess
import sys
import time
import traceback
import asyncssh
from . import testvm
logger = logging.getLogger(__name__)
ROOT_DIR = os.path.abspath(
os.path.join(os.path.join(os.path.dirname(__file__), '..')))
TEST_SCRIPT = r'''#!/bin/bash
SOURCE="{settings.source}"
BRANCH="{settings.branch}"
set -e
set -x
mkdir -p ~/.pip
cat >~/.pip/pip.conf <<EOF
[global]
index-url = http://10.0.2.2:{settings.devpi_port}/root/pypi/+simple/
trusted-host = 10.0.2.2
EOF
sudo apt-get -q -y install python3 python3-venv
rm -fr noisicaa/
if [ $SOURCE == git ]; then
sudo apt-get -q -y install git
git clone --branch=$BRANCH --single-branch https://github.com/odahoda/noisicaa
elif [ $SOURCE == local ]; then
mkdir noisicaa/
tar -x -z -Cnoisicaa/ -flocal.tar.gz
fi
cd noisicaa/
./waf configure --venvdir=../venv --download --install-system-packages
./waf build
sudo ./waf install
./waf configure --venvdir=../venv --download --install-system-packages --enable-tests
./waf build
./waf test --tags=unit
'''
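# Usage sketch: TEST_SCRIPT is rendered with str.format() on a TestSettings
# instance (see do_test below), e.g. TEST_SCRIPT.format(settings=settings),
# which fills in {settings.source}, {settings.branch} and
# {settings.devpi_port} from the command-line arguments.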
async def log_dumper(fp_in, out_func, encoding=None):
if encoding is None:
line = ''
lf = '\n'
else:
line = bytearray()
lf = b'\n'
while not fp_in.at_eof():
c = await fp_in.read(1)
if c == lf:
if encoding is not None:
line = line.decode(encoding)
out_func(line)
if encoding is None:
line = ''
else:
line = bytearray()
else:
line += c
if line:
if encoding is not None:
line = line.decode(encoding)
        out_func(line)
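# Usage sketch (mirrors the calls further below): pipe a subprocess's stdout
# through a logger, decoding each line as UTF-8:
#   proc = await asyncio.create_subprocess_exec('cmd', stdout=subprocess.PIPE)
#   await log_dumper(proc.stdout, logger.debug, encoding='utf-8')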
class TestMixin(testvm.VM):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.__installed_sentinel = os.path.join(self.vm_dir, 'installed')
@property
def is_installed(self):
return os.path.isfile(self.__installed_sentinel)
async def __spinner(self, prefix, result):
start_time = datetime.datetime.now()
        spinner = '-\\|/'  # backslash escaped: '\|' is an invalid escape sequence
spinner_idx = 0
while True:
duration = (datetime.datetime.now() - start_time).total_seconds()
minutes = duration // 60
seconds = duration - 60 * minutes
sys.stdout.write('\033[2K\r%s [%02d:%02d] ... ' % (prefix, minutes, seconds))
if not result.empty():
sys.stdout.write(await result.get())
sys.stdout.write('\n')
break
sys.stdout.write('(%s)' % spinner[spinner_idx])
spinner_idx = (spinner_idx + 1) % len(spinner)
sys.stdout.flush()
await asyncio.sleep(0.2, loop=self.event_loop)
async def install(self):
logger.info("Installing VM '%s'...", self.name)
result = asyncio.Queue(loop=self.event_loop)
spinner_task = self.event_loop.create_task(self.__spinner("Installing VM '%s'" % self.name, result))
try:
if os.path.isdir(self.vm_dir):
shutil.rmtree(self.vm_dir)
await super().install()
await self.create_snapshot('clean')
open(self.__installed_sentinel, 'w').close()
except:
logger.error("Installation of VM '%s' failed.", self.name)
result.put_nowait('FAILED')
raise
else:
logger.info("Installed VM '%s'...", self.name)
result.put_nowait('OK')
finally:
await spinner_task
async def run_test(self, settings):
logger.info("Running test '%s'... ", self.name)
result = asyncio.Queue(loop=self.event_loop)
spinner_task = self.event_loop.create_task(self.__spinner("Running test '%s'" % self.name, result))
try:
await self.do_test(settings)
except Exception as exc:
logger.error("Test '%s' failed with an exception:\n%s", self.name, traceback.format_exc())
result.put_nowait('FAILED')
return False
else:
logger.info("Test '%s' completed successfully.")
result.put_nowait('SUCCESS')
return True
finally:
await spinner_task
async def do_test(self, settings):
vm_logger = logging.getLogger(self.name)
logger.info("Waiting for SSH port to open...")
await self.wait_for_ssh()
logger.info("Connecting to VM...")
client = await asyncssh.connect(
host='localhost',
port=5555,
options=asyncssh.SSHClientConnectionOptions(
username='testuser',
password='123',
known_hosts=None),
loop=self.event_loop)
try:
sftp = await client.start_sftp_client()
try:
logger.info("Copy runtest.sh...")
async with sftp.open('runtest.sh', 'w') as fp:
await fp.write(TEST_SCRIPT.format(settings=settings))
await sftp.chmod('runtest.sh', 0o775)
if settings.source == 'local':
logger.info("Copy local.tar.gz...")
proc = subprocess.Popen(
['git', 'config', 'core.quotepath', 'off'],
cwd=ROOT_DIR)
proc.wait()
assert proc.returncode == 0
proc = subprocess.Popen(
['bash', '-c', 'tar -c -z -T<(git ls-tree --full-tree -r --name-only HEAD) -f-'],
cwd=ROOT_DIR,
stdout=subprocess.PIPE)
async with sftp.open('local.tar.gz', 'wb') as fp:
while True:
buf = proc.stdout.read(1 << 20)
if not buf:
break
await fp.write(buf)
proc.wait()
assert proc.returncode == 0
finally:
sftp.exit()
proc = await client.create_process("./runtest.sh", stderr=subprocess.STDOUT)
stdout_dumper = self.event_loop.create_task(log_dumper(proc.stdout, vm_logger.info))
await proc.wait()
await stdout_dumper
assert proc.returncode == 0
finally:
client.close()
class Ubuntu_16_04(TestMixin, testvm.Ubuntu_16_04):
pass
class Ubuntu_18_04(TestMixin, testvm.Ubuntu_18_04):
pass
ALL_VMTESTS = {
'ubuntu-16.04': Ubuntu_16_04,
'ubuntu-18.04': Ubuntu_18_04,
}
VM_BASE_DIR = os.path.abspath(
os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'vmtests'))
class TestSettings(object):
def __init__(self, args):
self.branch = args.branch
self.source = args.source
self.shutdown = args.shutdown
self.devpi_port = args.devpi_port
def bool_arg(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() in ('true', 'y', 'yes', 'on', '1'):
return True
if value.lower() in ('false', 'n', 'no', 'off', '0'):
return False
raise ValueError("Invalid value '%s'." % value)
raise TypeError("Invalid type '%s'." % type(value).__name__)
async def main(event_loop, argv):
argparser = argparse.ArgumentParser()
argparser.add_argument(
'--log-level',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='critical',
help="Minimum level for log messages written to STDERR.")
argparser.add_argument('--source', type=str, choices=['local', 'git'], default='local')
argparser.add_argument('--branch', type=str, default='master')
argparser.add_argument(
'--rebuild-vm', type=bool_arg, default=False,
help="Rebuild the VM from scratch, discarding the current state.")
argparser.add_argument(
'--clean-snapshot', type=bool_arg, default=True,
help=("Restore the VM from the 'clean' snapshot (which was created after the VM has"
" been setup) before running the tests."))
argparser.add_argument(
'--just-start', action='store_true', default=False,
help=("Just start the VM in the current state (not restoring the clean snapshot)"
" and don't run the tests."))
argparser.add_argument(
'--login', action='store_true', default=False,
help=("Start the VM in the current state (not restoring the clean snapshot)"
" and open a shell session. The VM is powered off when the shell is closed."))
argparser.add_argument(
'--shutdown', type=bool_arg, default=True,
help="Shut the VM down after running the tests.")
argparser.add_argument(
'--gui', type=bool_arg, default=None,
help="Force showing/hiding the UI.")
argparser.add_argument(
'--force-install', action="store_true", default=False,
help="Force reinstallation of operating system before starting VM.")
argparser.add_argument(
'--cores', type=int,
default=min(4, len(os.sched_getaffinity(0))),
help="Number of emulated cores in the VM.")
argparser.add_argument(
'--devpi-port', type=int,
default=18000,
help="Local port for devpi server.")
argparser.add_argument(
'--apt-cacher-port', type=int,
default=3142,
help="Local port for apt-cacher-ng server.")
argparser.add_argument('vms', nargs='*')
args = argparser.parse_args(argv[1:])
if not args.vms:
args.vms = list(sorted(ALL_VMTESTS.keys()))
for vm_name in args.vms:
if vm_name not in ALL_VMTESTS:
raise ValueError("'%s' is not a valid test name" % vm_name)
root_logger = logging.getLogger()
    for handler in list(root_logger.handlers):  # copy: removing while iterating skips handlers
root_logger.removeHandler(handler)
formatter = logging.Formatter(
'%(relativeCreated)8d:%(levelname)-8s:%(name)s: %(message)s')
root_logger.setLevel(logging.DEBUG)
log_path = os.path.join(VM_BASE_DIR, time.strftime('debug-%Y%m%d-%H%M%S.log'))
current_log_path = os.path.join(VM_BASE_DIR, 'debug.log')
if os.path.isfile(current_log_path) or os.path.islink(current_log_path):
os.unlink(current_log_path)
os.symlink(log_path, current_log_path)
handler = logging.FileHandler(log_path, 'w')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
root_logger.addHandler(handler)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(
{'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}[args.log_level])
root_logger.addHandler(handler)
try:
logger.info(' '.join(argv))
devpi_logger = logging.getLogger('devpi')
devpi_serverdir = os.path.join(ROOT_DIR, 'vmtests', '_cache', 'devpi')
if not os.path.isdir(devpi_serverdir):
logger.info("Initializing devpi cache at '%s'...", devpi_serverdir)
devpi = await asyncio.create_subprocess_exec(
'devpi-server',
'--serverdir=%s' % devpi_serverdir,
'--init',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
loop=event_loop)
devpi_stdout_dumper = event_loop.create_task(log_dumper(devpi.stdout, devpi_logger.debug, encoding='utf-8'))
await devpi.wait()
await devpi_stdout_dumper
logger.info("Starting local devpi server on port %d...", args.devpi_port)
devpi = await asyncio.create_subprocess_exec(
'devpi-server',
'--serverdir=%s' % devpi_serverdir,
'--port=%d' % args.devpi_port,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
loop=event_loop)
devpi_stdout_dumper = event_loop.create_task(log_dumper(devpi.stdout, devpi_logger.debug, encoding='utf-8'))
try:
logger.info("Starting local apt-cacher-ng server on port %d...", args.devpi_port)
apt_cacher_logger = logging.getLogger('apt-cacher-ng')
apt_cacher = await asyncio.create_subprocess_exec(
os.path.join(os.environ['VIRTUAL_ENV'], 'sbin', 'apt-cacher-ng'),
'ForeGround=1',
'Port=%d' % args.apt_cacher_port,
'CacheDir=%s' % os.path.join(ROOT_DIR, 'vmtests', '_cache', 'apt-cacher-ng'),
'LogDir=%s' % os.path.join(ROOT_DIR, 'vmtests', '_cache', 'apt-cacher-ng'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
loop=event_loop)
apt_cacher_stdout_dumper = event_loop.create_task(log_dumper(apt_cacher.stdout, apt_cacher_logger.debug, encoding='utf-8'))
try:
settings = TestSettings(args)
vm_args = {
'base_dir': VM_BASE_DIR,
'event_loop': event_loop,
'cores': args.cores,
'memory': 2 << 30,
}
if args.just_start:
assert len(args.vms) == 1
vm_name = args.vms[0]
vm_cls = ALL_VMTESTS[vm_name]
vm = vm_cls(name=vm_name, **vm_args)
if args.force_install:
await vm.install()
assert vm.is_installed
try:
await vm.start(gui=args.gui if args.gui is not None else True)
await vm.wait_for_state(vm.POWEROFF, timeout=3600)
finally:
await vm.poweroff()
return
if args.login:
assert len(args.vms) == 1
vm_name = args.vms[0]
vm_cls = ALL_VMTESTS[vm_name]
vm = vm_cls(name=vm_name, **vm_args)
if args.force_install:
await vm.install()
assert vm.is_installed
try:
await vm.start(gui=args.gui if args.gui is not None else False)
await vm.wait_for_ssh()
proc = await asyncio.create_subprocess_exec(
'/usr/bin/sshpass', '-p123',
'/usr/bin/ssh',
'-p5555',
'-X',
'-oStrictHostKeyChecking=off',
'-oUserKnownHostsFile=/dev/null',
'-oLogLevel=quiet',
'testuser@localhost',
loop=event_loop)
await proc.wait()
finally:
await vm.poweroff()
return
results = {}
for vm_name in args.vms:
vm_cls = ALL_VMTESTS[vm_name]
vm = vm_cls(name=vm_name, **vm_args)
if not vm.is_installed or args.force_install:
await vm.install()
elif args.clean_snapshot:
await vm.restore_snapshot('clean')
try:
await vm.start(gui=args.gui if args.gui is not None else False)
results[vm.name] = await vm.run_test(settings)
finally:
await vm.poweroff()
if not all(results.values()):
print()
print('-' * 96)
print("%d/%d tests FAILED." % (
sum(1 for success in results.values() if not success), len(results)))
print()
for vm, success in sorted(results.items(), key=lambda i: i[0]):
print("%s... %s" % (vm, 'SUCCESS' if success else 'FAILED'))
return 1
return 0
finally:
apt_cacher.terminate()
await apt_cacher.wait()
await apt_cacher_stdout_dumper
finally:
devpi.terminate()
await devpi.wait()
await devpi_stdout_dumper
except:
logger.error("runvmtests failed with an exception:\n%s", traceback.format_exc())
raise
finally:
print("Full logs at %s" % log_path)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
sys.exit(loop.run_until_complete(main(loop, sys.argv)))
| gpl-2.0 | 3,167,093,425,330,332,000 | 33.624266 | 135 | 0.542983 | false |
sebmolinari/los-kpos | app/mod_auth/controllers.py | 1 | 4029 | #Flask imports
from flask import Blueprint, render_template, flash, redirect, url_for, abort
from flask.ext.login import LoginManager, login_required, logout_user, login_user
#App imports
from app import app
from app.mod_auth.forms import LoginForm, UserForm, EmailForm, PasswordForm
from app.mod_auth.models import User
from utils import ts, send_email
lm = LoginManager()
lm.init_app(app)
lm.login_view = "auth.user_login"
mod_auth = Blueprint('auth', __name__, url_prefix='/user',
template_folder='templates')
@lm.user_loader
def user_load(id):
return User.get_by_id(int(id))
@mod_auth.route('/login/', methods=['GET', 'POST'])
def user_login():
    # special case: if the database is empty we should create a user
if len(User.get_all()) == 0:
return redirect(url_for('auth.user_create'))
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_email(email=form.email.data)
if user and User.check_password(user.password, form.password.data):
login_user(user)
return redirect(url_for('index'))
flash('Wrong email or password')
return render_template("login.html", form=form)
@mod_auth.route('/logout/')
@login_required
def user_logout():
logout_user()
return redirect(url_for('index'))
@mod_auth.route('/create', methods=["GET", "POST"])
def user_create():
form = UserForm()
if form.validate_on_submit():
user = User()
user.name = form.name.data
user.email = form.email.data
user.password = form.password.data
user.is_admin = form.is_admin.data
#TODO Fix possible duplicated keys!
User.save(user)
# Now we'll send the email confirmation link
subject = "Confirm your email"
token = ts.dumps(user.email, salt='email-confirm-key')
confirm_url = url_for(
'auth.user_confirm_email',
token=token,
_external=True)
html = render_template(
'activate.html',
confirm_url=confirm_url)
# We'll assume that send_email has been defined in myapp/util.py
app.logger.info('Url use to confirm: {0}'.format(confirm_url))
send_email(user.email, subject, html)
return redirect(url_for("index"))
return render_template("create.html", form=form)
@mod_auth.route('/confirm/<token>')
def user_confirm_email(token):
try:
email = ts.loads(token, salt="email-confirm-key", max_age=86400)
except:
abort(404)
user = User.get_by_email(email=email)
user.email_confirmed = True
User.save(user)
return redirect(url_for('auth.user_login'))
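# Note (illustrative): the token round-trips through the same
# URLSafeTimedSerializer `ts`: user_create() produced it with
# ts.dumps(user.email, salt='email-confirm-key'), and max_age=86400 above
# makes confirmation links expire after 24 hours.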
@mod_auth.route('/reset', methods=["GET", "POST"])
def user_password_reset():
form = EmailForm()
if form.validate_on_submit():
user = User.get_by_email(email=form.email.data)
subject = "Password reset requested"
# Here we use the URLSafeTimedSerializer we created in `util` at the
# beginning of the chapter
token = ts.dumps(user.email, salt='recover-key')
recover_url = url_for(
'auth.user_reset_password_with_token',
token=token,
_external=True)
html = render_template(
'recover.html',
recover_url=recover_url)
# Let's assume that send_email was defined in myapp/util.py
send_email(user.email, subject, html)
return redirect(url_for('index'))
return render_template('reset.html', form=form)
@mod_auth.route('/reset/<token>', methods=["GET", "POST"])
def user_reset_password_with_token(token):
try:
email = ts.loads(token, salt="recover-key", max_age=86400)
except:
abort(404)
form = PasswordForm()
if form.validate_on_submit():
user = User.get_by_email(email=email)
user.password = form.password.data
User.save(user)
return redirect(url_for('auth.user_login'))
return render_template('reset_with_token.html', form=form, token=token)
| gpl-3.0 | -7,708,140,034,764,960,000 | 28.844444 | 81 | 0.63316 | false |
SRI-CSL/ETB | demos/allsatlive/yices_parse.py | 1 | 2665 | #Defines grammar for reading yices files; used in the include <file> api for yices.
from pyparsing import *
#Grammar for s-expressions which is used to parse Yices expressions
token = Word(alphanums + "-./_:*+=!<>")
LPAR = "("
RPAR = ")"
#Yices comments are ignored; parentheses are retained since Yices expressions are printed back
#as strings for the Yices api
lispStyleComment = Group(";" + restOfLine)
sexp = Forward()
sexpList = ZeroOrMore(sexp)
sexpList.ignore(lispStyleComment)
sexpGroup = Group(LPAR + sexpList + RPAR)
sexp << (token | sexpGroup)
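# Parse sketch (assumed output shape; parentheses are kept as tokens):
#   sexp.parseString('(define a::bool)')
#   => [['(', 'define', 'a::bool', ')']]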
#Grammar for Yices commands
#_LPAR = Suppress(LPAR)
#_RPAR = Suppress(RPAR)
#The command names are enumerated
yDefine = Literal("define")
yAssert = Literal("assert")
yAssertPlus = Literal("assert+")
yRetract = Literal("retract")
yCheck = Literal("check")
yMaxSat = Literal("maxsat")
ySetEvidence = Literal("set-evidence!")
ySetVerbosity = Literal("set-verbosity")
ySetArithOnly = Literal("set-arith-only")
yPush = Literal("push")
yPop = Literal("pop")
yEcho = Literal("echo")
yReset = Literal("reset")
yCommandName = yDefine | yAssert | yAssertPlus | yRetract | yCheck | yMaxSat | ySetEvidence | ySetVerbosity | ySetArithOnly | yPush | yPop | yEcho | yReset
#name is word without colons
name = Word(alphanums + "-./_*+=!<>")
colons = Suppress("::")
#Define commands are treated differently since we have to parse out the '::'
yDefineCommand = Group(yDefine + name + colons + sexp + sexpList)
yOtherCommandName = yAssert | yAssertPlus | yRetract | yCheck | yMaxSat | ySetEvidence | ySetVerbosity | ySetArithOnly | yPush | yPop | yEcho | yReset
yOtherCommand = Group(yOtherCommandName + sexpList)
yCommandBody = yDefineCommand | yOtherCommand
yCommand = Group(LPAR + yCommandBody + RPAR)
yCommandList = ZeroOrMore(yCommand)
yCommandList.ignore(lispStyleComment)
# no longer used: defineName = Group(name + colons + sexp + sexpList)
lparPrint = " ("
rparPrint = ") "
def printSexp(parsedSexp):
if parsedSexp == LPAR:
return lparPrint
elif parsedSexp == RPAR:
return rparPrint
elif type(parsedSexp) == str:
return parsedSexp
elif parsedSexp == []:
return ''
else:
print(parsedSexp)
first = printSexp(parsedSexp[0])
rest = printSexp(parsedSexp[1:])
print('first = %s' % first)
print('rest = %s' % rest)
if (first == lparPrint) or (first == rparPrint) or (rest == rparPrint):
return '%s%s' % (first, rest)
else:
return '%s %s' % (first, rest)
test1 = """(define a::bool)"""
test2 = """(define b ::bool)"""
test3 = """(define c :: bool)"""
| gpl-3.0 | -756,669,965,276,283,000 | 26.193878 | 155 | 0.670544 | false |
Gabotero/GNURadioNext | gr-blocks/python/qa_tagged_stream_mux.py | 1 | 3562 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import pmt
import blocks_swig as blocks
import numpy
def make_len_tags(tupl, key):
    tags = []
    n_read = 0
    for element in tupl:
        # Create a fresh tag per element; reusing a single gr.tag_t() would
        # make every list entry reference the same mutated object.
        tag = gr.tag_t()
        tag.key = pmt.string_to_symbol(key)
        tag.offset = n_read
        n_read += len(element)
        tag.value = pmt.to_pmt(len(element))
        tags.append(tag)
return tags
def make_len_tag(offset, key, value):
tag = gr.tag_t()
tag.offset = offset
tag.key = pmt.string_to_symbol(key)
tag.value = pmt.to_pmt(value)
return tag
class qa_tagged_stream_mux (gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_1(self):
datas = (
0, 1, 2, 5, 6, 10, 14, 15, 16,
3, 4, 7, 8, 9, 11, 12, 13, 17
)
expected = tuple(range(18))
tagname = "packet_length"
len_tags_0 = (
make_len_tag(0, tagname, 3),
make_len_tag(3, tagname, 2),
make_len_tag(5, tagname, 1),
make_len_tag(6, tagname, 3)
)
len_tags_1 = (
make_len_tag(0, tagname, 2),
make_len_tag(2, tagname, 3),
make_len_tag(5, tagname, 3),
make_len_tag(8, tagname, 1)
)
test_tag_0 = gr.tag_t()
test_tag_0.key = pmt.string_to_symbol('spam')
test_tag_0.offset = 4 # On the second '1'
test_tag_0.value = pmt.to_pmt(42)
test_tag_1 = gr.tag_t()
test_tag_1.key = pmt.string_to_symbol('eggs')
test_tag_1.offset = 3 # On the first '3' of the 2nd stream
test_tag_1.value = pmt.to_pmt(23)
src0 = blocks.vector_source_b(datas[0:9], False, 1, len_tags_0 + (test_tag_0,))
src1 = blocks.vector_source_b(datas[9:], False, 1, len_tags_1 + (test_tag_1,))
tagged_stream_mux = blocks.tagged_stream_mux(gr.sizeof_char, tagname)
snk = blocks.vector_sink_b()
self.tb.connect(src0, (tagged_stream_mux, 0))
self.tb.connect(src1, (tagged_stream_mux, 1))
self.tb.connect(tagged_stream_mux, snk)
self.tb.run()
self.assertEqual(expected, snk.data())
tags = [gr.tag_to_python(x) for x in snk.tags()]
tags = sorted([(x.offset, x.key, x.value) for x in tags])
tags_expected = [
(0, 'packet_length', 5),
(5, 'packet_length', 5),
(6, 'spam', 42),
(8, 'eggs', 23),
(10, 'packet_length', 4),
(14, 'packet_length', 4)
]
self.assertEqual(tags, tags_expected)
if __name__ == '__main__':
gr_unittest.run(qa_tagged_stream_mux, "qa_tagged_stream_mux.xml")
| gpl-3.0 | 8,027,701,689,082,090,000 | 31.09009 | 87 | 0.576081 | false |
phev8/dataset_tools | experiment_handler/time_synchronisation.py | 1 | 1444 | import os
import pandas as pd
def read_synchronisation_file(experiment_root):
filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
return pd.read_csv(filepath)
def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
"""
    Convert numeric timestamps (seconds from the start of the video or posix timestamps) of a reference time (e.g. P3_eyetracker) to a different reference time (e.g. video time)
Parameters
----------
experiment_root: str
Root of the current experiment (to find the right synchronisation matrix)
timestamps: float or array like
timestamps to be converted
from_reference: str
name of the reference of the original timestamps
to_reference: str
name of the reference time the timestamp has to be converted to
Returns
-------
converted_timestamps: float or array like
Timestamps given in to_reference time values
"""
synchronisation_file = read_synchronisation_file(experiment_root)
offset = synchronisation_file.loc[synchronisation_file["from"] == from_reference, to_reference].values[0]
converted_timestamps = timestamps + offset
return converted_timestamps
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video")) | mit | 1,236,938,527,149,042,200 | 33.404762 | 171 | 0.702216 | false |
xfaxca/pygaero | example/tmax_peakfind_example.py | 1 | 4986 | # tmax_peakfind_example.py
"""
Demonstration of some of the primary functions in pygaero, including Tmax finding and elemental analysis.
"""
# Module import
from pygaero import pio
from pygaero import therm
from pygaero import gen_chem
import os
import matplotlib.pyplot as plt
def example1():
# ------------------------------- File I/O and Data Cleaning Example -------------------------------- #
indir = "" # input directory (same folder as script by default)
infiles = ['desorb1.csv', 'desorb2.csv'] # input files as a list of strings
# Read in list of csvs with figaero desorptions
df_desorbs_ls = pio.read_files(fdir=indir, flist=infiles)
print('# of files imported: ', len(df_desorbs_ls))
# Clean ion names from default A_CxHyOzI_Avg format (strip underscores '_' and remove iodide
for df in df_desorbs_ls:
print("Example of ion names before clean: ", df.columns.values[0:3])
df.columns = gen_chem.cln_molec_names(idx_names=df.columns.values, delim="_") # remove underscores
df.columns = gen_chem.replace_group(molec_ls=df.columns.values, old_groups=["I"], new_group="") # remove I
print('Example of ion names after clean: ', df.columns.values[0:3])
# Alternatively, one can just assign a single thermogram by df_example = pd.DataFrame.from_csv(indir+infile)
# Adjust thermogram signals for 4.0 LPM figaero flow rate relative to nominal 2.0 LPM sample rate
# print('Before flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
therm.flow_correction(thermograms=df_desorbs_ls, aero_samp_rates=[4.0, 4.0])
# print('After flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
# ---------------------------------- Elemental Stats Example --------------------------------------- #
# A. Calculate elemental statistics for species in each desorb CSV that was read in. Then append the DataFrames
# containing these statistics into a list. Note, Iodide has been stripped from the names at this point, so
# the parameter cluster_group=None
ele_stats_ls = []
for df in df_desorbs_ls:
df_ele_temp = gen_chem.ele_stats(molec_ls=df.columns.values, ion_state=-1, cluster_group=None,
clst_group_mw=0.0, xtra_elements=["Cl", "F"])
ele_stats_ls.append(df_ele_temp)
# -------------------------------- Peak Finding (TMax) Example --------------------------------------#
# A. Smooth time series as step prior to Tmax (helps prevent mis-identification of TMax in noisy signals)
for df in df_desorbs_ls:
for series in df.columns.values:
# print('series: ', series)
df.ix[:, series] = therm.smooth(x=df.ix[:, series].values, window='hamming', window_len=15)
plt.show()
# B. Find TMax for all loaded thermograms. Returns a pandas DataFrame with ion names as index values and columns:
# TMax1, MaxSig1, TMax2, MaxSig2, DubFlag (double peak flag - binary; -1 for no peaks found). Depending on the
# specific data set, the [pk_thresh] and [pk_win] parameters may need to be optimized. See documentation for
# function peakfind_df_ls in module therm.py for more details. Results are drastically improved by first
# smoothing the time series, so that small fluctuations in signal are not mistaken for a peak.
df_tmax_ls = therm.peakfind_df_ls(df_ls=df_desorbs_ls, pk_thresh=0.05, pk_win=20,
min_temp=40.0, max_temp=190.0)
# C. Quick plot to visualize Tmax values for 15 example ions
# therm.plot_tmax(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
# tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'], tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'])
therm.plot_tmax_double(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'],
tmax_temps2=df_tmax_ls[0].ix[15:29, 'TMax2'],
tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'],
tmax_vals2=df_tmax_ls[0].ix[15:29, 'MaxSig2'])
# ----------------------------------- Saving Results Example -------------------------------------- #
# Uncomment the following lines to save the example output
# outdir = 'testout'
# if outdir[-1] != '/':
# outdir += '/'
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# # A. Save TMax data
# for df, fname in zip(df_tmax_ls, ["desorb1_tmax", "desorb2_tmax"]):
# df.to_csv(outdir+fname+".csv")
# # B. Save smoothed desorption thermogram time series
# for df, fname in zip(df_desorbs_ls, ["desorb1_smth", "desorb2_smth"]):
# df.to_csv(outdir+fname+".csv")
# # C. Save elemental stats for each desorption
# for df, fname in zip(ele_stats_ls, ["desorb1_ele", "desorb2_ele"]):
# df.to_csv(outdir+fname+".csv")
return 0
if __name__ == '__main__':
example1()
| gpl-3.0 | 1,347,200,790,599,786,200 | 53.791209 | 117 | 0.606899 | false |
AIFDR/inasafe-django | django_project/realtime/helpers/base_indicator.py | 2 | 2769 | # coding=utf-8
from builtins import range
from builtins import object
from datetime import datetime, timedelta
from math import isnan
from django.utils.translation import ugettext as _
import numpy
import pytz
from realtime.models.earthquake import Earthquake
__author__ = 'Rizky Maulana Nugraha "lucernae" <[email protected]>'
__date__ = '04/09/15'
STATUS_HEALTHY = 'Healthy'
STATUS_WARNING = 'Warning'
STATUS_CRITICAL = 'Critical'
class Indicator(object):
"""An abstract class of indicators.
This class should provide a way to generate indicator info to know that
realtime is running fine.
"""
def __init__(self):
self._value = None
self._label = None
self._status = None
@property
def value(self):
return self._value
@property
def label(self):
return self._label
@property
def status(self):
return self._status
def value_humanize(self):
raise NotImplementedError()
def notes(self):
raise NotImplementedError()
def is_healthy(self):
return self.status == STATUS_HEALTHY
def is_warning(self):
return self.status == STATUS_WARNING
def is_critical(self):
return self.status == STATUS_CRITICAL
def status_text(self):
if self.status == STATUS_HEALTHY:
return _('Healthy')
elif self.status == STATUS_WARNING:
return _('Warning')
elif self.status == STATUS_CRITICAL:
return _('Critical')
else:
return _('Not Available')
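# A minimal illustrative subclass (not part of the original module): the
# threshold logic below is an assumption, shown only to demonstrate how the
# abstract fields of Indicator are meant to be filled in.
class ExampleShakeCountIndicator(Indicator):
    """Sketch: healthy when at least one shake was seen in the window."""
    def __init__(self, shake_count):
        super(ExampleShakeCountIndicator, self).__init__()
        self._label = 'Shake count'
        self._value = shake_count
        # assumed rule: any activity at all counts as healthy
        self._status = STATUS_HEALTHY if shake_count > 0 else STATUS_CRITICAL
    def value_humanize(self):
        return '%d shakes' % self._value
    def notes(self):
        return 'Number of shakes seen in the reporting window.'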
# from this line onward the module contains helper methods
def average_shake_interval(num_days=30):
"""Calculates average interval between shake events.
It is calculated in the span of previous 30 days
:param num_days: Number of previous days the function will calculate
:type num_days: int
:return: tuple of mean interval and standard deviation of shake events
:rtype: tuple
"""
last_span = datetime.utcnow() - timedelta(days=num_days)
    last_span = last_span.replace(tzinfo=pytz.utc)
    # order by time so consecutive events yield chronological intervals
    shakes = Earthquake.objects.filter(time__gte=last_span).order_by('time')
intervals = []
for i in range(1, len(shakes)):
prev_shake = shakes[i - 1]
shake = shakes[i]
intervals.append(shake.time - prev_shake.time)
# using numpy to calculate mean
intervals = numpy.array([i.total_seconds() for i in intervals])
mean_interval = numpy.mean(intervals)
if isinstance(mean_interval, float) and isnan(mean_interval):
mean_interval = 0
# using numpy to calculate std
deviation = numpy.std(intervals)
if isinstance(deviation, float) and isnan(deviation):
deviation = 0
return timedelta(seconds=mean_interval), timedelta(seconds=deviation)
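# Illustrative use of the helper above (the names and the threshold are
# assumptions, not part of the original module):
#   mean, sigma = average_shake_interval(num_days=30)
# A monitor could then flag Critical when no shake arrives within
# mean + 2 * sigma of the previous one.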
| bsd-2-clause | -3,732,841,021,557,332,000 | 26.415842 | 75 | 0.660166 | false |
morinted/plover | plover/gui_qt/dictionary_editor.py | 1 | 13523 |
from operator import attrgetter, itemgetter
from collections import namedtuple
from itertools import chain
from PyQt5.QtCore import (
QAbstractTableModel,
QModelIndex,
Qt,
)
from PyQt5.QtWidgets import (
QComboBox,
QDialog,
QStyledItemDelegate,
)
from plover.translation import escape_translation, unescape_translation
from plover.misc import expand_path, shorten_path
from plover.steno import normalize_steno
from plover.gui_qt.dictionary_editor_ui import Ui_DictionaryEditor
from plover.gui_qt.utils import ToolBar, WindowState
_COL_STENO, _COL_TRANS, _COL_DICT, _COL_COUNT = range(3 + 1)
class DictionaryItem(namedtuple('DictionaryItem', 'strokes translation dictionary')):
@property
def dictionary_path(self):
return self.dictionary.path
class DictionaryItemDelegate(QStyledItemDelegate):
def __init__(self, dictionary_list):
super().__init__()
self._dictionary_list = dictionary_list
def createEditor(self, parent, option, index):
if index.column() == _COL_DICT:
dictionary_paths = [
shorten_path(dictionary.path)
for dictionary in self._dictionary_list
if not dictionary.readonly
]
combo = QComboBox(parent)
combo.addItems(dictionary_paths)
return combo
return super().createEditor(parent, option, index)
class DictionaryItemModel(QAbstractTableModel):
def __init__(self, dictionary_list, sort_column, sort_order):
super().__init__()
self._dictionary_list = dictionary_list
self._operations = []
self._entries = []
self._sort_column = sort_column
self._sort_order = sort_order
self._update_entries()
def _update_entries(self, strokes_filter=None, translation_filter=None):
self._entries = []
for dictionary in self._dictionary_list:
for strokes, translation in dictionary.items():
if strokes_filter is not None and \
not '/'.join(strokes).startswith(strokes_filter):
continue
if translation_filter is not None and \
not translation.startswith(translation_filter):
continue
item = DictionaryItem(strokes, translation, dictionary)
self._entries.append(item)
self.sort(self._sort_column, self._sort_order)
@property
def has_undo(self):
return bool(self._operations)
@property
def modified(self):
paths = set()
dictionary_list = []
for op_list in self._operations:
if not isinstance(op_list, list):
op_list = (op_list,)
for item in chain(*op_list):
if item is None:
continue
dictionary = item.dictionary
if dictionary.path in paths:
continue
paths.add(dictionary.path)
dictionary_list.append(dictionary)
return dictionary_list
# Note:
# - since switching from a dictionary to a table does not enforce the
    # unicity of keys, a deletion can fail when one of the duplicates has
# already been deleted.
# - when undoing an operation at the table level, the item may have
# been filtered-out and not present
def _undo(self, old_item, new_item):
if old_item is None:
# Undo addition.
try:
del new_item.dictionary[new_item.strokes]
except KeyError:
pass
try:
row = self._entries.index(new_item)
except ValueError:
                # Happens if the item is filtered out.
pass
else:
self.remove_rows([row], record=False)
return
if new_item is None:
# Undo deletion.
self.new_row(0, item=old_item, record=False)
return
# Undo update.
try:
del new_item.dictionary[new_item.strokes]
except KeyError:
pass
try:
row = self._entries.index(new_item)
except ValueError:
            # Happens if the item is filtered out;
            # "create" a new row so the user sees
            # the result of the undo.
self.new_row(0, item=old_item, record=False)
else:
old_item.dictionary[old_item.strokes] = old_item.translation
self._entries[row] = old_item
self.dataChanged.emit(self.index(row, _COL_STENO),
self.index(row, _COL_TRANS))
    def undo(self):
op = self._operations.pop()
if isinstance(op, list):
for old_item, new_item in op:
self._undo(old_item, new_item)
else:
self._undo(*op)
def rowCount(self, parent):
return 0 if parent.isValid() else len(self._entries)
def columnCount(self, parent):
return _COL_COUNT
def headerData(self, section, orientation, role):
if orientation != Qt.Horizontal or role != Qt.DisplayRole:
return None
if section == _COL_STENO:
return _('Strokes')
if section == _COL_TRANS:
return _('Translation')
if section == _COL_DICT:
return _('Dictionary')
def data(self, index, role):
if not index.isValid() or role not in (Qt.EditRole, Qt.DisplayRole):
return None
item = self._entries[index.row()]
column = index.column()
if column == _COL_STENO:
return '/'.join(item.strokes)
if column == _COL_TRANS:
return escape_translation(item.translation)
if column == _COL_DICT:
return shorten_path(item.dictionary.path)
def flags(self, index):
if not index.isValid():
return Qt.NoItemFlags
f = Qt.ItemIsEnabled | Qt.ItemIsSelectable
item = self._entries[index.row()]
if not item.dictionary.readonly:
f |= Qt.ItemIsEditable
return f
def filter(self, strokes_filter=None, translation_filter=None):
self.modelAboutToBeReset.emit()
self._update_entries(strokes_filter, translation_filter)
self.modelReset.emit()
def sort(self, column, order):
self.layoutAboutToBeChanged.emit()
if column == _COL_DICT:
key = attrgetter('dictionary_path')
else:
key = itemgetter(column)
self._entries.sort(key=key,
reverse=(order == Qt.DescendingOrder))
self._sort_column = column
self._sort_order = order
self.layoutChanged.emit()
def setData(self, index, value, role=Qt.EditRole, record=True):
assert role == Qt.EditRole
row = index.row()
column = index.column()
old_item = self._entries[row]
strokes, translation, dictionary = old_item
if column == _COL_STENO:
strokes = normalize_steno(value.strip())
if not strokes or strokes == old_item.strokes:
return False
elif column == _COL_TRANS:
translation = unescape_translation(value.strip())
if translation == old_item.translation:
return False
elif column == _COL_DICT:
path = expand_path(value)
for dictionary in self._dictionary_list:
if dictionary.path == path:
break
if dictionary == old_item.dictionary:
return False
try:
del old_item.dictionary[old_item.strokes]
except KeyError:
pass
if not old_item.strokes and not old_item.translation:
# Merge operations when editing a newly added row.
            # additions are recorded as (None, item) tuples, so compare
            # against a tuple (a list here would never match)
            if self._operations and self._operations[-1] == (None, old_item):
self._operations.pop()
old_item = None
new_item = DictionaryItem(strokes, translation, dictionary)
self._entries[row] = new_item
dictionary[strokes] = translation
if record:
self._operations.append((old_item, new_item))
self.dataChanged.emit(index, index)
return True
def new_row(self, row, item=None, record=True):
if item is None:
if row == 0 and not self._entries:
dictionary = self._dictionary_list[0]
else:
dictionary = self._entries[row].dictionary
item = DictionaryItem((), '', dictionary)
self.beginInsertRows(QModelIndex(), row, row)
self._entries.insert(row, item)
if record:
self._operations.append((None, item))
self.endInsertRows()
def remove_rows(self, row_list, record=True):
assert row_list
operations = []
for row in sorted(row_list, reverse=True):
self.beginRemoveRows(QModelIndex(), row, row)
item = self._entries.pop(row)
self.endRemoveRows()
try:
del item.dictionary[item.strokes]
except KeyError:
pass
else:
operations.append((item, None))
if record:
self._operations.append(operations)
class DictionaryEditor(QDialog, Ui_DictionaryEditor, WindowState):
ROLE = 'dictionary_editor'
def __init__(self, engine, dictionary_paths):
super().__init__()
self.setupUi(self)
self._engine = engine
with engine:
dictionary_list = [
dictionary
for dictionary in engine.dictionaries.dicts
if dictionary.path in dictionary_paths
]
sort_column, sort_order = _COL_STENO, Qt.AscendingOrder
self._model = DictionaryItemModel(dictionary_list,
sort_column,
sort_order)
self._model.dataChanged.connect(self.on_data_changed)
self.table.sortByColumn(sort_column, sort_order)
self.table.setModel(self._model)
self.table.setSortingEnabled(True)
self.table.resizeColumnsToContents()
self.table.setItemDelegate(DictionaryItemDelegate(dictionary_list))
self.table.selectionModel().selectionChanged.connect(self.on_selection_changed)
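        # Swap the palette's highlight colours so the focused cell shows
        # inverted selection colours (text colour as background and vice versa).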
background = self.table.palette().highlightedText().color().name()
text_color = self.table.palette().highlight().color().name()
self.table.setStyleSheet('''
QTableView::item:focus {
background-color: %s;
color: %s;
}''' % (background, text_color))
self.table.setFocus()
for action in (
self.action_Undo,
self.action_Delete,
):
action.setEnabled(False)
# Toolbar.
self.layout().addWidget(ToolBar(
self.action_Undo,
self.action_Delete,
self.action_New,
))
self.restore_state()
self.finished.connect(self.save_state)
@property
def _selection(self):
return list(sorted(
index.row() for index in
self.table.selectionModel().selectedRows(0)
))
def _select(self, row, edit=False):
row = min(row, self._model.rowCount(QModelIndex()) - 1)
index = self._model.index(row, 0)
self.table.setCurrentIndex(index)
if edit:
self.table.edit(index)
def on_data_changed(self, top_left, bottom_right):
self.table.setCurrentIndex(top_left)
self.action_Undo.setEnabled(self._model.has_undo)
def on_selection_changed(self):
enabled = bool(self._selection)
for action in (
self.action_Delete,
):
action.setEnabled(enabled)
def on_undo(self):
assert self._model.has_undo
self._model.undo()
self.action_Undo.setEnabled(self._model.has_undo)
def on_delete(self):
selection = self._selection
assert selection
self._model.remove_rows(selection)
self._select(selection[0])
self.action_Undo.setEnabled(self._model.has_undo)
def on_new(self):
selection = self._selection
if selection:
row = self._selection[0]
else:
row = 0
self.table.reset()
self._model.new_row(row)
self._select(row, edit=True)
self.action_Undo.setEnabled(self._model.has_undo)
def on_apply_filter(self):
self.table.selectionModel().clear()
strokes_filter = '/'.join(normalize_steno(self.strokes_filter.text().strip()))
translation_filter = unescape_translation(self.translation_filter.text().strip())
self._model.filter(strokes_filter=strokes_filter,
translation_filter=translation_filter)
def on_clear_filter(self):
self.strokes_filter.setText('')
self.translation_filter.setText('')
self._model.filter(strokes_filter=None, translation_filter=None)
def on_finished(self, result):
with self._engine:
self._engine.dictionaries.save(dictionary.path
for dictionary
in self._model.modified)
| gpl-2.0 | -4,029,928,462,594,560,500 | 34.124675 | 89 | 0.569844 | false |
RyanJenkins/ISS | ISS/templatetags/pagination.py | 1 | 3715 | import urlparse
import urllib
from django import template
register = template.Library()
def unfuck_percent_encoded_utf8(fucked_unicode_str):
# OK So... *dramatic pause*
# (((Some))) browsers insist on transforming unicode characters outside of
# the ASCII range to their UTF-8 encoding, and then url encoding that byte
# sequence. If you want my opinion this is harmful because it's a big pain
# in my ass necessitating this code when it would be perfectly reasonable
# to just send UTF-8 byte sequences in URLs but fuck it, until Google/Apple
# /Mozilla start considering overly long comments in obscure codebases as
# standards this code is gonna have to stick around.
#
# To compound this issue, python's urlparse.parse_qs has the highly
    # questionable behavior of treating every percent encoded octet as a
    # separate codepoint, which is the opposite of how the major browser
# vendors have decided to do it. Theoretically this should be fine if
# browsers did The Right Thing but given the reality of the situation it's
# imprudent and requires me to fix this situation here with the jank that
# follows.
#
# So what do we do about it? Instead of trying to monkey patch urlparse or
# something we instead consult the (incorrect) values that it returns. We
# construct a byte string. For each codepoint in the input string we either
#
# A) insert a byte into our byte string iff the codepoint is less than
# 2^8 or...
    # B) insert a byte sequence into the byte string corresponding to the utf-8
# encoded value for that codepoint.
#
# This bytestring should now be correctly encoded UTF-8, caller can decode
# if they want
#
# Browsers doing The Right Thing with high codepoints are covered under B,
# normal ascii range characters are covered under A, and fucked utf-8 then
# percent encoded strings are also covered under A.
#
# This also has the benefit that if someone really decides to be an ass and
# sends a url where there is both "raw" UTF-8 encoded codepoints and percent
# encoded UTF-8 encoded sequences the url will somehow correctly get
# handled.
#
# This is probably pretty slow but I'm fairly confident it's correct.
if isinstance(fucked_unicode_str, unicode):
return ''.join([(chr(ord(c)) if ord(c) < 256 else c.encode('utf-8')) for c in fucked_unicode_str])
else:
return str(fucked_unicode_str)
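# Worked example (values illustrative): parse_qs(u'q=caf%C3%A9') yields
# {u'q': [u'caf\xc3\xa9']} -- one codepoint per UTF-8 octet. Passing that
# through unfuck_percent_encoded_utf8 gives the byte string 'caf\xc3\xa9',
# and 'caf\xc3\xa9'.decode('utf-8') == u'caf\xe9' ("cafe" with an acute e).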
RANGE_WIDTH = 3
@register.assignment_tag
def nice_page_set(page):
pages = []
pages.extend(range(1, RANGE_WIDTH+1))
pages.extend(range(page.paginator.num_pages-RANGE_WIDTH,
page.paginator.num_pages+1))
pages.extend(range(page.number-RANGE_WIDTH, page.number+RANGE_WIDTH))
pages = [n for n in pages if n <= page.paginator.num_pages and n > 0]
pages = list(set(pages))
pages.sort()
elip_pages = []
for idx, n in enumerate(pages):
if idx != 0 and n != pages[idx-1] + 1:
elip_pages.append(-1)
elip_pages.append(n)
return elip_pages
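# Worked example: with 40 pages, current page 10 and RANGE_WIDTH 3, this
# returns [1, 2, 3, -1, 7, 8, 9, 10, 11, 12, -1, 37, 38, 39, 40]; each -1
# marks a gap for the template to render as an ellipsis.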
@register.filter
def mixin_page_param(base_url, page_number):
parsed_url = urlparse.urlparse(base_url)
query = urlparse.parse_qs(parsed_url.query)
query['p'] = [page_number]
one_pairs = []
for key, values in query.items():
for value in values:
one_pairs.append((
unfuck_percent_encoded_utf8(key),
unfuck_percent_encoded_utf8(value)))
qs = urllib.urlencode(one_pairs)
url_dict = parsed_url._asdict()
url_dict['query'] = qs
return urlparse.urlunparse(urlparse.ParseResult(**url_dict))
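# Illustrative call (URL made up): mixin_page_param(u'/t/?q=caf%C3%A9&p=1', 2)
# rebuilds the query with the UTF-8 bytes restored and p swapped, giving
# '/t/?q=caf%C3%A9&p=2' (pair order may vary, since it comes from a dict).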
| gpl-3.0 | 8,622,333,044,276,108,000 | 38.946237 | 106 | 0.679139 | false |
Azure/azure-sdk-for-python | sdk/servermanager/azure-mgmt-servermanager/azure/mgmt/servermanager/models/session_parameters.py | 1 | 2295 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SessionParameters(Model):
"""Parameter collection for creation and other operations on sessions.
:param user_name: Encrypted User name to be used to connect to node.
:type user_name: str
:param password: Encrypted Password associated with user name.
:type password: str
:param retention_period: Session retention period. Possible values
include: 'Session', 'Persistent'
:type retention_period: str or
~azure.mgmt.servermanager.models.RetentionPeriod
:param credential_data_format: Credential data format. Possible values
include: 'RsaEncrypted'
:type credential_data_format: str or
~azure.mgmt.servermanager.models.CredentialDataFormat
:param encryption_certificate_thumbprint: Encryption certificate
thumbprint.
:type encryption_certificate_thumbprint: str
"""
_attribute_map = {
'user_name': {'key': 'properties.userName', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
'retention_period': {'key': 'properties.retentionPeriod', 'type': 'RetentionPeriod'},
'credential_data_format': {'key': 'properties.credentialDataFormat', 'type': 'CredentialDataFormat'},
'encryption_certificate_thumbprint': {'key': 'properties.EncryptionCertificateThumbprint', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SessionParameters, self).__init__(**kwargs)
self.user_name = kwargs.get('user_name', None)
self.password = kwargs.get('password', None)
self.retention_period = kwargs.get('retention_period', None)
self.credential_data_format = kwargs.get('credential_data_format', None)
self.encryption_certificate_thumbprint = kwargs.get('encryption_certificate_thumbprint', None)
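# A minimal usage sketch (all values are placeholders, not working credentials):
#     params = SessionParameters(
#         user_name='<rsa-encrypted-user>',
#         password='<rsa-encrypted-password>',
#         retention_period='Session',
#         credential_data_format='RsaEncrypted')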
| mit | 2,877,594,313,000,630,300 | 45.836735 | 114 | 0.659695 | false |
ermo/privateer_wcu | modules/missions/cargo_mission.py | 1 | 8981 | from go_somewhere_significant import *
from go_to_adjacent_systems import *
import Briefing
import Director
import VS
import debug
import faction_ships
import launch
import quest
import unit
import universe
import vsrandom
class cargo_mission (Director.Mission):
def initbriefing(self):
VS.IOmessage (0,"cargo mission","briefing","Your mission for today will be to run some %s cargo" % self.cargoname)
self.briefgametime = 0
self.adjsys.initbriefing()
def loopbriefing(self):
brief_you=self.adjsys.loopbriefing()
if (brief_you != -1):
VS.IOmessage(0,"cargo mission","briefing","Once there, you must drop the cargo off at a specified unit")
if (self.briefgametime==0):
self.briefgametime = VS.GetGameTime()
elif ((VS.GetGameTime()-self.briefgametime)>5):
Briefing.terminate()
def endbriefing(self):
self.adjsys.endbriefing()
del self.briefgametime
def SetVar (self,val):
if (self.var_to_set!=''):
quest.removeQuest (self.you.isPlayerStarship(),self.var_to_set,val)
def __init__ (self,factionname, numsystemsaway, cargoquantity, missiondifficulty, creds, launchoncapship, time_to_complete, category,jumps=(),var_to_set=''):
Director.Mission.__init__(self);
self.you=VS.Unit()
self.base=VS.Unit()
self.role="ESCORTCAP"
self.arrived=0
self.var_to_set=var_to_set
self.mplay="all"
# self.mission_time=VS.GetGameTime()+time_to_complete*100*float(1+numsystemsaway)
self.capship= launchoncapship
self.faction=factionname
self.cred=creds
self.difficulty=missiondifficulty
self.you=VS.getPlayer()
self.adjsys=go_to_adjacent_systems(self.you,numsystemsaway,jumps)
self.quantity=cargoquantity
self.mplay=universe.getMessagePlayer(self.you)
if (self.quantity<1):
self.quantity=1
carg=VS.getRandCargo(self.quantity,category)
if (carg.GetQuantity()==0 or category==''):
carg = VS.getRandCargo(self.quantity,"") #oh no... could be starships...
i=0
while i<50 and carg.GetCategory()[:10]=="Contraband":
debug.info("contraband==bad")
carg = VS.getRandCargo(self.quantity,"")
i+=1
tempquantity=self.quantity
self.cargoname=carg.GetContent()
name = self.you.getName ()
carg.SetMissionFlag(1)
if (not self.you.isNull()):
tmpcarg=self.you.GetCargo(self.cargoname)
if tmpcarg.GetMissionFlag() and tmpcarg.GetQuantity()>2:
quantum=int(tmpcarg.GetQuantity()/3)
quantum=self.you.removeCargo(carg.GetContent(),quantum,True)#use it if player has it
carg.SetQuantity(1+quantum)
self.quantity=self.you.addCargo(carg)
else:
self.quantity = self.you.addCargo(carg) #I add some cargo
else:
VS.IOmessage (2,"cargo mission",self.mplay,"#ff0000Unable to establish communications. Mission failed.")
VS.terminateMission (0)
return
# creds_deducted = (carg.GetPrice()*float(self.quantity)*vsrandom.random()+1)
# self.cred += creds_deducted
if (tempquantity>0):
self.cred*=float(self.quantity)/float(tempquantity)
else:
VS.IOmessage (2,"cargo mission",self.mplay,"#ff0000You do not have space to add our %s cargo to your ship. Mission failed."%self.cargoname)
VS.terminateMission(0)
return
if (self.quantity==0):
VS.IOmessage (2,"cargo mission",self.mplay,"#ff0000You do not have space to add our cargo to the mission. Mission failed.")
VS.terminateMission(0)
return
VS.IOmessage (0,"cargo mission",self.mplay,"Good Day, %s. Your mission is as follows:" % (name))
self.adjsys.Print("You should start in the system named %s","Then jump to %s","Finally, jump to %s, your final destination","cargo mission",1)
VS.IOmessage (2,"cargo mission",self.mplay,"Give the cargo to a %s unit or planet." % (self.faction))
VS.IOmessage (3,"cargo mission",self.mplay,"You will receive %d of the %s cargo" % (self.quantity,self.cargoname))
# VS.IOmessage (4,"cargo mission",self.mplay,"We will deduct %.2f credits from your account for the cargo needed." % (creds_deducted))
VS.IOmessage (4,"cargo mission",self.mplay,"You will earn %.2f credits when you deliver our cargo." % (creds))
VS.IOmessage (4,"cargo mission",self.mplay,"#00ff00Good luck!")
# self.you.addCredits (-creds_deducted)
def takeCargoAndTerminate (self,you, remove):
removenum=0 #if you terminate without remove, you are SKREWED
self.base.setCombatRole(self.role)
if (remove):
removenum=you.removeCargo(self.cargoname,self.quantity,1)
debug.info("removed %d" % removenum)
mpart=VS.GetMasterPartList()
newcarg=mpart.GetCargo(self.cargoname)
newcarg.SetQuantity(removenum)
self.base.addCargo(newcarg)
has=self.you.hasCargo(self.cargoname)
if (has):
has=self.you.removeCargo(self.cargoname,has,1)
newcarg.SetMissionFlag(0)
newcarg.SetQuantity(has)
self.you.addCargo(newcarg) #It seems that removing and then adding it again is the only way...
if ((removenum>=self.quantity) or (self.quantity==0) or removenum>=1):
VS.IOmessage (0,"cargo mission",self.mplay,"#00ff00Excellent work pilot.")
VS.IOmessage (0,"cargo mission",self.mplay,"#00ff00You have been rewarded for your effort as agreed.")
VS.IOmessage (0,"cargo mission",self.mplay,"#00ff00Your excellent work will be remembered.")
you.addCredits(self.cred)
VS.AdjustRelation(you.getFactionName(),self.faction,.01*self.difficulty,1)
self.SetVar(1)
VS.terminateMission(1)
return
else:
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000You did not follow through on your end of the deal.")
if (self.difficulty<1):
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000Your pay will be reduced")
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000And we will consider if we will accept you on future missions.")
addcred=(float(removenum)/(float(self.quantity*(1+self.difficulty))))*self.cred
you.addCredits(addcred)
else:
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000You will not be paid!")
universe.punish(self.you,self.faction,self.difficulty)
self.SetVar(-1)
VS.terminateMission(0)
return
def Execute (self):
## if (VS.getGameTime()>mission_time):
## VS.IOmessage (0,"cargo mission",self.mplay,"You Have failed to deliver your cargo in a timely manner.")
## VS.IOmessage (0,"cargo mission",self.mplay,"The cargo is no longer of need to us.")
## if (you):
## takeCargoAndTerminate(you,0)
## return
if (self.you.isNull() or (self.arrived and self.base.isNull())):
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000You were unable to deliver cargo. Mission failed.")
self.SetVar(-1)
VS.terminateMission(0)
return
if (not self.adjsys.Execute() and not self.arrived):
return
if (self.arrived):
self.adjsys.Execute=self.adjsys.HaveArrived
if (self.base.isDocked(self.you)):
self.takeCargoAndTerminate(self.you,1)
return
else:
self.arrived=1
tempfac=self.faction
if vsrandom.random()<=.5:
tempfac=''
self.adjsys=go_somewhere_significant(self.you,1,100,self.capship,tempfac)
capstr="planet"
dockstr="land"
if tempfac=='':
dockstr="dock"
capstr="ship"
self.adjsys.Print("You must visit the %%s %s" % (capstr),"cargo mission",", docked around the %s",0)
VS.IOmessage(0,"cargo mission",self.mplay,"Once there, %s and we will transport the cargo off of your ship." % (dockstr))
self.base=self.adjsys.SignificantUnit()
self.role=self.base.getCombatRole()
self.base.setCombatRole("INERT")
def initrandom (factionname, missiondifficulty,creds_per_jump, launchoncapship, sysmin, sysmax, time_to_complete, category,jumps=(),var_to_set=''):
numsys=vsrandom.randrange(sysmin,sysmax)
return cargo_mission(factionname,numsys, vsrandom.randrange(4,15), missiondifficulty,creds_per_jump*float(1+numsys),launchoncapship, 10.0, category,jumps,var_to_set)
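# Illustrative call (argument values are made up): a difficulty-1 cargo run for
# a hypothetical 'merchant' faction, 1-3 systems away, paying 3000 credits per
# jump, not launched on a capital ship, with no category restriction:
# initrandom('merchant', 1, 3000, 0, 1, 4, 10.0, '')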
| gpl-2.0 | 812,984,243,763,449,700 | 48.894444 | 169 | 0.620755 | false |
AASHE/hub | hub/apps/metadata/migrations/0001_initial.py | 1 | 2996 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iss', '__first__'),
]
operations = [
migrations.CreateModel(
name='AcademicDiscipline',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Academic Discipline',
'verbose_name_plural': 'Academic Disciplines',
},
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Country',
'verbose_name_plural': 'Countries',
},
),
migrations.CreateModel(
name='InstitutionalOffice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Institutional Office',
'verbose_name_plural': 'Institutional Offices',
},
),
migrations.CreateModel(
name='ProgramType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Program Type',
'verbose_name_plural': 'Program Types',
},
),
migrations.CreateModel(
name='SustainabilityTopic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('color', models.CharField(default='#ff0000', max_length=7, verbose_name='HEX Color')),
('slug', models.SlugField()),
],
options={
'ordering': ('color', 'name'),
'verbose_name': 'Sustainability Topic',
'verbose_name_plural': 'Sustainability Topics',
},
),
migrations.CreateModel(
name='Organization',
fields=[
],
options={
'proxy': True,
},
bases=('iss.organization',),
),
]
| mit | -6,569,062,075,885,106,000 | 34.247059 | 114 | 0.480975 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/manager_link_status.py | 1 | 1226 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'ManagerLinkStatusEnum',
},
)
class ManagerLinkStatusEnum(proto.Message):
r"""Container for enum describing possible status of a manager
and client link.
"""
class ManagerLinkStatus(proto.Enum):
r"""Possible statuses of a link."""
UNSPECIFIED = 0
UNKNOWN = 1
ACTIVE = 2
INACTIVE = 3
PENDING = 4
REFUSED = 5
CANCELED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
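# Illustrative access (proto-plus enums behave like standard Python enums):
# ManagerLinkStatusEnum.ManagerLinkStatus.ACTIVE.value == 2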
| apache-2.0 | -1,836,098,244,861,664,500 | 27.511628 | 74 | 0.672104 | false |
LividInstruments/LiveRemoteScripts | Launchpad_M4L/LaunchpadM4L.py | 1 | 4621 | # http://lividinstruments.com
from __future__ import with_statement
import Live
import math
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button a the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
#from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompasing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from VCM600.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scene to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from VCM600.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from VCM600.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
""" Here we define some global variables """
CHANNEL = 0 #main channel (0 - 15)
LAUNCH_GRID = [0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,32,33,34,35,36,37,38,39,48,49,50,51,52,53,54,55,64,65,66,67,68,69,70,71,80,81,82,83,84,85,86,87,96,97,98,99,100,101,102,103,112,113,114,115,116,117,118,119] #there are 64 of these
LAUNCH_SIDE = [8,24,40,56,72,88,104,120] #there are 8 of these
LAUNCH_TOP = [104,105,106,107,108,109,110,111] #there are 8 of these
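# Note layout assumed by the tables above: the Launchpad numbers its grid notes
# in rows of 16, so LAUNCH_GRID[row * 8 + col] == 16 * row + col and each
# LAUNCH_SIDE entry is its row's base note plus 8 (0 <= row, col < 8).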
class LaunchpadM4L(ControlSurface):
__module__ = __name__
__doc__ = " LaunchpadM4L controller script "
def __init__(self, c_instance):
super(LaunchpadM4L, self).__init__(c_instance)
with self.component_guard():
self._host_name = 'LaunchpadM4L'
self._color_type = 'Launchpad'
self.log_message("--------------= LaunchpadM4L log BEGIN SCRIPT =--------------")
self._setup_controls()
"""script initialization methods"""
def _setup_controls(self):
is_momentary = True
self._grid = [None for index in range(64)]
self._side = [None for index in range(8)]
self._top = [None for index in range(8)]
for index in range(64):
self._grid[index] = ButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LAUNCH_GRID[index])
self._grid[index].name = 'grid[' + str(index) + ']'
for index in range(8):
self._side[index] = ButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LAUNCH_SIDE[index])
self._side[index].name = 'side[' + str(index) + ']'
for index in range(8):
self._top[index] = ButtonElement(is_momentary, MIDI_CC_TYPE, CHANNEL, LAUNCH_TOP[index])
self._top[index].name = 'top[' + str(index) + ']'
def receive_value(self, value):
self._value = value
"""LividBaseM4L script disconnection"""
def disconnect(self):
self.log_message("--------------= LaunchpadM4L log END =--------------")
ControlSurface.disconnect(self)
return None | mit | 2,758,905,294,441,095,000 | 59.815789 | 235 | 0.752651 | false |
CelineBoudier/rapid-router | game/messages.py | 1 | 72360 | # -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2016, Ocado Innovation Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from django.utils.translation import ugettext
def youtubeLink(width, height, url, border):
return "<iframe width='" + str(width) + "' height='" + str(height) + "' src='" + str(url) \
+ "?rel=0" + "' frameborder='" + str(border) \
+ "' allowfullscreen class='video'></iframe><br>"
def noPermissionMessage():
return ugettext("You have no permission to see this.")
def notSharedLevel():
return ugettext("This level is private. You can only see the public levels and the ones "
+ "created by other users only if they share them with you.")
""" Strings used in the scoreboard. """
def noPermissionTitle():
return ugettext("No permission ")
def noPermissionScoreboard():
return ugettext("Scoreboard is only visible to school students and teachers. Log in if you "
+ "think you should be able to see it. ")
def noDataToShow():
return ugettext("There is no data to show. Please contact your administrator if this is "
+ "unexpected. ")
""" Strings used on the level moderation page. """
def noPermissionLevelModerationTitle():
return ugettext("No permission ")
def noPermissionLevelModerationPage():
return ugettext("Level moderation is only visible to teachers. Log in if you "
+ "think you should be able to see it. ")
def noPermissionLevelModerationClass():
return ugettext("You do not teach this class. Please contact your administrator if this "
+ "is unexpected.")
def noPermissionLevelModerationStudent():
return ugettext("You do not teach this student. Please contact your administrator if this "
+ "is unexpected.")
def noDataToShowLevelModeration():
return ugettext("You have not created any classes and therefore is no data to show. " +
"Please contact your administrator if this is unexpected.")
""" String messages used on the settings page. """
def shareTitle():
return ugettext("Level Share")
def shareSuccessfulPerson(name, surname):
    # format after translation so the msgid stays constant for gettext lookup
    return ugettext("You shared your level with {0} {1} successfully! ").format(name, surname)
def shareSuccessfulClass(className):
    return ugettext("You shared your level with class {0} successfully! ").format(className)
def shareUnsuccessfulPerson(first_name, last_name):
return ugettext("We were unable to find %(name)s %(surname)s. "
% {'name': first_name, 'surname': last_name}
+ "Are you sure you got their name right?")
def shareUnsuccessfulClass(className):
return ugettext("We were unable to find class %(class)s. Are you sure you got it right?"
% {'class': className})
def noLevelsToShow():
return ugettext("It seems that you have not created any levels. How about creating one "
+ "now? ")
def levelsMessage():
return ugettext("All the levels you have created so far. Click on them to play them or "
+ "share them with your friends. ")
def sharedLevelsMessage():
return ugettext("All the levels created by others that were shared with you. Click on "
+ "them to play them")
def noSharedLevels():
return ugettext("No one shared a level with you yet. ")
""" Strings used in the class view. """
def chooseClass():
return ugettext("Choose a class you want to see. ")
def noPermission():
return ugettext("You don't have permissions to see this. ")
"""
"""
""" String messages used as level tips in the game view.
"""
"""
"""
def title_night_mode():
    return ugettext("Can you find your way in the dark?")
def build_description(title, message):
return "<b>" + title + "</b><br><br>" + message
def title_level_default():
return ugettext(" ")
def description_level_default():
message = ugettext("Can you find the shortest route? ")
return message
def hint_level_default():
message = ugettext("Think back to earlier levels. What did you learn? ")
return message
def title_level1():
return ugettext("Can you help the van get to the house? ")
def description_level1():
message = ugettext("Choose the right blocks to tell the van where to go. <br> Drag the "
+ "blocks under the <b>Start</b> block to attach them. <br> To remove a "
+ "block, drag it into the bin in the bottom right of the screen. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level1(), message)
def hint_level1():
message = ugettext("Drag the <b>Move forwards</b> block so that it is under the <b>Start</b> "
+ "block - close enough to be touching. <br><br>"
+ "Clicking on the arrows next to the <b>Go</b> button will also drag the "
+ "blocks into a sequence for you. <br><br>"
+ "Don't forget to press <b>Go</b> when you are done. ")
return message
def title_level2():
return ugettext("This time the house is further away. ")
def description_level2():
message = ugettext("A block can be placed next to or under another, like a jigsaw. A second "
+ "<b>Move forwards</b> block can be placed under the first <b>Move "
+ "forwards</b> block. <br> To remove a block, drag it back to the "
+ "left of the screen or drop it in the bin. <br> When you are happy with "
+ "your sequence, press <b>Go</b>! ")
return build_description(title_level2(), message)
def hint_level2():
message = ugettext("A second <b>Move forwards</b> block can be placed under the first <b>Move "
+ "forwards</b> block. <br><br>"
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you. ")
return message
def title_level3():
return ugettext("Can you make the van turn right? ")
def description_level3():
message = ugettext("This time, the van has to turn right to reach the house. Make sure you use "
+ "the <b>Turn right</b> block in your sequence. <br> Drag the blocks "
+ "and attach them under the <b>Start</b> block like before. To remove a "
+ "block, drag it back to the left of the screen or drop it in the bin. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level3(), message)
def hint_level3():
message = ugettext("A <b>Turn right</b> block can be placed under the first <b>Move "
+ "forwards</b> block. <br><br> The arrows next to the <b>Go</b> button "
+ "will drag the blocks into a sequence for you. ")
return message
def title_level4():
return ugettext("You are getting good at this! Let's try turning left. ")
def description_level4():
message = ugettext("This time the van has to go left. Make sure you use the <b>Turn left</b> "
+ "block in your sequence. <br> Drag and attach the blocks like before. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level4(), message)
def hint_level4():
message = ugettext("A <b>Turn left</b> block can be placed under a series of <b>Move "
+ "forwards</b> blocks. <br> The arrows next to the <b>Go</b> button will "
+ "drag the blocks into a sequence for you. ")
return message
def title_level5():
return ugettext("Good work! You are ready for something harder. ")
def description_level5():
message = ugettext("You already know how to make the van turn left or right. This time "
+ "the van has to make lots of turns to reach the house. <br> Drag and "
+ "attach the blocks to make your sequence."
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level5(), message)
def hint_level5():
message = ugettext("This road starts by curving to the <b>left</b>. Then it curves to the "
+ "<b>right</b>. <br><br> The arrows next to the <b>Go</b> button will drag "
+ "the blocks into a sequence for you. ")
return message
def title_level6():
return ugettext("Well done! Let's use all three blocks. ")
def description_level6():
message = ugettext("This time the van has to <b>Move forwards</b>, <b>Turn left</b> and "
+ "<b>Turn right</b>. <br><br> Drag and attach the blocks like before. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level6(), message)
def hint_level6():
message = ugettext("Follow the road around. How many <b>Move forwards</b> do you need? <br><br>"
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you. ")
return message
def title_level7():
return ugettext("This road is more complicated. ")
def description_level7():
message = ugettext("Practise your new skills on this road by helping the driver to arrive at "
+ "the house. <br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level7(), message)
def hint_level7():
message = ugettext("Follow the road around. Don't forget to <b>Turn left</b> first. <br><br>"
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you.")
return message
def title_level8():
return ugettext("The warehouse is not always in the same place. ")
def description_level8():
message = ugettext("This time the warehouse is somewhere else on the screen but you still need "
+ "to use the <b>Move forwards</b> block. <br> Can you use the <b>Move "
+ "forwards</b> block correctly even when it looks like the van goes in a "
+ "different direction? "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level8(), message)
def hint_level8():
message = ugettext("On screen, the van looks like it follows the road down. If you were in the "
+ "van, it would look like you should <b>Move forwards</b>, then <b>Turn "
+ "right</b>. ")
return message
def title_level9():
return ugettext("Can you go from right to left? ")
def description_level9():
message = ugettext("Practise your new skills on this road by helping the driver to arrive "
+ "at the house. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level9(), message)
def hint_level9():
message = ugettext("How many times do you have to <b>Move forwards</b> before you "
+ "<b>Turn left</b>? ")
return message
def title_level10():
return ugettext("Well done! How about another go? ")
def description_level10():
message = ugettext("You've done really well so far. Try to get the van to the house. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level10(), message)
def hint_level10():
message = ugettext("This map is not so hard. Notice that to you it looks like the road goes "
+ "up, but if you were in the in the van, you would see the road goes "
+ "right. <br><br> Do you know which turn the van will take next? <br><br> "
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you. ")
return message
def title_level11():
return ugettext("Snail maze! ")
def description_level11():
message = ugettext("Uh oh, a tricky snail maze! Can you take the van through it? "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level11(), message)
def hint_level11():
message = ugettext("The maze looks a bit like a snail, doesn't it? That means that for most of "
+ "the time the van should only <b>Move forwards</b> and <b>Turn right</b>. "
+ "<br><br> The arrows next to the <b>Go</b> button will drag the blocks "
+ "into a sequence for you. ")
return message
def title_level12():
return ugettext("This road is more complicated. ")
def description_level12():
message = ugettext("Good work, by now you are able to solve quite complicated levels. Prove "
+ "your skills! "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level12(), message)
def hint_level12():
message = ugettext("This road might look much longer and more complicated, but it's not that "
+ "hard. <br> Start by using <b>Move forwards</b> a few steps and <b>Move "
+ "left</b>. ")
return message
def title_level13():
return ugettext("Multiple routes")
def description_level13():
message = ugettext("Often there is more than one way to get to the house. The route that needs "
+ "the fewest directions is usually best. <br> Help the van find the "
+ "shortest route to the house. <br> You can press the <b>Go</b> or "
+ "<b>Play</b> buttons to start the van. ")
return build_description(title_level13(), message)
def hint_level13():
message = ugettext("Try taking the route that starts by turning left then turns right. Do you "
+ "know what follows next? ")
return message
def title_level14():
return ugettext("Can you spot the shortest route? ")
def description_level14():
message = ugettext("So many options to choose from! <br> Do you know which is the shortest "
+ "route to get the van to house? ")
return build_description(title_level14(), message)
def hint_level14():
message = ugettext("The middle route seems to be shortest. Do you know what sequence of "
+ "instructions will make the van follow it?")
return message
def title_level15():
return ugettext("What if there is more than one delivery? ")
def description_level15():
message = ugettext("Our vans often need to go to more than one house. To make the van deliver "
+ "to a house use the <b>Deliver</b> block. <br> Make sure your sequence "
+ "gets the van to travel the shortest route! ")
return build_description(title_level15(), message)
def hint_level15():
message = ugettext("Make the van turn left and go directly to the closest house first. This is "
+ "the shortest route. <br><br> The <b>Deliver</b> block is not needed when "
+ "the van is only going to one house, but you need it when the van is "
+ "going to two or more houses. <br><br> Use the <b>Deliver</b> block every "
+ "time the van gets to a house. ")
return message
def title_level16():
return ugettext("This time there are even more houses. ")
def description_level16():
message = ugettext("Well done! You have done really well to get so far - let's take it to the "
+ "next level and add another house. <br> Can you work out the shortest, "
+ "most efficient route to each house? ")
return build_description(title_level16(), message)
def hint_level16():
message = ugettext("Although the <b>Deliver</b> block is not needed when there is only one "
+ "house, you need it when there are more houses, like now. <br><br>"
+ "Once the van is at a house, make sure you use the <b>Deliver</b> block. "
+ "Do that for each house. ")
return message
def title_level17():
return ugettext("House overload! ")
def description_level17():
message = ugettext("Well done, you're getting a hang of it! Can you do the same for even more "
+ "houses?<br> Don't forget to use the <b>Deliver</b> block at each house. ")
return build_description(title_level17(), message)
def hint_level17():
message = ugettext("Test your sequence to make sure that the van takes the shortest route to "
+ "visit all the houses on the way. <br><br> Use the <b>Deliver</b> block "
+ "every time the van gets to a house. ")
return message
def title_level18():
return ugettext("This one is quite a tangle. ")
def description_level18():
message = ugettext("Practise your new skills on this road by getting the van to <b>Deliver</b> "
+ "to each of the houses. ")
return build_description(title_level18(), message)
def hint_level18():
message = ugettext("To make sure the van takes the shortest route, first turn left. <br><br> "
+ "Use the <b>Deliver</b> block every time the van gets to a house. ")
return message
def title_level19():
return ugettext("Repeating yourself is boring.")
def description_level19():
message = youtubeLink(600, 400, "//www.youtube.com/embed/vFGd0v3msRE", 0)
message += ugettext("Attach a block inside the <b>Repeat</b> block to make the van repeats "
+ "that instruction. <br> This means you can use one block instead of lots "
+ "of blocks to do the same thing over and over again. <br> How many times "
+ "do you want the instruction repeated? Type the number into the "
+ "<b>Repeat</b> block. <br> The repeated sets of blocks make a 'loop'. "
+ "<br><br> When you are ready, press <b>Play</b>! ")
return build_description(title_level19(), message)
def hint_level19():
message = ugettext("A <b>Move forwards</b> block can be placed inside a <b>Repeat</b> block "
+ "(to the right of the word 'Do'). <br><br> Don't forget to change the "
+ "number of times you need to repeat the instruction. ")
return message
def title_level20():
return ugettext("Use the <b>Repeat</b> block to make your sequence shorter and simpler. ")
def description_level20():
message = ugettext("You drove the van down this road on Level 5. This time, use the "
+ "<b>Repeat</b> block to get the van to the house. <br> This will make "
+ "your sequence shorter and simpler than last time.")
return build_description(title_level20(), message)
def hint_level20():
message = ugettext("This level can be broken down into three repeated sets of: <b>Turn "
+ "left</b>, then <b>Turn right</b>. <br><br> These repeated steps make a "
+ "'loop'. ")
return message
def title_level21():
return ugettext("Four leaf clover.")
def description_level21():
message = ugettext("This path looks a bit like a four leaf clover. Can you take the driver "
+ "through it? ")
return build_description(title_level21(), message)
def hint_level21():
message = ugettext("This level can be broken down into repeated sets of: <b>Move forwards</b>, "
+ "<b>Turn left</b>, <b>Turn right<b>, <b>Turn left</b>. ")
return message
def title_level22():
return ugettext("Now things are getting quite long and complicated. ")
def description_level22():
message = ugettext("An algorithm (a set of instructions in a particular order) to get the van "
+ "to the house might not be very simple, but it can be made shorter by "
+ "using the <b>Repeat</b> blocks. <br> Are you up for this challenge? ")
return build_description(title_level22(), message)
def hint_level22():
message = ugettext("Look to see where you have used <b>Move forwards</b>, <b>Turn "
+ "left</b> and <b>Turn right</b> blocks. Are any blocks next to them the "
+ "same? Put them into one <b>Repeat</b> block. Don't forget to change the "
+ "number of times you need to repeat the instruction. ")
return message
def title_level23():
return ugettext("Sssssssssnake!")
def description_level23():
message = ugettext("This road seems to be winding just like a snake! Can you find a nice and "
+ "simple route to get the van to the house? ")
return build_description(title_level23(), message)
def hint_level23():
message = ugettext("How about using <b>Repeat</b> inside another <b>Repeat</b>? <br><br> This "
+ "level can be broken down into sets of: "
+ "<li> a set (nested loop) of <b>Move forwards</b>, </li> "
+ "<li> two <b>Turn left</b>s, </li> "
+ "<li> a set (nested loop) of <b>Move forwards</b>, </li> "
+ "<li> two <b>Turn right</b>s. </li>")
return message
def title_level24():
return ugettext("The road is very long and very bendy.")
def description_level24():
message = ugettext("Wow! Look at that! It won't get more complicated than this, we promise.")
return build_description(title_level24(), message)
def hint_level24():
message = ugettext("With all these twists and turns, you will have to think hard about what "
+ "sets of repeated instructions to use. <br><br>")
return message
def title_level25():
return ugettext("Waterfall level. ")
def description_level25():
message = ugettext("Since you did so well with the repeat loops, have a go at this level. ")
return build_description(title_level25(), message)
def hint_level25():
message = ugettext("Most of the program will consist of repeated sets of <b>Move forwards</b> "
+ "and a set of <b>Turn right</b> and <b>Turn left</b>. ")
return message
def title_level26():
return ugettext("Winter wonderland!")
def description_level26():
message = ugettext("Notice the snow! You can create new levels with different 'themes' of "
+ "backgrounds and decorations in the Level Editor. But first, try getting "
+ "the van to the house! ")
return build_description(title_level26(), message)
def hint_level26():
message = ugettext("Break the program into two <b>Repeat</b>s with a <b>Turn left</b> in "
+ "between them. ")
return message
def title_level27():
return ugettext("Farmyard")
def description_level27():
message = ugettext("What a muddy road! Can you help Dee find her way from the barn to the "
+ "house? ")
return build_description(title_level27(), message)
def hint_level27():
message = ugettext("Make sure you drag the correct turns into your <b>Repeat</b> block. ")
return message
def title_level28():
return ugettext("The big city")
def description_level28():
message = ugettext("Can you get the van from the warehouse to the house? Don't stop at any "
+ "shops on the way! ")
return build_description(title_level28(), message)
def hint_level28():
message = ugettext("Make sure you drag the correct turns into your <b>Repeat</b> block.")
return message
def title_level29():
return ugettext("No need for numbers. ")
def description_level29():
message = youtubeLink(600, 400, "//www.youtube.com/embed/EDwc80X_LQI", 0)
message += ugettext("Drag a block inside a <b>Repeat until</b> block to make the van repeat an "
+ "instruction. <br> Attach a 'condition' so the van knows when to stop "
+ "repeating the instruction. <br> Here, you want the van to repeat your "
+ "instruction until it is at the destination. <br> Doing this means "
+ "you don't have to work out how many times the van should repeat your "
+ "instruction. ")
return build_description(title_level29(), message)
def hint_level29():
message = ugettext("The blocks should read like a sentence: '<b>Repeat <b>until</b> <b>at "
+ "destination do: Move forwards</b>'. ")
return message
def title_level30():
return ugettext("Can you do that again? ")
def description_level30():
message = ugettext("Well done, you did it! Now have a go at using the <b>Repeat until<b> block "
+ "on a road with lots of turns. ")
return build_description(title_level30(), message)
def hint_level30():
message = ugettext("The blocks should read like a sentence: '<b>Repeat until at "
+ "destination</b> <b>do</b>: <b>Turn left</b>, <b>Turn right</b>'. ")
return message
def title_level31():
return ugettext("Practice makes perfect. ")
def description_level31():
message = ugettext("Have another go to make sure you have got the hang of it. ")
return build_description(title_level31(), message)
def hint_level31():
message = ugettext("This program can be broken into repeated sets of <b>Turn left</b>, <b>Turn "
+ "right</b> and two <b>Move forwards</b>. ")
return message
def title_level32():
return ugettext("Uh oh, it's <b>Until</b> fever! ")
def description_level32():
message = ugettext("Good job! Can you help the driver reach the destination again? ")
return build_description(title_level32(), message)
def hint_level32():
message = ugettext("This program is quite similar to the one you just solved. Do you remember "
+ "the solution you came up with back then? ")
return message
def title_level33():
return ugettext("Now it's time to try the <b>If</b> block. ")
def description_level33():
message = youtubeLink(600, 400, "//www.youtube.com/embed/O0RXbJyYq8o", 0)
message += ugettext("Another way of telling the van what to do is to use the <b>If</b> block. "
+ "For example, <b>If</b> the <b>road exists forwards do</b> <b>Move "
+ "forwards</b>. <br> This is called an 'if statement'. <br> Try "
+ "using the <b>If</b> block and the <b>Repeat</b> block together. <br> "
+ "The <b>Repeat</b> block will stretch if you attach the <b>If</b> block "
+ "inside it. ")
return build_description(title_level33(), message)
def hint_level33():
message = ugettext("We say that the road 'exists' in a direction. For example, if the road "
+ "goes forwards we say that it 'exists forwards'. <br><br> "
+ "<b>If</b> a <b>road exists forwards</b> then <b>do Move forwards</b>."
+ "<br><br>Repeat this set to get the van to the house. ")
return message
def title_level34():
return ugettext("Multiple <b>If</b>s")
def description_level34():
message = ugettext("It can be handy to use <b>If</b> to give your van choices, so you don't "
+ "have to give the van new instructions at every step. <br> For "
+ "example: Tell the van <b>If</b> the <b>road exists forwards do Move "
+ "forwards,</b> but <b>If</b> the <b>road exists left do Turn left</b>. "
+ "<br> The van will choose correctly from the <b>Move forwards</b> and "
+ "<b>Turn left</b> instructions depending on the road. <br> Use an 'if "
+ "statement' in a 'loop' to drive the van down this bendy road. ")
return build_description(title_level34(), message)
def hint_level34():
message = ugettext("At each bend the van can either <b>Move forwards</b> or <b>Turn left</b>. "
+ "Create a loop so it can make the correct choice. <br><br> We say that "
+ "the road 'exists' in a direction. For example, if the road goes forwards "
+ "we say that it 'exists forwards'. ")
return message
def title_level35():
return ugettext("Let's put it all together!")
def description_level35():
message = ugettext("You have discovered the magic of 'if statements'. Can you make a program "
+ "that uses <b>Move forwards</b>, <b>Turn left</b> and <b>Turn right</b> "
+ "to get the van to the house. ")
return build_description(title_level35(), message)
def hint_level35():
message = ugettext("At each bend the van can either <b>Move forwards</b> or <b>Turn left</b>. "
+ "Create a loop so it can make the correct choice. <br><br> We say that "
+ "the road 'exists' in a direction. For example, if the road goes forwards "
+ "we say that it 'exists forwards'. ")
return message
def title_level36():
return ugettext("What else? If-else, that's what! ")
def description_level36():
message = youtubeLink(600, 400, "//www.youtube.com/embed/GUUJSRuAyU0", 0)
message += ugettext("You can change the <b>If</b> block to make more choices. Click on the "
+ "star in the <b>If</b> block and add <b>Else if</b>. <br> This will tell "
+ "the van what to do if the first <b>If</b> direction can't be done. "
+ "<br> For example, tell the van to <b>Turn left</b> <b>If</b> the "
+ "<b>road exists left</b>. Add <b>Else if</b> the <b>road exists right"
+ "</b>, <b>Turn right</b>. <br> This uses fewer blocks and makes sure "
+ "that only one step is taken in each loop. <br> This type of "
+ "algorithm is called a 'general algorithm' as it can be used with most "
+ "simple routes. ")
return build_description(title_level36(), message)
def hint_level36():
message = ugettext("The program should be a simple set of: <b>If road exists forwards do</b> "
+ "<b>Move forwards</b>, <b>Else if road exists left do Turn left</b>, "
+ "<b>Else if road exists right do Turn right</b>. <br><br> You can find "
+ "<b>Else if</b> by clicking the star on the <b>If</b> block and adding "
+ "the <b>Else if</b>.<br><br> If the first 'condition' is true (this means "
+ "if the road exists in the direction you put first) the van will follow "
+ "the blocks after <b>If</b>. <br><br> If not, the van will check to see "
+ "if it can follow the direction you put after <b>Else if</b>. It will "
+ "keep checking until it has a direction it can take. ")
return message
def title_level37():
return ugettext("A bit longer.")
def description_level37():
message = ugettext("Let's see if we can go further - this road is longer. Notice that the "
+ "length of the road does not change the length of your program! ")
return build_description(title_level37(), message)
def hint_level37():
message = ugettext("Think back to the solutions you produced using 'if statements' before. ")
return message
def title_level38():
return ugettext("Third time lucky! ")
def description_level38():
message = ugettext("Well done! You've got so far. <br> Can you apply the knowledge you "
+ "gained going through this part of the game to this level? ")
return build_description(title_level38(), message)
def hint_level38():
message = ugettext("Think back to the solutions you produced using 'if statements' before. ")
return message
def title_level39():
return ugettext("Dead ends! ")
def description_level39():
message = ugettext("Can you change the 'general algorithm' so that the van takes a shorter "
+ "route? <br> What if you change the order the van checks for "
+ "directions? <br> Keep an eye on the fuel level - try to use as "
+ "little as possible. ")
return build_description(title_level39(), message)
def hint_level39():
message = ugettext("Make the van check if the road exists right before it checks if the road "
+ "exists left. <br><br> Then it will be able to reach the destination "
+ "using the 'general algorithm'. Can you see why? ")
return message
def title_level40():
return ugettext("Adjust your previous solution.")
def description_level40():
message = ugettext("Can you think of a way you could change the 'general algorithm' you have "
+ "implemented earlier to make sure the van driver reaches the house having "
+ "travelled the shortest route? ")
return build_description(title_level40(), message)
def hint_level40():
message = ugettext("The 'general algorithm' will work here. <br><br> Make sure you change the "
+ "order the van checks for directions to take the shortest route to the "
+ "destination. ")
return message
def title_level41():
return ugettext("Decision time. ")
def description_level41():
message = ugettext("Do you think changes to the 'general algorithm' will help the van find the "
+ "shortest route? <br> Or do you have to come up with a different "
+ "solution? <br> Time to make a decision... ")
return build_description(title_level41(), message)
def hint_level41():
message = ugettext("Psst! You can simply make a change to the 'general algorithm'. <br><br> "
+ "If you make the van check for turns before it checks the road exists "
+ "forwards, you will come up with the perfect solution. <br><br>"
+ "Notice that here it doesn't matter which turn you check for first - it "
+ "will change the route but provide you with the same score. ")
return message
def title_level42():
return ugettext("What do you think this time? ")
def description_level42():
message = ugettext("Can you use the 'general algorithm' here? <br> Can it be changed so that "
+ "it finds a shorter route, or will you need a new solution? ")
return build_description(title_level42(), message)
def hint_level42():
message = ugettext("Uh oh, moving around the blocks in your 'general algorithm' won't help. "
+ "<br> How about creating a simple solution without 'if statements' that "
+ "will help the van reach the house? ")
return message
def title_level43():
return ugettext("Good work! What else can you do? ")
def description_level43():
message = ugettext("You should be really good at this by now. Can you manage this complicated "
+ "road? ")
return build_description(title_level43(), message)
def hint_level43():
message = ugettext("This road cannot be solved by a 'general algorithm'. Can you solve it "
+ "without 'if statements'? <br><br> Remember to choose the shortest route "
+ "and an algorithm which is as short as possible. ")
return message
def title_level44():
return ugettext("Oh no! Traffic lights! ")
def description_level44():
message = youtubeLink(600, 400, "//www.youtube.com/embed/EDwc80X_LQI", 0)
message += ugettext("The light varies from red to green. <br>"
+ "The van must check which colour the traffic light is when it reaches them "
+ "- if it goes past a red light it will break the Highway Code."
+ "<br> Here, you want the van to repeat the wait instruction while the traffic light is red. "
+ "Drag a block inside a <b>Repeat while</b> block to make the van repeat an instruction. "
+ "<br> Attach a 'condition' so the van knows when to repeat the instruction. ")
return build_description(title_level44(), message)
def hint_level44():
message = ugettext("Don't worry about the 'general algorithm' here. Just go forwards. <br><br>"
+ "Once the van is right under the traffic light, make it wait for a green "
+ "light by adding a <b>Wait</b> block. ")
return message
def title_level45():
return ugettext("Green for go, red for wait. ")
def description_level45():
message = ugettext("Can you write a program so the van moves forwards on a green light but "
+ "waits at a red light? ")
return build_description(title_level45(), message)
def hint_level45():
message = ugettext("Use an 'if statement' to tell the van <b>If traffic light is red, Wait, "
+ "Else Move forwards</b>. <br><br> Remember to repeat that until you get "
+ "to the destination. ")
return message
def title_level46():
return ugettext("Well done - you've made it really far! ")
def description_level46():
message = ugettext("Let's practise what you've learnt so far. <br> Don't forget to add a "
+ "turn and to make the van wait at a traffic light. ")
return build_description(title_level46(), message)
def hint_level46():
message = ugettext("Be careful about the order you put your <b>If</b> blocks in. <br><br>"
+ "If you make the van check the road exists forwards before checking for a "
+ "light, it might break the Highway Code. ")
return message
def title_level47():
return ugettext("What a mess! But can you spot a route? ")
def description_level47():
message = ugettext("Put your knowledge to test. Create an algorithm to lead the van to the "
+ "house. <br> Don't forget to add a turn and to make the van wait at a "
+ "traffic light. ")
return build_description(title_level47(), message)
def hint_level47():
message = ugettext("Use an 'if statement' and check if the light is red. <br><br> "
+ "<b>If traffic light is red, wait, Else if road exists forwards, Move "
+ "forwards, Else Turn left</b>. <br><br> Remember to repeat that until you "
+ "get to the destination! ")
return message
def title_level48():
return ugettext("Put all that hard work to the test. ")
def description_level48():
message = ugettext("Congratulations - you've made it really far! <br> Can you create a "
+ "'general algorithm' that will help the van reach the destination in the "
+ "shortest way but stop at a traffic light? ")
return build_description(title_level48(), message)
def hint_level48():
message = ugettext("You need to check: "
+ "<li> if the lights are red </li>"
+ "<li> if the road exists right </li>"
+ "<li> if the road exists forwards </li> "
+ "<li> if the road exists left </li>"
+ "<li> if it is a dead end </li>"
+ "Make sure you put the checks in the right order. ")
return message
def title_level49():
return ugettext("Amazing! Have another go! ")
def description_level49():
message = ugettext("Can you change the 'general algorithm' you created before to make the van "
+ "take the shortest route to the destination? ")
return build_description(title_level49(), message)
def hint_level49():
message = ugettext("You need to check: "
+ "<li> if the light is red </li>"
+ "<li> if the road exists left </li>"
+ "<li> if the road exists forwards </li>"
+ "<li> or if the road exists right </li>"
+ "Do you think you need to check for a dead end? <br> Make sure you put "
+ "the checks in the right order. ")
return message
def title_level50():
return ugettext("Light maze. ")
def description_level50():
message = ugettext("Well this is tricky. Look at all those lights! <br> Can you find the "
+ "shortest route to the destination? It would be good if the van doesn't "
+ "have to wait at too many lights. ")
return build_description(title_level50(), message)
def hint_level50():
message = ugettext("Don't worry about the algorithm you've already come up with. Take the "
+ "first turn left which has fewer traffic lights. <br><br> Once your van "
+ "is right under the traffic lights, make sure it waits for a green "
+ "light. ")
return message
def title_level51():
return ugettext("Back to basics with a twist")
def description_level51():
message = ugettext("Can you come up with a solution to this level using the limited number of blocks we provide at the start?")
return build_description(title_level51(), message)
def hint_level51():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level52():
return ugettext("A Bit more Tricky")
def description_level52():
message = ugettext("Well done so far! Can you find a solution to this road? You have to move forward, but you have no forward block to use. Do you know how to help the van get to the destination?")
return build_description(title_level52(), message)
def hint_level52():
message = ugettext("Don't forget to use the repeat loop.")
return message
def title_level53():
return ugettext("Choose your blocks wisely")
def description_level53():
message = ugettext("Can you find the shortest route? Use your blocks carefully and don't forget the <b>repeat</b> loop.")
return build_description(title_level53(), message)
def hint_level53():
message = ugettext("Think back to earlier levels - what did you learn")
return message
def title_level54():
return ugettext("Round and Round")
def description_level54():
message = ugettext("Can you find the shortest route? Use your blocks carefully and don't forget the <b>repeat</b> loop.")
return build_description(title_level54(), message)
def hint_level54():
message = ugettext("Think back to earlier levels - what did you learn")
return message
def title_level55():
return ugettext("Wonky Fish!")
def description_level55():
message = ugettext("Use <b>repeat until</b> and the <b>if</b> statement to find your way around the Wonky Fish.")
return build_description(title_level55(), message)
def hint_level55():
message = ugettext("Think back to earlier levels - what did you learn.")
return message
def title_level56():
return ugettext("Concrete Wasteland")
def description_level56():
message = ugettext("Use <b>repeat until</b> and the <b>if</b> statement to find your way around the Concrete Wasteland")
return build_description(title_level56(), message)
def hint_level56():
message = ugettext("Think back to earlier levels - what did you learn.")
return message
def title_level57():
return ugettext("This is <b>not...</b> the same")
def description_level57():
message = ugettext("Like <b>repeat until</b>, <b>repeat while</b> is the opposite. Here, you want the van to repeat your instructions while it is not at the destination.<br />Doing this means you don't have to work out how many times the van should repeat your instructions.")
return build_description(title_level57(), message)
def hint_level57():
message = ugettext("The blocks should read like a sentence. Repeat while not at destination then add your instructions using the blocks provided.")
return message
def title_level58():
return ugettext("Snow snake")
def description_level58():
message = ugettext("Combining what you have just learnt using <b>repeat while</b> with the repeat loop, can you find your way around the snow snake?")
return build_description(title_level58(), message)
def hint_level58():
message = ugettext("The blocks should read like a sentence: <b>repeat while not at destination</b> then using the <b>repeat</b> add your instructions")
return message
def title_level59():
return ugettext("Tricky turnaround")
def description_level59():
message = ugettext("Use your blocks carefully not forgetting the <b>turnaround</b>.")
return build_description(title_level59(), message)
def hint_level59():
message = ugettext("Inside the repeat <b>repeat until</b> block, <b>turn left</b>, <b>turn around</b> and <b>turn left<b> again should do it.")
return message
def title_level60():
return ugettext("Right around the block")
def description_level60():
message = ugettext("Can you find your way around this puzzle?")
return build_description(title_level60(), message)
def hint_level60():
message = ugettext("The trick to this level is to <b>turn right</b> then <b>turn around</b>.")
return message
def title_level61():
return ugettext("Can you create the 'Wiggle' procedure?")
def description_level61():
message = ugettext("Procedures are groups of instructions that can be executed multiple times without being rewritten. For example, if you want to instruct the van to follow a repeated pattern in the road, you can create a specific procedure. To create a procedure, simply choose the correct blocks and put them in the right order inside the <b>Define do</b> block. Once you have done that, give it a name eg wiggle.<br />Now you're ready! Attach the <b>Call</b> block where you want your 'wiggle' procedure to be executed. Don't forget to put the name in it!")
return build_description(title_level61(), message)
def hint_level61():
message = ugettext("Don't forget to use <b>Define</b>. Name your procedure and attach the blocks in the right order. Start with <b>move forwards</b>, <b>turn left</b>, you can add repeat loops to a procedure and ending with <b>turn left</b>. Call your procedure under your start block and off you go...")
return message
def title_level62():
return ugettext("Lots of Traffic Lights!")
def description_level62():
message = ugettext("Create a procedure which tells the van to wait until the traffic lights are green.")
return build_description(title_level62(), message)
def hint_level62():
message = ugettext("Don't forget to name your procedure eg 'lights' and every time you want the van to check the traffic lights you need to '<b>call</b>' it.")
return message
def title_level63():
return ugettext("Wiggle Wiggle")
def description_level63():
message = ugettext("Can you find the repeating pattern here and create a new 'wiggle' procedure? And do the Wiggle Wiggle!")
return build_description(title_level63(), message)
def hint_level63():
message = ugettext("Can you see the repeating pattern in the path? The 'wiggle' consisting of a <b>turn left</b>, <b>move forwards</b>, <b>turn right</b>, <b>turn right</b>, <b>turn left</b> can be put in a <b>Define</b> block to create a procedure. Once you have named it, attach the <b>Call block with the procedure's name in the text box to execute it.")
return message
def title_level64():
return ugettext("Muddy Patterns with Phil")
def description_level64():
message = ugettext("Can you spot a pattern here? Create several procedures, it can save time when writing a program. Don't forget to clearly name your procedures and then call them.")
return build_description(title_level64(), message)
def hint_level64():
message = ugettext("One procedure could be <b>turn left</b>, <b>turn right</b>, <b>move forwards</b>, <b>turn right</b> and <b>turn left</b>. Don't forget you can create a repeat loop in your procedures.")
return message
def title_level65():
return ugettext("Complicated roads.")
def description_level65():
message = ugettext("This road might be a bit more complicated, but the procedures you could come up with are quite simple. Have a go and find out yourself!")
return build_description(title_level65(), message)
def hint_level65():
message = ugettext("Your first procedure could be <b>turn left</b> and <b>turn right</b> 'left-right' The second procedure could be <b>turn right</b> <b>turn left</b>, 'right-left'.")
return message
def title_level66():
return ugettext("Dee's snowy walk")
def description_level66():
message = ugettext("Did you know procedures can call other procedures?")
return build_description(title_level66(), message)
def hint_level66():
message = ugettext("Create 2 procedures. The first one should read <b>move forwards</b>, <b>move forwards</b>, <b>turn right</b>. The second <b>move forwards</b> then <b>call</b> your first procedure")
return message
def title_level67():
return ugettext("Crazy Farm")
def description_level67():
message = ugettext("This one will really test what you have learnt.")
return build_description(title_level67(), message)
def hint_level67():
message = ugettext("It might be easier to write the program without repeats or procedures then create 3 separate procedures from the patterns that your see.")
return message
def title_level68():
return ugettext("T - time")
def description_level68():
message = ugettext("Can you find the shortest route?")
return build_description(title_level68(), message)
def hint_level68():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level69():
return ugettext("Duck pond dodge")
def description_level69():
message = ugettext("Can you find the shortest route?")
return build_description(title_level69(), message)
def hint_level69():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level70():
return ugettext("Winter wonderland")
def description_level70():
message = ugettext("Can you find the shortest route?")
return build_description(title_level70(), message)
def hint_level70():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level71():
return ugettext("Frozen challenge")
def description_level71():
message = ugettext("Can you find the shortest route?")
return build_description(title_level71(), message)
def hint_level71():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level72():
return ugettext("Can Wes Find his lunch?")
def description_level72():
message = ugettext("Can you find the shortest route?")
return build_description(title_level72(), message)
def hint_level72():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level73():
return ugettext("Traffic light freeze up!")
def description_level73():
message = ugettext("Can you find the shortest algorithm?")
return build_description(title_level73(), message)
def hint_level73():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level74():
return ugettext("Pandemonium")
def description_level74():
message = ugettext("Can you find the shortest route?")
return build_description(title_level74(), message)
def hint_level74():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level75():
return ugettext("Kirsty's maze time")
def description_level75():
message = ugettext("Can you find the shortest route?")
return build_description(title_level75(), message)
def hint_level75():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level76():
return ugettext("Cannot turn left!")
def description_level76():
message = ugettext("Can you find the shortest route?")
return build_description(title_level76(), message)
def hint_level76():
message = ugettext("What is that? A barn for ANTS!?")
return message
def title_level77():
return ugettext("G Force")
def description_level77():
message = ugettext("Can you get the van to the house?")
return build_description(title_level77(), message)
def hint_level77():
message = ugettext("Heard of recursion?")
return message
def title_level78():
return ugettext("Wandering Phil")
def description_level78():
message = ugettext("Can you get Phil to the house?")
return build_description(title_level78(), message)
def hint_level78():
message = ugettext("Repeat while not dead end... turn around...")
return message
def title_level79():
return ugettext("Muddy Mayhem")
def description_level79():
message = ugettext("Can you find the shortest route?")
return build_description(title_level79(), message)
def hint_level79():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level80():
return ugettext("Here's Python!")
def description_level80():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Can you tell which Python statement matches which block?")
return build_description(title_level80(), message)
def hint_level80():
return ""
def title_level81():
return ugettext("Matching Blockly")
def description_level81():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Can you tell which Python statement matches which block?")
return build_description(title_level81(), message)
def hint_level81():
return ""
def title_level82():
return ugettext("Don't forget to find the shortest route")
def description_level82():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Can you tell which Python statement matches which block?")
return build_description(title_level82(), message)
def hint_level82():
return ""
def title_level83():
return ugettext("Repeating yourself in Python looks different")
def description_level83():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding a <b>repeat</b> block and watch what happens in Python.")
return build_description(title_level83(), message)
def hint_level83():
return ""
def title_level84():
return ugettext("Repeat and watch.")
def description_level84():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding a <b>repeat</b> block and watch what happens in Python.")
return build_description(title_level84(), message)
def hint_level84():
return ""
def title_level85():
return ugettext("Looks easy but use repeat until and see what happens?")
def description_level85():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding a <b>repeat</b> until block and watch what happens in Python.")
return build_description(title_level85(), message)
def hint_level85():
return ""
def title_level86():
return ugettext("See what the if blocks looks like in Python")
def description_level86():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python.")
return build_description(title_level86(), message)
def hint_level86():
return ""
def title_level87():
return ugettext("Don't forget to use else if")
def description_level87():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python particularly with <b>else if</b> and <b>else</b> statements.")
return build_description(title_level87(), message)
def hint_level87():
return ""
def title_level88():
return ugettext("See what happens when you add Traffic lights")
def description_level88():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python particularly with <b>else if</b> and <b>else</b> statements.")
return build_description(title_level88(), message)
def hint_level88():
return ""
def title_level89():
return ugettext("Watch carefully as you have another go")
def description_level89():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python particularly with <b>else if</b> and <b>else</b> statements.")
return build_description(title_level89(), message)
def hint_level89():
return ""
def title_level90():
return ugettext("Have a go at procedures - what do they look like in Python?")
def description_level90():
message = ugettext("As you create your program using Blockly see what it looks like in the Python language. Try adding a procedure and watch what happens in Python.")
return build_description(title_level90(), message)
def hint_level90():
message = ugettext("Don't forget to name your procedure and see what happens in Python.")
return message
def title_level91():
return ugettext("Put it all together")
def description_level91():
message = ugettext("As you create your program using Blockly see what it looks like in the Python language. Try adding a procedure and watch what happens in Python.")
return build_description(title_level91(), message)
def hint_level91():
message = ugettext("Don't forget to name your procedure and see what happens in Python.")
return message
def title_level92():
return ugettext("Start with the basics, <b>forward</b>, <b>left</b> and <b>right</b>")
def description_level92():
message = ugettext("Now you are coding in Python! This is what real developers do!! To start you off, the van object has been created for you already. Under this you need to add the correct Python statements to instruct the van to drive to the destination.<br />For more information about coding in Python refer to <a href='http://www.diveintopython.net/' target='_blank'>www.diveintopython.net</a>.")
return build_description(title_level92(), message)
def hint_level92():
message = ugettext("""Try using the following commands:<br /><pre>v.move_forwards()<br />v.turn_left()<br />v.turn_right()</pre>""")
return message
def title_level93():
return ugettext("Keep it simple")
def description_level93():
message = ugettext("Try this road. Under the van object you need to add the correct Python statements to instruct the van to drive to the destination.")
return build_description(title_level93(), message)
def hint_level93():
message = ugettext("""Try using the following commands:
<pre>v.move_forwards()
v.turn_left()
v.turn_right()</pre>""")
return message.replace('\n','<br />')
def title_level94():
return ugettext("Take the shortest route.")
def description_level94():
message = ugettext("You're getting good at this! Can you drive the van along this road using the correct Python statements.")
return build_description(title_level94(), message)
def hint_level94():
message = ugettext("""Try using the following commands:
<pre>v.move_forwards()
v.turn_left()
v.turn_right()</pre>""")
return message.replace('\n','<br />')
def title_level95():
return ugettext("Count and repeat")
def description_level95():
message = ugettext("Now try to use a <b>repeat</b> loop to solve this level. Look back at level 83 to see what this could look like in Python.")
return build_description(title_level95(), message)
def hint_level95():
message = ugettext("""To repeat some statements a set number of times you can use something like the following:
<pre>for count in range(3):
v.turn left
print count</pre>
The print statement will output the value of count to the console.""")
return message.replace('\n','<br />')
def title_level96():
return ugettext("Count and repeat is easy")
def description_level96():
message = ugettext("Now try to use a <b>repeat loop</b> to solve this level. Look back at level 83 to see what this could look like in Python. This time you could use 2 loops, 1 for each straight piece of road.")
return build_description(title_level96(), message)
def hint_level96():
message = ugettext("""To repeat some statements a set number of times you can use something like the following:
<pre>for count in range(3):
v.turn left
print count</pre>
The print statement will output the value of count to the console.""")
return message.replace('\n','<br />')
def title_level97():
return ugettext("Loop the loop")
def description_level97():
message = ugettext("Now try to use a loop within a loop, known as a 'nested loop'. Look back at level 84 to see what this could look like in Python.")
return build_description(title_level97(), message)
def hint_level97():
message = ugettext("""To repeat within a repeats a set number of times you can use something like the following:
<pre>for i in range(3):
for j in range(5):
v.turn left
print count</pre>
The print statement will output the value of count to the console.""")
return message.replace('\n','<br />')
def title_level98():
return ugettext("Repeat and check")
def description_level98():
message = ugettext("Try to solve this level by repeatedly moving until the van is at the destination. Also, check whether the van can move forward or else must turn left. Now try and write the Python code. Look back at level 86 to give you an idea of what this could look like.")
return build_description(title_level98(), message)
def hint_level98():
message = ugettext("""To repeat while a condition is met you can use something like the following:
<pre>while not v.at_destination():
v.move_forwards()</pre>
To check whether a condition is met you can use something like the following:
<pre>if v.is_road_forward():
v.move_forwards()</pre>
You may also need to use the <b>else</b> statement.""")
return message.replace('\n','<br />')
def title_level99():
return ugettext("Find a general solution")
def description_level99():
message = ugettext("Now try using what you have just learnt to solve this level. You could also try using the <b>if</b>, <b>elif</b> and <b>else</b> statements. Look back at level 86 to give you an idea of what this could look like.")
return build_description(title_level99(), message)
def hint_level99():
message = ugettext("""To repeat while a condition is met you can use something like the following:
<pre>while not v.at_destination():
v.move_forwards()</pre>
To check whether a condition is met you can use something like the following:
<pre>if v.is_road_forward():
v.move_forwards()</pre>
You may also need to use the <b>elif</b> and <b>else</b> statements.""")
return message.replace('\n','<br />')
def title_level100():
return ugettext("Watch out for the dead end!")
def description_level100():
message = ugettext("Practice your new Python skills on this road to get the van to the destination. Look back at level 88 for a dead end check.")
return build_description(title_level100(), message)
def hint_level100():
message = ugettext("Try using<br /><pre>if v.at_dead_end():</pre><br />to check if the van is at a dead end.")
return message
def title_level101():
return ugettext("Function or Junction?")
def description_level101():
message = ugettext("Try defining your own procedure to solve this level. In Python procedures are generally called functions. Look back at level 90 for an example of how to define a function in Python.")
return build_description(title_level101(), message)
def hint_level101():
message = ugettext("""To define a function in Python you could do something like:
<pre>def my_function():
print 'test'</pre>
To call a defined function you could do something like:
<pre>my_function()</pre>
Remember, you must define a function before you call it.""")
return message.replace('\n','<br />')
def title_level102():
return ugettext("Watch for the patterns")
def description_level102():
message = ugettext("For this level try defining more than one function. Try to look for a repeating pattern to simplify your program.")
return build_description(title_level102(), message)
def hint_level102():
message = ugettext("""To define a function in Python you could do something like:
<pre>def my_function():
print 'test'</pre>
To call a defined function you could do something like:
<pre>my_function()</pre>""")
return message.replace('\n','<br />')
def title_level103():
return ugettext("Patterns within patterns.")
def description_level103():
message = ugettext("For this level try to define 2 or more functions where inside one function you call another function.")
return build_description(title_level103(), message)
def hint_level103():
message = ugettext("""To define a function that calls another function you could do something like:
<pre>def my_function():
print 'test'
def my_other_function():
for i in range(3):
my_function()
my_other_function()</pre>""")
return message.replace('\n','<br />')
def title_level104():
return ugettext("Can you see the repeating pattern?")
def description_level104():
message = ugettext("For this level try to define 2 or more functions where inside one function you call another function.")
return build_description(title_level104(), message)
def hint_level104():
message = ugettext("""To define a function that calls another function you could do something like:
<pre>def my_function():
print 'test'
def my_other_function():
for i in range(3):
my_function()
my_other_function()</pre>""")
return message.replace('\n','<br />')
def title_level105():
return ugettext("Find the shortest route.")
def description_level105():
message = ugettext("For this level try to implement a general algorithm. Keep the van going until it arrives at the destination, checking for traffic lights and junctions.")
return build_description(title_level105(), message)
def hint_level105():
message = ugettext("For this you will have to use a combination of the <b>while</b> and <b>if</b> statements.")
return message
def title_level106():
return ugettext("Spiral and add")
def description_level106():
message = ugettext("For this level the van needs to travel in a spiral. The number of grid squares the van has to move keeps increasing by 1 on each turn. To do this you can have a loop that makes use of a variable to track the length of the road you need to travel after each turn.")
return build_description(title_level106(), message)
def hint_level106():
message = ugettext("""To use a variable to store the number of grid squares the van has to move you can do something like the following:
<pre>n = 1
while not v.at_destination():
print n
n += 1</pre>
Variables can be used in place of constants when calling functions. For example to repeat something n times you can do something like the following:
<pre>for count in range(n):</pre>""")
return message.replace('\n','<br />')
def title_level107():
return ugettext("Spiral and double")
def description_level107():
message = ugettext("For this level try something similar to what you have just learnt. This time the straight sections of road are doubling in length after each turn.")
return build_description(title_level107(), message)
def hint_level107():
message = ugettext("To double the value of a variable you can do something like the following:<br /><pre>n *= 2</pre>")
return message
def title_level108():
return ugettext("Think less")
def description_level108():
message = ugettext("This time the straight sections of road decrease in length by 2 after each turn.")
return build_description(title_level108(), message)
def hint_level108():
message = ugettext("To decrease the value of a variable by an amount you can do something like the following:<br /><pre>n -= 5</pre>")
return message
def title_level109():
return ugettext("Final challenge!")
def description_level109():
message = ugettext("For the last challenge, the road straight line sections of road start off increasing by 1 after each turn and then switch to dividing by 2 with a twist!")
return build_description(title_level109(), message)
def hint_level109():
message = ugettext("To halve the value of a variable you can do something like the following:<br /><pre>n /= 2</pre>")
return message
| agpl-3.0 | 2,536,014,707,948,054,500 | 37.623599 | 565 | 0.648973 | false |
StellarCN/py-stellar-base | stellar_sdk/signer_key.py | 1 | 3956 | from . import xdr as stellar_xdr
from .__version__ import __issues__
from .exceptions import ValueError
from .strkey import StrKey
__all__ = ["SignerKey"]
class SignerKey:
"""The :class:`SignerKey` object, which represents an account signer key on Stellar's network.
:param signer_key: The XDR signer object
"""
def __init__(self, signer_key: stellar_xdr.SignerKey) -> "None":
self.signer_key: stellar_xdr.SignerKey = signer_key
@classmethod
def ed25519_public_key(cls, account_id: str) -> "SignerKey":
"""Create ED25519 PUBLIC KEY Signer from account id.
:param account_id: account id
:return: ED25519 PUBLIC KEY Signer
:raises:
:exc:`Ed25519PublicKeyInvalidError <stellar_sdk.exceptions.Ed25519PublicKeyInvalidError>`: if ``account_id``
is not a valid ed25519 public key.
"""
signer_key = stellar_xdr.SignerKey(
stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_ED25519,
ed25519=stellar_xdr.Uint256(StrKey.decode_ed25519_public_key(account_id)),
)
return cls(signer_key)
@classmethod
def pre_auth_tx(cls, pre_auth_tx_hash: bytes) -> "SignerKey":
"""Create Pre AUTH TX Signer from the sha256 hash of a transaction,
click `here <https://www.stellar.org/developers/guides/concepts/multi-sig.html#pre-authorized-transaction>`__ for more information.
:param pre_auth_tx_hash: The sha256 hash of a transaction.
:return: Pre AUTH TX Signer
"""
signer_key = stellar_xdr.SignerKey(
stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_PRE_AUTH_TX,
pre_auth_tx=stellar_xdr.Uint256(pre_auth_tx_hash),
)
return cls(signer_key)
@classmethod
def sha256_hash(cls, sha256_hash: bytes) -> "SignerKey":
"""Create SHA256 HASH Signer from a sha256 hash of a preimage,
click `here <https://www.stellar.org/developers/guides/concepts/multi-sig.html#hashx>`__ for more information.
:param sha256_hash: a sha256 hash of a preimage
:return: SHA256 HASH Signer
"""
signer_key = stellar_xdr.SignerKey(
stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_HASH_X,
hash_x=stellar_xdr.Uint256(sha256_hash),
)
return cls(signer_key)
def to_xdr_object(self) -> stellar_xdr.SignerKey:
"""Returns the xdr object for this SignerKey object.
:return: XDR Signer object
"""
return self.signer_key
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.SignerKey) -> "SignerKey":
"""Create a :class:`SignerKey` from an XDR SignerKey object.
:param xdr_object: The XDR SignerKey object.
:return: A new :class:`SignerKey` object from the given XDR SignerKey object.
"""
if xdr_object.type == stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_ED25519:
assert xdr_object.ed25519 is not None
account_id = StrKey.encode_ed25519_public_key(xdr_object.ed25519.uint256)
return cls.ed25519_public_key(account_id)
elif xdr_object.type == stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_PRE_AUTH_TX:
assert xdr_object.pre_auth_tx is not None
return cls.pre_auth_tx(xdr_object.pre_auth_tx.uint256)
elif xdr_object.type == stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_HASH_X:
assert xdr_object.hash_x is not None
return cls.sha256_hash(xdr_object.hash_x.uint256)
else:
raise ValueError(
f"This is an unknown signer type, please consider creating an issuer at {__issues__}."
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.signer_key == other.signer_key
def __str__(self):
return f"<SignerKey [signer_key={self.signer_key}]>"
| apache-2.0 | -7,404,016,642,933,082,000 | 38.959596 | 139 | 0.638524 | false |
songeater/SONGSHTR | soundfunc.py | 1 | 4391 | import numpy as np
from scipy.fftpack import dct, idct
import sys
'''
----------
FUNCTIONS
----------
'''
def get_config():
config = {}
config['sound_file'] = "harvestmoon-mono-hp500.wav"
config['save_file'] = config['sound_file'] + "_modelsave_"
config['blocksize']=13000
config['compressed_blocksize'] = (config['blocksize']//2+1)
config['seqlen'] = 80 # in number of blocks...
config['win_edge'] = int(config['blocksize'] / 2)
config['out_step'] = 1 # in number of blocks...
config['batchsize'] = 5 # in number of blocks...
config['domain'] = "rfft" # either "rfft" or "dct"
if config['domain'] == "dct": config['win_edge'] = int(config['blocksize'] / 2) # if dct, have to set this
return config
def concat_sound_blocks_mdct(sound_blocks, edge, clickedge=0):
print(edge)
print(np.asarray(sound_blocks).shape)
new_gen = []
for i in range(0, len(sound_blocks)-2):
if i==0:
new_gen.append(sound_blocks[i][0:-edge-clickedge])
else:
temp1 = sound_blocks[i][0:-edge-clickedge]
temp2 = sound_blocks[i-1][-edge+clickedge:]
merge = temp1 + temp2
new_gen.append(merge)
return new_gen
def conv_to_dct(signal, blocksize, edge, out_blocksize):
blocks1 = []
blocks2 = []
for i in range(0, signal.shape[0]-blocksize-edge, blocksize-edge):
dct_block = dct(signal[i:i+blocksize], norm='ortho')
blocks1.append(dct_block)
if blocksize > out_blocksize:
for opw in range(len(blocks1)): blocks2.append(blocks1[opw][0:out_blocksize])
return blocks2
def conv_from_dct(blocks, in_blocksize, out_blocksize):
new_blocks=[]
zeropad = [0]*(out_blocksize-in_blocksize)
dct_pred = blocks
dct_pred = np.append(dct_pred, zeropad)
dct_pred = np.asarray(idct(dct_pred, norm='ortho'), dtype=np.float32)
new_blocks.append(dct_pred)
return new_blocks
def linear(u):
return (1-u, u)
def quadratic_out(u):
u = u * u
return (1-u, u)
def quadratic_in(u):
u = 1-u
u = u * u
return (u, 1-u)
def linear_bounce(u):
u = 2 * ( 0.5-u if u > 0.5 else u)
return (1-u, u)
def merge_sounds(sound1, sound2, fade=linear):
assert len(sound1)==len(sound2)
n = len(sound1)
new_sound = sound1
for t in range(n):
u = t / float(n)
amp1, amp2 = fade(u)
new_sound[t] = sound1[t]*amp1 + sound2[t]*amp2
return new_sound
def concat_sound_blocks(sound_blocks, edge):
print("sound_blocks shape:", np.asarray(sound_blocks[1]).shape)
new_gen = []
for i in range(0, len(sound_blocks)-2):
if i==0: temp1 = sound_blocks[i][0:-edge]
else: temp1 = sound_blocks[i][edge:-edge]
new_gen.append(temp1)
if i%100==0: print("temp1", np.asarray(temp1).shape)
merge_a = sound_blocks[i] [-edge:]
merge_b = sound_blocks[i+1][0:edge]
if edge==0: temp2 = merge_a
else: temp2 = merge_sounds(merge_a, merge_b)
if i%100==0: print("temp2", np.asarray(temp2).shape)
new_gen.append(temp2)
return new_gen
def conv_to_rfft(signal, blocksize, edge):
mag_blocks = []
ang_blocks = []
for i in range(0, signal.shape[0]-blocksize-edge, blocksize-edge):
fft_block = np.fft.rfft(signal[i:i+blocksize], norm='ortho')
mag_blocks.append(np.abs(fft_block))
ang_blocks.append(np.angle(fft_block))
return mag_blocks, ang_blocks
def conv_from_rfft(mag_blocks, ang_blocks=0):
new_blocks=[]
if ang_blocks==0:
fft_pred = []
for opq in range(len(mag_blocks)):
fft_x = np.cos(0)*mag_blocks[opq]
fft_y = np.sin(0)*mag_blocks[opq]
fft_pred.append(fft_x + 1.0j*fft_y)
new_blocks = np.asarray(np.fft.irfft(mag_blocks, norm='ortho'), dtype=np.float32)
print("new_blocks shape:", new_blocks.shape)
else:
for opq in range(len(mag_blocks)):
fft_x = np.cos(ang_blocks[opq])*mag_blocks[opq]
fft_y = np.sin(ang_blocks[opq])*mag_blocks[opq]
fft_pred = fft_x + 1.0j*fft_y
fft_pred = np.asarray(np.fft.irfft(fft_pred, norm='ortho'), dtype=np.float32)
new_blocks.append(fft_pred)
return new_blocks
| agpl-3.0 | 5,898,696,401,641,609,000 | 33.128 | 110 | 0.578228 | false |
mvcsantos/QGIS | python/plugins/processing/algs/qgis/RandomPointsAlongLines.py | 1 | 5227 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RandomPointsAlongLines.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import random
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsFields, QgsField, QgsGeometry, QgsSpatialIndex, QgsDistanceArea, QgsFeatureRequest, QgsFeature, QgsPoint
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class RandomPointsAlongLines(GeoAlgorithm):
VECTOR = 'VECTOR'
POINT_NUMBER = 'POINT_NUMBER'
MIN_DISTANCE = 'MIN_DISTANCE'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name = 'Random points along line'
self.group = 'Vector creation tools'
self.addParameter(ParameterVector(self.VECTOR,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_LINE]))
self.addParameter(ParameterNumber(self.POINT_NUMBER,
self.tr('Number of points'), 1, 9999999, 1))
self.addParameter(ParameterNumber(self.MIN_DISTANCE,
self.tr('Minimum distance'), 0.0, 9999999.0, 0.0))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Random points')))
def processAlgorithm(self):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.VECTOR))
pointCount = float(self.getParameterValue(self.POINT_NUMBER))
minDistance = float(self.getParameterValue(self.MIN_DISTANCE))
fields = QgsFields()
fields.append(QgsField('id', QVariant.Int, '', 10, 0))
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fields, QGis.WKBPoint, layer.dataProvider().crs())
nPoints = 0
nIterations = 0
maxIterations = pointCount * 200
featureCount = layer.featureCount()
total = 100.0 / pointCount
index = QgsSpatialIndex()
points = dict()
da = QgsDistanceArea()
request = QgsFeatureRequest()
random.seed()
while nIterations < maxIterations and nPoints < pointCount:
# pick random feature
fid = random.randint(0, featureCount - 1)
f = layer.getFeatures(request.setFilterFid(fid)).next()
fGeom = QgsGeometry(f.geometry())
if fGeom.isMultipart():
lines = fGeom.asMultiPolyline()
# pick random line
lineId = random.randint(0, len(lines) - 1)
vertices = lines[lineId]
else:
vertices = fGeom.asPolyline()
# pick random segment
if len(vertices) == 2:
vid = 0
else:
vid = random.randint(0, len(vertices) - 2)
startPoint = vertices[vid]
endPoint = vertices[vid + 1]
length = da.measureLine(startPoint, endPoint)
dist = length * random.random()
if dist > minDistance:
d = dist / (length - dist)
rx = (startPoint.x() + d * endPoint.x()) / (1 + d)
ry = (startPoint.y() + d * endPoint.y()) / (1 + d)
# generate random point
pnt = QgsPoint(rx, ry)
geom = QgsGeometry.fromPoint(pnt)
if vector.checkMinDistance(pnt, index, minDistance, points):
f = QgsFeature(nPoints)
f.initAttributes(1)
f.setFields(fields)
f.setAttribute('id', nPoints)
f.setGeometry(geom)
writer.addFeature(f)
index.insertFeature(f)
points[nPoints] = pnt
nPoints += 1
self.progress.emit(int(nPoints * total))
nIterations += 1
if nPoints < pointCount:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Can not generate requested number of random points. '
'Maximum number of attempts exceeded.'))
del writer
| gpl-2.0 | -8,700,106,233,471,537,000 | 37.718519 | 135 | 0.545246 | false |
saneyuki/servo | python/servo/bootstrap_commands.py | 1 | 22544 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import absolute_import, print_function, unicode_literals
import base64
import json
import os
import os.path as path
import platform
import re
import subprocess
import sys
import traceback
import six.moves.urllib as urllib
import glob
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
import servo.bootstrap as bootstrap
from servo.command_base import CommandBase, cd, check_call
from servo.util import delete, download_bytes, download_file, extract, check_hash
@CommandProvider
class MachCommands(CommandBase):
@Command('bootstrap',
description='Install required packages for building.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Boostrap without confirmation')
def bootstrap(self, force=False):
# This entry point isn't actually invoked, ./mach bootstrap is directly
# called by mach (see mach_bootstrap.bootstrap_command_only) so that
# it can install dependencies without needing mach's dependencies
return bootstrap.bootstrap(self.context, force=force)
@Command('bootstrap-salt',
description='Install and set up the salt environment.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap_salt(self, force=False):
return bootstrap.bootstrap(self.context, force=force, specific="salt")
@Command('bootstrap-gstreamer',
description='Set up a local copy of the gstreamer libraries (linux only).',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap_gstreamer(self, force=False):
return bootstrap.bootstrap(self.context, force=force, specific="gstreamer")
@Command('bootstrap-android',
description='Install the Android SDK and NDK.',
category='bootstrap')
@CommandArgument('--build',
action='store_true',
help='Install Android-specific dependencies for building')
@CommandArgument('--emulator-x86',
action='store_true',
help='Install Android x86 emulator and system image')
@CommandArgument('--accept-all-licences',
action='store_true',
help='For non-interactive use')
def bootstrap_android(self, build=False, emulator_x86=False, accept_all_licences=False):
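        # Fetch the Android NDK/SDK into android-toolchains/ and optionally
        # install the x86 emulator image. Typical use:
        #   ./mach bootstrap-android --build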
if not (build or emulator_x86):
print("Must specify `--build` or `--emulator-x86` or both.")
ndk = "android-ndk-r15c-{system}-{arch}"
tools = "sdk-tools-{system}-4333796"
emulator_platform = "android-28"
emulator_image = "system-images;%s;google_apis;x86" % emulator_platform
known_sha1 = {
# https://dl.google.com/android/repository/repository2-1.xml
"sdk-tools-darwin-4333796.zip": "ed85ea7b59bc3483ce0af4c198523ba044e083ad",
"sdk-tools-linux-4333796.zip": "8c7c28554a32318461802c1291d76fccfafde054",
"sdk-tools-windows-4333796.zip": "aa298b5346ee0d63940d13609fe6bec621384510",
# https://developer.android.com/ndk/downloads/older_releases
"android-ndk-r15c-windows-x86.zip": "f2e47121feb73ec34ced5e947cbf1adc6b56246e",
"android-ndk-r15c-windows-x86_64.zip": "970bb2496de0eada74674bb1b06d79165f725696",
"android-ndk-r15c-darwin-x86_64.zip": "ea4b5d76475db84745aa8828000d009625fc1f98",
"android-ndk-r15c-linux-x86_64.zip": "0bf02d4e8b85fd770fd7b9b2cdec57f9441f27a2",
}
toolchains = path.join(self.context.topdir, "android-toolchains")
if not path.isdir(toolchains):
os.makedirs(toolchains)
def download(target_dir, name, flatten=False):
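            # Download `name`.zip from Google's repository into the toolchains
            # directory, verify its SHA-1 against known_sha1, and extract it
            # into `target_dir`. With flatten=True the archive's single
            # top-level directory is stripped so its contents land directly in
            # `target_dir`.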
final = path.join(toolchains, target_dir)
if path.isdir(final):
return
base_url = "https://dl.google.com/android/repository/"
filename = name + ".zip"
url = base_url + filename
archive = path.join(toolchains, filename)
if not path.isfile(archive):
download_file(filename, url, archive)
check_hash(archive, known_sha1[filename], "sha1")
print("Extracting " + filename)
remove = True # Set to False to avoid repeated downloads while debugging this script
if flatten:
extracted = final + "_"
extract(archive, extracted, remove=remove)
contents = os.listdir(extracted)
assert len(contents) == 1
os.rename(path.join(extracted, contents[0]), final)
os.rmdir(extracted)
else:
extract(archive, final, remove=remove)
system = platform.system().lower()
machine = platform.machine().lower()
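        # Google's archive names use "x86" where platform.machine() reports
        # "i386"; map it, and pass other architectures through unchanged.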
arch = {"i386": "x86"}.get(machine, machine)
if build:
download("ndk", ndk.format(system=system, arch=arch), flatten=True)
download("sdk", tools.format(system=system))
components = []
if emulator_x86:
components += [
"platform-tools",
"emulator",
"platforms;" + emulator_platform,
emulator_image,
]
if build:
components += [
"platform-tools",
"platforms;android-18",
]
sdkmanager = [path.join(toolchains, "sdk", "tools", "bin", "sdkmanager")] + components
if accept_all_licences:
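            # Pipe the output of `yes` into sdkmanager's stdin so that every
            # licence prompt is answered affirmatively without user
            # interaction.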
yes = subprocess.Popen(["yes"], stdout=subprocess.PIPE)
process = subprocess.Popen(
sdkmanager, stdin=yes.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
# Reduce progress bar spam by removing duplicate lines.
# Printing the same line again with \r is a no-op in a real terminal,
# but each line is shown individually in Taskcluster's log viewer.
previous_line = None
line = b""
while 1:
# Read one byte at a time because in Python:
# * readline() blocks until "\n", which doesn't come before the prompt
# * read() blocks until EOF, which doesn't come before the prompt
# * read(n) keeps reading until it gets n bytes or EOF,
# but we don't know reliably how many bytes to read until the prompt
byte = process.stdout.read(1)
if len(byte) == 0:
                    print(line.decode("utf-8", "replace"))
break
line += byte
if byte == b'\n' or byte == b'\r':
if line != previous_line:
print(line.decode("utf-8", "replace"), end="")
sys.stdout.flush()
previous_line = line
line = b""
exit_code = process.wait()
yes.terminate()
if exit_code:
return exit_code
else:
subprocess.check_call(sdkmanager)
if emulator_x86:
avd_path = path.join(toolchains, "avd", "servo-x86")
process = subprocess.Popen(stdin=subprocess.PIPE, stdout=subprocess.PIPE, args=[
path.join(toolchains, "sdk", "tools", "bin", "avdmanager"),
"create", "avd",
"--path", avd_path,
"--name", "servo-x86",
"--package", emulator_image,
"--force",
])
output = b""
while 1:
# Read one byte at a time, see comment above.
byte = process.stdout.read(1)
if len(byte) == 0:
break
output += byte
# There seems to be no way to disable this prompt:
if output.endswith(b"Do you wish to create a custom hardware profile? [no]"):
process.stdin.write("no\n")
assert process.wait() == 0
with open(path.join(avd_path, "config.ini"), "a") as f:
f.write("disk.dataPartition.size=2G\n")
@Command('update-hsts-preload',
description='Download the HSTS preload list',
category='bootstrap')
def bootstrap_hsts_preload(self, force=False):
preload_filename = "hsts_preload.json"
preload_path = path.join(self.context.topdir, "resources")
chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
"/net/+/master/http/transport_security_state_static.json?format=TEXT"
try:
content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
except urllib.error.URLError:
print("Unable to download chromium HSTS preload list; are you connected to the internet?")
sys.exit(1)
content_decoded = base64.b64decode(content_base64)
# The chromium "json" has single line comments in it which, of course,
# are non-standard/non-valid json. Simply strip them out before parsing
content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
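        # e.g. '  { "name": "example.com", // see upstream note' becomes
        # '  { "name": "example.com",' before json.loads parses it.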
try:
pins_and_static_preloads = json.loads(content_json)
entries = {
"entries": [
{
"host": e["name"],
"include_subdomains": e.get("include_subdomains", False)
}
for e in pins_and_static_preloads["entries"]
]
}
with open(path.join(preload_path, preload_filename), 'w') as fd:
json.dump(entries, fd, indent=4)
        except ValueError:
print("Unable to parse chromium HSTS preload list, has the format changed?")
sys.exit(1)
@Command('update-pub-domains',
description='Download the public domains list and update resources/public_domains.txt',
category='bootstrap')
def bootstrap_pub_suffix(self, force=False):
list_url = "https://publicsuffix.org/list/public_suffix_list.dat"
dst_filename = path.join(self.context.topdir, "resources", "public_domains.txt")
not_implemented_case = re.compile(r'^[^*]+\*')
try:
content = download_bytes("Public suffix list", list_url)
except urllib.error.URLError:
print("Unable to download the public suffix list; are you connected to the internet?")
sys.exit(1)
lines = [l.strip() for l in content.decode("utf8").split("\n")]
suffixes = [l for l in lines if not l.startswith("//") and not l == ""]
with open(dst_filename, "wb") as fo:
for suffix in suffixes:
if not_implemented_case.match(suffix):
print("Warning: the new list contains a case that servo can't handle: %s" % suffix)
fo.write(suffix.encode("idna") + "\n")
@Command('clean-nightlies',
description='Clean unused nightly builds of Rust and Cargo',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent nightlies')
def clean_nightlies(self, force=False, keep=None):
default_toolchain = self.default_toolchain()
print("Current Rust version for Servo: {}".format(default_toolchain))
old_toolchains = []
keep = int(keep)
stdout = subprocess.check_output(['git', 'log', '--format=%H', 'rust-toolchain'])
for i, commit_hash in enumerate(stdout.split(), 1):
if i > keep:
toolchain = subprocess.check_output(
['git', 'show', '%s:rust-toolchain' % commit_hash])
old_toolchains.append(toolchain.strip())
removing_anything = False
stdout = subprocess.check_output(['rustup', 'toolchain', 'list'])
for toolchain_with_host in stdout.split():
for old in old_toolchains:
if toolchain_with_host.startswith(old):
removing_anything = True
if force:
print("Removing {}".format(toolchain_with_host))
check_call(["rustup", "uninstall", toolchain_with_host])
else:
print("Would remove {}".format(toolchain_with_host))
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("Nothing done. "
"Run `./mach clean-nightlies -f` to actually remove.")
@Command('clean-cargo-cache',
description='Clean unused Cargo packages',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--show-size', '-s',
action='store_true',
help='Show packages size')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent dependencies')
def clean_cargo_cache(self, force=False, show_size=False, keep=None):
def get_size(path):
if os.path.isfile(path):
return os.path.getsize(path) / (1024 * 1024.0)
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size / (1024 * 1024.0)
removing_anything = False
packages = {
'crates': {},
'git': {},
}
import toml
if os.environ.get("CARGO_HOME", ""):
cargo_dir = os.environ.get("CARGO_HOME")
else:
home_dir = os.path.expanduser("~")
cargo_dir = path.join(home_dir, ".cargo")
if not os.path.isdir(cargo_dir):
return
cargo_file = open(path.join(self.context.topdir, "Cargo.lock"))
content = toml.load(cargo_file)
for package in content.get("package", []):
source = package.get("source", "")
version = package["version"]
if source == u"registry+https://github.com/rust-lang/crates.io-index":
crate_name = "{}-{}".format(package["name"], version)
if not packages["crates"].get(crate_name, False):
packages["crates"][package["name"]] = {
"current": [],
"exist": [],
}
packages["crates"][package["name"]]["current"].append(crate_name)
elif source.startswith("git+"):
name = source.split("#")[0].split("/")[-1].replace(".git", "")
branch = ""
crate_name = "{}-{}".format(package["name"], source.split("#")[1])
crate_branch = name.split("?")
if len(crate_branch) > 1:
branch = crate_branch[1].replace("branch=", "")
name = crate_branch[0]
if not packages["git"].get(name, False):
packages["git"][name] = {
"current": [],
"exist": [],
}
packages["git"][name]["current"].append(source.split("#")[1][:7])
if branch:
packages["git"][name]["current"].append(branch)
crates_dir = path.join(cargo_dir, "registry")
crates_cache_dir = ""
crates_src_dir = ""
if os.path.isdir(path.join(crates_dir, "cache")):
for p in os.listdir(path.join(crates_dir, "cache")):
crates_cache_dir = path.join(crates_dir, "cache", p)
crates_src_dir = path.join(crates_dir, "src", p)
git_dir = path.join(cargo_dir, "git")
git_db_dir = path.join(git_dir, "db")
git_checkout_dir = path.join(git_dir, "checkouts")
if os.path.isdir(git_db_dir):
git_db_list = filter(lambda f: not f.startswith('.'), os.listdir(git_db_dir))
else:
git_db_list = []
if os.path.isdir(git_checkout_dir):
git_checkout_list = os.listdir(git_checkout_dir)
else:
git_checkout_list = []
for d in list(set(git_db_list + git_checkout_list)):
crate_name = d.replace("-{}".format(d.split("-")[-1]), "")
if not packages["git"].get(crate_name, False):
packages["git"][crate_name] = {
"current": [],
"exist": [],
}
if os.path.isdir(path.join(git_checkout_dir, d)):
with cd(path.join(git_checkout_dir, d)):
git_crate_hash = glob.glob('*')
if not git_crate_hash or not os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
continue
for d2 in git_crate_hash:
dep_path = path.join(git_checkout_dir, d, d2)
if os.path.isdir(dep_path):
packages["git"][crate_name]["exist"].append((path.getmtime(dep_path), d, d2))
elif os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
if crates_src_dir:
for d in os.listdir(crates_src_dir):
crate_name = re.sub(r"\-\d+(\.\d+){1,3}.+", "", d)
if not packages["crates"].get(crate_name, False):
packages["crates"][crate_name] = {
"current": [],
"exist": [],
}
packages["crates"][crate_name]["exist"].append(d)
total_size = 0
for packages_type in ["git", "crates"]:
sorted_packages = sorted(packages[packages_type])
for crate_name in sorted_packages:
crate_count = 0
existed_crates = packages[packages_type][crate_name]["exist"]
for exist in sorted(existed_crates, reverse=True):
current_crate = packages[packages_type][crate_name]["current"]
size = 0
exist_name = path.join(exist[1], exist[2]) if packages_type == "git" else exist
exist_item = exist[2] if packages_type == "git" else exist
if exist_item not in current_crate:
crate_count += 1
if int(crate_count) >= int(keep) or not current_crate or \
exist[0] == "del" or exist[2] == "master":
removing_anything = True
crate_paths = []
if packages_type == "git":
exist_checkout_path = path.join(git_checkout_dir, exist[1])
exist_db_path = path.join(git_db_dir, exist[1])
exist_path = path.join(git_checkout_dir, exist_name)
if exist[0] == "del":
if os.path.isdir(exist_checkout_path):
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
crate_paths.append(exist_db_path)
crate_count += -1
else:
crate_paths.append(exist_path)
exist_checkout_list = glob.glob(path.join(exist_checkout_path, '*'))
if len(exist_checkout_list) <= 1:
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
crate_paths.append(exist_db_path)
else:
crate_paths.append(path.join(crates_cache_dir, "{}.crate".format(exist)))
crate_paths.append(path.join(crates_src_dir, exist))
size = sum(get_size(p) for p in crate_paths) if show_size else 0
total_size += size
print_msg = (exist_name, " ({}MB)".format(round(size, 2)) if show_size else "", cargo_dir)
if force:
print("Removing `{}`{} package from {}".format(*print_msg))
for crate_path in crate_paths:
if os.path.exists(crate_path):
try:
delete(crate_path)
                                        except Exception:
print(traceback.format_exc())
print("Delete %s failed!" % crate_path)
else:
print("Would remove `{}`{} package from {}".format(*print_msg))
if removing_anything and show_size:
print("\nTotal size of {} MB".format(round(total_size, 2)))
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("\nNothing done. "
"Run `./mach clean-cargo-cache -f` to actually remove.")
| mpl-2.0 | 885,229,770,263,548,800 | 44.821138 | 118 | 0.522489 | false |
cabanm/project-euler | Problem 23/problem23.py | 1 | 1277 | # Find the sum of all the positive integers which
# cannot be written as the sum of two abundant numbers.
#
# Facts:
# All integers greater than 28123 can be
# written as the sum of two abundant numbers.
# Abundant number: the sum of the proper divisors of n exceeds n.
#
# Find all abundant numbers up to and including 28123
# Add all combinations of these and store if not greater than 28123
# Add all integers <= 28123 not in the list to get required sum
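# Worked check (illustrative only): the proper divisors of 12 are
# 1, 2, 3, 4 and 6, summing to 16 > 12, so 12 is the smallest abundant
# number and 24 = 12 + 12 is the smallest sum of two abundant numbers.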
from myMath import *
abundants = list()
for n in range(1, 28123 + 1):
if sum(int(n).properDivisors()) > n:
abundants.append(n)
print('stage 1 complete --', 'number of abundants = ', len(abundants))
sums = list()
for i, n in enumerate(abundants):
for m in abundants[i:]:
if n+m <= 28123:
sums.append(n+m)
sums = sorted(set(sums))
print('stage 2 complete --', 'number of sums of abundants = ', len(sums))
sumsIndeces = [0]*(28123 + 1)
for i, n in enumerate(sums):
sumsIndeces.pop(n)
sumsIndeces.insert(n,1) # places a one at every index that is sum of abundants
if i%1000 == 0:
print(i)
print('stage 3 complete')
total = 0
for n in range(len(sumsIndeces)):
if sumsIndeces[n] == 0:
total += n
print('sum = ', total)
| gpl-2.0 | 7,158,230,133,010,168,000 | 29.146341 | 82 | 0.645262 | false |
mapbox/atom-shell | script/create-dist.py | 1 | 7365 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import subprocess
import sys
import tarfile
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, NODE_VERSION, \
TARGET_PLATFORM, DIST_ARCH
from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \
safe_mkdir, execute
ATOM_SHELL_VERSION = get_atom_shell_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'Release')
NODE_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'node')
DIST_HEADERS_NAME = 'node-{0}'.format(NODE_VERSION)
DIST_HEADERS_DIR = os.path.join(DIST_DIR, DIST_HEADERS_NAME)
SYMBOL_NAME = {
'darwin': 'libchromiumcontent.dylib.dSYM',
'linux': 'libchromiumcontent.so.dbg',
'win32': 'chromiumcontent.dll.pdb',
}[TARGET_PLATFORM]
TARGET_BINARIES = {
'darwin': [
],
'win32': [
'atom.exe',
'chromiumcontent.dll',
'content_shell.pak',
'd3dcompiler_43.dll',
'ffmpegsumo.dll',
'icudtl.dat',
'libEGL.dll',
'libGLESv2.dll',
'msvcp120.dll',
'msvcr120.dll',
'ui_resources_200_percent.pak',
'vccorlib120.dll',
'webkit_resources_200_percent.pak',
'xinput1_3.dll',
],
'linux': [
'atom',
'content_shell.pak',
'icudtl.dat',
'libchromiumcontent.so',
'libffmpegsumo.so',
],
}
TARGET_DIRECTORIES = {
'darwin': [
'Atom.app',
],
'win32': [
'resources',
'locales',
],
'linux': [
'resources',
'locales',
],
}
SYSTEM_LIBRARIES = [
'libudev.so',
'libgcrypt.so',
'libnotify.so',
]
HEADERS_SUFFIX = [
'.h',
'.gypi',
]
HEADERS_DIRS = [
'src',
'deps/http_parser',
'deps/zlib',
'deps/uv',
'deps/npm',
'deps/mdb_v8',
]
HEADERS_FILES = [
'common.gypi',
'config.gypi',
]
def main():
rm_rf(DIST_DIR)
os.makedirs(DIST_DIR)
args = parse_args()
force_build()
download_libchromiumcontent_symbols(args.url)
create_symbols()
copy_binaries()
copy_headers()
copy_license()
if TARGET_PLATFORM == 'linux':
copy_system_libraries()
create_version()
create_dist_zip()
create_symbols_zip()
create_header_tarball()
def parse_args():
parser = argparse.ArgumentParser(description='Create distributions')
parser.add_argument('-u', '--url',
help='The base URL from which to download '
'libchromiumcontent (i.e., the URL you passed to '
'libchromiumcontent\'s script/upload script',
default=BASE_URL,
required=False)
return parser.parse_args()
def force_build():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
for binary in TARGET_BINARIES[TARGET_PLATFORM]:
shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
for directory in TARGET_DIRECTORIES[TARGET_PLATFORM]:
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def copy_headers():
os.mkdir(DIST_HEADERS_DIR)
  # Copy standard node headers from the node repository.
for include_path in HEADERS_DIRS:
abs_path = os.path.join(NODE_DIR, include_path)
for dirpath, _, filenames in os.walk(abs_path):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension not in HEADERS_SUFFIX:
continue
copy_source_file(os.path.join(dirpath, filename))
for other_file in HEADERS_FILES:
    copy_source_file(source=os.path.join(NODE_DIR, other_file))
# Copy V8 headers from chromium's repository.
src = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor', 'download',
'libchromiumcontent', 'src')
for dirpath, _, filenames in os.walk(os.path.join(src, 'v8')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension not in HEADERS_SUFFIX:
continue
copy_source_file(source=os.path.join(dirpath, filename),
start=src,
destination=os.path.join(DIST_HEADERS_DIR, 'deps'))
def copy_license():
shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def copy_system_libraries():
ldd = execute(['ldd', os.path.join(OUT_DIR, 'atom')])
  lib_re = re.compile(r'\t(.*) => (.+) \(.*\)$')
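  # Matches ldd output lines such as
  # "\tlibudev.so.1 => /lib/x86_64-linux-gnu/libudev.so.1 (0x...)"
  # (the path shown here is illustrative).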
for line in ldd.splitlines():
m = lib_re.match(line)
if not m:
continue
for i, library in enumerate(SYSTEM_LIBRARIES):
real_library = m.group(1)
if real_library.startswith(library):
shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library))
SYSTEM_LIBRARIES[i] = real_library
def create_version():
version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
with open(version_path, 'w') as version_file:
version_file.write(ATOM_SHELL_VERSION)
def download_libchromiumcontent_symbols(url):
brightray_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor')
target_dir = os.path.join(brightray_dir, 'download', 'libchromiumcontent')
symbols_path = os.path.join(target_dir, 'Release', SYMBOL_NAME)
if os.path.exists(symbols_path):
return
download = os.path.join(brightray_dir, 'libchromiumcontent', 'script',
'download')
subprocess.check_call([sys.executable, download, '-f', '-s', '-c',
LIBCHROMIUMCONTENT_COMMIT, url, target_dir])
def create_symbols():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
subprocess.check_output([sys.executable, build, '-c', 'Release',
'-t', 'atom_dump_symbols'])
directory = 'Atom-Shell.breakpad.syms'
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def create_dist_zip():
dist_name = 'atom-shell-{0}-{1}-{2}.zip'.format(ATOM_SHELL_VERSION,
TARGET_PLATFORM, DIST_ARCH)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = TARGET_BINARIES[TARGET_PLATFORM] + ['LICENSE', 'version']
if TARGET_PLATFORM == 'linux':
files += SYSTEM_LIBRARIES
dirs = TARGET_DIRECTORIES[TARGET_PLATFORM]
make_zip(zip_file, files, dirs)
def create_symbols_zip():
dist_name = 'atom-shell-{0}-{1}-{2}-symbols.zip'.format(ATOM_SHELL_VERSION,
TARGET_PLATFORM,
DIST_ARCH)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = ['LICENSE', 'version']
dirs = ['Atom-Shell.breakpad.syms']
make_zip(zip_file, files, dirs)
def create_header_tarball():
with scoped_cwd(DIST_DIR):
tarball = tarfile.open(name=DIST_HEADERS_DIR + '.tar.gz', mode='w:gz')
tarball.add(DIST_HEADERS_NAME)
tarball.close()
def copy_source_file(source, start=NODE_DIR, destination=DIST_HEADERS_DIR):
relative = os.path.relpath(source, start=start)
final_destination = os.path.join(destination, relative)
safe_mkdir(os.path.dirname(final_destination))
shutil.copy2(source, final_destination)
if __name__ == '__main__':
sys.exit(main())
| mit | 4,588,913,929,787,219,500 | 27.326923 | 78 | 0.621453 | false |
requests/requests-ntlm | tests/functional/test_functional.py | 1 | 2038 | import requests
import requests_ntlm
"""
These tests are meant to run on AppVeyor, but until that integration is
in place they can only be run locally. The script setup_iis.ps1 can set up
an IIS server with the four scenarios tested below for a sanity check.
"""
username = '.\\User'
password = 'Password01'
http_with_cbt = 'http://127.0.0.1:81/contents.txt'
http_without_cbt = 'http://127.0.0.1:82/contents.txt'
https_with_cbt = 'https://127.0.0.1:441/contents.txt'
https_without_cbt = 'https://127.0.0.1:442/contents.txt'
expected = 'contents'
class Test_Functional():
def test_ntlm_http_with_cbt(self):
actual = send_request(http_with_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def test_ntlm_http_without_cbt(self):
actual = send_request(http_without_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def test_ntlm_https_with_cbt(self):
actual = send_request(https_with_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def test_ntlm_https_without_cbt(self):
actual = send_request(https_without_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def send_request(url, username, password):
"""
Sends a request to the url with the credentials specified. Returns the final response
"""
session = requests.Session()
session.verify = False
session.auth = requests_ntlm.HttpNtlmAuth(username, password)
response = session.get(url)
return response
| isc | 1,541,724,218,388,008,000 | 32.966667 | 89 | 0.676644 | false |
afh/cmakedash | cmakedash.py | 1 | 1690 | #!/usr/bin/env python
#
# cmakedash - a dash docset generator for CMake
import os
import re
import subprocess
from bs4 import BeautifulSoup, NavigableString, Tag
from docsetgenerator import DocsetGenerator
class CMakeDocsetGenerator (DocsetGenerator):
def __init__(self):
DocsetGenerator.__init__(self)
self.docsetName = 'CMake'
self.iconFilename = 'icon.tiff'
def helpFilename(self):
return os.path.join(self.documentsPath(), 'index.html')
def dashFeedVersion(self):
cmakeVersion = subprocess.check_output('cmake --version'.split()).split()
return cmakeVersion[2]
def generateHtml(self):
os.system("cmake --help-html > '%s'" % (self.helpFilename()))
def generateIndex(self):
page = open(self.helpFilename()).read()
soup = BeautifulSoup(page)
any = re.compile('.*')
for tag in soup.find_all('a', {'href':any}):
name = tag.text.strip()
if len(name) > 0:
path = tag.attrs['href'].strip()
if path.startswith('#command'):
stype = 'Command'
elif path.startswith('#opt'):
stype = 'Option'
elif path.startswith('#variable'):
stype = 'Variable'
elif path.startswith('#module'):
stype = 'Module'
elif path.startswith('#prop_') or path.startswith('#property'):
stype = 'Property'
elif path.startswith('http'):
continue
else:
if self.verbose: print 'Skipping %s' % (path)
continue
path = 'index.html%s' % (path)
self.addIndexEntry(name, stype, path)
if __name__ == '__main__':
generator = CMakeDocsetGenerator()
args = generator.getargs()
generator.run(args)
| mit | -1,009,468,933,443,884,300 | 27.644068 | 77 | 0.621302 | false |
hjanime/VisTrails | vistrails/db/versions/v0_9_3/domain/workflow.py | 1 | 7277 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from auto_gen import DBWorkflow as _DBWorkflow
from auto_gen import DBAbstractionRef, DBModule, DBGroup
from id_scope import IdScope
import copy
class DBWorkflow(_DBWorkflow):
def __init__(self, *args, **kwargs):
_DBWorkflow.__init__(self, *args, **kwargs)
self.objects = {}
self.tmp_id = IdScope(1,
{DBAbstractionRef.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
def __copy__(self):
return DBWorkflow.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBWorkflow.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBWorkflow
# need to go through and reset the index to the copied objects
cp.build_index()
cp.tmp_id = copy.copy(self.tmp_id)
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBWorkflow()
new_obj = _DBWorkflow.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
new_obj.build_index()
return new_obj
def update_id_scope(self):
pass
_vtTypeMap = {'abstractionRef': 'module', 'group': 'module'}
def build_index(self):
g = self._vtTypeMap.get
self.objects = dict(((g(o.vtType, o.vtType), o._db_id), o)
for (o,_,_) in self.db_children())
def add_to_index(self, object):
obj_type = self._vtTypeMap.get(object.vtType, object.vtType)
self.objects[(obj_type, object.getPrimaryKey())] = object
def delete_from_index(self, object):
obj_type = self._vtTypeMap.get(object.vtType, object.vtType)
del self.objects[(obj_type, object.getPrimaryKey())]
def capitalizeOne(self, str):
return str[0].upper() + str[1:]
def db_print_objects(self):
for k,v in self.objects.iteritems():
print '%s: %s' % (k, v)
def db_has_object(self, type, id):
return (type, id) in self.objects
def db_get_object(self, type, id):
return self.objects[(type, id)]
def db_add_object(self, object, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
if object.vtType == 'abstractionRef' or object.vtType == 'group':
obj_type = 'module'
else:
obj_type = object.vtType
funname = 'db_add_' + obj_type
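        # Dynamic dispatch on the object type: e.g. adding a DBModule
        # resolves to parent_obj.db_add_module(obj_copy) via getattr below.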
obj_copy = copy.copy(object)
getattr(parent_obj, funname)(obj_copy)
self.add_to_index(obj_copy)
def db_change_object(self, old_id, object, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
self.db_delete_object(old_id, object.vtType, None, None, parent_obj)
self.db_add_object(object, None, None, parent_obj)
def db_delete_object(self, obj_id, obj_type, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
if obj_type == 'abstractionRef' or obj_type == 'group':
obj_type = 'module'
funname = 'db_get_' + obj_type
if hasattr(parent_obj, funname):
object = getattr(parent_obj, funname)(obj_id)
else:
attr_name = 'db_' + obj_type
object = getattr(parent_obj, attr_name)
funname = 'db_delete_' + obj_type
getattr(parent_obj, funname)(object)
self.delete_from_index(object)
| bsd-3-clause | -6,573,125,723,641,042,000 | 41.063584 | 79 | 0.578123 | false |
semanticize/semanticizest | semanticizest/parse_wikidump/__init__.py | 1 | 11223 | """Parsing utilities for Wikipedia database dumps."""
from __future__ import print_function
from os.path import basename
from bz2 import BZ2File
from collections import Counter, namedtuple
import gzip
from HTMLParser import HTMLParser
from itertools import chain
import logging
import re
import xml.etree.ElementTree as etree # don't use LXML, it's slower (!)
import six
from semanticizest._util import ngrams
from semanticizest._version import __version__
_logger = logging.getLogger(__name__)
Page = namedtuple("Page", ['page_id', 'title', 'content', 'redirect'])
def _get_namespace(tag):
try:
namespace = re.match(r"^{(.*?)}", tag).group(1)
except AttributeError:
namespace = ''
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("namespace %r not recognized as MediaWiki dump"
% namespace)
return namespace
if six.PY3:
def _tounicode(s):
return s
else:
def _tounicode(s):
# Convert ASCII strings coming from xml.etree.
if isinstance(s, str):
s = s.decode('ascii')
return s
def extract_pages(f):
"""Extract pages from Wikimedia database dump.
Parameters
----------
f : file-like or str
Handle on Wikimedia article dump. May be any type supported by
etree.iterparse.
Returns
-------
pages : iterable over `Page`s
        namedtuples containing the fields (page_id, title, content,
        redirect_target). In Python 2.x, may produce either
str or unicode strings.
"""
elems = etree.iterparse(f, events=["end"])
# We can't rely on the namespace for database dumps, since it's changed
    # every time a small modification to the format is made. So, determine
# those from the first element we find, which will be part of the metadata,
# and construct element paths.
_, elem = next(elems)
namespace = _get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
ns_path = "./{%(ns)s}ns" % ns_mapping
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
id_path = "./{%(ns)s}id" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
redir_path = "./{%(ns)s}redirect" % ns_mapping
for _, elem in elems:
if elem.tag == page_tag:
if elem.find(ns_path).text != '0':
continue
text = elem.find(text_path).text
if text is None:
# Empty article; these occur in Wikinews dumps.
continue
redir = elem.find(redir_path)
redir = (_tounicode(redir.attrib['title'])
if redir is not None else None)
text = _tounicode(text)
title = _tounicode(elem.find(title_path).text)
yield Page(int(elem.find(id_path).text), title, text, redir)
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. That shouldn't matter since the pages
# comprise the bulk of the file.
elem.clear()
def _clean_link(l):
"""Clean links (anchor and titles)."""
l = l.strip()
l = re.sub(r'\s+', ' ', l)
return l
def extract_links(article):
"""Extract all (or most) links from article text (wiki syntax).
Returns an iterable over (target, anchor) pairs.
"""
links = re.findall(r"(\w*) \[\[ ([^]]+) \]\] (\w*)", article,
re.UNICODE | re.VERBOSE)
r = []
for before, l, after in links:
if '|' in l:
target, anchor = l.split('|', 1)
else:
target, anchor = l, l
# If the anchor contains a colon, assume it's a file or category link.
if ':' in target:
continue
# Some links contain newlines...
target = _clean_link(target)
anchor = _clean_link(anchor)
# Remove section links and normalize to the format used in <redirect>
# elements: uppercase first character, spaces instead of underscores.
target = target.split('#', 1)[0].replace('_', ' ')
if not target:
continue # section link
if not target[0].isupper():
target = target[0].upper() + target[1:]
anchor = before + anchor + after
r.append((target, anchor))
return r
_UNWANTED = re.compile(r"""
(:?
\{\{ .*? \}\}
| \{\| .*? \|\}
| ^[|!] .* $ # table content
| <math> .*? </math>
| <ref .*? > .*? </ref>
| <br\s*/>
| </?su[bp]\s*>
| \[\[ [^][:]* : (\[\[.*?\]\]|.)*? \]\] # media, categories
| =+ .*? =+ # headers
| ''+
| ^\* # list bullets
)
""", re.DOTALL | re.MULTILINE | re.UNICODE | re.VERBOSE)
_unescape_entities = HTMLParser().unescape
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page))
_LINK_SYNTAX = re.compile(r"""
(?:
\[\[
(?: [^]|]* \|)? # "target|" in [[target|anchor]]
|
\]\]
)
""", re.DOTALL | re.MULTILINE | re.VERBOSE)
def remove_links(page):
"""Remove links from clean_text output."""
page = re.sub(r'\]\]\[\[', ' ', page) # hack hack hack, see test
return re.sub(_LINK_SYNTAX, '', page)
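# Roughly: for clean_text output such as "see [[Target page|the anchor]] text",
# remove_links yields "see the anchor text"; link targets go, anchors stay.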
def page_statistics(page, N, sentence_splitter=None, tokenizer=None):
"""Gather statistics from a single WP page.
The sentence_splitter should be a callable that splits text into sentences.
It defaults to an unspecified heuristic.
See ``parse_dump`` for the parameters.
Returns
-------
stats : (dict, dict)
The first dict maps (target, anchor) pairs to counts.
The second maps n-grams (up to N) to counts.
"""
if N is not None and not isinstance(N, int):
raise TypeError("expected integer or None for N, got %r" % N)
clean = clean_text(page)
link_counts = Counter(extract_links(clean))
if N:
no_links = remove_links(clean)
if sentence_splitter is None:
            sentences = re.split(r'(?:\n{2,}|\.\s+)', no_links,
                                 flags=re.MULTILINE | re.UNICODE)
else:
            sentences = [sentence
                         for paragraph in re.split('\n+', no_links)
                         for sentence in sentence_splitter(paragraph)]
if tokenizer is None:
tokenizer = re.compile(r'\w+', re.UNICODE).findall
all_ngrams = chain.from_iterable(ngrams(tokenizer(sentence), N)
for sentence in sentences)
ngram_counts = Counter(all_ngrams)
else:
ngram_counts = None
return link_counts, ngram_counts
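# Quick sketch of how the two counters relate (values per the helpers above):
#   links, grams = page_statistics("x [[A|a b]] y", N=2)
#   links == Counter({('A', 'a b'): 1}); grams counts the 1- and 2-grams
#   of the link-free text "x a b y".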
def _open(f):
if isinstance(f, six.string_types):
if f.endswith('.gz'):
return gzip.open(f)
elif f.endswith('.bz2'):
return BZ2File(f)
return open(f)
return f
def parse_dump(dump, db, N=7, sentence_splitter=None, tokenizer=None):
"""Parse Wikipedia database dump, return n-gram and link statistics.
Parameters
----------
dump : {file-like, str}
Path to or handle on a Wikipedia page dump, e.g.
'chowiki-20140919-pages-articles.xml.bz2'.
db : SQLite connection
Connection to database that will be used to store statistics.
N : integer
Maximum n-gram length. Set this to a false value to disable
n-gram counting; this disables some of the fancier statistics,
but baseline entity linking will still work.
sentence_splitter : callable, optional
Sentence splitter. Called on output of paragraph splitter
(strings).
tokenizer : callable, optional
Tokenizer. Called on output of sentence splitter (strings).
Must return iterable over strings.
"""
f = _open(dump)
redirects = {}
c = db.cursor()
# Store the semanticizer version for later reference
c.execute('''insert into parameters values ('version', ?);''',
(__version__,))
# Store the dump file name
c.execute('''insert into parameters values ('dump', ?);''',
(basename(dump),))
# Store the maximum ngram length, so we can use it later on
c.execute('''insert into parameters values ('N', ?);''', (str(N),))
# Temporary index to speed up insertion
c.execute('''create unique index target_anchor
on linkstats(ngram_id, target)''')
_logger.info("Processing articles")
for i, page in enumerate(extract_pages(f), 1):
if i % 10000 == 0:
_logger.info("%d articles done", i)
if page.redirect is not None:
redirects[page.title] = page.redirect
continue
link, ngram = page_statistics(page.content, N=N, tokenizer=tokenizer,
sentence_splitter=sentence_splitter)
# We don't count the n-grams within the links, but we need them
# in the table, so add them with zero count.
tokens = chain(six.iteritems(ngram or {}),
((anchor, 0) for _, anchor in six.iterkeys(link)))
tokens = list(tokens)
c.executemany('''insert or ignore into ngrams (ngram) values (?)''',
((g,) for g, _ in tokens))
c.executemany('''update ngrams set tf = tf + ?, df = df + 1
where ngram = ?''',
((count, token) for token, count in tokens))
c.executemany('''insert or ignore into linkstats values
((select id from ngrams where ngram = ?), ?, 0)''',
((anchor, target)
for target, anchor in six.iterkeys(link)))
c.executemany('''update linkstats set count = count + ?
where ngram_id = (select rowid from ngrams
where ngram = ?)''',
((count, anchor)
for (_, anchor), count in six.iteritems(link)))
db.commit()
_logger.info("Processing %d redirects", len(redirects))
for redir, target in redirects.items():
for anchor, count in c.execute('''select ngram_id, count from linkstats
where target = ?''', [redir]):
# TODO: combine the next two execute statements
c.execute('''insert or ignore into linkstats values (?, ?, 0)''',
[anchor, target])
c.execute('''update linkstats
set count = count + ?
where target = ? and ngram_id = ?''',
(count, target, anchor))
c.executemany('delete from linkstats where target = ?',
([redir] for redir in redirects))
_logger.info("Finalizing database")
c.executescript('''drop index target_anchor; vacuum;''')
_logger.info("Dump parsing done: processed %d articles", i)
db.commit()
| apache-2.0 | 3,747,019,002,081,521,700 | 32.008824 | 79 | 0.557605 | false |
KelSolaar/sIBL_GUI | sibl_gui/components/core/inspector/models.py | 1 | 2265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**models.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines the :class:`sibl_gui.components.core.inspector.inspector.Inspector`
Component Interface class Models.
**Others:**
"""
from __future__ import unicode_literals
import foundations.verbose
import sibl_gui.ui.models
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["LOGGER", "PlatesModel"]
LOGGER = foundations.verbose.install_logger()
class PlatesModel(sibl_gui.ui.models.GraphModel):
"""
Defines the Model used by the :class:`sibl_gui.components.core.inspector.inspector.Inspector`
Component Interface class.
"""
def __init__(self, parent=None, root_node=None, horizontal_headers=None, vertical_headers=None):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param root_node: Root node.
:type root_node: AbstractCompositeNode
:param horizontal_headers: Headers.
:type horizontal_headers: OrderedDict
:param vertical_headers: Headers.
:type vertical_headers: OrderedDict
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
sibl_gui.ui.models.GraphModel.__init__(self,
parent,
root_node,
horizontal_headers,
vertical_headers)
def initialize_model(self, root_node):
"""
Initializes the Model using given root node.
:param root_node: Graph root node.
:type root_node: DefaultNode
:return: Method success
:rtype: bool
"""
LOGGER.debug("> Initializing model with '{0}' root node.".format(root_node))
self.beginResetModel()
self.root_node = root_node
self.enable_model_triggers(True)
self.endResetModel()
return True
| gpl-3.0 | 2,661,083,915,787,938,300 | 27.670886 | 100 | 0.587638 | false |
KirillMysnik/ArcJail | srcds/addons/source-python/plugins/arcjail/modules/games/base_classes/prepare_time.py | 1 | 6961 | # This file is part of ArcJail.
#
# ArcJail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ArcJail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ArcJail. If not, see <http://www.gnu.org/licenses/>.
from events.manager import event_manager
from listeners.tick import Delay, on_tick_listener_manager
from messages import TextMsg
from ....internal_events import internal_event_manager
from ...overlays import show_overlay
from ...players import broadcast, player_manager
from .. import config_manager, stage, strings_module
from .jail_game import JailGame
class PrepareTime(JailGame):
stage_groups = {
'init': ["prepare-prepare", ],
'destroy': [
"prepare-cancel-delays",
"unsend-popups",
"cancel-delays",
"destroy",
],
'prepare-start': [
'prepare-freeze',
'prepare-register-event-handlers',
'prepare-entry',
],
'abort-prepare-interrupted': ["abort-prepare-interrupted", ],
'prepare-continue': [
"prepare-cancel-countdown",
"prepare-undo-prepare-start",
"register-event-handlers",
"start-notify",
"basegame-entry",
],
}
def __init__(self, leader_player, players, **kwargs):
super().__init__(leader_player, players, **kwargs)
self._prepare_delay = None
self._prepare_countdown = None
@stage('prepare-prepare')
def stage_prepare_prepare(self):
if self._settings.get('prepare', True):
indexes = list(player.index for player in self._players)
if self.leader.index not in indexes:
indexes.append(self.leader.index)
def callback():
self.undo_stages('prepare-start')
self.set_stage_group('prepare-continue')
self._prepare_delay = Delay(
config_manager['prepare_timeout'], callback)
def countdown(ticks_left):
if (ticks_left > 3 or ticks_left < 1 or config_manager[
'countdown_{}_material'.format(ticks_left)] == ""):
TextMsg(str(ticks_left)).send(*indexes)
else:
for player in self._players:
show_overlay(player, config_manager[
'countdown_{}_material'.format(ticks_left)], 1)
if config_manager['countdown_sound'] is not None:
config_manager['countdown_sound'].play(*indexes)
self._prepare_countdown = Delay(1.0, countdown, ticks_left - 1)
countdown(int(config_manager['prepare_timeout']))
broadcast(strings_module['stage_prepare'])
if config_manager['prepare_sound'] is not None:
config_manager['prepare_sound'].play(*indexes)
self.set_stage_group('prepare-start')
else:
self.set_stage_group('prepare-continue')
def _prepare_event_handler_player_death(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if player in self._players or player == self.leader:
self.set_stage_group('abort-prepare-interrupted')
def _prepare_event_handler_player_deleted(self, player):
if player in self._players or player == self.leader:
self.set_stage_group('abort-prepare-interrupted')
def _prepare_event_handler_player_hurt(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if player in self._players or player == self.leader:
self.set_stage_group('abort-prepare-interrupted')
@stage('prepare-register-event-handlers')
def stage_prepare_register_event_handlers(self):
event_manager.register_for_event(
'player_death', self._prepare_event_handler_player_death)
event_manager.register_for_event(
'player_hurt', self._prepare_event_handler_player_hurt)
internal_event_manager.register_event_handler(
'player_deleted',
self._prepare_event_handler_player_deleted
)
@stage('undo-prepare-register-event-handlers')
def stage_undo_prepare_register_event_handlers(self):
event_manager.unregister_for_event(
'player_death', self._prepare_event_handler_player_death)
event_manager.unregister_for_event(
'player_hurt', self._prepare_event_handler_player_hurt)
internal_event_manager.unregister_event_handler(
'player_deleted',
self._prepare_event_handler_player_deleted
)
@stage('prepare-cancel-delays')
def stage_prepare_cancel_delays(self):
for delay in (self._prepare_delay, self._prepare_countdown):
if delay is not None and delay.running:
delay.cancel()
@stage('prepare-cancel-countdown')
def stage_prepare_cancel_countdown(self):
if self._prepare_countdown is not None:
self._prepare_countdown.cancel()
@stage('prepare-undo-prepare-start')
def stage_prepare_undo_prepare_start(self):
self.undo_stages('prepare-start')
@stage('prepare-entry')
def stage_prepare_entry(self):
pass
def _prepare_freeze_tick_handler(self):
for player in self._players:
weapon = player.active_weapon
if weapon is None:
continue
weapon.next_attack += 1
weapon.next_secondary_fire_attack += 1
@stage('prepare-freeze')
def stage_prepare_freeze(self):
on_tick_listener_manager.register_listener(
self._prepare_freeze_tick_handler)
for player in self._players:
player.stuck = True
@stage('undo-prepare-freeze')
def stage_undo_prepare_freeze(self):
on_tick_listener_manager.unregister_listener(
self._prepare_freeze_tick_handler)
for player in self._players:
player.stuck = False
weapon = player.active_weapon
if weapon is None:
continue
weapon.next_attack = 0
weapon.next_secondary_fire_attack = 0
@stage('abort-prepare-interrupted')
def stage_abort_prepare_interrupted(self):
broadcast(strings_module['abort_prepare_interrupted'])
if config_manager['prepare_sound'] is not None:
config_manager['prepare_sound'].stop()
self.set_stage_group('destroy')
| gpl-3.0 | -4,094,657,254,956,467,700 | 33.805 | 79 | 0.616003 | false |
sekikn/ambari | ambari-common/src/main/python/ambari_ws4py/server/wsgirefserver.py | 2 | 5353 | # -*- coding: utf-8 -*-
__doc__ = """
Add WebSocket support to the built-in WSGI server
provided by the :py:mod:`wsgiref`. This is clearly not
meant to be a production server so please consider this
only for testing purpose.
Mostly, this module overrides bits and pieces of
the built-in classes so that it supports the WebSocket
workflow.
.. code-block:: python
from wsgiref.simple_server import make_server
from ambari_ws4py.websocket import EchoWebSocket
from ambari_ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler
from ambari_ws4py.server.wsgiutils import WebSocketWSGIApplication
server = make_server('', 9000, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.initialize_websockets_manager()
server.serve_forever()
.. note::
For some reason this server may fail against autobahntestsuite.
"""
import logging
import sys
import itertools
import operator
from wsgiref.handlers import SimpleHandler
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer as _WSGIServer
from wsgiref import util
util._hoppish = {}.__contains__
from ambari_ws4py.manager import WebSocketManager
from ambari_ws4py import format_addresses
from ambari_ws4py.server.wsgiutils import WebSocketWSGIApplication
from ambari_ws4py.compat import get_connection
__all__ = ['WebSocketWSGIHandler', 'WebSocketWSGIRequestHandler',
'WSGIServer']
logger = logging.getLogger('ambari_ws4py')
class WebSocketWSGIHandler(SimpleHandler):
def setup_environ(self):
"""
Setup the environ dictionary and add the
`'ws4py.socket'` key. Its associated value
        is the real underlying socket.
"""
SimpleHandler.setup_environ(self)
self.environ['ws4py.socket'] = get_connection(self.environ['wsgi.input'])
self.http_version = self.environ['SERVER_PROTOCOL'].rsplit('/')[-1]
def finish_response(self):
"""
Completes the response and performs the following tasks:
- Remove the `'ws4py.socket'` and `'ws4py.websocket'`
environ keys.
- Attach the returned websocket, if any, to the WSGI server
using its ``link_websocket_to_server`` method.
"""
# force execution of the result iterator until first actual content
rest = iter(self.result)
first = list(itertools.islice(rest, 1))
self.result = itertools.chain(first, rest)
# now it's safe to look if environ was modified
ws = None
if self.environ:
self.environ.pop('ws4py.socket', None)
ws = self.environ.pop('ws4py.websocket', None)
try:
SimpleHandler.finish_response(self)
except:
if ws:
ws.close(1011, reason='Something broke')
raise
else:
if ws:
self.request_handler.server.link_websocket_to_server(ws)
class WebSocketWSGIRequestHandler(WSGIRequestHandler):
WebSocketWSGIHandler = WebSocketWSGIHandler
def handle(self):
"""
Unfortunately the base class forces us
to override the whole method to actually provide our wsgi handler.
"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
        # next line is where we'd expect a configuration key somehow
handler = self.WebSocketWSGIHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
class WSGIServer(_WSGIServer):
def initialize_websockets_manager(self):
"""
        Call this to start the underlying websockets
manager. Make sure to call it once your server
is created.
"""
self.manager = WebSocketManager()
self.manager.start()
def shutdown_request(self, request):
"""
The base class would close our socket
if we didn't override it.
"""
pass
def link_websocket_to_server(self, ws):
"""
Call this from your WSGI handler when a websocket
has been created.
"""
self.manager.add(ws)
def server_close(self):
"""
Properly initiate closing handshakes on
all websockets when the WSGI server terminates.
"""
if hasattr(self, 'manager'):
self.manager.close_all()
self.manager.stop()
self.manager.join()
delattr(self, 'manager')
_WSGIServer.server_close(self)
if __name__ == '__main__':
from ambari_ws4py import configure_logger
configure_logger()
from wsgiref.simple_server import make_server
from ambari_ws4py.websocket import EchoWebSocket
server = make_server('', 9000, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.initialize_websockets_manager()
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
| apache-2.0 | 4,859,754,912,478,700,000 | 33.095541 | 89 | 0.653092 | false |
fishroot/nemoa | nemoa/dataset/commons/labels/__init__.py | 1 | 2018 | # -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
import nemoa
import numpy
import importlib
def convert(list, input, output=None, filter=False):
generic_types = ['number', 'string', 'float']
    if isinstance(list, numpy.ndarray):
list = list.tolist()
input_dtype = 'nparray'
else: input_dtype = 'list'
# 'input'
if input in generic_types:
input_class = 'generic'
input_format = input
elif ':' in input:
input_class = input.lower().split(':')[0].strip()
input_format = input.lower().split(':')[1].strip()
else: raise Warning("""could not convert list:
unknown input format '%s'.""" % input)
# 'output'
if output in generic_types:
output_class = 'generic'
output_format = output
elif not output:
output_class = input_class
output_format = None
    elif ':' in output:
output_class = output.lower().split(':')[0].strip()
output_format = output.lower().split(':')[1].strip()
else: raise Warning("""could not convert list:
unknown output format '%s'.""" % output)
# 'input' vs 'output'
if input_class != output_class:
raise Warning("'%s' can not be converted to '%s'"
% (input_class, output_class))
# trivial cases
if input_class == 'generic' or input_format == output_format:
if input_dtype == 'nparray':
return numpy.asarray(list), numpy.asarray([])
else: return list, []
# import annotation module
module_name = input_class.lower()
module = importlib.import_module('nemoa.dataset.commons.labels.'
+ module_name)
converter = getattr(module, module_name)()
output_list, output_lost = converter.convert_list(
list, input_format, output_format, filter)
if input_dtype == 'nparray':
return numpy.asarray(output_list), numpy.asarray(output_lost)
return output_list, output_lost
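# Hedged usage sketch; the 'wikipedia' annotation module is hypothetical and
# any module under nemoa.dataset.commons.labels with a matching class works:
#   converted, lost = convert(['BRCA1'], 'wikipedia:name', 'wikipedia:id')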
| gpl-3.0 | -1,435,464,430,566,239,200 | 31.031746 | 69 | 0.600099 | false |
marado/youtube-dl | youtube_dl/extractor/npo.py | 1 | 12656 | from __future__ import unicode_literals
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
url_basename,
)
class NPOBaseIE(SubtitlesInfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
return self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
class NPOIE(NPOBaseIE):
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/(?!live|radio)[^/]+/[^/]+/(?P<id>[^/?]+)'
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
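            # qualities() ranks later entries higher, so h264_std wins.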
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = 'http://e.omroep.nl/tt888/%s' % video_id
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
return {
'id': video_id,
'title': metadata['titel'],
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
if stream_type == 'ss':
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
transform_source=strip_jsonp)
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
# f4m downloader downloads only piece of live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
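    # e.g. _html_get_attribute_regex('data-channel') yields
    # r"data-channel\s*=\s*'([^']+)'", capturing the single-quoted value.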
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class TegenlichtVproIE(NPOIE):
IE_NAME = 'tegenlicht.vpro.nl'
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
},
},
]
def _real_extract(self, url):
name = url_basename(url)
webpage = self._download_webpage(url, name)
urn = self._html_search_meta('mediaurn', webpage)
info_page = self._download_json(
'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
return self._get_info(info_page['mid'])
| unlicense | -5,777,262,629,636,759,000 | 33.579235 | 111 | 0.477323 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/mobile_device_constant.py | 1 | 2416 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import mobile_device_type
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'MobileDeviceConstant',
},
)
class MobileDeviceConstant(proto.Message):
r"""A mobile device constant.
Attributes:
resource_name (str):
Output only. The resource name of the mobile device
constant. Mobile device constant resource names have the
form:
``mobileDeviceConstants/{criterion_id}``
id (int):
Output only. The ID of the mobile device
constant.
name (str):
Output only. The name of the mobile device.
manufacturer_name (str):
Output only. The manufacturer of the mobile
device.
operating_system_name (str):
Output only. The operating system of the
mobile device.
type_ (google.ads.googleads.v6.enums.types.MobileDeviceTypeEnum.MobileDeviceType):
Output only. The type of mobile device.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
id = proto.Field(
proto.INT64,
number=7,
optional=True,
)
name = proto.Field(
proto.STRING,
number=8,
optional=True,
)
manufacturer_name = proto.Field(
proto.STRING,
number=9,
optional=True,
)
operating_system_name = proto.Field(
proto.STRING,
number=10,
optional=True,
)
type_ = proto.Field(
proto.ENUM,
number=6,
enum=mobile_device_type.MobileDeviceTypeEnum.MobileDeviceType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -6,480,759,131,112,648,000 | 27.423529 | 90 | 0.627897 | false |
lalanza808/rharvest | rharvest.py | 1 | 2791 | #!/usr/bin/env python
"""
Image scraper utilizing Reddit Python API, PRAW.
Requires PRAW library installed, or pip installed to get it.
Choose a subreddit, it identifies pictures in that sub
and downloads to specified directory.
"""
__author__ = 'lance - github.com/lalanza808'
################################################################################
# Library Import
# Builtin libs
from urllib import urlretrieve
from os import path,system,getenv
from time import strftime, sleep
import ConfigParser
import argparse
# 3rd party libs
try:
import praw
except ImportError:
print("\nPython library PRAW not installed.\n\nTry:\n\nsudo pip install praw")
exit()
################################################################################
# Variable Declaration
# Open config
configfile = '{}/.config/rharvest.conf'.format(getenv('HOME'))
configparse = ConfigParser.ConfigParser()
configparse.readfp(open(configfile))
savedir = configparse.get('rharvest', 'savedir')
maxthreads = configparse.get('rharvest', 'maxthreads')
useragent = configparse.get('rharvest', 'useragent')
curtime = strftime(str(configparse.get('rharvest', 'timeformat')))
counter = 1
args = ''
parser = argparse.ArgumentParser(description='Command line Reddit image scraper. Supply the subreddit(s) separated by commas (no space), supply the number of images.')
parser.add_argument('--sub', '-s', type=str, dest='subreddit', help='The SubReddit name(s), separated by commas. Eg: minimalwallpaper,gifs,funny', required=True)
parser.add_argument('--count', '-c', type=int, dest='count', help='The amount of images you wish to download', required=True)
parser.add_argument('--dest', '-d', type=str, dest='dest', help='Save location. Set manually here or it defaults to value in config file', required=False)
args = parser.parse_args()
if args.dest == None:
savedir = configparse.get('rharvest', 'savedir')
else:
savedir = args.dest
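# Example invocation (hypothetical subreddits):
#   python rharvest.py --sub wallpapers,earthporn --count 25 --dest ~/pics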
def main():
CreateFolders(args.subreddit)
def CreateFolders(subs):
subs = subs.split(',')
for sub in subs:
if not path.exists("{}/{}/{}".format(savedir, sub, curtime)):
print("Creating new directory : {}/{}/{}".format(savedir, sub, str(curtime)))
sleep(2)
system("mkdir -p {}/{}/{}".format(savedir, sub, curtime))
DownloadImages(sub)
def DownloadImages(sub):
global counter
ua = praw.Reddit(useragent)
subscrape = ua.get_subreddit(sub).get_hot(limit=(args.count + args.count))
for image in subscrape:
extension = image.url[-4:]
if extension == '.jpg' or extension == '.png' or extension == '.gif':
img = path.basename(image.url)
dlas = "{}/{}/{}/{}.{}".format(savedir, sub, curtime, str(counter), img)
urlretrieve(image.url, dlas)
print "Downloading {} as {}".format(img, dlas)
counter += 1
if __name__ == "__main__":
main()
| mit | -5,845,447,432,851,582,000 | 33.036585 | 167 | 0.668936 | false |
6/GeoDJ | geodj/settings.py | 1 | 5663 | import os
# Override these on production env
os.environ.setdefault("APP_ENV", "development")
os.environ.setdefault("SECRET_KEY", "^uhrm48x9y=1f&+$bg=oc(#23mp0*g5k%8+si9tdz7&4_xk&lf")
if os.environ["APP_ENV"] == "development":
try:
        # Add secret ENV variables for development (e.g. API keys) to secrets.py
import secrets
os.environ.setdefault("LASTFM_API_KEY", secrets.LASTFM_API_KEY)
except:
pass
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = os.environ['APP_ENV'] != 'production'
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {}
if os.environ["APP_ENV"] == "production":
import dj_database_url
DATABASES['default'] = dj_database_url.config()
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'geodj_development',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
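# In production, dj_database_url parses the DATABASE_URL environment variable,
# e.g. postgres://user:pass@host:5432/geodj (hypothetical credentials).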
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'staticfiles'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ["SECRET_KEY"]
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geodj.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geodj.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'geodj',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| mit | 8,175,222,836,632,787,000 | 31.545977 | 89 | 0.693449 | false |
userzimmermann/python-moretools | moretools/_types.py | 1 | 2808 | # python-moretools
#
# many more basic tools for python 2/3
# extending itertools, functools and operator
#
# Copyright (C) 2011-2016 Stefan Zimmermann <[email protected]>
#
# python-moretools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-moretools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-moretools. If not, see <http://www.gnu.org/licenses/>.
from ._common import *
from ._simpledict import SimpleDictType
from six.moves import UserString, UserList, UserDict
number_types = integer_types + (float, complex)
string_types = (string_types) + (UserString,)
list_types = (list, UserList)
dict_types = (dict, UserDict, SimpleDictType)
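# Note: the is*() predicates below accept both builtin and UserXxx variants,
# e.g. isstring('abc') and isstring(UserString('abc')) are both True.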
def isintclass(cls):
return issubclass(cls, int)
isinttype = isintclass
def isint(value):
return isinstance(value, int)
if PY2:
def islongclass(cls):
return issubclass(cls, long)
islongtype = islongclass
def islong(value):
return isinstance(value, long)
def isintegerclass(cls):
return issubclass(cls, integer_types)
isintegertype = isintegerclass
def isinteger(value):
return isinstance(value, integer_types)
def isfloatclass(cls):
return issubclass(cls, float)
isfloattype = isfloatclass
def isfloat(value):
return isinstance(value, float)
def iscomplexclass(cls):
return issubclass(cls, complex)
iscomplextype = iscomplexclass
def iscomplex(value):
return isinstance(value, complex)
def isnumberclass(cls):
return issubclass(cls, number_types)
isnumbertype = isnumberclass
def isnumber(value):
return isinstance(value, number_types)
def isstringclass(cls):
return issubclass(cls, string_types)
isstringtype = isstringclass
def isstring(value):
return isinstance(value, string_types)
def istupleclass(cls):
return issubclass(cls, tuple)
istupletype = istupleclass
def istuple(value):
return isinstance(value, tuple)
def islistclass(cls):
return issubclass(cls, list_types)
islisttype = islistclass
def islist(value):
return isinstance(value, list_types)
def issetclass(cls):
return issubclass(cls, set)
issettype = issetclass
def isset(value):
return isinstance(value, set)
def isdictclass(cls):
return issubclass(cls, dict_types)
isdicttype = isdictclass
def isdict(value):
return isinstance(value, dict_types)
| gpl-3.0 | 2,180,999,676,987,541,200 | 20.112782 | 77 | 0.742877 | false |
petry/kanboard | apps/core/migrations/0001_initial.py | 1 | 1775 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
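    # South schema migration (pre-Django-1.7); apply with
    # `./manage.py migrate core 0001`.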
def forwards(self, orm):
# Adding model 'Board'
db.create_table(u'core_board', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'core', ['Board'])
# Adding model 'Story'
db.create_table(u'core_story', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Board'])),
))
db.send_create_signal(u'core', ['Story'])
def backwards(self, orm):
# Deleting model 'Board'
db.delete_table(u'core_board')
# Deleting model 'Story'
db.delete_table(u'core_story')
models = {
u'core.board': {
'Meta': {'object_name': 'Board'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.story': {
'Meta': {'object_name': 'Story'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Board']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['core'] | apache-2.0 | 3,847,779,870,579,923,000 | 35.244898 | 102 | 0.56169 | false |
Ricyteach/parmatter | src/parmatter/parmatter.py | 1 | 6022 | import string
import parse as _parse # avoid potential name conflicts with parse methods
# NOTE: All the Formatter docstrings mostly copied from the string docs page (Formatter does
# not have its own docstrings... <sad_face>).
class Formatter():
    '''Re-implementation of `string.Formatter` (using the composition pattern) to add docstrings, and so
that child classes can more easily override API methods using super().
In general, the `format`, `vformat`, and `_vformat` methods shouldn't be overridden.'''
def format(self, format_string, *args, **kwargs):
'''The primary API method. Takes a format string and injects an
arbitrary set of positional and keyword arguments using format string
syntax.
        Handle formatting logic by overriding get_value, format_field,
check_unused_args, and others per PEP 3101 and the docs.'''
return string.Formatter.format(self, format_string, *args, **kwargs)
def vformat(self, format_string, args, kwargs):
'''This function does the actual work of formatting. It is exposed as a separate function for cases
where you want to pass in a predefined dictionary of arguments, rather than unpacking and repacking
the dictionary as individual arguments using the *args and **kwargs syntax. vformat() does the work
of breaking up the format string into character data and replacement fields. It calls the various
other methods used by the string formatting API.'''
return string.Formatter.vformat(self, format_string, args, kwargs)
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, auto_arg_index=0):
'''The vformat workhorse.'''
return string.Formatter._vformat(self, format_string, args, kwargs, used_args, recursion_depth, auto_arg_index)
def parse(self, format_string):
'''Loop over the format_string and return an iterable of tuples (literal_text, field_name, format_spec,
conversion). This is used by vformat() to break the string into either literal text, or replacement
fields.
The values in the tuple conceptually represent a span of literal text followed by a single replacement
field. If there is no literal text (which can happen if two replacement fields occur consecutively),
then literal_text will be a zero-length string. If there is no replacement field, then the values of
field_name, format_spec and conversion will be None.'''
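        # For example, list(Formatter().parse('x={x!r:>10}')) yields a single
        # tuple: ('x=', 'x', '>10', 'r').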
return string.Formatter.parse(self, format_string)
def get_field(self, field_name, args, kwargs):
'''Given field_name as returned by parse() (see above), convert it to an object to be formatted.
Returns a tuple (obj, used_key). The default version takes strings of the form defined in PEP 3101,
such as “0[name]” or “label.title”. args and kwargs are as passed in to vformat(). The return value
used_key has the same meaning as the key parameter to get_value().'''
return string.Formatter.get_field(self, field_name, args, kwargs)
def get_value(self, key, args, kwargs):
'''Retrieve a given field value. The key argument will be either an integer or a string.
If it is an integer, it represents the index of the positional argument in args; if it
is a string, then it represents a named argument in kwargs.
The args parameter is set to the list of positional arguments to vformat(), and the kwargs
parameter is set to the dictionary of keyword arguments.
For compound field names, these functions are only called for the first component of the
field name; Subsequent components are handled through normal attribute and indexing operations.
So for example, the field expression ‘0.name’ would cause get_value() to be called with a key
argument of 0. The name attribute will be looked up after get_value() returns by calling the
built-in getattr() function.
If the index or keyword refers to an item that does not exist, then an IndexError or KeyError
should be raised.'''
return string.Formatter.get_value(self, key, args, kwargs)
def check_unused_args(self, used_args, args, kwargs):
'''Implement checking for unused arguments if desired. The arguments to this function is the set
of all argument keys that were actually referred to in the format string (integers for positional
arguments, and strings for named arguments), and a reference to the args and kwargs that was passed
to vformat. The set of unused args can be calculated from these parameters. check_unused_args() is
assumed to raise an exception if the check fails.'''
string.Formatter.check_unused_args(self, used_args, args, kwargs)
def format_field(self, value, format_spec):
'''Simply calls the global format() built-in. Provided so that subclasses can override it.'''
return string.Formatter.format_field(self, value, format_spec)
def convert_field(self, value, conversion):
'''Converts the value (returned by get_field()) given a conversion type (as in the tuple returned by
the parse() method). The default version understands ‘s’ (str), ‘r’ (repr) and ‘a’ (ascii) conversion
types.'''
return string.Formatter.convert_field(self, value, conversion)
class Parmatter(Formatter):
'''A parsing formatter; i.e., a formatter that can also "unformat".
The various string format API methods can be overridden by child classes using super() for convenience.'''
def unformat(self, format, string, extra_types=dict(s=str), evaluate_result=True):
'''Inverse of format. Match my format to the string exactly.
Return a parse.Result or parse.Match instance (or None if there's no match).
'''
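        # Minimal usage sketch (assumes the third-party `parse` package is
        # available): Parmatter().unformat('{:d} {:w}', '12 units') returns a
        # parse.Result whose fixed fields are (12, 'units').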
return _parse.parse(format, string, extra_types, evaluate_result) | bsd-2-clause | 5,580,975,050,867,865,000 | 67.170455 | 119 | 0.6999 | false |
izapolsk/integration_tests | cfme/roles.py | 1 | 28996 | from cfme.utils.log import logger
def _remove_page(roles, group, pages):
if group in roles:
for page in pages:
if page in roles[group]:
roles[group].remove(page)
else:
logger.info("Page %s attempted to be removed from role %s, "
"but isn't in there anyway", page, group)
else:
logger.info("Attempted to remove a page from role %s, but role "
"doesn't exist", group)
def _remove_from_all(roles, r_page):
for group in roles:
for page in roles[group]:
if page == r_page:
roles[group].remove(page)
else:
logger.info("Page %s attempted to be removed from role %s, "
"but isn't in there anyway", page, group)
group_data = {
'evmgroup-administrator': [
'control_explorer',
'control_simulation',
'control_import_export',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'infrastructure_requests',
'clouds_providers',
'clouds_availability_zones',
'clouds_flavors',
'clouds_security_groups',
'clouds_instances',
'clouds_stacks',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'automate_explorer',
'automate_simulation',
'automate_customization',
'automate_import_export',
'automate_log',
'automate_requests',
'my_services',
'services_catalogs',
'services_requests',
'services_workloads',
'utilization',
'planning',
'bottlenecks'
],
'evmgroup-approver': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'infrastructure_requests',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
        'services_requests',
        'services_workloads'
],
'evmgroup-auditor': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'services_workloads',
'utilization',
'planning',
'bottlenecks'
],
'evmgroup-desktop': [
'services_requests',
'services_workloads',
'dashboard',
'infrastructure_config_management',
'infrastructure_requests',
'infrastructure_virtual_machines',
'clouds_instances',
'my_settings',
'about'
],
'evmgroup-operator': [
'services_workloads',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'clouds_instances',
'my_settings',
'tasks',
'about'
],
'evmgroup-security': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'services_workloads'
],
'evmgroup-super_administrator': [
'control_explorer',
'control_simulation',
'control_import_export',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'infrastructure_requests',
'infrastructure_config_management',
'clouds_providers',
'clouds_availability_zones',
'clouds_flavors',
'clouds_security_groups',
'clouds_instances',
'clouds_tenants',
'clouds_stacks',
'my_settings',
'tasks',
'configuration',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'automate_explorer',
'automate_simulation',
'automate_customization',
'automate_import_export',
'automate_log',
'automate_requests',
'my_services',
'services_catalogs',
'services_requests',
'services_workloads',
'utilization',
'planning',
'bottlenecks'
],
'evmgroup-support': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'services_workloads'
],
'evmgroup-user': [
'services_workloads',
'services_requests',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_requests',
'clouds_instances',
'my_settings',
'tasks',
'about'
],
'evmgroup-user_limited_self_service': [
'clouds_instances',
'services_requests',
'infrastructure_virtual_machines',
'infrastructure_requests',
'my_settings',
'about'
],
'evmgroup-user_self_service': [
'clouds_instances',
'services_requests',
'infrastructure_config_management',
'infrastructure_virtual_machines',
'infrastructure_requests',
'my_settings',
'about'
],
'evmgroup-vm_user': [
'clouds_instances',
'infrastructure_config_management',
'infrastructure_virtual_machines',
'infrastructure_requests',
'services_requests',
'services_workloads',
'my_settings',
'about'
]
}
# Matches structure/string format of VerticalNavigation output for tree, not UI access control tree
# TODO include non-vertical nav RBAC to settings, help
# TODO RBAC goes deeper than vertical nav, into accordions. Example: Cloud Intel -> Reports
role_access_ui_510z = {
'evmgroup-super_administrator': {
'Cloud Intel': ['Dashboard', 'Reports', 'Chargeback', 'Timelines', 'RSS'],
'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
'Compute': {
'Clouds': [
'Providers',
'Availability Zones',
'Host Aggregates',
'Tenants',
'Flavors',
'Instances',
'Stacks',
'Key Pairs',
'Topology'
],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Networking',
'Topology'
],
'Physical Infrastructure': [
'Overview',
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
],
'Containers': [
'Overview',
'Providers',
'Projects',
'Routes',
'Container Services',
'Replicators',
'Pods',
'Containers',
'Container Nodes',
'Volumes',
'Container Builds',
'Image Registries',
'Container Images',
'Container Templates',
'Topology'
],
'Migration': [
'Migration Plans',
'Infrastructure Mappings',
'Migration Settings'
]
},
'Configuration': ['Management'],
'Networks': [
'Providers',
'Networks',
'Subnets',
'Network Routers',
'Security Groups',
'Floating IPs',
'Network Ports',
'Load Balancers',
'Topology'
],
'Storage': {
'Block Storage': [
'Managers',
'Volumes',
'Volume Snapshots',
'Volume Backups',
'Volume Types'
],
'Object Storage': [
'Managers',
'Object Store Containers',
'Object Store Objects'
]
},
'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer', 'Jobs'],
'Automate': [
'Explorer',
'Simulation',
'Generic Objects',
'Customization',
'Import / Export',
'Log',
'Requests'
]
},
'Optimize': ['Utilization', 'Planning', 'Bottlenecks'],
'Monitor': {
'Alerts': ['Overview', 'All Alerts']
}
},
'evmgroup-administrator': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Jobs', 'Explorer'],
'Automate': ['Log', 'Simulation', 'Import / Export', 'Customization', 'Explorer']},
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Flavors', 'Instances', 'Providers', 'Host Aggregates', 'Availability Zones',
'Stacks', 'Topology'],
'Containers': ['Container Nodes', 'Containers', 'Providers', 'Overview',
'Image Registries', 'Container Builds', 'Container Services',
'Volumes', 'Container Images', 'Routes', 'Pods', 'Replicators',
'Projects', 'Topology'],
'Infrastructure': ['Datastores', 'Networking', 'Providers', 'Virtual Machines', 'Hosts',
'Clusters', 'Topology', 'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Configuration': ['Management'],
'Control': ['Import / Export', 'Log', 'Explorer', 'Simulation'],
'Networks': ['Providers', 'Security Groups', 'Floating IPs', 'Networks'],
'Optimize': ['Bottlenecks', 'Planning', 'Utilization'],
'Services': ['Requests', 'Workloads', 'Catalogs', 'My Services'],
'Storage': {
'Object Storage': ['Object Store Containers', 'Object Store Objects']}
},
'evmgroup-approver': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Services': ['Requests', 'Workloads', 'My Services'],
},
'evmgroup-auditor': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'Networking', 'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Optimize': ['Bottlenecks', 'Planning', 'Utilization'],
'Services': ['Workloads', 'My Services']},
'evmgroup-desktop': {
'Automation': {
'Ansible Tower': ['Explorer']},
'Cloud Intel': ['Dashboard'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Configuration': ['Management'],
'Services': ['Requests', 'Workloads']
},
'evmgroup-operator': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Configuration': ['Management'],
'Services': ['Workloads', 'My Services']
},
'evmgroup-security': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts',
'Clusters', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Servers']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Services': ['My Services', 'Workloads']
},
'evmgroup-support': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Services': ['My Services', 'Workloads']
},
'evmgroup-user': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Services': ['Requests', 'Workloads', 'My Services']
},
'evmgroup-vm_user': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines']},
'Configuration': ['Management'],
'Services': ['Requests', 'Workloads'],
}
}
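# Example lookup against the tree above:
# role_access_ui_510z['evmgroup-user']['Compute']['Clouds'] -> ['Instances']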
role_access_ssui = {
'evmgroup-user_limited_self_service': {
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines']},
'Services': ['Requests', 'Catalogs', 'My Services']
},
'evmgroup-user_self_service': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines'],
'Physical Infrastructure': ['Providers']},
'Configuration': ['Management'],
'Services': ['Requests', 'Catalogs', 'My Services']
},
}
role_access_ui_511z = {
'evmgroup-super_administrator': {
'Overview': [
'Dashboard',
'Reports',
'Utilization',
'Chargeback',
'Optimization'
],
'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
'Compute': {
'Clouds': [
'Providers',
'Availability Zones',
'Host Aggregates',
'Tenants',
'Flavors',
'Instances',
'Stacks',
'Key Pairs',
'Topology'
],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Firmware Registry',
'Networking',
'Topology'
],
'Physical Infrastructure': [
'Overview',
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
],
'Containers': [
'Overview',
'Providers',
'Projects',
'Routes',
'Container Services',
'Replicators',
'Pods',
'Containers',
'Container Nodes',
'Volumes',
'Container Builds',
'Image Registries',
'Container Images',
'Container Templates',
'Topology'
]
},
'Migration': [
'Migration Plans',
'Infrastructure Mappings',
'Migration Settings'
],
'Configuration': ['Management'],
'Networks': [
'Providers',
'Networks',
'Subnets',
'Network Routers',
'Security Groups',
'Floating IPs',
'Network Ports',
'Topology'
],
'Storage': {
'Block Storage': [
'Managers',
'Volumes',
'Volume Snapshots',
'Volume Backups',
'Volume Types'
],
'Object Storage': [
'Managers',
'Object Store Containers',
'Object Store Objects'
]
},
'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer', 'Jobs'],
'Automate': [
'Explorer',
'Simulation',
'Generic Objects',
'Customization',
'Import / Export',
'Log',
'Requests'
]
},
'Monitor': {
'Alerts': ['Overview', 'All Alerts']
},
},
'evmgroup-administrator': {
'Overview': ['Dashboard', 'Reports', 'Utilization', 'Chargeback'],
'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
'Compute': {
'Clouds': [
'Providers',
'Availability Zones',
'Host Aggregates',
'Flavors',
'Instances',
'Stacks',
'Topology'
],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Networking',
'Topology'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
],
'Containers': [
'Overview',
'Providers',
'Projects',
'Routes',
'Container Services',
'Replicators',
'Pods',
'Containers',
'Container Nodes',
'Volumes',
'Container Builds',
'Image Registries',
'Container Images',
'Topology'
]
},
'Configuration': ['Management'],
'Networks': ['Providers', 'Networks', 'Security Groups', 'Floating IPs'],
'Storage': {
'Object Storage': [
'Object Store Containers',
'Object Store Objects'
]
},
'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer', 'Jobs'],
'Automate': [
'Explorer',
'Simulation',
'Customization',
'Import / Export',
'Log'
]
}
},
'evmgroup-approver': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Control': ['Explorer', 'Simulation', 'Log']
},
'evmgroup-auditor': {
'Overview': ['Dashboard', 'Reports', 'Utilization', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Networking'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Control': ['Explorer', 'Simulation', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer']
}
},
'evmgroup-desktop': {
'Overview': ['Dashboard'],
'Services': ['Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines'],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Configuration': ['Management'],
'Automation': {
'Ansible Tower': ['Explorer']
}
},
'evmgroup-operator': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Configuration': ['Management'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer']
}
},
'evmgroup-security': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores'
],
'Physical Infrastructure': ['Providers', 'Servers']
},
'Control': ['Explorer', 'Simulation', 'Log']
},
'evmgroup-support': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Control': ['Explorer', 'Simulation', 'Log']
},
'evmgroup-user': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
}
},
'evmgroup-vm_user': {
'Services': ['Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines']
},
'Configuration': ['Management'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer']
}
}
}
| gpl-2.0 | -6,055,738,492,595,354,000 | 31.32553 | 100 | 0.453373 | false |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Examples/Catalyst/PythonDolfinExample/simulation-catalyst-step6.py | 1 | 8271 | """This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 6 : Add field data arrays to VTK grid
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
if len(sys.argv) != 3:
print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
print 'script name is ', scriptname
scriptname = scriptname[0:len(scriptname)-3]
try:
cpscript = __import__(scriptname)
except:
print sys.exc_info()
print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
# initialize data description
datadescription = vtkPVCatalystPython.vtkCPDataDescription()
datadescription.SetTimeData(time, step)
datadescription.AddInput("input")
cpscript.RequestDataDescription(datadescription)
inputdescription = datadescription.GetInputDescriptionByName("input")
if inputdescription.GetIfGridIsNecessary() == False:
return
if grid != None:
# attach VTK data set to pipeline input
inputdescription.SetGrid(grid)
# execute catalyst processing
cpscript.DoCoProcessing(datadescription)
# [SC14-Catalyst] convert dolfin mesh to a VTK unstructured grid
def Mesh2VTKUGrid(mesh):
    vtkcelltypes = (
        (),
        (vtk.VTK_EMPTY_CELL, vtk.VTK_VERTEX, vtk.VTK_LINE),
        (vtk.VTK_EMPTY_CELL, vtk.VTK_VERTEX, vtk.VTK_LINE, vtk.VTK_TRIANGLE,
         vtk.VTK_QUAD, vtk.VTK_POLYGON, vtk.VTK_POLYGON),
        (vtk.VTK_EMPTY_CELL, vtk.VTK_VERTEX, vtk.VTK_LINE, vtk.VTK_TRIANGLE,
         vtk.VTK_TETRA, vtk.VTK_CONVEX_POINT_SET, vtk.VTK_CONVEX_POINT_SET,
         vtk.VTK_CONVEX_POINT_SET, vtk.VTK_HEXAHEDRON))
npoints=mesh.num_vertices()
geom=mesh.geometry()
pts=vtk.vtkPoints()
pts.SetNumberOfPoints(npoints)
for i in xrange(npoints):
p=geom.point(i)
pts.SetPoint(i,p.x(),p.y(),p.z())
dim = mesh.topology().dim()
ncells=mesh.num_cells()
cells=vtk.vtkCellArray()
cellTypes=vtk.vtkUnsignedCharArray()
cellTypes.SetNumberOfTuples(ncells)
cellLocations=vtk.vtkIdTypeArray()
cellLocations.SetNumberOfTuples(ncells)
loc=0
for (cell,i) in zip(mesh.cells(),xrange(ncells)) :
ncellpoints=len(cell)
cells.InsertNextCell(ncellpoints)
for cpoint in cell:
cells.InsertCellPoint(cpoint)
cellTypes.SetTuple1(i,vtkcelltypes[dim][ncellpoints])
cellLocations.SetTuple1(i,loc)
loc+=1+ncellpoints
ugrid = vtk.vtkUnstructuredGrid()
ugrid.SetPoints(pts)
ugrid.SetCells(cellTypes,cellLocations,cells)
return ugrid
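# The lookup above maps (topological dim, points per cell) to a VTK cell type:
# e.g. a 2-D cell with 3 points becomes vtk.VTK_TRIANGLE, a 3-D cell with 4
# points becomes vtk.VTK_TETRA.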
# [SC14-Catalyst] convert a flattened sequence of values to VTK double array
def Values2VTKArray(values,n,name):
ncomps=len(values)/n
array=vtk.vtkDoubleArray()
array.SetNumberOfComponents(ncomps)
array.SetNumberOfTuples(n)
for i in range(n):
a = []
for j in range(ncomps):
a.append(values[i+j*n])
array.SetTupleValue(i, a)
array.SetName(name)
return array
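# Note: `values` is assumed to be flattened component-major, i.e. for a
# 2-component field over n points, values[0:n] holds component 0 and
# values[n:2*n] holds component 1 (matching dolfin's compute_vertex_values()).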
def AddFieldData(ugrid, pointArrays, cellArrays ):
# add Point data fields
npoints = ugrid.GetNumberOfPoints()
for (name,values) in pointArrays:
ugrid.GetPointData().AddArray( Values2VTKArray(values,npoints,name) )
# add Cell data fields
ncells = ugrid.GetNumberOfCells()
for (name,values) in cellArrays:
ugrid.GetCellData().AddArray( Values2VTKArray(values,ncells,name) )
# Begin demo
from dolfin import *
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")
# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions
noslip = DirichletBC(V, (0, 0),
"on_boundary && \
(x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
(x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
# Time-stepping
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:
# Update pressure boundary condition
p_in.t = t
# Compute tentative velocity step
begin("Computing tentative velocity")
b1 = assemble(L1)
[bc.apply(A1, b1) for bc in bcu]
solve(A1, u1.vector(), b1, "gmres", "default")
end()
# Pressure correction
begin("Computing pressure correction")
b2 = assemble(L2)
[bc.apply(A2, b2) for bc in bcp]
solve(A2, p1.vector(), b2, "gmres", prec)
end()
# Velocity correction
begin("Computing velocity correction")
b3 = assemble(L3)
[bc.apply(A3, b3) for bc in bcu]
solve(A3, u1.vector(), b3, "gmres", "default")
end()
# Plot solution [SC14-Catalyst] Not anymore
# plot(p1, title="Pressure", rescale=True)
# plot(u1, title="Velocity", rescale=True)
# Save to file [SC14-Catalyst] Not anymore
# ufile << u1
# pfile << p1
# [SC14-Catalyst] convert solution to VTK grid
ugrid = Mesh2VTKUGrid( u1.function_space().mesh() )
# [SC14-Catalyst] add field data to the VTK grid
velocity = u1.compute_vertex_values()
pressure = p1.compute_vertex_values()
AddFieldData( ugrid, [ ("Velocity",velocity) , ("Pressure",pressure) ] , [] )
# [SC14-Catalyst] trigger catalyst execution
coProcess(ugrid,t,tstep)
# Move to next time step
u0.assign(u1)
t += dt
tstep += 1
print "t =", t, "step =",tstep
# Hold plot [SC14-Catalyst] Not anymore
# interactive()
| gpl-3.0 | -5,108,514,945,316,003,000 | 29.977528 | 355 | 0.685407 | false |
progdupeupl/pdp_website | pdp/forum/migrations/0001_initial.py | 1 | 5896 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import datetime
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
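    # Initial schema for the forum app (Django >= 1.7 migrations framework);
    # applied with `./manage.py migrate forum`.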
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Titre')),
('position', models.IntegerField(verbose_name='Position', null=True, blank=True)),
('slug', models.SlugField(max_length=80)),
],
options={
'verbose_name_plural': 'Catégories',
'verbose_name': 'Catégorie',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Forum',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Titre')),
('subtitle', models.CharField(max_length=200, verbose_name='Sous-titre', blank=True)),
('position_in_category', models.IntegerField(verbose_name='Position dans la catégorie', null=True, blank=True)),
('slug', models.SlugField(max_length=80)),
('category', models.ForeignKey(to='forum.Category', verbose_name='Catégorie')),
],
options={
'verbose_name_plural': 'Forums',
'verbose_name': 'Forum',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('text', models.TextField(verbose_name='Texte')),
('pubdate', models.DateTimeField(auto_now_add=True, verbose_name='Date de publication')),
('update', models.DateTimeField(verbose_name="Date d'édition", null=True, blank=True)),
('position_in_topic', models.IntegerField(verbose_name='Position dans le sujet')),
('is_useful', models.BooleanField(default=False, verbose_name='Est utile')),
('is_moderated', models.BooleanField(default=False, verbose_name='Est modéré')),
('moderation_time', models.DateTimeField(default=datetime.datetime(2014, 11, 26, 20, 15, 36, 701382), verbose_name="Date d'édition")),
('moderation_text', models.TextField(default='', verbose_name='Explication de modération', blank=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Auteur', related_name='posts')),
('moderated_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Modérateur', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Titre')),
('subtitle', models.CharField(max_length=200, verbose_name='Sous-titre', blank=True)),
('pubdate', models.DateTimeField(auto_now_add=True, verbose_name='Date de création')),
('is_solved', models.BooleanField(default=False, verbose_name='Est résolu')),
('is_locked', models.BooleanField(default=False, verbose_name='Est verrouillé')),
('is_sticky', models.BooleanField(default=False, verbose_name='Est en post-it')),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Auteur', related_name='topics')),
('forum', models.ForeignKey(to='forum.Forum', verbose_name='Forum')),
('last_message', models.ForeignKey(to='forum.Post', verbose_name='Dernier message', related_name='last_message', null=True)),
],
options={
'verbose_name_plural': 'Sujets',
'verbose_name': 'Sujet',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TopicFollowed',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.ForeignKey(to='forum.Topic')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='topics_followed')),
],
options={
'verbose_name_plural': 'Sujets suivis',
'verbose_name': 'Sujet suivi',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TopicRead',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('post', models.ForeignKey(to='forum.Post')),
('topic', models.ForeignKey(to='forum.Topic')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='topics_read')),
],
options={
'verbose_name_plural': 'Sujets lus',
'verbose_name': 'Sujet lu',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='post',
name='topic',
field=models.ForeignKey(to='forum.Topic', verbose_name='Sujet'),
preserve_default=True,
),
]
| agpl-3.0 | 5,790,191,699,543,362,000 | 48.855932 | 150 | 0.556009 | false |
racmariano/skidom | backend/resorts/models/conditions.py | 1 | 2431 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .resort import Resort
from django.db import models
from django.contrib.postgres.fields import ArrayField
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy_djangoitem import DjangoItem
import datetime
# Past and forecasted conditions for a resort
class Conditions(models.Model):
# Hard-coded attributes needed for scraping
resort = models.ForeignKey(Resort, null = True, default=6)
conditions_page_url = models.URLField(blank = True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank = True, null = True, on_delete = models.SET_NULL)
# Attributes collected during scraping
date = models.DateField(default = datetime.date.today)
base_temp = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
summit_temp = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
wind_speed = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
base_depth = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
num_trails_open = models.IntegerField(default = 0)
new_snow_24_hr = models.IntegerField(default = 0)
#past_n_day_snowfall = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
#past_n_day_wind_speed = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
#future_n_day_snowfall = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
#future_n_day_wind_speed = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
# For database querying
unique_id = models.CharField(default='', max_length = 200)
def __init__(self, *args, **kwargs):
super(Conditions, self).__init__(*args, **kwargs)
if not self.id:
self.conditions_page_url = self.resort.conditions_page_url
self.unique_id = self.resort.name+str(datetime.date.today())
def __unicode__(self):
return self.resort.name+": "+str(self.date)
def __str__(self):
return self.resort.name+": "+str(self.date)
class Meta:
verbose_name_plural = "Conditions"
class ConditionsItem(DjangoItem):
django_model = Conditions
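# A minimal lookup sketch (not part of the model; assumes a populated
# database, and "Stowe" is a purely hypothetical resort name). It uses the
# unique_id scheme set in __init__ above (resort name + today's date):
#
#   import datetime
#   key = "Stowe" + str(datetime.date.today())
#   today_conditions = Conditions.objects.filter(unique_id=key).first()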
| mit | -8,431,988,466,548,880,000 | 42.2 | 122 | 0.673797 | false |
kubernetes-client/python | kubernetes/client/models/v1alpha1_pod_preset_spec.py | 1 | 7114 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1PodPresetSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'selector': 'V1LabelSelector',
'volume_mounts': 'list[V1VolumeMount]',
'volumes': 'list[V1Volume]'
}
attribute_map = {
'env': 'env',
'env_from': 'envFrom',
'selector': 'selector',
'volume_mounts': 'volumeMounts',
'volumes': 'volumes'
}
def __init__(self, env=None, env_from=None, selector=None, volume_mounts=None, volumes=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1PodPresetSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._env = None
self._env_from = None
self._selector = None
self._volume_mounts = None
self._volumes = None
self.discriminator = None
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if selector is not None:
self.selector = selector
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if volumes is not None:
self.volumes = volumes
@property
def env(self):
"""Gets the env of this V1alpha1PodPresetSpec. # noqa: E501
Env defines the collection of EnvVar to inject into containers. # noqa: E501
:return: The env of this V1alpha1PodPresetSpec. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1alpha1PodPresetSpec.
Env defines the collection of EnvVar to inject into containers. # noqa: E501
:param env: The env of this V1alpha1PodPresetSpec. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1alpha1PodPresetSpec. # noqa: E501
EnvFrom defines the collection of EnvFromSource to inject into containers. # noqa: E501
:return: The env_from of this V1alpha1PodPresetSpec. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1alpha1PodPresetSpec.
EnvFrom defines the collection of EnvFromSource to inject into containers. # noqa: E501
:param env_from: The env_from of this V1alpha1PodPresetSpec. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def selector(self):
"""Gets the selector of this V1alpha1PodPresetSpec. # noqa: E501
:return: The selector of this V1alpha1PodPresetSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1alpha1PodPresetSpec.
:param selector: The selector of this V1alpha1PodPresetSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1alpha1PodPresetSpec. # noqa: E501
VolumeMounts defines the collection of VolumeMount to inject into containers. # noqa: E501
:return: The volume_mounts of this V1alpha1PodPresetSpec. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1alpha1PodPresetSpec.
VolumeMounts defines the collection of VolumeMount to inject into containers. # noqa: E501
:param volume_mounts: The volume_mounts of this V1alpha1PodPresetSpec. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def volumes(self):
"""Gets the volumes of this V1alpha1PodPresetSpec. # noqa: E501
Volumes defines the collection of Volume to inject into the pod. # noqa: E501
:return: The volumes of this V1alpha1PodPresetSpec. # noqa: E501
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this V1alpha1PodPresetSpec.
Volumes defines the collection of Volume to inject into the pod. # noqa: E501
:param volumes: The volumes of this V1alpha1PodPresetSpec. # noqa: E501
:type: list[V1Volume]
"""
self._volumes = volumes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1PodPresetSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1PodPresetSpec):
return True
return self.to_dict() != other.to_dict()
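# A minimal construction sketch (illustrative only; the env var and label
# values below are made-up examples, not part of the generated client):
#
#   from kubernetes.client import V1alpha1PodPresetSpec, V1EnvVar, V1LabelSelector
#
#   spec = V1alpha1PodPresetSpec(
#       env=[V1EnvVar(name="STAGE", value="dev")],
#       selector=V1LabelSelector(match_labels={"role": "frontend"}),
#   )
#   print(spec.to_dict())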
| apache-2.0 | 2,289,788,507,147,505,700 | 29.663793 | 142 | 0.598538 | false |
justinmeister/spaceinvaders-spyral | game/level.py | 1 | 1501 | import os
import spyral
from .sprites import sprite
from . import collision
WIDTH = 1200
HEIGHT = 900
WHITE = (255, 255, 255)
SIZE = (WIDTH, HEIGHT)
GREEN = (60, 179, 113)
RED = (255, 0, 0)
BLACKBLUE = (19, 15, 48)
BG_COLOR = BLACKBLUE
ENEMYGAP = 30
XMARGIN = 175
YMARGIN = 100
MOVEX = 15
MOVEY = 20
ENEMYSIDE = 50
BACKGROUND = os.path.join("game", "graphics", "spacebackground.png")
class Level1(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self, SIZE)
self.space = spyral.Image(filename=BACKGROUND)
self.background = self.space.scale((1200, 900))
self.collision_handler = collision.CollisionHandler(self)
self.player = sprite.Player(self, 'left', self.collision_handler)
self.alien_list = self.make_aliens(6, 3)
self.collision_handler.add_player(self.player)
self.collision_handler.add_aliens(self.alien_list)
spyral.event.register("system.quit", spyral.director.pop)
spyral.event.register("director.update", self.update)
spyral.event.register("input.keyboard.down.q", spyral.director.pop)
def update(self, delta):
pass
def make_aliens(self, columns, rows):
"""
Make aliens and send them to collision handler.
"""
alien_list = []
for column in range(columns):
for row in range(rows):
alien = sprite.Alien(self, row, column)
alien_list.append(alien)
return alien_list
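# A minimal launch sketch (an assumption: this follows the conventional
# spyral entry point, and exact director arguments may differ by version):
#
#   import spyral
#
#   spyral.director.init(SIZE)
#   spyral.director.run(scene=Level1())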
| mit | -697,301,725,593,934,000 | 25.803571 | 75 | 0.632245 | false |
nakamura-akifumi/kassis_orange | app_search/helpers/paginate_helper.py | 1 | 1579 | import math
class Paginate:
    def __init__(self, pagetab_count=5, per_page=10):
        self.pagetab_count = pagetab_count
        self.per_page = per_page
def paginate(self, result_count, current_page):
paginate_list = []
pagetab_count = self.pagetab_count
per_page = self.per_page
        # Use ceil so that a final, partially-filled page still gets a tab.
        max_page = math.ceil(result_count / per_page)
        if max_page <= pagetab_count:
            # Few enough pages to show them all, starting from page 1.
            sp = 1
            ep = max_page + 1
elif current_page > 3 and max_page - 2 > current_page:
sp = current_page - 2
ep = sp + pagetab_count
elif current_page <= 3 and max_page > current_page + pagetab_count:
sp = 1
ep = sp + pagetab_count
else:
sp = max_page - pagetab_count + 1
ep = max_page + 1
for p in range(sp, ep):
x = {"key": str(p), "display_name": str(p), "current": "0"}
if p == current_page:
x.update({"current": "1"})
paginate_list.append(x)
paginate = {}
paginate.update({"list": paginate_list})
if current_page != 1:
paginate.update({"first": {"key": "1"}})
if current_page != max_page:
paginate.update({"last": {"key": str(max_page)}})
if current_page - 1 > 1:
paginate.update({"previous": {"key": str(current_page - 1)}})
if current_page + 1 <= max_page:
paginate.update({"next": {"key": str(current_page + 1)}})
return {"paginate": paginate}
| mit | 1,459,151,800,332,375,000 | 32.595745 | 75 | 0.513616 | false |
willemt/docopt2ragel | setup.py | 1 | 1202 | from setuptools import setup, find_packages
import codecs
from os import path
here = path.abspath(path.dirname(__file__))
def long_description():
with codecs.open('README.rst', encoding='utf8') as f:
return f.read()
setup(
name='docopt2ragel',
version='0.1.3',
description='Convert your docopt usage text into a Ragel FSM',
long_description=long_description(),
# The project's main homepage.
url='https://github.com/willemt/docopt2ragel',
author='willemt',
author_email='[email protected]',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: System :: Logging',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='development',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['docopt'],
include_package_data=True,
package_data={
'': ['template.rl']
},
entry_points={
'console_scripts': [
'docopt2ragel = docopt2ragel.__main__:main',
],
},
)
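# Once installed, the entry_points above expose a `docopt2ragel` console
# script; a typical invocation might be (arguments are an assumption; see
# the project README for the authoritative usage):
#
#   $ pip install .
#   $ docopt2ragel USAGE.txt > cli.rl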
| bsd-3-clause | -9,220,080,780,478,510,000 | 26.318182 | 66 | 0.608153 | false |
mm1ke/portage | pym/_emerge/actions.py | 1 | 108119 | # Copyright 1999-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
import errno
import logging
import operator
import platform
import pwd
import random
import re
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import textwrap
import time
import warnings
from itertools import chain
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.dbapi._similar_name_search:similar_name_search',
'portage.debug',
'portage.news:count_unread_news,display_news_notifications',
'portage.util._get_vm_info:get_vm_info',
'portage.util.locale:check_locale',
'portage.emaint.modules.sync.sync:SyncRepos',
'_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
'_emerge.help:help@emerge_help',
'_emerge.post_emerge:display_news_notification,post_emerge',
'_emerge.stdout_spinner:stdout_spinner',
)
from portage import os
from portage import shutil
from portage import eapi_is_supported, _encodings, _unicode_decode
from portage.cache.cache_errors import CacheError
from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi.IndexedVardb import IndexedVardb
from portage.dep import Atom, _repo_separator, _slot_separator
from portage.eclass_cache import hashed_path
from portage.exception import InvalidAtom, InvalidData, ParseError
from portage.output import blue, colorize, create_color_func, darkgreen, \
red, xtermTitle, xtermTitleReset, yellow
good = create_color_func("GOOD")
bad = create_color_func("BAD")
warn = create_color_func("WARN")
from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
from portage._sets import load_default_config, SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import cmp_sort_key, writemsg, varexpand, \
writemsg_level, writemsg_stdout
from portage.util.digraph import digraph
from portage.util.SlotObject import SlotObject
from portage.util._async.run_main_scheduler import run_main_scheduler
from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.global_event_loop import global_event_loop
from portage._global_updates import _global_updates
from portage.sync.old_tree_timestamp import old_tree_timestamp_warn
from portage.localization import _
from portage.metadata import action_metadata
from portage.emaint.main import print_results
from _emerge.clear_caches import clear_caches
from _emerge.countdown import countdown
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.Dependency import Dependency
from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.emergelog import emergelog
from _emerge.is_valid_package_atom import is_valid_package_atom
from _emerge.MetadataRegen import MetadataRegen
from _emerge.Package import Package
from _emerge.ProgressHandler import ProgressHandler
from _emerge.RootConfig import RootConfig
from _emerge.Scheduler import Scheduler
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.unmerge import unmerge
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
if sys.hexversion >= 0x3000000:
long = int
_unicode = str
else:
_unicode = unicode
def action_build(emerge_config, trees=DeprecationWarning,
mtimedb=DeprecationWarning, myopts=DeprecationWarning,
myaction=DeprecationWarning, myfiles=DeprecationWarning, spinner=None):
if not isinstance(emerge_config, _emerge_config):
warnings.warn("_emerge.actions.action_build() now expects "
"an _emerge_config instance as the first parameter",
DeprecationWarning, stacklevel=2)
emerge_config = load_emerge_config(
action=myaction, args=myfiles, trees=trees, opts=myopts)
adjust_configs(emerge_config.opts, emerge_config.trees)
settings, trees, mtimedb = emerge_config
myopts = emerge_config.opts
myaction = emerge_config.action
myfiles = emerge_config.args
if '--usepkgonly' not in myopts:
old_tree_timestamp_warn(settings['PORTDIR'], settings)
# It's best for config updates in /etc/portage to be processed
# before we get here, so warn if they're not (bug #267103).
chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
# validate the state of the resume data
# so that we can make assumptions later.
for k in ("resume", "resume_backup"):
if k not in mtimedb:
continue
resume_data = mtimedb[k]
if not isinstance(resume_data, dict):
del mtimedb[k]
continue
mergelist = resume_data.get("mergelist")
if not isinstance(mergelist, list):
del mtimedb[k]
continue
for x in mergelist:
if not (isinstance(x, list) and len(x) == 4):
continue
pkg_type, pkg_root, pkg_key, pkg_action = x
if pkg_root not in trees:
# Current $ROOT setting differs,
# so the list must be stale.
mergelist = None
break
if not mergelist:
del mtimedb[k]
continue
resume_opts = resume_data.get("myopts")
if not isinstance(resume_opts, (dict, list)):
del mtimedb[k]
continue
favorites = resume_data.get("favorites")
if not isinstance(favorites, list):
del mtimedb[k]
continue
resume = False
if "--resume" in myopts and \
("resume" in mtimedb or
"resume_backup" in mtimedb):
resume = True
if "resume" not in mtimedb:
mtimedb["resume"] = mtimedb["resume_backup"]
del mtimedb["resume_backup"]
mtimedb.commit()
# "myopts" is a list for backward compatibility.
resume_opts = mtimedb["resume"].get("myopts", [])
if isinstance(resume_opts, list):
resume_opts = dict((k,True) for k in resume_opts)
for opt in ("--ask", "--color", "--skipfirst", "--tree"):
resume_opts.pop(opt, None)
# Current options always override resume_opts.
resume_opts.update(myopts)
myopts.clear()
myopts.update(resume_opts)
if "--debug" in myopts:
writemsg_level("myopts %s\n" % (myopts,))
# Adjust config according to options of the command being resumed.
for myroot in trees:
mysettings = trees[myroot]["vartree"].settings
mysettings.unlock()
adjust_config(myopts, mysettings)
mysettings.lock()
del myroot, mysettings
ldpath_mtimes = mtimedb["ldpath"]
favorites=[]
buildpkgonly = "--buildpkgonly" in myopts
pretend = "--pretend" in myopts
fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
ask = "--ask" in myopts
enter_invalid = '--ask-enter-invalid' in myopts
nodeps = "--nodeps" in myopts
oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
tree = "--tree" in myopts
if nodeps and tree:
tree = False
del myopts["--tree"]
portage.writemsg(colorize("WARN", " * ") + \
"--tree is broken with --nodeps. Disabling...\n")
debug = "--debug" in myopts
verbose = "--verbose" in myopts
quiet = "--quiet" in myopts
myparams = create_depgraph_params(myopts, myaction)
mergelist_shown = False
if pretend or fetchonly:
# make the mtimedb readonly
mtimedb.filename = None
if '--digest' in myopts or 'digest' in settings.features:
if '--digest' in myopts:
msg = "The --digest option"
else:
msg = "The FEATURES=digest setting"
msg += " can prevent corruption from being" + \
" noticed. The `repoman manifest` command is the preferred" + \
" way to generate manifests and it is capable of doing an" + \
" entire repository or category at once."
prefix = bad(" * ")
writemsg(prefix + "\n")
for line in textwrap.wrap(msg, 72):
writemsg("%s%s\n" % (prefix, line))
writemsg(prefix + "\n")
if resume:
favorites = mtimedb["resume"].get("favorites")
if not isinstance(favorites, list):
favorites = []
resume_data = mtimedb["resume"]
mergelist = resume_data["mergelist"]
if mergelist and "--skipfirst" in myopts:
for i, task in enumerate(mergelist):
if isinstance(task, list) and \
task and task[-1] == "merge":
del mergelist[i]
break
success = False
mydepgraph = None
try:
success, mydepgraph, dropped_tasks = resume_depgraph(
settings, trees, mtimedb, myopts, myparams, spinner)
except (portage.exception.PackageNotFound,
depgraph.UnsatisfiedResumeDep) as e:
if isinstance(e, depgraph.UnsatisfiedResumeDep):
mydepgraph = e.depgraph
from portage.output import EOutput
out = EOutput()
resume_data = mtimedb["resume"]
mergelist = resume_data.get("mergelist")
if not isinstance(mergelist, list):
mergelist = []
if mergelist and debug or (verbose and not quiet):
out.eerror("Invalid resume list:")
out.eerror("")
indent = " "
for task in mergelist:
if isinstance(task, list):
out.eerror(indent + str(tuple(task)))
out.eerror("")
if isinstance(e, depgraph.UnsatisfiedResumeDep):
out.eerror("One or more packages are either masked or " + \
"have missing dependencies:")
out.eerror("")
indent = " "
for dep in e.value:
if dep.atom is None:
out.eerror(indent + "Masked package:")
out.eerror(2 * indent + str(dep.parent))
out.eerror("")
else:
out.eerror(indent + str(dep.atom) + " pulled in by:")
out.eerror(2 * indent + str(dep.parent))
out.eerror("")
msg = "The resume list contains packages " + \
"that are either masked or have " + \
"unsatisfied dependencies. " + \
"Please restart/continue " + \
"the operation manually, or use --skipfirst " + \
"to skip the first package in the list and " + \
"any other packages that may be " + \
"masked or have missing dependencies."
for line in textwrap.wrap(msg, 72):
out.eerror(line)
elif isinstance(e, portage.exception.PackageNotFound):
out.eerror("An expected package is " + \
"not available: %s" % str(e))
out.eerror("")
msg = "The resume list contains one or more " + \
"packages that are no longer " + \
"available. Please restart/continue " + \
"the operation manually."
for line in textwrap.wrap(msg, 72):
out.eerror(line)
if success:
if dropped_tasks:
portage.writemsg("!!! One or more packages have been " + \
"dropped due to\n" + \
"!!! masking or unsatisfied dependencies:\n\n",
noiselevel=-1)
for task, atoms in dropped_tasks.items():
if not atoms:
writemsg(" %s is masked or unavailable\n" %
(task,), noiselevel=-1)
else:
writemsg(" %s requires %s\n" %
(task, ", ".join(atoms)), noiselevel=-1)
portage.writemsg("\n", noiselevel=-1)
del dropped_tasks
else:
if mydepgraph is not None:
mydepgraph.display_problems()
if not (ask or pretend):
# delete the current list and also the backup
# since it's probably stale too.
for k in ("resume", "resume_backup"):
mtimedb.pop(k, None)
mtimedb.commit()
return 1
else:
if ("--resume" in myopts):
print(darkgreen("emerge: It seems we have nothing to resume..."))
return os.EX_OK
try:
success, mydepgraph, favorites = backtrack_depgraph(
settings, trees, myopts, myparams, myaction, myfiles, spinner)
except portage.exception.PackageSetNotFound as e:
root_config = trees[settings['EROOT']]['root_config']
display_missing_pkg_set(root_config, e.value)
return 1
if success and mydepgraph.need_config_reload():
load_emerge_config(emerge_config=emerge_config)
adjust_configs(emerge_config.opts, emerge_config.trees)
settings, trees, mtimedb = emerge_config
# After config reload, the freshly instantiated binarytree
# instances need to load remote metadata if --getbinpkg
# is enabled. Use getbinpkg_refresh=False to use cached
# metadata, since the cache is already fresh.
if "--getbinpkg" in emerge_config.opts:
for root_trees in emerge_config.trees.values():
try:
root_trees["bintree"].populate(
getbinpkgs=True,
getbinpkg_refresh=False)
except ParseError as e:
writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
% e, noiselevel=-1)
return 1
if "--autounmask-only" in myopts:
mydepgraph.display_problems()
return 0
if not success:
mydepgraph.display_problems()
return 1
mergecount = None
if "--pretend" not in myopts and \
("--ask" in myopts or "--tree" in myopts or \
"--verbose" in myopts) and \
not ("--quiet" in myopts and "--ask" not in myopts):
if "--resume" in myopts:
mymergelist = mydepgraph.altlist()
if len(mymergelist) == 0:
print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
prompt="Would you like to resume merging these packages?"
else:
retval = mydepgraph.display(
mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
mergecount=0
for x in mydepgraph.altlist():
if isinstance(x, Package) and x.operation == "merge":
mergecount += 1
prompt = None
if mergecount==0:
sets = trees[settings['EROOT']]['root_config'].sets
world_candidates = None
if "selective" in myparams and \
not oneshot and favorites:
# Sets that are not world candidates are filtered
# out here since the favorites list needs to be
# complete for depgraph.loadResumeCommand() to
# operate correctly.
world_candidates = [x for x in favorites \
if not (x.startswith(SETPREFIX) and \
not sets[x[1:]].world_candidate)]
if "selective" in myparams and \
not oneshot and world_candidates:
# Prompt later, inside saveNomergeFavorites.
prompt = None
else:
print()
print("Nothing to merge; quitting.")
print()
return os.EX_OK
elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
prompt="Would you like to fetch the source files for these packages?"
else:
prompt="Would you like to merge these packages?"
print()
uq = UserQuery(myopts)
if prompt is not None and "--ask" in myopts and \
uq.query(prompt, enter_invalid) == "No":
print()
print("Quitting.")
print()
return 128 + signal.SIGINT
# Don't ask again (e.g. when auto-cleaning packages after merge)
if mergecount != 0:
myopts.pop("--ask", None)
if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
if ("--resume" in myopts):
mymergelist = mydepgraph.altlist()
if len(mymergelist) == 0:
print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
else:
retval = mydepgraph.display(
mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
else:
if not mergelist_shown:
# If we haven't already shown the merge list above, at
# least show warnings about missed updates and such.
mydepgraph.display_problems()
need_write_vardb = not Scheduler. \
_opts_no_self_update.intersection(myopts)
need_write_bindb = not any(x in myopts for x in
("--fetchonly", "--fetch-all-uri",
"--pretend", "--usepkgonly")) and \
(any("buildpkg" in trees[eroot]["root_config"].
settings.features for eroot in trees) or
any("buildsyspkg" in trees[eroot]["root_config"].
settings.features for eroot in trees))
if need_write_bindb or need_write_vardb:
eroots = set()
ebuild_eroots = set()
for x in mydepgraph.altlist():
if isinstance(x, Package) and x.operation == "merge":
eroots.add(x.root)
if x.type_name == "ebuild":
ebuild_eroots.add(x.root)
for eroot in eroots:
if need_write_vardb and \
not trees[eroot]["vartree"].dbapi.writable:
writemsg_level("!!! %s\n" %
_("Read-only file system: %s") %
trees[eroot]["vartree"].dbapi._dbroot,
level=logging.ERROR, noiselevel=-1)
return 1
if need_write_bindb and eroot in ebuild_eroots and \
("buildpkg" in trees[eroot]["root_config"].
settings.features or
"buildsyspkg" in trees[eroot]["root_config"].
settings.features) and \
not trees[eroot]["bintree"].dbapi.writable:
writemsg_level("!!! %s\n" %
_("Read-only file system: %s") %
trees[eroot]["bintree"].pkgdir,
level=logging.ERROR, noiselevel=-1)
return 1
if ("--resume" in myopts):
favorites=mtimedb["resume"]["favorites"]
else:
if "resume" in mtimedb and \
"mergelist" in mtimedb["resume"] and \
len(mtimedb["resume"]["mergelist"]) > 1:
mtimedb["resume_backup"] = mtimedb["resume"]
del mtimedb["resume"]
mtimedb.commit()
mydepgraph.saveNomergeFavorites()
if mergecount == 0:
retval = os.EX_OK
else:
mergetask = Scheduler(settings, trees, mtimedb, myopts,
spinner, favorites=favorites,
graph_config=mydepgraph.schedulerGraph())
del mydepgraph
clear_caches(trees)
retval = mergetask.merge()
if retval == os.EX_OK and \
not (buildpkgonly or fetchonly or pretend):
if "yes" == settings.get("AUTOCLEAN"):
portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
unmerge(trees[settings['EROOT']]['root_config'],
myopts, "clean", [],
ldpath_mtimes, autoclean=1)
else:
portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ " AUTOCLEAN is disabled. This can cause serious"
+ " problems due to overlapping packages.\n")
return retval
def action_config(settings, trees, myopts, myfiles):
enter_invalid = '--ask-enter-invalid' in myopts
uq = UserQuery(myopts)
if len(myfiles) != 1:
print(red("!!! config can only take a single package atom at this time\n"))
sys.exit(1)
if not is_valid_package_atom(myfiles[0], allow_repo=True):
portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
noiselevel=-1)
portage.writemsg("!!! Please check ebuild(5) for full details.\n")
portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
sys.exit(1)
print()
try:
pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
except portage.exception.AmbiguousPackageName as e:
# Multiple matches thrown from cpv_expand
pkgs = e.args[0]
if len(pkgs) == 0:
print("No packages found.\n")
sys.exit(0)
elif len(pkgs) > 1:
if "--ask" in myopts:
options = []
print("Please select a package to configure:")
idx = 0
for pkg in pkgs:
idx += 1
options.append(str(idx))
print(options[-1]+") "+pkg)
print("X) Cancel")
options.append("X")
idx = uq.query("Selection?", enter_invalid, responses=options)
if idx == "X":
sys.exit(128 + signal.SIGINT)
pkg = pkgs[int(idx)-1]
else:
print("The following packages available:")
for pkg in pkgs:
print("* "+pkg)
print("\nPlease use a specific atom or the --ask option.")
sys.exit(1)
else:
pkg = pkgs[0]
print()
if "--ask" in myopts:
if uq.query("Ready to configure %s?" % pkg, enter_invalid) == "No":
sys.exit(128 + signal.SIGINT)
else:
print("Configuring pkg...")
print()
ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
mysettings = portage.config(clone=settings)
vardb = trees[mysettings['EROOT']]['vartree'].dbapi
debug = mysettings.get("PORTAGE_DEBUG") == "1"
retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
		debug=(settings.get("PORTAGE_DEBUG", "") == "1"), cleanup=True,
mydbapi = trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
if retval == os.EX_OK:
portage.doebuild(ebuildpath, "clean", settings=mysettings,
debug=debug, mydbapi=vardb, tree="vartree")
print()
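# (For reference, action_config() backs `emerge --config <atom>`; a typical
#  invocation, with a hypothetical atom, is `emerge --config =app-misc/foo-1.0`
#  run as root.)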
def action_depclean(settings, trees, ldpath_mtimes,
myopts, action, myfiles, spinner, scheduler=None):
# Kill packages that aren't explicitly merged or are required as a
# dependency of another package. World file is explicit.
# Global depclean or prune operations are not very safe when there are
# missing dependencies since it's unknown how badly incomplete
# the dependency graph is, and we might accidentally remove packages
# that should have been pulled into the graph. On the other hand, it's
# relatively safe to ignore missing deps when only asked to remove
# specific packages.
msg = []
if "preserve-libs" not in settings.features and \
not myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n":
msg.append("Depclean may break link level dependencies. Thus, it is\n")
msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
msg.append("\n")
msg.append("Always study the list of packages to be cleaned for any obvious\n")
msg.append("mistakes. Packages that are part of the world set will always\n")
msg.append("be kept. They can be manually added to this set with\n")
msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
msg.append("package.provided (see portage(5)) will be removed by\n")
msg.append("depclean, even if they are part of the world set.\n")
msg.append("\n")
msg.append("As a safety measure, depclean will not remove any packages\n")
msg.append("unless *all* required dependencies have been resolved. As a\n")
msg.append("consequence of this, it often becomes necessary to run \n")
msg.append("%s" % good("`emerge --update --newuse --deep @world`")
+ " prior to depclean.\n")
if action == "depclean" and "--quiet" not in myopts and not myfiles:
portage.writemsg_stdout("\n")
for x in msg:
portage.writemsg_stdout(colorize("WARN", " * ") + x)
root_config = trees[settings['EROOT']]['root_config']
vardb = root_config.trees['vartree'].dbapi
args_set = InternalPackageSet(allow_repo=True)
if myfiles:
args_set.update(myfiles)
matched_packages = False
for x in args_set:
if vardb.match(x):
matched_packages = True
else:
writemsg_level("--- Couldn't find '%s' to %s.\n" % \
(x.replace("null/", ""), action),
level=logging.WARN, noiselevel=-1)
if not matched_packages:
writemsg_level(">>> No packages selected for removal by %s\n" % \
action)
return 0
# The calculation is done in a separate function so that depgraph
# references go out of scope and the corresponding memory
# is freed before we call unmerge().
rval, cleanlist, ordered, req_pkg_count = \
calc_depclean(settings, trees, ldpath_mtimes,
myopts, action, args_set, spinner)
clear_caches(trees)
if rval != os.EX_OK:
return rval
if cleanlist:
rval = unmerge(root_config, myopts, "unmerge",
cleanlist, ldpath_mtimes, ordered=ordered,
scheduler=scheduler)
if action == "prune":
return rval
if not cleanlist and "--quiet" in myopts:
return rval
set_atoms = {}
for k in ("profile", "system", "selected"):
try:
set_atoms[k] = root_config.setconfig.getSetAtoms(k)
except portage.exception.PackageSetNotFound:
# A nested set could not be resolved, so ignore nested sets.
set_atoms[k] = root_config.sets[k].getAtoms()
print("Packages installed: " + str(len(vardb.cpv_all())))
print("Packages in world: %d" % len(set_atoms["selected"]))
print("Packages in system: %d" % len(set_atoms["system"]))
if set_atoms["profile"]:
print("Packages in profile: %d" % len(set_atoms["profile"]))
print("Required packages: "+str(req_pkg_count))
if "--pretend" in myopts:
print("Number to remove: "+str(len(cleanlist)))
else:
print("Number removed: "+str(len(cleanlist)))
return rval
def calc_depclean(settings, trees, ldpath_mtimes,
myopts, action, args_set, spinner):
allow_missing_deps = bool(args_set)
debug = '--debug' in myopts
xterm_titles = "notitles" not in settings.features
root_len = len(settings["ROOT"])
eroot = settings['EROOT']
root_config = trees[eroot]["root_config"]
psets = root_config.setconfig.psets
deselect = myopts.get('--deselect') != 'n'
required_sets = {}
required_sets['world'] = psets['world']
# When removing packages, a temporary version of the world 'selected'
# set may be used which excludes packages that are intended to be
# eligible for removal.
selected_set = psets['selected']
required_sets['selected'] = selected_set
protected_set = InternalPackageSet()
protected_set_name = '____depclean_protected_set____'
required_sets[protected_set_name] = protected_set
set_error = False
set_atoms = {}
for k in ("profile", "system", "selected"):
try:
set_atoms[k] = root_config.setconfig.getSetAtoms(k)
except portage.exception.PackageSetNotFound as e:
# A nested set could not be resolved, so ignore nested sets.
set_atoms[k] = root_config.sets[k].getAtoms()
writemsg_level(_("!!! The set '%s' "
"contains a non-existent set named '%s'.\n") %
(k, e), level=logging.ERROR, noiselevel=-1)
set_error = True
# Support @profile as an alternative to @system.
if not (set_atoms["system"] or set_atoms["profile"]):
writemsg_level(_("!!! You have no system list.\n"),
level=logging.WARNING, noiselevel=-1)
if not set_atoms["selected"]:
writemsg_level(_("!!! You have no world file.\n"),
level=logging.WARNING, noiselevel=-1)
# Suppress world file warnings unless @world is completely empty,
# since having an empty world file can be a valid state.
try:
world_atoms = bool(root_config.setconfig.getSetAtoms('world'))
except portage.exception.PackageSetNotFound as e:
writemsg_level(_("!!! The set '%s' "
"contains a non-existent set named '%s'.\n") %
("world", e), level=logging.ERROR, noiselevel=-1)
set_error = True
else:
if not world_atoms:
writemsg_level(_("!!! Your @world set is empty.\n"),
level=logging.ERROR, noiselevel=-1)
set_error = True
if set_error:
writemsg_level(_("!!! Aborting due to set configuration "
"errors displayed above.\n"),
level=logging.ERROR, noiselevel=-1)
return 1, [], False, 0
if action == "depclean":
emergelog(xterm_titles, " >>> depclean")
writemsg_level("\nCalculating dependencies ")
resolver_params = create_depgraph_params(myopts, "remove")
resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
resolver._load_vdb()
vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
real_vardb = trees[eroot]["vartree"].dbapi
if action == "depclean":
if args_set:
if deselect:
# Start with an empty set.
selected_set = InternalPackageSet()
required_sets['selected'] = selected_set
# Pull in any sets nested within the selected set.
selected_set.update(psets['selected'].getNonAtoms())
# Pull in everything that's installed but not matched
# by an argument atom since we don't want to clean any
# package if something depends on it.
for pkg in vardb:
if spinner:
spinner.update()
try:
if args_set.findAtomForPackage(pkg) is None:
protected_set.add("=" + pkg.cpv)
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
elif action == "prune":
if deselect:
# Start with an empty set.
selected_set = InternalPackageSet()
required_sets['selected'] = selected_set
# Pull in any sets nested within the selected set.
selected_set.update(psets['selected'].getNonAtoms())
# Pull in everything that's installed since we don't
		# want to prune a package if something depends on it.
protected_set.update(vardb.cp_all())
if not args_set:
# Try to prune everything that's slotted.
for cp in vardb.cp_all():
if len(vardb.cp_list(cp)) > 1:
args_set.add(cp)
# Remove atoms from world that match installed packages
# that are also matched by argument atoms, but do not remove
# them if they match the highest installed version.
for pkg in vardb:
if spinner is not None:
spinner.update()
pkgs_for_cp = vardb.match_pkgs(Atom(pkg.cp))
if not pkgs_for_cp or pkg not in pkgs_for_cp:
raise AssertionError("package expected in matches: " + \
"cp = %s, cpv = %s matches = %s" % \
(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
highest_version = pkgs_for_cp[-1]
if pkg == highest_version:
# pkg is the highest version
protected_set.add("=" + pkg.cpv)
continue
if len(pkgs_for_cp) <= 1:
raise AssertionError("more packages expected: " + \
"cp = %s, cpv = %s matches = %s" % \
(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
try:
if args_set.findAtomForPackage(pkg) is None:
protected_set.add("=" + pkg.cpv)
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
if resolver._frozen_config.excluded_pkgs:
excluded_set = resolver._frozen_config.excluded_pkgs
required_sets['__excluded__'] = InternalPackageSet()
for pkg in vardb:
if spinner:
spinner.update()
try:
if excluded_set.findAtomForPackage(pkg):
required_sets['__excluded__'].add("=" + pkg.cpv)
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
pkg._metadata["PROVIDE"], _unicode(e))
del e
required_sets['__excluded__'].add("=" + pkg.cpv)
success = resolver._complete_graph(required_sets={eroot:required_sets})
writemsg_level("\b\b... done!\n")
resolver.display_problems()
if not success:
return 1, [], False, 0
def unresolved_deps():
soname_deps = set()
unresolvable = set()
for dep in resolver._dynamic_config._initially_unsatisfied_deps:
if isinstance(dep.parent, Package) and \
(dep.priority > UnmergeDepPriority.SOFT):
if dep.atom.soname:
soname_deps.add((dep.atom, dep.parent.cpv))
else:
unresolvable.add((dep.atom, dep.parent.cpv))
if soname_deps:
# Generally, broken soname dependencies can safely be
# suppressed by a REQUIRES_EXCLUDE setting in the ebuild,
# so they should only trigger a warning message.
prefix = warn(" * ")
msg = []
msg.append("Broken soname dependencies found:")
msg.append("")
for atom, parent in soname_deps:
msg.append(" %s required by:" % (atom,))
msg.append(" %s" % (parent,))
msg.append("")
writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
level=logging.WARNING, noiselevel=-1)
if not unresolvable:
return False
if unresolvable and not allow_missing_deps:
if "--debug" in myopts:
writemsg("\ndigraph:\n\n", noiselevel=-1)
resolver._dynamic_config.digraph.debug_print()
writemsg("\n", noiselevel=-1)
prefix = bad(" * ")
msg = []
msg.append("Dependencies could not be completely resolved due to")
msg.append("the following required packages not being installed:")
msg.append("")
for atom, parent in unresolvable:
if atom.package and atom != atom.unevaluated_atom and \
vardb.match(_unicode(atom)):
msg.append(" %s (%s) pulled in by:" %
(atom.unevaluated_atom, atom))
else:
msg.append(" %s pulled in by:" % (atom,))
msg.append(" %s" % (parent,))
msg.append("")
msg.extend(textwrap.wrap(
"Have you forgotten to do a complete update prior " + \
"to depclean? The most comprehensive command for this " + \
"purpose is as follows:", 65
))
msg.append("")
msg.append(" " + \
good("emerge --update --newuse --deep --with-bdeps=y @world"))
msg.append("")
msg.extend(textwrap.wrap(
"Note that the --with-bdeps=y option is not required in " + \
"many situations. Refer to the emerge manual page " + \
"(run `man emerge`) for more information about " + \
"--with-bdeps.", 65
))
msg.append("")
msg.extend(textwrap.wrap(
"Also, note that it may be necessary to manually uninstall " + \
"packages that no longer exist in the portage tree, since " + \
"it may not be possible to satisfy their dependencies.", 65
))
if action == "prune":
msg.append("")
msg.append("If you would like to ignore " + \
"dependencies then use %s." % good("--nodeps"))
writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
level=logging.ERROR, noiselevel=-1)
return True
return False
if unresolved_deps():
return 1, [], False, 0
graph = resolver._dynamic_config.digraph.copy()
required_pkgs_total = 0
for node in graph:
if isinstance(node, Package):
required_pkgs_total += 1
def show_parents(child_node):
parent_atoms = \
resolver._dynamic_config._parent_atoms.get(child_node, [])
# Never display the special internal protected_set.
parent_atoms = [parent_atom for parent_atom in parent_atoms
if not (isinstance(parent_atom[0], SetArg) and
parent_atom[0].name == protected_set_name)]
if not parent_atoms:
# With --prune, the highest version can be pulled in without any
# real parent since all installed packages are pulled in. In that
# case there's nothing to show here.
return
parent_atom_dict = {}
for parent, atom in parent_atoms:
parent_atom_dict.setdefault(parent, []).append(atom)
parent_strs = []
for parent, atoms in parent_atom_dict.items():
# Display package atoms and soname
# atoms in separate groups.
atoms = sorted(atoms, reverse=True,
key=operator.attrgetter('package'))
parent_strs.append("%s requires %s" %
(getattr(parent, "cpv", parent),
", ".join(_unicode(atom) for atom in atoms)))
parent_strs.sort()
msg = []
msg.append(" %s pulled in by:\n" % (child_node.cpv,))
for parent_str in parent_strs:
msg.append(" %s\n" % (parent_str,))
msg.append("\n")
portage.writemsg_stdout("".join(msg), noiselevel=-1)
def cmp_pkg_cpv(pkg1, pkg2):
"""Sort Package instances by cpv."""
if pkg1.cpv > pkg2.cpv:
return 1
elif pkg1.cpv == pkg2.cpv:
return 0
else:
return -1
def create_cleanlist():
if "--debug" in myopts:
writemsg("\ndigraph:\n\n", noiselevel=-1)
graph.debug_print()
writemsg("\n", noiselevel=-1)
pkgs_to_remove = []
if action == "depclean":
if args_set:
for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
arg_atom = None
try:
arg_atom = args_set.findAtomForPackage(pkg)
except portage.exception.InvalidDependString:
# this error has already been displayed by now
continue
if arg_atom:
if pkg not in graph:
pkgs_to_remove.append(pkg)
elif "--verbose" in myopts:
show_parents(pkg)
else:
for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
if pkg not in graph:
pkgs_to_remove.append(pkg)
elif "--verbose" in myopts:
show_parents(pkg)
elif action == "prune":
for atom in args_set:
for pkg in vardb.match_pkgs(atom):
if pkg not in graph:
pkgs_to_remove.append(pkg)
elif "--verbose" in myopts:
show_parents(pkg)
if not pkgs_to_remove:
writemsg_level(
">>> No packages selected for removal by %s\n" % action)
if "--verbose" not in myopts:
writemsg_level(
">>> To see reverse dependencies, use %s\n" % \
good("--verbose"))
if action == "prune":
writemsg_level(
">>> To ignore dependencies, use %s\n" % \
good("--nodeps"))
return pkgs_to_remove
cleanlist = create_cleanlist()
clean_set = set(cleanlist)
depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
preserve_libs = "preserve-libs" in settings.features
preserve_libs_restrict = False
if depclean_lib_check and preserve_libs:
for pkg in cleanlist:
if "preserve-libs" in pkg.restrict:
preserve_libs_restrict = True
break
if depclean_lib_check and \
(preserve_libs_restrict or not preserve_libs):
# Check if any of these packages are the sole providers of libraries
# with consumers that have not been selected for removal. If so, these
# packages and any dependencies need to be added to the graph.
linkmap = real_vardb._linkmap
consumer_cache = {}
provider_cache = {}
consumer_map = {}
writemsg_level(">>> Checking for lib consumers...\n")
for pkg in cleanlist:
if preserve_libs and "preserve-libs" not in pkg.restrict:
# Any needed libraries will be preserved
# when this package is unmerged, so there's
# no need to account for it here.
continue
pkg_dblink = real_vardb._dblink(pkg.cpv)
consumers = {}
for lib in pkg_dblink.getcontents():
lib = lib[root_len:]
lib_key = linkmap._obj_key(lib)
lib_consumers = consumer_cache.get(lib_key)
if lib_consumers is None:
try:
lib_consumers = linkmap.findConsumers(lib_key)
except KeyError:
continue
consumer_cache[lib_key] = lib_consumers
if lib_consumers:
consumers[lib_key] = lib_consumers
if not consumers:
continue
for lib, lib_consumers in list(consumers.items()):
for consumer_file in list(lib_consumers):
if pkg_dblink.isowner(consumer_file):
lib_consumers.remove(consumer_file)
if not lib_consumers:
del consumers[lib]
if not consumers:
continue
for lib, lib_consumers in consumers.items():
soname = linkmap.getSoname(lib)
consumer_providers = []
for lib_consumer in lib_consumers:
providers = provider_cache.get(lib)
if providers is None:
providers = linkmap.findProviders(lib_consumer)
provider_cache[lib_consumer] = providers
if soname not in providers:
# Why does this happen?
continue
consumer_providers.append(
(lib_consumer, providers[soname]))
consumers[lib] = consumer_providers
consumer_map[pkg] = consumers
if consumer_map:
search_files = set()
for consumers in consumer_map.values():
for lib, consumer_providers in consumers.items():
for lib_consumer, providers in consumer_providers:
search_files.add(lib_consumer)
search_files.update(providers)
writemsg_level(">>> Assigning files to packages...\n")
file_owners = {}
for f in search_files:
owner_set = set()
for owner in linkmap.getOwners(f):
owner_dblink = real_vardb._dblink(owner)
if owner_dblink.exists():
owner_set.add(owner_dblink)
if owner_set:
file_owners[f] = owner_set
for pkg, consumers in list(consumer_map.items()):
for lib, consumer_providers in list(consumers.items()):
lib_consumers = set()
for lib_consumer, providers in consumer_providers:
owner_set = file_owners.get(lib_consumer)
provider_dblinks = set()
provider_pkgs = set()
if len(providers) > 1:
for provider in providers:
provider_set = file_owners.get(provider)
if provider_set is not None:
provider_dblinks.update(provider_set)
if len(provider_dblinks) > 1:
for provider_dblink in provider_dblinks:
provider_pkg = resolver._pkg(
provider_dblink.mycpv, "installed",
root_config, installed=True)
if provider_pkg not in clean_set:
provider_pkgs.add(provider_pkg)
if provider_pkgs:
continue
if owner_set is not None:
lib_consumers.update(owner_set)
for consumer_dblink in list(lib_consumers):
if resolver._pkg(consumer_dblink.mycpv, "installed",
root_config, installed=True) in clean_set:
lib_consumers.remove(consumer_dblink)
continue
if lib_consumers:
consumers[lib] = lib_consumers
else:
del consumers[lib]
if not consumers:
del consumer_map[pkg]
if consumer_map:
# TODO: Implement a package set for rebuilding consumer packages.
msg = "In order to avoid breakage of link level " + \
"dependencies, one or more packages will not be removed. " + \
"This can be solved by rebuilding " + \
"the packages that pulled them in."
prefix = bad(" * ")
writemsg_level("".join(prefix + "%s\n" % line for \
line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
msg = []
for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
consumers = consumer_map[pkg]
consumer_libs = {}
for lib, lib_consumers in consumers.items():
for consumer in lib_consumers:
consumer_libs.setdefault(
consumer.mycpv, set()).add(linkmap.getSoname(lib))
unique_consumers = set(chain(*consumers.values()))
unique_consumers = sorted(consumer.mycpv \
for consumer in unique_consumers)
msg.append("")
msg.append(" %s pulled in by:" % (pkg.cpv,))
for consumer in unique_consumers:
libs = consumer_libs[consumer]
msg.append(" %s needs %s" % \
(consumer, ', '.join(sorted(libs))))
msg.append("")
writemsg_level("".join(prefix + "%s\n" % line for line in msg),
level=logging.WARNING, noiselevel=-1)
# Add lib providers to the graph as children of lib consumers,
# and also add any dependencies pulled in by the provider.
writemsg_level(">>> Adding lib providers to graph...\n")
for pkg, consumers in consumer_map.items():
for consumer_dblink in set(chain(*consumers.values())):
consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
"installed", root_config, installed=True)
if not resolver._add_pkg(pkg,
Dependency(parent=consumer_pkg,
priority=UnmergeDepPriority(runtime=True,
runtime_slot_op=True),
root=pkg.root)):
resolver.display_problems()
return 1, [], False, 0
writemsg_level("\nCalculating dependencies ")
success = resolver._complete_graph(
required_sets={eroot:required_sets})
writemsg_level("\b\b... done!\n")
resolver.display_problems()
if not success:
return 1, [], False, 0
if unresolved_deps():
return 1, [], False, 0
graph = resolver._dynamic_config.digraph.copy()
required_pkgs_total = 0
for node in graph:
if isinstance(node, Package):
required_pkgs_total += 1
cleanlist = create_cleanlist()
if not cleanlist:
return 0, [], False, required_pkgs_total
clean_set = set(cleanlist)
if clean_set:
writemsg_level(">>> Calculating removal order...\n")
# Use a topological sort to create an unmerge order such that
		# each package is unmerged before its dependencies. This is
# necessary to avoid breaking things that may need to run
# during pkg_prerm or pkg_postrm phases.
# Create a new graph to account for dependencies between the
# packages being unmerged.
graph = digraph()
del cleanlist[:]
runtime = UnmergeDepPriority(runtime=True)
runtime_post = UnmergeDepPriority(runtime_post=True)
buildtime = UnmergeDepPriority(buildtime=True)
priority_map = {
"RDEPEND": runtime,
"PDEPEND": runtime_post,
"HDEPEND": buildtime,
"DEPEND": buildtime,
}
for node in clean_set:
graph.add(node, None)
for dep_type in Package._dep_keys:
depstr = node._metadata[dep_type]
if not depstr:
continue
priority = priority_map[dep_type]
if debug:
writemsg_level("\nParent: %s\n"
% (node,), noiselevel=-1, level=logging.DEBUG)
writemsg_level( "Depstring: %s\n"
% (depstr,), noiselevel=-1, level=logging.DEBUG)
writemsg_level( "Priority: %s\n"
% (priority,), noiselevel=-1, level=logging.DEBUG)
try:
atoms = resolver._select_atoms(eroot, depstr,
myuse=node.use.enabled, parent=node,
priority=priority)[node]
except portage.exception.InvalidDependString:
# Ignore invalid deps of packages that will
# be uninstalled anyway.
continue
if debug:
writemsg_level("Candidates: [%s]\n" % \
', '.join("'%s'" % (x,) for x in atoms),
noiselevel=-1, level=logging.DEBUG)
for atom in atoms:
if not isinstance(atom, portage.dep.Atom):
# Ignore invalid atoms returned from dep_check().
continue
if atom.blocker:
continue
matches = vardb.match_pkgs(atom)
if not matches:
continue
for child_node in matches:
if child_node in clean_set:
mypriority = priority.copy()
if atom.slot_operator_built:
if mypriority.buildtime:
mypriority.buildtime_slot_op = True
if mypriority.runtime:
mypriority.runtime_slot_op = True
graph.add(child_node, node, priority=mypriority)
if debug:
writemsg_level("\nunmerge digraph:\n\n",
noiselevel=-1, level=logging.DEBUG)
graph.debug_print()
writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
ordered = True
if len(graph.order) == len(graph.root_nodes()):
# If there are no dependencies between packages
# let unmerge() group them by cat/pn.
ordered = False
cleanlist = [pkg.cpv for pkg in graph.order]
else:
# Order nodes from lowest to highest overall reference count for
# optimal root node selection (this can help minimize issues
# with unaccounted implicit dependencies).
node_refcounts = {}
for node in graph.order:
node_refcounts[node] = len(graph.parent_nodes(node))
def cmp_reference_count(node1, node2):
return node_refcounts[node1] - node_refcounts[node2]
graph.order.sort(key=cmp_sort_key(cmp_reference_count))
ignore_priority_range = [None]
ignore_priority_range.extend(
range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
while graph:
for ignore_priority in ignore_priority_range:
nodes = graph.root_nodes(ignore_priority=ignore_priority)
if nodes:
break
if not nodes:
raise AssertionError("no root nodes")
if ignore_priority is not None:
# Some deps have been dropped due to circular dependencies,
# so only pop one node in order to minimize the number that
# are dropped.
del nodes[1:]
for node in nodes:
graph.remove(node)
cleanlist.append(node.cpv)
return 0, cleanlist, ordered, required_pkgs_total
return 0, [], False, required_pkgs_total
def action_deselect(settings, trees, opts, atoms):
enter_invalid = '--ask-enter-invalid' in opts
root_config = trees[settings['EROOT']]['root_config']
world_set = root_config.sets['selected']
if not hasattr(world_set, 'update'):
writemsg_level("World @selected set does not appear to be mutable.\n",
level=logging.ERROR, noiselevel=-1)
return 1
pretend = '--pretend' in opts
locked = False
if not pretend and hasattr(world_set, 'lock'):
world_set.lock()
locked = True
try:
world_set.load()
world_atoms = world_set.getAtoms()
vardb = root_config.trees["vartree"].dbapi
expanded_atoms = set(atoms)
for atom in atoms:
if not atom.startswith(SETPREFIX):
if atom.cp.startswith("null/"):
# try to expand category from world set
null_cat, pn = portage.catsplit(atom.cp)
for world_atom in world_atoms:
cat, world_pn = portage.catsplit(world_atom.cp)
if pn == world_pn:
expanded_atoms.add(
Atom(atom.replace("null", cat, 1),
allow_repo=True, allow_wildcard=True))
for cpv in vardb.match(atom):
pkg = vardb._pkg_str(cpv, None)
expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
discard_atoms = set()
for atom in world_set:
for arg_atom in expanded_atoms:
if arg_atom.startswith(SETPREFIX):
if atom.startswith(SETPREFIX) and \
arg_atom == atom:
discard_atoms.add(atom)
break
else:
if not atom.startswith(SETPREFIX) and \
arg_atom.intersects(atom) and \
not (arg_atom.slot and not atom.slot) and \
not (arg_atom.repo and not atom.repo):
discard_atoms.add(atom)
break
if discard_atoms:
for atom in sorted(discard_atoms):
if pretend:
action_desc = "Would remove"
else:
action_desc = "Removing"
if atom.startswith(SETPREFIX):
filename = "world_sets"
else:
filename = "world"
writemsg_stdout(
">>> %s %s from \"%s\" favorites file...\n" %
(action_desc, colorize("INFORM", _unicode(atom)),
filename), noiselevel=-1)
if '--ask' in opts:
prompt = "Would you like to remove these " + \
"packages from your world favorites?"
uq = UserQuery(opts)
if uq.query(prompt, enter_invalid) == 'No':
return 128 + signal.SIGINT
remaining = set(world_set)
remaining.difference_update(discard_atoms)
if not pretend:
world_set.replace(remaining)
else:
print(">>> No matching atoms found in \"world\" favorites file...")
finally:
if locked:
world_set.unlock()
return os.EX_OK
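# (For reference, action_deselect() backs `emerge --deselect <atom>`, which
#  removes matching entries from the world favorites without unmerging them,
#  e.g. `emerge --deselect www-client/firefox`.)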
class _info_pkgs_ver(object):
def __init__(self, ver, repo_suffix, provide_suffix):
self.ver = ver
self.repo_suffix = repo_suffix
self.provide_suffix = provide_suffix
def __lt__(self, other):
return portage.versions.vercmp(self.ver, other.ver) < 0
def toString(self):
"""
This may return unicode if repo_name contains unicode.
Don't use __str__ and str() since unicode triggers compatibility
issues between python 2.x and 3.x.
"""
return self.ver + self.repo_suffix + self.provide_suffix
def action_info(settings, trees, myopts, myfiles):
# See if we can find any packages installed matching the strings
# passed on the command line
mypkgs = []
eroot = settings['EROOT']
vardb = trees[eroot]["vartree"].dbapi
portdb = trees[eroot]['porttree'].dbapi
bindb = trees[eroot]["bintree"].dbapi
repos = portdb.settings.repositories
for x in myfiles:
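		# Prefer installed packages; otherwise fall back to the newest
		# ebuild or binary package that defines a pkg_info() phase.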
any_match = False
cp_exists = bool(vardb.match(x.cp))
installed_match = vardb.match(x)
for installed in installed_match:
mypkgs.append((installed, "installed"))
any_match = True
if any_match:
continue
for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
if pkg_type == "binary" and "--usepkg" not in myopts:
continue
# Use match instead of cp_list, to account for old-style virtuals.
if not cp_exists and db.match(x.cp):
cp_exists = True
# Search for masked packages too.
if not cp_exists and hasattr(db, "xmatch") and \
db.xmatch("match-all", x.cp):
cp_exists = True
matches = db.match(x)
matches.reverse()
for match in matches:
if pkg_type == "binary":
if db.bintree.isremote(match):
continue
auxkeys = ["EAPI", "DEFINED_PHASES"]
metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
if metadata["EAPI"] not in ("0", "1", "2", "3") and \
"info" in metadata["DEFINED_PHASES"].split():
mypkgs.append((match, pkg_type))
break
if not cp_exists:
xinfo = '"%s"' % x.unevaluated_atom
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
if settings["ROOT"] != "/":
xinfo = "%s for %s" % (xinfo, eroot)
writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
colorize("INFORM", xinfo), noiselevel=-1)
if myopts.get("--misspell-suggestions", "y") != "n":
writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
search_index = myopts.get("--search-index", "y") != "n"
dbs = [IndexedVardb(vardb) if search_index else vardb]
#if "--usepkgonly" not in myopts:
dbs.append(IndexedPortdb(portdb) if search_index else portdb)
if "--usepkg" in myopts:
dbs.append(bindb)
matches = similar_name_search(dbs, x)
if len(matches) == 1:
writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
, noiselevel=-1)
elif len(matches) > 1:
writemsg(
"\nemerge: Maybe you meant any of these: %s?\n" % \
(", ".join(matches),), noiselevel=-1)
else:
# Generally, this would only happen if
# all dbapis are empty.
writemsg(" nothing similar found.\n"
, noiselevel=-1)
return 1
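	# Buffer report lines so they can be flushed in a few large writes.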
output_buffer = []
append = output_buffer.append
root_config = trees[settings['EROOT']]['root_config']
chost = settings.get("CHOST")
append(getportageversion(settings["PORTDIR"], None,
settings.profile_path, chost,
trees[settings['EROOT']]["vartree"].dbapi))
header_width = 65
header_title = "System Settings"
if myfiles:
append(header_width * "=")
append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
append(header_width * "=")
append("System uname: %s" % (platform.platform(aliased=1),))
vm_info = get_vm_info()
if "ram.total" in vm_info:
line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] // 1024)
if "ram.free" in vm_info:
line += ",%10d free" % (vm_info["ram.free"] // 1024,)
append(line)
if "swap.total" in vm_info:
line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] // 1024)
if "swap.free" in vm_info:
line += ",%10d free" % (vm_info["swap.free"] // 1024,)
append(line)
for repo in repos:
last_sync = portage.grabfile(os.path.join(repo.location, "metadata", "timestamp.chk"))
head_commit = None
if last_sync:
append("Timestamp of repository %s: %s" % (repo.name, last_sync[0]))
if repo.sync_type:
sync = portage.sync.module_controller.get_class(repo.sync_type)()
options = { 'repo': repo }
try:
head_commit = sync.retrieve_head(options=options)
except NotImplementedError:
head_commit = (1, False)
if head_commit and head_commit[0] == os.EX_OK:
append("Head commit of repository %s: %s" % (repo.name, head_commit[1]))
# Searching contents for the /bin/sh provider is somewhat
# slow. Therefore, use the basename of the symlink target
# to locate the package. If this fails, then only the
# basename of the symlink target will be displayed. So,
# typical output is something like "sh bash 4.2_p53". Since
# realpath is used to resolve symlinks recursively, this
# approach is also able to handle multiple levels of symlinks
# such as /bin/sh -> bb -> busybox. Note that we do not parse
# the output of "/bin/sh --version" because many shells
# do not have a --version option.
basename = os.path.basename(os.path.realpath(os.path.join(
os.sep, portage.const.EPREFIX, "bin", "sh")))
try:
Atom("null/%s" % basename)
except InvalidAtom:
matches = None
else:
try:
# Try a match against the basename, which should work for
# busybox and most shells.
matches = (trees[trees._running_eroot]["vartree"].dbapi.
match(basename))
except portage.exception.AmbiguousPackageName:
# If the name is ambiguous, then restrict our match
# to the app-shells category.
matches = (trees[trees._running_eroot]["vartree"].dbapi.
match("app-shells/%s" % basename))
if matches:
pkg = matches[-1]
name = pkg.cp
version = pkg.version
# Omit app-shells category from the output.
if name.startswith("app-shells/"):
name = name[len("app-shells/"):]
sh_str = "%s %s" % (name, version)
else:
sh_str = basename
append("sh %s" % sh_str)
ld_names = []
if chost:
ld_names.append(chost + "-ld")
ld_names.append("ld")
for name in ld_names:
try:
proc = subprocess.Popen([name, "--version"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
pass
else:
output = _unicode_decode(proc.communicate()[0]).splitlines()
			if proc.wait() == os.EX_OK and output:
append("ld %s" % (output[0]))
break
try:
proc = subprocess.Popen(["distcc", "--version"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
output = (1, None)
else:
output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
output = (proc.wait(), output)
if output[0] == os.EX_OK:
distcc_str = output[1].split("\n", 1)[0]
if "distcc" in settings.features:
distcc_str += " [enabled]"
else:
distcc_str += " [disabled]"
append(distcc_str)
try:
proc = subprocess.Popen(["ccache", "-V"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
output = (1, None)
else:
output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
output = (proc.wait(), output)
if output[0] == os.EX_OK:
ccache_str = output[1].split("\n", 1)[0]
if "ccache" in settings.features:
ccache_str += " [enabled]"
else:
ccache_str += " [disabled]"
append(ccache_str)
myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
atoms = []
for x in myvars:
try:
x = Atom(x)
except InvalidAtom:
append("%-20s %s" % (x+":", "[NOT VALID]"))
else:
for atom in expand_new_virt(vardb, x):
if not atom.blocker:
atoms.append((x, atom))
myvars = sorted(set(atoms))
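	# Group installed versions by package, keeping repository and
	# virtual-provider annotations for the report.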
cp_map = {}
cp_max_len = 0
for orig_atom, x in myvars:
pkg_matches = vardb.match(x)
versions = []
for cpv in pkg_matches:
matched_cp = portage.versions.cpv_getkey(cpv)
ver = portage.versions.cpv_getversion(cpv)
ver_map = cp_map.setdefault(matched_cp, {})
prev_match = ver_map.get(ver)
if prev_match is not None:
if prev_match.provide_suffix:
# prefer duplicate matches that include
# additional virtual provider info
continue
if len(matched_cp) > cp_max_len:
cp_max_len = len(matched_cp)
repo = vardb.aux_get(cpv, ["repository"])[0]
if repo:
repo_suffix = _repo_separator + repo
else:
repo_suffix = _repo_separator + "<unknown repository>"
if matched_cp == orig_atom.cp:
provide_suffix = ""
else:
provide_suffix = " (%s)" % (orig_atom,)
ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
for cp in sorted(cp_map):
versions = sorted(cp_map[cp].values())
versions = ", ".join(ver.toString() for ver in versions)
append("%s %s" % \
((cp + ":").ljust(cp_max_len + 1), versions))
append("Repositories:\n")
for repo in repos:
append(repo.info_string())
installed_sets = sorted(s for s in
root_config.sets['selected'].getNonAtoms() if s.startswith(SETPREFIX))
if installed_sets:
sets_line = "Installed sets: "
sets_line += ", ".join(installed_sets)
append(sets_line)
if "--verbose" in myopts:
myvars = list(settings)
else:
myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
'PORTAGE_BUNZIP2_COMMAND',
'PORTAGE_BZIP2_COMMAND',
'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
'EMERGE_DEFAULT_OPTS']
myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
myvars_ignore_defaults = {
'PORTAGE_BZIP2_COMMAND' : 'bzip2',
}
skipped_vars = ['PORTAGE_REPOSITORIES']
# Deprecated variables
skipped_vars.extend(('PORTDIR', 'PORTDIR_OVERLAY', 'SYNC'))
myvars = set(myvars)
myvars.difference_update(skipped_vars)
myvars = sorted(myvars)
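	# Fold USE_EXPAND values out of USE so they print as separate variables.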
use_expand = settings.get('USE_EXPAND', '').split()
use_expand.sort()
unset_vars = []
for k in myvars:
v = settings.get(k)
if v is not None:
if k != "USE":
default = myvars_ignore_defaults.get(k)
if default is not None and \
default == v:
continue
append('%s="%s"' % (k, v))
else:
use = set(v.split())
for varname in use_expand:
flag_prefix = varname.lower() + "_"
for f in list(use):
if f.startswith(flag_prefix):
use.remove(f)
use = list(use)
use.sort()
use = ['USE="%s"' % " ".join(use)]
for varname in use_expand:
myval = settings.get(varname)
if myval:
use.append('%s="%s"' % (varname, myval))
append(" ".join(use))
else:
unset_vars.append(k)
if unset_vars:
append("Unset: "+", ".join(unset_vars))
append("")
append("")
writemsg_stdout("\n".join(output_buffer),
noiselevel=-1)
del output_buffer[:]
# If some packages were found...
if mypkgs:
# Get our global settings (we only print stuff if it varies from
# the current config)
mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
auxkeys.append('DEFINED_PHASES')
pkgsettings = portage.config(clone=settings)
# Loop through each package
# Only print settings if they differ from global settings
header_title = "Package Settings"
append(header_width * "=")
append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
append(header_width * "=")
append("")
writemsg_stdout("\n".join(output_buffer),
noiselevel=-1)
del output_buffer[:]
out = portage.output.EOutput()
for mypkg in mypkgs:
cpv = mypkg[0]
pkg_type = mypkg[1]
# Get all package specific variables
if pkg_type == "installed":
metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
elif pkg_type == "ebuild":
metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
elif pkg_type == "binary":
metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
(metadata.get(x, '') for x in Package.metadata_keys)),
root_config=root_config, type_name=pkg_type)
if pkg_type == "installed":
append("\n%s was built with the following:" % \
colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
elif pkg_type == "ebuild":
append("\n%s would be built with the following:" % \
colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
elif pkg_type == "binary":
append("\n%s (non-installed binary) was built with the following:" % \
colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
append('%s' % pkg_use_display(pkg, myopts))
if pkg_type == "installed":
for myvar in mydesiredvars:
if metadata[myvar].split() != settings.get(myvar, '').split():
append("%s=\"%s\"" % (myvar, metadata[myvar]))
append("")
append("")
writemsg_stdout("\n".join(output_buffer),
noiselevel=-1)
del output_buffer[:]
if metadata['DEFINED_PHASES']:
if 'info' not in metadata['DEFINED_PHASES'].split():
continue
writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
% pkg.cpv, noiselevel=-1)
if pkg_type == "installed":
ebuildpath = vardb.findname(pkg.cpv)
elif pkg_type == "ebuild":
ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
elif pkg_type == "binary":
tbz2_file = bindb.bintree.getname(pkg.cpv)
ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
tmpdir = tempfile.mkdtemp()
ebuildpath = os.path.join(tmpdir, ebuild_file_name)
				# xpak returns the ebuild as bytes, so write it in binary mode.
				with open(ebuildpath, 'wb') as f:
					f.write(ebuild_file_contents)
if not ebuildpath or not os.path.exists(ebuildpath):
out.ewarn("No ebuild found for '%s'" % pkg.cpv)
continue
if pkg_type == "installed":
portage.doebuild(ebuildpath, "info", settings=pkgsettings,
					debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
tree="vartree")
elif pkg_type == "ebuild":
portage.doebuild(ebuildpath, "info", settings=pkgsettings,
					debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
tree="porttree")
elif pkg_type == "binary":
portage.doebuild(ebuildpath, "info", settings=pkgsettings,
					debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
tree="bintree")
shutil.rmtree(tmpdir)
def action_regen(settings, portdb, max_jobs, max_load):
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " === regen")
#regenerate cache entries
sys.stdout.flush()
regen = MetadataRegen(portdb, max_jobs=max_jobs,
max_load=max_load, main=True)
signum = run_main_scheduler(regen)
if signum is not None:
sys.exit(128 + signum)
portage.writemsg_stdout("done!\n")
return regen.returncode
def action_search(root_config, myopts, myfiles, spinner):
if not myfiles:
print("emerge: no search terms provided.")
else:
searchinstance = search(root_config,
spinner, "--searchdesc" in myopts,
"--quiet" not in myopts, "--usepkg" in myopts,
"--usepkgonly" in myopts,
search_index=myopts.get("--search-index", "y") != "n",
search_similarity=myopts.get("--search-similarity"),
fuzzy=myopts.get("--fuzzy-search") != "n",
)
for mysearch in myfiles:
try:
searchinstance.execute(mysearch)
except re.error as comment:
print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
sys.exit(1)
searchinstance.output()
def action_sync(emerge_config, trees=DeprecationWarning,
mtimedb=DeprecationWarning, opts=DeprecationWarning,
action=DeprecationWarning):
if not isinstance(emerge_config, _emerge_config):
warnings.warn("_emerge.actions.action_sync() now expects "
"an _emerge_config instance as the first parameter",
DeprecationWarning, stacklevel=2)
emerge_config = load_emerge_config(
action=action, args=[], trees=trees, opts=opts)
syncer = SyncRepos(emerge_config)
return_messages = "--quiet" not in emerge_config.opts
options = {'return-messages' : return_messages}
if emerge_config.args:
options['repo'] = emerge_config.args
success, msgs = syncer.repo(options=options)
else:
success, msgs = syncer.auto_sync(options=options)
if return_messages:
print_results(msgs)
return os.EX_OK if success else 1
def action_uninstall(settings, trees, ldpath_mtimes,
opts, action, files, spinner):
# For backward compat, some actions do not require leading '='.
ignore_missing_eq = action in ('clean', 'rage-clean', 'unmerge')
root = settings['ROOT']
eroot = settings['EROOT']
vardb = trees[settings['EROOT']]['vartree'].dbapi
valid_atoms = []
lookup_owners = []
# Ensure atoms are valid before calling unmerge().
# For backward compat, leading '=' is not required.
for x in files:
if is_valid_package_atom(x, allow_repo=True) or \
(ignore_missing_eq and is_valid_package_atom('=' + x)):
try:
atom = dep_expand(x, mydb=vardb, settings=settings)
except portage.exception.AmbiguousPackageName as e:
msg = "The short ebuild name \"" + x + \
"\" is ambiguous. Please specify " + \
"one of the following " + \
"fully-qualified ebuild names instead:"
for line in textwrap.wrap(msg, 70):
writemsg_level("!!! %s\n" % (line,),
level=logging.ERROR, noiselevel=-1)
for i in e.args[0]:
writemsg_level(" %s\n" % colorize("INFORM", i),
level=logging.ERROR, noiselevel=-1)
writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
return 1
else:
if atom.use and atom.use.conditional:
writemsg_level(
("\n\n!!! '%s' contains a conditional " + \
"which is not allowed.\n") % (x,),
level=logging.ERROR, noiselevel=-1)
writemsg_level(
"!!! Please check ebuild(5) for full details.\n",
level=logging.ERROR)
return 1
valid_atoms.append(atom)
elif x.startswith(os.sep):
if not x.startswith(eroot):
writemsg_level(("!!! '%s' does not start with" + \
" $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
return 1
# Queue these up since it's most efficient to handle
# multiple files in a single iter_owners() call.
lookup_owners.append(x)
elif x.startswith(SETPREFIX) and action == "deselect":
valid_atoms.append(x)
elif "*" in x:
try:
ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
except InvalidAtom:
msg = []
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
return 1
for cpv in vardb.cpv_all():
if portage.match_from_list(ext_atom, [cpv]):
require_metadata = False
atom = portage.cpv_getkey(cpv)
if ext_atom.operator == '=*':
atom = "=" + atom + "-" + \
portage.versions.cpv_getversion(cpv)
if ext_atom.slot:
atom += _slot_separator + ext_atom.slot
require_metadata = True
if ext_atom.repo:
atom += _repo_separator + ext_atom.repo
require_metadata = True
atom = Atom(atom, allow_repo=True)
if require_metadata:
try:
cpv = vardb._pkg_str(cpv, ext_atom.repo)
except (KeyError, InvalidData):
continue
if not portage.match_from_list(atom, [cpv]):
continue
valid_atoms.append(atom)
else:
msg = []
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
return 1
if lookup_owners:
relative_paths = []
search_for_multiple = False
if len(lookup_owners) > 1:
search_for_multiple = True
for x in lookup_owners:
if not search_for_multiple and os.path.isdir(x):
search_for_multiple = True
relative_paths.append(x[len(root)-1:])
owners = set()
for pkg, relative_path in \
vardb._owners.iter_owners(relative_paths):
owners.add(pkg.mycpv)
if not search_for_multiple:
break
if owners:
for cpv in owners:
pkg = vardb._pkg_str(cpv, None)
atom = '%s:%s' % (pkg.cp, pkg.slot)
valid_atoms.append(portage.dep.Atom(atom))
else:
writemsg_level(("!!! '%s' is not claimed " + \
"by any package.\n") % lookup_owners[0],
level=logging.WARNING, noiselevel=-1)
if files and not valid_atoms:
return 1
if action == 'unmerge' and \
'--quiet' not in opts and \
'--quiet-unmerge-warn' not in opts:
msg = "This action can remove important packages! " + \
"In order to be safer, use " + \
"`emerge -pv --depclean <atom>` to check for " + \
"reverse dependencies before removing packages."
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 72):
out.ewarn(line)
if action == 'deselect':
return action_deselect(settings, trees, opts, valid_atoms)
# Use the same logic as the Scheduler class to trigger redirection
# of ebuild pkg_prerm/postrm phase output to logs as appropriate
# for options such as --jobs, --quiet and --quiet-build.
max_jobs = opts.get("--jobs", 1)
background = (max_jobs is True or max_jobs > 1 or
"--quiet" in opts or opts.get("--quiet-build") == "y")
sched_iface = SchedulerInterface(global_event_loop(),
is_background=lambda: background)
if background:
settings.unlock()
settings["PORTAGE_BACKGROUND"] = "1"
settings.backup_changes("PORTAGE_BACKGROUND")
settings.lock()
if action in ('clean', 'rage-clean', 'unmerge') or \
(action == 'prune' and "--nodeps" in opts):
# When given a list of atoms, unmerge them in the order given.
ordered = action in ('rage-clean', 'unmerge')
rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
valid_atoms, ldpath_mtimes, ordered=ordered,
scheduler=sched_iface)
else:
rval = action_depclean(settings, trees, ldpath_mtimes,
opts, action, valid_atoms, spinner,
scheduler=sched_iface)
return rval
def adjust_configs(myopts, trees):
for myroot in trees:
mysettings = trees[myroot]["vartree"].settings
mysettings.unlock()
adjust_config(myopts, mysettings)
mysettings.lock()
def adjust_config(myopts, settings):
"""Make emerge specific adjustments to the config."""
# Kill noauto as it will break merges otherwise.
if "noauto" in settings.features:
settings.features.remove('noauto')
fail_clean = myopts.get('--fail-clean')
if fail_clean is not None:
if fail_clean is True and \
'fail-clean' not in settings.features:
settings.features.add('fail-clean')
elif fail_clean == 'n' and \
'fail-clean' in settings.features:
settings.features.remove('fail-clean')
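	# Sanitize integer-valued settings, falling back to defaults when the
	# user-supplied value cannot be parsed.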
CLEAN_DELAY = 5
try:
CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
except ValueError as e:
portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
settings["CLEAN_DELAY"], noiselevel=-1)
settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
settings.backup_changes("CLEAN_DELAY")
EMERGE_WARNING_DELAY = 10
try:
EMERGE_WARNING_DELAY = int(settings.get(
"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
except ValueError as e:
portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
settings.backup_changes("EMERGE_WARNING_DELAY")
buildpkg = myopts.get("--buildpkg")
if buildpkg is True:
settings.features.add("buildpkg")
elif buildpkg == 'n':
settings.features.discard("buildpkg")
if "--quiet" in myopts:
settings["PORTAGE_QUIET"]="1"
settings.backup_changes("PORTAGE_QUIET")
if "--verbose" in myopts:
settings["PORTAGE_VERBOSE"] = "1"
settings.backup_changes("PORTAGE_VERBOSE")
# Set so that configs will be merged regardless of remembered status
if ("--noconfmem" in myopts):
settings["NOCONFMEM"]="1"
settings.backup_changes("NOCONFMEM")
# Set various debug markers... They should be merged somehow.
PORTAGE_DEBUG = 0
try:
PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
if PORTAGE_DEBUG not in (0, 1):
portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
PORTAGE_DEBUG, noiselevel=-1)
portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
noiselevel=-1)
PORTAGE_DEBUG = 0
except ValueError as e:
portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
settings["PORTAGE_DEBUG"], noiselevel=-1)
del e
if "--debug" in myopts:
PORTAGE_DEBUG = 1
settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
settings.backup_changes("PORTAGE_DEBUG")
if settings.get("NOCOLOR") not in ("yes","true"):
portage.output.havecolor = 1
# The explicit --color < y | n > option overrides the NOCOLOR environment
# variable and stdout auto-detection.
if "--color" in myopts:
if "y" == myopts["--color"]:
portage.output.havecolor = 1
settings["NOCOLOR"] = "false"
else:
portage.output.havecolor = 0
settings["NOCOLOR"] = "true"
settings.backup_changes("NOCOLOR")
elif settings.get('TERM') == 'dumb' or \
not sys.stdout.isatty():
portage.output.havecolor = 0
settings["NOCOLOR"] = "true"
settings.backup_changes("NOCOLOR")
if "--pkg-format" in myopts:
settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
settings.backup_changes("PORTAGE_BINPKG_FORMAT")
def display_missing_pkg_set(root_config, set_name):
msg = []
msg.append(("emerge: There are no sets to satisfy '%s'. " + \
"The following sets exist:") % \
colorize("INFORM", set_name))
msg.append("")
for s in sorted(root_config.sets):
msg.append(" %s" % s)
msg.append("")
writemsg_level("".join("%s\n" % l for l in msg),
level=logging.ERROR, noiselevel=-1)
def relative_profile_path(portdir, abs_profile):
realpath = os.path.realpath(abs_profile)
basepath = os.path.realpath(os.path.join(portdir, "profiles"))
if realpath.startswith(basepath):
profilever = realpath[1 + len(basepath):]
else:
profilever = None
return profilever
def getportageversion(portdir, _unused, profile, chost, vardb):
pythonver = 'python %d.%d.%d-%s-%d' % sys.version_info[:]
profilever = None
repositories = vardb.settings.repositories
if profile:
profilever = relative_profile_path(portdir, profile)
if profilever is None:
try:
for parent in portage.grabfile(
os.path.join(profile, 'parent')):
profilever = relative_profile_path(portdir,
os.path.join(profile, parent))
if profilever is not None:
break
colon = parent.find(":")
if colon != -1:
p_repo_name = parent[:colon]
try:
p_repo_loc = \
repositories.get_location_for_name(p_repo_name)
except KeyError:
pass
else:
profilever = relative_profile_path(p_repo_loc,
os.path.join(p_repo_loc, 'profiles',
parent[colon+1:]))
if profilever is not None:
break
except portage.exception.PortageException:
pass
if profilever is None:
try:
profilever = "!" + os.readlink(profile)
		except OSError:
pass
if profilever is None:
profilever = "unavailable"
libcver = []
libclist = set()
for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
if not atom.blocker:
libclist.update(vardb.match(atom))
if libclist:
for cpv in sorted(libclist):
libc_split = portage.catpkgsplit(cpv)[1:]
if libc_split[-1] == "r0":
libc_split = libc_split[:-1]
libcver.append("-".join(libc_split))
else:
libcver = ["unavailable"]
gccver = getgccversion(chost)
	unameout = platform.release() + " " + platform.machine()
return "Portage %s (%s, %s, %s, %s, %s)" % \
(portage.VERSION, pythonver, profilever, gccver, ",".join(libcver), unameout)
class _emerge_config(SlotObject):
__slots__ = ('action', 'args', 'opts',
'running_config', 'target_config', 'trees')
# Support unpack as tuple, for load_emerge_config backward compatibility.
def __iter__(self):
yield self.target_config.settings
yield self.trees
yield self.target_config.mtimedb
def __getitem__(self, index):
return list(self)[index]
def __len__(self):
return 3
def load_emerge_config(emerge_config=None, **kargs):
if emerge_config is None:
emerge_config = _emerge_config(**kargs)
kwargs = {}
for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
("eprefix", "EPREFIX")):
v = os.environ.get(envvar, None)
if v and v.strip():
kwargs[k] = v
emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
**kwargs)
for root_trees in emerge_config.trees.values():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_config = RootConfig(settings, root_trees, setconfig)
if "root_config" in root_trees:
# Propagate changes to the existing instance,
# which may be referenced by a depgraph.
root_trees["root_config"].update(root_config)
else:
root_trees["root_config"] = root_config
target_eroot = emerge_config.trees._target_eroot
emerge_config.target_config = \
emerge_config.trees[target_eroot]['root_config']
emerge_config.target_config.mtimedb = portage.MtimeDB(
os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
emerge_config.running_config = emerge_config.trees[
emerge_config.trees._running_eroot]['root_config']
QueryCommand._db = emerge_config.trees
return emerge_config
def getgccversion(chost=None):
"""
	@rtype: C{str}
	@return: the current in-use gcc version
"""
gcc_ver_command = ['gcc', '-dumpversion']
gcc_ver_prefix = 'gcc-'
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
"!!! to update the environment of this terminal and possibly\n" +
"!!! other terminals also.\n"
)
if chost:
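		# Ask gcc-config first; it reports the gcc profile selected for
		# CHOST even when several versions are installed side by side.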
try:
proc = subprocess.Popen(["gcc-config", "-c"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
try:
proc = subprocess.Popen(gcc_ver_command,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
# Warn about features that may confuse users and
# lead them to report invalid bugs.
_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
def validate_ebuild_environment(trees):
features_warn = set()
for myroot in trees:
settings = trees[myroot]["vartree"].settings
settings.validate()
features_warn.update(
_emerge_features_warn.intersection(settings.features))
if features_warn:
msg = "WARNING: The FEATURES variable contains one " + \
"or more values that should be disabled under " + \
"normal circumstances: %s" % " ".join(features_warn)
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 65):
out.ewarn(line)
check_locale()
def check_procfs():
procfs_path = '/proc'
if platform.system() not in ("Linux",) or \
os.path.ismount(procfs_path):
return os.EX_OK
msg = "It seems that %s is not mounted. You have been warned." % procfs_path
writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
level=logging.ERROR, noiselevel=-1)
return 1
def config_protect_check(trees):
for root, root_trees in trees.items():
settings = root_trees["root_config"].settings
if not settings.get("CONFIG_PROTECT"):
msg = "!!! CONFIG_PROTECT is empty"
if settings["ROOT"] != "/":
msg += " for '%s'" % root
msg += "\n"
writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def apply_priorities(settings):
ionice(settings)
nice(settings)
def nice(settings):
try:
os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
except (OSError, ValueError) as e:
out = portage.output.EOutput()
out.eerror("Failed to change nice value to '%s'" % \
settings.get("PORTAGE_NICENESS", "0"))
out.eerror("%s\n" % str(e))
def ionice(settings):
ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
if ionice_cmd:
ionice_cmd = portage.util.shlex_split(ionice_cmd)
if not ionice_cmd:
return
variables = {"PID" : str(os.getpid())}
cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
try:
rval = portage.process.spawn(cmd, env=os.environ)
except portage.exception.CommandNotFound:
# The OS kernel probably doesn't support ionice,
# so return silently.
return
if rval != os.EX_OK:
out = portage.output.EOutput()
out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def setconfig_fallback(root_config):
setconfig = root_config.setconfig
setconfig._create_default_config()
setconfig._parse(update=True)
root_config.sets = setconfig.getSets()
def get_missing_sets(root_config):
# emerge requires existence of "world", "selected", and "system"
missing_sets = []
for s in ("selected", "system", "world",):
if s not in root_config.sets:
missing_sets.append(s)
return missing_sets
def missing_sets_warning(root_config, missing_sets):
if len(missing_sets) > 2:
missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
missing_sets_str += ', and "%s"' % missing_sets[-1]
elif len(missing_sets) == 2:
missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
else:
missing_sets_str = '"%s"' % missing_sets[-1]
msg = ["emerge: incomplete set configuration, " + \
"missing set(s): %s" % missing_sets_str]
if root_config.sets:
msg.append(" sets defined: %s" % ", ".join(root_config.sets))
global_config_path = portage.const.GLOBAL_CONFIG_PATH
if portage.const.EPREFIX:
global_config_path = os.path.join(portage.const.EPREFIX,
portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
msg.append(" This usually means that '%s'" % \
(os.path.join(global_config_path, "sets/portage.conf"),))
msg.append(" is missing or corrupt.")
msg.append(" Falling back to default world and system set configuration!!!")
for line in msg:
writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
def ensure_required_sets(trees):
warning_shown = False
for root_trees in trees.values():
missing_sets = get_missing_sets(root_trees["root_config"])
if missing_sets and not warning_shown:
warning_shown = True
missing_sets_warning(root_trees["root_config"], missing_sets)
if missing_sets:
setconfig_fallback(root_trees["root_config"])
def expand_set_arguments(myfiles, myaction, root_config):
retval = os.EX_OK
setconfig = root_config.setconfig
sets = setconfig.getSets()
# In order to know exactly which atoms/sets should be added to the
# world file, the depgraph performs set expansion later. It will get
# confused about where the atoms came from if it's not allowed to
# expand them itself.
do_not_expand = myaction is None
newargs = []
for a in myfiles:
if a in ("system", "world"):
newargs.append(SETPREFIX+a)
else:
newargs.append(a)
myfiles = newargs
del newargs
newargs = []
# separators for set arguments
ARG_START = "{"
ARG_END = "}"
for i in range(0, len(myfiles)):
if myfiles[i].startswith(SETPREFIX):
start = 0
end = 0
x = myfiles[i][len(SETPREFIX):]
newset = ""
while x:
start = x.find(ARG_START)
end = x.find(ARG_END)
if start > 0 and start < end:
namepart = x[:start]
argpart = x[start+1:end]
# TODO: implement proper quoting
args = argpart.split(",")
options = {}
for a in args:
if "=" in a:
k, v = a.split("=", 1)
options[k] = v
else:
options[a] = "True"
setconfig.update(namepart, options)
newset += (x[:start-len(namepart)]+namepart)
x = x[end+len(ARG_END):]
else:
newset += x
x = ""
myfiles[i] = SETPREFIX+newset
sets = setconfig.getSets()
# display errors that occurred while loading the SetConfig instance
for e in setconfig.errors:
print(colorize("BAD", "Error during set creation: %s" % e))
unmerge_actions = ("unmerge", "prune", "clean", "depclean", "rage-clean")
for a in myfiles:
if a.startswith(SETPREFIX):
s = a[len(SETPREFIX):]
if s not in sets:
display_missing_pkg_set(root_config, s)
return (None, 1)
if s == "installed":
msg = ("The @installed set is not recommended when "
"updating packages because it will often "
"introduce unsolved blocker conflicts. Please "
"refer to bug #387059 for details.")
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 57):
out.ewarn(line)
setconfig.active.append(s)
if do_not_expand:
# Loading sets can be slow, so skip it here, in order
# to allow the depgraph to indicate progress with the
# spinner while sets are loading (bug #461412).
newargs.append(a)
continue
try:
set_atoms = setconfig.getSetAtoms(s)
except portage.exception.PackageSetNotFound as e:
writemsg_level(("emerge: the given set '%s' " + \
"contains a non-existent set named '%s'.\n") % \
(s, e), level=logging.ERROR, noiselevel=-1)
if s in ('world', 'selected') and \
SETPREFIX + e.value in sets['selected']:
writemsg_level(("Use `emerge --deselect %s%s` to "
"remove this set from world_sets.\n") %
(SETPREFIX, e,), level=logging.ERROR,
noiselevel=-1)
return (None, 1)
if myaction in unmerge_actions and \
not sets[s].supportsOperation("unmerge"):
writemsg_level("emerge: the given set '%s' does " % s + \
"not support unmerge operations\n",
level=logging.ERROR, noiselevel=-1)
retval = 1
elif not set_atoms:
writemsg_level("emerge: '%s' is an empty set\n" % s,
level=logging.INFO, noiselevel=-1)
else:
newargs.extend(set_atoms)
for error_msg in sets[s].errors:
writemsg_level("%s\n" % (error_msg,),
level=logging.ERROR, noiselevel=-1)
else:
newargs.append(a)
return (newargs, retval)
def repo_name_check(trees):
missing_repo_names = set()
for root_trees in trees.values():
porttree = root_trees.get("porttree")
if porttree:
portdb = porttree.dbapi
missing_repo_names.update(portdb.getMissingRepoNames())
# Skip warnings about missing repo_name entries for
# /usr/local/portage (see bug #248603).
try:
missing_repo_names.remove('/usr/local/portage')
except KeyError:
pass
if missing_repo_names:
msg = []
msg.append("WARNING: One or more repositories " + \
"have missing repo_name entries:")
msg.append("")
for p in missing_repo_names:
msg.append("\t%s/profiles/repo_name" % (p,))
msg.append("")
msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
"should be a plain text file containing a unique " + \
"name for the repository on the first line.", 70))
msg.append("\n")
writemsg_level("".join("%s\n" % l for l in msg),
level=logging.WARNING, noiselevel=-1)
return bool(missing_repo_names)
def repo_name_duplicate_check(trees):
ignored_repos = {}
for root, root_trees in trees.items():
if 'porttree' in root_trees:
portdb = root_trees['porttree'].dbapi
if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
for repo_name, paths in portdb.getIgnoredRepos():
k = (root, repo_name, portdb.getRepositoryPath(repo_name))
ignored_repos.setdefault(k, []).extend(paths)
if ignored_repos:
msg = []
msg.append('WARNING: One or more repositories ' + \
'have been ignored due to duplicate')
msg.append(' profiles/repo_name entries:')
msg.append('')
for k in sorted(ignored_repos):
msg.append(' %s overrides' % ", ".join(k))
for path in ignored_repos[k]:
msg.append(' %s' % (path,))
msg.append('')
msg.extend(' ' + x for x in textwrap.wrap(
"All profiles/repo_name entries must be unique in order " + \
"to avoid having duplicates ignored. " + \
"Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
"/etc/portage/make.conf if you would like to disable this warning."))
msg.append("\n")
writemsg_level(''.join('%s\n' % l for l in msg),
level=logging.WARNING, noiselevel=-1)
return bool(ignored_repos)
def run_action(emerge_config):
# skip global updates prior to sync, since it's called after sync
if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
emerge_config.opts.get('--package-moves') != 'n' and \
_global_updates(emerge_config.trees,
emerge_config.target_config.mtimedb["updates"],
quiet=("--quiet" in emerge_config.opts)):
emerge_config.target_config.mtimedb.commit()
# Reload the whole config from scratch.
load_emerge_config(emerge_config=emerge_config)
xterm_titles = "notitles" not in \
emerge_config.target_config.settings.features
if xterm_titles:
xtermTitle("emerge")
if "--digest" in emerge_config.opts:
os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
# Reload the whole config from scratch so that the portdbapi internal
# config is updated with new FEATURES.
load_emerge_config(emerge_config=emerge_config)
# NOTE: adjust_configs() can map options to FEATURES, so any relevant
# options adjustments should be made prior to calling adjust_configs().
if "--buildpkgonly" in emerge_config.opts:
emerge_config.opts["--buildpkg"] = True
if "getbinpkg" in emerge_config.target_config.settings.features:
emerge_config.opts["--getbinpkg"] = True
if "--getbinpkgonly" in emerge_config.opts:
emerge_config.opts["--getbinpkg"] = True
if "--getbinpkgonly" in emerge_config.opts:
emerge_config.opts["--usepkgonly"] = True
if "--getbinpkg" in emerge_config.opts:
emerge_config.opts["--usepkg"] = True
if "--usepkgonly" in emerge_config.opts:
emerge_config.opts["--usepkg"] = True
if "--buildpkgonly" in emerge_config.opts:
# --buildpkgonly will not merge anything, so
# it cancels all binary package options.
for opt in ("--getbinpkg", "--getbinpkgonly",
"--usepkg", "--usepkgonly"):
emerge_config.opts.pop(opt, None)
adjust_configs(emerge_config.opts, emerge_config.trees)
apply_priorities(emerge_config.target_config.settings)
if ("--autounmask-continue" in emerge_config.opts and
emerge_config.opts.get("--autounmask") == "n"):
writemsg_level(
" %s --autounmask-continue has been disabled by --autounmask=n\n" %
warn("*"), level=logging.WARNING, noiselevel=-1)
for fmt in emerge_config.target_config.settings.get("PORTAGE_BINPKG_FORMAT", "").split():
		if fmt not in portage.const.SUPPORTED_BINPKG_FORMATS:
if "--pkg-format" in emerge_config.opts:
problematic="--pkg-format"
else:
problematic="PORTAGE_BINPKG_FORMAT"
writemsg_level(("emerge: %s is not set correctly. Format " + \
"'%s' is not supported.\n") % (problematic, fmt),
level=logging.ERROR, noiselevel=-1)
return 1
if emerge_config.action == 'version':
writemsg_stdout(getportageversion(
emerge_config.target_config.settings["PORTDIR"],
None,
emerge_config.target_config.settings.profile_path,
emerge_config.target_config.settings.get("CHOST"),
emerge_config.target_config.trees['vartree'].dbapi) + '\n',
noiselevel=-1)
return 0
elif emerge_config.action == 'help':
emerge_help()
return 0
spinner = stdout_spinner()
if "candy" in emerge_config.target_config.settings.features:
spinner.update = spinner.update_scroll
if "--quiet" not in emerge_config.opts:
portage.deprecated_profile_check(
settings=emerge_config.target_config.settings)
repo_name_check(emerge_config.trees)
repo_name_duplicate_check(emerge_config.trees)
config_protect_check(emerge_config.trees)
check_procfs()
for mytrees in emerge_config.trees.values():
mydb = mytrees["porttree"].dbapi
# Freeze the portdbapi for performance (memoize all xmatch results).
mydb.freeze()
if emerge_config.action in ('search', None) and \
"--usepkg" in emerge_config.opts:
# Populate the bintree with current --getbinpkg setting.
# This needs to happen before expand_set_arguments(), in case
# any sets use the bintree.
try:
mytrees["bintree"].populate(
getbinpkgs="--getbinpkg" in emerge_config.opts)
except ParseError as e:
writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
% e, noiselevel=-1)
return 1
del mytrees, mydb
for x in emerge_config.args:
if x.endswith((".ebuild", ".tbz2")) and \
os.path.exists(os.path.abspath(x)):
print(colorize("BAD", "\n*** emerging by path is broken "
"and may not always work!!!\n"))
break
if emerge_config.action == "list-sets":
writemsg_stdout("".join("%s\n" % s for s in
sorted(emerge_config.target_config.sets)))
return os.EX_OK
elif emerge_config.action == "check-news":
news_counts = count_unread_news(
emerge_config.target_config.trees["porttree"].dbapi,
emerge_config.target_config.trees["vartree"].dbapi)
if any(news_counts.values()):
display_news_notifications(news_counts)
elif "--quiet" not in emerge_config.opts:
print("", colorize("GOOD", "*"), "No news items were found.")
return os.EX_OK
ensure_required_sets(emerge_config.trees)
if emerge_config.action is None and \
"--resume" in emerge_config.opts and emerge_config.args:
writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
" ".join(emerge_config.args), noiselevel=-1)
return 1
# only expand sets for actions taking package arguments
oldargs = emerge_config.args[:]
if emerge_config.action in ("clean", "config", "depclean",
"info", "prune", "unmerge", "rage-clean", None):
newargs, retval = expand_set_arguments(
emerge_config.args, emerge_config.action,
emerge_config.target_config)
if retval != os.EX_OK:
return retval
# Need to handle empty sets specially, otherwise emerge will react
# with the help message for empty argument lists
if oldargs and not newargs:
print("emerge: no targets left after set expansion")
return 0
emerge_config.args = newargs
if "--tree" in emerge_config.opts and \
"--columns" in emerge_config.opts:
print("emerge: can't specify both of \"--tree\" and \"--columns\".")
return 1
if '--emptytree' in emerge_config.opts and \
'--noreplace' in emerge_config.opts:
writemsg_level("emerge: can't specify both of " + \
"\"--emptytree\" and \"--noreplace\".\n",
level=logging.ERROR, noiselevel=-1)
return 1
if ("--quiet" in emerge_config.opts):
spinner.update = spinner.update_quiet
portage.util.noiselimit = -1
if "--fetch-all-uri" in emerge_config.opts:
emerge_config.opts["--fetchonly"] = True
if "--skipfirst" in emerge_config.opts and \
"--resume" not in emerge_config.opts:
emerge_config.opts["--resume"] = True
# Allow -p to remove --ask
if "--pretend" in emerge_config.opts:
emerge_config.opts.pop("--ask", None)
# forbid --ask when not in a terminal
# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
noiselevel=-1)
return 1
if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
spinner.update = spinner.update_quiet
portage.util.noiselimit = 0
if "python-trace" in emerge_config.target_config.settings.features:
portage.debug.set_trace(True)
if not ("--quiet" in emerge_config.opts):
if '--nospinner' in emerge_config.opts or \
emerge_config.target_config.settings.get('TERM') == 'dumb' or \
not sys.stdout.isatty():
spinner.update = spinner.update_basic
if "--debug" in emerge_config.opts:
print("myaction", emerge_config.action)
print("myopts", emerge_config.opts)
if not emerge_config.action and not emerge_config.args and \
"--resume" not in emerge_config.opts:
emerge_help()
return 1
pretend = "--pretend" in emerge_config.opts
fetchonly = "--fetchonly" in emerge_config.opts or \
"--fetch-all-uri" in emerge_config.opts
buildpkgonly = "--buildpkgonly" in emerge_config.opts
# check if root user is the current user for the actions where emerge needs this
if portage.data.secpass < 2:
# We've already allowed "--version" and "--help" above.
if "--pretend" not in emerge_config.opts and \
emerge_config.action not in ("search", "info"):
need_superuser = emerge_config.action in ('clean', 'depclean',
'deselect', 'prune', 'unmerge', "rage-clean") or not \
(fetchonly or \
(buildpkgonly and portage.data.secpass >= 1) or \
emerge_config.action in ("metadata", "regen", "sync"))
if portage.data.secpass < 1 or \
need_superuser:
if need_superuser:
access_desc = "superuser"
else:
access_desc = "portage group"
# Always show portage_group_warning() when only portage group
# access is required but the user is not in the portage group.
if "--ask" in emerge_config.opts:
writemsg_stdout("This action requires %s access...\n" % \
(access_desc,), noiselevel=-1)
if portage.data.secpass < 1 and not need_superuser:
portage.data.portage_group_warning()
uq = UserQuery(emerge_config.opts)
if uq.query("Would you like to add --pretend to options?",
"--ask-enter-invalid" in emerge_config.opts) == "No":
return 128 + signal.SIGINT
emerge_config.opts["--pretend"] = True
emerge_config.opts.pop("--ask")
else:
sys.stderr.write(("emerge: %s access is required\n") \
% access_desc)
if portage.data.secpass < 1 and not need_superuser:
portage.data.portage_group_warning()
return 1
# Disable emergelog for everything except build or unmerge operations.
# This helps minimize parallel emerge.log entries that can confuse log
# parsers like genlop.
disable_emergelog = False
for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
if x in emerge_config.opts:
disable_emergelog = True
break
if disable_emergelog:
pass
elif emerge_config.action in ("search", "info"):
disable_emergelog = True
elif portage.data.secpass < 1:
disable_emergelog = True
import _emerge.emergelog
_emerge.emergelog._disable = disable_emergelog
if not disable_emergelog:
emerge_log_dir = \
emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
if emerge_log_dir:
try:
# At least the parent needs to exist for the lock file.
portage.util.ensure_dirs(emerge_log_dir)
except portage.exception.PortageException as e:
writemsg_level("!!! Error creating directory for " + \
"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
(emerge_log_dir, e),
noiselevel=-1, level=logging.ERROR)
portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
else:
_emerge.emergelog._emerge_log_dir = emerge_log_dir
else:
_emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
portage.const.EPREFIX.lstrip(os.sep), "var", "log")
portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
if not "--pretend" in emerge_config.opts:
time_fmt = "%b %d, %Y %H:%M:%S"
if sys.hexversion < 0x3000000:
time_fmt = portage._unicode_encode(time_fmt)
time_str = time.strftime(time_fmt, time.localtime(time.time()))
# Avoid potential UnicodeDecodeError in Python 2, since strftime
# returns bytes in Python 2, and %b may contain non-ascii chars.
time_str = _unicode_decode(time_str,
encoding=_encodings['content'], errors='replace')
emergelog(xterm_titles, "Started emerge on: %s" % time_str)
myelogstr=""
if emerge_config.opts:
opt_list = []
for opt, arg in emerge_config.opts.items():
if arg is True:
opt_list.append(opt)
elif isinstance(arg, list):
# arguments like --exclude that use 'append' action
for x in arg:
opt_list.append("%s=%s" % (opt, x))
else:
opt_list.append("%s=%s" % (opt, arg))
myelogstr=" ".join(opt_list)
if emerge_config.action:
myelogstr += " --" + emerge_config.action
if oldargs:
myelogstr += " " + " ".join(oldargs)
emergelog(xterm_titles, " *** emerge " + myelogstr)
oldargs = None
def emergeexitsig(signum, frame):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
portage.util.writemsg(
"\n\nExiting on signal %(signal)s\n" % {"signal":signum})
sys.exit(128 + signum)
signal.signal(signal.SIGTERM, emergeexitsig)
def emergeexit():
"""This gets out final log message in before we quit."""
if "--pretend" not in emerge_config.opts:
emergelog(xterm_titles, " *** terminating.")
if xterm_titles:
xtermTitleReset()
portage.atexit_register(emergeexit)
if emerge_config.action in ("config", "metadata", "regen", "sync"):
if "--pretend" in emerge_config.opts:
sys.stderr.write(("emerge: The '%s' action does " + \
"not support '--pretend'.\n") % emerge_config.action)
return 1
if "sync" == emerge_config.action:
return action_sync(emerge_config)
elif "metadata" == emerge_config.action:
action_metadata(emerge_config.target_config.settings,
emerge_config.target_config.trees['porttree'].dbapi,
emerge_config.opts)
elif emerge_config.action=="regen":
validate_ebuild_environment(emerge_config.trees)
return action_regen(emerge_config.target_config.settings,
emerge_config.target_config.trees['porttree'].dbapi,
emerge_config.opts.get("--jobs"),
emerge_config.opts.get("--load-average"))
	# CONFIG action
elif "config" == emerge_config.action:
validate_ebuild_environment(emerge_config.trees)
action_config(emerge_config.target_config.settings,
emerge_config.trees, emerge_config.opts, emerge_config.args)
# SEARCH action
elif "search" == emerge_config.action:
validate_ebuild_environment(emerge_config.trees)
action_search(emerge_config.target_config,
emerge_config.opts, emerge_config.args, spinner)
elif emerge_config.action in \
('clean', 'depclean', 'deselect', 'prune', 'unmerge', 'rage-clean'):
validate_ebuild_environment(emerge_config.trees)
rval = action_uninstall(emerge_config.target_config.settings,
emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
emerge_config.opts, emerge_config.action,
emerge_config.args, spinner)
if not (emerge_config.action == 'deselect' or
buildpkgonly or fetchonly or pretend):
post_emerge(emerge_config.action, emerge_config.opts,
emerge_config.args, emerge_config.target_config.root,
emerge_config.trees, emerge_config.target_config.mtimedb, rval)
return rval
elif emerge_config.action == 'info':
# Ensure atoms are valid before calling unmerge().
vardb = emerge_config.target_config.trees['vartree'].dbapi
portdb = emerge_config.target_config.trees['porttree'].dbapi
bindb = emerge_config.target_config.trees['bintree'].dbapi
valid_atoms = []
for x in emerge_config.args:
if is_valid_package_atom(x, allow_repo=True):
try:
#look at the installed files first, if there is no match
#look at the ebuilds, since EAPI 4 allows running pkg_info
#on non-installed packages
valid_atom = dep_expand(x, mydb=vardb)
if valid_atom.cp.split("/")[0] == "null":
valid_atom = dep_expand(x, mydb=portdb)
if valid_atom.cp.split("/")[0] == "null" and \
"--usepkg" in emerge_config.opts:
valid_atom = dep_expand(x, mydb=bindb)
valid_atoms.append(valid_atom)
except portage.exception.AmbiguousPackageName as e:
msg = "The short ebuild name \"" + x + \
"\" is ambiguous. Please specify " + \
"one of the following " + \
"fully-qualified ebuild names instead:"
for line in textwrap.wrap(msg, 70):
writemsg_level("!!! %s\n" % (line,),
level=logging.ERROR, noiselevel=-1)
for i in e.args[0]:
writemsg_level(" %s\n" % colorize("INFORM", i),
level=logging.ERROR, noiselevel=-1)
writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
return 1
continue
msg = []
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
return 1
return action_info(emerge_config.target_config.settings,
emerge_config.trees, emerge_config.opts, valid_atoms)
# "update", "system", or just process files:
else:
validate_ebuild_environment(emerge_config.trees)
for x in emerge_config.args:
if x.startswith(SETPREFIX) or \
is_valid_package_atom(x, allow_repo=True):
continue
if x[:1] == os.sep:
continue
try:
os.lstat(x)
continue
except OSError:
pass
msg = []
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
return 1
# GLEP 42 says to display news *after* an emerge --pretend
if "--pretend" not in emerge_config.opts:
uq = UserQuery(emerge_config.opts)
if display_news_notification(emerge_config.target_config,
emerge_config.opts) \
and "--ask" in emerge_config.opts \
and "--read-news" in emerge_config.opts \
and uq.query("Would you like to read the news items while " \
"calculating dependencies?",
'--ask-enter-invalid' in emerge_config.opts) == "Yes":
try:
subprocess.call(['eselect', 'news', 'read'])
# If eselect is not installed, Python <3.3 will throw an
# OSError. >=3.3 will throw a FileNotFoundError, which is a
# subclass of OSError.
except OSError:
writemsg("Please install eselect to use this feature.\n",
noiselevel=-1)
retval = action_build(emerge_config, spinner=spinner)
post_emerge(emerge_config.action, emerge_config.opts,
emerge_config.args, emerge_config.target_config.root,
emerge_config.trees, emerge_config.target_config.mtimedb, retval)
return retval
| gpl-2.0 | -8,773,382,806,802,808,000 | 31.743489 | 93 | 0.670465 | false |
KamLii/Databaes | Databaes/urls.py | 1 | 1369 | """Databaes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
#from account.views import UserRegistrationFormView, LoginView, logout_view
from . import views
from user_profile.views import SignupView
urlpatterns = [
url(r'^$', views.homepage, name='homepage'),
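    # 'home' is kept as an alias so templates can reverse either name.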
url(r'^$', views.homepage, name='home'),
url(r'^admin/', admin.site.urls),
url(r'^crate/', include('Crate.urls')),
url(r"^account/signup/$", SignupView.as_view(), name="account_signup"),
url(r"^account/", include("account.urls")),
url(r'^payments/', include('pinax.stripe.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | 7,490,611,764,296,033,000 | 40.484848 | 79 | 0.705625 | false |
santhoshtr/mlmorph | tests/mlmorph-test.py | 1 | 2470 | import json
import unittest
import sys
import os
import re
from mlmorph import Generator, Analyser
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
class Struct:
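    """Wrap a decoded JSON object so its keys are attribute-accessible."""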
def __init__(self, entries):
self.__dict__.update(**entries)
class AnalyserGeneratorTests(unittest.TestCase):
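    """Round-trip tests: analyse each word and regenerate it from its analysis."""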
generator = Generator()
analyser = Analyser()
def setUp(self):
self.testFile = open(os.path.join(CURR_DIR, 'tests.json'))
self.tests = json.load(self.testFile, object_hook=Struct)
def tearDown(self):
self.testFile.close()
def test_analyse(self):
print('\t**** Analyse tests ****\t')
line = 0
for test in self.tests:
line += 1
with self.subTest(test.word):
anals = self.analyser.analyse(test.word)
match = False
if not (hasattr(test, 'skip') and test.skip):
self.assertTrue(len(anals) != 0,
'Analysis failed for ' + test.word)
else:
continue
print('%3d %s\t<--\t%s' % (line, test.word, anals))
for index in range(len(anals)):
if test.analysis == anals[index][0]:
match = True
break
if not (hasattr(test, 'skip') and test.skip):
self.assertEqual(
match, True, 'Analysis for ' + test.analysis)
def test_generate(self):
print('\t**** Generate tests ****\t')
line = 0
for test in self.tests:
line += 1
with self.subTest(test.word):
match = False
gens = self.generator.generate(test.analysis, True)
if not (hasattr(test, 'skip') and test.skip):
self.assertTrue(
len(gens) != 0, 'Generate failed for ' + test.analysis)
else:
continue
print('%3d %s\t<--\t%s' % (line, test.analysis, gens))
for index in range(len(gens)):
if test.word == gens[index][0]:
match = True
break
if not (hasattr(test, 'skip') and test.skip):
self.assertEqual(
match, True, 'Generate for ' + test.analysis)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,491,526,984,626,035,000 | 33.305556 | 79 | 0.487045 | false |
Chilledheart/vbox | src/VBox/ValidationKit/testmanager/webui/wuiadmintestbox.py | 1 | 18147 | # -*- coding: utf-8 -*-
# $Id$
"""
Test Manager WUI - TestBox.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard python imports.
import socket;
# Validation Kit imports.
from testmanager.webui.wuicontentbase import WuiListContentWithActionBase, WuiFormContentBase, WuiLinkBase, WuiSvnLink, \
WuiTmLink, WuiSpanText, WuiRawHtml;
from testmanager.core.db import TMDatabaseConnection;
from testmanager.core.schedgroup import SchedGroupLogic, SchedGroupData;
from testmanager.core.testbox import TestBoxData;
from testmanager.core.testset import TestSetData;
from common import utils;
from testmanager.core.db import isDbTimestampInfinity;
class WuiTestBox(WuiFormContentBase):
"""
WUI TestBox Form Content Generator.
"""
def __init__(self, oData, sMode, oDisp):
if sMode == WuiFormContentBase.ksMode_Add:
            sTitle = 'Create TestBox';
if oData.uuidSystem is not None and len(oData.uuidSystem) > 10:
sTitle += ' - ' + oData.uuidSystem;
elif sMode == WuiFormContentBase.ksMode_Edit:
sTitle = 'Edit TestBox - %s (#%s)' % (oData.sName, oData.idTestBox);
else:
assert sMode == WuiFormContentBase.ksMode_Show;
sTitle = 'TestBox - %s (#%s)' % (oData.sName, oData.idTestBox);
WuiFormContentBase.__init__(self, oData, sMode, 'TestBox', oDisp, sTitle);
        # Try to enter sName as hostname (no domain) when creating the testbox.
if sMode == WuiFormContentBase.ksMode_Add \
and self._oData.sName in [None, ''] \
and self._oData.ip not in [None, '']:
try:
(self._oData.sName, _, _) = socket.gethostbyaddr(self._oData.ip);
except:
pass;
offDot = self._oData.sName.find('.');
if offDot > 0:
self._oData.sName = self._oData.sName[:offDot];
def _populateForm(self, oForm, oData):
oForm.addIntRO( TestBoxData.ksParam_idTestBox, oData.idTestBox, 'TestBox ID');
oForm.addIntRO( TestBoxData.ksParam_idGenTestBox, oData.idGenTestBox, 'TestBox generation ID');
oForm.addTimestampRO(TestBoxData.ksParam_tsEffective, oData.tsEffective, 'Last changed');
oForm.addTimestampRO(TestBoxData.ksParam_tsExpire, oData.tsExpire, 'Expires (excl)');
oForm.addIntRO( TestBoxData.ksParam_uidAuthor, oData.uidAuthor, 'Changed by UID');
oForm.addText( TestBoxData.ksParam_ip, oData.ip, 'TestBox IP Address');
oForm.addUuid( TestBoxData.ksParam_uuidSystem, oData.uuidSystem, 'TestBox System/Firmware UUID');
oForm.addText( TestBoxData.ksParam_sName, oData.sName, 'TestBox Name');
oForm.addText( TestBoxData.ksParam_sDescription, oData.sDescription, 'TestBox Description');
oForm.addComboBox( TestBoxData.ksParam_idSchedGroup, oData.idSchedGroup, 'Scheduling Group',
SchedGroupLogic(TMDatabaseConnection()).getSchedGroupsForCombo());
oForm.addCheckBox( TestBoxData.ksParam_fEnabled, oData.fEnabled, 'Enabled');
oForm.addComboBox( TestBoxData.ksParam_enmLomKind, oData.enmLomKind, 'Lights-out-management',
TestBoxData.kaoLomKindDescs);
oForm.addText( TestBoxData.ksParam_ipLom, oData.ipLom, 'Lights-out-management IP Address');
oForm.addInt( TestBoxData.ksParam_pctScaleTimeout, oData.pctScaleTimeout, 'Timeout scale factor (%)');
## @todo Pretty format the read-only fields and use hidden fields for
# passing the actual values. (Yes, we need the values so we can
# display the form correctly on input error.)
oForm.addTextRO( TestBoxData.ksParam_sOs, oData.sOs, 'TestBox OS');
oForm.addTextRO( TestBoxData.ksParam_sOsVersion, oData.sOsVersion, 'TestBox OS version');
oForm.addTextRO( TestBoxData.ksParam_sCpuArch, oData.sCpuArch, 'TestBox OS kernel architecture');
oForm.addTextRO( TestBoxData.ksParam_sCpuVendor, oData.sCpuVendor, 'TestBox CPU vendor');
oForm.addTextRO( TestBoxData.ksParam_sCpuName, oData.sCpuName, 'TestBox CPU name');
if oData.lCpuRevision:
oForm.addTextRO( TestBoxData.ksParam_lCpuRevision, '%#x' % (oData.lCpuRevision,), 'TestBox CPU revision',
sPostHtml = ' (family=%#x model=%#x stepping=%#x)'
% (oData.getCpuFamily(), oData.getCpuModel(), oData.getCpuStepping(),),
sSubClass = 'long');
else:
oForm.addLongRO( TestBoxData.ksParam_lCpuRevision, oData.lCpuRevision, 'TestBox CPU revision');
oForm.addIntRO( TestBoxData.ksParam_cCpus, oData.cCpus, 'Number of CPUs, cores and threads');
oForm.addCheckBoxRO( TestBoxData.ksParam_fCpuHwVirt, oData.fCpuHwVirt, 'VT-x or AMD-V supported');
oForm.addCheckBoxRO( TestBoxData.ksParam_fCpuNestedPaging, oData.fCpuNestedPaging, 'Nested paging supported');
oForm.addCheckBoxRO( TestBoxData.ksParam_fCpu64BitGuest, oData.fCpu64BitGuest, '64-bit guest supported');
oForm.addCheckBoxRO( TestBoxData.ksParam_fChipsetIoMmu, oData.fChipsetIoMmu, 'I/O MMU supported');
oForm.addMultilineTextRO(TestBoxData.ksParam_sReport, oData.sReport, 'Hardware/software report');
oForm.addLongRO( TestBoxData.ksParam_cMbMemory, oData.cMbMemory, 'Installed RAM size (MB)');
oForm.addLongRO( TestBoxData.ksParam_cMbScratch, oData.cMbScratch, 'Available scratch space (MB)');
oForm.addIntRO( TestBoxData.ksParam_iTestBoxScriptRev, oData.iTestBoxScriptRev,
'TestBox Script SVN revision');
# Later:
#if not self.isAttributeNull(''):
# sHexVer = '%s.%s.%.%s' % (oData.iPythonHexVersion >> 24, (oData.iPythonHexVersion >> 16) & 0xff,
# (oData.iPythonHexVersion >> 8) & 0xff, oData.iPythonHexVersion & 0xff);
#else:
# sHexVer = str(oData.iPythonHexVersion);
oForm.addIntRO( TestBoxData.ksParam_iPythonHexVersion, oData.iPythonHexVersion,
'Python version (hex)');
if self._sMode == WuiFormContentBase.ksMode_Edit:
oForm.addComboBox(TestBoxData.ksParam_enmPendingCmd, oData.enmPendingCmd, 'Pending command',
TestBoxData.kaoTestBoxCmdDescs);
else:
oForm.addComboBoxRO(TestBoxData.ksParam_enmPendingCmd, oData.enmPendingCmd, 'Pending command',
TestBoxData.kaoTestBoxCmdDescs);
if self._sMode != WuiFormContentBase.ksMode_Show:
oForm.addSubmit('Create TestBox' if self._sMode == WuiFormContentBase.ksMode_Add else 'Change TestBox');
return True;
class WuiTestBoxList(WuiListContentWithActionBase):
"""
WUI TestBox List Content Generator.
"""
## Descriptors for the combo box.
kasTestBoxActionDescs = \
[ \
[ 'none', 'Select an action...', '' ],
[ 'enable', 'Enable', '' ],
[ 'disable', 'Disable', '' ],
TestBoxData.kaoTestBoxCmdDescs[1],
TestBoxData.kaoTestBoxCmdDescs[2],
TestBoxData.kaoTestBoxCmdDescs[3],
TestBoxData.kaoTestBoxCmdDescs[4],
TestBoxData.kaoTestBoxCmdDescs[5],
];
def __init__(self, aoEntries, iPage, cItemsPerPage, tsEffective, fnDPrint, oDisp):
WuiListContentWithActionBase.__init__(self, aoEntries, iPage, cItemsPerPage, tsEffective,
sTitle = 'TestBoxes', sId = 'users', fnDPrint = fnDPrint, oDisp = oDisp);
self._asColumnHeaders.extend([ 'Name', 'LOM', 'Status',
'Cmd', 'Script', 'Python', 'Group',
'OS', 'CPU', 'Features', 'CPUs', 'RAM', 'Scratch',
'Actions' ]);
self._asColumnAttribs.extend([ 'align="center"', 'align="center"', 'align="center"',
'align="center"', 'align="center"', 'align="center"', 'align="center"',
'', '', '', 'align="right"', 'align="right"', 'align="right"',
'align="center"' ]);
self._aoActions = list(self.kasTestBoxActionDescs);
self._aoSchedGroups = SchedGroupLogic(self._oDisp.getDb()).fetchOrderedByName();
self._dSchedGroups = dict();
for oSchedGroup in self._aoSchedGroups:
self._aoActions.append([ 'setgroup-%u' % (oSchedGroup.idSchedGroup,),
'Migrate to group %s (#%u)' % (oSchedGroup.sName, oSchedGroup.idSchedGroup,),
oSchedGroup.sDescription ]);
self._dSchedGroups[oSchedGroup.idSchedGroup] = oSchedGroup;
self._sAction = oDisp.ksActionTestBoxListPost;
self._sCheckboxName = TestBoxData.ksParam_idTestBox;
def _formatListEntry(self, iEntry): # pylint: disable=R0914
from testmanager.webui.wuiadmin import WuiAdmin;
oEntry = self._aoEntries[iEntry];
        # Lights-out management.
if oEntry.enmLomKind == TestBoxData.ksLomKind_ILOM:
aoLom = [ WuiLinkBase('ILOM', 'https://%s/' % (oEntry.ipLom,), fBracketed = False), ];
elif oEntry.enmLomKind == TestBoxData.ksLomKind_ELOM:
aoLom = [ WuiLinkBase('ELOM', 'http://%s/' % (oEntry.ipLom,), fBracketed = False), ];
elif oEntry.enmLomKind == TestBoxData.ksLomKind_AppleXserveLom:
aoLom = [ 'Apple LOM' ];
elif oEntry.enmLomKind == TestBoxData.ksLomKind_None:
aoLom = [ 'none' ];
else:
aoLom = [ 'Unexpected enmLomKind value "%s"' % (oEntry.enmLomKind,) ];
if oEntry.ipLom is not None:
if oEntry.enmLomKind in [ TestBoxData.ksLomKind_ILOM, TestBoxData.ksLomKind_ELOM ]:
aoLom += [ WuiLinkBase('(ssh)', 'ssh://%s' % (oEntry.ipLom,), fBracketed = False) ];
aoLom += [ WuiRawHtml('<br>'), '%s' % (oEntry.ipLom,) ];
# State and Last seen.
if oEntry.oStatus is None:
oSeen = WuiSpanText('tmspan-offline', 'Never');
oState = '';
else:
oDelta = oEntry.tsCurrent - oEntry.oStatus.tsUpdated;
if oDelta.days <= 0 and oDelta.seconds <= 15*60: # 15 mins and we consider you dead.
oSeen = WuiSpanText('tmspan-online', u'%s\u00a0s\u00a0ago' % (oDelta.days * 24 * 3600 + oDelta.seconds,));
else:
oSeen = WuiSpanText('tmspan-offline', u'%s' % (self.formatTsShort(oEntry.oStatus.tsUpdated),));
if oEntry.oStatus.idTestSet is None:
oState = str(oEntry.oStatus.enmState);
else:
from testmanager.webui.wuimain import WuiMain;
oState = WuiTmLink(oEntry.oStatus.enmState, WuiMain.ksScriptName,
{ WuiMain.ksParamAction: WuiMain.ksActionTestResultDetails,
TestSetData.ksParam_idTestSet: oEntry.oStatus.idTestSet, },
sTitle = '#%u' % (oEntry.oStatus.idTestSet,),
fBracketed = False);
# Group link.
oGroup = self._dSchedGroups.get(oEntry.idSchedGroup);
oGroupLink = WuiTmLink(oGroup.sName if oGroup is not None else str(oEntry.idSchedGroup),
WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionSchedGroupEdit,
SchedGroupData.ksParam_idSchedGroup: oEntry.idSchedGroup, },
sTitle = '#%u' % (oEntry.idSchedGroup,),
fBracketed = False);
# Reformat the OS version to take less space.
aoOs = [ 'N/A' ];
if oEntry.sOs is not None and oEntry.sOsVersion is not None and oEntry.sCpuArch:
sOsVersion = oEntry.sOsVersion;
if sOsVersion[0] not in [ 'v', 'V', 'r', 'R'] \
and sOsVersion[0].isdigit() \
and sOsVersion.find('.') in range(4) \
and oEntry.sOs in [ 'linux', 'solaris', 'darwin', ]:
sOsVersion = 'v' + sOsVersion;
sVer1 = sOsVersion;
sVer2 = None;
if oEntry.sOs == 'linux':
iSep = sOsVersion.find(' / ');
if iSep > 0:
sVer1 = sOsVersion[:iSep].strip();
sVer2 = sOsVersion[iSep + 3:].strip();
sVer2 = sVer2.replace('Red Hat Enterprise Linux Server', 'RHEL');
elif oEntry.sOs == 'solaris':
iSep = sOsVersion.find(' (');
if iSep > 0 and sOsVersion[-1] == ')':
sVer1 = sOsVersion[:iSep].strip();
sVer2 = sOsVersion[iSep + 2:-1].strip();
aoOs = [
WuiSpanText('tmspan-osarch', u'%s.%s' % (oEntry.sOs, oEntry.sCpuArch,)),
WuiSpanText('tmspan-osver1', sVer1.replace('-', u'\u2011'),),
];
if sVer2 is not None:
aoOs += [ WuiRawHtml('<br>'), WuiSpanText('tmspan-osver2', sVer2.replace('-', u'\u2011')), ];
# Format the CPU revision.
oCpu = None;
if oEntry.lCpuRevision is not None and oEntry.sCpuVendor is not None and oEntry.sCpuName is not None:
oCpu = [
u'%s (fam:%xh\u00a0m:%xh\u00a0s:%xh)'
% (oEntry.sCpuVendor, oEntry.getCpuFamily(), oEntry.getCpuModel(), oEntry.getCpuStepping(),),
WuiRawHtml('<br>'),
oEntry.sCpuName,
];
else:
oCpu = [];
if oEntry.sCpuVendor is not None:
oCpu.append(oEntry.sCpuVendor);
if oEntry.lCpuRevision is not None:
oCpu.append('%#x' % (oEntry.lCpuRevision,));
if oEntry.sCpuName is not None:
oCpu.append(oEntry.sCpuName);
# Stuff cpu vendor and cpu/box features into one field.
asFeatures = []
if oEntry.fCpuHwVirt is True: asFeatures.append(u'HW\u2011Virt');
if oEntry.fCpuNestedPaging is True: asFeatures.append(u'Nested\u2011Paging');
if oEntry.fCpu64BitGuest is True: asFeatures.append(u'64\u2011bit\u2011Guest');
if oEntry.fChipsetIoMmu is True: asFeatures.append(u'I/O\u2011MMU');
sFeatures = u' '.join(asFeatures) if len(asFeatures) > 0 else u'';
        # Collect applicable actions.
aoActions = [
WuiTmLink('Details', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxDetails,
TestBoxData.ksParam_idTestBox: oEntry.idTestBox,
WuiAdmin.ksParamEffectiveDate: self._tsEffectiveDate, } ),
]
if isDbTimestampInfinity(oEntry.tsExpire):
aoActions += [
WuiTmLink('Edit', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxEdit,
TestBoxData.ksParam_idTestBox: oEntry.idTestBox, } ),
WuiTmLink('Remove', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxRemovePost,
TestBoxData.ksParam_idTestBox: oEntry.idTestBox },
sConfirm = 'Are you sure that you want to remove %s (%s)?' % (oEntry.sName, oEntry.ip) ),
]
if oEntry.sOs not in [ 'win', 'os2', ] and oEntry.ip is not None:
aoActions.append(WuiLinkBase('ssh', 'ssh://vbox@%s' % (oEntry.ip,),));
return [ self._getCheckBoxColumn(iEntry, oEntry.idTestBox),
[ WuiSpanText('tmspan-name', oEntry.sName), WuiRawHtml('<br>'), '%s' % (oEntry.ip,),],
aoLom,
[
'' if oEntry.fEnabled else 'disabled / ',
oState,
WuiRawHtml('<br>'),
oSeen,
],
oEntry.enmPendingCmd,
WuiSvnLink(oEntry.iTestBoxScriptRev),
oEntry.formatPythonVersion(),
oGroupLink,
aoOs,
oCpu,
sFeatures,
oEntry.cCpus if oEntry.cCpus is not None else 'N/A',
utils.formatNumberNbsp(oEntry.cMbMemory) + u'\u00a0MB' if oEntry.cMbMemory is not None else 'N/A',
utils.formatNumberNbsp(oEntry.cMbScratch) + u'\u00a0MB' if oEntry.cMbScratch is not None else 'N/A',
aoActions,
];
| gpl-2.0 | 2,805,759,869,928,952,300 | 53.824773 | 123 | 0.578333 | false |
Makki1/old-svn | avr/sketchbook/GiraRM_Debug/freebus/freebus_ets/software/freebus-ets/src/GUI/FB_ProgramFrame.py | 1 | 10920 | #!/usr/bin/python
#-*- coding: iso-8859-1 -*-
#===============================================================================
# __________ ________________ __ _______
# / ____/ __ \/ ____/ ____/ __ )/ / / / ___/
# / /_ / /_/ / __/ / __/ / __ / / / /\__ \
# / __/ / _, _/ /___/ /___/ /_/ / /_/ /___/ /
# /_/ /_/ |_/_____/_____/_____/\____//____/
#
#Source File: FB_ProgramFrame.py
#Version: V0.1 , 29.08.2009
#Author: Jerome Leisner
#email: [email protected]
#===============================================================================
import os
import sys
import time
#import thread
#import Queue
#import threading
#import thread
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import pickle
import jpype
import thread
from Global import Global
from GUI import FB_DlgConnectionManager
class FB_ProgramFrame(object):
__curProject = None #project object
__cbConnections = None #widget combo connections
__bConnect = None #widget connect button
__parentClass = None #object of its own class
__curConnectionInstance = None #instance of the current connection (FB_EIBConnection)
#Devices in programming mode
__ListViewProgDevices = None #widget Tree/Listview to show devices in programming mode
    __CheckTimer = None                     #timer object to check devices periodically
__toggleCheckProgDevices = None
def __init__(self,curProject):
self.__parentClass = self
self.__curProject = curProject
GladeObj = gtk.glade.XML(Global.GUIPath + Global.GladeFile,"winProgramming")
dic = { "on_bConnectionConfig_clicked":self.ShowConnectionManager ,
"on_bTestConnection_clicked":self.ClickTestConnection,
"on_bConnect_toggled":self.ToggleConnect,
"on_cbConnections_changed":self.ConnectionsChanged,
"on_toggleCheckProgDevices_toggled":self.ToggleCheckProgDevices,
}
GladeObj.signal_autoconnect(dic)
#read widgets
self.__cbConnections = GladeObj.get_widget("cbConnections")
self.__bConnect = GladeObj.get_widget("bConnect")
self.__ListViewProgDevices = GladeObj.get_widget("ListViewProgDevices")
self.__toggleCheckProgDevices = GladeObj.get_widget("toggleCheckProgDevices")
#init model combobox to show connections
liststore = gtk.ListStore(str,str) #just one string at first..., 2nd string for GUID
self.__cbConnections.set_model(liststore)
self.text_cell = gtk.CellRendererText()
self.__cbConnections.pack_start(self.text_cell,True)
self.__cbConnections.add_attribute(self.text_cell, "text", 0)
#init model tree/listview to show devices in progmode
liststore = gtk.ListStore(gtk.gdk.Pixbuf, str)
self.__ListViewProgDevices.set_model(liststore)
self.text_cell = gtk.CellRendererText() #Text Object
self.img_cell = gtk.CellRendererPixbuf() #Image Object
self.column = gtk.TreeViewColumn()
self.column.pack_start(self.img_cell, False)
self.column.pack_start(self.text_cell,True)
self.column.add_attribute(self.img_cell, "pixbuf",0)
self.column.add_attribute(self.text_cell, "text", 1)
self.column.set_attributes(self.text_cell, markup=1)
self.__ListViewProgDevices.append_column(self.column)
#init timer to check devices in progmode
#self.__CheckTimer = threading.Timer(5.0, self.ReadDevicesInProgMode)
self.LoadConnectionFromDB()
self.UpdateUserConnections()
winProgramming = GladeObj.get_widget("winProgramming")
winProgramming.show()
#Dialog: Connection-Manager
def ShowConnectionManager(self,widget, data=None):
FB_DlgConnectionManager.FB_DlgConnectionManager(self.__curProject, self.__parentClass)
#button: Test-Connection
#open the current connection and test it...
def ClickTestConnection(self,widget, data=None):
pass
def ToggleConnect(self,widget, data=None):
model = self.__cbConnections.get_model()
iter = self.__cbConnections.get_active_iter()
id = model.get_value(iter,1)
self.__curConnectionInstance = self.getEIBConnection(id)
if widget.get_active() == True:
#connect
self.__curConnectionInstance.doConnect()
else:
#disconnect
self.__curConnectionInstance.doDisconnect()
self.SetConnectButtonState(widget)
#callback change combo connections
def ConnectionsChanged(self,widget, data=None):
#disconnect in case of changing the connection
if self.__curConnectionInstance <> None:
self.__curConnectionInstance.doDisconnect()
self.SetConnectButtonState(self.__bConnect)
def SetConnectButtonState(self,widget):
if self.__curConnectionInstance.isConnected() == True:
widget.set_active(True)
widget.set_label("Verbunden")
else:
widget.set_active(False)
widget.set_label("Verbinden")
#gets the instance of a FB_EIBConnection with the given id
def getEIBConnection(self,id):
RValue = None
if self.__curProject <> None:
if self.__curProject.eibConnectionList <> None:
for i in range(len(self.__curProject.eibConnectionList)):
if id == self.__curProject.eibConnectionList[i].getID():
RValue = self.__curProject.eibConnectionList[i]
break
return RValue
    ##Updates the connections combobox in the parent frame so the user can
    ##view and select the available connections.
def UpdateUserConnections(self):
try:
#copy list in combo connections in program_Frame (parent)
if(self.__curProject <> None):# and self._MyConnection <> None):
model = self.__cbConnections.get_model()
#save id of the current connection / which is currently selected
curIter = self.__cbConnections.get_active_iter()
if curIter <> None:
idsaved = model.get_value(curIter,1) #column 1 = id
else:
idsaved = 0
model.clear()
IterSaved = None #init Iterator
for i in range(len(self.__curProject.eibConnectionList)):
Name = self.__curProject.eibConnectionList[i].getName()
typeID = self.__curProject.eibConnectionList[i].getType()
Type = str(Global.ConTypesText[typeID])
id = self.__curProject.eibConnectionList[i].getID()
tmp = Name + " mit '" + Type + "'"
iter = model.append([tmp, id])
#look if saved id is still in list and set this item to the active item
if idsaved == id:
IterSaved = iter
#connection still existing...
if IterSaved <> None:
self.__cbConnections.set_active_iter(IterSaved)
else:
if len(self.__curProject.eibConnectionList) > 0:
self.__cbConnections.set_active(0)
else:
#no connections in list or no valid project is loaded
model = self.__cbConnections.get_model()
model.clear()
except:
pass
def LoadConnectionFromDB(self):
#try:
cursor = Global.DatabaseConnection.cursor()
cursor.execute("SELECT * FROM Connections")
del self.__curProject.eibConnectionList[0:len(self.__curProject.eibConnectionList)]
for row in cursor:
tmpCon = pickle.loads(row[2]) #column 2 contains class data
self.__curProject.eibConnectionList.append(tmpCon)
#except:
# pass
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
##button to start reading Devices in progmode
##
def ToggleCheckProgDevices(self,widget,Data=None):
if widget.get_active() == True:
widget.set_label("zyklischer Suchlauf...")
self.ReadDevicesInProgMode()
#self.__CheckTimer.start()
else:
widget.set_label("Suchlauf starten")
#self.__CheckTimer.cancel()
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#section physical addresses
def ReadDevicesInProgMode(self):
#read the PA of devices in programming mode
try:
mngClient = Global.ManagementClientImpl(self.__curConnectionInstance.getKNXNetworkLink())
IndivAddrList = mngClient.readAddress(False)
model = self.__ListViewProgDevices.get_model()
model.clear()
image=gtk.gdk.pixbuf_new_from_file(Global.ImagePath + "Device.png")
for Addr in IndivAddrList:
Iterator = model.append([image,Addr.toString()])
except jpype.JavaException, ex :
error = ""
if jpype.JavaException.javaClass(ex) is Global.KNXTimeoutException:
error = U"keine Geräte im Programmiermodus : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXInvalidResponseException :
error = U"ungültige Antwort beim Lesen der Addressen : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXLinkClosedException:
error = U"kein geöffneter Netzwerk-Link : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXRemoteException:
error = U"Fehler beim Remote-Server : " + str(jpype.JavaException.message(ex))
msgbox = gtk.MessageDialog(parent = None, buttons = gtk.BUTTONS_OK,
flags = gtk.DIALOG_MODAL, type = gtk.MESSAGE_ERROR,
message_format = error )
msgbox.set_title(Global.ERRORCONNECTIONTITLE)
#result = msgbox.run()
#msgbox.destroy()
| gpl-3.0 | 865,511,498,934,486,400 | 39.83908 | 111 | 0.554487 | false |
rapidpro/chatpro | chatpro/profiles/models.py | 1 | 4753 | from __future__ import absolute_import, unicode_literals
from chatpro.rooms.models import Room
from dash.orgs.models import Org
from dash.utils import intersection
from dash.utils.sync import ChangeType
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from temba.types import Contact as TembaContact
from uuid import uuid4
from .tasks import push_contact_change
class AbstractParticipant(models.Model):
full_name = models.CharField(verbose_name=_("Full name"), max_length=128, null=True)
chat_name = models.CharField(verbose_name=_("Chat name"), max_length=16, null=True,
help_text=_("Shorter name used for chat messages"))
class Meta:
abstract = True
class Contact(AbstractParticipant):
"""
Corresponds to a RapidPro contact who is tied to a single room
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='contacts')
room = models.ForeignKey(Room, verbose_name=_("Room"), related_name='contacts',
help_text=_("Room which this contact belongs in"))
urn = models.CharField(verbose_name=_("URN"), max_length=255)
is_active = models.BooleanField(default=True, help_text=_("Whether this contact is active"))
created_by = models.ForeignKey(User, null=True, related_name="contact_creations",
help_text="The user which originally created this item")
created_on = models.DateTimeField(auto_now_add=True,
help_text="When this item was originally created")
modified_by = models.ForeignKey(User, null=True, related_name="contact_modifications",
help_text="The user which last modified this item")
modified_on = models.DateTimeField(auto_now=True,
help_text="When this item was last modified")
@classmethod
def create(cls, org, user, full_name, chat_name, urn, room, uuid=None):
if org.id != room.org_id: # pragma: no cover
raise ValueError("Room does not belong to org")
# if we don't have a UUID, then we created this contact
if not uuid:
do_push = True
uuid = unicode(uuid4())
else:
do_push = False
# create contact
contact = cls.objects.create(org=org, full_name=full_name, chat_name=chat_name, urn=urn, room=room, uuid=uuid,
created_by=user, modified_by=user)
if do_push:
contact.push(ChangeType.created)
return contact
@classmethod
def kwargs_from_temba(cls, org, temba_contact):
org_room_uuids = [r.uuid for r in Room.get_all(org)]
room_uuids = intersection(org_room_uuids, temba_contact.groups)
room = Room.objects.get(org=org, uuid=room_uuids[0]) if room_uuids else None
if not room:
raise ValueError("No room with uuid in %s" % ", ".join(temba_contact.groups))
return dict(org=org,
full_name=temba_contact.name,
chat_name=temba_contact.fields.get(org.get_chat_name_field(), None),
urn=temba_contact.urns[0],
room=room,
uuid=temba_contact.uuid)
def as_temba(self):
temba_contact = TembaContact()
temba_contact.name = self.full_name
temba_contact.urns = [self.urn]
temba_contact.fields = {self.org.get_chat_name_field(): self.chat_name}
temba_contact.groups = [self.room.uuid]
temba_contact.uuid = self.uuid
return temba_contact
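    # Round-trip sketch (hypothetical): the Temba representation produced by
    # as_temba() can be fed back through kwargs_from_temba(), e.g.
    #   kwargs = Contact.kwargs_from_temba(contact.org, contact.as_temba())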
def push(self, change_type):
push_contact_change.delay(self.id, change_type)
def get_urn(self):
return tuple(self.urn.split(':', 1))
def release(self):
self.is_active = False
self.save()
self.push(ChangeType.deleted)
def as_participant_json(self):
return dict(id=self.id, type='C', full_name=self.full_name, chat_name=self.chat_name)
def __unicode__(self):
if self.full_name:
return self.full_name
elif self.chat_name:
return self.chat_name
else:
return self.get_urn()[1]
class Profile(AbstractParticipant):
"""
Extension for the user class
"""
user = models.OneToOneField(User)
change_password = models.BooleanField(default=False, help_text=_("User must change password on next login"))
def as_participant_json(self):
return dict(id=self.user_id, type='U', full_name=self.full_name, chat_name=self.chat_name)
| bsd-3-clause | 4,602,666,662,489,233,000 | 36.132813 | 118 | 0.622554 | false |
AlandSailingRobots/sailingrobot | update_config.py | 1 | 1807 | #!/usr/bin/python3
# Updates the configuration in the json to the database
# Can run without argument for using standard file
# Or specify the file by passing it as a argument
import json
import sqlite3
import sys
if len(sys.argv) > 1:
if str(sys.argv[1]) == 'ASPire':
filename = 'config_ASPire.json'
elif str(sys.argv[1]) == 'Janet':
filename = 'config_Janet.json'
else :
filename = str(sys.argv[1])
else:
filename = 'config_ASPire.json'
print(filename)
try:
cfg = json.load(open(filename))
except FileNotFoundError:
    sys.exit('Failed to open the file.\nPlease pass either \'ASPire\', \'Janet\' or a file path as the argument.')
conn = sqlite3.connect('asr.db')
db = conn.cursor()
for table in cfg:
data = cfg[table]
setstr = ''
keystr = ''
valstr = ''
for key, value in cfg[table].items():
if isinstance(value, str):
value = '"' + value + '"'
else:
value = str(value)
if (setstr == ''):
setstr = key + ' = ' + value
keystr = key
valstr = value
else:
setstr = setstr + ', ' + key + ' = ' + value
keystr = keystr + ', ' + key
valstr = valstr + ', ' + value
try:
db.execute('SELECT count(*) FROM ' + str(table) + ';')
except sqlite3.OperationalError:
        sys.exit('Failed to retrieve the tables.\nCheck that the selected file \''+filename+'\' corresponds to the current database configuration')
count = db.fetchone()[0]
if count == 0:
db.execute('INSERT INTO ' + str(table) + ' (' + keystr +
') VALUES (' + valstr + ');')
else:
db.execute('UPDATE ' + str(table) + ' SET ' +
setstr + ' WHERE ID = 1;')
conn.commit()
db.close()
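# Example invocations (paths are illustrative; the JSON file names follow the
# convention handled above):
#   python3 update_config.py ASPire           # loads config_ASPire.json
#   python3 update_config.py Janet            # loads config_Janet.json
#   python3 update_config.py path/to/my.json  # loads an explicit file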
| gpl-2.0 | 8,881,442,081,132,022,000 | 28.145161 | 143 | 0.556724 | false |
xu6148152/Binea_Python_Project | PythonCookbook/text_str/strs_and_text.py | 1 | 7706 | # !python3
import re
def test_re_split():
line = 'asdf fjdk; dfjkaf, fdjksf, jdksf, foo'
print(re.split(r'[;,\s]\s*', line))
fields = re.split(r'(;|,|\s)\s*', line)
print(fields)
values = fields[::2]
print(values)
delimiter = fields[1::2] + ['']
print(delimiter)
print(re.split(r'(?:,|;|\s)\s*', line))
def test_start_with():
filenames = ['Makefile', 'foo.c', 'bar.py', 'spam.c', 'spam.h']
print([name for name in filenames if name.endswith(('.c', '.h'))])
print(any(name.endswith('.py')) for name in filenames)
def test_fnmatch():
from fnmatch import fnmatch, fnmatchcase
print(fnmatch('foo.txt', '*.txt'))
print(fnmatchcase('foo.txt', '*.TXT'))
def test_str_match():
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
text1 = '11/27/2012'
text2 = 'Nov 27, 2012'
m = datepat.match(text1)
print(m.group(0))
print(m.group(1))
print(m.group(2))
print(m.group(3))
print(m.groups())
text = 'Today is 11/27/2012. PyCon starts 3/13/2013'
print(datepat.findall(text))
def test_str_replace():
text = 'Today is 11/27/2012. PyCon starts 3/13/2013'
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
print(datepat.sub(r'\3-\1-\2', text))
print(datepat.sub(change_date, text))
def change_date(m):
from calendar import month_abbr
mon_name = month_abbr[int(m.group(1))]
return '{} {} {}'.format(m.group(2), mon_name, m.group(3))
def test_unicode():
s1 = 'Spicy Jalape\u00f1o'
s2 = 'Spicy Jalapen\u0303o'
s3 = 'Spicy Jalape\xf1o'
import unicodedata
    # NFC composes each character into a single, fully composed form
t1 = unicodedata.normalize('NFC', s1)
t2 = unicodedata.normalize('NFC', s2)
    # NFD decomposes each character into multiple combining code points
t3 = unicodedata.normalize('NFD', s3)
print(t1)
print(t2)
print(t3)
def test_strip():
s = ' Hello world \n'
print(s.strip())
t = '--------------hello========'
print(t.strip('-='))
def test_translate():
import unicodedata
import sys
digitmap = {c: ord('0') + unicodedata.digit(chr(c))
for c in range(sys.maxunicode)
if unicodedata.category(chr(c)) == 'Nd'}
x = '\u0661\u0662\u0663'
print(x.translate(digitmap))
def test_just():
text = 'Hello World'
print(text.ljust(20, '='))
print(text.rjust(20))
print(text.center(20, '*'))
print(format(text, '=>20'))
print(format(text, '*^20'))
print('{:>10s} {:>10s}'.format('Hello', 'World'))
def test_join():
parts = ['Is', 'Chicago', 'Not', 'Chicago?']
print(' '.join(parts))
print(','.join(parts))
print(''.join(parts))
a = 'Is Chicago'
b = 'Not Chicago'
c = 'None'
print(a + ' ' + b)
print('Hello' 'World')
date = ['ACME', 50, 91.1]
print(','.join(str(d) for d in date))
print(a, b, c, sep=':')
def test_format():
s = '{name} has {n} message'
print(s.format(name='Guido', n=37))
name = 'Guido'
# n = 37
# print(s.format_map(vars()))
print(s.format_map(SafeSub(vars())))
print(sub('Hello {name}'))
print(sub('You have {n} messages.'))
class SafeSub(dict):
def __missing__(self, key):
return '{' + key + '}'
def sub(text):
import sys
return text.format_map(SafeSub(sys._getframe(1).f_locals))
def test_textwrap():
s = "Look into my eyes, look into my eyes, the eyes, the eyes, " \
"the eyes, not around the eyes, don't look around the eyes," \
"look into my eyes, you're under"
import textwrap
print(textwrap.fill(s, 40, initial_indent=' '))
print(textwrap.fill(s, 40, subsequent_indent=' '))
# os.get_terminal_size().columns
def generate_tokens(pat, text):
from collections import namedtuple
Token = namedtuple('Token', ['type', 'value'])
scanner = pat.scanner(text)
for m in iter(scanner.match, None):
yield Token(m.lastgroup, m.group())
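# Minimal usage sketch for generate_tokens; the token pattern below is an
# illustrative assumption, not part of the original recipe.
def test_generate_tokens():
    pat = re.compile(r'(?P<NAME>[a-zA-Z_][a-zA-Z_0-9]*)|(?P<NUM>\d+)|(?P<WS>\s+)')
    for tok in generate_tokens(pat, 'foo 23 bar'):
        print(tok)  # e.g. Token(type='NAME', value='foo')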
def test_bin_text():
a = b'Hello World'
print(a)
print(a[0])
print(a.decode('ascii'))
def test_gz_file():
import gzip
with gzip.open('somefile.gz', 'rt') as f:
text = f.read()
print(text)
def test_bz2_file():
import bz2
with bz2.open('somefile.bz2', 'rt') as f:
text = f.read()
print(text)
def test_partial_file():
from functools import partial
RECORD_SIZE = 32
with open('somefile.data', 'rb') as f:
records = iter(partial(f.read, RECORD_SIZE), b'')
def read_into_buffer(filename):
import os.path
buf = bytearray(os.path.getsize(filename))
with open(filename, 'rb') as f:
f.readinto(buf)
return buf
def test_buffer():
with open('sample.bin', 'wb') as f:
f.write(b'Hello World')
buf = read_into_buffer('sample.bin')
print(buf)
print(buf[0:5])
m1 = memoryview(buf)
m2 = m1[-5:]
print(m2)
m2[:] = b'WORLD'
print(buf)
import os
import mmap
def memory_map(filename, access=mmap.ACCESS_WRITE):
size = os.path.getsize(filename)
fd = os.open(filename, os.O_RDWR)
return mmap.mmap(fd, size, access=access)
def test_mmap():
size = 1000000
with open('data', 'wb') as f:
f.seek(size - 1)
f.write(b'\x00')
m = memory_map('data')
print(len(m))
print(m[0:10])
print(m[0])
m[0:11] = b'Hello World'
m.close()
with open('data', 'rb') as f:
print(f.read(11))
def test_filepath():
import os
path = os.path.abspath('.')
print(os.path.basename(path))
print(os.path.dirname(path))
print(os.path.join('tmp', 'data', os.path.basename(path)))
print(os.path.expanduser(path))
print(os.path.split(path))
def test_file_exist():
print(os.path.exists('.'))
print(os.path.isfile('xt.bin'))
print(os.path.isdir(os.path.dirname(os.path.abspath('.'))))
print(os.path.islink('.'))
print(os.path.getsize('.'))
def test_file_list():
print(os.listdir('.'))
from fnmatch import fnmatch
pyfiles = [name for name in os.listdir('.') if fnmatch(name, '*.py')]
print(pyfiles)
import glob
print(glob.glob('./*.py'))
import time
name_sz_date = [(name, os.path.getsize(name), os.path.getmtime(name)) for name in pyfiles]
for name, size, mtime in name_sz_date:
try:
print(name, size, time.ctime(mtime))
except UnicodeEncodeError:
print(bad_filename(name))
def test_filename_encode():
import sys
print(sys.getfilesystemencoding())
def bad_filename(filename):
return repr(filename)[1:-1]
def test_write_bin_file():
import sys
sys.stdout.buffer.write(b'Hello\n')
def test_tempfile():
from tempfile import TemporaryFile
from tempfile import NamedTemporaryFile
with TemporaryFile('w+t') as f:
f.write('Hello World')
f.write('Testing\n')
f.seek(0)
data = f.read()
with NamedTemporaryFile('w+t') as f:
print('filename is:', f.name)
def test_serial():
import pickle
data = 'Hello, World'
f = open('somefile', 'wb')
pickle.dump(data, f)
f = open('somefile', 'rb')
data = pickle.load(f)
print(data)
f = open('somedata', 'wb')
pickle.dump([1, 2, 3, 4], f)
pickle.dump('hello', f)
pickle.dump({'Apple', 'Pear', 'Banana'}, f)
f.close()
f = open('somedata', 'rb')
print(pickle.load(f))
print(pickle.load(f))
print(pickle.load(f))
def test_countdown():
from class_object import countdown
c = countdown.Countdown(30)
print(c)
f = open('cstate.p', 'wb')
import pickle
pickle.dump(c, f)
f.close()
f = open('cstate.p', 'rb')
print(pickle.load(f))
if __name__ == '__main__':
test_countdown() | mit | 9,049,429,624,892,995,000 | 22.530675 | 94 | 0.580574 | false |
microsoft/EconML | econml/drlearner.py | 1 | 1068 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import econml.dr as dr
from .utilities import deprecated
@deprecated("The econml.drlearner.DRLearner class has been moved to econml.dr.DRLearner; "
"an upcoming release will remove support for the old name")
class DRLearner(dr.DRLearner):
pass
@deprecated("The econml.drlearner.LinearDRLearner class has been moved to econml.dr.LinearDRLearner; "
"an upcoming release will remove support for the old name")
class LinearDRLearner(dr.LinearDRLearner):
pass
@deprecated("The econml.drlearner.SparseLinearDRLearner class has been moved to econml.dr.SparseLinearDRLearner; "
"an upcoming release will remove support for the old name")
class SparseLinearDRLearner(dr.SparseLinearDRLearner):
pass
@deprecated("The econml.drlearner.ForestDRLearner class has been moved to econml.dr.ForestDRLearner; "
"an upcoming release will remove support for the old name")
class ForestDRLearner(dr.ForestDRLearner):
pass
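# Migration sketch (hypothetical usage): importing from this module still
# works but triggers the deprecation notice, so prefer the new location, e.g.
#   from econml.dr import LinearDRLearner   # instead of econml.drlearner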
| mit | 8,100,481,333,035,169,000 | 35.827586 | 114 | 0.7603 | false |
andrewderekjackson/python_lcd_menu | lcd_menu/menu.py | 1 | 4202 | import os
class MenuItem(object):
'''A single menu item which can contain child menu items'''
def __init__(self, title, items=None, refresh_callback=None, refresh_callback_args = None):
self._title = title
self._items = items
self._refresh_callback = refresh_callback
self._refresh_callback_args = refresh_callback_args
@property
def title(self):
return self._title
@property
def items(self):
return self._items
def refresh(self):
if self._refresh_callback is not None:
self._items = self._refresh_callback(self, self._refresh_callback_args)
class Command(MenuItem):
'''A single menu item which executes a callback when selected'''
def __init__(self, title, command, arg=None):
MenuItem.__init__(self, title, None)
self._command = command
self._arg = arg
def invoke_command(self):
if self._command is not None:
self._command(self, self._arg)
return True
return False
def refresh(self):
pass
class MenuView(object):
'''Represents a current menu level and tracks the selected item'''
def __init__(self, items):
self._selected_index = 0
self._items = items
@property
def selected_index(self):
return self._selected_index
@selected_index.setter
def selected_index(self, val):
if val >= len(self._items):
self._selected_index = len(self._items)-1
else:
if val > 0:
self._selected_index = val
else:
self._selected_index = 0
@property
def items(self):
return self._items
def down(self):
self.selected_index += 1
def up(self):
self.selected_index -= 1
def refresh(self):
self.selected_item.refresh()
@property
def selected_item(self):
return self._items[self._selected_index]
class Menu(object):
'''Base menu controller responsible for managing the menu'''
def __init__(self, items, update):
self._history = []
self.main_menu = MenuView(items)
self.current_menu = self.main_menu
self.update = update
self.showing_menu = False
# start with the menu closed
self.close()
def menu(self):
"""
Shows the main menu
"""
self.current_menu = self.main_menu
self.showing_menu = True
self.update(self.current_menu)
def up(self):
"""
Navigates up in the menu
"""
self.current_menu.up()
self.update(self.current_menu)
def down(self):
"""
Navigates down in the menu
"""
self.current_menu.down()
self.update(self.current_menu)
def select(self):
"""
Selects the current menu. Either enters a submenu or invokes the command
"""
if isinstance(self.current_menu.selected_item, Command):
self.current_menu.selected_item.invoke_command()
return
if isinstance(self.current_menu.selected_item, MenuItem):
self.current_menu.selected_item.refresh()
if self.current_menu.selected_item.items is not None:
# add current menu to history
self._history.append(self.current_menu)
self.current_menu = MenuView(self.current_menu.selected_item.items)
self.update(self.current_menu)
def back(self):
"""
Returns back to a previous menu
"""
if len(self._history) > 0:
self.current_menu = self._history.pop()
self.update(self.current_menu)
else:
self.close()
def show(self):
"""
Shows the main menu
"""
self.current_menu = self.main_menu
self.showing_menu = True
self.update(self.current_menu)
def close(self):
"""
Closes the menu.
"""
self.current_menu = None
self.showing_menu = False
self.update(self.current_menu)
    # Note: this method is shadowed by the 'update' callback assigned in
    # __init__, so it only acts as a fallback placeholder.
    def update(self):
        pass
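# Minimal usage sketch (hypothetical callback and items; not part of the
# original module):
if __name__ == '__main__':
    def render(view):
        # Console "display": print the items of the current menu level.
        if view is None:
            print('(menu closed)')
            return
        for i, item in enumerate(view.items):
            print('>' if i == view.selected_index else ' ', item.title)

    menu = Menu([MenuItem('Settings', [Command('Reboot', lambda s, a: print('rebooting'))]),
                 Command('About', lambda s, a: print('about'))], render)
    menu.show()    # show the main menu
    menu.down()    # move selection to 'About'
    menu.select()  # invokes the command -> prints 'about'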
| mit | 5,973,343,013,663,982,000 | 24.011905 | 95 | 0.565445 | false |
jozz68x/PyBrowser | src/browser.py | 1 | 7502 | #!/usr/bin/python3
__author__="Jose Diaz - email: [email protected]"
__date__ ="$04/03/2015 05:55:19 AM$"
from homepage import Homepage
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
from urllib.request import urlopen # Interaction with the web
from urllib.error import HTTPError, URLError # Exceptions
class GuiBrowser(ttk.Frame):
MENUBUTTON = dict(relief=FLAT, bd=0, width=30, height=30,
font=("Arial", 11), activebackground="#d6d9db",
cursor="hand2")
ENTRY = dict(relief=FLAT, bd=1, font=("Arial", 11), width=50,
highlightbackground="#acb1b4", highlightcolor="#549beb",
highlightthickness=1)
TEXT = dict(font=("Arial",10), cursor='arrow', state='normal',
autoseparators=5, spacing1=5, wrap=WORD)
FONT = ("Arial", 11)
def __init__(self, master):
""" Contructor."""
super().__init__(master)
self.cargar_imagenes()
self.variables_declarados()
        # Create all widgets.
self.barra_navegacion = self.crear_barra_navegacion()
self.separador = Frame(self, bg="#8a9398", height=1)
self.area_navegacion = self.crear_area_navegacion()
self.area_detalles = self.crear_area_detalles()
        # Position the widgets.
self.barra_navegacion.pack(side=TOP, fill=X, expand=True)
self.separador.pack(side=TOP, fill=X)
self.area_navegacion.pack(side=TOP, fill=BOTH, expand=True)
self.area_detalles.pack(side=BOTTOM, fill=X, expand=True)
def cargar_imagenes(self):
""" Carga todas las imagenes usados en este script."""
imenu_option = Image.open(r"images\menu_option.png")
self.imagenMenuOptions = ImageTk.PhotoImage(imenu_option)
ihome = Image.open(r"images\home.png")
self.imagenHome = ImageTk.PhotoImage(ihome)
iprueba = Image.open(r"images\icon_prueba.png")
self.imagenPrueba = ImageTk.PhotoImage(iprueba)
def variables_declarados(self):
""" Declaracion de variables. """
self.var_entry_search_url = StringVar()
self.var_entry_search_url.set("https://www.python.org/")
def crear_barra_navegacion(self):
""" Crea la barra de navegacion o cabecera del browser implementado con
sus widgets internos donde retona el frame principal."""
barra_browser = Frame(self)
        # Create internal widgets.
btn_home = Menubutton(barra_browser, image=self.imagenHome, bg=barra_browser['bg'],
**self.MENUBUTTON)
lb_url = Label(barra_browser, text= 'URL: ', font=(self.FONT[0],10,"bold"),
bg=barra_browser['bg'])
entry = Entry(barra_browser, textvariable=self.var_entry_search_url,
**self.ENTRY)
btn_menu = Menubutton(barra_browser, image=self.imagenMenuOptions, bg=barra_browser['bg'],
**self.MENUBUTTON)
        # Position the widgets.
btn_home.pack(side=LEFT, padx=5, pady=5)
lb_url.pack(side=LEFT)
entry.pack(side=LEFT, fill=X, expand=True, pady=5)
btn_menu.pack(side=RIGHT, padx=5, pady=5)
        # Widget events.
        btn_home.bind("<Button-1>", lambda e: self.homepage())
        entry.bind("<Return>", lambda e: self.search_url())
        # Return the Frame.
return barra_browser
def crear_area_navegacion(self):
""" Crea la area de navegacion o cuerpo del browser implementado con
sus widgets internos donde retona el frame principal."""
area_navegacion = Frame(self)
        # Create internal widgets.
self.text = Text(area_navegacion, **self.TEXT)
scroller = ttk.Scrollbar(area_navegacion, command=self.text.yview)
self.text.config(yscrollcommand=scroller.set)
        # Position the widgets.
scroller.pack(side=RIGHT, fill=Y)
self.text.pack(fill=BOTH, expand=True)
self.text.configure(state="disabled")
        # Return the Frame.
return area_navegacion
def crear_area_detalles(self):
""" Crea un area para los detalles de la pagina consultada implementado
con sus widgets internos donde retona el frame principal."""
area_detalles = Frame(self)
        # Create internal widgets.
self.text_detalles = Text(area_detalles, **self.TEXT)
scroller = ttk.Scrollbar(area_detalles, command=self.text_detalles.yview)
self.text_detalles.config(yscrollcommand=scroller.set)
        # Position the widgets.
scroller.pack(side=RIGHT, fill=Y)
self.text_detalles.pack(fill=BOTH, expand=True)
        # Return the Frame.
return area_detalles
def homepage(self):
homepage = Homepage(self.text)
homepage.pack(fill=BOTH, expand=True)
def search_url(self):
""" Metodo para Buscar y obtiener informacion de una url.
Escribe y muestra los resultados en los widgets
de Text creados anteriormente."""
self.text.configure(state="normal")
try:
if self.var_entry_search_url.get()=="":
self.message_estado("Ingrese url de una pagina web.")
elif self.var_entry_search_url.get()=="https://":
self.message_estado("Ingrese url de una pagina web.")
else:
try:
# Muestra los datos de la url el en area de navegacion principal.
self.message_estado("Leyendo archivos...")
self.text.delete(1.0, END)
data = urlopen(self.var_entry_search_url.get())
self.text.insert(INSERT, data.read())
# Muestra la url de la pagina en la barra de estado.
geturl = data.geturl()
self.message_estado(geturl)
# Muestra los detalles de la url el en area de detalles.
self.text_detalles.configure(state="normal")
self.text_detalles.delete(1.0, END)
headers = data.info()
self.text_detalles.insert(INSERT, headers)
self.text_detalles.configure(state="disabled")
data.close()
                except HTTPError as e:
                    # HTTPError subclasses URLError, so it must be caught first.
                    msj = "HTTP Error: %s %s" % (e.code, self.var_entry_search_url.get())
                    self.message_estado(msj)
                except URLError as e:
                    msj = "URL Error: %s %s" % (e.reason, self.var_entry_search_url.get())
                    self.message_estado(msj)
except ValueError:
self.message_estado("Ingrese url valida: Error digitacion: '%s'" %self.var_entry_search_url.get())
self.text.configure(state="disabled")
def message_estado(self, text):
""" Muestra un mensaje en la parte inferior de la ventana principal.
Tiene como parametro el texto del mensaje."""
msj_estado = Message(self, text=text, bg='#c6dedd', font=("Arial",8), width=1400)
msj_estado.place(in_=self, relx=0, rely=1, x=0, y=0, anchor="sw", bordermode="outside")
msj_estado.after(2000,lambda: msj_estado.destroy())
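# Minimal launch sketch (assumes homepage.py and the images/ directory are
# available next to this script):
if __name__ == '__main__':
    root = Tk()
    root.title('PyBrowser')
    browser = GuiBrowser(root)
    browser.pack(fill=BOTH, expand=True)
    root.mainloop()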
| gpl-3.0 | 2,472,997,562,993,499,000 | 45.194969 | 110 | 0.578912 | false |
daniel-kurushin/iisu | biu/khc.py | 1 | 6304 | import sys
from struct import pack, unpack
from time import sleep
class KHC(object):
NAME = 'KHC'
	cmd_inc_engine = b'\xae\xae\x01\x00\x01\x08\x00' # increase engine speed and confirm the result
	cmd_dec_engine = b'\xae\xae\x01\x00\x02\x08\x00' # decrease engine speed and confirm the result
	cmd_stop_engine = b'\xae\xae\x01\x00\x07\x07\x00' # stop the engine
	cmd_get_distances = b'\xae\xae\x01\x00\x08\x07\x00' # return the rangefinder distances
	cmd_get_encoders = b'\xae\xae\x01\x00\x09\x07\x00' # wheel encoders
	cmd_reverse = b'\xae\xae\x01\x00\x0a\x08\x00' # toggle reverse on/off
	cmd_brakes = b'\xae\xae\x01\x00\x11\x0a\x00' # brakes
	# | | +--- right: 0 - off, 1 - on
	# | +----- left
	# +------- front
	cmd_get_state = b'\xae\xae\x01\x00\xff\x07\x00' # return the KHC state
	# currentAccelPos - throttle position (engine speed)
	# is_frw_brake ff - front brake on, 00 - off
	# is_lgt_brake ff - left brake on, 00 - off
	# is_rgt_brake ff - right brake on, 00 - off
	# is_reverse ff - reverse on, 00 - off
	# enc_sec - encoder ticks per second
	# enc_min - encoder ticks per minute
currentAccelPos = 0
def parse_distances(self, x):
return dict(
ok = True,
rear = int(unpack('<B', x[3:4])[0]) * 128.0 / 58.0 / 100.0,
left = int(unpack('<B', x[4:5])[0]) * 128.0 / 58.0 / 100.0,
front = int(unpack('<B', x[5:6])[0]) * 128.0 / 58.0 / 100.0,
right = int(unpack('<B', x[6:7])[0]) * 128.0 / 58.0 / 100.0,
)
def parse_engine(self, x):
return dict(
ok = True,
currentAccelPos = int(unpack('<b', x[3:4])[0]),
)
def parse_reverse(self, x):
return dict(
ok = True,
is_reverse = bool(unpack('<b', x[3:4])[0]),
)
def parse_brakes(self, x):
return dict(
ok = True,
is_frw_brake = bool(unpack('<b', x[3:4])[0]),
is_lgt_brake = bool(unpack('<b', x[4:5])[0]),
is_rgt_brake = bool(unpack('<b', x[5:6])[0]),
)
	def parse_encoders(self, x):
		# Placeholder: the encoder reply format is not decoded yet.
		return dict(a=0)
def parse_state(self, x):
return dict(
ok = True,
currentAccelPos = int(unpack('<b', x[3: 4])[0]),
is_frw_brake = bool(unpack('<b', x[4: 5])[0]),
is_lgt_brake = bool(unpack('<b', x[5: 6])[0]),
is_rgt_brake = bool(unpack('<b', x[6: 7])[0]),
is_reverse = bool(unpack('<b', x[7: 8])[0]),
enc_sec = int(unpack('<b', x[8: 9])[0]),
enc_min = int(unpack('<b', x[9:10])[0]),
)
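	# Worked example (hypothetical 10-byte reply; the 3 header bytes are an
	# assumption): parse_state(b'\xae\xae\xff' + bytes([31, 0, 0, 0, 0, 5, 44]))
	# -> currentAccelPos=31, all brakes off, reverse off, enc_sec=5, enc_min=44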
def inc_engine(self):
cmd = self.cmd_inc_engine
v = pack('>b', 1)
print('>>>', cmd, v, file = sys.stderr)
self.port.write(cmd)
self.port.write(v)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
self.currentAccelPos += 1
return self.parse_engine(ret)
def dec_engine(self):
cmd = self.cmd_dec_engine
v = pack('>b', 1)
print('>>>', cmd, v, file = sys.stderr)
self.port.write(cmd)
self.port.write(v)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
self.currentAccelPos -= 1
return self.parse_engine(ret)
def gooo(self, req_acc_pos = 31, rgt_brk = 0, lgt_brk = 0):
backward_needed = req_acc_pos < 0
acc_pos = abs(req_acc_pos)
stop_needed = acc_pos == 0
self.state = self.get_state()
self.brakes(rgt = rgt_brk, lgt = lgt_brk, frw = 0)
if self.state['is_reverse'] != backward_needed and backward_needed:
print(backward_needed, self.state['is_reverse'])
self.reverse(1)
if self.state['is_reverse'] != backward_needed and not backward_needed: self.reverse(0)
self.state = self.get_state()
D = int(acc_pos - self.state['currentAccelPos'])
if D > 0: f = self.inc_engine
else: f = self.dec_engine
for i in range(abs(D)): f()
_ = self.get_state()
pos = _['currentAccelPos']
if _['is_reverse']: pos = -1 * pos
return dict(
ok = pos == req_acc_pos,
requiredAccelPos = req_acc_pos,
currentAccelPos = pos,
)
def stop_engine(self):
cmd = self.cmd_stop_engine
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
self.currentAccelPos = 0
return self.parse_engine(ret)
def reverse(self, v = 1):
cmd = self.cmd_reverse
v = pack('>b', v)
print('>>>', cmd, v, file = sys.stderr)
self.port.write(cmd)
self.port.write(v)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
return self.parse_reverse(ret)
def brakes(self, rgt = 0, lgt = 0, frw = 1):
cmd = self.cmd_brakes
rgt = pack('>b', rgt)
lgt = pack('>b', lgt)
frw = pack('>b', frw)
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
self.port.write(frw)
self.port.write(rgt)
self.port.write(lgt)
ret = self.port.read(6)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 6
return self.parse_brakes(ret)
def get_encoders(self):
		cmd = self.cmd_get_encoders
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(7)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 7
return self.parse_encoders(ret)
def get_state(self):
cmd = self.cmd_get_state
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(10)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 10
return self.parse_state(ret)
def get_distances(self):
cmd = self.cmd_get_distances
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(7)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 7
return self.parse_distances(ret)
def __init__(self, port = None):
if port != None:
self.port = port
else:
raise Exception('port is None')
self.state = self.get_state()
if __name__ == "__main__":
from biu import BIU
khc = KHC(BIU())
print(khc.get_distances())
# print(khc.gooo(31))
# sleep(6)
# print(khc.gooo(-31))
# sleep(6)
print(khc.stop_engine())
| gpl-3.0 | 1,060,215,723,341,621,400 | 28.781095 | 96 | 0.589208 | false |
markgw/jazzparser | src/jazzparser/utils/latex.py | 1 | 1979 | """Latex output utility functions to help with producing valid Latex files.
Utility functions for handling processing and output of Latex.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
def filter_latex(text):
"""
Applies necessary filters to Latex text before outputting. Mainly
involves escaping strings.
"""
text = text.replace("#","\\#")
text = text.replace("%","\\%")
text = text.replace("_", "\\_")
return text
def start_document(title=None, author=None, packages=[], options=[], toc=False):
output = ""
output += "\\documentclass[%s]{article}\n" % ",".join(options+['a4paper'])
for package in packages:
output += "\\usepackage{%s}\n" % package
output += "\\begin{document}\n"
if title is not None:
output += "\\title{%s}\n" % title
if author is not None:
output += "\\author{%s}\n" % author
else:
output += "\\author{}\n"
output += "\\maketitle\n"
if toc:
output += "\\tableofcontents\n"
return output
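# Quick usage sketch (title, author and package values are illustrative):
if __name__ == '__main__':
    print(start_document(title="Example", author="Anon", packages=["amsmath"]))
    print(filter_latex("100% of C# identifiers_with_underscores survive"))
    print("\\end{document}")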
| gpl-3.0 | -1,280,402,752,457,710,600 | 34.339286 | 80 | 0.622537 | false |
landier/imdb-crawler | crawler/libs/sqlalchemy/orm/deprecated_interfaces.py | 1 | 21785 | # orm/deprecated_interfaces.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import event, util
from .interfaces import EXT_CONTINUE
class MapperExtension(object):
"""Base implementation for :class:`.Mapper` event hooks.
.. note::
:class:`.MapperExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.MapperEvents`.
New extension classes subclass :class:`.MapperExtension` and are specified
using the ``extension`` mapper() argument, which is a single
:class:`.MapperExtension` or a list of such::
from sqlalchemy.orm.interfaces import MapperExtension
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
print "instance %s before insert !" % instance
m = mapper(User, users_table, extension=MyExtension())
A single mapper can maintain a chain of ``MapperExtension``
objects. When a particular mapping event occurs, the
corresponding method on each ``MapperExtension`` is invoked
serially, and each method has the ability to halt the chain
from proceeding further::
m = mapper(User, users_table, extension=[ext1, ext2, ext3])
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP has significance within a chain
of ``MapperExtension`` objects that the chain will be stopped
when this symbol is returned. Like EXT_CONTINUE, it also
has additional significance in some cases that a default
mapper activity will not be performed.
"""
@classmethod
def _adapt_instrument_class(cls, self, listener):
cls._adapt_listener_methods(self, listener, ('instrument_class',))
@classmethod
def _adapt_listener(cls, self, listener):
cls._adapt_listener_methods(
self, listener,
(
'init_instance',
'init_failed',
'translate_row',
'create_instance',
'append_result',
'populate_instance',
'reconstruct_instance',
'before_insert',
'after_insert',
'before_update',
'after_update',
'before_delete',
'after_delete'
))
@classmethod
def _adapt_listener_methods(cls, self, listener, methods):
for meth in methods:
me_meth = getattr(MapperExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
if meth == 'reconstruct_instance':
def go(ls_meth):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(self.class_manager, 'load',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_instance':
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_instance
event.listen(self.class_manager, 'init',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_failed':
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(ls_meth, self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_failed
event.listen(self.class_manager, 'init_failure',
go(ls_meth), raw=False, propagate=True)
else:
event.listen(self, "%s" % meth, ls_meth,
raw=False, retval=True, propagate=True)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
This is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
``RowProxy`` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
If the ultimate return value is EXT_CONTINUE, the row
is not translated.
"""
return EXT_CONTINUE
def create_instance(self, mapper, selectcontext, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
The method can choose to create the instance itself, or it can return
EXT_CONTINUE to indicate normal object creation should take place.
mapper
The mapper doing the operation
selectcontext
The QueryContext generated from the Query.
row
The result row from the database
class\_
The class we are mapping.
return value
A new object instance, or EXT_CONTINUE
"""
return EXT_CONTINUE
def append_result(self, mapper, selectcontext, row, instance,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
If this method returns EXT_CONTINUE, result appending will proceed
normally. if this method returns any other value or None,
result appending will not proceed for this instance, giving
this extension an opportunity to do the appending itself, if
desired.
mapper
The mapper doing the operation.
selectcontext
The QueryContext generated from the Query.
row
The result row from the database.
instance
The object instance to be appended to the result.
result
List to which results are being appended.
\**flags
extra information about the row, same as criterion in
``create_row_processor()`` method of
:class:`~sqlalchemy.orm.interfaces.MapperProperty`
"""
return EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row,
instance, **flags):
"""Receive an instance before that instance has
its attributes populated.
This usually corresponds to a newly loaded instance but may
also correspond to an already-loaded instance which has
unloaded attributes to be populated. The method may be called
many times for a single instance, as multiple result rows are
used to populate eagerly loaded collections.
If this method returns EXT_CONTINUE, instance population will
proceed normally. If any other value or None is returned,
instance population will not proceed, giving this extension an
opportunity to populate the instance itself, if desired.
.. deprecated:: 0.5
Most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
"""Receive an object instance before that instance is inserted
into its table.
This is a good place to set up primary key values and such
that aren't handled otherwise.
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
"""Receive an object instance before that instance is updated.
Note that this method is called for all instances that are marked as
"dirty", even those which have no net changes to their column-based
attributes. An object is marked as dirty when any of its column-based
attributes have a "set attribute" operation called or when any of its
collections are modified. If, at update time, no column-based
attributes have any net changes, no UPDATE statement will be issued.
This means that an instance being sent to before_update is *not* a
guarantee that an UPDATE statement will be issued (although you can
affect the outcome here).
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
"""Receive an object instance before that instance is deleted.
Note that *no* changes to the overall flush plan can be made
here; and manipulation of the ``Session`` will not have the
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
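# Illustrative sketch (not part of the original module): how EXT_CONTINUE
# and EXT_STOP interact along a chain of extensions. The AuditExt/VetoExt
# classes and the mapped class are hypothetical; EXT_STOP is importable
# from sqlalchemy.orm.interfaces.
#
#     class AuditExt(MapperExtension):
#         def before_insert(self, mapper, connection, instance):
#             print "auditing %r" % instance
#             return EXT_CONTINUE      # pass control to the next extension
#
#     class VetoExt(MapperExtension):
#         def before_insert(self, mapper, connection, instance):
#             return EXT_STOP          # halt the chain at this point
#
#     # mapper(SomeClass, some_table, extension=[AuditExt(), VetoExt()])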
class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. note::
:class:`.SessionExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.SessionEvents`.
Subclasses may be installed into a :class:`.Session` (or
:func:`.sessionmaker`) using the ``extension`` keyword
argument::
from sqlalchemy.orm.interfaces import SessionExtension
class MySessionExtension(SessionExtension):
def before_commit(self, session):
print "before commit!"
Session = sessionmaker(extension=MySessionExtension())
The same :class:`.SessionExtension` instance can be used
with any number of sessions.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in [
'before_commit',
'after_commit',
'after_rollback',
'before_flush',
'after_flush',
'after_flush_postexec',
'after_begin',
'after_attach',
'after_bulk_update',
'after_bulk_delete',
]:
me_meth = getattr(SessionExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
event.listen(self, meth, getattr(listener, meth))
def before_commit(self, session):
"""Execute right before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def before_flush( self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes."""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
def after_begin( self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
def after_bulk_update( self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`query` is the query object that this update operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
def after_bulk_delete( self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
`query` is the query object that this delete operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. note::
:class:`.AttributeExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.AttributeEvents`.
:class:`.AttributeExtension` is used to listen for set,
remove, and append events on individual mapped attributes.
It is established on an individual mapped attribute using
the `extension` argument, available on
:func:`.column_property`, :func:`.relationship`, and
others::
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm import mapper, relationship, column_property
class MyAttrExt(AttributeExtension):
def append(self, state, value, initiator):
print "append event !"
return value
def set(self, state, value, oldvalue, initiator):
print "set event !"
return value
mapper(SomeClass, sometable, properties={
'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
'bar':relationship(Bar, extension=MyAttrExt())
})
Note that the :class:`.AttributeExtension` methods
:meth:`~.AttributeExtension.append` and
:meth:`~.AttributeExtension.set` need to return the
``value`` parameter. The returned value is used as the
effective value, and allows the extension to change what is
ultimately persisted.
AttributeExtension is assembled within the descriptors associated
with a mapped class.
"""
active_history = True
"""indicates that the set() method would like to receive the 'old' value,
even if it means firing lazy callables.
Note that ``active_history`` can also be set directly via
:func:`.column_property` and :func:`.relationship`.
"""
@classmethod
def _adapt_listener(cls, self, listener):
event.listen(self, 'append', listener.append,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'remove', listener.remove,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'set', listener.set,
active_history=listener.active_history,
raw=True, retval=True)
def append(self, state, value, initiator):
"""Receive a collection append event.
The returned value will be used as the actual value to be
appended.
"""
return value
def remove(self, state, value, initiator):
"""Receive a remove event.
No return value is defined.
"""
pass
def set(self, state, value, oldvalue, initiator):
"""Receive a set event.
The returned value will be used as the actual value to be
set.
"""
return value
| gpl-3.0 | 4,175,316,921,779,378,000 | 35.923729 | 84 | 0.629653 | false |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/netdisco/discoverables/__init__.py | 1 | 5004 | """Provides helpful stuff for discoverables."""
# pylint: disable=abstract-method
import ipaddress
from urllib.parse import urlparse
from ..const import (
ATTR_NAME, ATTR_MODEL_NAME, ATTR_HOST, ATTR_PORT, ATTR_SSDP_DESCRIPTION,
ATTR_SERIAL, ATTR_MODEL_NUMBER, ATTR_HOSTNAME, ATTR_MAC_ADDRESS,
ATTR_PROPERTIES)
class BaseDiscoverable(object):
"""Base class for discoverable services or device types."""
def is_discovered(self):
"""Return True if it is discovered."""
return len(self.get_entries()) > 0
def get_info(self):
"""Return a list with the important info for each item.
Uses self.info_from_entry internally.
"""
return [self.info_from_entry(entry) for entry in self.get_entries()]
# pylint: disable=no-self-use
def info_from_entry(self, entry):
"""Return an object with important info from the entry."""
return entry
# pylint: disable=no-self-use
def get_entries(self):
"""Return all the discovered entries."""
raise NotImplementedError()
class SSDPDiscoverable(BaseDiscoverable):
"""uPnP discoverable base class."""
def __init__(self, netdis):
"""Initialize SSDPDiscoverable."""
self.netdis = netdis
def info_from_entry(self, entry):
"""Get most important info, by default the description location."""
url = urlparse(entry.location)
info = {
ATTR_HOST: url.hostname,
ATTR_PORT: url.port,
ATTR_SSDP_DESCRIPTION: entry.location
}
device = entry.description.get('device')
if device:
info[ATTR_NAME] = device.get('friendlyName')
info[ATTR_MODEL_NAME] = device.get('modelName')
info[ATTR_MODEL_NUMBER] = device.get('modelNumber')
info[ATTR_SERIAL] = device.get('serialNumber')
return info
# Helper functions
# pylint: disable=invalid-name
def find_by_st(self, st):
"""Find entries by ST (the device identifier)."""
return self.netdis.ssdp.find_by_st(st)
def find_by_device_description(self, values):
"""Find entries based on values from their description."""
return self.netdis.ssdp.find_by_device_description(values)
class MDNSDiscoverable(BaseDiscoverable):
"""mDNS Discoverable base class."""
def __init__(self, netdis, typ):
"""Initialize MDNSDiscoverable."""
self.netdis = netdis
self.typ = typ
self.services = {}
netdis.mdns.register_service(self)
def reset(self):
"""Reset found services."""
self.services.clear()
def is_discovered(self):
"""Return True if any device has been discovered."""
return len(self.get_entries()) > 0
# pylint: disable=unused-argument
def remove_service(self, zconf, typ, name):
"""Callback when a service is removed."""
self.services.pop(name, None)
def add_service(self, zconf, typ, name):
"""Callback when a service is found."""
service = None
tries = 0
while service is None and tries < 3:
service = zconf.get_service_info(typ, name)
tries += 1
if service is not None:
self.services[name] = service
def get_entries(self):
"""Return all found services."""
return self.services.values()
def info_from_entry(self, entry):
"""Return most important info from mDNS entries."""
properties = {}
for key, value in entry.properties.items():
if isinstance(value, bytes):
value = value.decode('utf-8')
properties[key.decode('utf-8')] = value
info = {
ATTR_HOST: str(ipaddress.ip_address(entry.address)),
ATTR_PORT: entry.port,
ATTR_HOSTNAME: entry.server,
ATTR_PROPERTIES: properties,
}
if "mac" in properties:
info[ATTR_MAC_ADDRESS] = properties["mac"]
return info
def find_by_device_name(self, name):
"""Find entries based on the beginning of their entry names."""
return [entry for entry in self.services.values()
if entry.name.startswith(name)]
class GDMDiscoverable(BaseDiscoverable):
"""GDM discoverable base class."""
def __init__(self, netdis):
"""Initialize GDMDiscoverable."""
self.netdis = netdis
def info_from_entry(self, entry):
"""Get most important info, by default the description location."""
return {
ATTR_HOST: entry.values['location'],
ATTR_PORT: entry.values['port'],
}
def find_by_content_type(self, value):
"""Find entries based on values from their content_type."""
return self.netdis.gdm.find_by_content_type(value)
def find_by_data(self, values):
"""Find entries based on values from any returned field."""
return self.netdis.gdm.find_by_data(values)
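# Illustrative sketch (not part of this module): concrete discoverables are
# usually tiny subclasses that pin down a service type. The
# '_example._tcp.local.' type below is hypothetical.
#
#     class Discoverable(MDNSDiscoverable):
#         """Discover a hypothetical '_example._tcp' mDNS service."""
#
#         def __init__(self, netdis):
#             super().__init__(netdis, '_example._tcp.local.')
#
#         def info_from_entry(self, entry):
#             info = super().info_from_entry(entry)
#             info[ATTR_NAME] = entry.server
#             return info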
| gpl-2.0 | -675,812,168,902,687,900 | 30.275 | 76 | 0.609712 | false |
jsidabras/GA-PMR | HFSS-loadbest.py | 1 | 1771 | # ----------------------------------------------
# Script Written by Jason W. Sidabras ([email protected])
# requires jsidabras/hycohanz as of 20-04-2017
# Loads a file with a list of 1s and 0s and implements it to HFSS as Silv/Vac
# used to load the best results per generation or final
# ----------------------------------------------
from random import *
import argparse
import hycohanz as hfss
[oAnsoftApp, oDesktop] = hfss.setup_interface()
oProject = oDesktop.SetActiveProject("GA_PlanarResonator")
oDesign = hfss.set_active_design(oProject, 'HFSSDesign1')
oEditor = hfss.set_active_editor(oDesign)
oFieldsReporter = hfss.get_module(oDesign, 'FieldsReporter')
parser = argparse.ArgumentParser(description='Load GA best file and run solution in HFSS.')
parser.add_argument('file', type=str, help='the filename to load')
args = parser.parse_args()
f = open(args.file, 'r')
loadthing = f.readline()
f.close()
dump = loadthing.strip("[")
dump = dump.rstrip()
dump = dump.strip(r"']").split(", ")
thing = []
for i in dump:
thing.append(int(i))
print(len(dump))
index = 0
Vac = []
Silv = []
for i in thing:
if i == 1:
Silv.append("Elm_"+str(index))
index += 1
else:
Vac.append("Elm_"+str(index))
index += 1
oDesktop.ClearMessages("", "", 3)
# Check if list is empty
if Vac:
hfss.assign_White(oEditor, Vac)
hfss.assign_material(oEditor, Vac, MaterialName="vacuum", SolveInside=True)
if Silv:
hfss.assign_Orange(oEditor, Silv)
hfss.assign_material(oEditor, Silv, MaterialName="pec", SolveInside=False)
oDesktop.ClearMessages("", "", 3)
# try:
#oDesign.Analyze("Setup1")
# except:
# print("Simulation Error")
#oProject.Save()
| mit | 856,297,286,986,434,200 | 27.032787 | 91 | 0.629588 | false |
eeriks/velo.lv | velo/payment/forms.py | 1 | 19328 | from django import forms
from django.contrib import messages
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from crispy_forms.layout import Layout, Div, HTML, Field
from crispy_forms.helper import FormHelper
from velo.payment.models import ActivePaymentChannel, Payment, DiscountCode
from velo.payment.utils import create_application_invoice, create_bank_transaction, create_team_invoice, \
approve_payment
from velo.payment.widgets import PaymentTypeWidget, DoNotRenderWidget
from velo.registration.models import Application
from velo.velo.mixins.forms import RequestKwargModelFormMixin, GetClassNameMixin
from velo.velo.utils import load_class
class ApplicationPayUpdateForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
accept_terms = forms.BooleanField(label=_("I confirm, that: the competition organizers are not responsible for possible injuries of participants, during the competition; my health condition corresponds to the selected distance; I will definitely use a fastened helmet and will observe road traffic regulations and competition regulations; I agree with the conditions for participation in the competition, mentioned in the regulations; I am informed, that the paid participation fee will not be returned and the participant’s starting number shall not be transferred to any other person."),
required=True)
accept_inform_participants = forms.BooleanField(label=_("I will inform all registered participants about rules."),
required=True)
accept_insurance = forms.BooleanField(label="", required=False)
discount_code = forms.CharField(label=_("Discount code"), required=False)
payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
prepend = 'payment_'
participants = None
success_url = None
class Meta:
model = Application
fields = ('discount_code', 'company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',
'invoice_show_names', 'donation')
widgets = {
'donation': DoNotRenderWidget, # We will add field manually
}
def _post_clean(self):
super()._post_clean()
if not bool(self.errors):
try:
instance = self.instance
instance.set_final_price() # if donation have changed, then we need to recalculate,
# because instance is not yet saved and it means,
# that this function on model is not yet run.
if instance.final_price == 0:
payment = Payment.objects.create(content_object=instance,
total=instance.final_price,
status=Payment.STATUSES.ok,
competition=instance.competition)
approve_payment(payment, self.request.user, self.request)
self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
else:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if active_payment_type.payment_channel.is_bill:
create_application_invoice(instance, active_payment_type)
self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
messages.success(self.request,
_('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
else:
self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
except:
# TODO We need to catch exception and log it to sentry
self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])
def save(self, commit=True):
instance = super(ApplicationPayUpdateForm, self).save(commit=False)
if self.request:
instance.updated_by = self.request.user
if instance.payment_status < Application.PAY_STATUS.waiting:
instance.payment_status = Application.PAY_STATUS.waiting
instance.params = dict(self.cleaned_data)
instance.params.pop("donation", None)
discount_code = instance.params.pop("discount_code", None)
if discount_code:
instance.params.update({'discount_code': discount_code.code})
if commit:
instance.save()
return instance
def clean_donation(self):
donation = self.cleaned_data.get('donation', 0.00)
# If person have already taken invoice, then we do not allow changing donation amount
if self.instance.invoice:
return float(self.instance.donation)
else:
return donation
def clean_discount_code(self):
code = self.cleaned_data.get('discount_code', "")
if not code:
return None
else:
if isinstance(code, DiscountCode):
return code
try:
return DiscountCode.objects.get(code=code)
except:
return None
def clean(self):
if not self.cleaned_data.get('donation', ''):
self.cleaned_data.update({'donation': 0.00})
super(ApplicationPayUpdateForm, self).clean()
try:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if self.data.get("discount_code", None) and active_payment_type.payment_channel.is_bill:
active_payment_type = None
self._errors.update({'payment_type': [_("Invoice is not available with discount code."), ]})
except:
active_payment_type = None
if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
if self.cleaned_data.get('company_name', '') == '':
self._errors.update({'company_name': [_("Company Name required."), ]})
if self.cleaned_data.get('company_regnr', '') == '':
self._errors.update({'company_regnr': [_("Company registration number required."), ]})
if self.cleaned_data.get('company_address', '') == '':
self._errors.update({'company_address': [_("Company Address required."), ]})
return self.cleaned_data
def __init__(self, *args, **kwargs):
self.participants = kwargs.pop('participants', None)
super(ApplicationPayUpdateForm, self).__init__(*args, **kwargs)
insured_participants = self.participants.exclude(insurance=None)
if insured_participants:
self.fields['accept_insurance'].required = True
insurance_company = insured_participants[0].insurance.insurance_company
terms_doc = "<a href='%s' target='_blank'>%s</a>" % (insurance_company.terms_doc.url, _("Regulation")) if insurance_company.terms_doc else ""
self.fields['accept_insurance'].label = mark_safe("%s %s" % (insurance_company.term, terms_doc))
else:
self.fields['accept_insurance'].widget = forms.HiddenInput()
now = timezone.now()
competition = self.instance.competition
checkboxes = (
'accept_terms',
'accept_inform_participants',
'accept_insurance',
)
if competition.processing_class:
_class = load_class(competition.processing_class)
processing = _class(competition=competition)
if hasattr(processing, 'payment_additional_checkboxes'):
for key, field in processing.payment_additional_checkboxes(application=self.instance):
self.fields[key] = field
checkboxes += (key,)
payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
'payment_channel')
# If user have already requested bill, then we are not showing possibility to request one more.
if self.instance.invoice:
payments = payments.filter(payment_channel__is_bill=False)
if self.instance.final_price == 0:
self.fields['payment_type'].required = False
self.fields['payment_type'].widget = forms.HiddenInput()
else:
self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
if self.instance.discount_code:
self.initial['discount_code'] = self.instance.discount_code.code
self.fields['donation'].required = False
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
*checkboxes,
Div(
Div(
Div(
Field(
"discount_code",
css_class="input-field if--50 if--dark js-placeholder-up"
),
),
css_class="input-wrap w100 bottom-margin--15 col-s-24 col-m-12 col-l-12 col-xl-12"
),
css_class="input-wrap w100 bottom-margin--15",
),
Div(
Div(
css_class="w100 bottom-margin--30",
),
Div(
Div(
HTML(_("Payment method")) if self.instance.final_price > 0 else HTML(""),
css_class="fs14 fw700 uppercase w100 bottom-margin--30"
),
Div(
Div(
Field('payment_type', wrapper_class="row row--gutters-20"),
css_class="w100"
),
css_class="input-wrap w100"
),
css_class="inner no-padding--560"
),
css_class="w100 border-top"
),
Div(
Div(
# company_name
Div(
Div(
Field(
"company_name",
css_class="input-field if--50 if--dark js-placeholder-up",
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_vat
Div(
Div(
Field(
"company_vat",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_regnr
Div(
Div(
Field(
"company_regnr",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_address
Div(
Div(
Field(
"company_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_juridical_address
Div(
Div(
Field(
"company_juridical_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
'invoice_show_names',
css_class=""
),
css_class="invoice_fields"
)
)
class TeamPayForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
prepend = 'payment_'
success_url = None
class Meta:
model = Application
fields = ('company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',)
def _post_clean(self):
super(TeamPayForm, self)._post_clean()
if not bool(self.errors):
try:
instance = self.instance
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if active_payment_type.payment_channel.is_bill:
create_team_invoice(instance, active_payment_type)
self.success_url = reverse('account:team', kwargs={'pk2': instance.id})
messages.info(self.request,
_('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
else:
self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
except:
# TODO We need to catch exception and log it to sentry
self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])
def clean(self):
super(TeamPayForm, self).clean()
try:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
except:
active_payment_type = None
if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
if self.cleaned_data.get('company_name', '') == '':
self._errors.update({'company_name': [_("Company Name required."), ]})
if self.cleaned_data.get('company_regnr', '') == '':
self._errors.update({'company_regnr': [_("Company registration number required."), ]})
if self.cleaned_data.get('company_address', '') == '':
self._errors.update({'company_address': [_("Company Address required."), ]})
if self.cleaned_data.get('company_juridical_address', '') == '':
self._errors.update({'company_juridical_address': [_("Company Juridical Address required."), ]})
return self.cleaned_data
def __init__(self, *args, **kwargs):
super(TeamPayForm, self).__init__(*args, **kwargs)
now = timezone.now()
competition = self.instance.distance.competition
payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
'payment_channel')
# If user have already requested bill, then we are not showing possibility to request one more.
if self.instance.invoice:
payments = payments.filter(payment_channel__is_bill=False)
self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Div(
Div(
css_class="w100 bottom-margin--30",
),
Div(
Div(
HTML(_("Payment method")),
css_class="fs14 fw700 uppercase w100 bottom-margin--30"
),
Div(
Div(
Field('payment_type', wrapper_class="row row--gutters-20"),
css_class="w100"
),
css_class="input-wrap w100"
),
css_class="inner no-padding--560"
),
css_class="w100 border-top"
),
Div(
Div(
# company_name
Div(
Div(
Field(
"company_name",
css_class="input-field if--50 if--dark js-placeholder-up",
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_vat
Div(
Div(
Field(
"company_vat",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_regnr
Div(
Div(
Field(
"company_regnr",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_address
Div(
Div(
Field(
"company_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_juridical_address
Div(
Div(
Field(
"company_juridical_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
'invoice_show_names',
css_class=""
),
css_class="invoice_fields"
)
)
| gpl-3.0 | -5,545,904,322,587,059,000 | 41.946667 | 593 | 0.506623 | false |
saffsd/assignmentprint | assignmentprint.py | 1 | 15044 | """
Utility functions and classes for preparing project marking bundles
for student assignments.
Marco Lui <[email protected]>, November 2012
"""
import os, sys, csv, re
import tokenize, textwrap, token
import trace, threading
import imp
import contextlib
from cStringIO import StringIO
from pprint import pformat
import pep8
from collections import Sequence, Mapping, Sized
RE_FILENAME = re.compile(r'proj2-(?P<filename>\w+).py')
RE_DIRNAME = re.compile(r'proj2-(?P<dirname>\w+)')
def as_module(path, name='submitted'):
module = imp.new_module(name)
with open(path) as f:
try:
# suppress stdout
sys.stdout = mystdout = StringIO()
exec f in module.__dict__
except Exception, e:
raise ImportError, "import failed: '{0}'".format(e)
finally:
sys.stdout = sys.__stdout__
return module, mystdout.getvalue()
def item2strs(item, max_lines=None):
output = pformat(item)
if max_lines is None or len(output.splitlines()) <= max_lines:
retval = output.splitlines()
else:
if isinstance(item, Mapping):
itemlen = len(item)
retval = ["<{0} of len {1}>".format(type(item),itemlen)]
for i in item.items()[:max_lines-2]:
retval.append(str(i))
retval.append('... ({0} more items)'.format(itemlen-max_lines+2))
elif isinstance(item, Sequence):
itemlen = len(item)
retval = ["<{0} of len {1}>".format(type(item),itemlen)]
for i in item[:max_lines-2]:
retval.append(str(i))
retval.append('... ({0} more items)'.format(itemlen-max_lines+2))
else:
retval = ["<item with repr len {0}>".format(len(repr(item)))]
# Add the item type to the start
retval[0] = "({0}) {1}".format(type(item), retval[0])
return retval
def split_comments(line):
code = []
noncode = []
try:
for tk in tokenize.generate_tokens(StringIO(line).readline):
if tk[2][0] != 1:
break
if tk[0] == tokenize.COMMENT:
noncode.append(tk[:2])
else:
code.append(tk)
except tokenize.TokenError:
pass
retval = tokenize.untokenize(code).strip(), tokenize.untokenize(noncode).strip()
#retval = ''.join(c[1] for c in code), ''.join(c[1] for c in noncode)
return retval
def get_indent(code):
tokens = tokenize.generate_tokens(StringIO(code).readline)
tk = tokens.next()
indent = tk[1] if tk[0] == token.INDENT else ''
return indent
def wrap_comment(line, width, add_indent=2):
"""
This assumes that line contains a (potentially whitespace-indented)
comment, and no actual code. It will assume anything before the
comment marker is padding, and will maintain the indent level
thereof.
"""
code, comm = split_comments(line)
indent = get_indent(line)
if len(indent) > width/2:
# Comment starts really far right, we shift it
# to start quarter way through the width
indent = ' ' * (width // 4)
retval = textwrap.wrap(comm, width,
initial_indent= indent,
subsequent_indent= indent + '#' + ' '*add_indent,
)
return retval
def wrap_code(code, width, add_indent=' '):
"""
Attempts to wrap a single line of code, respecting token
boundaries.
"""
tokens = tokenize.generate_tokens(StringIO(code).readline)
indent = get_indent(code)
chunk_width = width - len(indent)
chunk_start = 0
chunk_end = 0
chunks = []
first_chunk = True
try:
for tk_type, tk_text, tk_start, tk_end, _ in tokens:
if tk_start[0] != tk_end[0]:
raise ValueError, "token spanning multiple lines"
tk_len = tk_end[1] - tk_start[1]
if first_chunk:
chunk_indent = '' # the indent is part of the tokens
else:
chunk_indent = indent + add_indent
chunk_width = width - len(chunk_indent)
if tk_end[1]-chunk_start >= chunk_width:
# this token starts a new chunk
chunk = chunk_indent+code[chunk_start:chunk_end]+'\\'
assert len(chunk) <= width
chunks.append(chunk)
chunk_start = tk_start[1]
first_chunk = False
chunk_end = tk_end[1]
assert len(chunk_indent+code[chunk_start:chunk_end]+'\\') <= width
except tokenize.TokenError:
# unmatched somethingorother, we don't really care as it
# may be matched on another line
pass
finally:
# flush remaining chunk
rest = code[chunk_start:]
if len(rest) == 1:
# if the token is only 1 character, it can replace the line continuation
chunks[-1] = chunks[-1][:-1] + rest
else:
chunk = chunk_indent + rest
assert len(chunk) <= width
chunks.append(chunk)
return chunks
def wrap_line(line, width):
"""
Attempt to intelligently wrap Python code to width
This also moves any comments to a line prior.
"""
if len(line) <= width:
# shortcut if the line is shorter than the width required
return [line]
_line = line.lstrip()
indent = len(line) - len(_line)
code, comm = split_comments(_line)
if code:
# there are comments, we output these first
if comm:
c = ' ' * indent + comm
retval = wrap_comment(c, width)
else:
retval = []
c = ' ' * indent + code
retval.extend(wrap_code(c, width))
return retval
elif comm:
# This line only contains comments. Wrap accordingly.
return wrap_comment(line, width)
else:
return ['']
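# Illustrative usage (hypothetical line and width; output shape described,
# not a verified doctest):
#
#     for out in wrap_line("x = compute(a, b)  # explanation", 24):
#         print out
#
# prints the comment first (wrapped at the original indent), then the code
# split on token boundaries with trailing '\' continuations on every chunk
# but the last.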
def find_submission(path):
"""
Tries to find a submission in a given path.
Returns username, submission_path, else None
"""
if os.path.isdir(path):
m = RE_DIRNAME.search(path)
if m is not None:
dir_items = set(os.listdir(path))
username = m.group('dirname')
submission_name = username + '.py'
if submission_name in dir_items:
item_path = os.path.join(path, submission_name)
return username, item_path
elif os.path.isfile(path):
m = RE_FILENAME.search(path)
if m is not None:
username = m.group('filename')
return username, path
# from http://code.activestate.com/recipes/534166-redirectedio-context-manager-and-redirect_io-decor/
class RedirectedIO(object):
def __init__(self, target=None, mode='a+',
close_target=True):
try:
target = open(target, mode)
except TypeError:
if target is None:
target = StringIO()
self.target = target
self.close_target = close_target
def __enter__(self):
""" Redirect IO to self.target.
"""
self.original_stdout = sys.stdout
sys.stdout = self.target
return self.target
def __exit__(self, exc_type, exc_val, exc_tb):
""" Restore stdio and close the file.
"""
sys.stdout = self.original_stdout
if self.close_target:
self.target.close()
class ProjectPrinter(object):
"""
This class wraps a file-like object and provides
a series of methods for doing relevant output to
it.
"""
def __init__(self, target, pagewidth):
self.target = target
self.pagewidth = pagewidth
def writeln(self, line='', wrap=False):
if wrap:
self.target.write(textwrap.fill(line, width=self.pagewidth) + '\n')
else:
for l in line.splitlines():
self.target.write(textwrap.fill(l, width=self.pagewidth) + '\n')
def cwriteln(self, line):
"""
Write a centered line
"""
self.writeln("{0:^{1}}".format(line, self.pagewidth))
def hr(self, symbol='#'):
if len(symbol) != 1:
raise ValueError, "symbol must be a single character"
self.writeln(symbol * self.pagewidth)
def boxed_text(self, text, symbol='+', boxwidth=None, align='c', wrap=False):
if boxwidth is None:
boxwidth = self.pagewidth
if boxwidth < 0:
boxwidth = self.pagewidth + boxwidth
if self.pagewidth < boxwidth:
raise ValueError, "box wider than page"
if len(symbol) != 1:
raise ValueError, "symbol must be a single character"
if isinstance(text, basestring):
if wrap:
lines = textwrap.wrap(text, width=boxwidth-2*(len(symbol)+1))
else:
lines = text.splitlines()
else:
lines = text
self.cwriteln(symbol * boxwidth)
for line in lines:
if len(line) > boxwidth-2*(len(symbol)+1):
# line too long!
_lines = textwrap.wrap(line, width=boxwidth-2*(len(symbol)+1), subsequent_indent = ' ')
else:
_lines = [line]
for _line in _lines:
if align == 'c':
self.cwriteln('{0}{1:^{2}}{0}'.format(symbol, _line, boxwidth-2))
elif align == 'r':
self.cwriteln('{0}{1:>{2}} {0}'.format(symbol, _line, boxwidth-3))
else:
self.cwriteln('{0} {1:<{2}}{0}'.format(symbol, _line, boxwidth-3))
self.cwriteln(symbol * boxwidth)
def display_code(self, path):
"""
Display code with intelligent wrapping
"""
with open(path) as f:
for i, line in enumerate(f):
if len(line) > self.pagewidth - 6:
# Line too wide. Need to cleverly wrap it.
#_line = line.lstrip()
#indent = len(line) - len(_line)
indent = get_indent(line)
code, comm = split_comments(line)
if code:
if comm:
for l in wrap_comment(line, self.pagewidth-6):
self.writeln(' {0}'.format(l))
clines = wrap_code(indent + code, self.pagewidth - 6)
self.writeln('{0:>4}* {1}'.format(i+1, clines[0]))
for l in clines[1:]:
self.writeln(' {0}'.format(l))
else:
# only comments on this line
c_wrap = wrap_comment(line, self.pagewidth-6)
if c_wrap:
self.writeln( '{0:>4}: {1}'.format(i+1, c_wrap[0]) )
for l in c_wrap[1:]:
self.writeln(' {0}'.format(l))
"""
# We splice out comments
try:
tokens = list(tokenize.generate_tokens(StringIO(line).readline))
comments = ''.join(t[1] for t in tokens if t[0] == tokenize.COMMENT)
noncomments = [ (t[0],t[1]) for t in tokens if t[0] != tokenize.COMMENT ]
ncline = tokenize.untokenize(noncomments).rstrip()
except tokenize.TokenError:
# This happens with unmatched things - in particular triplequote
# we just pretend the line had no comments in this case
comments = ''
ncline = line
if ncline.lstrip():
# More than comments on this line
# Write the comments first, followed by the code
if comments.strip():
lead_gap = len(ncline) - len(ncline.lstrip())
comments = ' '*lead_gap + comments
c_wrap = wrap_comment(comments, self.pagewidth-6)
self.writeln(' {0}'.format(c_wrap[0]))
for l in c_wrap[1:]:
self.writeln(' {0}'.format(l))
if (len(ncline) + 6) > self.pagewidth:
# code is too long, must break
#self.writeln('line:{0} tokens:{1}'.format(len(ncline), len(noncomments)))
try:
broken = wrap_code(ncline, self.pagewidth-6)
except tokenize.TokenError:
# Can't tokenize, so we just wrap this with the same wrapping used
# for noncode and hope for the best.
broken = wrap_comment(ncline, self.pagewidth-6)
self.writeln('{0:>4}* {1}'.format(i+1, broken[0]))
for l in broken[1:]:
self.writeln(' {0}'.format(l))
else:
self.writeln('{0:>4}: {1}'.format(i+1, ncline))
else:
# Only comments on this line
c_wrap = wrap_comment(line, self.pagewidth-6)
self.writeln( '{0:>4}: {1}'.format(i+1, c_wrap[0]) )
for l in c_wrap[1:]:
self.writeln(' {0}'.format(l))
"""
else:
# Line fits on page
self.writeln( '{0:>4}: {1}'.format(i+1, line.rstrip()) )
def display_pep8(self, path, summary=True):
pep8_out = StringIO()
try:
with RedirectedIO(target=pep8_out, close_target=False):
pep8.process_options([path])
pep8.input_file(path)
error_stats = pep8.get_error_statistics()
warning_stats = pep8.get_warning_statistics()
val = pep8_out.getvalue().splitlines()
for line in [ x.split(':',1)[1] for x in val if ':' in x]:
self.writeln(line)
if summary:
self.writeln()
self.writeln("Summary:")
for e in error_stats:
self.writeln(e)
for w in warning_stats:
self.writeln(w)
self.writeln()
except tokenize.TokenError:
self.boxed_text(["PEP8 processing failed - check your source code"], symbol="#")
# adapted from http://code.activestate.com/recipes/473878/
class TimeOutExceeded(Exception): pass
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill() method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
self.result = None
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def run(self):
# TODO: Capture STDOUT, STDERR
success = True
outstream = StringIO()
try:
with RedirectedIO(target=outstream, close_target=False):
val = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
except Exception, e:
val = sys.exc_info()
success = False
output = outstream.getvalue()
self.result = success, val, output
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
def timeout(func, args=(), kwargs={}, timeout_duration=10, default=None):
"""This function will spawn a thread and run the given function
using the args, kwargs and return the given default value if the
timeout_duration is exceeded.
"""
if isinstance(args, basestring):
args = eval(args)
if isinstance(kwargs, basestring):
kwargs = eval(kwargs)
t = KThread(target=func, args=args, kwargs=kwargs)
t.start()
t.join(timeout_duration)
if t.isAlive():
t.kill()
raise TimeOutExceeded()
else:
return t.result
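# Illustrative usage (hypothetical function and values):
#
#     def slow(n):
#         return sum(xrange(n))
#
#     try:
#         success, value, output = timeout(slow, args=(10**7,),
#                                          timeout_duration=5)
#     except TimeOutExceeded:
#         success = False
#
# On completion KThread stores (success, return-value-or-exc_info,
# captured stdout) in .result, which is what timeout() hands back.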
@contextlib.contextmanager
def working_directory(path):
prev_cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(prev_cwd)
| gpl-3.0 | -6,263,825,766,030,460,000 | 30.276507 | 101 | 0.590335 | false |
artificialnull/IshanBoot | aliasbot.py | 1 | 9126 | #!/usr/bin/python3
import requests
import json
import os
import time
import random as rand
import subprocess
#telegram bot stuff
url = "https://api.telegram.org/bot%s/%s"
token = open("token.txt").read().replace('\n', '')
print(url % (token, "getUpdates"))
path = os.path.dirname(__file__)
#globals
locked = []
aliases = {}
commands = {}
chat_id = 0
SCH_CHID = -1001032618176
LOG_CHID = -1001098108881
#requests stuff
ConnectionError = requests.exceptions.ConnectionError
def isCommand(text, command):
if text[:len(command)] != command:
return False
else:
return True
def stripCommand(text, command):
return text[len(command) + 1:]
def getUpdates():
try:
r = requests.get(
url % (token, "getUpdates"),
data={"offset": getUpdates.offset},
timeout=60
)
try:
r = json.loads(r.text)
except:
print("Loading error while getting updates")
return [], True
r = r['result']
if len(r) > 0:
getUpdates.offset = int(r[-1]['update_id']) + 1
except ConnectionError:
print("Connection error while getting updates")
return [], True
return r, False
getUpdates.offset = 0
def sendMessage(message, reply_id=False, markdown=True):
payload = {
"chat_id": chat_id,
"text": message,
"parse_mode": "Markdown",
"disable_web_page_preview": True
}
if reply_id:
payload['reply_to_message_id'] = reply_id
if not markdown:
del payload['parse_mode']
try:
tresponse = requests.post(
url % (token, "sendMessage"),
data=payload,
timeout=2
)
resp = json.loads(tresponse.text)
if not resp["ok"]:
return sendMessage(message, reply_id, False)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
print("Connection error while sending message")
return True
return False
def loadAliases():
aliases = {}
aliasFile = open(path + "/aliases.json").read()
aliases = json.loads(aliasFile)
return aliases
def saveAliases():
aliasFile = open(path + "/aliases.json", "w")
aliasFile.write(json.dumps(aliases, indent=4))
aliasFile.close()
def loadLocked():
locked = []
lfile = open(path + "/locked.txt").read()
for line in lfile.split('\n'):
if line != '':
locked.append(line)
return locked
def logMessage(message):
baseLM = "user: %s ; mesg: %s ; chid: %s\n"
if 'text' in message.keys():
filledLM = baseLM % (message['from']['first_name'],
message['text'],
message['chat']['id'])
logfile = open(path + "/logfile.txt", "a")
logfile.write(filledLM)
logfile.close()
if message['chat']['id'] == SCH_CHID:
payload = {
'chat_id': LOG_CHID,
'from_chat_id': SCH_CHID,
'disable_notification': True,
'message_id': message['message_id']
}
# try:
# tresponse = requests.post(url % (token, "forwardMessage"),
# data=payload, timeout=2)
# except:
# return
def alias(content, uid):
alias = content.split('=')[0]
while alias[0] == ' ':
alias = alias[1:]
while alias[-1] == ' ':
alias = alias[:-1]
alias = alias.replace(' ', '_')
value = '='.join(content.split('=')[1:])
if len(alias.split()) == 1:
if alias not in locked or uid == 204403520:
aliases[alias] = value
print("alias " + alias + "=" + value + " by " + name)
saveAliases()
sendMessage("Aliased " + alias + " to " + value, message_id)
else:
print("cannot unlock alias")
sendMessage("Alias is locked, sorry", message_id)
else:
print("alias malformed")
sendMessage("Alias must be a single term", message_id)
def unalias(content, uid):
alias = content
if alias not in locked:
if len(alias.split()) == 1 and alias in aliases.keys():
aliases[alias] = ''
print("del " + alias)
saveAliases()
sendMessage("Unaliased " + alias, message_id)
else:
print("unalias malformed")
sendMessage("Invalid alias", message_id)
else:
print("cannot unlock alias")
sendMessage("Alias is locked, sorry", message_id)
def random(content, uid):
randomAlias = rand.choice(list(aliases.keys()))
randomAliasStr = "/%s = %s" % (randomAlias, aliases[randomAlias])
print(randomAliasStr)
sendMessage(randomAliasStr)
def uptime(content, uid):
sendMessage('`' + subprocess.Popen('uptime', stdout=subprocess.PIPE).communicate()[0].decode("utf-8") + '`')
def welp(content, uid):
sendMessage("gg")
def rip(content, uid):
response = rand.choice(["me", "rip is right", "rip is me"])
sendMessage(response)
def amirite(content, uid):
if rand.randint(1, 10) == 4:
response = "yep"
else:
response = "¬_¬"
sendMessage(response)
def remind(content, uid):
global chat_id
chat_id = SCH_CHID
sendMessage("heres your periodic schedule reminder!!!\n" + aliases["schedule"])
def newdaypb(content, uid):
sendMessage(aliases["newdaypb"])
def queue(content, uid):
print("cue")
if rand.randint(1, 10) < 3:
print("Q")
sendMessage("u wot m8", message_id)
def stan(content, uid):
sendMessage('no', message_id)
commands = {
'/alias': alias,
'/unalias': unalias,
'/random': random,
'/time': uptime,
'w/elp': welp,
'/rip': rip,
'/amirite': amirite,
'/remindme': remind,
'/newdaypb': newdaypb,
'/q@IshanBot': queue,
'stan': stan,
'hi stan': stan
}
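# Illustrative note (not in the original): new bot commands follow the same
# (content, uid) handler signature and get registered in the dict above.
# The '/echo' handler below is hypothetical; like the handlers above, it
# relies on the module-global message_id set in the main loop.
#
#     def echo(content, uid):
#         sendMessage(content, message_id)
#
#     commands['/echo'] = echo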
if __name__ == "__main__":
aliases = loadAliases()
locked = loadLocked()
print("Started")
loffset = getUpdates.offset - 1
while getUpdates.offset != loffset:
loffset = getUpdates.offset
getUpdates()
print("Updated to:", getUpdates.offset)
while __name__ == "__main__":
try:
r, err = getUpdates()
if len(r) != 0 and not err:
print("received updates")
elif err:
time.sleep(1)
for update in r:
message = update.get('message')
if message == None:
continue
logMessage(message)
message_id = message['message_id']
print(message_id)
chat = message['chat']
chat_id = chat['id']
user = message.get('from')
name = "@" + user.get('username')
if name == None:
name = user.get('first_name')
uid = user['id']
if chat_id == LOG_CHID:
try:
payload = {
'chat_id': LOG_CHID,
'user_id': uid
}
requests.post(
url % (token, "kickChatMember"),
data=payload,
timeout=2
)
continue
except ConnectionError:
pass
text = message.get('text', ' ')
found = False
for command in commands.keys():
if isCommand(text, command):
content = stripCommand(text, command)
found = True
commands[command](content, uid)
if found:
continue
if "/" in text:
terms = text.split()
response = ''
for term in terms:
if '/' == term[0]:
alias = ''
if '@' in term and term[1:].split('@')[-1] == "IshanBot":
alias = term[1:].split('@')[0]
else:
alias = term[1:]
"""
for key in aliases.keys():
if 'legendary' in aliases[key]:
print(key)
print([ord(c) for c in key])
print([ord(c) for c in alias])
print(alias == key)
"""
response += aliases.get(alias, '')
if response != '':
sendMessage(response + ' ' + name)
except KeyboardInterrupt:
print("Control menu:\n 0 - Quit\n 1 - Reload locks")
choice = int(input("> "))
if choice == 1:
locked = loadLocked()
else:
saveAliases()
raise SystemExit
except BaseException as e:
print(str(e))
| gpl-3.0 | -6,381,240,377,747,982,000 | 28.432258 | 112 | 0.49463 | false |
persepolisdm/translation-API | pdm_api/views/default.py | 1 | 1257 | from pyramid.response import Response
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPForbidden
from pyramid import request
from sqlalchemy.exc import DBAPIError
from ..models.mymodel import MyModel, request_log, access_log, banlist
from ..settings import get_settings
import datetime
@view_config(route_name='home', renderer='../templates/mytemplate.jinja2')
def my_view(request):
try:
query = request.dbsession.query(MyModel)
one = query.filter(MyModel.name == 'one').first()
except DBAPIError:
return Response(db_err_msg, content_type='text/plain', status=500)
return {'one': one, 'project': 'pdm_api'}
db_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to run the "initialize_pdm_api_db" script
to initialize your database tables. Check your virtual
environment's "bin" directory for this script and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
| gpl-3.0 | -3,306,338,348,368,249,300 | 33.916667 | 74 | 0.739857 | false |
AbhilashReddyM/GeometricMultigrid | mgd3d.py | 1 | 4921 | """
2017 (c) A. R. Malipeddi
3D geometric multigrid code for Poisson's equation in a cube.
- Finite difference method
- 7pt operator
- trilinear interpolation
- Two-color Gauss Seidel smoothing
"""
import numpy as np
def GSrelax(nx,ny,nz,u,f,iters=1,flag=1):
'''
Red-Black Gauss Seidel smoothing
flag : 1 = pre-sweep
2 = post-sweep
'''
dx=1.0/nx
dy=1.0/ny
dz=1.0/nz
Ax=1.0/dx**2
Ay=1.0/dy**2
Az=1.0/dz**2
Ap=1.0/(2.0*(1.0/dx**2+1.0/dy**2+1.0/dz**2))
#BCs. Needs to be generalized!
u[ 0,:,:] = -u[ 1,:,:]
u[-1,:,:] = -u[-2,:,:]
u[: ,0,:] = -u[:, 1,:]
u[:,-1,:] = -u[:,-2,:]
u[:,:, 0] = -u[:,:, 1]
u[:,:,-1] = -u[:,:,-2]
for it in range(iters):
c=0
for _ in [1,2]:
for i in range(1,nx+1):
cs=c
for j in range(1,ny+1):
for k in range(1+c,nz+1,2):
u[i,j,k]= Ap*( Ax*(u[i+1,j,k]+u[i-1,j,k])
+ Ay*(u[i,j+1,k]+u[i,j-1,k])
+ Az*(u[i,j,k+1]+u[i,j,k-1])
- f[i,j,k])
c=1-c
c=1-cs
c=1
#BCs. Needs to be generalized!
u[ 0,:,:] = -u[ 1,:,:]
u[-1,:,:] = -u[-2,:,:]
u[: ,0,:] = -u[:, 1,:]
u[:,-1,:] = -u[:,-2,:]
u[:,:, 0] = -u[:,:, 1]
u[:,:,-1] = -u[:,:,-2]
#if residual not needed
if(flag==2):
return u,None
res=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
res[i,j,k]=f[i,j,k] - (Ax*(u[i+1,j,k]+u[i-1,j,k])
+ Ay*(u[i,j+1,k]+u[i,j-1,k])
+ Az*(u[i,j,k+1]+u[i,j,k-1])
- 2.0*(Ax+Ay+Az)*u[i,j,k])
return u,res
def restrict(nx,ny,nz,v):
'''
restrict 'v' to the coarser grid
'''
v_c=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
v_c[i,j,k]=0.125*(v[2*i-1,2*j-1,2*k-1]+v[2*i,2*j-1,2*k-1]+v[2*i-1,2*j,2*k-1]+v[2*i,2*j,2*k-1]
+v[2*i-1,2*j-1,2*k ]+v[2*i,2*j-1,2*k ]+v[2*i-1,2*j,2*k ]+v[2*i,2*j,2*k ])
return v_c
def prolong(nx,ny,nz,v):
'''
interpolate correction to the fine grid
'''
v_f=np.zeros([2*nx+2,2*ny+2,2*nz+2])
a=27.0/64
b= 9.0/64
c= 3.0/64
d= 1.0/64
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
v_f[2*i-1,2*j-1,2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i-1,j-1,k] + v[i-1,j,k-1] + v[i,j-1,k-1]) + d*v[i-1,j-1,k-1]
v_f[2*i ,2*j-1,2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i+1,j-1,k] + v[i+1,j,k-1] + v[i,j-1,k-1]) + d*v[i+1,j-1,k-1]
v_f[2*i-1,2*j ,2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i-1,j+1,k] + v[i-1,j,k-1] + v[i,j+1,k-1]) + d*v[i-1,j+1,k-1]
v_f[2*i ,2*j ,2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i+1,j+1,k] + v[i+1,j,k-1] + v[i,j+1,k-1]) + d*v[i+1,j+1,k-1]
v_f[2*i-1,2*j-1,2*k ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i-1,j-1,k] + v[i-1,j,k+1] + v[i,j-1,k+1]) + d*v[i-1,j-1,k+1]
v_f[2*i ,2*j-1,2*k ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i+1,j-1,k] + v[i+1,j,k+1] + v[i,j-1,k+1]) + d*v[i+1,j-1,k+1]
v_f[2*i-1,2*j ,2*k ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i-1,j+1,k] + v[i-1,j,k+1] + v[i,j+1,k+1]) + d*v[i-1,j+1,k+1]
v_f[2*i ,2*j ,2*k ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i+1,j+1,k] + v[i+1,j,k+1] + v[i,j+1,k+1]) + d*v[i+1,j+1,k+1]
return v_f
def V_cycle(nx,ny,nz,num_levels,u,f,level=1):
'''
V cycle
'''
if(level==num_levels):#bottom solve
u,res=GSrelax(nx,ny,nz,u,f,iters=100)
return u,res
#Step 1: Relax Au=f on this grid
u,res=GSrelax(nx,ny,nz,u,f,2)
#Step 2: Restrict residual to coarse grid
res_c=restrict(nx//2,ny//2,nz//2,res)
#Step 3:Solve A e_c=res_c on the coarse grid. (Recursively)
e_c=np.zeros_like(res_c)
e_c,res_c=V_cycle(nx//2,ny//2,nz//2,num_levels,e_c,res_c,level+1)
#Step 4: Interpolate(prolong) e_c to fine grid and add to u
u+=prolong(nx//2,ny//2,nz//2,e_c)
#Step 5: Relax Au=f on this grid
if(level==1):
u,res=GSrelax(nx,ny,nz,u,f,2,flag=1)
else:
u,res=GSrelax(nx,ny,nz,u,f,2,flag=2)
return u,res
def FMG(nx,ny,nz,num_levels,f,nv=1,level=1):
if(level==num_levels):#bottom solve
u=np.zeros([nx+2,ny+2,nz+2])
u,res=GSrelax(nx,ny,nz,u,f,iters=100)
return u,res
#Step 1: Restrict the rhs to a coarse grid
f_c=restrict(nx//2,ny//2,nz//2,f)
#Step 2: Solve the coarse grid problem using FMG
u_c,_=FMG(nx//2,ny//2,nz//2,num_levels,f_c,nv,level+1)
#Step 3: Interpolate u_c to the fine grid
u=prolong(nx//2,ny//2,nz//2,u_c)
#step 4: Execute 'nv' V-cycles
for _ in range(nv):
u,res=V_cycle(nx,ny,nz,num_levels-level,u,f)
return u,res
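# Illustrative driver: a minimal sketch of how the solver above can be
# exercised. The grid size, right-hand side and cycle count below are
# assumptions for demonstration, not part of the original code.
if __name__ == '__main__':
  nlevels = 5
  NX = NY = NZ = 2**nlevels          # 32x32x32 interior cells on the unit cube
  f = np.zeros([NX+2, NY+2, NZ+2])   # rhs with one ghost layer per side
  f[1:NX+1, 1:NY+1, 1:NZ+1] = 1.0    # f = 1 in the interior
  u = np.zeros_like(f)               # zero initial guess
  for cycle in range(4):             # a few V-cycles
    u, res = V_cycle(NX, NY, NZ, nlevels, u, f)
    print('cycle %d: max |res| = %e' % (cycle + 1, np.max(np.abs(res))))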
| mit | -4,327,896,811,227,233,300 | 28.291667 | 154 | 0.470839 | false |
aalien/mib | mib.py | 1 | 7386 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# mib: Modular irc bot
# Copyright Antti Laine <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ircsocket import IrcSocket
from ircutils import regexpify
from parser import parse, IRCMsg
import config
import os
import re
import sys
class Mib:
""" Main class which handles most of the core functionality.
"""
def __init__(self):
""" Initialize variables and read config.
"""
sys.path.append('plugins')
self.loaded_plugins = {} # plugin name : module
self.cmd_callbacks = {} # command : set(function)
self.privmsg_cmd_callbacks = {} # command : set(function)
self.command_masks = {} # command : list(regexp)
self.plugins = set(config.LOAD_PLUGINS)
self.cmd_prefixes = set(config.CMD_PREFIXES)
self.nick = config.NICK
self.username = config.USERNAME
self.realname = config.REALNAME
self.server, self.port = config.SERVER
self.channels = config.CHANNELS
self.socket = IrcSocket(self.server, self.port, self.nick,
self.username, self.realname)
self.socket.register_readline_cb(self.parse_line)
for channel in self.channels:
self.socket.join(channel)
for plugin in self.plugins:
print self.load_plugin(plugin)[1]
def run(self):
""" Start socket's main loop.
"""
self.socket.run()
def clean(self):
for plugin in self.loaded_plugins.itervalues():
plugin.clean()
def parse_line(self, line):
""" Parse line and call callbacks registered for command.
"""
print line
parsed = parse(line)
if not parsed:
print 'Unable to parse line: "%s"' %(line)
return
# call registered functions
for function in self.cmd_callbacks.get(parsed.cmd, ()):
try:
function(parsed)
except Exception, e:
print 'Error from function', repr(function), ':', e
# call registered privmsg functions with pre-parsed line
if parsed.cmd == 'PRIVMSG':
cmd_prefix = parsed.postfix.split(' ', 1)[0]
postfix = parsed.postfix[len(cmd_prefix):].lstrip()
if cmd_prefix in self.cmd_prefixes:
print 'Found command prefix', cmd_prefix
cmd = postfix.lstrip().split(' ', 1)[0]
postfix = postfix[len(cmd):].lstrip()
stripped_parsed = IRCMsg(parsed.prefix, parsed.cmd,
parsed.params, postfix)
print "stripped_parsed = ", stripped_parsed
print 'Searching for command', cmd
for function in self.privmsg_cmd_callbacks.get(cmd, ()):
run = False
if cmd not in self.command_masks:
run = True
else:
print 'There are limitations for this command'
for regexp in self.command_masks[cmd]:
print 'Matching %s to %s' % (parsed.prefix,
regexp.pattern)
if regexp.match(parsed.prefix):
run = True
break
if run:
try:
print 'Executing command %s' % cmd
function(stripped_parsed)
except Exception, e:
print 'Error from function', repr(function), ':', e
def load_plugin(self, plugin, params=None):
""" str, ([]) -> (bool, str)
Loads plugin from plugins/<plugin>.py
Params will be given to plugin's constructor.
Returns a tuple with a boolean stating if the plugin
was loaded properly and a message telling what happened.
"""
if plugin in self.loaded_plugins:
return (False, 'Plugin %s already loaded' %(plugin))
if not os.path.exists(os.path.join('plugins', plugin + '.py')):
            return (False, 'Plugin %s does not exist' %(plugin))
try:
module = __import__(plugin)
if params:
obj = module.init(self, params)
else:
obj = module.init(self)
success = True
except Exception, err:
success = False
print err
if success:
self.loaded_plugins[plugin] = obj
return (True, 'Loaded plugin %s' %(plugin))
else:
return (False, 'Failed to load plugin %s' %(plugin))
def register_cmd(self, cmd, function):
""" Registers a function to be called when a line with
cmd is seen. Function must take one named tuple parameter.
Tuple contains line in parsed form with fields
(prefix, cmd, params, postfix)
"""
self.cmd_callbacks.setdefault(cmd, set()).add(function)
def register_privmsg_cmd(self, cmd, function):
""" Registers a function to be called when a PRIVMSG with
cmd is seen. Function must take one named tuple parameter.
Tuple contains line in parsed form with fields
(prefix, cmd, params,
postfix stripped from one of CMD_PREFIXES and cmd)
"""
self.privmsg_cmd_callbacks.setdefault(cmd, set()).add(function)
    def add_cmd_permission(self, cmd, mask, convert=True):
        """ Creates a regular expression from the mask and adds it
            to the list of allowed regexps for the cmd.
            mask is an IRC mask; if convert is True (the default) it
            will be changed into a corresponding regular expression.
        """
        if convert:
            # turn the IRC mask into a regular expression
            mask = regexpify(mask)
        m = re.compile(mask)
        self.command_masks.setdefault(cmd, []).append(m)
def rm_cmd_permission(self, cmd, mask):
""" Creates a regular expression from the mask, and removes
the permission for that expression from cmd's list.
mask is an IRC mask, and will be changed into a corresponding
regular expression.
"""
mask = regexpify(mask)
if cmd in self.command_masks:
for index, regexp in enumerate(self.command_masks[cmd]):
if regexp.pattern == mask:
del self.command_masks[cmd][index]
break
if __name__ == "__main__":
mib = Mib()
try:
mib.run()
except Exception, e:
print 'ERROR: ', e
except:
pass
mib.clean()
    print 'Quitting!'
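
# Illustrative plugin sketch (assumed layout, not shipped with this file):
# load_plugin() imports plugins/<name>.py and calls its init(bot), and the
# returned object's clean() is called on shutdown. A minimal
# plugins/echo.py could therefore look roughly like this:
#
#   class Echo(object):
#       def __init__(self, bot):
#           bot.register_privmsg_cmd('echo', self.on_echo)
#       def on_echo(self, msg):
#           print 'echo:', msg.postfix
#       def clean(self):
#           pass
#
#   def init(bot):
#       return Echo(bot)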
| mit | 7,265,648,255,817,794,000 | 37.670157 | 79 | 0.558218 | false |
daigotanaka/kawaraban | wsgi.py | 1 | 1445 | """
WSGI config for the website project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
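# For example, an illustrative mod_wsgi daemon-mode setup (names and paths
# here are assumptions, not part of this project) could look like:
#
#   WSGIDaemonProcess kawaraban python-path=/srv/kawaraban
#   WSGIProcessGroup kawaraban
#   WSGIScriptAlias / /srv/kawaraban/wsgi.py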
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | -662,257,155,404,534,100 | 42.787879 | 79 | 0.792388 | false |
team-vigir/vigir_behaviors | vigir_flexbe_states/src/vigir_flexbe_states/read_dynamic_parameter_state.py | 1 | 2586 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from dynamic_reconfigure.client import Client
"""
Created on 11/03/2014
@author: Philipp Schillinger
"""
class ReadDynamicParameterState(EventState):
"""
Reads a given trajectory controller parameter.
"""
LEFT_ARM_WRX = ['left_arm_traj_controller', 'l_arm_wrx']
LEFT_ARM_WRY = ['left_arm_traj_controller', 'l_arm_wry']
LEFT_ARM_ELX = ['left_arm_traj_controller', 'l_arm_elx']
LEFT_ARM_ELY = ['left_arm_traj_controller', 'l_arm_ely']
LEFT_ARM_SHX = ['left_arm_traj_controller', 'l_arm_shx']
LEFT_ARM_SHZ = ['left_arm_traj_controller', 'l_arm_shz']
RIGHT_ARM_WRX = ['right_arm_traj_controller', 'r_arm_wrx']
RIGHT_ARM_WRY = ['right_arm_traj_controller', 'r_arm_wry']
RIGHT_ARM_ELX = ['right_arm_traj_controller', 'r_arm_elx']
RIGHT_ARM_ELY = ['right_arm_traj_controller', 'r_arm_ely']
RIGHT_ARM_SHX = ['right_arm_traj_controller', 'r_arm_shx']
RIGHT_ARM_SHZ = ['right_arm_traj_controller', 'r_arm_shz']
def __init__(self, param):
"""Constructor"""
super(ReadDynamicParameterState, self).__init__(outcomes=['read', 'failed'],
input_keys=['traj_controller'],
output_keys=['parameter_value'])
self._param = param
self._failed = False
self._clients = {}
self._waiting_for_response = []
self._parameter_value_list = []
def execute(self, userdata):
if self._failed:
return 'failed'
value_offset = 0
for i in range(len(self._clients.keys())):
if self._waiting_for_response[i]:
param_dict = self._clients.values()[i].get_configuration(0.1)
if param_dict is not None:
self._waiting_for_response[i] = False
value_list = []
for j in range(len(self._param.values()[i])):
value_list.append(param_dict[self._param.values()[i][j]])
self._parameter_value_list[value_offset:value_offset+len(value_list)] = value_list
value_offset += len(self._param.values()[i])
if all(not waiting for waiting in self._waiting_for_response):
userdata.parameter_value = self._parameter_value_list
return 'read'
def on_enter(self, userdata):
self._clients = {}
self._waiting_for_response = [True] * len(self._param.keys())
self._parameter_value_list = [None] * sum(map(len, self._param.values()))
try:
for server in self._param.keys():
self._clients[server] = Client("/trajectory_controllers/" + userdata.traj_controller[0] + "/" + server + "/" + userdata.traj_controller[1])
except Exception as e:
Logger.logwarn('Was unable to reach parameter server:\n%s' % str(e))
self._failed = True
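
# Illustrative usage sketch (assumed, not part of this package): inside a
# FlexBE behavior the state could be added roughly like
#
#   OperatableStateMachine.add(
#       'read_gains',
#       ReadDynamicParameterState(param={'gains': ['p', 'i', 'd']}),
#       transitions={'read': 'done', 'failed': 'failed'},
#       remapping={'traj_controller': 'traj_controller',
#                  'parameter_value': 'parameter_value'})
#
# where param maps each dynamic-reconfigure server name to the parameter
# names to read ('gains', 'p', 'i' and 'd' are made-up examples).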
| bsd-3-clause | 9,053,889,298,215,037,000 | 30.536585 | 143 | 0.664346 | false |
will-iam/Variant | script/process/ergodicity_scaling.py | 1 | 4083 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from __future__ import print_function
import parser
import sys
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
import numpy as np
import operator
from collections import *
caseSize = (8192, 8192)
if parser.args.res:
maxAvailableNode = parser.args.res
else:
maxAvailableNode = 8
sizeDataDict = []
for p in range(0, int(np.log2(maxAvailableNode)) + 1):
filterDict = {'nSizeX' : caseSize[0], 'nSizeY' : caseSize[1], 'R' : 64 * 2**p}
    print(filterDict)
data = parser.getData(filterDict)
if len(data):
sizeDataDict.append(data)
if len(sizeDataDict) == 0:
print("No data found.")
sys.exit(1)
loopTimeDict = dict()
for data in sizeDataDict:
for key, value in data.items():
keyDict = parser.extractKey(key)
Nt = keyDict['Nt']
R = keyDict['R']
if keyDict['Ny'] != caseSize[0] or keyDict['Nx'] != caseSize[1]:
print("Error in collected data")
sys.exit(1)
for run in value:
nSDD = run['point'][0] * run['point'][1]
# On several nodes, select only pure SDD, which is the best result.
if R > 64 and nSDD < R:
continue
# Don't remove HyperThreading.
# We assume that hyperthreading with SDD leads to same results as with SDS.
#if R > 64 and nSDD == R and Nt > 1.0:
# continue
# On a single node, select only pure SDS
if R == 64 and nSDD > 1:
continue
loopT = run['loopTime'] * caseSize[0] * caseSize[1] * keyDict['Ni'] / 1000.
if R not in loopTimeDict.keys():
loopTimeDict[R] = list()
loopTimeDict[R].append(loopT)
# And now, we must plot that
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
#ax = fig.add_subplot(211)
#ax.set_xscale('log', basex=2)
#ax.set_yscale('log')
maxSimulationNumber = 42
xArray = range(1, maxSimulationNumber + 1)
'''
#Perfect Scale
loopTimeDict[128] = [k / 2. for k in loopTimeDict[64]]
loopTimeDict[256] = [k / 4. for k in loopTimeDict[64]]
loopTimeDict[512] = [k / 8. for k in loopTimeDict[64]]
'''
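# Batch-time model used below: a batch of size r needs nodeNeeded = r // 64
# nodes per simulation, so running i simulations takes
# ceil(i * nodeNeeded / maxAvailableNode) sequential waves, each lasting
# minT. Worked example (numbers assumed for illustration): nodeNeeded = 2,
# maxAvailableNode = 8, i = 5 -> 10 nodes -> 1 + (10 - 1) // 8 = 2 waves.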
for r in sorted(loopTimeDict):
nodeNeeded = r // 64
minT = np.min(loopTimeDict[r])
print("Min Time %s node(s) = %s" % (nodeNeeded, minT))
totalTimeArray = np.zeros(maxSimulationNumber)
for i in xArray:
totalTimeArray[i-1] = minT * (1 + (i * nodeNeeded - 1) // maxAvailableNode)
ax.plot(xArray, totalTimeArray, '-', label="Batch Size %s" % (r // 64))
parser.outputCurve("ergodicity_scaling-%s.dat" % (r//64), xArray, totalTimeArray)
'''
minSize = int(np.sqrt(np.min(syncTimeDict.keys())))
maxSize = int(np.sqrt(np.max(syncTimeDict.keys())))
nodeNumber = (caseSize[0] * caseSize[1] / (maxSize * maxSize))
'''
plt.title('%sx%s batch time with %s node(s) available at the same time.' % (caseSize[0], caseSize[1], maxAvailableNode))
plt.xlabel('Total number of simulation to run')
plt.ylabel('Loop Time')
plt.legend()
'''
bx = fig.add_subplot(212)
bx.set_xscale('log', basex=2)
bx.plot(sorted(sdsWeakDict), [np.min(v) for k, v in sorted(sdsWeakDict.items(), key=operator.itemgetter(0))], 'g+-', label="SDS scaling")
bx.plot(sorted(sddWeakDict), [np.min(v) for k, v in sorted(sddWeakDict.items())], 'b+-', label="SDD scaling")
#bx.plot(sorted(hybridWeakDict), [np.min(v) for k, v in sorted(hybridWeakDict.items())], 'y+-', label="Hybrid scaling")
bx.plot(sorted(sddWeakDict), [firstValueSDD for k in sorted(sddWeakDict.keys())], 'b--', label="SDD ideal")
bx.plot(sorted(sdsWeakDict), [firstValueSDS for k in sorted(sdsWeakDict.keys())], 'g--', label="SDS ideal")
for k in sdsWeakDict:
bx.plot(np.full(len(sdsWeakDict[k]), k), sdsWeakDict[k], 'g+')
for k in sddWeakDict:
bx.plot(np.full(len(sddWeakDict[k]), k), sddWeakDict[k], 'b+')
plt.title('Weak Scaling from %sx%s to %sx%s' % (initSize, initSize, initSize * 2**((maxPower-1) / 2), initSize * 2**((maxPower-1) / 2)) )
plt.xlabel('Core(s)')
plt.ylabel('Loop Time / iteration')
plt.legend()
'''
plt.show()
| mit | 3,501,576,982,939,167,000 | 30.898438 | 137 | 0.62552 | false |
rfleschenberg/django-shop | example/myshop/migrations/polymorphic/0003_add_polymorphic.py | 1 | 9751 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import cms.models.fields
import djangocms_text_ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
('contenttypes', '0002_remove_content_type_name'),
('filer', '0002_auto_20150606_2003'),
('myshop', '0002_add_i18n'),
]
operations = [
migrations.CreateModel(
name='OperatingSystem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='Name')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('active', models.BooleanField(default=True, help_text='Is this product publicly visible.', verbose_name='Active')),
('product_name', models.CharField(max_length=255, verbose_name='Product Name')),
('slug', models.SlugField(unique=True, verbose_name='Slug')),
('order', models.PositiveIntegerField(verbose_name='Sort by', db_index=True)),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='ProductTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('description', djangocms_text_ckeditor.fields.HTMLField(help_text='Description for the list view of products.', verbose_name='Description')),
],
),
migrations.CreateModel(
name='SmartPhone',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
('storage', models.PositiveIntegerField(help_text='Internal storage in MB', verbose_name='Internal Storage')),
],
),
migrations.AlterUniqueTogether(
name='smartcardtranslation',
unique_together=set([]),
),
migrations.RemoveField(
model_name='smartcardtranslation',
name='master',
),
migrations.AlterModelOptions(
name='smartcard',
options={'verbose_name': 'Smart Card', 'verbose_name_plural': 'Smart Cards'},
),
migrations.RemoveField(
model_name='smartcard',
name='active',
),
migrations.RemoveField(
model_name='smartcard',
name='cms_pages',
),
migrations.RemoveField(
model_name='smartcard',
name='created_at',
),
migrations.RemoveField(
model_name='smartcard',
name='id',
),
migrations.RemoveField(
model_name='smartcard',
name='images',
),
migrations.RemoveField(
model_name='smartcard',
name='manufacturer',
),
migrations.RemoveField(
model_name='smartcard',
name='order',
),
migrations.RemoveField(
model_name='smartcard',
name='polymorphic_ctype',
),
migrations.RemoveField(
model_name='smartcard',
name='product_name',
),
migrations.RemoveField(
model_name='smartcard',
name='slug',
),
migrations.RemoveField(
model_name='smartcard',
name='updated_at',
),
migrations.AlterField(
model_name='cartitem',
name='product',
field=models.ForeignKey(to='myshop.Product'),
),
migrations.AlterField(
model_name='orderitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Product', blank=True, to='myshop.Product', null=True),
),
migrations.AlterField(
model_name='productimage',
name='product',
field=models.ForeignKey(to='myshop.Product'),
),
migrations.AlterField(
model_name='productpage',
name='product',
field=models.ForeignKey(to='myshop.Product'),
),
migrations.CreateModel(
name='Commodity',
fields=[
('product_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='myshop.Product')),
('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
('placeholder', cms.models.fields.PlaceholderField(slotname='Commodity Details', editable=False, to='cms.Placeholder', null=True)),
],
options={
'verbose_name': 'Commodity',
'verbose_name_plural': 'Commodities',
},
bases=('myshop.product',),
),
migrations.CreateModel(
name='SmartPhoneModel',
fields=[
('product_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='myshop.Product')),
('battery_type', models.PositiveSmallIntegerField(verbose_name='Battery type', choices=[(1, 'Lithium Polymer (Li-Poly)'), (2, 'Lithium Ion (Li-Ion)')])),
('battery_capacity', models.PositiveIntegerField(help_text='Battery capacity in mAh', verbose_name='Capacity')),
('ram_storage', models.PositiveIntegerField(help_text='RAM storage in MB', verbose_name='RAM')),
('wifi_connectivity', models.PositiveIntegerField(help_text='WiFi Connectivity', verbose_name='WiFi', choices=[(1, '802.11 b/g/n')])),
('bluetooth', models.PositiveIntegerField(help_text='Bluetooth Connectivity', verbose_name='Bluetooth', choices=[(1, 'Bluetooth 4.0')])),
('gps', models.BooleanField(default=False, help_text='GPS integrated', verbose_name='GPS')),
('width', models.DecimalField(help_text='Width in mm', verbose_name='Width', max_digits=4, decimal_places=1)),
('height', models.DecimalField(help_text='Height in mm', verbose_name='Height', max_digits=4, decimal_places=1)),
('weight', models.DecimalField(help_text='Weight in gram', verbose_name='Weight', max_digits=5, decimal_places=1)),
('screen_size', models.DecimalField(help_text='Diagonal screen size in inch', verbose_name='Screen size', max_digits=4, decimal_places=2)),
('operating_system', models.ForeignKey(verbose_name='Operating System', to='myshop.OperatingSystem')),
],
options={
'verbose_name': 'Smart Phone',
'verbose_name_plural': 'Smart Phones',
},
bases=('myshop.product',),
),
migrations.DeleteModel(
name='SmartCardTranslation',
),
migrations.AddField(
model_name='producttranslation',
name='master',
field=models.ForeignKey(related_name='translations', to='myshop.Product', null=True),
),
migrations.AddField(
model_name='product',
name='cms_pages',
field=models.ManyToManyField(help_text='Choose list view this product shall appear on.', to='cms.Page', through='myshop.ProductPage'),
),
migrations.AddField(
model_name='product',
name='images',
field=models.ManyToManyField(to='filer.Image', through='myshop.ProductImage'),
),
migrations.AddField(
model_name='product',
name='manufacturer',
field=models.ForeignKey(verbose_name='Manufacturer', to='myshop.Manufacturer'),
),
migrations.AddField(
model_name='product',
name='polymorphic_ctype',
field=models.ForeignKey(related_name='polymorphic_myshop.product_set+', editable=False, to='contenttypes.ContentType', null=True),
),
migrations.AddField(
model_name='smartcard',
name='product_ptr',
field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=None, serialize=False, to='myshop.Product'),
preserve_default=False,
),
migrations.AddField(
model_name='smartphone',
name='product',
field=models.ForeignKey(verbose_name='Smart-Phone Model', to='myshop.SmartPhoneModel'),
),
migrations.AlterUniqueTogether(
name='producttranslation',
unique_together=set([('language_code', 'master')]),
),
]
| bsd-3-clause | -1,009,144,222,055,504,600 | 44.565421 | 169 | 0.573172 | false |
elastic/elasticsearch-py | test_elasticsearch/test_types/async_types.py | 1 | 3095 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, AsyncGenerator, Dict
from elasticsearch import (
AIOHttpConnection,
AsyncElasticsearch,
AsyncTransport,
ConnectionPool,
)
from elasticsearch.helpers import (
async_bulk,
async_reindex,
async_scan,
async_streaming_bulk,
)
es = AsyncElasticsearch(
[{"host": "localhost", "port": 9443}],
transport_class=AsyncTransport,
)
t = AsyncTransport(
[{}],
connection_class=AIOHttpConnection,
connection_pool_class=ConnectionPool,
sniff_on_start=True,
sniffer_timeout=0.1,
sniff_timeout=1,
sniff_on_connection_fail=False,
max_retries=1,
retry_on_status={100, 400, 503},
retry_on_timeout=True,
send_get_body_as="source",
)
async def async_gen() -> AsyncGenerator[Dict[Any, Any], None]:
yield {}
async def async_scan_types() -> None:
async for _ in async_scan(
es,
query={"query": {"match_all": {}}},
request_timeout=10,
clear_scroll=True,
scroll_kwargs={"request_timeout": 10},
):
pass
async for _ in async_scan(
es,
raise_on_error=False,
preserve_order=False,
scroll="10m",
size=10,
request_timeout=10.0,
):
pass
async def async_streaming_bulk_types() -> None:
async for _ in async_streaming_bulk(es, async_gen()):
pass
async for _ in async_streaming_bulk(es, async_gen().__aiter__()):
pass
async for _ in async_streaming_bulk(es, [{}]):
pass
async for _ in async_streaming_bulk(es, ({},)):
pass
async def async_bulk_types() -> None:
_, _ = await async_bulk(es, async_gen())
_, _ = await async_bulk(es, async_gen().__aiter__())
_, _ = await async_bulk(es, [{}])
_, _ = await async_bulk(es, ({},))
async def async_reindex_types() -> None:
_, _ = await async_reindex(
es, "src-index", "target-index", query={"query": {"match": {"key": "val"}}}
)
_, _ = await async_reindex(
es, source_index="src-index", target_index="target-index", target_client=es
)
_, _ = await async_reindex(
es,
"src-index",
"target-index",
chunk_size=1,
scroll="10m",
scan_kwargs={"request_timeout": 10},
bulk_kwargs={"request_timeout": 10},
)
| apache-2.0 | 6,571,657,479,850,991,000 | 27.394495 | 83 | 0.625202 | false |
gemagomez/keepnote | keepnote/gui/main_window.py | 1 | 52638 | """
KeepNote
Graphical User Interface for KeepNote Application
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import mimetypes
import os
import shutil
import subprocess
import sys
import time
import thread
import threading
import uuid
# pygtk imports
import pygtk
pygtk.require('2.0')
import gtk
import gobject
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, \
ensure_unicode, \
unicode_gtk, \
FS_ENCODING
from keepnote.notebook import \
NoteBookError, \
NoteBookVersionError
from keepnote import notebook as notebooklib
from keepnote import tasklib
from keepnote.gui import \
get_resource, \
get_resource_image, \
get_resource_pixbuf, \
Action, \
ToggleAction, \
add_actions, \
CONTEXT_MENU_ACCEL_PATH, \
FileChooserDialog, \
init_key_shortcuts, \
UIManager
from keepnote.gui.icons import \
lookup_icon_filename
from keepnote.gui import richtext
from keepnote.gui import \
dialog_image_new, \
dialog_drag_drop_test, \
dialog_wait, \
update_file_preview
from keepnote.gui.icon_menu import IconMenu
from keepnote.gui.three_pane_viewer import ThreePaneViewer
from keepnote.gui.tabbed_viewer import TabbedViewer
_ = keepnote.translate
CLIPBOARD_NAME = "CLIPBOARD"
class KeepNoteWindow (gtk.Window):
"""Main windows for KeepNote"""
def __init__(self, app, winid=None):
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
self._app = app # application object
self._winid = winid if winid else unicode(uuid.uuid4())
self._viewers = []
# window state
self._maximized = False # True if window is maximized
self._was_maximized = False # True if iconified and was maximized
self._iconified = False # True if window is minimized
self._tray_icon = None # True if tray icon is present
self._recent_notebooks = []
self._uimanager = UIManager()
self._accel_group = self._uimanager.get_accel_group()
self.add_accel_group(self._accel_group)
init_key_shortcuts()
self.init_layout()
self.setup_systray()
# load preferences for the first time
self.load_preferences(True)
def get_id(self):
return self._winid
def init_layout(self):
# init main window
self.set_title(keepnote.PROGRAM_NAME)
self.set_default_size(*keepnote.DEFAULT_WINDOW_SIZE)
self.set_icon_list(get_resource_pixbuf("keepnote-16x16.png"),
get_resource_pixbuf("keepnote-32x32.png"),
get_resource_pixbuf("keepnote-64x64.png"))
# main window signals
self.connect("error", lambda w,m,e,t: self.error(m,e,t))
self.connect("delete-event", lambda w,e: self._on_close())
self.connect("window-state-event", self._on_window_state)
self.connect("size-allocate", self._on_window_size)
#self._app.pref.changed.add(self._on_app_options_changed)
#====================================
# Dialogs
self.drag_test = dialog_drag_drop_test.DragDropTestDialog(self)
self.viewer = self.new_viewer()
#====================================
# Layout
# vertical box
main_vbox = gtk.VBox(False, 0)
self.add(main_vbox)
# menu bar
main_vbox.set_border_width(0)
self.menubar = self.make_menubar()
main_vbox.pack_start(self.menubar, False, True, 0)
# toolbar
main_vbox.pack_start(self.make_toolbar(), False, True, 0)
main_vbox2 = gtk.VBox(False, 0)
main_vbox2.set_border_width(1)
main_vbox.pack_start(main_vbox2, True, True, 0)
# viewer
self.viewer_box = gtk.VBox(False, 0)
main_vbox2.pack_start(self.viewer_box, True, True, 0)
# status bar
status_hbox = gtk.HBox(False, 0)
main_vbox.pack_start(status_hbox, False, True, 0)
# message bar
self.status_bar = gtk.Statusbar()
status_hbox.pack_start(self.status_bar, False, True, 0)
self.status_bar.set_property("has-resize-grip", False)
self.status_bar.set_size_request(300, -1)
# stats bar
self.stats_bar = gtk.Statusbar()
status_hbox.pack_start(self.stats_bar, True, True, 0)
#====================================================
# viewer
self.viewer_box.pack_start(self.viewer, True, True, 0)
# add viewer menus
self.viewer.add_ui(self)
def setup_systray(self):
"""Setup systray for window"""
# system tray icon
if gtk.gtk_version > (2, 10):
if not self._tray_icon:
self._tray_icon = gtk.StatusIcon()
self._tray_icon.set_from_pixbuf(
get_resource_pixbuf("keepnote-32x32.png"))
self._tray_icon.set_tooltip(keepnote.PROGRAM_NAME)
self._statusicon_menu = self.make_statusicon_menu()
self._tray_icon.connect("activate", self._on_tray_icon_activate)
self._tray_icon.connect('popup-menu',
self._on_systray_popup_menu)
self._tray_icon.set_property(
"visible", self._app.pref.get("window", "use_systray",
default=True))
else:
self._tray_icon = None
def _on_systray_popup_menu(self, status, button, time):
self._statusicon_menu.popup(None, None, None, button, time)
#==============================================
# viewers
def new_viewer(self):
"""Creates a new viewer for this window"""
#viewer = ThreePaneViewer(self._app, self)
viewer = TabbedViewer(self._app, self)
viewer.connect("error", lambda w,m,e: self.error(m, e, None))
viewer.connect("status", lambda w,m,b: self.set_status(m, b))
viewer.connect("window-request", self._on_window_request)
viewer.connect("current-node", self._on_current_node)
viewer.connect("modified", self._on_viewer_modified)
return viewer
def add_viewer(self, viewer):
"""Adds a viewer to the window"""
self._viewers.append(viewer)
def remove_viewer(self, viewer):
"""Removes a viewer from the window"""
self._viewers.remove(viewer)
def get_all_viewers(self):
"""Returns list of all viewers associated with window"""
return self._viewers
def get_all_notebooks(self):
"""Returns all notebooks loaded by all viewers"""
return set(filter(lambda n: n is not None,
(v.get_notebook() for v in self._viewers)))
#===============================================
# accessors
def get_app(self):
"""Returns application object"""
return self._app
def get_uimanager(self):
"""Returns the UIManager for the window"""
return self._uimanager
def get_viewer(self):
"""Returns window's viewer"""
return self.viewer
def get_accel_group(self):
"""Returns the accel group for the window"""
return self._accel_group
def get_notebook(self):
"""Returns the currently loaded notebook"""
return self.viewer.get_notebook()
def get_current_page(self):
"""Returns the currently selected page"""
return self.viewer.get_current_page()
#=========================================================
# main window gui callbacks
def _on_window_state(self, window, event):
"""Callback for window state"""
iconified = self._iconified
# keep track of maximized and minimized state
self._iconified = bool(event.new_window_state &
gtk.gdk.WINDOW_STATE_ICONIFIED)
# detect recent iconification
if not iconified and self._iconified:
# save maximized state before iconification
self._was_maximized = self._maximized
self._maximized = bool(event.new_window_state &
gtk.gdk.WINDOW_STATE_MAXIMIZED)
# detect recent de-iconification
if iconified and not self._iconified:
# explicitly maximize if not maximized
# NOTE: this is needed to work around a MS windows GTK bug
if self._was_maximized:
gobject.idle_add(self.maximize)
def _on_window_size(self, window, event):
"""Callback for resize events"""
# record window size if it is not maximized or minimized
if not self._maximized and not self._iconified:
self._app.pref.get("window")["window_size"] = self.get_size()
#def _on_app_options_changed(self):
# self.load_preferences()
def _on_tray_icon_activate(self, icon):
"""Try icon has been clicked in system tray"""
if self.is_active():
self.minimize_window()
else:
self.restore_window()
#=============================================================
# viewer callbacks
def _on_window_request(self, viewer, action):
"""Callback for requesting an action from the main window"""
if action == "minimize":
self.minimize_window()
elif action == "restore":
self.restore_window()
else:
raise Exception("unknown window request: " + str(action))
#=================================================
# Window manipulation
def minimize_window(self):
"""Minimize the window (block until window is minimized"""
if self._iconified:
return
# TODO: add timer in case minimize fails
def on_window_state(window, event):
if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
gtk.main_quit()
sig = self.connect("window-state-event", on_window_state)
self.iconify()
gtk.main()
self.disconnect(sig)
def restore_window(self):
"""Restore the window from minimization"""
self.deiconify()
self.present()
def on_new_window(self):
"""Open a new window"""
win = self._app.new_window()
notebook = self.get_notebook()
if notebook:
self._app.ref_notebook(notebook)
win.set_notebook(notebook)
#==============================================
# Application preferences
def load_preferences(self, first_open=False):
"""Load preferences"""
p = self._app.pref
# notebook
window_size = p.get("window", "window_size",
default=keepnote.DEFAULT_WINDOW_SIZE)
window_maximized = p.get("window", "window_maximized", default=True)
self.setup_systray()
use_systray = p.get("window", "use_systray", default=True)
# window config for first open
if first_open:
self.resize(*window_size)
if window_maximized:
self.maximize()
minimize = p.get("window", "minimize_on_start", default=False)
if use_systray and minimize:
self.iconify()
# config window
skip = p.get("window", "skip_taskbar", default=False)
if use_systray:
self.set_property("skip-taskbar-hint", skip)
self.set_keep_above(p.get("window", "keep_above", default=False))
if p.get("window", "stick", default=False):
self.stick()
else:
self.unstick()
# other window wide properties
self._recent_notebooks = p.get("recent_notebooks", default=[])
self.set_recent_notebooks_menu(self._recent_notebooks)
self._uimanager.set_force_stock(
p.get("look_and_feel", "use_stock_icons", default=False))
self.viewer.load_preferences(self._app.pref, first_open)
def save_preferences(self):
"""Save preferences"""
p = self._app.pref
# save window preferences
p.set("window", "window_maximized", self._maximized)
p.set("recent_notebooks", self._recent_notebooks)
# let viewer save preferences
self.viewer.save_preferences(self._app.pref)
def set_recent_notebooks_menu(self, recent_notebooks):
"""Set the recent notebooks in the file menu"""
menu = self._uimanager.get_widget("/main_menu_bar/File/Open Recent Notebook")
# init menu
if menu.get_submenu() is None:
submenu = gtk.Menu()
submenu.show()
menu.set_submenu(submenu)
menu = menu.get_submenu()
# clear menu
menu.foreach(lambda x: menu.remove(x))
def make_filename(filename, maxsize=30):
if len(filename) > maxsize:
base = os.path.basename(filename)
pre = max(maxsize - len(base), 10)
return os.path.join(filename[:pre] + u"...", base)
else:
return filename
def make_func(filename):
return lambda w: self.open_notebook(filename)
# populate menu
for i, notebook in enumerate(recent_notebooks):
item = gtk.MenuItem(u"%d. %s" % (i+1, make_filename(notebook)))
item.connect("activate", make_func(notebook))
item.show()
menu.append(item)
def add_recent_notebook(self, filename):
"""Add recent notebook"""
if filename in self._recent_notebooks:
self._recent_notebooks.remove(filename)
self._recent_notebooks = [filename] + \
self._recent_notebooks[:keepnote.gui.MAX_RECENT_NOTEBOOKS]
self.set_recent_notebooks_menu(self._recent_notebooks)
#=============================================
# Notebook open/save/close UI
def on_new_notebook(self):
"""Launches New NoteBook dialog"""
dialog = FileChooserDialog(
_("New Notebook"), self,
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
_("New"), gtk.RESPONSE_OK),
app=self._app,
persistent_path="new_notebook_path")
response = dialog.run()
if response == gtk.RESPONSE_OK:
# create new notebook
if dialog.get_filename():
self.new_notebook(unicode_gtk(dialog.get_filename()))
dialog.destroy()
def on_open_notebook(self):
"""Launches Open NoteBook dialog"""
dialog = gtk.FileChooserDialog(
_("Open Notebook"), self,
action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
_("Open"), gtk.RESPONSE_OK))
def on_folder_changed(filechooser):
folder = unicode_gtk(filechooser.get_current_folder())
if os.path.exists(os.path.join(folder, notebooklib.PREF_FILE)):
filechooser.response(gtk.RESPONSE_OK)
dialog.connect("current-folder-changed", on_folder_changed)
path = self._app.get_default_path("new_notebook_path")
if os.path.exists(path):
dialog.set_current_folder(path)
file_filter = gtk.FileFilter()
file_filter.add_pattern("*.nbk")
file_filter.set_name(_("Notebook (*.nbk)"))
dialog.add_filter(file_filter)
file_filter = gtk.FileFilter()
file_filter.add_pattern("*")
file_filter.set_name(_("All files (*.*)"))
dialog.add_filter(file_filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
path = dialog.get_current_folder()
if path:
self._app.pref.set("default_paths", "new_notebook_path",
os.path.dirname(path))
notebook_file = unicode_gtk(dialog.get_filename())
if notebook_file:
self.open_notebook(notebook_file)
dialog.destroy()
def _on_close(self):
"""Callback for window close"""
try:
# TODO: decide if a clipboard action is needed before
# closing down.
#clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
#clipboard.set_can_store(None)
#clipboard.store()
self._app.save()
self.close_notebook()
if self._tray_icon:
                # turn off tray icon
self._tray_icon.set_property("visible", False)
except Exception, e:
self.error("Error while closing", e, sys.exc_info()[2])
return False
def close(self):
"""Close the window"""
self._on_close()
self.emit("delete-event", None)
self.destroy()
def on_quit(self):
"""Quit the application"""
self._app.save()
self._app.quit()
#===============================================
# Notebook actions
def save_notebook(self, silent=False):
"""Saves the current notebook"""
try:
# save window information for all notebooks associated with this
# window
for notebook in self.get_all_notebooks():
p = notebook.pref.get("windows", "ids", define=True)
p[self._winid] = {
"viewer_type": self.viewer.get_name(),
"viewerid": self.viewer.get_id()}
# let the viewer save its information
self.viewer.save()
self.set_status(_("Notebook saved"))
except Exception, e:
if not silent:
self.error(_("Could not save notebook."), e, sys.exc_info()[2])
self.set_status(_("Error saving notebook"))
return
def reload_notebook(self):
"""Reload the current NoteBook"""
notebook = self.viewer.get_notebook()
if notebook is None:
self.error(_("Reloading only works when a notebook is open."))
return
filename = notebook.get_path()
self._app.close_all_notebook(notebook, False)
self.open_notebook(filename)
self.set_status(_("Notebook reloaded"))
def new_notebook(self, filename):
"""Creates and opens a new NoteBook"""
if self.viewer.get_notebook() is not None:
self.close_notebook()
try:
# make sure filename is unicode
filename = ensure_unicode(filename, FS_ENCODING)
notebook = notebooklib.NoteBook(filename)
notebook.create()
notebook.close()
self.set_status(_("Created '%s'") % notebook.get_title())
except NoteBookError, e:
self.error(_("Could not create new notebook."), e, sys.exc_info()[2])
self.set_status("")
return None
return self.open_notebook(filename, new=True)
def _load_notebook(self, filename):
"""Loads notebook in background with progress bar"""
notebook = self._app.get_notebook(filename, self)
if notebook is None:
return None
# check for indexing
# TODO: is this the best place for checking?
# There is a difference between normal incremental indexing
        # and indexing due to version updating.
# incremental updating (checking a few files that have changed on
# disk) should be done within notebook.load().
# Whole notebook re-indexing, triggered by version upgrade
# should be done separately, and with a different wait dialog
# clearly indicating that notebook loading is going to take
# longer than usual.
if notebook.index_needed():
self.update_index(notebook)
return notebook
def _restore_windows(self, notebook, open_here=True):
"""
Restore multiple windows for notebook
open_here -- if True, will open notebook in this window
Cases:
1. if notebook has no saved windows, just open notebook in this window
2. if notebook has 1 saved window
if open_here:
open it in this window
else:
if this window has no opened notebooks,
reassign its ids to the notebook and open it here
else
reassign notebooks saved ids to this window and viewer
3. if notebook has >1 saved windows, open them in their own windows
if this window has no notebook, reassign its id to one of the
saved ids.
"""
# init window lookup
win_lookup = dict((w.get_id(), w) for w in
self._app.get_windows())
def open_in_window(winid, viewerid, notebook):
win = win_lookup.get(winid, None)
if win is None:
# open new window
win = self._app.new_window()
win_lookup[winid] = win
win._winid = winid
if viewerid:
win.get_viewer().set_id(viewerid)
# set notebook
self._app.ref_notebook(notebook)
win.set_notebook(notebook)
# find out how many windows this notebook had last time
# init viewer if needed
windows = notebook.pref.get("windows", "ids", define=True)
notebook.pref.get("viewers", "ids", define=True)
if len(windows) == 0:
            # no persistence info found, just open notebook in this window
self.set_notebook(notebook)
elif len(windows) == 1:
# restore a single window
winid, winpref = windows.items()[0]
viewerid = winpref.get("viewerid", None)
if viewerid is not None:
if len(self.get_all_notebooks()) == 0:
# no notebooks are open, so it is ok to reassign
# the viewer's id to match the notebook pref
self._winid = winid
self.viewer.set_id(viewerid)
self.set_notebook(notebook)
elif open_here:
# TODO: needs more testing
# notebooks are open, so reassign the notebook's pref to
# match the existing viewer
notebook.pref.set("windows", "ids",
{self._winid:
{"viewerid": self.viewer.get_id(),
"viewer_type": self.viewer.get_name()}})
notebook.pref.set(
"viewers", "ids", self.viewer.get_id(),
notebook.pref.get("viewers", "ids", viewerid,
define=True))
del notebook.pref.get("viewers", "ids")[viewerid]
self.set_notebook(notebook)
else:
# open in whatever window the notebook wants
open_in_window(winid, viewerid, notebook)
self._app.unref_notebook(notebook)
elif len(windows) > 1:
# get different kinds of window ids
restoring_ids = set(windows.keys())
new_ids = restoring_ids - set(win_lookup.keys())
if len(self.get_all_notebooks()) == 0:
# special case: if no notebooks opened, then make sure
# to reuse this window
if self._winid not in restoring_ids:
self._winid = iter(restoring_ids).next()
restoring_ids.remove(self._winid)
viewerid = windows[self._winid].get("viewerid", None)
if viewerid:
self.viewer.set_id(viewerid)
self.set_notebook(notebook)
# restore remaining windows
while len(restoring_ids) > 0:
winid = restoring_ids.pop()
viewerid = windows[winid].get("viewerid", None)
open_in_window(winid, viewerid, notebook)
self._app.unref_notebook(notebook)
def open_notebook(self, filename, new=False, open_here=True):
"""Opens a new notebook"""
try:
filename = notebooklib.normalize_notebook_dirname(
filename, longpath=False)
except Exception, e:
self.error(_("Could note find notebook '%s'.") % filename, e,
sys.exc_info()[2])
notebook = None
else:
notebook = self._load_notebook(filename)
if notebook is None:
return
# setup notebook
self._restore_windows(notebook, open_here=open_here)
if not new:
self.set_status(_("Loaded '%s'") % notebook.get_title())
self.update_title()
# save notebook to recent notebooks
self.add_recent_notebook(filename)
return notebook
def close_notebook(self, notebook=None):
"""Close the NoteBook"""
if notebook is None:
notebook = self.get_notebook()
self.viewer.close_notebook(notebook)
self.set_status(_("Notebook closed"))
def _on_close_notebook(self, notebook):
"""Callback when notebook is closing"""
pass
def set_notebook(self, notebook):
"""Set the NoteBook for the window"""
self.viewer.set_notebook(notebook)
def update_index(self, notebook=None, clear=False):
"""Update notebook index"""
if notebook is None:
notebook = self.viewer.get_notebook()
if notebook is None:
return
def update(task):
# erase database first
# NOTE: I do this right now so that corrupt databases can be
# cleared out of the way.
if clear:
notebook.clear_index()
try:
for node in notebook.index_all():
# terminate if search is canceled
if task.aborted():
break
except Exception, e:
self.error(_("Error during index"), e, sys.exc_info()[2])
task.finish()
# launch task
self.wait_dialog(_("Indexing notebook"), _("Indexing..."),
tasklib.Task(update))
#=====================================================
# viewer callbacks
def update_title(self, node=None):
"""Set the modification state of the notebook"""
notebook = self.viewer.get_notebook()
if notebook is None:
self.set_title(keepnote.PROGRAM_NAME)
else:
title = notebook.get_attr("title", u"")
if node is None:
node = self.get_current_page()
if node is not None:
title += u": " + node.get_attr("title", "")
modified = notebook.save_needed()
if modified:
self.set_title(u"* %s" % title)
self.set_status(_("Notebook modified"))
else:
self.set_title(title)
def _on_current_node(self, viewer, node):
"""Callback for when viewer changes the current node"""
self.update_title(node)
def _on_viewer_modified(self, viewer, modified):
"""Callback for when viewer has a modified notebook"""
self.update_title()
#===========================================================
# page and folder actions
def get_selected_nodes(self):
"""
Returns list of selected nodes
"""
return self.viewer.get_selected_nodes()
def confirm_delete_nodes(self, nodes):
"""Confirm whether nodes should be deleted"""
# TODO: move to app?
# TODO: add note names to dialog
# TODO: assume one node is selected
# could make this a stand alone function/dialog box
for node in nodes:
if node.get_attr("content_type") == notebooklib.CONTENT_TYPE_TRASH:
self.error(_("The Trash folder cannot be deleted."), None)
return False
if node.get_parent() == None:
self.error(_("The top-level folder cannot be deleted."), None)
return False
if len(nodes) > 1 or len(nodes[0].get_children()) > 0:
message = _("Do you want to delete this note and all of its children?")
else:
message = _("Do you want to delete this note?")
return self._app.ask_yes_no(message, _("Delete Note"),
parent=self.get_toplevel())
def on_empty_trash(self):
"""Empty Trash folder in NoteBook"""
if self.get_notebook() is None:
return
try:
self.get_notebook().empty_trash()
except NoteBookError, e:
self.error(_("Could not empty trash."), e, sys.exc_info()[2])
#=================================================
# action callbacks
def on_view_node_external_app(self, app, node=None, kind=None):
"""View a node with an external app"""
self._app.save()
# determine node to view
if node is None:
nodes = self.get_selected_nodes()
if len(nodes) == 0:
self.emit("error", _("No notes are selected."), None, None)
return
node = nodes[0]
try:
self._app.run_external_app_node(app, node, kind)
except KeepNoteError, e:
self.emit("error", e.msg, e, sys.exc_info()[2])
#=====================================================
# Cut/copy/paste
# forward cut/copy/paste to the correct widget
def on_cut(self):
"""Cut callback"""
widget = self.get_focus()
if gobject.signal_lookup("cut-clipboard", widget) != 0:
widget.emit("cut-clipboard")
def on_copy(self):
"""Copy callback"""
widget = self.get_focus()
if gobject.signal_lookup("copy-clipboard", widget) != 0:
widget.emit("copy-clipboard")
def on_copy_tree(self):
"""Copy tree callback"""
widget = self.get_focus()
if gobject.signal_lookup("copy-tree-clipboard", widget) != 0:
widget.emit("copy-tree-clipboard")
def on_paste(self):
"""Paste callback"""
widget = self.get_focus()
if gobject.signal_lookup("paste-clipboard", widget) != 0:
widget.emit("paste-clipboard")
def on_undo(self):
"""Undo callback"""
self.viewer.undo()
def on_redo(self):
"""Redo callback"""
self.viewer.redo()
#===================================================
# Misc.
def view_error_log(self):
"""View error in text editor"""
        # Windows locks open files,
        # therefore we should copy the error log before viewing it
try:
filename = os.path.realpath(keepnote.get_user_error_log())
filename2 = filename + u".bak"
shutil.copy(filename, filename2)
# use text editor to view error log
self._app.run_external_app("text_editor", filename2)
except Exception, e:
self.error(_("Could not open error log") + ":\n" + str(e),
e, sys.exc_info()[2])
def view_config_files(self):
"""View config folder in a file explorer"""
try:
            # open the preference folder with the file explorer
filename = keepnote.get_user_pref_dir()
self._app.run_external_app("file_explorer", filename)
except Exception, e:
self.error(_("Could not open error log") + ":\n" + str(e),
e, sys.exc_info()[2])
#==================================================
# Help/about dialog
def on_about(self):
"""Display about dialog"""
def func(dialog, link, data):
try:
self._app.open_webpage(link)
except KeepNoteError, e:
self.error(e.msg, e, sys.exc_info()[2])
gtk.about_dialog_set_url_hook(func, None)
about = gtk.AboutDialog()
about.set_name(keepnote.PROGRAM_NAME)
about.set_version(keepnote.PROGRAM_VERSION_TEXT)
about.set_copyright(keepnote.COPYRIGHT)
about.set_logo(get_resource_pixbuf("keepnote-icon.png"))
about.set_website(keepnote.WEBSITE)
about.set_license(keepnote.LICENSE_NAME)
about.set_translator_credits(keepnote.TRANSLATOR_CREDITS)
license_file = keepnote.get_resource(u"rc", u"COPYING")
if os.path.exists(license_file):
about.set_license(open(license_file).read())
#about.set_authors(["Matt Rasmussen <[email protected]>"])
about.set_transient_for(self)
about.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
about.connect("response", lambda d,r: about.destroy())
about.show()
#===========================================
# Messages, warnings, errors UI/dialogs
def set_status(self, text, bar="status"):
"""Sets a status message in the status bar"""
if bar == "status":
self.status_bar.pop(0)
self.status_bar.push(0, text)
elif bar == "stats":
self.stats_bar.pop(0)
self.stats_bar.push(0, text)
else:
raise Exception("unknown bar '%s'" % bar)
def error(self, text, error=None, tracebk=None):
"""Display an error message"""
self._app.error(text, error, tracebk)
def wait_dialog(self, title, text, task, cancel=True):
"""Display a wait dialog"""
# NOTE: pause autosave while performing long action
self._app.pause_auto_save(True)
dialog = dialog_wait.WaitDialog(self)
dialog.show(title, text, task, cancel=cancel)
self._app.pause_auto_save(False)
#================================================
# Menus
def get_actions(self):
actions = map(lambda x: Action(*x),
[
("File", None, _("_File")),
("New Notebook", gtk.STOCK_NEW, _("_New Notebook..."),
"", _("Start a new notebook"),
lambda w: self.on_new_notebook()),
("Open Notebook", gtk.STOCK_OPEN, _("_Open Notebook..."),
"<control>O", _("Open an existing notebook"),
lambda w: self.on_open_notebook()),
("Open Recent Notebook", gtk.STOCK_OPEN,
_("Open Re_cent Notebook")),
("Reload Notebook", gtk.STOCK_REVERT_TO_SAVED,
_("_Reload Notebook"),
"", _("Reload the current notebook"),
lambda w: self.reload_notebook()),
("Save Notebook", gtk.STOCK_SAVE, _("_Save Notebook"),
"<control>S", _("Save the current notebook"),
lambda w: self._app.save()),
("Close Notebook", gtk.STOCK_CLOSE, _("_Close Notebook"),
"", _("Close the current notebook"),
lambda w: self._app.close_all_notebook(self.get_notebook())),
("Export", None, _("_Export Notebook")),
("Import", None, _("_Import Notebook")),
("Quit", gtk.STOCK_QUIT, _("_Quit"),
"<control>Q", _("Quit KeepNote"),
lambda w: self.on_quit()),
#=======================================
("Edit", None, _("_Edit")),
("Undo", gtk.STOCK_UNDO, None,
"<control>Z", None,
lambda w: self.on_undo()),
("Redo", gtk.STOCK_REDO, None,
"<control><shift>Z", None,
lambda w: self.on_redo()),
("Cut", gtk.STOCK_CUT, None,
"<control>X", None,
lambda w: self.on_cut()),
("Copy", gtk.STOCK_COPY, None,
"<control>C", None,
lambda w: self.on_copy()),
("Copy Tree", gtk.STOCK_COPY, None,
"<control><shift>C", None,
lambda w: self.on_copy_tree()),
("Paste", gtk.STOCK_PASTE, None,
"<control>V", None,
lambda w: self.on_paste()),
("Empty Trash", gtk.STOCK_DELETE, _("Empty _Trash"),
"", None,
lambda w: self.on_empty_trash()),
#========================================
("Search", None, _("_Search")),
("Search All Notes", gtk.STOCK_FIND, _("_Search All Notes"),
"<control>K", None,
lambda w: self.search_box.grab_focus()),
#=======================================
("Go", None, _("_Go")),
#========================================
("View", None, _("_View")),
("View Note in File Explorer", gtk.STOCK_OPEN,
_("View Note in File Explorer"),
"", None,
lambda w: self.on_view_node_external_app("file_explorer",
kind="dir")),
("View Note in Text Editor", gtk.STOCK_OPEN,
_("View Note in Text Editor"),
"", None,
lambda w: self.on_view_node_external_app("text_editor",
kind="page")),
("View Note in Web Browser", gtk.STOCK_OPEN,
_("View Note in Web Browser"),
"", None,
lambda w: self.on_view_node_external_app("web_browser",
kind="page")),
("Open File", gtk.STOCK_OPEN,
_("_Open File"),
"", None,
lambda w: self.on_view_node_external_app("file_launcher",
kind="file")),
#=========================================
("Tools", None, _("_Tools")),
("Update Notebook Index", None, _("_Update Notebook Index"),
"", None,
lambda w: self.update_index(clear=True)),
("KeepNote Preferences", gtk.STOCK_PREFERENCES, _("_Preferences"),
"", None,
lambda w: self._app.app_options_dialog.show(self)),
#=========================================
("Window", None, _("Window")),
("New Window", None, _("New Window"),
"", _("Open a new window"),
lambda w: self.on_new_window()),
("Close Window", None, _("Close Window"),
"", _("Close window"),
lambda w: self.close()),
#=========================================
("Help", None, _("_Help")),
("View Error Log...", gtk.STOCK_DIALOG_ERROR, _("View _Error Log..."),
"", None,
lambda w: self.view_error_log()),
("View Preference Files...", None, _("View Preference Files..."), "", None,
lambda w: self.view_config_files()),
("Drag and Drop Test...", None, _("Drag and Drop Test..."),
"", None,
lambda w: self.drag_test.on_drag_and_drop_test()),
("About", gtk.STOCK_ABOUT, _("_About"),
"", None,
lambda w: self.on_about())
]) + [
Action("Main Spacer Tool"),
Action("Search Box Tool", None, None, "", _("Search All Notes")),
Action("Search Button Tool", gtk.STOCK_FIND, None, "",
_("Search All Notes"),
lambda w: self.search_box.on_search_nodes())]
# make sure recent notebooks is always visible
recent = [x for x in actions
if x.get_property("name") == "Open Recent Notebook"][0]
recent.set_property("is-important", True)
return actions
def setup_menus(self, uimanager):
pass
def get_ui(self):
return ["""
<ui>
<!-- main window menu bar -->
<menubar name="main_menu_bar">
<menu action="File">
<menuitem action="New Notebook"/>
<placeholder name="Viewer"/>
<placeholder name="New"/>
<separator/>
<menuitem action="Open Notebook"/>
<menuitem action="Open Recent Notebook"/>
<menuitem action="Save Notebook"/>
<menuitem action="Close Notebook"/>
<menuitem action="Reload Notebook"/>
<menuitem action="Empty Trash"/>
<separator/>
<menu action="Export" />
<menu action="Import" />
<separator/>
<placeholder name="Extensions"/>
<separator/>
<menuitem action="Quit"/>
</menu>
<menu action="Edit">
<menuitem action="Undo"/>
<menuitem action="Redo"/>
<separator/>
<menuitem action="Cut"/>
<menuitem action="Copy"/>
<menuitem action="Copy Tree"/>
<menuitem action="Paste"/>
<separator/>
<placeholder name="Viewer"/>
<separator/>
<menuitem action="KeepNote Preferences"/>
</menu>
<menu action="Search">
<menuitem action="Search All Notes"/>
<placeholder name="Viewer"/>
</menu>
<placeholder name="Viewer"/>
<menu action="Go">
<placeholder name="Viewer"/>
</menu>
<menu action="Tools">
<placeholder name="Viewer"/>
<menuitem action="Update Notebook Index"/>
<placeholder name="Extensions"/>
</menu>
<menu action="Window">
<menuitem action="New Window"/>
<menuitem action="Close Window"/>
<placeholder name="Viewer Window"/>
</menu>
<menu action="Help">
<menuitem action="View Error Log..."/>
<menuitem action="View Preference Files..."/>
<menuitem action="Drag and Drop Test..."/>
<separator/>
<menuitem action="About"/>
</menu>
</menubar>
<!-- main window tool bar -->
<toolbar name="main_tool_bar">
<placeholder name="Viewer"/>
<toolitem action="Main Spacer Tool"/>
<toolitem action="Search Box Tool"/>
<toolitem action="Search Button Tool"/>
</toolbar>
<!-- popup menus -->
<menubar name="popup_menus">
</menubar>
</ui>
"""]
def get_actions_statusicon(self):
"""Set actions for StatusIcon menu and return."""
actions = map(lambda x: Action(*x),
[
("KeepNote Preferences", gtk.STOCK_PREFERENCES, _("_Preferences"),
"", None,
lambda w: self._app.app_options_dialog.show(self)),
("Quit", gtk.STOCK_QUIT, _("_Quit"),
"<control>Q", _("Quit KeepNote"),
lambda w: self.close()),
("About", gtk.STOCK_ABOUT, _("_About"),
"", None,
lambda w: self.on_about())
])
return actions
def get_ui_statusicon(self):
"""Create UI xml-definition for StatusIcon menu and return."""
return ["""
<ui>
<!-- statusicon_menu -->
<popup name="statusicon_menu">
<menuitem action="KeepNote Preferences"/>
<menuitem action="About"/>
<separator/>
<menuitem action="Quit"/>
</popup>
</ui>
"""]
def make_menubar(self):
"""Initialize the menu bar"""
#===============================
# ui manager
self._actiongroup = gtk.ActionGroup('MainWindow')
self._uimanager.insert_action_group(self._actiongroup, 0)
# setup menus
add_actions(self._actiongroup, self.get_actions())
for s in self.get_ui():
self._uimanager.add_ui_from_string(s)
self.setup_menus(self._uimanager)
# return menu bar
menubar = self._uimanager.get_widget('/main_menu_bar')
return menubar
def make_toolbar(self):
# configure toolbar
toolbar = self._uimanager.get_widget('/main_tool_bar')
toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL)
toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_border_width(0)
try:
# NOTE: if this version of GTK doesn't have this size, then
# ignore it
toolbar.set_property("icon-size", gtk.ICON_SIZE_SMALL_TOOLBAR)
except:
pass
# separator (is there a better way to do this?)
spacer = self._uimanager.get_widget("/main_tool_bar/Main Spacer Tool")
spacer.remove(spacer.child)
spacer.set_expand(True)
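        # (gtk has no dedicated expanding spacer for toolbars; an alternative
        #  would be a gtk.SeparatorToolItem with set_draw(False) and
        #  set_expand(True), which renders the same way.)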
# search box
self.search_box = SearchBox(self)
self.search_box.show()
w = self._uimanager.get_widget("/main_tool_bar/Search Box Tool")
w.remove(w.child)
w.add(self.search_box)
return toolbar
def make_statusicon_menu(self):
"""Initialize the StatusIcon menu."""
#===============================
# ui manager
self._actiongroup_statusicon = gtk.ActionGroup('StatusIcon')
self._tray_icon.uimanager = gtk.UIManager()
self._tray_icon.uimanager.insert_action_group(
self._actiongroup_statusicon, 0)
# setup menu
add_actions(self._actiongroup_statusicon,
self.get_actions_statusicon())
for s in self.get_ui_statusicon():
self._tray_icon.uimanager.add_ui_from_string(s)
self.setup_menus(self._tray_icon.uimanager)
# return menu
statusicon_menu = self._tray_icon.uimanager.get_widget(
'/statusicon_menu')
return statusicon_menu
gobject.type_register(KeepNoteWindow)
gobject.signal_new("error", KeepNoteWindow, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (str, object, object))
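# Handlers connected to the custom "error" signal receive three arguments,
# per the (str, object, object) signature above -- typically a message string
# plus exception/traceback-style objects (interpretation assumed from usage).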
class SearchBox (gtk.Entry):
def __init__(self, window):
gtk.Entry.__init__(self)
self._window = window
self.connect("changed", self._on_search_box_text_changed)
self.connect("activate", lambda w: self.on_search_nodes())
self.search_box_list = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_STRING)
self.search_box_completion = gtk.EntryCompletion()
self.search_box_completion.connect("match-selected",
self._on_search_box_completion_match)
self.search_box_completion.set_match_func(lambda c, k, i: True)
self.search_box_completion.set_model(self.search_box_list)
self.search_box_completion.set_text_column(0)
self.set_completion(self.search_box_completion)
def on_search_nodes(self):
"""Search nodes"""
# do nothing if notebook is not defined
if not self._window.get_notebook():
return
# TODO: add parsing grammar
# get words
words = [x.lower() for x in
unicode_gtk(self.get_text()).strip().split()]
# clear listview
self._window.get_viewer().start_search_result()
# queue for sending results between threads
from threading import Lock
from Queue import Queue
queue = Queue()
lock = Lock() # a mutex for the notebook (protect sqlite)
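        # Producer/consumer: the background search task pushes matching nodes
        # into 'queue', while gui_update (scheduled via gobject.idle_add)
        # drains them in small batches on the gtk main loop; 'alldone' lets
        # the task block until the gui side has consumed everything.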
# update gui with search result
def search(task):
alldone = Lock() # ensure gui and background sync up at end
alldone.acquire()
def gui_update():
lock.acquire()
more = True
try:
maxstep = 20
for i in xrange(maxstep):
# check if search is aborted
if task.aborted():
more = False
break
# skip if queue is empty
if queue.empty():
break
node = queue.get()
# no more nodes left, finish
if node is None:
more = False
break
# add result to gui
self._window.get_viewer().add_search_result(node)
except Exception, e:
self._window.error(_("Unexpected error"), e)
more = False
finally:
lock.release()
if not more:
alldone.release()
return more
gobject.idle_add(gui_update)
# init search
notebook = self._window.get_notebook()
try:
nodes = (notebook.get_node_by_id(nodeid)
for nodeid in
notebook.search_node_contents(" ".join(words)))
except:
keepnote.log_error()
# do search in thread
try:
lock.acquire()
for node in nodes:
if task.aborted():
break
lock.release()
if node:
queue.put(node)
lock.acquire()
lock.release()
queue.put(None)
except Exception, e:
                self._window.error(_("Unexpected error"), e)
# wait for gui thread to finish
# NOTE: if task is aborted, then gui_update stops itself for
# some reason, thus no need to acquire alldone.
if not task.aborted():
alldone.acquire()
# launch task
task = tasklib.Task(search)
self._window.wait_dialog(
_("Searching notebook"), _("Searching..."), task)
if task.exc_info()[0]:
e, t, tr = task.exc_info()
keepnote.log_error(e, tr)
self._window.get_viewer().end_search_result()
def focus_on_search_box(self):
"""Place cursor in search box"""
self.grab_focus()
def _on_search_box_text_changed(self, url_text):
self.search_box_update_completion()
def search_box_update_completion(self):
if not self._window.get_notebook():
return
text = unicode_gtk(self.get_text())
self.search_box_list.clear()
if len(text) > 0:
results = self._window.get_notebook().search_node_titles(text)[:10]
for nodeid, title in results:
self.search_box_list.append([title, nodeid])
def _on_search_box_completion_match(self, completion, model, iter):
if not self._window.get_notebook():
return
nodeid = model[iter][1]
node = self._window.get_notebook().get_node_by_id(nodeid)
if node:
self._window.get_viewer().goto_node(node, False)
| gpl-2.0 | -3,410,582,267,432,658,000 | 30.576485 | 87 | 0.520195 | false |
heytcass/homeassistant-config | deps/cherrypy/_cpreqbody.py | 1 | 37427 | """Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short,
:attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of
:class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the
'image' types altogether. If neither the full type nor the major type has a
matching processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
\"""Read application/json data into request.json.\"""
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the
``processors`` dictionary. All processor functions take a single argument,
the ``Entity`` instance they are to process. It will be called whenever a
request is received (for those URI's where the tool is turned on) which
has a ``Content-Type`` of "application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid),
then reads the remaining bytes on the socket. The ``fp`` object knows its
own length, so it won't hang waiting for data that never arrives. It will
return when all data has been read. Then, we decode those bytes using
Python's built-in ``json`` module, and stick the decoded result onto
``request.json`` . If it cannot be decoded, we raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the
``processors`` dict so that request entities of other ``Content-Types``
aren't parsed at all. Since there's no entry for those invalid MIME
types, the ``default_proc`` method of ``cherrypy.request.body`` is
called. But this does nothing by default (usually to provide the page
handler an opportunity to handle it.)
But in our case, we want to raise 415, so we replace
``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we can do so without making a ``Tool``.
Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way,
not update the existing one.
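As a minimal illustration (the names here are hypothetical, not part of
CherryPy)::
    def plaintext_processor(entity):
        # Read the raw bytes and decode them with the attempted charsets.
        cherrypy.serving.request.plain_text = entity.decode_entity(entity.read())
    request.body.processors = {'text/plain': plaintext_processor}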
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
try:
from urllib import unquote_plus
except ImportError:
def unquote_plus(bs):
"""Bytes version of urllib.parse.unquote_plus."""
bs = bs.replace(ntob('+'), ntob(' '))
atoms = bs.split(ntob('%'))
for i in range(1, len(atoms)):
item = atoms[i]
try:
pct = int(item[:2], 16)
atoms[i] = bytes([pct]) + item[2:]
except ValueError:
pass
return ntob('').join(atoms)
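# For example, with the fallback above:
#   unquote_plus(ntob('a%20b+c')) == ntob('a b c')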
import cherrypy
from cherrypy._cpcompat import text_or_bytes, ntob, ntou
from cherrypy.lib import httputil
# ------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
"""Read application/x-www-form-urlencoded data into entity.params."""
qs = entity.fp.read()
for charset in entity.attempt_charsets:
try:
params = {}
for aparam in qs.split(ntob('&')):
for pair in aparam.split(ntob(';')):
if not pair:
continue
atoms = pair.split(ntob('='), 1)
if len(atoms) == 1:
atoms.append(ntob(''))
key = unquote_plus(atoms[0]).decode(charset)
value = unquote_plus(atoms[1]).decode(charset)
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
except UnicodeDecodeError:
pass
else:
entity.charset = charset
break
else:
raise cherrypy.HTTPError(
400, "The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(entity.attempt_charsets))
# Now that all values have been successfully parsed and decoded,
# apply them to the entity.params dict.
for key, value in params.items():
if key in entity.params:
if not isinstance(entity.params[key], list):
entity.params[key] = [entity.params[key]]
entity.params[key].append(value)
else:
entity.params[key] = value
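# For example, a body of b"a=1&a=2;b=3" (with the default utf-8 charset)
# leaves entity.params == {'a': ['1', '2'], 'b': '3'}.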
def process_multipart(entity):
"""Read all multipart parts into entity.parts."""
ib = ""
if 'boundary' in entity.content_type.params:
# http://tools.ietf.org/html/rfc2046#section-5.1.1
# "The grammar for parameters on the Content-type field is such that it
# is often necessary to enclose the boundary parameter values in quotes
# on the Content-type line"
ib = entity.content_type.params['boundary'].strip('"')
if not re.match("^[ -~]{0,200}[!-~]$", ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
ib = ('--' + ib).encode('ascii')
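    # 'ib' is now the inter-part marker (e.g. b'--boundary123'); per RFC 2046
    # the closing marker is the same string with a trailing '--' appended.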
# Find the first marker
while True:
b = entity.readline()
if not b:
return
b = b.strip()
if b == ib:
break
# Read all parts
while True:
part = entity.part_class.from_fp(entity.fp, ib)
entity.parts.append(part)
part.process()
if part.fp.done:
break
def process_multipart_form_data(entity):
"""Read all multipart/form-data parts into entity.parts or entity.params.
"""
process_multipart(entity)
kept_parts = []
for part in entity.parts:
if part.name is None:
kept_parts.append(part)
else:
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if part.name in entity.params:
if not isinstance(entity.params[part.name], list):
entity.params[part.name] = [entity.params[part.name]]
entity.params[part.name].append(value)
else:
entity.params[part.name] = value
entity.parts = kept_parts
def _old_process_multipart(entity):
"""The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
process_multipart(entity)
params = entity.params
for part in entity.parts:
if part.name is None:
key = ntou('parts')
else:
key = part.name
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
# -------------------------------- Entities --------------------------------- #
class Entity(object):
"""An HTTP request body, or MIME multipart body.
This class collects information about the HTTP request entity. When a
given entity is of MIME type "multipart", each part is parsed into its own
Entity instance, and the set of parts stored in
:attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.
Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
tries to process the request body (if any) by calling
:func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
This uses the ``content_type`` of the Entity to look up a suitable
processor in
:attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`,
a dict.
If a matching processor cannot be found for the complete Content-Type,
it tries again using the major type. For example, if a request with an
entity of type "image/jpeg" arrives, but no processor can be found for
that complete type, then one is sought for the major type "image". If a
processor is still not found, then the
:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method
of the Entity is called (which does nothing by default; you can
override this too).
CherryPy includes processors for the "application/x-www-form-urlencoded"
type, the "multipart/form-data" type, and the "multipart" major type.
CherryPy 3.2 processes these types almost exactly as older versions.
Parts are passed as arguments to the page handler using their
``Content-Disposition.name`` if given, otherwise in a generic "parts"
argument. Each such part is either a string, or the
:class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
case it will have ``file`` and ``filename`` attributes, or possibly a
``value`` attribute). Each Part is itself a subclass of
Entity, and has its own ``process`` method and ``processors`` dict.
There is a separate processor for the "multipart" major type which is more
flexible, and simply stores all multipart parts in
:attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
enable it with::
cherrypy.request.body.processors['multipart'] = _cpreqbody.process_multipart
in an ``on_start_resource`` tool.
"""
# http://tools.ietf.org/html/rfc2046#section-4.1.2:
# "The default character set, which must be assumed in the
# absence of a charset parameter, is US-ASCII."
# However, many browsers send data in utf-8 with no charset.
attempt_charsets = ['utf-8']
"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
charset = None
"""The successful decoding; see "attempt_charsets" above."""
content_type = None
"""The value of the Content-Type request header.
If the Entity is part of a multipart payload, this will be the Content-Type
given in the MIME headers for this part.
"""
default_content_type = 'application/x-www-form-urlencoded'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
filename = None
"""The ``Content-Disposition.filename`` header, if available."""
fp = None
"""The readable socket file object."""
headers = None
"""A dict of request/multipart header names and values.
This is a copy of the ``request.headers`` for the ``request.body``;
for multipart parts, it is the set of headers for that part.
"""
length = None
"""The value of the ``Content-Length`` header, if provided."""
name = None
"""The "name" parameter of the ``Content-Disposition`` header, if any."""
params = None
"""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
processors = {'application/x-www-form-urlencoded': process_urlencoded,
'multipart/form-data': process_multipart_form_data,
'multipart': process_multipart,
}
"""A dict of Content-Type names to processor methods."""
parts = None
"""A list of Part instances if ``Content-Type`` is of major type
"multipart"."""
part_class = None
"""The class used for multipart parts.
You can replace this with custom subclasses to alter the processing of
multipart parts.
"""
def __init__(self, fp, headers, params=None, parts=None):
# Make an instance-specific copy of the class processors
# so Tools, etc. can replace them per-request.
self.processors = self.processors.copy()
self.fp = fp
self.headers = headers
if params is None:
params = {}
self.params = params
if parts is None:
parts = []
self.parts = parts
# Content-Type
self.content_type = headers.elements('Content-Type')
if self.content_type:
self.content_type = self.content_type[0]
else:
self.content_type = httputil.HeaderElement.from_str(
self.default_content_type)
# Copy the class 'attempt_charsets', prepending any Content-Type
# charset
dec = self.content_type.params.get("charset", None)
if dec:
self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
if c != dec]
else:
self.attempt_charsets = self.attempt_charsets[:]
# Length
self.length = None
clen = headers.get('Content-Length', None)
# If Transfer-Encoding is 'chunked', ignore any Content-Length.
if (
clen is not None and
'chunked' not in headers.get('Transfer-Encoding', '')
):
try:
self.length = int(clen)
except ValueError:
pass
# Content-Disposition
self.name = None
self.filename = None
disp = headers.elements('Content-Disposition')
if disp:
disp = disp[0]
if 'name' in disp.params:
self.name = disp.params['name']
if self.name.startswith('"') and self.name.endswith('"'):
self.name = self.name[1:-1]
if 'filename' in disp.params:
self.filename = disp.params['filename']
if (
self.filename.startswith('"') and
self.filename.endswith('"')
):
self.filename = self.filename[1:-1]
# The 'type' attribute is deprecated in 3.2; remove it in 3.3.
type = property(
lambda self: self.content_type,
doc="A deprecated alias for "
":attr:`content_type<cherrypy._cpreqbody.Entity.content_type>`."
)
def read(self, size=None, fp_out=None):
return self.fp.read(size, fp_out)
def readline(self, size=None):
return self.fp.readline(size)
def readlines(self, sizehint=None):
return self.fp.readlines(sizehint)
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def next(self):
return self.__next__()
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read(fp_out=fp_out)
return fp_out
def make_file(self):
"""Return a file-like object into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed.
See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
return tempfile.TemporaryFile()
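        # A subclass could instead return, e.g., a named file in an upload
        # directory (hypothetical):
        #     return open(os.path.join(upload_dir, 'part.tmp'), 'w+b')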
def fullvalue(self):
"""Return this entity as a string, whether stored in a file or not."""
if self.file:
# It was stored in a tempfile. Read it.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
else:
value = self.value
value = self.decode_entity(value)
return value
    def decode_entity(self, value):
        """Decode a byte string using self.attempt_charsets; return unicode."""
for charset in self.attempt_charsets:
try:
value = value.decode(charset)
except UnicodeDecodeError:
pass
else:
self.charset = charset
return value
else:
raise cherrypy.HTTPError(
400,
"The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(self.attempt_charsets)
)
def process(self):
"""Execute the best-match processor for the given media type."""
proc = None
ct = self.content_type.value
try:
proc = self.processors[ct]
except KeyError:
toptype = ct.split('/', 1)[0]
try:
proc = self.processors[toptype]
except KeyError:
pass
if proc is None:
self.default_proc()
else:
proc(self)
def default_proc(self):
"""Called if a more-specific processor is not found for the
``Content-Type``.
"""
# Leave the fp alone for someone else to read. This works fine
# for request.body, but the Part subclasses need to override this
# so they can move on to the next part.
pass
class Part(Entity):
"""A MIME part entity, part of a multipart entity."""
# "The default character set, which must be assumed in the absence of a
# charset parameter, is US-ASCII."
attempt_charsets = ['us-ascii', 'utf-8']
"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
boundary = None
"""The MIME multipart boundary."""
default_content_type = 'text/plain'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however (this class),
the MIME spec declares that a part with no Content-Type defaults to
"text/plain".
"""
# This is the default in stdlib cgi. We may want to increase it.
maxrambytes = 1000
"""The threshold of bytes after which point the ``Part`` will store
its data in a file (generated by
:func:`make_file<cherrypy._cprequest.Entity.make_file>`)
instead of a string. Defaults to 1000, just like the :mod:`cgi`
module in Python's standard library.
"""
def __init__(self, fp, headers, boundary):
Entity.__init__(self, fp, headers)
self.boundary = boundary
self.file = None
self.value = None
@classmethod
def from_fp(cls, fp, boundary):
headers = cls.read_headers(fp)
return cls(fp, headers, boundary)
@classmethod
def read_headers(cls, fp):
headers = httputil.HeaderMap()
while True:
line = fp.readline()
if not line:
# No more data--illegal end of headers
raise EOFError("Illegal end of headers.")
if line == ntob('\r\n'):
# Normal end of headers
break
if not line.endswith(ntob('\r\n')):
raise ValueError("MIME requires CRLF terminators: %r" % line)
if line[0] in ntob(' \t'):
# It's a continuation line.
v = line.strip().decode('ISO-8859-1')
else:
k, v = line.split(ntob(":"), 1)
k = k.strip().decode('ISO-8859-1')
v = v.strip().decode('ISO-8859-1')
existing = headers.get(k)
if existing:
v = ", ".join((existing, v))
headers[k] = v
return headers
def read_lines_to_boundary(self, fp_out=None):
"""Read bytes from self.fp and return or write them to a file.
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
written to the fp, and that fp is returned.
"""
endmarker = self.boundary + ntob("--")
delim = ntob("")
prev_lf = True
lines = []
seen = 0
while True:
line = self.fp.readline(1 << 16)
if not line:
raise EOFError("Illegal end of multipart body.")
if line.startswith(ntob("--")) and prev_lf:
strippedline = line.strip()
if strippedline == self.boundary:
break
if strippedline == endmarker:
self.fp.finish()
break
line = delim + line
if line.endswith(ntob("\r\n")):
delim = ntob("\r\n")
line = line[:-2]
prev_lf = True
elif line.endswith(ntob("\n")):
delim = ntob("\n")
line = line[:-1]
prev_lf = True
else:
delim = ntob("")
prev_lf = False
if fp_out is None:
lines.append(line)
seen += len(line)
if seen > self.maxrambytes:
fp_out = self.make_file()
for line in lines:
fp_out.write(line)
else:
fp_out.write(line)
if fp_out is None:
result = ntob('').join(lines)
return result
else:
fp_out.seek(0)
return fp_out
def default_proc(self):
"""Called if a more-specific processor is not found for the
``Content-Type``.
"""
if self.filename:
# Always read into a file if a .filename was given.
self.file = self.read_into_file()
else:
result = self.read_lines_to_boundary()
if isinstance(result, text_or_bytes):
self.value = result
else:
self.file = result
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read_lines_to_boundary(fp_out=fp_out)
return fp_out
Entity.part_class = Part
try:
inf = float('inf')
except ValueError:
# Python 2.4 and lower
class Infinity(object):
def __cmp__(self, other):
return 1
def __sub__(self, other):
return self
inf = Infinity()
comma_separated_headers = [
'Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow',
'Cache-Control', 'Connection', 'Content-Encoding',
'Content-Language', 'Expect', 'If-Match',
'If-None-Match', 'Pragma', 'Proxy-Authenticate',
'Te', 'Trailer', 'Transfer-Encoding', 'Upgrade',
'Vary', 'Via', 'Warning', 'Www-Authenticate'
]
class SizedReader:
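    """A file-like reader that tracks and bounds consumption of the body.
    Reads at most 'length' bytes from 'fp' (when a Content-Length was given),
    raises 413 once more than 'maxbytes' have been read, and keeps an internal
    buffer for bytes read past a newline by readline().
    """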
def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE,
has_trailers=False):
        # Keep an internal buffer so readline() can stash bytes read past a
        # newline.
self.fp = fp
self.length = length
self.maxbytes = maxbytes
self.buffer = ntob('')
self.bufsize = bufsize
self.bytes_read = 0
self.done = False
self.has_trailers = has_trailers
def read(self, size=None, fp_out=None):
"""Read bytes from the request body and return or write them to a file.
A number of bytes less than or equal to the 'size' argument are read
off the socket. The actual number of bytes read are tracked in
self.bytes_read. The number may be smaller than 'size' when 1) the
client sends fewer bytes, 2) the 'Content-Length' request header
specifies fewer bytes than requested, or 3) the number of bytes read
exceeds self.maxbytes (in which case, 413 is raised).
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
written to the fp, and None is returned.
"""
if self.length is None:
if size is None:
remaining = inf
else:
remaining = size
else:
remaining = self.length - self.bytes_read
if size and size < remaining:
remaining = size
if remaining == 0:
self.finish()
if fp_out is None:
return ntob('')
else:
return None
chunks = []
# Read bytes from the buffer.
if self.buffer:
if remaining is inf:
data = self.buffer
self.buffer = ntob('')
else:
data = self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
# Read bytes from the socket.
while remaining > 0:
chunksize = min(remaining, self.bufsize)
try:
data = self.fp.read(chunksize)
except Exception:
e = sys.exc_info()[1]
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
if not data:
self.finish()
break
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
if fp_out is None:
return ntob('').join(chunks)
def readline(self, size=None):
"""Read a line from the request body and return it."""
chunks = []
while size is None or size > 0:
chunksize = self.bufsize
if size is not None and size < self.bufsize:
chunksize = size
data = self.read(chunksize)
if not data:
break
pos = data.find(ntob('\n')) + 1
if pos:
chunks.append(data[:pos])
remainder = data[pos:]
self.buffer += remainder
self.bytes_read -= len(remainder)
break
else:
chunks.append(data)
return ntob('').join(chunks)
def readlines(self, sizehint=None):
"""Read lines from the request body and return them."""
if self.length is not None:
if sizehint is None:
sizehint = self.length - self.bytes_read
else:
sizehint = min(sizehint, self.length - self.bytes_read)
lines = []
seen = 0
while True:
line = self.readline()
if not line:
break
lines.append(line)
seen += len(line)
if seen >= sizehint:
break
return lines
def finish(self):
self.done = True
if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
self.trailers = {}
try:
for line in self.fp.read_trailer_lines():
if line[0] in ntob(' \t'):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(ntob(":"), 1)
except ValueError:
raise ValueError("Illegal header line.")
k = k.strip().title()
v = v.strip()
if k in comma_separated_headers:
                        existing = self.trailers.get(k)
if existing:
v = ntob(", ").join((existing, v))
self.trailers[k] = v
except Exception:
e = sys.exc_info()[1]
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
class RequestBody(Entity):
"""The entity of the HTTP request."""
bufsize = 8 * 1024
"""The buffer size used when reading the socket."""
# Don't parse the request body at all if the client didn't provide
# a Content-Type header. See
# https://github.com/cherrypy/cherrypy/issues/790
default_content_type = ''
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
maxbytes = None
"""Raise ``MaxSizeExceeded`` if more bytes than this are read from
the socket.
"""
def __init__(self, fp, headers, params=None, request_params=None):
Entity.__init__(self, fp, headers, params)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
# When no explicit charset parameter is provided by the
# sender, media subtypes of the "text" type are defined
# to have a default charset value of "ISO-8859-1" when
# received via HTTP.
if self.content_type.value.startswith('text/'):
for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
if c in self.attempt_charsets:
break
else:
self.attempt_charsets.append('ISO-8859-1')
# Temporary fix while deprecating passing .parts as .params.
self.processors['multipart'] = _old_process_multipart
if request_params is None:
request_params = {}
self.request_params = request_params
def process(self):
"""Process the request entity based on its Content-Type."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
# It is possible to send a POST request with no body, for example;
# however, app developers are responsible in that case to set
# cherrypy.request.process_body to False so this method isn't called.
h = cherrypy.serving.request.headers
if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
raise cherrypy.HTTPError(411)
self.fp = SizedReader(self.fp, self.length,
self.maxbytes, bufsize=self.bufsize,
has_trailers='Trailer' in h)
super(RequestBody, self).process()
# Body params should also be a part of the request_params
# add them in here.
request_params = self.request_params
for key, value in self.params.items():
# Python 2 only: keyword arguments must be byte strings (type
# 'str').
if sys.version_info < (3, 0):
if isinstance(key, unicode):
key = key.encode('ISO-8859-1')
if key in request_params:
if not isinstance(request_params[key], list):
request_params[key] = [request_params[key]]
request_params[key].append(value)
else:
request_params[key] = value
| mit | 8,550,716,814,100,659,000 | 35.765226 | 84 | 0.58538 | false |
lykops/lykops | lykops/urls.py | 1 | 5817 | """lykops URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
# from django.contrib import admin
from library.connecter.database.mongo import Op_Mongo
from library.connecter.database.redis_api import Op_Redis
# from lykops import settings
from lykops.ansible.execute import Exec
from lykops.ansible.options import Options
from lykops.ansible.report import Report
from lykops.ansible.yaml import Yaml
from lykops.sysadmin.inventory import Inventory
# from lykops.sysadmin.privacy import Privacy
from lykops.sysadmin.user import User
from lykops.views import Login
mongoclient = Op_Mongo()
redisclient = Op_Redis()
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^$', User(mongoclient=mongoclient, redisclient=redisclient).summary, name='index'),
url(r'^login.html', Login(mongoclient=mongoclient, redisclient=redisclient).login, name='login'),
url(r'^logout.html', Login(mongoclient=mongoclient, redisclient=redisclient).logout, name='logout'),
url(r'^user/create_admin', Login(mongoclient=mongoclient, redisclient=redisclient).create_admin, name='create_admin'),
url(r'^user/detail', User(mongoclient=mongoclient, redisclient=redisclient).detail),
url(r'^user/list', User(mongoclient=mongoclient, redisclient=redisclient).summary, name='user_list'),
url(r'^user/add', User(mongoclient=mongoclient, redisclient=redisclient).add, name='user_add'),
url(r'^user/edit', User(mongoclient=mongoclient, redisclient=redisclient).edit),
url(r'^user/chgpwd', User(mongoclient=mongoclient, redisclient=redisclient).change_pwd),
url(r'^user/chgpvltwd', User(mongoclient=mongoclient, redisclient=redisclient).change_vaultpwd),
url(r'^user/del', User(mongoclient=mongoclient, redisclient=redisclient).delete),
url(r'^user/disable', User(mongoclient=mongoclient, redisclient=redisclient).disable),
url(r'^user/enable', User(mongoclient=mongoclient, redisclient=redisclient).enable),
url(r'^user/$', User(mongoclient=mongoclient, redisclient=redisclient).summary),
# url(r'^privacy/edit', Privacy(mongoclient=mongoclient, redisclient=redisclient).edit, name='privacy_edit'),
# url(r'^privacy/detail', Privacy(mongoclient=mongoclient, redisclient=redisclient).detail, name='privacy_detail'),
# url(r'^privacy/$', Privacy(mongoclient=mongoclient, redisclient=redisclient).detail),
    # This feature stores users' confidential data; it is not needed in this
    # release, so it is not exposed for now.
url(r'^inventory/add$', Inventory(mongoclient=mongoclient, redisclient=redisclient).add, name='inventory_add'),
url(r'^inventory/list$', Inventory(mongoclient=mongoclient, redisclient=redisclient).summary, name='inventory_list'),
url(r'^inventory/$', Inventory(mongoclient=mongoclient, redisclient=redisclient).summary),
url(r'^inventory/detail$', Inventory(mongoclient=mongoclient, redisclient=redisclient).detail, name='inventory_detail'),
url(r'^inventory/edit$', Inventory(mongoclient=mongoclient, redisclient=redisclient).edit, name='inventory_edit'),
url(r'^inventory/del$', Inventory(mongoclient=mongoclient, redisclient=redisclient).delete, name='inventory_del'),
url(r'^ansible/$', Report(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible'),
url(r'^ansible/report/$', Report(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_report'),
url(r'^ansible/report/list$', Report(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_report_list'),
url(r'^ansible/report/detail$', Report(mongoclient=mongoclient, redisclient=redisclient).detail),
url(r'^ansible/yaml/add$', Yaml(mongoclient=mongoclient, redisclient=redisclient).add, name='ansible_yaml_add'),
url(r'^ansible/yaml/import$', Yaml(mongoclient=mongoclient, redisclient=redisclient).import_file, name='ansible_yaml_import'),
url(r'^ansible/yaml/list$', Yaml(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_yaml_list'),
url(r'^ansible/yaml/detail$', Yaml(mongoclient=mongoclient, redisclient=redisclient).detail, name='ansible_yaml_detail'),
url(r'^ansible/yaml/edit$', Yaml(mongoclient=mongoclient, redisclient=redisclient).edit),
url(r'^ansible/yaml/$', Yaml(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_yaml'),
url(r'^ansible/exec/adhoc$', Exec(mongoclient=mongoclient, redisclient=redisclient).adhoc, name='ansible_exec_adhoc'),
url(r'^ansible/exec/playbook$', Exec(mongoclient=mongoclient, redisclient=redisclient).playbook, name='ansible_exec_playbook'),
url(r'^ansible/option/$', Options(mongoclient=mongoclient, redisclient=redisclient).detail, name='ansible_option'),
url(r'^ansible/option/edit$', Options(mongoclient=mongoclient, redisclient=redisclient).edit),
url(r'^ansible/option/detail$', Options(mongoclient=mongoclient, redisclient=redisclient).detail),
# url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.STATICFILES_DIRS, 'show_indexes':False}),
# url(r'^file/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.MEDIA_ROOT, 'show_indexes':False}),
]
| apache-2.0 | 4,648,493,441,235,367,000 | 63.595506 | 132 | 0.744651 | false |
dk379/asn-tryst | src/load_asns.py | 1 | 3927 |
import argparse
import logging
from multiprocessing import Pool
import MySQLdb
import netaddr
import requests
import sys
from read_config import asntryst_read_config
#
# Project specific settings
#
CONFIG = asntryst_read_config()
DB_HOST = CONFIG["database"]["hostname"]
DB_NAME = CONFIG["database"]["database"]
DB_USER = CONFIG["database"]["username"]
DB_PASS = CONFIG["database"]["password"]
INVALID_ASN = 0xffffffff
# IP versions supported
# 4 ... IP version 4
# 6 ... IP version 6
IP_VERSIONS = [4, 6]
# Data call URL for fetching ASN
RIPESTAT_DC_URL = "http://stat.ripe.net/data/network-info/data.json?resource={}"
log = logging.getLogger(__file__)
def is_private_ip(ip):
"""Identifies private IP addresses
Based on RFC1918 some IP addresses are intended for private use only.
If 'ip' is a private IP address True is returned.
"""
is_private = False
try:
ip_parsed = netaddr.IPNetwork(ip)
is_private = ip_parsed.is_private()
except netaddr.AddrFormatError, e:
log.error(e)
return is_private
def fetch_from_ripestat(url):
"""Result is returned in JSON format, unless in an
error case in which it returns None."""
try:
response = requests.get(url = url, headers={'Connection':'close'})
response = response.json()
except requests.exceptions.RequestException, e:
log.error(e)
response = None
return response
def asn_for_ip(ip):
""" Returns the ASN looked up on RIPEstat.
"""
if not is_private_ip(ip):
json_response = fetch_from_ripestat(RIPESTAT_DC_URL.format(ip))
try:
asn = json_response["data"]["asns"][0]
asn = int(asn)
except (KeyError,TypeError,IndexError, ValueError), e:
asn = INVALID_ASN
else:
asn = INVALID_ASN
return asn
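# Example (assuming RIPEstat is reachable): asn_for_ip('193.0.6.139') should
# return the ASN announcing that prefix, e.g. 3333 for this RIPE NCC address.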
MP_POOL = Pool(10)
def asns_for_ips(ips):
"""Returns ip to ASN mapping."""
asns = MP_POOL.map(asn_for_ip, ips)
return zip(ips, asns)
def load_asns_for_ips(ip_version=4, fetch_size=10):
"""IPs are fetched from MySQL, looked up on RIPEstat and written back,
fetch_size at a time.
"""
conn = MySQLdb.connect(host=DB_HOST,
user=DB_NAME, passwd=DB_PASS, db=DB_NAME)
cur = conn.cursor()
sql = "SELECT COUNT(*) FROM IPSV{} WHERE AUTNUM = 0xffff".format(ip_version)
cur.execute(sql)
total = cur.fetchone()[0]
to_ascii_func = "inet_ntoa" if ip_version ==4 else "inet6_ntoa"
count = 0
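    # Work in batches: each pass selects up to 'fetch_size' IPs that still
    # have AUTNUM = 0, resolves them via RIPEstat in parallel, and REPLACEs
    # the rows with the looked-up ASNs (INVALID_ASN when the lookup fails).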
while( count < total ):
sql = "SELECT {}(IP) FROM IPSV{} WHERE AUTNUM = 0 limit {}".format(
to_ascii_func, ip_version, fetch_size)
cur.execute(sql)
ips = [ result[0] for result in cur.fetchall() ]
if not ips:
break
else:
count += len(ips)
sys.stdout.write(" Progress: {0:.0f}%\r".format( (count*1./total)*100))
sys.stdout.flush()
annotated_ips = asns_for_ips(ips)
to_num_func = "inet_aton" if ip_version == 4 else "inet6_aton"
insert_sql = "REPLACE INTO IPSV{} (IP,AUTNUM) VALUES ({}(%s),%s)".format(
ip_version,
to_num_func)
values = [ (ip, asn) for ip, asn in annotated_ips ]
cur.executemany(
insert_sql, values)
conn.commit()
print "Finished: ASN loaded for {} IPs totally".format(count)
cur.close()
conn.close()
return count
def get_parser():
"""Command line parser
Arguments:
ip: select IP version to load ASNs for
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
help_msg = "load ASNs for IP version 4 or 6"
parser.add_argument("-ip", default=4, choices=IP_VERSIONS, type=int, help=help_msg )
return parser
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
load_asns_for_ips(args.ip)
| bsd-3-clause | -587,182,562,095,277,700 | 24.335484 | 88 | 0.614719 | false |
google/tmppy | _py2tmp/ir0_optimization/_expression_simplification.py | 1 | 17752 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from _py2tmp.ir0 import ir, Transformation, is_expr_variadic, GlobalLiterals, select1st_literal
from _py2tmp.ir0_optimization._compute_non_expanded_variadic_vars import compute_non_expanded_variadic_vars
from _py2tmp.ir0_optimization._recalculate_template_instantiation_can_trigger_static_asserts_info import expr_can_trigger_static_asserts
class ExpressionSimplificationTransformation(Transformation):
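    """Constant-folds and canonicalizes IR expressions (e.g. !!x => x,
    0 + x => x, x * 1 => x), taking care not to discard variadic
    sub-expressions whose pack expansion affects the result's arity.
    """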
def __init__(self) -> None:
super().__init__()
self.in_variadic_type_expansion = False
def transform_not_expr(self, not_expr: ir.NotExpr) -> ir.Expr:
expr = self.transform_expr(not_expr.inner_expr)
# !true => false
# !false => true
if isinstance(expr, ir.Literal):
assert isinstance(expr.value, bool)
return ir.Literal(not expr.value)
# !!x => x
if isinstance(expr, ir.NotExpr):
return expr.inner_expr
# !(x && y) => (!x || !y)
# !(x || y) => (!x && !y)
if isinstance(expr, ir.BoolBinaryOpExpr):
op = {
'&&': '||',
'||': '&&',
}[expr.op]
return self.transform_expr(
ir.BoolBinaryOpExpr(lhs=ir.NotExpr(expr.lhs), rhs=ir.NotExpr(expr.rhs), op=op))
# !(x == y) => x != y
# !(x != y) => x == y
# !(x < y) => x >= y
# !(x <= y) => x > y
# !(x > y) => x <= y
# !(x >= y) => x < y
if isinstance(expr, ir.ComparisonExpr) and expr.op in ('==', '!='):
op = {
'==': '!=',
'!=': '==',
'<': '>=',
'<=': '>',
'>': '<=',
'>=': '<',
}[expr.op]
return ir.ComparisonExpr(expr.lhs, expr.rhs, op)
return ir.NotExpr(expr)
def transform_unary_minus_expr(self, unary_minus: ir.UnaryMinusExpr) -> ir.Expr:
expr = self.transform_expr(unary_minus.inner_expr)
# -(3) => -3
if isinstance(expr, ir.Literal):
assert isinstance(expr.value, int)
return ir.Literal(-expr.value)
# -(x - y) => y - x
if isinstance(expr, ir.Int64BinaryOpExpr) and expr.op == '-':
return ir.Int64BinaryOpExpr(lhs=expr.rhs, rhs=expr.lhs, op='-')
return ir.UnaryMinusExpr(expr)
def transform_int64_binary_op_expr(self, binary_op: ir.Int64BinaryOpExpr) -> ir.Expr:
lhs = binary_op.lhs
rhs = binary_op.rhs
op = binary_op.op
# (x - y) => (x + -y)
# This pushes down the minus, so that e.g. (x - (-y)) => (x + y).
if op == '-':
rhs = ir.UnaryMinusExpr(rhs)
op = '+'
lhs = self.transform_expr(lhs)
rhs = self.transform_expr(rhs)
if op == '+' and isinstance(rhs, ir.UnaryMinusExpr):
# We could not push down the minus, so switch back to a subtraction.
op = '-'
rhs = rhs.inner_expr
if op == '+':
# 3 + 5 => 8
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value + rhs.value)
# 0 + x => x
if isinstance(lhs, ir.Literal) and lhs.value == 0:
return rhs
# x + 0 => x
if isinstance(rhs, ir.Literal) and rhs.value == 0:
return lhs
if op == '-':
# 8 - 5 => 3
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value - rhs.value)
# 0 - x => -x
if isinstance(lhs, ir.Literal) and lhs.value == 0:
return ir.UnaryMinusExpr(rhs)
# x - 0 => x
if isinstance(rhs, ir.Literal) and rhs.value == 0:
return lhs
if op == '*':
# 3 * 5 => 15
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value * rhs.value)
# 0 * x => 0
if isinstance(lhs, ir.Literal) and lhs.value == 0:
if self._can_remove_subexpression(rhs):
return ir.Literal(0)
# x * 0 => 0
if isinstance(rhs, ir.Literal) and rhs.value == 0:
if self._can_remove_subexpression(lhs):
return ir.Literal(0)
# 1 * x => x
if isinstance(lhs, ir.Literal) and lhs.value == 1:
return rhs
# x * 1 => x
if isinstance(rhs, ir.Literal) and rhs.value == 1:
return lhs
if op == '/':
# 16 / 3 => 5
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value // rhs.value)
# x / 1 => x
if isinstance(rhs, ir.Literal) and rhs.value == 1:
return lhs
if op == '%':
# 16 % 3 => 1
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value % rhs.value)
# x % 1 => 0
if isinstance(rhs, ir.Literal) and rhs.value == 1:
return ir.Literal(0)
return ir.Int64BinaryOpExpr(lhs, rhs, op)
def transform_bool_binary_op_expr(self, binary_op: ir.BoolBinaryOpExpr) -> ir.Expr:
lhs = binary_op.lhs
rhs = binary_op.rhs
op = binary_op.op
lhs = self.transform_expr(lhs)
rhs = self.transform_expr(rhs)
if op == '&&':
# true && false => false
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value and rhs.value)
# true && x => x
if isinstance(lhs, ir.Literal) and lhs.value is True:
return rhs
# x && true => x
if isinstance(rhs, ir.Literal) and rhs.value is True:
return lhs
# false && x => false
if isinstance(lhs, ir.Literal) and lhs.value is False:
if self._can_remove_subexpression(rhs):
return ir.Literal(False)
# x && false => false
if isinstance(rhs, ir.Literal) and rhs.value is False:
if self._can_remove_subexpression(lhs):
return ir.Literal(False)
if op == '||':
# true || false => true
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value or rhs.value)
# false || x => x
if isinstance(lhs, ir.Literal) and lhs.value is False:
return rhs
# x || false => x
if isinstance(rhs, ir.Literal) and rhs.value is False:
return lhs
# true || x => true
if isinstance(lhs, ir.Literal) and lhs.value is True:
if self._can_remove_subexpression(rhs):
return ir.Literal(True)
# x || true => true
if isinstance(rhs, ir.Literal) and rhs.value is True:
if self._can_remove_subexpression(lhs):
return ir.Literal(True)
return ir.BoolBinaryOpExpr(lhs, rhs, op)
def transform_comparison_expr(self, comparison: ir.ComparisonExpr) -> ir.Expr:
lhs = comparison.lhs
rhs = comparison.rhs
op = comparison.op
lhs = self.transform_expr(lhs)
rhs = self.transform_expr(rhs)
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
if op == '==':
return ir.Literal(lhs.value == rhs.value)
if op == '!=':
return ir.Literal(lhs.value != rhs.value)
if op == '<':
return ir.Literal(lhs.value < rhs.value)
if op == '<=':
return ir.Literal(lhs.value <= rhs.value)
if op == '>':
return ir.Literal(lhs.value > rhs.value)
if op == '>=':
return ir.Literal(lhs.value >= rhs.value)
if op in ('==', '!=') and self._is_syntactically_equal(lhs, rhs) and not expr_can_trigger_static_asserts(lhs):
if self._can_remove_subexpression(lhs) and self._can_remove_subexpression(rhs):
return {
'==': ir.Literal(True),
'!=': ir.Literal(False),
}[op]
if op in ('==', '!=') and isinstance(rhs, ir.Literal) and rhs.expr_type == ir.BoolType():
rhs, lhs = lhs, rhs
if op in ('==', '!=') and isinstance(lhs, ir.Literal) and lhs.expr_type == ir.BoolType():
return {
('==', True): lambda: rhs,
('==', False): lambda: self.transform_expr(ir.NotExpr(rhs)),
('!=', True): lambda: self.transform_expr(ir.NotExpr(rhs)),
('!=', False): lambda: rhs,
}[(op, lhs.value)]()
return ir.ComparisonExpr(lhs, rhs, op)
def transform_static_assert(self, static_assert: ir.StaticAssert):
expr = self.transform_expr(static_assert.expr)
if isinstance(expr, ir.Literal) and expr.value is True:
return
self.writer.write(ir.StaticAssert(expr=expr,
message=static_assert.message))
def _is_syntactically_equal(self, lhs: ir.Expr, rhs: ir.Expr):
if not lhs.is_same_expr_excluding_subexpressions(rhs):
return False
lhs_exprs = list(lhs.direct_subexpressions)
rhs_exprs = list(rhs.direct_subexpressions)
if len(lhs_exprs) != len(rhs_exprs):
return False
return all(self._is_syntactically_equal(lhs_expr, rhs_expr)
for lhs_expr, rhs_expr in zip(lhs_exprs, rhs_exprs))
def transform_variadic_type_expansion(self, expr: ir.VariadicTypeExpansion):
old_in_variadic_type_expansion = self.in_variadic_type_expansion
self.in_variadic_type_expansion = True
result = super().transform_variadic_type_expansion(expr)
self.in_variadic_type_expansion = old_in_variadic_type_expansion
return result
def transform_class_member_access(self, class_member_access: ir.ClassMemberAccess):
if (isinstance(class_member_access.inner_expr, ir.TemplateInstantiation)
and isinstance(class_member_access.inner_expr.template_expr, ir.AtomicTypeLiteral)):
if class_member_access.inner_expr.template_expr.cpp_type == 'GetFirstError':
args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
return self.transform_get_first_error(args)
if class_member_access.inner_expr.template_expr.cpp_type == 'std::is_same':
args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
return self.transform_is_same(args)
if class_member_access.inner_expr.template_expr.cpp_type.startswith('Select1st'):
args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
return self.transform_select1st(args)
return super().transform_class_member_access(class_member_access)
def _can_remove_subexpression(self, expr: ir.Expr):
# If we're in a variadic type expr, we can't remove variadic sub-exprs (not in general at least).
# E.g. BoolList<(F<Ts>::value || true)...> can't be optimized to BoolList<true>
if self.in_variadic_type_expansion and is_expr_variadic(expr):
return False
return True
def transform_get_first_error(self, args: Tuple[ir.Expr, ...]):
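        # GetFirstError<Ts...>::type evaluates to the first non-void argument
        # (or void).  Arguments statically known to be void -- including
        # expanded Select1stType*<void, X> packs, which always yield void --
        # can therefore be dropped without changing the result.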
new_args = []
for arg in args:
if isinstance(arg, ir.AtomicTypeLiteral) and arg.cpp_type == 'void':
pass
elif (isinstance(arg, ir.VariadicTypeExpansion)
and isinstance(arg.inner_expr, ir.ClassMemberAccess)
and isinstance(arg.inner_expr.inner_expr, ir.TemplateInstantiation)
and isinstance(arg.inner_expr.inner_expr.template_expr, ir.AtomicTypeLiteral)
and arg.inner_expr.inner_expr.template_expr.cpp_type.startswith('Select1stType')
and len(arg.inner_expr.inner_expr.args) == 2
and isinstance(arg.inner_expr.inner_expr.args[0], ir.AtomicTypeLiteral)
and arg.inner_expr.inner_expr.args[0].cpp_type == 'void'):
# Select1stType*<void, expr>...
pass
else:
new_args.append(arg)
return ir.ClassMemberAccess(inner_expr=ir.TemplateInstantiation(template_expr=GlobalLiterals.GET_FIRST_ERROR,
args=tuple(new_args),
instantiation_might_trigger_static_asserts=False),
expr_type=ir.TypeType(),
member_name='type')
def transform_is_same(self, args: Tuple[ir.Expr, ...]):
assert len(args) == 2
lhs, rhs = args
list_template_names = {'List', 'BoolList', 'Int64List'}
if (isinstance(lhs, ir.TemplateInstantiation) and isinstance(lhs.template_expr, ir.AtomicTypeLiteral) and lhs.template_expr.cpp_type in list_template_names
and isinstance(rhs, ir.TemplateInstantiation) and isinstance(rhs.template_expr, ir.AtomicTypeLiteral) and rhs.template_expr.cpp_type in list_template_names
and lhs.template_expr.cpp_type == rhs.template_expr.cpp_type
and not any(isinstance(arg, ir.VariadicTypeExpansion) for arg in lhs.args)
and not any(isinstance(arg, ir.VariadicTypeExpansion) for arg in rhs.args)
and len(lhs.args) == len(rhs.args)
and lhs.args):
# std::is_same<List<X1, X2, X3>, List<Y1, Y2, Y3>>::value
# -> std::is_same<X1, Y1>::value && std::is_same<X2, Y2>::value && std::is_same<X3, Y3>::value
if lhs.template_expr.cpp_type == 'List':
result = None
for lhs_arg, rhs_arg in zip(lhs.args, rhs.args):
if result:
result = ir.BoolBinaryOpExpr(lhs=result,
rhs=self._create_is_same_expr(lhs_arg, rhs_arg),
op='&&')
else:
result = self._create_is_same_expr(lhs_arg, rhs_arg)
return self.transform_expr(result)
            # std::is_same<Int64List<n1, n2, n3>, Int64List<m1, m2, m3>>::value
            # -> (n1 == m1) && (n2 == m2) && (n3 == m3)
            # (and same for BoolList)
result = None
for lhs_arg, rhs_arg in zip(lhs.args, rhs.args):
if result:
result = ir.BoolBinaryOpExpr(lhs=result,
rhs=ir.ComparisonExpr(lhs_arg, rhs_arg, op='=='),
op='&&')
else:
result = ir.ComparisonExpr(lhs_arg, rhs_arg, op='==')
return self.transform_expr(result)
return self._create_is_same_expr(lhs, rhs)
def _create_is_same_expr(self, lhs: ir.Expr, rhs: ir.Expr):
return ir.ClassMemberAccess(
inner_expr=ir.TemplateInstantiation(template_expr=GlobalLiterals.STD_IS_SAME,
args=(lhs, rhs),
instantiation_might_trigger_static_asserts=False),
expr_type=ir.BoolType(),
member_name='value')
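    # Select1st*<X, Y> evaluates to X; Y only matters as a dependency, so it
    # can be replaced by a single free variable occurring in it (preferring a
    # non-expanded variadic one) to reduce template instantiation cost.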
def transform_select1st(self, args: Tuple[ir.Expr, ...]):
lhs, rhs = args
best_var = None
        # First preference to non-expanded variadic vars, to keep the Select1st* expression variadic if it currently is.
for var_name in compute_non_expanded_variadic_vars(rhs):
[best_var] = (var
for var in rhs.free_vars
if var.cpp_type == var_name)
break
# If there are none, then any non-variadic var is also ok.
if not best_var:
for var in rhs.free_vars:
if not var.is_variadic and isinstance(var.expr_type, (ir.BoolType, ir.Int64Type, ir.TypeType)):
best_var = var
break
if best_var:
rhs = best_var
return ir.ClassMemberAccess(inner_expr=ir.TemplateInstantiation(template_expr=select1st_literal(lhs.expr_type, rhs.expr_type),
args=(lhs, rhs),
instantiation_might_trigger_static_asserts=False),
expr_type=lhs.expr_type,
member_name='value')
| apache-2.0 | -2,463,025,841,349,026,000 | 44.634961 | 171 | 0.534193 | false |
melmothx/jsonbot | jsb/plugs/wave/gadget.py | 1 | 4625 | # jsb.plugs.wave/gadget.py
#
#
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.persist import PlugPersist
gadgeturls = PlugPersist('gadgeturls')
gadgeturls.data['gadget'] = 'https://jsonbot.appspot.com/gadget.xml'
gadgeturls.data['poll'] = 'https://jsonbot.appspot.com/poll.xml'
gadgeturls.data['iframe'] = 'https://jsonbot.appspot.com/iframe.xml'
gadgeturls.data['loadiframe'] = 'https://jsonbot.appspot.com/loadiframe.xml'
def loadroot(event, url):
if event.rootblip:
from waveapi import element
event.rootblip.append(element.Gadget(url))
return True
else:
event.reply("can't find root blip.")
return False
def load(event, url):
if event.blip:
from waveapi import element
event.blip.append(element.Gadget(url))
return True
else:
event.reply("can't find root blip.")
return False
def handle_gadgetload(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
if not event.rest:
event.missing('<gadgetname>')
return
try:
url = gadgeturls.data[event.rest]
if load(event, url):
event.reply('loaded %s' % url)
except KeyError:
event.reply("we don't have a url for %s" % event.rest)
cmnds.add("gadget-load", handle_gadgetload, 'USER')
examples.add("gadget-load", "load a gadget into a blip", "gadget-load")
def handle_gadgetloadroot(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
if not event.rest:
event.missing('<gadgetname>')
return
try:
url = gadgeturls.data[event.rest]
if loadroot(event, url):
event.reply('loaded %s' % url)
except KeyError:
event.reply("we don't have a url for %s" % event.rest)
cmnds.add("gadget-loadroot", handle_gadgetloadroot, 'USER')
examples.add("gadget-loadroot", "load a gadget into the root blip", "gadget-loadroot")
def handle_gadgetiframe(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
if not event.rest:
event.missing('<url>')
return
try:
url = gadgeturls.data['loadiframe'] + "?&iframeurl=%s" % event.rest
event.reply('loading %s' % url)
load(event, url)
except KeyError:
event.reply("we don't have a iframe url")
cmnds.add("gadget-iframe", handle_gadgetiframe, 'USER')
examples.add("gadget-iframe", "load a url into a iframe", "gadget-iframe")
def handle_gadgetaddurl(bot, event):
try:
(name, url) = event.args
except ValueError:
event.missing('<name> <url>')
return
if not gadgeturls.data.has_key(name):
gadgeturls.data[name] = url
gadgeturls.save()
else:
event.reply("we already have a %s gadget" % name)
cmnds.add("gadget-addurl", handle_gadgetaddurl, 'USER')
examples.add("gadget-addurl", "store a gadget url", "gadget-addurl jsb https://jsonbot.appspot.com/iframe.xml")
def handle_gadgetdelurl(bot, event):
    if not event.rest:
        event.missing('<name>')
        return
    name = event.rest
    # remove the stored url for this gadget
    if gadgeturls.data.has_key(name):
        del gadgeturls.data[name]
        gadgeturls.save()
    else:
        event.reply("we don't have a %s gadget" % name)
cmnds.add("gadget-delurl", handle_gadgetdelurl, 'OPER')
examples.add("gadget-delurl", "delete a gadget url", "gadget-delurl mygadget")
def handle_gadgetlist(bot, event):
result = []
for name, url in gadgeturls.data.iteritems():
result.append("%s - %s" % (name, url))
event.reply("available gadgets: ", result)
cmnds.add("gadget-list", handle_gadgetlist, 'USER')
examples.add("gadget-list", "list known gadget urls", "gadget-list")
def handle_gadgetconsole(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
wave = event.chan
if wave.data.feeds and wave.data.dotitle:
event.set_title("JSONBOT - %s #%s" % (" - ".join(wave.data.feeds), str(wave.data.nrcloned)))
from waveapi import element
#url = gadgeturls.data['loadiframe'] + "?&iframeurl=https://jsonbot.appspot.com"
#event.reply('loading %s' % url)
event.append("loading ...\n")
#load(event, "http://jsonbot.appspot.com/iframe.xml")
event.append(
element.Gadget('http://jsonbot.appspot.com/console.xml?gadget_cache=0'))
cmnds.add("gadget-console", handle_gadgetconsole, 'OPER')
examples.add("gadget-console", "load the console gadget", "gadget-console")
| mit | 9,067,515,519,668,668,000 | 28.647436 | 111 | 0.644108 | false |
mellowizz/ocny_tax_info | qgis_show_ocny_tax_info.py | 1 | 1143 | from qgis.core import *
from qgis.gui import *
import mechanize
import cookielib
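# Registers show_tax_info(pin) as a custom QGIS expression function; it fetches
# the Orange County NY property-detail page for the given parcel PIN and
# returns the raw HTML of the response.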
@qgsfunction(args='auto', group='Custom')
def show_tax_info(pin, feature, parent):
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
url = 'http://propertydata.orangecountygov.com/imate/propdetail.aspx'
# first 4 of PIN are town code: str(pin)[0:4]
# search = '/'.join([BASE_URL, 'viewlist.aspx?sort=printkey&swis={tcode}'])
# get cookie
br.open('http://www.co.orange.ny.us/content/124/1368/4136.aspx')
for link in br.links():
if 'index.aspx' in link.url:
br.follow_link(link)
break
swis = str(pin)[:6]
printkey = str(pin)[6:]
search_terms = 'swis={}&printkey={}'.format(swis, printkey)
full_url = '?'.join([url, search_terms])
response = br.open(full_url)
return response.read()
| gpl-3.0 | -1,176,792,807,719,864,800 | 26.214286 | 79 | 0.64392 | false |
AlexWoo/pyed | pysys/pycmdserver.py | 1 | 1693 | from pyevent.event import event
from pyevent.tcpserver import tcpserver
from pyevent.tcpconnection import tcpconnection
class cmdserver(tcpserver):
def __init__(self, pesys):
self.evs = pesys.evs
self.tms = pesys.tms
self.log = pesys.log
self.proc = pesys.proc
self.proc.setcmdserver(self)
self.srvconf = pesys.conf.cmdserver
self.c = None
tcpserver.__init__(self, self.accepthandler, self.srvconf,
self.evs, self.tms)
def accepthandler(self, ev):
csock, _ = ev.sock.accept()
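        # Serve a single command client at a time: while a command is still
        # being processed, self.c is set and any new client is refused.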
if self.c:
csock.close()
self.log.logInfo("CmdServer", "Cmdserver has cmd to process, close new cmdclient")
return
self.c = tcpconnection(csock, self.srvconf, self.evs, self.tms)
self.c.set_recvmsg(self.recvmsg)
self.c.set_broken(self.brokenhandler)
def recvmsg(self, c):
buf = self.c.read()
self.log.logInfo("CmdServer", "Send cmd[%s] to worker", buf.strip())
self.proc.sendcmd(buf)
self.ev = event(self.evs, self.tms)
self.ev.add_timer(5000, self.timeouthandler) # set cmd response timeout to 5s
def sendresp(self, buf, islast):
self.c.write(buf)
if islast:
self.c.close()
self.c = None
self.ev.del_timer()
def brokenhandler(self, c):
self.c = None
self.ev.del_timer()
self.log.logInfo("CmdServer", "Cmdclient link broken")
def timeouthandler(self, ev):
self.log.logInfo("CmdServer", "Wait for Worker response timeout")
self.c.close()
self.c = None
self.ev.del_timer()
| bsd-2-clause | -195,326,906,366,246,620 | 32.196078 | 94 | 0.594802 | false |
ecreall/dace | dace/processinstance/tests/test_signal.py | 1 | 7346 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import transaction
from pyramid.threadlocal import get_current_registry
from dace.interfaces import IProcessDefinition
import dace.processinstance.tests.example.process as example
from dace.processdefinition.processdef import ProcessDefinition
from dace.processdefinition.activitydef import ActivityDefinition
from dace.processdefinition.gatewaydef import ParallelGatewayDefinition
from dace.processdefinition.transitiondef import TransitionDefinition
from dace.processdefinition.eventdef import (
StartEventDefinition,
EndEventDefinition,
IntermediateCatchEventDefinition,
IntermediateThrowEventDefinition,
SignalEventDefinition)
from dace.testing import FunctionalTests
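# Signal reference shared by the throwing (St) and catching (Sc) event
# definitions below; both resolve to the same signal, so throwing it wakes
# the catching event.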
def ref_signal(process):
return "X"
class TestsSignal(FunctionalTests):
def tearDown(self):
registry = get_current_registry()
registry.unregisterUtility(provided=IProcessDefinition)
super(TestsSignal, self).tearDown()
def _process_definition(self):
"""
G1(+), G2(+): parallel gateways
S: start event
E: End event
St: Signal throwing
Sc: Signal catching
A, D: activities
----- ------
-->| A |-->| St |--
----- --------- / ----- ------ \ --------- -----
| S |-->| G1(+) |- ------ ----- -| G2(+) |-->| E |
----- --------- \-->| Sc |->| D |---/ --------- -----
------ -----
"""
pd = ProcessDefinition(**{'id':u'sample'})
self.app['sample'] = pd
pd.defineNodes(
s = StartEventDefinition(),
g1 = ParallelGatewayDefinition(),
g2 = ParallelGatewayDefinition(),
a = ActivityDefinition(),
d = ActivityDefinition(),
e = EndEventDefinition(),
st = IntermediateThrowEventDefinition(
SignalEventDefinition(ref_signal)),
sc = IntermediateCatchEventDefinition(
SignalEventDefinition(ref_signal)),
)
pd.defineTransitions(
TransitionDefinition('s', 'g1'),
TransitionDefinition('g1', 'a'),
TransitionDefinition('g1', 'sc'),
TransitionDefinition('a', 'st'),
TransitionDefinition('sc', 'd'),
TransitionDefinition('st', 'g2'),
TransitionDefinition('d', 'g2'),
TransitionDefinition('g2', 'e'),
)
self.config.scan(example)
return pd
def xtest_signal_event_start_sc(self):
pd = self._process_definition()
self.def_container.add_definition(pd)
start_wi = pd.start_process('sc')['sc']
sc_wi, proc = start_wi.consume()
sc_wi.start_test_activity()
self.assertEqual(len(proc.getWorkItems()), 2)
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.a', 'sample.sc'])
def xtest_signal_event(self):
pd = self._process_definition()
self.def_container.add_definition(pd)
start_wi = pd.start_process('a')['a']
# commit the application
transaction.commit()
a_wi, proc = start_wi.consume()
a_wi.start_test_activity()
transaction.commit()
import time
time.sleep(5)
transaction.begin()
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.d'])
d_wi = proc.getWorkItems()['sample.d']
self.assertEqual(len(proc.getWorkItems()), 1)
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.d'])
d_wi.consume().start_test_activity()
self.assertEqual(len(proc.getWorkItems()), 0)
def _process_definition_with_activity_after_start_event(self):
"""
G1(+), G2(+): parallel gateways
S: start event
E: End event
St: Signal throwing
Sc: Signal catching
A, B, D: activities
----- ------
-->| A |-->| St |--
----- ----- --------- / ----- ------ \ --------- -----
| S |-->| B |-->| G1(+) |- ------ ----- -| G2(+) |-->| E |
----- ----- --------- \-->| Sc |->| D |---/ --------- -----
------ -----
"""
pd = ProcessDefinition(**{'id':u'sample'})
self.app['sample'] = pd
pd.defineNodes(
s = StartEventDefinition(),
g1 = ParallelGatewayDefinition(),
g2 = ParallelGatewayDefinition(),
a = ActivityDefinition(),
b = ActivityDefinition(),
d = ActivityDefinition(),
e = EndEventDefinition(),
st = IntermediateThrowEventDefinition(
SignalEventDefinition(ref_signal)),
sc = IntermediateCatchEventDefinition(
SignalEventDefinition(ref_signal)),
)
pd.defineTransitions(
TransitionDefinition('s', 'b'),
TransitionDefinition('b', 'g1'),
TransitionDefinition('g1', 'a'),
TransitionDefinition('g1', 'sc'),
TransitionDefinition('a', 'st'),
TransitionDefinition('sc', 'd'),
TransitionDefinition('st', 'g2'),
TransitionDefinition('d', 'g2'),
TransitionDefinition('g2', 'e'),
)
self.config.scan(example)
return pd
def test_start_intermediate_events_on_startup(self):
from zope.processlifetime import DatabaseOpenedWithRoot
from dace.processinstance import event
from dace.subscribers import stop_ioloop
pd = self._process_definition_with_activity_after_start_event()
self.def_container.add_definition(pd)
start_wi = pd.start_process('b')['b']
# commit the application
transaction.commit()
b_wi, proc = start_wi.consume()
b_wi.start_test_activity()
transaction.commit()
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.a', 'sample.sc'])
# simulate application shutdown
import time
        # we need to wait for ZMQStream to start on the ioloop side and read
        # the Listener from the socket, so that we have the listener in
        # event.callbacks
time.sleep(2.2)
self.assertEqual(len(event.callbacks), 1)
stop_ioloop()
time.sleep(1)
self.assertEqual(len(event.callbacks), 0)
# simulate application startup
e = DatabaseOpenedWithRoot(self.app._p_jar.db())
self.registry.notify(e)
time.sleep(1)
self.assertEqual(len(event.callbacks), 1)
a_wi = proc.getWorkItems()['sample.a']
a_wi.consume().start_test_activity()
        # we need to commit so the catching event Job
        # sees the modified process.
transaction.commit()
        # The job waits 2 sec before executing
time.sleep(5)
transaction.begin()
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.d'])
| agpl-3.0 | -2,470,153,901,930,591,000 | 37.062176 | 87 | 0.545467 | false |
NCI-Cloud/cloud-tools | check-defunct-instances.py | 1 | 1214 | #!/usr/bin/env python
#
# Take a list of instance UUIDs and check their status. If the last activity
# recorded for an instance is more than the given number of days ago (90 by
# default; see --days) mark it as defunct.
from util import get_nova_client, get_keystone_client
from util import get_instance, is_instance_to_be_expired
from util import output_report
from util import parser_with_common_args
def parse_args():
parser = parser_with_common_args()
parser.add_argument("-d", "--days", action='store', required=False,
type=int, default='90',
help=(
"Number of days before an instance is considered"
"defunct"
))
return parser.parse_args()
def main():
args = parse_args()
nc = get_nova_client()
kc = get_keystone_client()
instances = []
for uuid in args.hosts:
instance = get_instance(nc, uuid)
if instance is None:
print("Instance %s not found" % (uuid))
else:
if is_instance_to_be_expired(nc, instance, days=args.days):
instances.append(instance)
output_report(nc, kc, instances)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,662,251,379,629,455,400 | 27.904762 | 77 | 0.584843 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_01_01/operations/_usages_operations.py | 1 | 5335 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
"""UsagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_location(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UsageListResult"]
"""Gets the current usage count and the limit for the resources of the location under the
subscription.
:param location: The location of the Azure Storage resource.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_01_01.models.UsageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_location.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UsageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'} # type: ignore
| mit | 263,078,380,412,774,980 | 44.211864 | 146 | 0.635614 | false |
maeltac/hazanet | sense.py | 1 | 1881 |
import pdb
"""
Each sensor that uses this will follow these rules:
calling sensor.startup() will initialize and calibrate the sensor. It will return 'Green' on success, 'Red' on failure
calling sensor.read() will return a float for that tick
calling sensor.reset() will attempt to reset the sensor, returning 0 for success, 1 for failure, or 2 for wait
"""
class Sensor():
def startup(self,sentype):
#pdb.set_trace()
if sentype == 'RAD':
return RAD.startup(self,sentype)
elif sentype =='CO':
return CO.startup(self,sentype)
elif sentype =='CH4':
return CH4.startup(self,sentype)
elif sentype =='C6H6':
return C6H6.startup(self,sentype)
elif sentype =='C3H8':
return C3H8.startup(self,sentype)
else:
return 'Error Initializing'
def read(self):
return 0
def reset(self):
return 0
def supported(self):
supportlist = ['RAD', 'CO', 'CH4', 'C6H6', 'C3H8']
return supportlist
class RAD(Sensor):
def startup(self,sentype):
retstring = 'Sickly Green'
return retstring
def read(self):
return 0
def reset(self):
return 0
class CO(Sensor):
def startup(self,sentype):
return 'Blue'
def read(self):
return 0
def reset(self):
return 0
class CH4(Sensor):
def startup(self,sentype):
return 'Nausious'
def read(self):
return 0
def reset(self):
return 0
class C6H6(Sensor):
def startup(self, sentype):
return 'Toxic'
def read(self):
return 0
def reset(self):
return 0
class C3H8(Sensor):
def startup(self, sentype):
return 'On Fire'
def read(self):
return 0
def reset(self):
return 0
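# Minimal usage sketch (illustrative only, following the rules in the module
# docstring; assumes 'RAD' stays in Sensor.supported() and treats startup()'s
# return value as an opaque status string):
if __name__ == '__main__':
    s = Sensor()
    status = s.startup('RAD')   # calibration status string
    reading = s.read()          # one sample for this tick
    code = s.reset()            # 0 = success, 1 = failure, 2 = wait
    print(status, reading, code)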
| apache-2.0 | -8,357,207,316,017,917,000 | 16.416667 | 127 | 0.5832 | false |
cubledesarrollo/cubledotes | cuble/suit/config.py | 1 | 2073 | from django.contrib.admin import ModelAdmin
from django.conf import settings
from . import VERSION
def default_config():
return {
'VERSION': VERSION,
# configurable
'ADMIN_NAME': 'Django Suit',
'HEADER_DATE_FORMAT': 'l, jS F Y',
'HEADER_TIME_FORMAT': 'H:i',
# form
'SHOW_REQUIRED_ASTERISK': True,
'CONFIRM_UNSAVED_CHANGES': True,
# menu
'SEARCH_URL': '/admin/auth/user/',
'MENU_OPEN_FIRST_CHILD': True,
'MENU_ICONS': {
'auth': 'icon-lock',
'sites': 'icon-leaf',
},
# 'MENU_EXCLUDE': ('auth.group',),
# 'MENU': (
# 'sites',
# {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
# {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
# {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
# ),
# misc
'LIST_PER_PAGE': 20
}
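# A project-level SUIT_CONFIG dict in Django settings replaces the defaults
# wholesale; get_config() still falls back to default_config() per key for
# params missing from a user-supplied config.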
def get_config(param=None):
config_key = 'SUIT_CONFIG'
if hasattr(settings, config_key):
config = getattr(settings, config_key, {})
else:
config = default_config()
if param:
value = config.get(param)
if value is None:
value = default_config().get(param)
return value
return config
# Reverse default actions position
ModelAdmin.actions_on_top = False
ModelAdmin.actions_on_bottom = True
# Set global list_per_page
ModelAdmin.list_per_page = get_config('LIST_PER_PAGE')
def setup_filer():
from suit.widgets import AutosizedTextarea
from filer.admin.imageadmin import ImageAdminForm
from filer.admin.fileadmin import FileAdminChangeFrom
def ensure_meta_widgets(meta_cls):
if not hasattr(meta_cls, 'widgets'):
meta_cls.widgets = {}
meta_cls.widgets['description'] = AutosizedTextarea
ensure_meta_widgets(ImageAdminForm.Meta)
ensure_meta_widgets(FileAdminChangeFrom.Meta)
# if 'filer' in settings.INSTALLED_APPS:
# setup_filer()
| mit | 6,604,521,707,563,215,000 | 26.64 | 94 | 0.585142 | false |