repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
elssar/calibre | src/calibre/gui2/actions/embed.py | 8 | 5073 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from functools import partial
from PyQt5.Qt import QTimer, QProgressDialog, Qt
from calibre import force_unicode
from calibre.gui2 import gprefs
from calibre.gui2.actions import InterfaceAction
class EmbedAction(InterfaceAction):
name = 'Embed Metadata'
action_spec = (_('Embed metadata'), 'modified.png', _('Embed metadata into book files'), None)
action_type = 'current'
action_add_menu = True
action_menu_clone_qaction = _('Embed metadata into book files')
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_ids = self.dropped_ids
del self.dropped_ids
if book_ids:
self.do_embed(book_ids)
def genesis(self):
self.qaction.triggered.connect(self.embed)
self.embed_menu = self.qaction.menu()
m = partial(self.create_menu_action, self.embed_menu)
m('embed-specific',
_('Embed metadata into files of a specific format from selected books...'),
triggered=self.embed_selected_formats)
self.qaction.setMenu(self.embed_menu)
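# The timer below drives do_one(): each tick embeds metadata into a single book,
# keeping the GUI responsive while the progress dialog advances.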
self.pd_timer = t = QTimer()
t.timeout.connect(self.do_one)
def embed(self):
rb = self.gui.iactions['Remove Books']
ids = rb._get_selected_ids(err_title=_('Cannot embed'))
if not ids:
return
self.do_embed(ids)
def embed_selected_formats(self):
rb = self.gui.iactions['Remove Books']
ids = rb._get_selected_ids(err_title=_('Cannot embed'))
if not ids:
return
fmts = rb._get_selected_formats(
_('Choose formats to be updated'), ids)
if not fmts:
return
self.do_embed(ids, fmts)
def do_embed(self, book_ids, only_fmts=None):
pd = QProgressDialog(_('Embedding updated metadata into book files...'), _('&Stop'), 0, len(book_ids), self.gui)
pd.setWindowTitle(_('Embedding metadata...'))
pd.setWindowModality(Qt.WindowModal)
errors = []
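# job_data carries the embed state between timer ticks:
# (index of the next book, book ids, progress dialog, optional format filter, collected errors)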
self.job_data = (0, tuple(book_ids), pd, only_fmts, errors)
self.pd_timer.start()
def do_one(self):
try:
i, book_ids, pd, only_fmts, errors = self.job_data
except (TypeError, AttributeError):
return
if i >= len(book_ids) or pd.wasCanceled():
pd.setValue(pd.maximum())
pd.hide()
self.pd_timer.stop()
self.job_data = None
self.gui.library_view.model().refresh_ids(book_ids)
if i > 0:
self.gui.status_bar.show_message(_('Embedded metadata in %d books') % i, 5000)
if errors:
det_msg = '\n\n'.join([_('The {0} format of {1}:\n\n{2}\n').format(
(fmt or '').upper(), force_unicode(mi.title), force_unicode(tb)) for mi, fmt, tb in errors])
from calibre.gui2.dialogs.message_box import MessageBox
title, msg = _('Failed for some files'), _(
'Failed to embed metadata into some book files. Click "Show details" for details.')
d = MessageBox(MessageBox.WARNING, _('WARNING:')+ ' ' + title, msg, det_msg, parent=self.gui, show_copy_button=True)
tc = d.toggle_checkbox
tc.setVisible(True), tc.setText(_('Show the &failed books in the main book list'))
tc.setChecked(gprefs.get('show-embed-failed-books', False))
d.resize_needed.emit()
d.exec_()
gprefs['show-embed-failed-books'] = tc.isChecked()
if tc.isChecked():
failed_ids = {mi.book_id for mi, fmt, tb in errors}
db = self.gui.current_db
db.data.set_marked_ids(failed_ids)
self.gui.search.set_search_string('marked:true')
return
pd.setValue(i)
db = self.gui.current_db.new_api
book_id = book_ids[i]
def report_error(mi, fmt, tb):
mi.book_id = book_id
errors.append((mi, fmt, tb))
db.embed_metadata((book_id,), only_fmts=only_fmts, report_error=report_error)
self.job_data = (i + 1, book_ids, pd, only_fmts, errors)
| gpl-3.0 | -8,363,610,454,218,823,000 | 38.944882 | 132 | 0.579342 | false | 3.743911 | false | false | false |
dstndstn/astrometry.net | util/imageutils.py | 2 | 1447 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import numpy
def write_pnm_to(img, f, maxval=255):
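# 2-D arrays are written as binary greyscale PGM (P5); 3-D arrays with 3 planes as binary colour PPM (P6).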
if len(img.shape) == 1:
raise RuntimeError('write_pnm: img is one-dimensional: must be 2 or 3.')
elif len(img.shape) == 2:
#pnmtype = 'G'
pnmcode = 5
(h,w) = img.shape
elif len(img.shape) == 3:
(h,w,planes) = img.shape
#pnmtype = 'P'
pnmcode = 6
if planes != 3:
raise RuntimeError('write_pnm: img must have 3 planes, not %i' % planes)
else:
raise RuntimeError('write_pnm: img must have <= 3 dimensions.')
if img.max() > maxval:
print('write_pnm: Some pixel values are > maxval (%i): clipping them.' % maxval)
if img.min() < 0:
print('write_pnm: Some pixel values are < 0: clipping them.')
clipped = img.clip(0, maxval)
maxval = int(maxval)
if maxval > 65535:
raise RuntimeError('write_pnm: maxval must be <= 65535')
if maxval < 0:
raise RuntimeError('write_pnm: maxval must be positive')
f.write('P%i %i %i %i ' % (pnmcode, w, h, maxval))
if maxval <= 255:
f.write(img.astype(numpy.uint8).data)
else:
f.write(img.astype(numpy.uint16).data)
def write_pnm(img, filename, maxval=255):
f = open(filename, 'wb')
write_pnm_to(img, f, maxval)
f.close()
| bsd-3-clause | -5,043,115,592,459,789,000 | 31.155556 | 88 | 0.594333 | false | 3.215556 | false | false | false |
wilmandx/ipos | ventas/viewsPedido.py | 1 | 8065 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from gestion.models import ValorTipo,Producto
from django.contrib.auth.models import User
from django.http import HttpResponse
import json
from ventas.models import VentaMaestro,VentaDetalle,PagoVentaMaestro
from django.utils.dateformat import DateFormat
from django.db.models import Q
from django.utils.timezone import get_default_timezone
# Create your views here.
class Categoria:
listProductos=[]
@login_required
def venta_desktop(request,nroFactura=None):
nroFactura=request.GET.get('nroFact',None)
print(get_default_timezone())
ventaMaestro=None
if nroFactura!=None :
# if it starts with 'm', the identifier refers to a table (mesa) rather than an invoice
if nroFactura[0]=='m':
# look for an unpaid order assigned to table m
try:
ventaMaestro=VentaMaestro.objects.get(mesa=nroFactura[1:])
except VentaMaestro.DoesNotExist:
ventaMaestro=None
else:
# look the order up by invoice number
try:
ventaMaestro=VentaMaestro.objects.get(numFactura=nroFactura)
except VentaMaestro.DoesNotExist:
ventaMaestro=None
# fetch the order's detail lines
listDetalles=None
if(ventaMaestro!=None):
listDetalles=ventaMaestro.ventadetalle_set.all()
else:
# create a VentaMaestro with default values... client and seller
ventaMaestro=VentaMaestro()
ventaMaestro.cliente=User.objects.get(id=2)
ventaMaestro.vendedor=request.user
# compute the total for each detail line and the grand total
dictTotales={}
granTotal=0
ivaTotal=0
ivaTmp=100
if listDetalles!=None:
for detalle in listDetalles:
dictTotales[detalle.id]=(detalle.valorUni*detalle.cantidad)-detalle.descuento
granTotal=granTotal+dictTotales[detalle.id]
if detalle.iva==0 :
ivaTmp=100
else:
ivaTmp=detalle.iva
ivaTotal=ivaTotal+(dictTotales[detalle.id]*ivaTmp)/100
dictTotales['granTotal']=granTotal
dictTotales['ivaTotal']=ivaTotal
dictTotales['subTotal']=granTotal-ivaTotal
context = {'message':'ok','ventaMaestro':ventaMaestro,'listDetalles':listDetalles,'dictTotales':dictTotales}
return render(request, 'venta.html',context)
@login_required
def venta_mobile(request):
list_cate_prod={}
list_categories=ValorTipo.objects.filter(padre=1)
list_productos=Producto.objects.all()
for producto in list_productos:
for categoria in list_categories:
if categoria.id==producto.tipoCategoria.id :
if categoria.id in list_cate_prod :
temp=list_cate_prod[categoria.id]
else :
temp=Categoria()
temp.listProductos=[]
temp.tipoCategoria=categoria
temp.listProductos.append(producto)
list_cate_prod[categoria.id]=temp
context = {'message':'ok','list_categories':list_cate_prod}
return render(request, 'mobile/venta.html',context)
@login_required
def clientes(request):
query = request.GET.get('q','')
if(len(query) > 0):
results = User.objects.filter(Q(last_name__icontains=query)|Q(first_name__icontains=query))
result_list = []
for item in results:
result_list.append({'id':item.id,'nombre_completo':item.last_name+' '+item.first_name})
else:
result_list = []
response_text = json.dumps(result_list, separators=(',',':'))
return HttpResponse(response_text, content_type="application/json")
@login_required
def vendedores(request):
query = request.GET.get('q','')
if(len(query) > 0):
results = User.objects.filter(Q(last_name__icontains=query)|Q(first_name__icontains=query))
result_list = []
for item in results:
result_list.append({'id':item.id,'nombre_completo':item.last_name+' '+item.first_name})
else:
result_list = []
response_text = json.dumps(result_list, separators=(',',':'))
return HttpResponse(response_text, content_type="application/json")
@login_required
def codproducto(request):
query = request.GET.get('q','')
if(len(query) > 0):
results = Producto.objects.filter(codigo__icontains=query)
result_list = []
for item in results:
result_list.append({'id':item.id,'nombre':item.nombre,'codigo':item.codigo,'valorVenta':item.valorVenta,'iva':item.ivaPorcentaje})
else:
result_list = []
response_text = json.dumps(result_list, separators=(',',':'))
return HttpResponse(response_text, content_type="application/json")
@login_required
def nomproducto(request):
query = request.GET.get('q','')
if(len(query) > 0):
results = Producto.objects.filter(nombre__icontains=query)
result_list = []
for item in results:
result_list.append({'id':item.id,'nombre':item.nombre,'codigo':item.codigo,'valorVenta':item.valorVenta,'iva':item.ivaPorcentaje})
else:
result_list = []
response_text = json.dumps(result_list, separators=(',',':'))
return HttpResponse(response_text, content_type="application/json")
@login_required
def savePedido(request,anyway=None):
# check whether the save should proceed anyway
#request.GET.get('anyway')
if anyway==None:
# check whether this table already has an unpaid order
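# The (value, default)[condition] expressions below are terse conditionals:
# they fall back to the default when the POST field is empty.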
idMaestroDetalle=buscarPedido(int((request.POST['mesa_p'],'0')[request.POST['mesa_p']=='']))
if idMaestroDetalle:
response_text = {'code':'01'}#Ya existe un pedido para la mesa sin pagar
return HttpResponse(json.dumps(response_text), content_type="application/json")
idFactura = None
if request.POST['idFactura']!='':
idFactura=int(request.POST['idFactura'])
factura=VentaMaestro.objects.get(id=idFactura)
else:
factura=VentaMaestro()
# evaluate int() after the empty-field fallback so an empty POST value does not raise ValueError
factura.cliente=User(id=int((request.POST['idcliente_p'],'1')[request.POST['idcliente_p']=='']))
factura.vendedor=User(id=int((request.POST['idvendedor_p'],'1')[request.POST['idvendedor_p']=='']))
factura.cajero=request.user
#factura.valorPropina=int((request.POST['propina_p'],'0')[request.POST['propina_p']==''])
factura.mesa=int((request.POST['mesa_p'],'0')[request.POST['mesa_p']==''])
factura.save()
df = DateFormat(factura.fechaVenta)
response_text = {'code':'00','nroFactura':factura.id,'fechaVenta':df.format('d/M/Y'),'horaVenta':df.format('h:i A')}
return HttpResponse(json.dumps(response_text), content_type="application/json")
def buscarPedido(nroMesa):
lista=VentaMaestro.objects.filter(mesa=nroMesa,pagoventamaestro__valorPago__gt=0)
return len(lista)
@login_required
def saveDetalle(request):
factura=None
if request.POST['idFacturaD']!='':
idFactura=int(request.POST['idFacturaD'])
factura=VentaMaestro.objects.get(id=idFactura)
if request.POST['iddetalle_p']!='':
idDetalle=int(request.POST['iddetalle_p'])
ventaDetalle=VentaDetalle.objects.get(id=idDetalle)
else:
ventaDetalle=VentaDetalle()
idProducto=int(request.POST['idproducto_p'])
ventaDetalle.ventaMaestro=factura
ventaDetalle.producto=Producto(id=idProducto)
ventaDetalle.cantidad=int(request.POST['cantidad_p'])
ventaDetalle.valorUni=int(request.POST['unitario_p'])
ventaDetalle.iva=int(request.POST['valori_p'])
ventaDetalle.descuento=int(request.POST['descuento_p'])
ventaDetalle.save()
response_text = {'idDetalle':ventaDetalle.id}
return HttpResponse(json.dumps(response_text), content_type="application/json")
@login_required
def deleteDetalle(request,id):
ventaDetalle=VentaDetalle.objects.get(id=id)
ventaDetalle.delete()
response_text = {'code':'00'}
return HttpResponse(json.dumps(response_text), content_type="application/json")
@login_required
def pagarPedido(request):
idFactura = None
print("id factura="+request.POST['idFactura'])
if request.POST['idFactura']!='':
idFactura=int(request.POST['idFactura'])
factura=VentaMaestro.objects.get(id=idFactura)
else:
response_text = {'code':'01'}#No se envio un identificador de pedido
return HttpResponse(json.dumps(response_text), content_type="application/json")
pago=PagoVentaMaestro()
pago.ventaMaestro=factura
pago.valorPago=int((request.POST['vlr-efectivo'],'0')[request.POST['vlr-efectivo']==''])
pago.tipoMedioPago=ValorTipo(id=1)
pago.save()
factura.valorPropina=int((request.POST['propina_p'],'0')[request.POST['propina_p']==''])
factura.descuento=int((request.POST['descuento_p'],'0')[request.POST['descuento_p']==''])
factura.save()
response_text = {'code':'00'}
return HttpResponse(json.dumps(response_text), content_type="application/json") | lgpl-3.0 | -7,989,542,337,020,959,000 | 36 | 133 | 0.7416 | false | 2.711836 | false | false | false |
ThePletch/ansible | lib/ansible/modules/identity/ipa/ipa_user.py | 28 | 12043 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipa_user
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA users
description:
- Add, modify and delete user within IPA server
options:
displayname:
description: Display name
required: false
givenname:
description: First name
required: false
loginshell:
description: Login shell
required: false
mail:
description:
- List of mail addresses assigned to the user.
- If an empty list is passed all assigned email addresses will be deleted.
- If None is passed email addresses will not be checked or changed.
required: false
password:
description:
- Password
required: false
sn:
description: Surname
required: false
sshpubkey:
description:
- List of public SSH key.
- If an empty list is passed all assigned public keys will be deleted.
- If None is passed SSH public keys will not be checked or changed.
required: false
state:
description: State to ensure
required: false
default: "present"
choices: ["present", "absent", "enabled", "disabled"]
telephonenumber:
description:
- List of telephone numbers assigned to the user.
- If an empty list is passed all assigned telephone numbers will be deleted.
- If None is passed telephone numbers will not be checked or changed.
required: false
title:
description: Title
required: false
uid:
description: uid of the user
required: true
aliases: ["name"]
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
requirements:
- base64
- hashlib
'''
EXAMPLES = '''
# Ensure pinky is present
- ipa_user:
name: pinky
state: present
givenname: Pinky
sn: Acme
mail:
- [email protected]
telephonenumber:
- '+555123456'
sshpubkeyfp:
- ssh-rsa ....
- ssh-dsa ....
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure brain is absent
- ipa_user:
name: brain
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
user:
description: User as returned by IPA API
returned: always
type: dict
'''
import base64
import hashlib
from ansible.module_utils.ipa import IPAClient
class UserIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(UserIPAClient, self).__init__(module, host, port, protocol)
def user_find(self, name):
return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
def user_add(self, name, item):
return self._post_json(method='user_add', name=name, item=item)
def user_mod(self, name, item):
return self._post_json(method='user_mod', name=name, item=item)
def user_del(self, name):
return self._post_json(method='user_del', name=name)
def user_disable(self, name):
return self._post_json(method='user_disable', name=name)
def user_enable(self, name):
return self._post_json(method='user_enable', name=name)
def get_user_dict(displayname=None, givenname=None, loginshell=None, mail=None, nsaccountlock=False, sn=None,
sshpubkey=None, telephonenumber=None, title=None, userpassword=None):
user = {}
if displayname is not None:
user['displayname'] = displayname
if givenname is not None:
user['givenname'] = givenname
if loginshell is not None:
user['loginshell'] = loginshell
if mail is not None:
user['mail'] = mail
user['nsaccountlock'] = nsaccountlock
if sn is not None:
user['sn'] = sn
if sshpubkey is not None:
user['ipasshpubkey'] = sshpubkey
if telephonenumber is not None:
user['telephonenumber'] = telephonenumber
if title is not None:
user['title'] = title
if userpassword is not None:
user['userpassword'] = userpassword
return user
def get_user_diff(ipa_user, module_user):
"""
Return the keys whose values differ between the two dicts. Unfortunately the IPA
API returns everything as a list even if only a single value is possible,
so some extra normalisation is needed.
If a module_user value is not a list but the corresponding attribute in ipa_user is,
the module value is wrapped in a list before comparison. This way the method should
not need to change if the format of the returned API dict changes.
:param ipa_user:
:param module_user:
:return:
"""
# return [item for item in module_user.keys() if module_user.get(item, None) != ipa_user.get(item, None)]
result = []
# sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints.
# These are used for comparison.
sshpubkey = None
if 'ipasshpubkey' in module_user:
module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey) for pubkey in module_user['ipasshpubkey']]
# Remove the ipasshpubkey element as it is not returned from IPA but save it's value to be used later on
sshpubkey = module_user['ipasshpubkey']
del module_user['ipasshpubkey']
for key in module_user.keys():
mod_value = module_user.get(key, None)
ipa_value = ipa_user.get(key, None)
if isinstance(ipa_value, list) and not isinstance(mod_value, list):
mod_value = [mod_value]
if isinstance(ipa_value, list) and isinstance(mod_value, list):
mod_value = sorted(mod_value)
ipa_value = sorted(ipa_value)
if mod_value != ipa_value:
result.append(key)
# If there are public keys, remove the fingerprints and add them back to the dict
if sshpubkey is not None:
del module_user['sshpubkeyfp']
module_user['ipasshpubkey'] = sshpubkey
return result
def get_ssh_key_fingerprint(ssh_key):
"""
Return the public key fingerprint of a given public SSH key
in format "FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 [user@host] (ssh-rsa)"
:param ssh_key:
:return:
"""
parts = ssh_key.strip().split()
if len(parts) == 0:
return None
key_type = parts[0]
key = base64.b64decode(parts[1].encode('ascii'))
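# Legacy OpenSSH-style fingerprint: MD5 of the raw key bytes, rendered as colon-separated hex pairs.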
fp_plain = hashlib.md5(key).hexdigest()
key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
if len(parts) < 3:
return "%s (%s)" % (key_fp, key_type)
else:
user_host = parts[2]
return "%s %s (%s)" % (key_fp, user_host, key_type)
def ensure(module, client):
state = module.params['state']
name = module.params['name']
nsaccountlock = state == 'disabled'
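# 'present', 'enabled' and 'disabled' all require the user to exist;
# the disabled state is expressed through the nsaccountlock flag in the user dict.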
module_user = get_user_dict(displayname=module.params.get('displayname'),
givenname=module.params.get('givenname'),
loginshell=module.params['loginshell'],
mail=module.params['mail'], sn=module.params['sn'],
sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
telephonenumber=module.params['telephonenumber'], title=module.params['title'],
userpassword=module.params['password'])
ipa_user = client.user_find(name=name)
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_user:
changed = True
if not module.check_mode:
ipa_user = client.user_add(name=name, item=module_user)
else:
diff = get_user_diff(ipa_user, module_user)
if len(diff) > 0:
changed = True
if not module.check_mode:
ipa_user = client.user_mod(name=name, item=module_user)
else:
if ipa_user:
changed = True
if not module.check_mode:
client.user_del(name)
return changed, ipa_user
def main():
module = AnsibleModule(
argument_spec=dict(
displayname=dict(type='str', required=False),
givenname=dict(type='str', required=False),
loginshell=dict(type='str', required=False),
mail=dict(type='list', required=False),
sn=dict(type='str', required=False),
uid=dict(type='str', required=True, aliases=['name']),
password=dict(type='str', required=False, no_log=True),
sshpubkey=dict(type='list', required=False),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent', 'enabled', 'disabled']),
telephonenumber=dict(type='list', required=False),
title=dict(type='str', required=False),
ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
ipa_host=dict(type='str', required=False, default='ipa.example.com'),
ipa_port=dict(type='int', required=False, default=443),
ipa_user=dict(type='str', required=False, default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
),
supports_check_mode=True,
)
client = UserIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
# If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list).
# Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey
# as different which should be avoided.
if module.params['sshpubkey'] is not None:
if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
module.params['sshpubkey'] = None
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, user = ensure(module, client)
module.exit_json(changed=changed, user=user)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 | 9,129,725,585,159,846,000 | 33.806358 | 119 | 0.631321 | false | 3.834129 | false | false | false |
google-research/falken | service/generated_flatbuffers/tflite/SignatureDef.py | 1 | 7508 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SignatureDef(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsSignatureDef(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SignatureDef()
x.Init(buf, n + offset)
return x
@classmethod
def SignatureDefBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# SignatureDef
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SignatureDef
def Inputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from tflite.TensorMap import TensorMap
obj = TensorMap()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SignatureDef
def InputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SignatureDef
def InputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# SignatureDef
def Outputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from tflite.TensorMap import TensorMap
obj = TensorMap()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SignatureDef
def OutputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SignatureDef
def OutputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# SignatureDef
def MethodName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# SignatureDef
def Key(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def SignatureDefStart(builder): builder.StartObject(4)
def SignatureDefAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
def SignatureDefStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SignatureDefAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
def SignatureDefStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SignatureDefAddMethodName(builder, methodName): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(methodName), 0)
def SignatureDefAddKey(builder, key): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(key), 0)
def SignatureDefEnd(builder): return builder.EndObject()
import tflite.TensorMap
try:
from typing import List
except:
pass
class SignatureDefT(object):
# SignatureDefT
def __init__(self):
self.inputs = None # type: List[tflite.TensorMap.TensorMapT]
self.outputs = None # type: List[tflite.TensorMap.TensorMapT]
self.methodName = None # type: str
self.key = None # type: str
@classmethod
def InitFromBuf(cls, buf, pos):
signatureDef = SignatureDef()
signatureDef.Init(buf, pos)
return cls.InitFromObj(signatureDef)
@classmethod
def InitFromObj(cls, signatureDef):
x = SignatureDefT()
x._UnPack(signatureDef)
return x
# SignatureDefT
def _UnPack(self, signatureDef):
if signatureDef is None:
return
if not signatureDef.InputsIsNone():
self.inputs = []
for i in range(signatureDef.InputsLength()):
if signatureDef.Inputs(i) is None:
self.inputs.append(None)
else:
tensorMap_ = tflite.TensorMap.TensorMapT.InitFromObj(signatureDef.Inputs(i))
self.inputs.append(tensorMap_)
if not signatureDef.OutputsIsNone():
self.outputs = []
for i in range(signatureDef.OutputsLength()):
if signatureDef.Outputs(i) is None:
self.outputs.append(None)
else:
tensorMap_ = tflite.TensorMap.TensorMapT.InitFromObj(signatureDef.Outputs(i))
self.outputs.append(tensorMap_)
self.methodName = signatureDef.MethodName()
self.key = signatureDef.Key()
# SignatureDefT
def Pack(self, builder):
if self.inputs is not None:
inputslist = []
for i in range(len(self.inputs)):
inputslist.append(self.inputs[i].Pack(builder))
SignatureDefStartInputsVector(builder, len(self.inputs))
for i in reversed(range(len(self.inputs))):
builder.PrependUOffsetTRelative(inputslist[i])
inputs = builder.EndVector(len(self.inputs))
if self.outputs is not None:
outputslist = []
for i in range(len(self.outputs)):
outputslist.append(self.outputs[i].Pack(builder))
SignatureDefStartOutputsVector(builder, len(self.outputs))
for i in reversed(range(len(self.outputs))):
builder.PrependUOffsetTRelative(outputslist[i])
outputs = builder.EndVector(len(self.outputs))
if self.methodName is not None:
methodName = builder.CreateString(self.methodName)
if self.key is not None:
key = builder.CreateString(self.key)
SignatureDefStart(builder)
if self.inputs is not None:
SignatureDefAddInputs(builder, inputs)
if self.outputs is not None:
SignatureDefAddOutputs(builder, outputs)
if self.methodName is not None:
SignatureDefAddMethodName(builder, methodName)
if self.key is not None:
SignatureDefAddKey(builder, key)
signatureDef = SignatureDefEnd(builder)
return signatureDef
| apache-2.0 | -6,426,101,599,357,591,000 | 37.306122 | 153 | 0.644513 | false | 3.822811 | false | false | false |
MonamAgarwal/final | GTG/plugins/task_reaper/reaper.py | 3 | 7205 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - Luca Invernizzi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import os
from gi.repository import Gtk
from threading import Timer
from GTG.tools.logger import Log
from GTG.tools.dates import Date
class pluginReaper:
DEFAULT_PREFERENCES = {'max_days': 30,
'is_automatic': False,
'show_menu_item': True}
PLUGIN_NAME = "task-reaper"
# In case of automatic removing tasks, the time
# between two runs of the cleaner function
TIME_BETWEEN_PURGES = 60 * 60
def __init__(self):
self.path = os.path.dirname(os.path.abspath(__file__))
# GUI initialization
self.builder = Gtk.Builder()
self.builder.add_from_file(os.path.join(
os.path.dirname(os.path.abspath(__file__)) +
"/reaper.ui"))
self.preferences_dialog = self.builder.get_object("preferences_dialog")
self.pref_chbox_show_menu_item = \
self.builder.get_object("pref_chbox_show_menu_item")
self.pref_chbox_is_automatic = \
self.builder.get_object("pref_chbox_is_automatic")
self.pref_spinbtn_max_days = \
self.builder.get_object("pref_spinbtn_max_days")
SIGNAL_CONNECTIONS_DIC = {
"on_preferences_dialog_delete_event":
self.on_preferences_cancel,
"on_btn_preferences_cancel_clicked":
self.on_preferences_cancel,
"on_btn_preferences_ok_clicked":
self.on_preferences_ok,
}
self.builder.connect_signals(SIGNAL_CONNECTIONS_DIC)
self.menu_item = Gtk.MenuItem("Delete old closed tasks")
self.menu_item.connect('activate', self.delete_old_closed_tasks)
def activate(self, plugin_api):
self.plugin_api = plugin_api
# preferences initialization
self.menu_item_is_shown = False
self.is_automatic = False
self.timer = None
self.preferences_load()
self.preferences_apply()
def onTaskClosed(self, plugin_api):
pass
def onTaskOpened(self, plugin_api):
pass
def onQuit(self, plugin_api):
if self.is_automatic is True:
self.cancel_autopurge()
def deactivate(self, plugin_api):
if self.is_automatic is True:
self.cancel_autopurge()
if self.menu_item_is_shown is True:
plugin_api.remove_menu_item(self.menu_item)
## HELPER FUNCTIONS ###########################################################
def __log(self, message):
Log.debug(message)
## CORE FUNCTIONS #############################################################
def schedule_autopurge(self):
self.timer = Timer(self.TIME_BETWEEN_PURGES,
self.delete_old_closed_tasks)
self.timer.setDaemon(True)
self.timer.start()
self.__log("Automatic deletion of old tasks scheduled")
def cancel_autopurge(self):
if self.timer:
self.__log("Automatic deletion of old tasks cancelled")
self.timer.cancel()
def delete_old_closed_tasks(self, widget=None):
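# Delete every closed task whose closed date is more than max_days days in the past.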
self.__log("Starting deletion of old tasks")
today = Date.today()
max_days = self.preferences["max_days"]
requester = self.plugin_api.get_requester()
closed_tree = requester.get_tasks_tree(name='inactive')
closed_tasks = [requester.get_task(tid) for tid in
closed_tree.get_all_nodes()]
to_remove = [t for t in closed_tasks
if (today - t.get_closed_date()).days > max_days]
for task in to_remove:
if requester.has_task(task.get_id()):
requester.delete_task(task.get_id())
# If automatic purging is on, schedule another run
if self.is_automatic:
self.schedule_autopurge()
## Preferences methods ########################################################
def is_configurable(self):
"""A configurable plugin should have this method and return True"""
return True
def configure_dialog(self, manager_dialog):
self.preferences_load()
self.preferences_dialog.set_transient_for(manager_dialog)
self.pref_chbox_is_automatic.set_active(
self.preferences["is_automatic"])
self.pref_chbox_show_menu_item.set_active(
self.preferences["show_menu_item"])
self.pref_spinbtn_max_days.set_value(
self.preferences["max_days"])
self.preferences_dialog.show_all()
def on_preferences_cancel(self, widget=None, data=None):
self.preferences_dialog.hide()
return True
def on_preferences_ok(self, widget=None, data=None):
self.preferences["is_automatic"] = \
self.pref_chbox_is_automatic.get_active()
self.preferences["show_menu_item"] = \
self.pref_chbox_show_menu_item.get_active()
self.preferences["max_days"] = \
self.pref_spinbtn_max_days.get_value()
self.preferences_apply()
self.preferences_store()
self.preferences_dialog.hide()
def preferences_load(self):
self.preferences = self.plugin_api.load_configuration_object(
self.PLUGIN_NAME, "preferences",
default_values=self.DEFAULT_PREFERENCES)
def preferences_store(self):
self.plugin_api.save_configuration_object(self.PLUGIN_NAME,
"preferences",
self.preferences)
def preferences_apply(self):
# Showing the GUI
if self.preferences['show_menu_item'] is True and \
self.menu_item_is_shown is False:
self.plugin_api.add_menu_item(self.menu_item)
self.menu_item_is_shown = True
elif self.preferences['show_menu_item'] is False and \
self.menu_item_is_shown is True:
self.plugin_api.remove_menu_item(self.menu_item)
self.menu_item_is_shown = False
# Auto-purge
if self.preferences['is_automatic'] is True and \
self.is_automatic is False:
self.is_automatic = True
# Run the first iteration immediately and schedule next iteration
self.delete_old_closed_tasks()
elif self.preferences['is_automatic'] is False and \
self.is_automatic is True:
self.cancel_autopurge()
self.is_automatic = False
| gpl-3.0 | 5,194,328,026,330,886,000 | 37.945946 | 79 | 0.596253 | false | 4.016165 | false | false | false |
hermanschaaf/mafan | mafan/pinyin.py | 2 | 1861 | # -*- coding: utf-8 -*-
import re
import codecs
PinyinToneMark = {
0: u"aoeiuv\u00fc",
1: u"\u0101\u014d\u0113\u012b\u016b\u01d6\u01d6",
2: u"\u00e1\u00f3\u00e9\u00ed\u00fa\u01d8\u01d8",
3: u"\u01ce\u01d2\u011b\u01d0\u01d4\u01da\u01da",
4: u"\u00e0\u00f2\u00e8\u00ec\u00f9\u01dc\u01dc",
}
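# Row 0 lists the plain vowels (a o e i u v/ü) used for lookup; rows 1-4 hold the
# corresponding tone-marked forms for tones 1-4.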
def decode(s):
"""
Converts text in the numbering format of pinyin ("ni3hao3") to text with the
appropriate tone marks ("nǐhǎo").
"""
s = s.lower()
r = ""
t = ""
for c in s:
if c >= 'a' and c <= 'z':
t += c
elif c == ':':
try:
if t[-1] == 'u':
t = t[:-1] + u"\u00fc"
except:
pass
else:
if c >= '0' and c <= '5':
tone = int(c) % 5
if tone != 0:
m = re.search(u"[aoeiuv\u00fc]+", t)
if m is None:
t += c
elif len(m.group(0)) == 1:
t = t[:m.start(0)] + PinyinToneMark[tone][PinyinToneMark[0].index(m.group(0))] + t[m.end(0):]
else:
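# Standard tone-mark placement for multi-vowel finals: mark 'a' if present,
# otherwise 'o' or 'e', otherwise the last vowel of 'ui'/'iu'.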
if 'a' in t:
t = t.replace("a", PinyinToneMark[tone][0])
elif 'o' in t:
t = t.replace("o", PinyinToneMark[tone][1])
elif 'e' in t:
t = t.replace("e", PinyinToneMark[tone][2])
elif t.endswith("ui"):
t = t.replace("i", PinyinToneMark[tone][3])
elif t.endswith("iu"):
t = t.replace("u", PinyinToneMark[tone][4])
else:
t += "!"
r += t
t = ""
r += t
return r
| mit | -181,050,468,988,118,600 | 31.051724 | 117 | 0.384077 | false | 3.062603 | false | false | false |
DongjunLee/kino-bot | kino/bot/worker.py | 1 | 8029 | # -*- coding: utf-8 -*-
import threading
from hbconfig import Config
from ..background import schedule
from ..functions import FunctionRunner
from ..nlp.ner import NamedEntitiyRecognizer
from ..notifier.scheduler import Scheduler
from ..slack.resource import MsgResource
from ..slack.slackbot import SlackerAdapter
from ..utils.data_handler import DataHandler
from ..utils.logger import Logger
class Worker(object):
def __init__(self, text=None, slackbot=None):
self.input = text
self.data_handler = DataHandler()
self.logger = Logger().get_logger()
self.ner = NamedEntitiyRecognizer()
self.function_runner = FunctionRunner().load_function
if slackbot is None:
self.slackbot = SlackerAdapter()
else:
self.slackbot = slackbot
if Config.profile.personal:
from ..utils.profile import Profile
self.profile = Profile()
else:
self.profile = None
def create(self):
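# Parse the free-form input text with the NER helpers into schedule fields
# (time, day of week, time unit, target skill and its params) and hand them to the Scheduler.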
ner_dict = {
k: self.ner.parse(v, self.input) for k, v in self.ner.schedule.items()
}
day_of_week = self.ner.parse(
self.ner.schedule["day_of_week"], self.input, get_all=True
)
ner_dict["day_of_week"] = day_of_week
time_unit = self.ner.parse(
self.ner.schedule["time_unit"], self.input, get_all=True
)
ner_dict["time_unit"] = time_unit
skill_keywords = {k: v["keyword"] for k, v in self.ner.skills.items()}
func_name = self.ner.parse(skill_keywords, self.input)
ner_dict["skills"] = func_name
params = {k: self.ner.parse(v, self.input) for k, v in self.ner.params.items()}
ner_dict["params"] = params
Scheduler().create_with_ner(**ner_dict)
def run(self, init=False):
if self.is_running():
self.logger.info("Already running.")
return
self.set_schedules()
schedule.run_continuously(interval=1)
if not init:
self.slackbot.send_message(text=MsgResource.WORKER_START)
def is_running(self):
if len(schedule.jobs) > 0:
return True
else:
return False
def set_schedules(self):
if self.profile:
self.__set_profile_schedule()
self.__set_custom_schedule()
def __set_profile_schedule(self):
self.__excute_profile_schedule(
self.profile.get_schedule("WAKE_UP"),
False,
"good_morning",
{},
True,
)
self.__excute_profile_schedule(
self.profile.get_schedule("WORK_START"),
False,
"send_message",
{"text": MsgResource.PROFILE_WORK_START},
True,
)
self.__excute_profile_schedule(
self.profile.get_schedule("WORK_END"),
False,
"send_message",
{"text": MsgResource.PROFILE_WORK_END},
True,
)
self.__excute_profile_schedule(
self.profile.get_schedule("GO_TO_BED"),
False,
"good_night",
{},
False,
)
# Toggl Tasks <-> Activity Tasks Sync
self.__excute_profile_schedule(
"23:55",
False,
"activity_task_sync",
{},
False,
)
# slack presence issue
# self.__excute_profile_schedule(
# self.profile.get_schedule('CHECK_GO_TO_BED'), False,
# 'check_go_to_bed', {}, False)
interval = Config.profile.feed.INTERVAL
self.__excute_feed_schedule(interval)
self.__excute_health_check()
def __excute_profile_schedule(self, time, repeat, func_name, params, not_holiday):
schedule.every().day.at(time).do(
self.__run_threaded,
self.function_runner,
{
"repeat": repeat,
"func_name": func_name,
"params": params,
"day_of_week": [0],
"not_holiday": not_holiday,
},
)
def __excute_feed_schedule(self, interval):
schedule.every(interval).minutes.do(
self.__run_threaded,
self.function_runner,
{
"repeat": True,
"func_name": "feed_notify",
"params": {},
"day_of_week": [0],
"not_holiday": False,
},
)
def __excute_health_check(self):
schedule.every(30).minutes.do(
self.__run_threaded,
self.function_runner,
{
"repeat": True,
"func_name": "health_check",
"params": {},
"day_of_week": [0],
"not_holiday": False,
},
)
def __set_custom_schedule(self):
schedule_fname = "schedule.json"
schedule_data = self.data_handler.read_file(schedule_fname)
alarm_data = schedule_data.get("alarm", {})
between_data = schedule_data.get("between", {})
for _, v in alarm_data.items():
if not isinstance(v, type({})):
continue
day_of_week = v.get("day_of_week", [0])
if "time" in v:
time = v["time"]
param = {
# Do only once
"repeat": False,
"func_name": v["f_name"],
"day_of_week": day_of_week,
"params": v.get("f_params", {}),
}
try:
schedule.every().day.at(time).do(
self.__run_threaded, self.function_runner, param
)
except Exception as e:
print("Function Schedule Error: ", e)
self.slackbot.send_message(text=MsgResource.ERROR)
if "between_id" in v:
between = between_data[v["between_id"]]
start_time, end_time = self.__time_interval2start_end(
between["time_interval"]
)
# Repeat
period = v["period"].split(" ")
number = int(period[0])
datetime_unit = self.__replace_datetime_unit_ko2en(period[1])
param = {
"start_time": start_time,
"end_time": end_time,
"repeat": True,
"day_of_week": day_of_week,
"func_name": v["f_name"],
"params": v.get("f_params", {}),
}
try:
getattr(schedule.every(number), datetime_unit).do(
self.__run_threaded, self.function_runner, param
)
except Exception as e:
print("Error: " + e)
def __replace_datetime_unit_ko2en(self, datetime_unit):
ko2en_dict = {"초": "seconds", "분": "minutes", "시": "hours", "시간": "hours"}
if datetime_unit in ko2en_dict:
return ko2en_dict[datetime_unit]
return datetime_unit
def __time_interval2start_end(self, time_interval):
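# Parse an "HH:MM~HH:MM" range into ((h, m), (h, m)); when no '~' is present,
# the raw value is returned as start_time with end_time set to None.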
if "~" in time_interval:
time_interval = time_interval.split("~")
start_time = time_interval[0].split(":")
end_time = time_interval[1].split(":")
start_time = tuple(map(lambda x: int(x), start_time))
end_time = tuple(map(lambda x: int(x), end_time))
else:
start_time = time_interval
end_time = None
return start_time, end_time
def __run_threaded(self, job_func, param):
job_thread = threading.Thread(target=job_func, kwargs=param)
job_thread.start()
def stop(self, init=False):
schedule.clear()
if not init:
self.slackbot.send_message(text=MsgResource.WORKER_STOP)
| mit | -6,698,951,712,818,450,000 | 29.490494 | 87 | 0.501684 | false | 4.013514 | false | false | false |
yugangw-msft/azure-cli | src/azure-cli-core/azure/cli/core/azclierror.py | 2 | 8635 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
import azure.cli.core.telemetry as telemetry
from knack.util import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
# pylint: disable=unnecessary-pass
# Error types in AzureCLI are from different sources, and there are many general error types like CLIError, AzureError.
# Besides, many error types with different names are actually showing the same kind of error.
# For example, CloudError, CLIError and ValidationError all could be a resource-not-found error.
# Therefore, here we define the new error classes to map and categorize all of the error types from different sources.
# region: Base Layer
# Base class for all the AzureCLI defined error classes.
class AzCLIError(CLIError):
""" Base class for all the AzureCLI defined error classes.
DO NOT raise this error class in your codes. """
def __init__(self, error_msg, recommendation=None):
# error message
self.error_msg = error_msg
# manual recommendations provided based on developers' knowledge
self.recommendations = []
self.set_recommendation(recommendation)
# AI recommendations provided by Aladdin service, with tuple form: (recommendation, description)
self.aladdin_recommendations = []
# exception trace for the error
self.exception_trace = None
super().__init__(error_msg)
def set_recommendation(self, recommendation):
"""" Set manual recommendations for the error.
Command module or extension authors could call this method to provide recommendations,
the recommendations will be printed after the error message, one recommendation per line
"""
if isinstance(recommendation, str):
self.recommendations.append(recommendation)
elif isinstance(recommendation, list):
self.recommendations.extend(recommendation)
def set_aladdin_recommendation(self, recommendations):
""" Set aladdin recommendations for the error.
One item should be a tuple with the form: (recommendation, description)
"""
self.aladdin_recommendations.extend(recommendations)
def set_exception_trace(self, exception_trace):
self.exception_trace = exception_trace
def print_error(self):
from azure.cli.core.azlogging import CommandLoggerContext
from azure.cli.core.style import print_styled_text
with CommandLoggerContext(logger):
# print error message
logger.error(self.error_msg)
# print exception trace if there is
if self.exception_trace:
logger.exception(self.exception_trace)
# print recommendations to action
if self.recommendations:
for recommendation in self.recommendations:
print(recommendation, file=sys.stderr)
if self.aladdin_recommendations:
print('\nTRY THIS:', file=sys.stderr)
for recommendation, description in self.aladdin_recommendations:
print_styled_text(recommendation, file=sys.stderr)
print_styled_text(description, file=sys.stderr)
def send_telemetry(self):
telemetry.set_error_type(self.__class__.__name__)
# endregion
# region: Second Layer
# Main categories of the AzureCLI error types, used for Telemetry analysis
class UserFault(AzCLIError):
""" Users should be responsible for the errors.
DO NOT raise this error class in your codes. """
def send_telemetry(self):
super().send_telemetry()
telemetry.set_user_fault(self.error_msg)
class ServiceError(AzCLIError):
""" Azure Services should be responsible for the errors.
DO NOT raise this error class in your codes. """
def send_telemetry(self):
super().send_telemetry()
telemetry.set_failure(self.error_msg)
class ClientError(AzCLIError):
""" AzureCLI should be responsible for the errors.
DO NOT raise this error class in your codes. """
def send_telemetry(self):
super().send_telemetry()
telemetry.set_failure(self.error_msg)
if self.exception_trace:
telemetry.set_exception(self.exception_trace, '')
class UnknownError(AzCLIError):
""" Unclear errors, could not know who should be responsible for the errors.
DO NOT raise this error class in your codes. """
def send_telemetry(self):
super().send_telemetry()
telemetry.set_failure(self.error_msg)
# endregion
# region: Third Layer
# Specific categories of the AzureCLI error types
# Raise the error classes here in your codes. Avoid using fallback error classes unless you can not find a proper one.
# Command related error types
class CommandNotFoundError(UserFault):
""" Command is misspelled or not recognized by AzureCLI. """
pass
# Argument related error types
class UnrecognizedArgumentError(UserFault):
""" Argument is misspelled or not recognized by AzureCLI. """
pass
class RequiredArgumentMissingError(UserFault):
""" Required argument is not specified. """
pass
class MutuallyExclusiveArgumentError(UserFault):
""" Arguments can not be specified together. """
pass
class InvalidArgumentValueError(UserFault):
""" Argument value is not valid. """
pass
class ArgumentUsageError(UserFault):
""" Fallback of the argument usage related errors.
Avoid using this class unless the error can not be classified
into the Argument related specific error types. """
pass
# Response related error types
class BadRequestError(UserFault):
""" Bad request from client: 400 error """
pass
class UnauthorizedError(UserFault):
""" Unauthorized request: 401 error """
pass
class ForbiddenError(UserFault):
""" Service refuse to response: 403 error """
pass
class ResourceNotFoundError(UserFault):
""" Can not find Azure resources: 404 error """
pass
class AzureInternalError(ServiceError):
""" Azure service internal error: 5xx error """
pass
class AzureResponseError(UserFault):
""" Fallback of the response related errors.
Avoid using this class unless the error can not be classified
into the Response related specific error types. """
pass
# Request related error types
class AzureConnectionError(UserFault):
""" Connection issues like connection timeout, aborted or broken. """
pass
class ClientRequestError(UserFault):
""" Fallback of the request related errors. Error occurs while attempting
to make a request to the service. No request is sent.
Avoid using this class unless the error can not be classified
into the Request related specific errors types. """
pass
# File operation related error types
class FileOperationError(UserFault):
""" For file or directory operation related errors. """
pass
# Keyboard interrupt error type
class ManualInterrupt(UserFault):
""" Keyboard interrupt. """
pass
class NoTTYError(UserFault):
""" No tty available for prompt. """
pass
# ARM template related error types
class InvalidTemplateError(UserFault):
""" ARM template validation fails. It could be caused by incorrect template files or parameters """
pass
class DeploymentError(UserFault):
""" ARM template deployment fails. Template file is valid, and error occurs in deployment. """
pass
# Validation related error types
class ValidationError(UserFault):
""" Fallback of the errors in validation functions.
Avoid using this class unless the error can not be classified into
the Argument, Request and Response related specific error types. """
pass
class UnclassifiedUserFault(UserFault):
""" Fallback of the UserFault related error types.
Avoid using this class unless the error can not be classified into
the UserFault related specific error types.
"""
pass
# CLI internal error type
class CLIInternalError(ClientError):
""" AzureCLI internal error """
pass
# Client error for az next
class RecommendationError(ClientError):
""" The client error raised by `az next`. It is needed in `az next` to skip error records. """
pass
class AuthenticationError(ServiceError):
""" Raised when AAD authentication fails. """
# endregion
| mit | -2,844,716,572,592,482,300 | 30.981481 | 119 | 0.691951 | false | 4.53043 | false | false | false |
Yuliang-Zou/Automatic_Group_Photography_Enhancement | lib/networks/caffenet.py | 1 | 1795 | import tensorflow as tf
from networks.network import Network
class caffenet(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
self.rois = tf.placeholder(tf.float32, shape=[None, 5])
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data':self.data, 'rois':self.rois})
self.trainable = trainable
self.setup()
def setup(self):
(self.feed('data')
.conv(11, 11, 96, 4, 4, padding='VALID', name='conv1', trainable=False)
.max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
.lrn(2, 2e-05, 0.75, name='norm1')
.conv(5, 5, 256, 1, 1, group=2, name='conv2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.lrn(2, 2e-05, 0.75, name='norm2')
.conv(3, 3, 384, 1, 1, name='conv3')
.conv(3, 3, 384, 1, 1, group=2, name='conv4')
.conv(3, 3, 256, 1, 1, group=2, name='conv5')
.feature_extrapolating([1.0, 2.0, 3.0, 4.0], 4, 4, name='conv5_feature'))
# Incomplete pipeline fragment in the original source: the conv call below has no
# parameters and the expression is never closed, so it is left disabled here to keep
# setup() syntactically valid.
# (self.feed('conv5_feature', 'im_info')
#      .conv(3, 3, ...))
(self.feed('conv5_feature', 'rois')
.roi_pool(6, 6, 1.0/16, name='pool5')
.fc(4096, name='fc6')
.dropout(self.keep_prob, name='drop6')
.fc(4096, name='fc7')
.dropout(self.keep_prob, name='drop7')
.fc(174, relu=False, name='subcls_score')
.softmax(name='subcls_prob'))
(self.feed('subcls_score')
.fc(4, relu=False, name='cls_score')
.softmax(name='cls_prob'))
(self.feed('subcls_score')
.fc(16, relu=False, name='bbox_pred'))
| mit | -8,519,936,757,939,628,000 | 39.795455 | 86 | 0.518663 | false | 3.032095 | false | false | false |
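The rows above follow the schema in the table header (repo_name, path, text, license, plus numeric quality signals such as alpha_frac and ratio). As a rough sketch of how such a dump could be consumed with the Hugging Face `datasets` library, assuming the rows have been exported to a JSON Lines file named `python_files.jsonl` (the file name and the filter thresholds below are assumptions, not part of the dataset):

```python
from datasets import load_dataset

# Load the exported rows; each record mirrors the columns in the table header.
ds = load_dataset("json", data_files="python_files.jsonl", split="train")

# Keep permissively licensed, human-written files with a reasonable alphabetic fraction.
def keep(row):
    return (
        row["license"] in {"mit", "bsd-3-clause", "apache-2.0"}
        and not row["autogenerated"]
        and row["alpha_frac"] > 0.4
    )

filtered = ds.filter(keep)
print(len(filtered), "files kept")
print(filtered[0]["repo_name"], filtered[0]["path"])
```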
thenetcircle/dino | test/api/test_api_join.py | 1 | 9160 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test.base import BaseTest
from activitystreams import parse as as_parser
from dino import api
from dino.config import ApiActions
from dino.utils import b64d
class ApiJoinTest(BaseTest):
def setUp(self):
super(ApiJoinTest, self).setUp()
self.create_channel_and_room()
def test_join_non_owner_no_acl(self):
self.assert_join_succeeds()
def test_join_owner_no_acl(self):
self.set_owner()
self.assert_join_succeeds()
def test_join_non_owner_correct_country(self):
self.remove_owner_channel()
self.remove_owner()
self.set_acl({ApiActions.JOIN: {'country': 'de,cn,dk'}})
self.assert_join_succeeds()
def test_join_non_owner_with_all_acls(self):
self.remove_owner_channel()
self.remove_owner()
self.set_acl({ApiActions.JOIN: {
'country': 'de,cn,dk',
'city': 'Beijing,Shanghai,Berlin,Copenhagen',
'age': '18:45',
'gender': 'm,f',
'membership': '0,1',
'has_webcam': 'y',
'fake_checked': 'y,n',
'image': 'y'
}})
self.assert_join_succeeds()
def test_join_owner_with_all_acls(self):
self.set_owner()
self.set_acl({ApiActions.JOIN: {
'country': 'de,cn,dk',
'city': 'Beijing,Shanghai,Berlin,Copenhagen',
'age': '18:45',
'gender': 'm,f',
'membership': '0,1',
'has_webcam': 'y',
'fake_checked': 'y,n',
'image': 'n'
}})
self.assert_join_succeeds()
def test_join_returns_activity_with_4_attachments(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
self.assertEqual(4, len(response[1]['object']['attachments']))
def test_join_returns_activity_with_acl_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
acls = self.get_attachment_for_key(attachments, 'acl')
self.assertIsNotNone(acls)
def test_join_returns_activity_with_history_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
history = self.get_attachment_for_key(attachments, 'history')
self.assertIsNotNone(history)
def test_join_returns_activity_with_owner_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
owners = self.get_attachment_for_key(attachments, 'owner')
self.assertIsNotNone(owners)
def test_join_returns_activity_with_users_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
users = self.get_attachment_for_key(attachments, 'user')
self.assertIsNotNone(users)
def test_join_returns_activity_with_empty_acl_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
self.assert_attachment_equals(attachments, 'acl', [])
def test_join_returns_activity_with_empty_history_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
self.assert_attachment_equals(attachments, 'history', [])
def test_join_returns_activity_with_empty_owner_attachment(self):
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
self.assert_attachment_equals(attachments, 'owner', [])
def test_join_returns_activity_with_one_user_as_attachment(self):
third_user_id = "9876"
self.env.db.set_user_name(third_user_id, third_user_id)
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
users = self.get_attachment_for_key(attachments, 'user')
self.assertEqual(0, len(users))
act = self.activity_for_join()
act['actor']['id'] = third_user_id
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
users = self.get_attachment_for_key(attachments, 'user')
self.assertEqual(1, len(users))
def test_join_returns_activity_with_one_owner(self):
self.set_owner()
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
owners = self.get_attachment_for_key(attachments, 'owner')
self.assertEqual(1, len(owners))
def test_join_returns_activity_with_correct_owner(self):
self.set_owner()
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
owners = self.get_attachment_for_key(attachments, 'owner')
user_id, user_name = owners[0]['id'], owners[0]['displayName']
self.assertEqual(ApiJoinTest.USER_ID, user_id)
self.assertEqual(ApiJoinTest.USER_NAME, b64d(user_name))
def test_join_returns_correct_nr_of_acls(self):
correct_acls = {ApiActions.JOIN: {'country': 'de,cn,dk', 'city': 'Shanghai,Berlin,Copenhagen'}}
self.set_acl(correct_acls)
self.set_owner()
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
returned_acls = self.get_attachment_for_key(attachments, 'acl')
self.assertEqual(len(correct_acls.get(ApiActions.JOIN)), len(returned_acls))
def test_join_returns_correct_acls(self):
correct_acls = {ApiActions.JOIN: {'country': 'de,cn,dk', 'city': 'Shanghai,Berlin,Copenhagen'}}
self.set_acl(correct_acls)
self.set_owner()
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
returned_acls = self.get_attachment_for_key(attachments, 'acl')
for acl in returned_acls:
acl_key = acl['objectType']
acl_value = acl['content']
self.assertTrue(acl_key in correct_acls.get(ApiActions.JOIN))
self.assertEqual(correct_acls.get(ApiActions.JOIN)[acl_key], acl_value)
def test_join_returns_history(self):
msg = 'this is a test message'
self.set_owner()
self.assert_join_succeeds()
self.send_message(msg)
self.assert_in_room(True)
self.leave_room()
self.assert_in_room(False)
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
returned_history = self.get_attachment_for_key(attachments, 'history')
self.assertEqual(1, len(returned_history))
def test_join_returns_correct_history(self):
msg = 'this is a test message'
self.set_owner()
self.assert_join_succeeds()
msg_response = self.send_message(msg)[1]
self.leave_room()
act = self.activity_for_join()
response = api.on_join(act, as_parser(act))
attachments = response[1]['object']['attachments']
from pprint import pprint
pprint(self.get_attachment_for_key(attachments, 'history'))
all_history = self.get_attachment_for_key(attachments, 'history')
self.assertEqual(1, len(all_history))
history_obj = all_history[0]
self.assertEqual(msg_response['id'], history_obj['id'])
self.assertEqual(msg, b64d(history_obj['content']))
self.assertEqual(msg_response['published'], history_obj['published'])
self.assertEqual(ApiJoinTest.USER_NAME, b64d(history_obj['author']['displayName']))
def assert_attachment_equals(self, attachments, key, value):
found = self.get_attachment_for_key(attachments, key)
self.assertEqual(value, found)
def get_attachment_for_key(self, attachments, key):
for attachment in attachments:
if attachment['objectType'] == key:
return attachment['attachments']
return None
| apache-2.0 | 8,859,849,382,184,028,000 | 39.892857 | 103 | 0.628821 | false | 3.645046 | true | false | false |
erigones/esdc-ce | api/exceptions.py | 1 | 9305 | """
Some parts are copied from rest_framework.exceptions, which is licensed under the BSD license:
*******************************************************************************
Copyright (c) 2011-2016, Tom Christie
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
Handled exceptions raised by REST framework.
In addition Django's built in 403 and 404 exceptions are handled.
(`django.http.Http404` and `django.core.exceptions.PermissionDenied`)
"""
from __future__ import unicode_literals
import math
from django.utils.encoding import force_text
from django.utils.translation import ungettext, ugettext_lazy as _
from django.db import (
DatabaseError,
OperationalError as DatabaseOperationalError,
InterfaceError as DatabaseInterfaceError,
)
from redis.exceptions import (
TimeoutError as RedisTimeoutError,
ConnectionError as RedisConnectionError,
)
from kombu.exceptions import (
TimeoutError as RabbitTimeoutError,
ConnectionError as RabbitConnectionError,
)
from api import status
# List of operational errors that affect the application in a serious manner
# (e.g. callback tasks that fail because of this must be retried)
OPERATIONAL_ERRORS = (
DatabaseOperationalError,
DatabaseInterfaceError,
RabbitConnectionError,
RabbitTimeoutError,
RedisTimeoutError,
RedisConnectionError,
)
class APIException(Exception):
"""
Base class for REST framework exceptions.
Subclasses should provide `.status_code` and `.default_detail` properties.
"""
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_detail = _('A server error occurred.')
def __init__(self, detail=None):
if detail is None:
self.detail = force_text(self.default_detail)
else:
self.detail = force_text(detail)
def __str__(self):
return self.detail
class TransactionError(DatabaseError):
"""Use this to break atomic transactions"""
pass
class ObjectAPIException(APIException):
"""Inject object's name or model's verbose name into detail"""
default_object_name = _('Object')
default_model = None
def __init__(self, detail=None, object_name=None, model=None, task_id=None):
super(ObjectAPIException, self).__init__(detail=detail)
self.task_id = task_id
if not object_name:
model = model or self.default_model
if model:
# noinspection PyProtectedMember
object_name = model._meta.verbose_name_raw
else:
object_name = self.default_object_name
self.detail = self.detail.format(object=object_name)
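# Illustrative sketch (hypothetical usage of the subclasses defined below):
#   raise ObjectNotFound(object_name='VM')   # detail becomes 'VM not found'
#   raise ItemAlreadyExists()                # detail becomes 'Item already exists'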
class BadRequest(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _('Bad request')
class ParseError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _('Malformed request')
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = _('Incorrect authentication credentials.')
class NotAuthenticated(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = _('Authentication credentials were not provided.')
class PermissionDenied(APIException):
status_code = status.HTTP_403_FORBIDDEN
default_detail = _('You do not have permission to perform this action.')
class NotFound(APIException):
status_code = status.HTTP_404_NOT_FOUND
default_detail = _('Not found')
class MethodNotAllowed(APIException):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
default_detail = _('Method "{method}" not allowed.')
def __init__(self, method, detail=None):
if detail is None:
self.detail = force_text(self.default_detail).format(method=method)
else:
self.detail = force_text(detail)
class NotAcceptable(APIException):
status_code = status.HTTP_406_NOT_ACCEPTABLE
default_detail = _('Could not satisfy the request Accept header.')
def __init__(self, detail=None, available_renderers=None):
if detail is None:
self.detail = force_text(self.default_detail)
else:
self.detail = force_text(detail)
self.available_renderers = available_renderers
class ObjectNotFound(ObjectAPIException):
status_code = status.HTTP_404_NOT_FOUND
default_detail = _('{object} not found')
class ObjectAlreadyExists(ObjectAPIException):
status_code = status.HTTP_406_NOT_ACCEPTABLE
default_detail = _('{object} already exists')
class ObjectOutOfRange(ObjectAPIException):
status_code = status.HTTP_406_NOT_ACCEPTABLE
default_detail = _('{object} out of range')
class ItemNotFound(ObjectNotFound):
default_object_name = _('Item')
class ItemAlreadyExists(ObjectAlreadyExists):
default_object_name = _('Item')
class ItemOutOfRange(ObjectOutOfRange):
default_object_name = _('Item')
class InvalidInput(APIException):
status_code = status.HTTP_412_PRECONDITION_FAILED
default_detail = _('Invalid input')
class UnsupportedMediaType(APIException):
status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
default_detail = _('Unsupported media type "{media_type}" in request.')
def __init__(self, media_type, detail=None):
if detail is None:
self.detail = force_text(self.default_detail).format(media_type=media_type)
else:
self.detail = force_text(detail)
class NodeIsNotOperational(APIException):
status_code = status.HTTP_423_LOCKED
default_detail = _('Node is not operational')
class VmIsNotOperational(APIException):
status_code = status.HTTP_423_LOCKED
default_detail = _('VM is not operational')
class VmIsLocked(APIException):
status_code = status.HTTP_423_LOCKED
default_detail = _('VM is locked or has slave VMs')
class TaskIsAlreadyRunning(APIException):
status_code = status.HTTP_423_LOCKED
default_detail = _('Task is already running')
class NodeHasPendingTasks(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = _('Node has pending tasks')
class VmHasPendingTasks(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = _('VM has pending tasks')
class ExpectationFailed(APIException):
status_code = status.HTTP_417_EXPECTATION_FAILED
default_detail = _('Expectation Failed')
class PreconditionRequired(APIException):
status_code = status.HTTP_428_PRECONDITION_REQUIRED
default_detail = _('Precondition Required')
class FailedDependency(APIException):
status_code = status.HTTP_424_FAILED_DEPENDENCY
default_detail = _('Failed Dependency')
class Throttled(APIException):
status_code = status.HTTP_429_TOO_MANY_REQUESTS
default_detail = _('Request was throttled.')
extra_detail_singular = 'Expected available in {wait} second.'
extra_detail_plural = 'Expected available in {wait} seconds.'
def __init__(self, wait=None, detail=None):
if detail is None:
self.detail = force_text(self.default_detail)
else:
self.detail = force_text(detail)
if wait is None:
self.wait = None
else:
self.wait = math.ceil(wait)
self.detail += ' ' + force_text(ungettext(
self.extra_detail_singular.format(wait=self.wait),
self.extra_detail_plural.format(wait=self.wait),
self.wait
))
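# Illustrative sketch (hypothetical wait value):
#   raise Throttled(wait=3.2)
# stores math.ceil(3.2) as self.wait and appends the plural extra detail, so the
# client sees roughly 'Request was throttled. Expected available in 4 seconds.'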
class APIError(APIException):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_detail = _('Internal Server Error')
class OperationNotSupported(APIException):
status_code = status.HTTP_501_NOT_IMPLEMENTED
default_detail = _('Operation not supported')
class ServiceUnavailable(APIException):
status_code = status.HTTP_503_SERVICE_UNAVAILABLE
default_detail = _('Service Unavailable')
class GatewayTimeout(APIException):
status_code = status.HTTP_504_GATEWAY_TIMEOUT
default_detail = _('Gateway Timeout')
| apache-2.0 | -4,071,679,858,600,380,000 | 30.975945 | 94 | 0.700913 | false | 4.244982 | false | false | false |
AmitMY/pose-format | pose_format/tensorflow/representation/point_line_distance.py | 1 | 1115 | import tensorflow as tf
from .distance import DistanceRepresentation
class PointLineDistanceRepresentation:
def __init__(self):
self.distance = DistanceRepresentation()
def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor, p3s: tf.Tensor) -> tf.Tensor:
"""
Distance between the point p1s to the line <p2s, p3s>
:param p1s: tf.Tensor (Points, Batch, Len, Dims)
:param p2s: tf.Tensor (Points, Batch, Len, Dims)
:param p3s: tf.Tensor (Points, Batch, Len, Dims)
:return: tf.Tensor (Points, Batch, Len)
"""
# Following Heron's Formula https://en.wikipedia.org/wiki/Heron%27s_formula
a = self.distance.distance(p1s, p2s)
b = self.distance.distance(p2s, p3s)
c = self.distance.distance(p1s, p3s)
s: tf.Tensor = (a + b + c) / 2
squared = s * (s - a) * (s - b) * (s - c)
area = tf.sqrt(squared)
# Calc "height" of the triangle
square_area: tf.Tensor = area * 2
distance = tf.math.divide_no_nan(square_area, b)
# TODO add .zero_filled()
return distance
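# Worked sketch of the Heron's-formula route above (assumed toy points, not part of the library):
# for p1=(0,0), p2=(1,0), p3=(1,1): a=|p1p2|=1, b=|p2p3|=1, c=|p1p3|=sqrt(2),
# s=(a+b+c)/2~1.707, area=sqrt(s(s-a)(s-b)(s-c))=0.5 and distance=2*area/b=1,
# i.e. the distance from p1 to the line through p2 and p3 (the line x=1) is 1.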
| mit | -6,572,269,213,718,925,000 | 33.84375 | 84 | 0.593722 | false | 3.204023 | false | false | false |
stefantkeller/VECSELsetup | exp/eval/reflectivity.py | 1 | 3069 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import errorvalues as ev # github.com/stefantkeller/errorvalues
from VECSELsetup.eval.varycolor import varycolor
from VECSELsetup.eval.gen_functions import extract, lut_from_calibfolder, lut_interp_from_calibfolder
def main():
logfile = '20150204_sample21-1-d6/spot333um.csv'
calib_folder = '20150204_calib_333um_s21-1-d6'
# calibration
#pump_lut, refl_lut, emis_lut = lut_from_calibfolder(calib_folder)
emis_lut = lut_from_calibfolder(calib_folder,identifiers=['Laser'],ignore_error=False) # emission has constant value solely due to BS, no ND in front of detector etc.
pump_lut, refl_lut = lut_interp_from_calibfolder(calib_folder,identifiers=['Pump','Refl'])
#------------------------------------
# load measurement
current_set, current, pump, refl, laser, meantemp = extract(logfile, identifiers=['Current','Pump','Refl','Laser','Temperature'])
Temperatures = sorted(current_set.keys())
absorbed, reflected, emitted, pumped, dissipated, relref = {}, {}, {}, {}, {}, {}
for T in Temperatures:
reflected[T] = refl_lut(refl[T])
pumped[T] = pump_lut(pump[T])
absorbed[T] = pumped[T] - reflected[T]
emitted[T] = emis_lut(laser[T])
dissipated[T] = absorbed[T] - emitted[T]
relref[T] = reflected[T]/pumped[T]*100
cols = varycolor(3*len(Temperatures))
cnt = 0
#plt.subplot(1,2,1)
baserefl = ev.errvallist()
for T in Temperatures:
# plot
pstart, pend = 1, 9 # W pumped
istart, iend = np.sum([pumped[T].v()<pstart]), np.sum([pumped[T].v()<pend])
baserefl.append(ev.wmean(relref[T][istart:iend]))
xplot = current
xlabel = 'Pump current (A)'
plt.errorbar(xplot[T].v(),relref[T].v(),
xerr=xplot[T].e(),yerr=relref[T].e(),
c=cols[cnt],linestyle=' ',
label='$({0})^\circ$C'.format(meantemp[T].round(2)))
plt.plot(xplot[T][istart:iend].v(), (iend-istart)*[baserefl[-1].v()],color='k')
cnt+=3
plt.xlabel(xlabel)
plt.ylabel('Reflectivity (%)')
#plt.xlim([0, 20])
reflylim = [25, 70]
plt.ylim(reflylim)
plt.legend(loc='best',prop={'size':12},labelspacing=-0.4)
plt.grid('on')
plt.show()
##
#plt.subplot(1,2,2)
templist = [meantemp[T] for T in Temperatures]
Temp = ev.errvallist(templist)
q,m = ev.linreg(Temp.v(),baserefl.v(),baserefl.e())
plt.errorbar(Temp.v(),baserefl.v(),
xerr=Temp.e(),yerr=baserefl.e(),
color='r',linestyle=' ')
plt.plot(Temp.v(),q.v()+Temp.v()*m.v(),'k')
plt.text((Temp[0].v()+Temp[1].v())/2.0,baserefl[0].v()+2,
r'$({})+({})T_{{hs}}$'.format(q.round(2),m.round(2)))
plt.ylim(reflylim)
plt.xlabel('Heat sink temperature ($^\circ$C)')
plt.ylabel('Reflectivity (%)')
plt.grid('on')
##
plt.show()
if __name__ == "__main__":
main()
| mit | 8,243,186,852,352,914,000 | 29.69 | 170 | 0.580319 | false | 2.9173 | false | false | false |
ColumBrennan/data-dumper | app.py | 1 | 1987 | #!/usr/bin/python3
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import psycopg2
import xlsxwriter
import os
import sys
import smtplib
#usage: python3 app.py report_title to_address sql_file
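# e.g. (hypothetical invocation): python3 app.py "monthly sales" [email protected] monthly_sales.sql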
SQL_Code = open(str(sys.argv[3]), 'r').read()
#Connecting to PostgreSQL
def main():
conn_string = "host='db' dbname='directski' user='pgsql' password=''"
print ("Connecting to database\n ->%s" % (conn_string))
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
print ("Connected!\n")
cursor.execute(SQL_Code)
filename = str(sys.argv[1]).replace(" ", "_").lower()
workbook = xlsxwriter.Workbook(filename + ".xlsx", {'remove_timezone': True})
worksheet = workbook.add_worksheet()
data = cursor.fetchall()
# Headers
for colidx,heading in enumerate(cursor.description):
worksheet.write(0, colidx, heading[0])
# Writing the Rows
for rowid, row in enumerate(data):
for colid, col in enumerate(row):
worksheet.write(rowid+1, colid, col)
# Saving
workbook.close()
fromaddr = "[email protected]"
toaddr = str(sys.argv[2])
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = str(sys.argv[1])
body = ""
msg.attach(MIMEText(body, 'plain'))
attachment = open(filename + ".xlsx", "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename + ".xlsx")
msg.attach(part)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, "temp123")
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
if __name__ == "__main__":
main()
| gpl-3.0 | -8,380,741,031,090,985,000 | 25.219178 | 91 | 0.625566 | false | 3.437716 | false | false | false |
kovidgoyal/kitty | kitty/fonts/core_text.py | 1 | 4126 | #!/usr/bin/env python3
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import re
from typing import Dict, Generator, Iterable, List, Optional, Tuple
from kitty.fast_data_types import coretext_all_fonts
from kitty.fonts import FontFeature
from kitty.options.types import Options
from kitty.typing import CoreTextFont
from kitty.utils import log_error
from . import ListedFont
attr_map = {(False, False): 'font_family',
(True, False): 'bold_font',
(False, True): 'italic_font',
(True, True): 'bold_italic_font'}
FontMap = Dict[str, Dict[str, List[CoreTextFont]]]
def create_font_map(all_fonts: Iterable[CoreTextFont]) -> FontMap:
ans: FontMap = {'family_map': {}, 'ps_map': {}, 'full_map': {}}
for x in all_fonts:
f = (x['family'] or '').lower()
s = (x['style'] or '').lower()
ps = (x['postscript_name'] or '').lower()
ans['family_map'].setdefault(f, []).append(x)
ans['ps_map'].setdefault(ps, []).append(x)
ans['full_map'].setdefault(f + ' ' + s, []).append(x)
return ans
def all_fonts_map() -> FontMap:
ans: Optional[FontMap] = getattr(all_fonts_map, 'ans', None)
if ans is None:
ans = create_font_map(coretext_all_fonts())
setattr(all_fonts_map, 'ans', ans)
return ans
def list_fonts() -> Generator[ListedFont, None, None]:
for fd in coretext_all_fonts():
f = fd['family']
if f:
fn = (f + ' ' + (fd['style'] or '')).strip()
is_mono = bool(fd['monospace'])
yield {'family': f, 'full_name': fn, 'postscript_name': fd['postscript_name'] or '', 'is_monospace': is_mono}
def find_font_features(postscript_name: str) -> Tuple[FontFeature, ...]:
"""Not Implemented"""
return ()
def find_best_match(family: str, bold: bool = False, italic: bool = False) -> CoreTextFont:
q = re.sub(r'\s+', ' ', family.lower())
font_map = all_fonts_map()
def score(candidate: CoreTextFont) -> Tuple[int, int, int, float]:
style_match = 1 if candidate['bold'] == bold and candidate[
'italic'
] == italic else 0
monospace_match = 1 if candidate['monospace'] else 0
is_regular_width = not candidate['expanded'] and not candidate['condensed']
# prefer demi-bold to bold to heavy, less bold means less chance of
# overflow
weight_distance_from_medium = abs(candidate['weight'])
return style_match, monospace_match, 1 if is_regular_width else 0, 1 - weight_distance_from_medium
# First look for an exact match
for selector in ('ps_map', 'full_map'):
candidates = font_map[selector].get(q)
if candidates:
return sorted(candidates, key=score)[-1]
# Let CoreText choose the font if the family exists, otherwise
# fallback to Menlo
if q not in font_map['family_map']:
log_error('The font {} was not found, falling back to Menlo'.format(family))
q = 'menlo'
candidates = font_map['family_map'][q]
return sorted(candidates, key=score)[-1]
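# Illustrative sketch (assumed family name): with Menlo installed,
#   find_best_match('menlo', bold=True)
# ranks every Menlo face by the score tuple above; tuples compare lexicographically, so an
# exact bold/italic match wins first, then a monospace face, then a regular-width face,
# then the weight closest to medium.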
def resolve_family(f: str, main_family: str, bold: bool = False, italic: bool = False) -> str:
if (bold or italic) and f == 'auto':
f = main_family
if f.lower() == 'monospace':
f = 'Menlo'
return f
def get_font_files(opts: Options) -> Dict[str, CoreTextFont]:
ans: Dict[str, CoreTextFont] = {}
for (bold, italic), attr in attr_map.items():
face = find_best_match(resolve_family(getattr(opts, attr), opts.font_family, bold, italic), bold, italic)
key = {(False, False): 'medium',
(True, False): 'bold',
(False, True): 'italic',
(True, True): 'bi'}[(bold, italic)]
ans[key] = face
if key == 'medium':
setattr(get_font_files, 'medium_family', face['family'])
return ans
def font_for_family(family: str) -> Tuple[CoreTextFont, bool, bool]:
ans = find_best_match(resolve_family(family, getattr(get_font_files, 'medium_family')))
return ans, ans['bold'], ans['italic']
| gpl-3.0 | 1,779,891,089,951,357,400 | 35.513274 | 121 | 0.606398 | false | 3.381967 | false | false | false |
pymir3/pymir3 | scripts/ismir2016/resultados/results.py | 1 | 3424 | import os
import glob
import sys
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
dirs = [ name for name in os.listdir(".") if os.path.isdir(os.path.join(".", name)) ]
files = []
for d in sorted(dirs):
if d == "graphs":
continue
p = "." + "/" + d
files = (sorted(glob.glob(p + "/*.csv")))
csvs = dict()
dataset = p.split("./")[1]
dataset_results = open(dataset + "_results.csv", 'w')
for i in files:
csv = np.genfromtxt(i, dtype='string' , delimiter=',')
pipeline = i.split("/")[2].split("_")[1:]
pipeline = (''.join(str(elem) + "_" for elem in pipeline)).replace(".csv_", "")
csvs[pipeline] = csv
stats = csv[0][:]
pipelines = csv[1:,[0]]
for i in range(1,len(stats)):
if stats[i] == 'sorted_features':
print dataset
for j in sorted(csvs.keys()):
print "\t" + j
p = -1
for l in (csvs[j])[1:,[i]]:
data = str(l[0])
data = data.replace("[","")
data = data.replace("]","")
data = data.replace("/","")
data = data.replace("'","")
p+=1
if data[0] == 'P' or data[0] == 'n':
continue
# print "\t\t", pipelines[p][0]
# print "\t\t\t", data.split(" ")
graph = open( "graphs/" + dataset + "_" + j + "_anova_frequencies.txt", "w")
feat_seq = 0;
bf = []
for feat in data.split(" "):
if feat.find("MFCC") != -1:
continue
else:
s = feat.split("_")
if len(s) <=4:
continue
graph.write(s[0][0] + "_" + s[2][0] + "_" + s[3] + ", " + s[3] + ", " + str(feat_seq) + ", " + s[-1] + "\n" )
bf.append(s)
feat_seq+=1
graph.close()
# feats = [s[0][0] + "_" + s[2][0] + "_" + s[3] for s in bf]
# freqs = [max(5,int(s[-1])) for s in bf]
# feat_seqs = [fs for fs in range(len(bf))]
#
# print feats
# print freqs
#
# f = plt.figure(0)
# lefts = np.arange(len(bf))
# plt.bar(lefts, freqs, width=0.5)
# plt.show()
continue
dataset_results.write("\n" + stats[i] + "\n\n")
dataset_results.write("configuration,")
for k in csv[1:,[0]]:
dataset_results.write(k[0] + ",")
dataset_results.write("\n")
for j in sorted(csvs.keys()):
dataset_results.write(j + ",")
for l in (csvs[j])[1:,[i]]:
dataset_results.write(str(l[0]) + ",")
dataset_results.write("\n")
dataset_results.close()
| mit | 5,321,663,387,969,981,000 | 36.626374 | 141 | 0.350175 | false | 4.190942 | false | false | false |
WaveBlocks/WaveBlocksND | WaveBlocksND/TimeManager.py | 1 | 9570 | """The WaveBlocks Project
Provides several computation routines for
handling time and timesteps.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011, 2012, 2013, 2015, 2016 R. Bourquin
@license: Modified BSD License
"""
from scipy import floor
__all__ = ["TimeManager"]
class TimeManager(object):
r"""This class performs several computations with time, timesteps and so forth.
The important quantities here are:
============ ============== ======================================================
Quantity Parameter Name Description
============ ============== ======================================================
:math:`T` T the fixed simulation end time
:math:`\tau` dt the size of the timestep
:math:`N` nsteps the overall number of timesteps.
:math:`t` an unspecified time in the interval :math:`[0, T]`
:math:`n` an unspecified timestep in the interval :math:`[0, N]`
============ ============== ======================================================
The important relations that hold are :math:`T = N \tau` and
in analogy :math:`t = n \tau`. There are also conversion routines
for :math:`t` and :math:`n`.
The simulation parameters handed over to the constructor must contain at least
two out of the three values :math:`T`, :math:`\tau` and :math:`N`. If all three
are given, the user is responsible for compatible values.
Additionally the class contains some routines for determining
if and when some events (for example saving data) should occur.
"""
def __init__(self, parameters):
if parameters is None:
parameters = {}
# We need two out of three: T, dt and nsteps
have_enough = 0
if "T" in parameters:
self._T = float(parameters["T"])
have_enough += 1
else:
self._T = None
if "dt" in parameters:
self._dt = float(parameters["dt"])
have_enough += 1
else:
self._dt = None
if "nsteps" in parameters:
self._nsteps = int(parameters["nsteps"])
have_enough += 1
else:
self._nsteps = None
if have_enough < 2:
            raise KeyError("Parameters provide too little data to construct a 'TimeManager'.")
if self._T is None:
self._T = self.compute_endtime()
if self._dt is None:
self._dt = self.compute_timestep_size()
if self._nsteps is None:
self._nsteps = self.compute_number_timesteps()
# Interval for regular events
self._interval = 1
if "write_nth" in parameters:
self.set_interval(int(parameters["write_nth"]))
# List of timesteps of irregular events
self._eventtimes = []
if "save_at" in parameters:
self.add_to_eventlist(parameters["save_at"])
def __str__(self):
s = "TimeManager configured with:\n"
s += " Final time T: "+str(self._T)+"\n"
s += " Timestep size dt: "+str(self._dt)+"\n"
s += " Number of steps : "+str(self._nsteps)+"\n"
return s
def set_T(self, T):
r"""Set the simulation endtime :math:`T`.
:param T: The simulation end time.
"""
self._T = float(T)
def set_dt(self, dt):
r"""Set the simulation timestep size :math:`\tau`.
:param dt: The simulation timestep size.
"""
self._dt = float(dt)
def set_nsteps(self, nsteps):
r"""Set the number of timesteps the simulation runs.
:param nsteps: The number :math:`n` timesteps we do.
"""
self._nsteps = int(nsteps)
def get_T(self):
r"""Set the simulation endtime :math:`T`.
:returns: The endtime :math:`T`.
"""
return self._T
def get_dt(self):
r"""Get the simulation timestep size :math:`\tau`.
:returns: The timestep :math:`\tau`.
"""
return self._dt
def get_nsteps(self):
r"""Get the number :math:`n` of timesteps the simulation runs.
:returns: the number :math:`n` of timesteps.
"""
return self._nsteps
def compute_endtime(self):
r"""Computes the simulation endtime :math:`T`.
:returns: The endtime :math:`T`.
"""
if self._T is not None:
return self._T
else:
return float(self._nsteps * self._dt)
def compute_timestep_size(self):
r"""Computes the simulation timestep size :math:`\tau`.
:returns: The timestep :math:`\tau`.
"""
if self._dt is not None:
return self._dt
else:
return self._T / float(self._nsteps)
def compute_number_timesteps(self):
r"""Computes the number :math:`n` of time steps we will perform.
:returns: the number :math:`n` of timesteps.
"""
if self._nsteps is not None:
return self._nsteps
else:
return int(floor(self._T / float(self._dt)))
def compute_timestep(self, t):
r"""Compute the timestep :math:`n` from a time :math:`t` such that
:math:`t = n \tau` holds.
:param t: The time t of which we want to find the timestep number.
:returns: The corresponding timestep :math:`n`.
Note that the user has to ensure that time :math:`t` is an integral
multiple of :math:`\tau`.
"""
stepo = t / self._dt
step = round(stepo)
if abs(stepo - step) > 1e-10:
print("Warning: Questionable rounding for timestep computation!")
return int(step)
def compute_time(self, n):
r"""Compute the time :math:`t` from a timestep :math:`n` such that
:math:`t = n \tau` holds.
:param n: The timestep n of which we want to find the corresponding time.
:returns: The corresponding time :math:`t`.
"""
return float(n * self._dt)
def set_interval(self, interval):
r"""Set the interval for regular events.
:param interval: The interval at which regular events get triggered.
Note that a value of ``0`` means there are no regular events.
"""
self._interval = int(interval)
def add_to_eventlist(self, alist):
r"""Add a list of times and/or timesteps to the list of
times when irregular events get triggered.
:param alist: A list with integers (interpreted as timesteps)
and/or floats (interpreted as times)
Note that the times and timesteps can be mixed and need not to be
given in monotone order.
"""
timesteps = []
# If the list is empty (global default), shortcut
if len(alist) == 0:
return
# Integers are interpreted as timesteps, floats are interpreted as times (and converted to timesteps)
for item in alist:
if type(item) == int:
timesteps.append(item)
elif type(item) == float:
timesteps.append(self.compute_timestep(item))
# Validate timesteps and check if n in [0,...,N]
tmp = len(timesteps)
timesteps = [i for i in timesteps if 0 <= i <= self._nsteps]
if tmp != len(timesteps):
print("Warning: Dropped %d timestep(s) due to invalidity!" % (tmp - len(timesteps)))
# Assure unique elements, just silently remove duplicates
oldlist = set(self._eventtimes)
newlist = set(timesteps)
times = list(oldlist.union(newlist))
# Sort in ascending order
times.sort()
# Write back
self._eventtimes = times
def compute_number_events(self):
r"""Compute the number of events we will perform during the simulation.
This can for example be used to determine how much space to allocate
in the output files if the events are times at which simulation data
is saved.
:returns: The number of events.
"""
# We do not save at regular intervals
if self._interval == 0:
# The number of saves resulting from saving at a regular interval is zero
n_si = 0
# Determine the number of saves resulting from the savelist
n_sl = len(self._eventtimes)
# We do save at regular intervals
else:
# Determine the number of saves resulting from saving at a regular interval
n_si = 1 + self._nsteps // self._interval
# Determine the number of saves resulting from the savelist and
# exclude the timesteps which coincide with the regular intervals
n_sl = len([i for i in self._eventtimes if i % self._interval != 0])
# Total number of saves we will perform is given by the sum
number_events = n_si + n_sl
return number_events
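    # Worked sketch (assumed settings): with nsteps=20 and a regular interval of 5, the regular
    # saves give n_si = 1 + 20 // 5 = 5 (steps 0, 5, 10, 15, 20); an event list of [3, 10] adds
    # only n_sl = 1 because step 10 already coincides with the interval, so 6 events in total.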
def is_event(self, n):
r"""Determine if an event occurs right now.
:param n: The current timestep in question.
:returns: ``True`` or ``False``.
"""
if self._interval == 1:
# Save every timestep
return True
elif self._interval != 0 and n % self._interval == 0:
# Save every k-th timestep specified by the interval
return True
elif n in self._eventtimes:
# Save if the n is in the list of timesteps
return True
return False
| bsd-3-clause | 5,725,022,761,244,994,000 | 30.584158 | 109 | 0.560606 | false | 4.212148 | false | false | false |
Jumpscale/jumpscale6_core | lib/JumpScale/baselib/admin2/Node.py | 1 | 9598 | from JumpScale import j
import JumpScale.baselib.remote
import sys
# import importlib
import imp
try:
import ujson as json
except:
import json
import JumpScale.baselib.redis
import copy
import time
import JumpScale.baselib.webdis
from fabric.api import hide
import time
redis=j.clients.redis.getRedisClient("127.0.0.1", 9999)
class Node():
def __init__(self,name,args={}):
        self.name=name
        self.model=j.core.admin.hrd.getDictFromPrefix("node.%s"%name)
self.ssh=None
self.args=args
def executeCmds(self,cmds,die=True,insandbox=False):
scriptRun=self.getScriptRun()
out=scriptRun.out
for line in cmds.split("\n"):
if line.strip()<>"" and line[0]<>"#":
self.log("execcmd",line)
if insandbox:
line2="source /opt/jsbox/activate;%s"%line
else:
line2=line
try:
out+="%s\n"%self.ssh.run(line2)
except BaseException,e:
if die:
self.raiseError("execcmd","error execute:%s"%line,e)
def killProcess(self,filterstr,die=True):
found=self.getPids(filterstr)
for item in found:
self.log("killprocess","kill:%s"%item)
try:
self.ssh.run("kill -9 %s"%item)
except Exception,e:
if die:
self.raiseError("killprocess","kill:%s"%item,e)
def getPids(self,filterstr,die=True):
self.log("getpids","")
with hide('output'):
try:
out=self.ssh.run("ps ax")
except Exception,e:
if die:
self.raiseError("getpids","ps ax",e)
found=[]
for line in out.split("\n"):
if line.strip()<>"":
if line.find(filterstr)<>-1:
line=line.strip()
found.append(int(line.split(" ")[0]))
return found
def deployssh(self):
self.connectSSH()
keyloc="/root/.ssh/id_dsa.pub"
if not j.system.fs.exists(path=keyloc):
if j.console.askYesNo("do you want to generate new local ssh key, if you have one please put it there manually!"):
do=j.system.process.executeWithoutPipe
do("ssh-keygen -t dsa")
else:
j.application.stop()
key=j.system.fs.fileGetContents(keyloc)
self.ssh.ssh_authorize("root",key)
def jpackageStop(self,name,filterstr,die=True):
self.log("jpackagestop","%s (%s)"%(name,filterstr))
try:
self.ssh.run("source /opt/jsbox/activate;jpackage stop -n %s"%name)
except Exception,e:
if die:
self.raiseError("jpackagestop","%s"%name,e)
found=self.getPids(filterstr)
if len(found)>0:
for item in found:
try:
self.ssh.run("kill -9 %s"%item)
except:
pass
    def jpackageStart(self,name,filterstr,nrtimes=1,retry=1,die=True):
found=self.getPids(filterstr)
self.log("jpackagestart","%s (%s)"%(name,filterstr))
for i in range(retry):
if len(found)==nrtimes:
return
scriptRun=self.getScriptRun()
try:
self.ssh.run("source /opt/jsbox/activate;jpackage start -n %s"%name)
except Exception,e:
if die:
self.raiseError("jpackagestart","%s"%name,e)
time.sleep(1)
found=self.getPids(filterstr)
if len(found)<nrtimes:
self.raiseError("jpackagestart","could not jpackageStart %s"%name)
def serviceStop(self,name,filterstr):
self.log("servicestop","%s (%s)"%(name,filterstr))
try:
self.ssh.run("sudo stop %s"%name)
except:
pass
found=self.getPids(filterstr)
scriptRun=self.getScriptRun()
if len(found)>0:
for item in found:
try:
self.ssh.run("kill -9 %s"%item)
except:
pass
found=self.getPids(filterstr)
if len(found)>0:
self.raiseError("servicestop","could not serviceStop %s"%name)
def serviceStart(self,name,filterstr,die=True):
self.log("servicestart","%s (%s)"%(name,filterstr))
found=self.getPids(filterstr)
if len(found)==0:
try:
self.ssh.run("sudo start %s"%name)
except:
pass
found=self.getPids(filterstr)
if len(found)==0 and die:
self.raiseError("servicestart","could not serviceStart %s"%name)
def serviceReStart(self,name,filterstr):
self.serviceStop(name,filterstr)
self.serviceStart(name,filterstr)
def raiseError(self,action,msg,e=None):
scriptRun=self.getScriptRun()
scriptRun.state="ERROR"
if e<>None:
msg="Stack:\n%s\nError:\n%s\n"%(j.errorconditionhandler.parsePythonErrorObject(e),e)
scriptRun.state="ERROR"
scriptRun.error+=msg
for line in msg.split("\n"):
toadd="%-10s: %s\n" % (action,line)
scriptRun.error+=toadd
print "**ERROR** %-10s:%s"%(self.name,toadd)
self.lastcheck=0
j.admin.setNode(self)
j.admin.setNode(self)
raise RuntimeError("**ERROR**")
def log(self,action,msg):
out=""
for line in msg.split("\n"):
toadd="%-10s: %s\n" % (action,line)
print "%-10s:%s"%(self.name,toadd)
out+=toadd
def setpasswd(self,passwd):
#this will make sure new password is set
self.log("setpasswd","")
cl=j.tools.expect.new("sh")
if self.args.seedpasswd=="":
self.args.seedpasswd=self.findpasswd()
try:
            cl.login(remote=self.name,passwd=passwd,seedpasswd=self.args.seedpasswd)
except Exception,e:
self.raiseError("setpasswd","Could not set root passwd.")
    def findpasswd(self):
        self.log("findpasswd","find passwd for superadmin")
        cl=j.tools.expect.new("sh")
        for passwd in j.admin.rootpasswds:
            try:
                cl.login(remote=self.name,passwd=passwd,seedpasswd=None)
            except Exception,e:
                self.log("findpasswd","could not login using:%s"%passwd)
                continue
            self.passwd=passwd
            j.admin.setNode(self)
            return passwd
        return "unknown"
def check(self):
j.base.time.getTimeEpoch()
def connectSSH(self):
ip=self.model["ip"]
port=self.model["port"]
passwd=self.model["passwd"]
self.ssh=j.remote.cuisine.connect(ip,port,passwd)
# if j.system.net.pingMachine(self.args.remote,1):
# self.ip=self.args.remote
# else:
# j.events.opserror_critical("Could not ping node:'%s'"% self.args.remote)
return self.ssh
def uploadFromCfgDir(self,ttype,dest,additionalArgs={}):
dest=j.dirs.replaceTxtDirVars(dest)
cfgdir=j.system.fs.joinPaths(self._basepath, "cfgs/%s/%s"%(j.admin.args.cfgname,ttype))
additionalArgs["hostname"]=self.name
cuapi=self.ssh
if j.system.fs.exists(path=cfgdir):
self.log("uploadcfg","upload from %s to %s"%(ttype,dest))
tmpcfgdir=j.system.fs.getTmpDirPath()
j.system.fs.copyDirTree(cfgdir,tmpcfgdir)
j.dirs.replaceFilesDirVars(tmpcfgdir)
j.application.config.applyOnDir(tmpcfgdir,additionalArgs=additionalArgs)
items=j.system.fs.listFilesInDir(tmpcfgdir,True)
done=[]
for item in items:
partpath=j.system.fs.pathRemoveDirPart(item,tmpcfgdir)
partpathdir=j.system.fs.getDirName(partpath).rstrip("/")
if partpathdir not in done:
cuapi.dir_ensure("%s/%s"%(dest,partpathdir), True)
done.append(partpathdir)
try:
cuapi.file_upload("%s/%s"%(dest,partpath),item)#,True,True)
except Exception,e:
j.system.fs.removeDirTree(tmpcfgdir)
self.raiseError("uploadcfg","could not upload file %s to %s"%(ttype,dest))
j.system.fs.removeDirTree(tmpcfgdir)
    def upload(self,source,dest):
        if not j.system.fs.exists(path=source):
            self.raiseError("upload","could not find path:%s"%source)
        self.log("upload","upload %s to %s"%(source,dest))
        # requires an active ssh connection, see connectSSH()
        cuapi=self.ssh
        items=j.system.fs.listFilesInDir(source,True)
        done=[]
        for item in items:
            partpath=j.system.fs.pathRemoveDirPart(item,source)
            partpathdir=j.system.fs.getDirName(partpath).rstrip("/")
            if partpathdir not in done:
                cuapi.dir_ensure("%s/%s"%(dest,partpathdir), True)
                done.append(partpathdir)
            cuapi.file_upload("%s/%s"%(dest,partpath),item)
def __repr__(self):
roles=",".join(self.roles)
return ("%-10s %-10s %-50s %-15s %-10s %s"%(self.gridname,self.name,roles,self.ip,self.host,self.enabled))
__str__=__repr__
| bsd-2-clause | 7,284,220,591,263,518,000 | 35.218868 | 126 | 0.542196 | false | 3.738995 | false | false | false |
mapado/redis_stats | redis_stats/stats.py | 1 | 2601 | from pprint import pprint
import numpy
import random
import redis
import argparse
from sklearn import cluster
from sklearn import metrics
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import pairwise_distances
def vectorize_key(key):
return dict(enumerate(key.split(':')))
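# e.g. (hypothetical key): vectorize_key('user:42:name') -> {0: 'user', 1: '42', 2: 'name'};
# unvectorize_key() below reverses the mapping back to 'user:42:name'.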
def unvectorize_key(key):
return ':'.join(key.values())
def clusterize_keys(keys_vector, dbname):
vectorizer = DictVectorizer()
X = vectorizer.fit_transform(keys_vector)
if dbname == 'kmeans':
db = cluster.KMeans(n_clusters=10)
else:
X = pairwise_distances(X, metric='cosine')
db = cluster.DBSCAN(min_samples=1)
print "Feature len: {}".format(len(vectorizer.get_feature_names()))
db.fit(X)
labels = db.labels_
nb_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print 'Number of cluster found: {}'.format(nb_clusters)
return labels
parser = argparse.ArgumentParser(description="Configuration for redis stats")
parser.add_argument('-r', '--redis-host',
                    default='localhost', help='Redis hostname (default: localhost)')
parser.add_argument('-p', '--redis-port', type=int,
default=6379, help='Redis port (default: 6379)')
parser.add_argument('--max-keys', type=int,
                    default=None, help='Maximum number of keys to sample for clustering (default: None, use all keys)')
args = parser.parse_args()
print args
redis = redis.StrictRedis(host=args.redis_host, port=args.redis_port)
keys = redis.keys()
print "Keys OK: {}".format(len(keys))
keys_vector = [vectorize_key(key) for key in keys]
if args.max_keys:
random.shuffle(keys_vector)
keys_vector = keys_vector[:args.max_keys]
# X = pairwise_distances(X, metric='cosine')
# db = cluster.DBSCAN()
# import ipdb; ipdb.set_trace()
labels = clusterize_keys(keys_vector, 'kmeans')
groups = {}
keys_map = {}
for index, c in enumerate(labels):
if c == -1:
continue
key = unvectorize_key(keys_vector[index])
if not keys_map.get(c):
keys_map[c] = key
groups[key] = 1
else:
groups[keys_map[c]] += 1
pprint(groups)
second_keys = [vectorize_key(key) for key in groups.keys()]
labels = clusterize_keys(second_keys, 'dbscan')
out = {}
for index, c in enumerate(labels):
key = unvectorize_key(second_keys[index])
    if c not in out:
out[c] = {
'example': key,
'number': groups[key]
}
else:
out[c]['number'] += groups[key]
pprint(out)
#Y = vectorizer.fit_transform(second_keys)
#Y = pairwise_distances(Y, metric='cosine')
#dby = cluster.DBSCAN()
#dby.fit(Y)
#
| gpl-3.0 | 4,981,092,865,338,342,000 | 23.537736 | 79 | 0.658977 | false | 3.259398 | false | false | false |
vesellov/bitdust.devel | transport/udp/udp_interface.py | 1 | 14024 | #!/usr/bin/python
# udp_interface.py
#
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (udp_interface.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
"""
..
module:: udp_interface
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
import six
#------------------------------------------------------------------------------
import os
import sys
#------------------------------------------------------------------------------
try:
from twisted.internet import reactor
except:
sys.exit('Error initializing twisted.internet.reactor in udp_interface.py')
from twisted.web import xmlrpc
from twisted.internet.defer import Deferred, succeed, fail
#------------------------------------------------------------------------------
from logs import lg
from lib import nameurl
#------------------------------------------------------------------------------
_Debug = True
#------------------------------------------------------------------------------
_GateProxy = None
#------------------------------------------------------------------------------
def proxy():
global _GateProxy
return _GateProxy
#------------------------------------------------------------------------------
def idurl_to_id(idurl):
"""
"""
proto, host, port, filename = nameurl.UrlParse(idurl)
assert proto == 'http'
user_id = filename.replace('.xml', '') + '@' + host
if port and port not in ['80', 80, ]:
user_id += ':%s' % str(port)
return user_id
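# Illustrative sketch (assumed identity URLs):
#   idurl_to_id('http://somehost.net/alice.xml')       -> '[email protected]'
#   idurl_to_id('http://somehost.net:8084/alice.xml')  -> '[email protected]:8084'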
def id_to_idurl(user_id):
try:
filename, host = user_id.split('@')
filename += '.xml'
except:
return None
return 'http://%s/%s' % (host, filename)
#------------------------------------------------------------------------------
class GateInterface():
def init(self, xml_rpc_url_or_object):
"""
"""
global _GateProxy
if _Debug:
lg.out(4, 'udp_interface.init %s' % xml_rpc_url_or_object)
if isinstance(xml_rpc_url_or_object, six.string_types):
_GateProxy = xmlrpc.Proxy(xml_rpc_url_or_object, allowNone=True)
else:
_GateProxy = xml_rpc_url_or_object
_GateProxy.callRemote('transport_initialized', 'udp')
return True
def shutdown(self):
"""
"""
from transport.udp import udp_node
global _GateProxy
if _Debug:
lg.out(4, 'udp_interface.shutdown')
udp_node.Destroy()
if _GateProxy:
# del _GateProxy
_GateProxy = None
return succeed(True)
def connect(self, options):
"""
"""
from transport.udp import udp_node
if _Debug:
lg.out(8, 'udp_interface.connect %s' % str(options))
udp_node.A('go-online', options)
return True
def disconnect(self):
"""
"""
from transport.udp import udp_node
if _Debug:
lg.out(4, 'udp_interface.disconnect')
udp_node.A('go-offline')
return succeed(True)
def build_contacts(self, id_obj):
"""
"""
result = []
result.append(
'udp://%s@%s' %
(id_obj.getIDName().lower(),
id_obj.getIDHost()))
if _Debug:
lg.out(4, 'udp_interface.build_contacts : %s' % str(result))
return result
def verify_contacts(self, id_obj):
"""
"""
udp_contact = 'udp://%s@%s' % (id_obj.getIDName().lower(),
id_obj.getIDHost())
if id_obj.getContactIndex(contact=udp_contact) < 0:
if _Debug:
lg.out(
4,
'udp_interface.verify_contacts returning False: udp contact not found or changed')
return False
if _Debug:
lg.out(4, 'udp_interface.verify_contacts returning True')
return True
    def send_file(self, remote_idurl, filename, host, description='', keep_alive=True):
"""
"""
from transport.udp import udp_session
from transport.udp import udp_node
# lg.out(20, 'udp_interface.send_file %s %s %s' % (filename, host, description))
result_defer = Deferred()
# if udp_node.A().state not in ['LISTEN', 'DHT_READ',]:
# result_defer.callback(False)
# lg.out(4, 'udp_interface.send_file WARNING udp_node state is %s' % udp_node.A().state)
# return result_defer
active_sessions = udp_session.get_by_peer_id(host)
if active_sessions:
if description.startswith('Identity') or description.startswith('Ack'):
active_sessions[0].file_queue.insert_outbox_file(
                    filename, description, result_defer, keep_alive=keep_alive)
else:
active_sessions[0].file_queue.append_outbox_file(
                    filename, description, result_defer, keep_alive=keep_alive)
else:
udp_session.add_pending_outbox_file(
                filename, host, description, result_defer, keep_alive=keep_alive)
udp_node.A('connect', host)
return result_defer
def send_file_single(self, remote_idurl, filename, host, description=''):
"""
"""
        return self.send_file(remote_idurl, filename, host, description, keep_alive=False)
def send_keep_alive(self, host):
"""
"""
from transport.udp import udp_session
for sess in udp_session.sessions_by_peer_id().get(host, []):
sess.automat('send-keep-alive')
def connect_to_host(self, host=None, idurl=None):
"""
"""
from transport.udp import udp_node
if not host:
host = idurl_to_id(idurl)
if _Debug:
lg.out(12, 'udp_interface.connect %s' % host)
udp_node.A('connect', host)
def disconnect_from_host(self, host):
"""
"""
def cancel_outbox_file(self, host, filename):
"""
"""
from transport.udp import udp_session
ok = False
for sess in udp_session.sessions().values():
if sess.peer_id != host:
continue
i = 0
while i < len(sess.file_queue.outboxQueue):
fn, descr, result_defer, keep_alive = sess.file_queue.outboxQueue[i]
if fn == filename:
if _Debug:
lg.out(14, 'udp_interface.cancel_outbox_file removed %s in %s' % (os.path.basename(fn), sess))
sess.file_queue.outboxQueue.pop(i)
ok = True
else:
i += 1
udp_session.remove_pending_outbox_file(host, filename)
# for fn, descr, result_defer, single in sess.file_queue.outboxQueue:
# if fn == filename and sess.peer_id == host:
# lg.out(6, 'udp_interface.cancel_outbox_file host=%s want to close session' % host)
# sess.automat('shutdown')
# return True
return ok
def cancel_file_sending(self, transferID):
"""
"""
from transport.udp import udp_session
for sess in udp_session.sessions().values():
for out_file in sess.file_queue.outboxFiles.values():
if out_file.transfer_id and out_file.transfer_id == transferID:
out_file.cancel()
return True
return False
def cancel_file_receiving(self, transferID):
"""
"""
# at the moment for UDP transport we can not stop particular file transfer
# we can only close the whole session which is not we really want
# for sess in udp_session.sessions().values():
# for in_file in sess.file_queue.inboxFiles.values():
# if in_file.transfer_id and in_file.transfer_id == transferID:
# if _Debug:
# lg.out(6, 'udp_interface.cancel_file_receiving transferID=%s want to close session' % transferID)
# sess.automat('shutdown')
# return True
# return False
return False
def list_sessions(self):
"""
"""
from transport.udp import udp_session
return list(udp_session.sessions().values())
def list_streams(self, sorted_by_time=True):
"""
"""
from transport.udp import udp_stream
result = []
for stream in udp_stream.streams().values():
result.append(stream.consumer)
if sorted_by_time:
result.sort(key=lambda stream: stream.started)
return result
def find_session(self, host):
"""
"""
from transport.udp import udp_session
return udp_session.sessions_by_peer_id().get(host, [])
def find_stream(self, stream_id=None, transfer_id=None):
"""
"""
from transport.udp import udp_stream
for stream in udp_stream.streams().values():
if stream_id and stream_id == stream.consumer.stream_id:
return stream.consumer
if transfer_id and transfer_id == stream.consumer.transfer_id:
return stream.consumer
return None
#------------------------------------------------------------------------------
def proxy_errback(x):
if _Debug:
lg.out(6, 'udp_interface.proxy_errback ERROR %s' % x)
return None
#------------------------------------------------------------------------------
def interface_transport_initialized():
"""
"""
if proxy():
return proxy().callRemote('transport_initialized', 'udp').addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_receiving_started(host, new_options={}):
"""
"""
if proxy():
return proxy().callRemote('receiving_started', 'udp', host, new_options).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_receiving_failed(error_code=None):
"""
"""
if proxy():
return proxy().callRemote('receiving_failed', 'udp', error_code).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_disconnected(result=None):
"""
"""
if proxy():
return proxy().callRemote('disconnected', 'udp', result).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
# return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
return succeed(result)
def interface_register_file_sending(host, receiver_idurl, filename, size, description=''):
"""
"""
if proxy():
return proxy().callRemote(
'register_file_sending', 'udp', host, receiver_idurl, filename, size, description).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_register_file_receiving(host, sender_idurl, filename, size):
"""
"""
if proxy():
return proxy().callRemote(
'register_file_receiving', 'udp', host, sender_idurl, filename, size).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_unregister_file_sending(transfer_id, status, bytes_sent, error_message=None):
"""
"""
if proxy():
return proxy().callRemote(
'unregister_file_sending', transfer_id, status,
bytes_sent, error_message).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_unregister_file_receiving(transfer_id, status, bytes_received, error_message=None):
"""
"""
if proxy():
return proxy().callRemote(
'unregister_file_receiving', transfer_id, status,
bytes_received, error_message).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_cancelled_file_sending(host, filename, size, description=None, error_message=None):
"""
"""
if proxy():
return proxy().callRemote(
'cancelled_file_sending', 'udp', host, filename,
size, description, error_message).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
def interface_cancelled_file_receiving(host, filename, size, error_message=None):
"""
"""
if proxy():
return proxy().callRemote(
'cancelled_file_receiving', 'udp', host, filename, size, error_message).addErrback(proxy_errback)
lg.warn('transport_udp is not ready')
return fail(Exception('transport_udp is not ready')).addErrback(proxy_errback)
| agpl-3.0 | 6,750,364,517,932,360,000 | 32.550239 | 125 | 0.559612 | false | 4.135653 | false | false | false |
FabienPean/sofa | applications/plugins/Compliant/examples/angular.py | 11 | 2563 | import Sofa
import numpy as np
import math
from SofaPython import Quaternion as quat
# use numpy vectors directly (watch out, string conversion might be
# lossy)
np.set_string_function( lambda x: ' '.join( map(str, x)),
repr=False )
def createScene(node):
node.createObject('RequiredPlugin',
pluginName = 'Compliant')
ode = node.createObject('CompliantImplicitSolver')
num = node.createObject('SequentialSolver')
# ode.debug = 1
node.dt = 0.01
pos = np.zeros(7)
vel = np.zeros(6)
force = np.zeros(6)
alpha = math.pi / 4.0
q = quat.exp([0, 0, alpha])
pos[:3] = [-0.5, 0, 0]
pos[3:] = q
mass = 1.0
# change this for more fun
dim = np.array([1, 2, 1])
dim2 = dim * dim
inertia = mass / 12.0 * (dim2[ [1, 2, 0] ] + dim2[ [2, 0, 1] ])
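    # Worked values for the box above: dim2 = [1, 4, 1], so
    # inertia = 1/12 * [4+1, 1+1, 1+4] ~ [0.4167, 0.1667, 0.4167] (principal moments of a 1x2x1 box)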
volume = 1.0
force[3:] = quat.rotate(q, [0, 1, 0])
scene = node.createChild('scene')
good = scene.createChild('good')
dofs = good.createObject('MechanicalObject',
template = 'Rigid',
name = 'dofs',
position = pos,
velocity = vel,
showObject = 1)
good.createObject('RigidMass',
template = 'Rigid',
name = 'mass',
mass = mass,
inertia = inertia)
good.createObject('ConstantForceField',
template = 'Rigid',
name = 'ff',
forces = force)
bad = scene.createChild('bad')
pos[:3] = [0.5, 0, 0]
dofs = bad.createObject('MechanicalObject',
template = 'Rigid',
name = 'dofs',
position = pos,
velocity = vel,
showObject = 1)
inertia_matrix = np.diag(inertia)
def cat(x): return ' '.join( map(str, x))
def print_matrix(x):
return '[' + ','.join(map(str, x)) + ']'
bad.createObject('UniformMass',
template = 'Rigid',
name = 'mass',
mass = cat([mass, volume, print_matrix(inertia_matrix / mass)]))
bad.createObject('ConstantForceField',
template = 'Rigid',
name = 'ff',
forces = force)
node.gravity = '0 0 0'
| lgpl-2.1 | -4,607,735,357,707,048,000 | 24.63 | 85 | 0.452595 | false | 3.924962 | false | false | false |
adamallo/beast-mcmc | doc/tutorial/EBSP/scripts/popGraphFromCSV.py | 10 | 4725 | #!/usr/bin/env python
import sys, os.path, math, fnmatch
from glob import glob
import optparse
from popGraphUtil import plotFromCSV, plotFromAll
parser = optparse.OptionParser(" [options] csv-file chart-file")
parser.add_option("", "--xlim", dest="xlim", help="cut off X-axis at this point", default = None)
parser.add_option("", "--ylim", dest="ylim", help="cut off Y-axis at this point", default = None)
parser.add_option("", "--logy", dest="logy", action="store_true",
help="Log scale for Y axis", default = False)
parser.add_option("", "--yscale", dest="yscale", help="Y-axis scale factor", default = 1)
parser.add_option("", "--width", dest="width",
help="figure width. Integral value with units: 50mm 2cm 3 (inches)", default = None)
# parser.add_option("", "--ms", dest="ms", help="", default = None)
parser.add_option("", "--lw", dest="lw", help="Line width", default = None)
parser.add_option("", "--font", dest="font", help="name of font for figure text ", default = None)
parser.add_option("", "--fontsize", dest="fontsize", help="font size of figure text", default = None)
# parser.add_option("", "--axes", dest="axesSize", help="", default = None)
parser.add_option("", "--ticks", dest="ticklabelsize",
help="font size of ticks labels ", default = None)
parser.add_option("", "--nxticks", dest="nxticks",
help="number of X-axis ticks", default = None)
parser.add_option("", "--title", dest="title",
help="Figure title", default = None)
parser.add_option("", "--hist", dest="hist", action="store_true",help="", default = False)
parser.add_option("", "--alldemo", dest="alldfile",
help="plot all demographic functions in this file",
default = None)
parser.add_option("-a", "--alphaout", dest="alpha", help="transparancy value of outline.", default = 1)
parser.add_option("", "--alpha", dest="alldalpha",
help="transparancy value to use when plotting all" +
" demographic. 1 - no transparancy, 0 fully transparent.", default = 0.1)
parser.add_option("", "--ratio", dest="ratio",
help="height/width ratio of figure.", default = 0.75)
options, args = parser.parse_args()
if len(args) != 2 :
print >> sys.stderr, "usage:", sys.argv[0], "csv-file", "chart-file"
sys.exit(1)
name = args[0]
trueDemo = None
plotOptionsDict = { 'alpha' : float(options.alpha),
'logy' : options.logy,
'doHist': options.hist }
if options.lw :
plotOptionsDict['mainlw'] = float(options.lw)
plotOptionsDict['hpdOutline'] = float(options.lw)/2
labelsFont = None
if options.font :
import matplotlib.font_manager
labelsFont = matplotlib.font_manager.FontProperties(options.font)
if labelsFont.get_name() != options.font :
print >> sys.stderr, "*warning:", labelsFont.get_name(),"!=",options.font
if options.fontsize :
labelsFont.set_size(float(options.fontsize))
import pylab
def convertToInches(w) :
if w[-2:] == 'mm' :
return int(w[:-2]) / 25.4
if w[-2:] == 'cm' :
return int(w[:-2]) / 2.54
return int(w)
if options.width is None :
fig = pylab.figure()
else :
w = convertToInches(options.width)
h = w * float(options.ratio)
fig = pylab.figure(figsize=(w,h))
if labelsFont :
labelFontDict = {'fontproperties': labelsFont}
plotOptionsDict['labelProps'] = labelFontDict
if options.alldfile:
pylab.ioff()
plotFromAll(options.alldfile, yScale = float(options.yscale),
logy = options.logy, alpha = float(options.alldalpha))
plotFromCSV(name, trueDemo, yScale = float(options.yscale), **plotOptionsDict)
if options.xlim :
pylab.xlim((0, float(options.xlim)))
if options.ylim :
pylab.ylim((0, float(options.ylim)))
if options.title :
pylab.title(options.title)
pylab.legend(loc='best')
if options.nxticks :
from matplotlib.ticker import MaxNLocator
pylab.gca().xaxis.set_major_locator(MaxNLocator(int(options.nxticks)))
if labelsFont :
ltext = pylab.gca().get_legend().get_texts()
for l in ltext :
pylab.setp(l, fontproperties = labelsFont)
if options.ticklabelsize :
s = float(options.ticklabelsize)
if labelsFont :
fp = matplotlib.font_manager.FontProperties(labelsFont.get_name())
fp.set_size(s)
fp = {'fontproperties' : fp}
else :
fp = dict()
for p in ('xticklabels', 'yticklabels') :
l = pylab.getp(pylab.gca(), p)
pylab.setp(l, fontsize=s, **fp)
if options.alldfile:
pylab.ion()
pylab.savefig(args[1], dpi=300)
| lgpl-2.1 | -7,970,616,197,288,205,000 | 30.8125 | 103 | 0.622646 | false | 3.421434 | false | false | false |
h3llrais3r/Auto-Subliminal | autosubliminal/util/queue.py | 1 | 1725 | # coding=utf-8
import logging
import threading
from functools import wraps
import autosubliminal
log = logging.getLogger(__name__)
_lock = threading.Lock()
def get_wanted_queue_lock():
with _lock:
if autosubliminal.WANTEDQUEUELOCK:
log.debug('Cannot get wanted queue lock, skipping')
return False
else:
log.debug('Getting wanted queue lock')
autosubliminal.WANTEDQUEUELOCK = True
return True
def release_wanted_queue_lock():
with _lock:
if autosubliminal.WANTEDQUEUELOCK:
log.debug('Releasing wanted queue lock')
autosubliminal.WANTEDQUEUELOCK = False
else:
log.warning('Trying to release a wanted queue lock while there is no lock')
def release_wanted_queue_lock_on_exception(func):
"""
Decorator to force the release of the wanted queue lock on unexpected exceptions.
    It should be used wherever get_wanted_queue_lock() is used, so that the lock is also released on unexpected exceptions.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
with _lock:
if autosubliminal.WANTEDQUEUELOCK:
log.exception('Releasing wanted queue lock with force due to exception')
autosubliminal.WANTEDQUEUELOCK = False
raise e
return wrapper
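# Illustrative use of the decorator above (the scanner function name is made up):
#
#   @release_wanted_queue_lock_on_exception
#   def scan_wanted_items():
#       if not get_wanted_queue_lock():
#           return
#       ...  # process autosubliminal.WANTEDQUEUE
#       release_wanted_queue_lock()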
def count_wanted_queue_items(item_type=None):
size = 0
if not item_type:
size = len(autosubliminal.WANTEDQUEUE)
else:
for item in autosubliminal.WANTEDQUEUE:
if item.type == item_type:
size += 1
return size
| gpl-3.0 | -1,657,668,799,455,229,700 | 26.822581 | 119 | 0.627246 | false | 4.097387 | false | false | false |
h2non/riprova | riprova/strategies/exponential.py | 1 | 6545 | # -*- coding: utf-8 -*-
import time
import random
from ..backoff import Backoff
from ..constants import INT_ERROR, POS_ERROR
class ExponentialBackOff(Backoff):
"""
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows
exponentially.
`next()` returned interval is calculated using the following formula:
randomized interval = (
interval * (random value in range [1 - factor, 1 + factor]))
`next()` will range between the randomization factor percentage below
and above the retry interval.
For example, given the following parameters:
    - interval = 2
- factor = 0.5
- multiplier = 2
the actual backoff period used in the next retry attempt will range
between 1 and 3 seconds, multiplied by the exponential, that is, between
2 and 6 seconds.
    Note: `max_interval` caps the `interval` and not the randomized interval.
If the time elapsed since an `ExponentialBackOff` instance is created
goes past the `max_elapsed` time, then the method `next()` starts
returning `Backoff.STOP`.
The elapsed time can be reset by calling `reset()``.
Example: Given the following default arguments, for 10 tries the sequence
will be, and assuming we go over the `max_elapsed` on the 10th try::
Request # RetryInterval (seconds) Randomized Interval (seconds)
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 Backoff.STOP
For the opposite backoff strategy, see `riprova.ConstantBackoff`.
`ExponentialBackOff` is expected to run in a single-thread context.
Arguments:
        interval (int|float): interval time in seconds.
            Defaults to `0.5`.
factor (int|float): multiplier factor for exponential retries.
Defaults to `0.5`. It should be between `0` and `1` number range.
        max_interval (int): max allowed interval in seconds.
Defaults to `60`.
max_elapsed (int): max elapsed total allowed time in seconds.
Defaults to `15` minutes == `15 * 60` seconds.
multiplier (int|float): exponential multiplier.
Defaults to `1.5`.
Raises:
AssertionError: in case of invalid params.
Usage::
@riprova.retry(backoff=riprova.ExponentialBackOff(interval=100))
def task(x):
return x * x
"""
def __init__(self,
interval=.5,
factor=0.5,
max_interval=60,
max_elapsed=15 * 60,
multiplier=1.5):
# Assert valid params
assert isinstance(interval, (int, float)), INT_ERROR.format('interval')
assert isinstance(multiplier, (int, float)), INT_ERROR.format('multiplier') # noqa
assert isinstance(factor, (int, float)), INT_ERROR.format('factor')
assert isinstance(max_elapsed, (int, float)), INT_ERROR.format('max_elapsed') # noqa
assert isinstance(max_interval, int), INT_ERROR.format('max_interval')
assert interval >= 0, POS_ERROR.format('interval')
assert multiplier >= 0, POS_ERROR.format('multiplier')
self.started = None # start time in seconds
self.multiplier = multiplier
self.max_elapsed = int(max_elapsed * 1000)
self.max_interval = int(max_interval * 1000)
self.factor = min(max(factor, 0), 1)
self.interval = int(interval * 1000)
self.current_interval = self.interval
@property
def elapsed(self):
"""
Returns the elapsed time since an `ExponentialBackOff` instance
is created and is reset when `reset()` is called.
"""
return int(time.time() * 1000) - self.started
def reset(self):
"""
Reset the interval back to the initial retry interval and
restarts the timer.
"""
self.started = None
self.current_interval = self.interval
def next(self):
"""
Returns the number of seconds to wait before the next try,
otherwise returns `Backoff.STOP`, which indicates the max number
of retry operations were reached.
Returns:
int: time to wait in seconds before the next try.
"""
# Store start time
if self.started is None:
self.started = int(time.time() * 1000)
# Make sure we have not gone over the maximum elapsed time.
if self.max_elapsed != 0 and self.elapsed > self.max_elapsed:
return Backoff.STOP
# Get random exponential interval
interval = self._get_random_value()
# Incremental interval
self._increment_interval()
# Return interval
return round(interval / 1000, 2)
def _increment_interval(self):
"""
Increments the current interval by multiplying it with the multiplier.
"""
# Check for overflow, if overflow is detected set the current
# interval to the max interval.
if self.current_interval >= (self.max_interval / self.multiplier):
self.current_interval = self.max_interval
else:
self.current_interval = self.current_interval * self.multiplier
def _get_random_value(self):
"""
Returns a random value from the following interval:
[factor * current_interval, factor * current_interval]
Returns:
int: interval seconds to wait before next try.
"""
rand = random.random()
delta = self.factor * rand
min_interval = self.current_interval - delta
max_interval = self.current_interval + delta
# Get a random value from the range [min_interval, max_interval].
# The formula used below has a +1 because if the min_interval is 1 and
# the max_interval is 3 then we want a 33% chance for selecting either
# 1, 2 or 3.
return int(min_interval + (rand * (max_interval - min_interval + 1)))
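# Illustrative direct use of this strategy outside of the @riprova.retry
# decorator (operation() below is a placeholder, not part of riprova):
#
#   backoff = ExponentialBackOff(interval=0.5, max_elapsed=60)
#   delay = backoff.next()
#   while delay != Backoff.STOP:
#       time.sleep(delay)
#       if operation():
#           break
#       delay = backoff.next()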
| mit | 4,855,955,368,983,859,000 | 35.769663 | 92 | 0.592819 | false | 4.360426 | false | false | false |
google/pyringe | pyringe/inferior.py | 1 | 23180 | #! /usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module handling communication with gdb.
Users of this module probably want to use the Inferior class, as it provides a
clean interface for communicating with gdb and a couple of functions for
performing common tasks (e.g. listing threads, moving around the stack, etc.)
"""
# TODO: split this file in two, with GdbProxy in a separate file.
import collections
import errno
import functools
import json
import logging
import os
import re
import select
import signal
import subprocess
import tempfile
import time
# Setting these overrides the defaults. See _SymbolFilePath.
SYMBOL_FILE = None # default: <PAYLOAD_DIR>/python2.7.debug
PAYLOAD_DIR = os.path.join(os.path.dirname(__file__), 'payload')
TIMEOUT_DEFAULT = 3
TIMEOUT_FOREVER = None
_GDB_STARTUP_FILES = [
'importsetup.py',
'gdb_service.py',
]
_GDB_ARGS = ['gdb', '--nw', '--quiet', '--batch-silent']
def _SymbolFilePath():
return SYMBOL_FILE or os.path.join(PAYLOAD_DIR, 'python2.7.debug')
class Error(Exception):
pass
class ProxyError(Error):
"""A proxy for an exception that happened within gdb."""
class TimeoutError(Error):
pass
class PositionError(Error):
"""Raised when a nonsensical debugger position is requested."""
class GdbProcessError(Error):
"""Thrown when attempting to start gdb when it's already running."""
### RPC protocol for gdb service ###
#
# In order to ensure compatibility with all versions of python JSON was
# chosen as the main data format for the communication protocol between
# the gdb-internal python process and the process using this module.
# RPC requests to GdbService ('the service') are JSON objects containing exactly
# two keys:
# * 'func' : the name of the function to be called in the service. RPCs for
# function names starting with _ will be rejected by the service.
# * 'args' : An array containing all the parameters for the function. Due to
# JSON's limitations, only positional arguments work. Most API
# functions require a 'position' argument which is required to be a
# 3-element array specifying the selected pid, python thread id and
# depth of the selected frame in the stack (where 0 is the outermost
# frame).
# The session is terminated upon sending an RPC request for the function
# '__kill__' (upon which args are ignored).
#
# RPC return values are not wrapped in JSON objects, but are bare JSON
# representations of return values.
# Python class instances (old and new-style) will also be serialized to JSON
# objects with keys '__pyringe_type_name__' and '__pyringe_address__', which
# carry the expected meaning. The remaining keys in these objects are simple
# JSON representations of the attributes visible in the instance (this means the
# object includes class-level attributes, but these are overshadowed by any
# instance attributes. (There is currently no recursion in this representation,
# only one level of object references is serialized in this way.)
# Should an exception be raised to the top level within the service, it will
# write a JSON-representation of the traceback string to stderr
# TODO: add message-id to the protocol to make sure that canceled operations
# that never had their output read don't end up supplying output for the wrong
# command
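# For illustration only (the function name and values below are made up), one
# request/response exchange on this protocol could look like:
#   -> {"func": "StackDepth", "args": [[1234, 139993582462720, -1]]}
#   <- 7
# and a session is terminated with:
#   -> {"func": "__kill__", "args": []}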
class ProxyObject(object):
def __init__(self, attrdict):
self.__dict__ = attrdict
def __repr__(self):
return ('<proxy of %s object at remote 0x%x>'
% (self.__pyringe_type_name__, self.__pyringe_address__))
class GdbProxy(object):
"""The gdb that is being run as a service for the inferior.
Most of the logic of this service is actually run from within gdb, this being
a stub which handles RPC for that service. Communication with that service
is done by pushing around JSON encoded dicts specifying RPC requests and
their results. Automatic respawning is not handled by this class and must be
implemented on top of this if it is to be available.
"""
firstrun = True
def __init__(self, args=None, arch=None):
super(GdbProxy, self).__init__()
gdb_version = GdbProxy.Version()
if gdb_version < (7, 4, None) and GdbProxy.firstrun:
# The user may have a custom-built version, so we only warn them
logging.warning('Your version of gdb may be unsupported (< 7.4), '
'proceed with caution.')
GdbProxy.firstrun = False
arglist = _GDB_ARGS
# Due to a design flaw in the C part of the gdb python API, setting the
# target architecture from within a running script doesn't work, so we have
# to do this with a command line flag.
if arch:
arglist = arglist + ['--eval-command', 'set architecture ' + arch]
arglist = (arglist +
['--command=' + os.path.join(PAYLOAD_DIR, fname)
for fname in _GDB_STARTUP_FILES])
# Add version-specific args
if gdb_version >= (7, 6, 1):
# We want as little interference from user settings as possible,
# but --nh was only introduced in 7.6.1
arglist.append('--nh')
if args:
arglist.extend(args)
# We use a temporary file for pushing IO between pyringe and gdb so we
# don't have to worry about writes larger than the capacity of one pipe
# buffer and handling partial writes/reads.
# Since file position is automatically advanced by file writes (so writing
# then reading from the same file will yield an 'empty' read), we need to
# reopen the file to get different file offset. We can't use os.dup for
# this because of the way os.dup is implemented.
outfile_w = tempfile.NamedTemporaryFile(mode='w', bufsize=1)
errfile_w = tempfile.NamedTemporaryFile(mode='w', bufsize=1)
self._outfile_r = open(outfile_w.name)
self._errfile_r = open(errfile_w.name)
logging.debug('Starting new gdb process...')
self._process = subprocess.Popen(
bufsize=0,
args=arglist,
stdin=subprocess.PIPE,
stdout=outfile_w.file,
stderr=errfile_w.file,
close_fds=True,
preexec_fn=os.setpgrp,
)
outfile_w.close()
errfile_w.close()
self._poller = select.poll()
self._poller.register(self._outfile_r.fileno(),
select.POLLIN | select.POLLPRI)
self._poller.register(self._errfile_r.fileno(),
select.POLLIN | select.POLLPRI)
def __getattr__(self, name):
"""Handles transparent proxying to gdb subprocess.
This returns a lambda which, when called, sends an RPC request to gdb
Args:
name: The method to call within GdbService
Returns:
The result of the RPC.
"""
return lambda *args, **kwargs: self._Execute(name, *args, **kwargs)
def Kill(self):
"""Send death pill to Gdb and forcefully kill it if that doesn't work."""
try:
if self.is_running:
self.Detach()
if self._Execute('__kill__') == '__kill_ack__':
# acknowledged, let's give it some time to die in peace
time.sleep(0.1)
except (TimeoutError, ProxyError):
logging.debug('Termination request not acknowledged, killing gdb.')
if self.is_running:
# death pill didn't seem to work. We don't want the inferior to get killed
# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,
# which makes it disable instruction breakpoints for the time being.
os.kill(self._process.pid, signal.SIGINT)
# Since SIGINT has higher priority (with signal number 2) than SIGTERM
# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.
self._process.terminate()
self._process.wait()
self._errfile_r.close()
self._outfile_r.close()
@property
def is_running(self):
return self._process.poll() is None
@staticmethod
def Version():
"""Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7,7)
"""
output = subprocess.check_output(['gdb', '--version']).split('\n')[0]
# Example output (Arch linux):
# GNU gdb (GDB) 7.7
# Example output (Debian sid):
# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)
# Example output (Debian wheezy):
# GNU gdb (GDB) 7.4.1-debian
# Example output (centos 2.6.32):
# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)
# As we've seen in the examples above, versions may be named very liberally
# So we assume every part of that string may be the "real" version string
# and try to parse them all. This too isn't perfect (later strings will
# overwrite information gathered from previous ones), but it should be
# flexible enough for everything out there.
major = None
minor = None
micro = None
for potential_versionstring in output.split():
version = re.split('[^0-9]', potential_versionstring)
try:
major = int(version[0])
except (IndexError, ValueError):
pass
try:
minor = int(version[1])
except (IndexError, ValueError):
pass
try:
micro = int(version[2])
except (IndexError, ValueError):
pass
return (major, minor, micro)
# On JSON handling:
# The python2 json module ignores the difference between unicode and str
# objects, emitting only unicode objects (as JSON is defined as
# only having unicode strings). In most cases, this is the wrong
# representation for data we were sent from the inferior, so we try to convert
# the unicode objects to normal python strings to make debugger output more
# readable and to make "real" unicode objects stand out.
# Luckily, the json module just throws an exception when trying to serialize
# binary data (that is, bytearray in py2, byte in py3).
# The only piece of information deemed relevant that is lost is the type of
# non-string dict keys, as these are not supported in JSON. {1: 1} in the
# inferior will thus show up as {"1": 1} in the REPL.
# Properly transmitting python objects would require either substantially
# building on top of JSON or switching to another serialization scheme.
def _TryStr(self, maybe_unicode):
try:
return str(maybe_unicode)
except UnicodeEncodeError:
return maybe_unicode
def _JsonDecodeList(self, data):
rv = []
for item in data:
if isinstance(item, unicode):
item = self._TryStr(item)
elif isinstance(item, list):
item = self._JsonDecodeList(item)
rv.append(item)
return rv
def _JsonDecodeDict(self, data):
"""Json object decode hook that automatically converts unicode objects."""
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = self._TryStr(key)
if isinstance(value, unicode):
value = self._TryStr(value)
elif isinstance(value, list):
value = self._JsonDecodeList(value)
rv[key] = value
if '__pyringe_type_name__' in data:
# We're looking at a proxyobject
rv = ProxyObject(rv)
return rv
# There is a reason for this messy method signature, it's got to do with
# python 2's handling of function arguments, how this class is expected to
# behave and the responsibilities of __getattr__. Suffice it to say that if
# this were python 3, we wouldn't have to do this.
def _Execute(self, funcname, *args, **kwargs):
"""Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call.
"""
wait_for_completion = kwargs.get('wait_for_completion', False)
rpc_dict = {'func': funcname, 'args': args}
self._Send(json.dumps(rpc_dict))
timeout = TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT
result_string = self._Recv(timeout)
try:
result = json.loads(result_string, object_hook=self._JsonDecodeDict)
if isinstance(result, unicode):
result = self._TryStr(result)
elif isinstance(result, list):
result = self._JsonDecodeList(result)
except ValueError:
raise ValueError('Response JSON invalid: ' + str(result_string))
except TypeError:
raise ValueError('Response JSON invalid: ' + str(result_string))
return result
def _Send(self, string):
"""Write a string of data to the gdb-internal python interpreter."""
self._process.stdin.write(string + '\n')
def _Recv(self, timeout):
"""Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb in after the
specified time.
Returns:
      The current contents of gdb's stdout buffer, read until the next
      newline.
"""
buf = ''
# The messiness of this stems from the "duck-typiness" of this function.
# The timeout parameter of poll has different semantics depending on whether
# it's <=0, >0, or None. Yay.
wait_for_line = timeout is TIMEOUT_FOREVER
deadline = time.time() + (timeout if not wait_for_line else 0)
def TimeLeft():
return max(1000 * (deadline - time.time()), 0)
continue_reading = True
while continue_reading:
poll_timeout = None if wait_for_line else TimeLeft()
fd_list = [event[0] for event in self._poller.poll(poll_timeout)
if event[1] & (select.POLLIN | select.POLLPRI)]
if not wait_for_line and TimeLeft() == 0:
continue_reading = False
if self._outfile_r.fileno() in fd_list:
buf += self._outfile_r.readline()
if buf.endswith('\n'):
return buf
# GDB-internal exception passing
if self._errfile_r.fileno() in fd_list:
exc = self._errfile_r.readline()
if exc:
exc_text = '\n-----------------------------------\n'
exc_text += 'Error occurred within GdbService:\n'
try:
exc_text += json.loads(exc)
except ValueError:
# whatever we got back wasn't valid JSON.
# This usually means we've run into an exception before the special
# exception handling was turned on. The first line we read up there
# will have been "Traceback (most recent call last):". Obviously, we
# want the rest, too, so we wait a bit and read it.
deadline = time.time() + 0.5
while self.is_running and TimeLeft() > 0:
exc += self._errfile_r.read()
try:
exc_text += json.loads(exc)
except ValueError:
exc_text = exc
raise ProxyError(exc_text)
# timeout
raise TimeoutError()
class Inferior(object):
"""Class modeling the inferior process.
Defines the interface for communication with the inferior and handles
debugging context and automatic respawning of the underlying gdb service.
"""
_gdb = None
_Position = collections.namedtuple('Position', 'pid tid frame_depth') # pylint: disable=invalid-name
# tid is the thread ident as reported by threading.current_thread().ident
# frame_depth is the 'depth' (as measured from the outermost frame) of the
# requested frame. A value of -1 will hence mean the most recent frame.
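  # For example (values illustrative only), _Position(pid=1234, tid=None,
  # frame_depth=-1) means "process 1234, no thread selected yet, innermost frame".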
def __init__(self, pid, auto_symfile_loading=True, architecture='i386:x86-64'):
super(Inferior, self).__init__()
self.position = self._Position(pid=pid, tid=None, frame_depth=-1)
self._symbol_file = None
self.arch = architecture
self.auto_symfile_loading = auto_symfile_loading
# Inferior objects are created before the user ever issues the 'attach'
# command, but since this is used by `Reinit`, we call upon gdb to do this
# for us.
if pid:
self.StartGdb()
def needsattached(func):
"""Decorator to prevent commands from being used when not attached."""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not self.attached:
raise PositionError('Not attached to any process.')
return func(self, *args, **kwargs)
return wrap
@needsattached
def Cancel(self):
self.ShutDownGdb()
def Reinit(self, pid, auto_symfile_loading=True):
"""Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
"""
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch)
@property
def gdb(self):
# when requested, make sure we have a gdb session to return
# (in case it crashed at some point)
if not self._gdb or not self._gdb.is_running:
self.StartGdb()
return self._gdb
def StartGdb(self):
"""Starts gdb and attempts to auto-load symbol file (unless turned off).
Raises:
GdbProcessError: if gdb is already running
"""
if self.attached:
raise GdbProcessError('Gdb is already running.')
self._gdb = GdbProxy(arch=self.arch)
self._gdb.Attach(self.position)
if self.auto_symfile_loading:
try:
self.LoadSymbolFile()
except (ProxyError, TimeoutError) as err:
self._gdb = GdbProxy(arch=self.arch)
self._gdb.Attach(self.position)
if not self.gdb.IsSymbolFileSane(self.position):
logging.warning('Failed to automatically load a sane symbol file, '
                          'most functionality will be unavailable until symbol '
                          'file is provided.')
logging.debug(err.message)
def ShutDownGdb(self):
if self._gdb and self._gdb.is_running:
self._gdb.Kill()
self._gdb = None
def LoadSymbolFile(self, path=None):
# As automatic respawning of gdb may happen between calls to this, we have
# to remember which symbol file we're supposed to load.
if path:
self._symbol_file = path
s_path = self._symbol_file or _SymbolFilePath()
logging.debug('Trying to load symbol file: %s' % s_path)
if self.attached:
self.gdb.LoadSymbolFile(self.position, s_path)
if not self.gdb.IsSymbolFileSane(self.position):
logging.warning('Symbol file failed sanity check, '
'proceed at your own risk')
@needsattached
def Backtrace(self):
return self.gdb.BacktraceAt(self.position)
@needsattached
def Up(self):
depth = self.position.frame_depth
if self.position.frame_depth < 0:
depth = self.gdb.StackDepth(self.position) + self.position.frame_depth
if not depth:
raise PositionError('Already at outermost stack frame')
self.position = self._Position(pid=self.position.pid,
tid=self.position.tid,
frame_depth=depth-1)
@needsattached
def Down(self):
if (self.position.frame_depth + 1 >= self.gdb.StackDepth(self.position)
or self.position.frame_depth == -1):
raise PositionError('Already at innermost stack frame')
frame_depth = self.position.frame_depth + 1
self.position = self._Position(pid=self.position.pid,
tid=self.position.tid,
frame_depth=frame_depth)
@needsattached
def Lookup(self, var_name):
return self.gdb.LookupInFrame(self.position, var_name)
@needsattached
def InferiorLocals(self):
return self.gdb.InferiorLocals(self.position)
@needsattached
def InferiorGlobals(self):
return self.gdb.InferiorGlobals(self.position)
@needsattached
def InferiorBuiltins(self):
return self.gdb.InferiorBuiltins(self.position)
@property
def is_running(self):
if not self.position.pid:
return False
try:
# sending a 0 signal to a process does nothing
os.kill(self.position.pid, 0)
return True
except OSError as err:
# We might (for whatever reason) simply not be permitted to do this.
if err.errno == errno.EPERM:
        logging.debug('Received EPERM when trying to signal inferior.')
return True
return False
@property
def pid(self):
return self.position.pid
@property
@needsattached
def threads(self):
# return array of python thread idents. Unfortunately, we can't easily
# access the given thread names without taking the GIL.
return self.gdb.ThreadIds(self.position)
@property
@needsattached
def current_thread(self):
threads = self.threads
if not threads:
self.position = self._Position(pid=self.position.pid, tid=None,
frame_depth=-1)
return None
if not self.position.tid or self.position.tid not in threads:
self.position = self._Position(pid=self.position.pid, tid=self.threads[0],
frame_depth=-1)
return self.position.tid
@needsattached
def SelectThread(self, tid):
if tid in self.gdb.ThreadIds(self.position):
self.position = self._Position(self.position.pid, tid, frame_depth=-1)
else:
logging.error('Thread ' + str(tid) + ' does not exist')
@needsattached
def Continue(self):
self.gdb.Continue(self.position)
@needsattached
def Interrupt(self):
return self.gdb.Interrupt(self.position)
@property
def attached(self):
if (self.position.pid
and self.is_running
and self._gdb
and self._gdb.is_running):
return True
return False
| apache-2.0 | 8,857,134,920,190,602,000 | 35.389325 | 103 | 0.666437 | false | 3.995863 | false | false | false |
ganboing/malwarecookbook | 11/9/scd.py | 2 | 2780 | import immlib
import getopt, string
import immutils
import os
def usage(imm):
imm.Log("Usage: !scd -f FILETOCHECK")
def checkop(op):
instr = op.getDisasm()
junk = ["IN", "OUT", "LES", "FSUBR", "DAA",
"BOUND", "???", "AAM", "STD", "FIDIVR",
"FCMOVNE", "FADD", "LAHF", "SAHF", "CMC",
"FILD", "WAIT", "RETF", "SBB", "ADC",
"IRETD", "LOCK", "POP SS", "POP DS", "HLT",
"LEAVE", "ARPL", "AAS", "LDS", "SALC",
"FTST", "FIST", "PADD", "CALL FAR", "FSTP",
"AAA", "FIADD"]
for j in junk:
if instr.startswith(j):
return False
if op.isCall() or op.isJmp():
if op.getJmpAddr() > 0x7FFFFFFF:
return False
return True
def main (args):
imm = immlib.Debugger()
scfile = None
conditional = False
try:
opts, argo = getopt.getopt(args, "f:")
except getopt.GetoptError:
usage(imm)
return
for o,a in opts:
if o == "-f":
try:
scfile = a
except ValueError, msg:
return "Invalid argument: %s" % a
if scfile == None or not os.path.isfile(scfile):
usage(imm)
return
# Get something going so the context is valid
imm.openProcess("c:\\windows\\system32\\notepad.exe")
# Read file contents
buf = open(scfile, "rb").read()
cb = len(buf)
# Copy the contents to process memory
mem = imm.remoteVirtualAlloc(cb)
imm.writeMemory(mem, buf)
# Clarify the start and end of the buffer
start = mem
end = mem + cb
table = imm.createTable('Shell Code Detect',\
['Ofs', 'Abs', 'Op', 'Op2', 'Op3'])
while start < end:
# Disassemble the instruction
d = imm.disasm(start)
c = d.getSize()
# Skip anything that isn't a jump/call
if (not d.isCall()) and (not d.isJmp()):
start += c
continue
# Get the destination address of the jump/call
dest = d.getJmpAddr()
# The destination must land within the shell code
# buffer or else we've just located a false positive
if dest < start or dest > end:
start += c
continue
# Disassemble the first 3 ops at destination
op2 = imm.disasm(dest)
op3 = imm.disasm(dest+op2.getSize())
op4 = imm.disasm(dest+op2.getSize()+op3.getSize())
# Use a simple validity check to reduce fp's
if checkop(op2) and checkop(op3) and checkop(op4):
table.add('', ['0x%x' % (start - mem),\
'0x%x' % start,\
'%s' % d.getDisasm(),\
'%s' % op2.getDisasm(),\
'%s' % op3.getDisasm()])
start += c
return "done" | gpl-3.0 | 4,710,341,407,774,441,000 | 27.96875 | 60 | 0.524101 | false | 3.402693 | false | false | false |
rajadg/Python | PySamples/py_database/sqlalchemy/basic/simple.py | 1 | 1280 | '''
Created on 04-Feb-2014
@author: dgraja
'''
from sqlalchemy import create_engine
from sqlalchemy import Table
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import MetaData
from sqlalchemy import Column
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
import os
#help ('sqlalchemy')
#dir ('sqlalchemy')
working_directory = 'D:\\Temp\\Python'
os.chdir(working_directory)
db_path = os.path.join("D:\\Temp\\Python", "test01.db")
print db_path
engine = create_engine('sqlite:///' +db_path)
metadata = MetaData()
users = Table(
'users', metadata,
Column('id', Integer, primary_key=True),
Column('username', String(512), nullable=False),
Column('email', String(512), nullable=True)
)
metadata.create_all(engine)
# Connect to the actual database
conn = engine.connect()
def record(i):
return ('User ' + str(i), "user" + str(i) + "@example.com")
for i in range(10) :
rec = record(i)
# Create an INSERT expression
insert_expression = users.insert().values(username=rec[0], email=rec[1])
print str(insert_expression)
# execute the insert query
result = conn.execute(insert_expression)
# print the result
print (result.inserted_primary_key)
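# Read back what was just inserted (a minimal sketch reusing the `users` table
# and connection defined above):
select_expression = users.select()
for row in conn.execute(select_expression):
    print row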
| gpl-3.0 | -8,218,855,231,176,817,000 | 23.150943 | 76 | 0.710156 | false | 3.575419 | false | false | false |
thorwhalen/ut | dacc/parallel_timeseries_dacc.py | 1 | 6444 | __author__ = 'thor'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
from ut.pplot.to import simple_plotly
class ParallelTimeSeriesDacc(object):
def __init__(self, data_source, date_var, index_var, ts_vars_name='vars', **kwargs):
if isinstance(data_source, pd.DataFrame):
self.df = data_source
elif isinstance(data_source, str):
if data_source == 'elasticsearch':
from ut.dacc.es.com import ElasticCom
es_kwargs = kwargs.get('es_kwargs', {})
if 'index' in list(kwargs.keys()):
es_kwargs['index'] = kwargs.pop('index')
if 'data_type' in list(kwargs.keys()):
es_kwargs['data_type'] = kwargs.pop('data_type')
ec = ElasticCom(**es_kwargs)
search_kwargs = kwargs.get('search_kwargs', {})
search_kwargs = dict({'_id': False}, **search_kwargs)
exclude_fields = search_kwargs.pop('exclude_fields', [])
self.df = ec.search_and_export_to_df(exclude_fields=exclude_fields, **search_kwargs)
else:
raise NotImplementedError("Unrecognized data_source: {}".format(data_source))
else:
raise NotImplementedError("Unrecognized data_source type: {}".format(type(data_source)))
assert set([date_var, index_var]).issubset(self.df.columns), \
"Both {} and {} must be columns of the data".format(date_var, index_var)
self.date_var = date_var
self.index_var = index_var
self.ts_vars_name = ts_vars_name
self.var_names = [x for x in self.df.columns if x not in [self.date_var, self.index_var]]
self.df.columns.set_names([self.ts_vars_name], inplace=True)
# pivoting data
original_length = len(self.df)
self.df.drop_duplicates(subset=[self.index_var, self.date_var], inplace=True)
if len(self.df) != original_length:
raise RuntimeWarning("There are duplicate ({},{}), so I'm deleting offending records"
.format(self.index_var, self.date_var))
        # the pivot needs a valid date for every record, so drop rows without one
        self.df = self.df[self.df[self.date_var].notnull()]
self.df = self.df.pivot(index=self.date_var, columns=self.index_var)
self.df.sort_index(inplace=True)
def vars_list(self, df=None):
if df is None:
df = self.df
return np.unique(df.columns.get_level_values(level=0))
def indices_list(self, df=None):
if df is None:
df = self.df
return np.unique(df.columns.get_level_values(level=1))
@staticmethod
def drop_columns_with_insufficient_dates(d, min_num_of_dates):
"""
Drop columns that don't have a minimum number of non-NaN dates
"""
print(("original shape: {}".format(d.shape)))
num_of_dates = (~d.isnull()).sum()
num_of_dates = num_of_dates[num_of_dates > min_num_of_dates].sort(inplace=False, ascending=False)
d = d[num_of_dates.index.values].dropna(how='all')
print(("shape with at least {} dates: {}".format(min_num_of_dates, d.shape)))
return d
@staticmethod
def latest_full_shape_choices(d):
"""
Get a table describing the shapes of all
"""
shape_choices = list()
for i in range(1, len(d)):
this_shape = d.iloc[-i:].dropna(axis=1).shape
shape_choices.append({'i': i, 'rows': this_shape[0], 'cols': this_shape[1]})
shape_choices = pd.DataFrame(shape_choices).set_index('i')
shape_choices['pts'] = shape_choices['rows'] * shape_choices['cols']
return shape_choices
def print_percentages_of_xvar_more_than_yvar(self, xvar, yvar, min_y=0, df=None):
if df is None:
df = self.df.stack(self.index_var)
t = df[[xvar, yvar]].dropna()
t = t[t[yvar] >= min_y]
n_xvar_more_than_yvar = sum(t[xvar] > t[yvar])
print(("{:.2f}% ({}/{}) of '{}' > '{}'".format(100 * n_xvar_more_than_yvar / float(len(t)),
n_xvar_more_than_yvar, len(t),
xvar, yvar)))
def plot_time_series(self, d, title=None, y_labels=None,
width_factor=2, length=18, only_first_non_null=True, with_plotly=False):
# idSite = 349
if isinstance(d, tuple):
d = self.df.loc[:, d]
if only_first_non_null:
lidx = np.any(d.notnull(), axis=1)
d = d.iloc[lidx]
default_title, default_y_labels = _choose_title_and_y_label(d)
title = title or default_title
y_labels = y_labels or default_y_labels
last_ax = None
n = len(d.columns)
fig = plt.figure(figsize=(length, min(n, 50) * width_factor))
for i, tt in enumerate(d.items()):
plt.subplot(n, 1, i + 1)
tt[1].index = tt[1].index.map(pd.to_datetime)
tt[1].plot(sharex=last_ax)
ax = plt.gca()
if title == 'y_labels':
ax.set_title(y_labels[i])
else:
if i == 0:
ax.set_title(title)
if isinstance(y_labels[i], str):
plt.ylabel(y_labels[i].replace('_', '\n'))
else:
plt.ylabel(y_labels[i])
ax.yaxis.set_label_position("right")
if i + 1 < n:
plt.xlabel('')
last_ax = ax
if with_plotly:
return simple_plotly(fig)
def get_plotly_url(self, plotly_obj):
if hasattr(plotly_obj, 'embed_code'):
return re.compile('src="([^"]*)"').search(plotly_obj.embed_code).group(1)
def _choose_title_and_y_label(d):
col_vals = d.columns.values
try:
level_1_vals, level_2_vals = list(zip(*col_vals))
if len(np.unique(level_1_vals)) == 1:
return level_1_vals[1], level_2_vals
elif len(np.unique(level_2_vals)) == 1:
return level_2_vals[0], level_1_vals
else:
return " & ".join(d.columns.names), col_vals
except TypeError:
return " & ".join(d.columns.names), col_vals
| mit | 1,677,486,331,001,283,800 | 39.024845 | 105 | 0.543762 | false | 3.530959 | false | false | false |
djpetti/stoplight | daemon/stoplightd.py | 1 | 1311 | #!/usr/bin/python3
from multiprocessing import Queue
from queue import Empty
import logging
import time
from manager import Manager
import server
""" Main file for the stoplight daemon. """
def init_logging(logfile):
""" Initializes logging.
Args:
logfile: File that stuff will be logged to. """
root = logging.getLogger()
root.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(logfile)
file_handler.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(name)s@%(asctime)s: " +
"[%(levelname)s] %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
root.addHandler(file_handler)
root.addHandler(stream_handler)
def main():
# Initialize logging.
init_logging("stoplightd.log")
# Start the server.
server_queue = Queue()
server.start(server_queue)
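    # The server is expected to put dicts like
    # {"type": "add_job", "job_dir": "/path/to/job"} onto server_queue;
    # this payload shape is inferred from the loop below, not from server.py.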
# Create and run the manager.
manager = Manager()
while True:
# Check for new jobs and add them.
try:
command = server_queue.get(block=False)
if command["type"] == "add_job":
# Add the job.
manager.add_job(command["job_dir"])
except Empty:
pass
manager.update()
time.sleep(5)
if __name__ == "__main__":
main()
| mit | -3,865,540,455,653,864,400 | 19.484375 | 58 | 0.678108 | false | 3.745714 | false | false | false |
FEniCS/dolfin | test/unit/python/ale/test_harmonic_smoothing.py | 1 | 3215 | #!/usr/bin/env py.test
"""Unit test for HarmonicSmoothing and ALE"""
# Copyright (C) 2013 Jan Blechta
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import pytest
from dolfin import UnitSquareMesh, BoundaryMesh, Expression, \
CellFunction, SubMesh, Constant, MPI, MeshQuality,\
mpi_comm_world, ALE
from dolfin_utils.test import skip_in_parallel
def test_HarmonicSmoothing():
# Create some mesh and its boundary
mesh = UnitSquareMesh(10, 10)
boundary = BoundaryMesh(mesh, 'exterior')
# Move boundary
disp = Expression(("0.3*x[0]*x[1]", "0.5*(1.0-x[1])"), degree=2)
ALE.move(boundary, disp)
# Move mesh according to given boundary
ALE.move(mesh, boundary)
# Check that new boundary topology corresponds to given one
boundary_new = BoundaryMesh(mesh, 'exterior')
assert boundary.topology().hash() == boundary_new.topology().hash()
# Check that coordinates are almost equal
err = sum(sum(abs(boundary.coordinates() \
- boundary_new.coordinates()))) / mesh.num_vertices()
print("Current CG solver produced error in boundary coordinates", err)
assert round(err - 0.0, 5) == 0
# Check mesh quality
magic_number = 0.35
rmin = MeshQuality.radius_ratio_min_max(mesh)[0]
assert rmin > magic_number
@skip_in_parallel
def test_ale():
# Create some mesh
mesh = UnitSquareMesh(4, 5)
# Make some cell function
# FIXME: Initialization by array indexing is probably
# not a good way for parallel test
cellfunc = CellFunction('size_t', mesh)
cellfunc.array()[0:4] = 0
cellfunc.array()[4:] = 1
# Create submeshes - this does not work in parallel
submesh0 = SubMesh(mesh, cellfunc, 0)
submesh1 = SubMesh(mesh, cellfunc, 1)
# Move submesh0
disp = Constant(("0.1", "-0.1"))
ALE.move(submesh0, disp)
    # Move and smooth submesh1 accordingly
ALE.move(submesh1, submesh0)
# Move mesh accordingly
parent_vertex_indices_0 = \
submesh0.data().array('parent_vertex_indices', 0)
parent_vertex_indices_1 = \
submesh1.data().array('parent_vertex_indices', 0)
mesh.coordinates()[parent_vertex_indices_0[:]] = \
submesh0.coordinates()[:]
mesh.coordinates()[parent_vertex_indices_1[:]] = \
submesh1.coordinates()[:]
# If test passes here then it is probably working
# Check for cell quality for sure
magic_number = 0.28
rmin = MeshQuality.radius_ratio_min_max(mesh)[0]
assert rmin > magic_number
| lgpl-3.0 | 3,355,733,854,089,536,000 | 32.842105 | 77 | 0.676516 | false | 3.616423 | true | false | false |
matrix-org/synapse | synapse/util/caches/deferred_cache.py | 1 | 12494 | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import threading
from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union
from prometheus_client import Gauge
from twisted.internet import defer
from twisted.python import failure
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
cache_pending_metric = Gauge(
"synapse_util_caches_cache_pending",
"Number of lookups currently pending for this cache",
["name"],
)
T = TypeVar("T")
KT = TypeVar("KT")
VT = TypeVar("VT")
class _Sentinel(enum.Enum):
# defining a sentinel in this way allows mypy to correctly handle the
# type of a dictionary lookup.
sentinel = object()
class DeferredCache(Generic[KT, VT]):
"""Wraps an LruCache, adding support for Deferred results.
It expects that each entry added with set() will be a Deferred; likewise get()
will return a Deferred.
"""
__slots__ = (
"cache",
"thread",
"_pending_deferred_cache",
)
def __init__(
self,
name: str,
max_entries: int = 1000,
tree: bool = False,
iterable: bool = False,
apply_cache_factor_from_config: bool = True,
):
"""
Args:
name: The name of the cache
max_entries: Maximum amount of entries that the cache will hold
tree: Use a TreeCache instead of a dict as the underlying cache type
iterable: If True, count each item in the cached object as an entry,
rather than each cached object
apply_cache_factor_from_config: Whether cache factors specified in the
config file affect `max_entries`
"""
cache_type = TreeCache if tree else dict
# _pending_deferred_cache maps from the key value to a `CacheEntry` object.
self._pending_deferred_cache = (
cache_type()
) # type: Union[TreeCache, MutableMapping[KT, CacheEntry]]
def metrics_cb():
cache_pending_metric.labels(name).set(len(self._pending_deferred_cache))
# cache is used for completed results and maps to the result itself, rather than
# a Deferred.
self.cache = LruCache(
max_size=max_entries,
cache_name=name,
cache_type=cache_type,
size_callback=(lambda d: len(d) or 1) if iterable else None,
metrics_collection_callback=metrics_cb,
apply_cache_factor_from_config=apply_cache_factor_from_config,
) # type: LruCache[KT, VT]
self.thread = None # type: Optional[threading.Thread]
@property
def max_entries(self):
return self.cache.max_size
def check_thread(self):
expected_thread = self.thread
if expected_thread is None:
self.thread = threading.current_thread()
else:
if expected_thread is not threading.current_thread():
raise ValueError(
"Cache objects can only be accessed from the main thread"
)
def get(
self,
key: KT,
callback: Optional[Callable[[], None]] = None,
update_metrics: bool = True,
) -> defer.Deferred:
"""Looks the key up in the caches.
For symmetry with set(), this method does *not* follow the synapse logcontext
rules: the logcontext will not be cleared on return, and the Deferred will run
its callbacks in the sentinel context. In other words: wrap the result with
make_deferred_yieldable() before `await`ing it.
Args:
key:
callback: Gets called when the entry in the cache is invalidated
update_metrics (bool): whether to update the cache hit rate metrics
Returns:
A Deferred which completes with the result. Note that this may later fail
if there is an ongoing set() operation which later completes with a failure.
Raises:
KeyError if the key is not found in the cache
"""
callbacks = [callback] if callback else []
val = self._pending_deferred_cache.get(key, _Sentinel.sentinel)
if val is not _Sentinel.sentinel:
val.callbacks.update(callbacks)
if update_metrics:
m = self.cache.metrics
assert m # we always have a name, so should always have metrics
m.inc_hits()
return val.deferred.observe()
val2 = self.cache.get(
key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics
)
if val2 is _Sentinel.sentinel:
raise KeyError()
else:
return defer.succeed(val2)
def get_immediate(
self, key: KT, default: T, update_metrics: bool = True
) -> Union[VT, T]:
"""If we have a *completed* cached value, return it."""
return self.cache.get(key, default, update_metrics=update_metrics)
def set(
self,
key: KT,
value: defer.Deferred,
callback: Optional[Callable[[], None]] = None,
) -> defer.Deferred:
"""Adds a new entry to the cache (or updates an existing one).
The given `value` *must* be a Deferred.
First any existing entry for the same key is invalidated. Then a new entry
is added to the cache for the given key.
Until the `value` completes, calls to `get()` for the key will also result in an
incomplete Deferred, which will ultimately complete with the same result as
`value`.
If `value` completes successfully, subsequent calls to `get()` will then return
a completed deferred with the same result. If it *fails*, the cache is
        invalidated and subsequent calls to `get()` will raise a KeyError.
If another call to `set()` happens before `value` completes, then (a) any
invalidation callbacks registered in the interim will be called, (b) any
`get()`s in the interim will continue to complete with the result from the
*original* `value`, (c) any future calls to `get()` will complete with the
result from the *new* `value`.
It is expected that `value` does *not* follow the synapse logcontext rules - ie,
if it is incomplete, it runs its callbacks in the sentinel context.
Args:
key: Key to be set
value: a deferred which will complete with a result to add to the cache
callback: An optional callback to be called when the entry is invalidated
"""
if not isinstance(value, defer.Deferred):
raise TypeError("not a Deferred")
callbacks = [callback] if callback else []
self.check_thread()
existing_entry = self._pending_deferred_cache.pop(key, None)
if existing_entry:
existing_entry.invalidate()
# XXX: why don't we invalidate the entry in `self.cache` yet?
# we can save a whole load of effort if the deferred is ready.
if value.called:
result = value.result
if not isinstance(result, failure.Failure):
self.cache.set(key, result, callbacks)
return value
# otherwise, we'll add an entry to the _pending_deferred_cache for now,
# and add callbacks to add it to the cache properly later.
observable = ObservableDeferred(value, consumeErrors=True)
observer = observable.observe()
entry = CacheEntry(deferred=observable, callbacks=callbacks)
self._pending_deferred_cache[key] = entry
def compare_and_pop():
"""Check if our entry is still the one in _pending_deferred_cache, and
if so, pop it.
Returns true if the entries matched.
"""
existing_entry = self._pending_deferred_cache.pop(key, None)
if existing_entry is entry:
return True
# oops, the _pending_deferred_cache has been updated since
# we started our query, so we are out of date.
#
# Better put back whatever we took out. (We do it this way
# round, rather than peeking into the _pending_deferred_cache
# and then removing on a match, to make the common case faster)
if existing_entry is not None:
self._pending_deferred_cache[key] = existing_entry
return False
def cb(result):
if compare_and_pop():
self.cache.set(key, result, entry.callbacks)
else:
# we're not going to put this entry into the cache, so need
# to make sure that the invalidation callbacks are called.
# That was probably done when _pending_deferred_cache was
# updated, but it's possible that `set` was called without
# `invalidate` being previously called, in which case it may
# not have been. Either way, let's double-check now.
entry.invalidate()
def eb(_fail):
compare_and_pop()
entry.invalidate()
# once the deferred completes, we can move the entry from the
# _pending_deferred_cache to the real cache.
#
observer.addCallbacks(cb, eb)
# we return a new Deferred which will be called before any subsequent observers.
return observable.observe()
def prefill(
self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None
):
callbacks = [callback] if callback else []
self.cache.set(key, value, callbacks=callbacks)
def invalidate(self, key):
"""Delete a key, or tree of entries
If the cache is backed by a regular dict, then "key" must be of
the right type for this cache
If the cache is backed by a TreeCache, then "key" must be a tuple, but
may be of lower cardinality than the TreeCache - in which case the whole
subtree is deleted.
"""
self.check_thread()
self.cache.del_multi(key)
# if we have a pending lookup for this key, remove it from the
# _pending_deferred_cache, which will (a) stop it being returned
# for future queries and (b) stop it being persisted as a proper entry
# in self.cache.
entry = self._pending_deferred_cache.pop(key, None)
# run the invalidation callbacks now, rather than waiting for the
# deferred to resolve.
if entry:
# _pending_deferred_cache.pop should either return a CacheEntry, or, in the
# case of a TreeCache, a dict of keys to cache entries. Either way calling
# iterate_tree_cache_entry on it will do the right thing.
for entry in iterate_tree_cache_entry(entry):
entry.invalidate()
def invalidate_all(self):
self.check_thread()
self.cache.clear()
for entry in self._pending_deferred_cache.values():
entry.invalidate()
self._pending_deferred_cache.clear()
class CacheEntry:
__slots__ = ["deferred", "callbacks", "invalidated"]
def __init__(
self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]]
):
self.deferred = deferred
self.callbacks = set(callbacks)
self.invalidated = False
def invalidate(self):
if not self.invalidated:
self.invalidated = True
for callback in self.callbacks:
callback()
self.callbacks.clear()
| apache-2.0 | 1,217,865,865,054,612,000 | 36.746224 | 88 | 0.621098 | false | 4.390021 | false | false | false |
asgard-lab/neutron | neutron/tests/functional/agent/linux/test_async_process.py | 7 | 2816 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from neutron.agent.linux import async_process
from neutron.agent.linux import utils
from neutron.tests import base
class AsyncProcessTestFramework(base.BaseTestCase):
def setUp(self):
super(AsyncProcessTestFramework, self).setUp()
self.test_file_path = self.get_temp_file_path('test_async_process.tmp')
self.data = [str(x) for x in range(4)]
with open(self.test_file_path, 'w') as f:
f.writelines('%s\n' % item for item in self.data)
def _check_stdout(self, proc):
# Ensure that all the output from the file is read
output = []
while output != self.data:
new_output = list(proc.iter_stdout())
if new_output:
output += new_output
eventlet.sleep(0.01)
class TestAsyncProcess(AsyncProcessTestFramework):
def _safe_stop(self, proc):
try:
proc.stop()
except async_process.AsyncProcessException:
pass
def test_stopping_async_process_lifecycle(self):
proc = async_process.AsyncProcess(['tail', '-f',
self.test_file_path])
self.addCleanup(self._safe_stop, proc)
proc.start(block=True)
self._check_stdout(proc)
proc.stop(block=True)
# Ensure that the process and greenthreads have stopped
proc._process.wait()
self.assertEqual(proc._process.returncode, -9)
for watcher in proc._watchers:
watcher.wait()
def test_async_process_respawns(self):
proc = async_process.AsyncProcess(['tail', '-f',
self.test_file_path],
respawn_interval=0)
self.addCleanup(self._safe_stop, proc)
proc.start()
# Ensure that the same output is read twice
self._check_stdout(proc)
pid = proc.pid
utils.execute(['kill', '-9', pid])
utils.wait_until_true(
lambda: proc.is_active() and pid != proc.pid,
timeout=5,
sleep=0.01,
exception=RuntimeError(_("Async process didn't respawn")))
self._check_stdout(proc)
| apache-2.0 | -1,164,682,003,072,662,000 | 35.102564 | 79 | 0.60831 | false | 4.093023 | true | false | false |
wpjesus/codematch | ietf/cookies/views.py | 1 | 2180 | # Copyright The IETF Trust 2010, All Rights Reserved
from django.shortcuts import render_to_response as render
from django.template import RequestContext
from django.conf import settings
import debug # pyflakes:ignore
def preferences(request, **kwargs):
preferences = request.COOKIES.copy()
new_cookies = {}
del_cookies = []
for key in settings.USER_PREFERENCE_DEFAULTS.keys():
if key in kwargs:
if kwargs[key] == None:
del_cookies += [key]
else:
# ignore bad kwargs
if key in ['new_enough', 'expires_soon'] and not kwargs[key].isdigit():
pass
elif key in ['full_draft', 'left_menu'] and not kwargs[key] in ['on', 'off']:
pass
else:
preferences[key] = new_cookies[key] = kwargs[key]
if not key in preferences or preferences[key] in [None, 'None', ''] or key in del_cookies:
preferences[key] = settings.USER_PREFERENCE_DEFAULTS[key]
# reset bad cookie values
if key in ['new_enough', 'expires_soon'] and not preferences[key].isdigit():
preferences[key] = settings.USER_PREFERENCE_DEFAULTS[key]
del_cookies += [key]
elif key in ['full_draft', 'left_menu'] and not preferences[key] in ['on', 'off']:
preferences[key] = settings.USER_PREFERENCE_DEFAULTS[key]
del_cookies += [key]
request.COOKIES.update(preferences)
response = render("cookies/settings.html", preferences, context_instance=RequestContext(request))
for key in new_cookies:
response.set_cookie(key, new_cookies[key], settings.SESSION_COOKIE_AGE)
for key in del_cookies:
response.delete_cookie(key)
return response
def new_enough(request, days=None):
return preferences(request, new_enough=days)
def expires_soon(request, days=None):
return preferences(request, expires_soon=days)
def full_draft(request, enabled=None):
return preferences(request, full_draft=enabled)
def left_menu(request, enabled=None):
return preferences(request, left_menu=enabled)
| bsd-3-clause | 1,406,171,809,870,712,300 | 39.37037 | 101 | 0.630275 | false | 4.029575 | false | false | false |
Fernerkundung/sentinelsat | sentinelsat/scripts/cli.py | 1 | 8629 | import logging
import math
import os
import click
import geojson as gj
import requests.utils
from sentinelsat import __version__ as sentinelsat_version
from sentinelsat.sentinel import SentinelAPI, geojson_to_wkt, read_geojson, placename_to_wkt
from sentinelsat.exceptions import InvalidKeyError
logger = logging.getLogger("sentinelsat")
def _set_logger_handler(level="INFO"):
logger.setLevel(level)
h = logging.StreamHandler()
h.setLevel(level)
fmt = logging.Formatter("%(message)s")
h.setFormatter(fmt)
logger.addHandler(h)
class CommaSeparatedString(click.ParamType):
name = "comma-string"
def convert(self, value, param, ctx):
if value:
return value.split(",")
else:
return value
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option(
"--user",
"-u",
envvar="DHUS_USER",
default=None,
help="Username (or environment variable DHUS_USER is set)",
)
@click.option(
"--password",
"-p",
envvar="DHUS_PASSWORD",
default=None,
help="Password (or environment variable DHUS_PASSWORD is set)",
)
@click.option(
"--url",
default="https://scihub.copernicus.eu/apihub/",
envvar="DHUS_URL",
help="""Define API URL. Default URL is
'https://scihub.copernicus.eu/apihub/' (or environment variable DHUS_URL).
""",
)
@click.option(
"--start",
"-s",
default="NOW-1DAY",
show_default=True,
help="Start date of the query in the format YYYYMMDD.",
)
@click.option(
"--end",
"-e",
default="NOW",
show_default=True,
help="End date of the query in the format YYYYMMDD.",
)
@click.option(
"--geometry", "-g", type=click.Path(exists=True), help="Search area geometry as GeoJSON file."
)
@click.option(
"--uuid",
type=CommaSeparatedString(),
default=None,
help="Select a specific product UUID instead of a query. Multiple UUIDs can separated by comma.",
)
@click.option(
"--name",
type=CommaSeparatedString(),
default=None,
help="Select specific product(s) by filename. Supports wildcards.",
)
@click.option(
"--sentinel",
type=click.Choice(["1", "2", "3", "5"]),
help="Limit search to a Sentinel satellite (constellation)",
)
@click.option(
"--instrument",
type=click.Choice(["MSI", "SAR-C SAR", "SLSTR", "OLCI", "SRAL"]),
help="Limit search to a specific instrument on a Sentinel satellite.",
)
@click.option(
"--producttype", type=str, default=None, help="Limit search to a Sentinel product type."
)
@click.option(
"-c",
"--cloud",
type=int,
help="Maximum cloud cover in percent. (requires --sentinel to be 2 or 3)",
)
@click.option(
"-o",
"--order-by",
help="Comma-separated list of keywords to order the result by. "
"Prefix keywords with '-' for descending order.",
)
@click.option(
"-l", "--limit", type=int, help="Maximum number of results to return. Defaults to no limit."
)
@click.option("--download", "-d", is_flag=True, help="Download all results of the query.")
@click.option(
"--path",
type=click.Path(exists=True),
default=".",
help="Set the path where the files will be saved.",
)
@click.option(
"--query",
"-q",
type=CommaSeparatedString(),
default=None,
help="""Extra search keywords you want to use in the query. Separate
keywords with comma. Example: 'producttype=GRD,polarisationmode=HH'.
""",
)
@click.option(
"--location",
type=str,
help="Return only products overlapping with the bounding box of given location, "
"e.g. 'Berlin', 'Germany' or '52.393974, 13.066955'.",
)
@click.option(
"--footprints",
is_flag=True,
help="""Create a geojson file search_footprints.geojson with footprints
and metadata of the returned products.
""",
)
@click.option("--info", is_flag=True, is_eager=True, help="Displays the DHuS version used")
@click.version_option(version=sentinelsat_version, prog_name="sentinelsat")
def cli(
user,
password,
geometry,
start,
end,
uuid,
name,
download,
sentinel,
producttype,
instrument,
cloud,
footprints,
path,
query,
url,
order_by,
location,
limit,
info,
):
"""Search for Sentinel products and, optionally, download all the results
and/or create a geojson file with the search result footprints.
Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
containing the geometry of the area you want to search for or the UUIDs of the products. If you
don't specify the start and end dates, it will search in the last 24 hours.
"""
_set_logger_handler()
if user is None or password is None:
try:
user, password = requests.utils.get_netrc_auth(url)
except TypeError:
pass
if user is None or password is None:
raise click.UsageError(
"Missing --user and --password. Please see docs "
"for environment variables and .netrc support."
)
api = SentinelAPI(user, password, url)
if info:
ctx = click.get_current_context()
click.echo("DHuS version: " + api.dhus_version)
ctx.exit()
search_kwargs = {}
if sentinel and not (producttype or instrument):
search_kwargs["platformname"] = "Sentinel-" + sentinel
if instrument and not producttype:
search_kwargs["instrumentshortname"] = instrument
if producttype:
search_kwargs["producttype"] = producttype
if cloud:
if sentinel not in ["2", "3"]:
logger.error("Cloud cover is only supported for Sentinel 2 and 3.")
exit(1)
search_kwargs["cloudcoverpercentage"] = (0, cloud)
if query is not None:
search_kwargs.update((x.split("=") for x in query))
if location is not None:
wkt, info = placename_to_wkt(location)
minX, minY, maxX, maxY = info["bbox"]
r = 6371 # average radius, km
extent_east = r * math.radians(maxX - minX) * math.cos(math.radians((minY + maxY) / 2))
extent_north = r * math.radians(maxY - minY)
logger.info(
"Querying location: '%s' with %.1f x %.1f km, %f, %f to %f, %f bounding box",
info["display_name"],
extent_north,
extent_east,
minY,
minX,
maxY,
maxX,
)
search_kwargs["area"] = wkt
if geometry is not None:
search_kwargs["area"] = geojson_to_wkt(read_geojson(geometry))
if uuid is not None:
uuid_list = [x.strip() for x in uuid]
products = {}
for productid in uuid_list:
try:
products[productid] = api.get_product_odata(productid)
except InvalidKeyError:
logger.error("No product with ID '%s' exists on server", productid)
exit(1)
elif name is not None:
search_kwargs["identifier"] = name[0] if len(name) == 1 else "(" + " OR ".join(name) + ")"
products = api.query(order_by=order_by, limit=limit, **search_kwargs)
else:
start = start or "19000101"
end = end or "NOW"
products = api.query(date=(start, end), order_by=order_by, limit=limit, **search_kwargs)
if footprints is True:
footprints_geojson = api.to_geojson(products)
with open(os.path.join(path, "search_footprints.geojson"), "w") as outfile:
outfile.write(gj.dumps(footprints_geojson))
if download is True:
product_infos, triggered, failed_downloads = api.download_all(products, path)
if len(failed_downloads) > 0:
with open(os.path.join(path, "corrupt_scenes.txt"), "w") as outfile:
for failed_id in failed_downloads:
outfile.write("%s : %s\n" % (failed_id, products[failed_id]["title"]))
else:
for product_id, props in products.items():
if uuid is None:
logger.info("Product %s - %s", product_id, props["summary"])
else: # querying uuids has no summary key
logger.info(
"Product %s - %s - %s MB",
product_id,
props["title"],
round(int(props["size"]) / (1024.0 * 1024.0), 2),
)
if uuid is None:
logger.info("---")
logger.info(
"%s scenes found with a total size of %.2f GB",
len(products),
api.get_products_size(products),
)
| gpl-3.0 | -1,093,820,202,210,152,200 | 29.708185 | 101 | 0.603894 | false | 3.701845 | false | false | false |
ajar98/todoist_bot | clock.py | 1 | 1716 | from apscheduler.schedulers.blocking import BlockingScheduler
import os
from pymongo import MongoClient
from client import TodoistClient
from todoist_app import send_tasks, send_FB_text
from todoist_app import MONGO_DB_TOKENS_ENDPOINT, MONGO_DB_TOKENS_PORT
from todoist_app import MONGO_DB_TOKENS_DATABASE
from apscheduler.schedulers import SchedulerAlreadyRunningError
from uuid import uuid4
DAY_OVERVIEW_TIME_HOUR = 6
def connect():
connection = MongoClient(
MONGO_DB_TOKENS_ENDPOINT,
MONGO_DB_TOKENS_PORT
)
handle = connection[MONGO_DB_TOKENS_DATABASE]
handle.authenticate(
os.environ['MONGO_DB_USERNAME'],
os.environ['MONGO_DB_PWD']
)
return handle
scheduler = BlockingScheduler()
handle = connect()
def today_tasks(sender_id, tc):
today_tasks = tc.get_today_tasks()
if today_tasks:
send_FB_text(
sender_id,
'Here are your tasks for today:'
)
send_tasks(
sender_id,
today_tasks,
tc.tz_info['hours']
)
else:
send_FB_text(
sender_id,
'You have no tasks today! Have a great day!'
)
@scheduler.scheduled_job('cron', hour=0)
def schedule_day_overview():
for entry in handle.bot_users.find():
tc = TodoistClient(entry['access_token'])
job_id = uuid4().__str__()
scheduler.add_job(
today_tasks,
args=[entry['sender_id'], tc],
trigger='cron',
            hour=(DAY_OVERVIEW_TIME_HOUR - tc.tz_info['hours']) % 24,  # keep the cron hour in the 0-23 range
id=job_id
)
try:
scheduler.start()
except SchedulerAlreadyRunningError:
pass
| mit | -3,641,494,452,882,261,000 | 24.235294 | 70 | 0.618881 | false | 3.666667 | false | false | false |
kronenthaler/mod-pbxproj | pbxproj/pbxsections/PBXFileReference.py | 1 | 2059 | import os
from pbxproj import PBXGenericObject
class PBXFileReference(PBXGenericObject):
@classmethod
def create(cls, path, tree='SOURCE_ROOT'):
return cls().parse({
'_id': cls._generate_id(),
'isa': cls.__name__,
'path': path,
'name': os.path.split(path)[1],
'sourceTree': tree
})
def set_explicit_file_type(self, file_type):
if 'lastKnownFileType' in self:
del self['lastKnownFileType']
self['explicitFileType'] = file_type
def set_last_known_file_type(self, file_type):
if 'explicitFileType' in self:
del self['explicitFileType']
self['lastKnownFileType'] = file_type
def get_file_type(self):
if 'explicitFileType' in self:
return self.explicitFileType
return self.lastKnownFileType
def _print_object(self, indent_depth='', entry_separator='\n', object_start='\n',
indent_increment='\t'):
return super(PBXFileReference, self)._print_object('', entry_separator=' ', object_start='',
indent_increment='')
def get_name(self):
if hasattr(self, 'name'):
return self.name
return self.path
def remove(self, recursive=True):
parent = self.get_parent()
        # search the PBXBuildFile section for build files that reference this file and remove them
build_files = [build_file for build_file in parent.get_objects_in_section('PBXBuildFile')
if build_file.fileRef == self.get_id()]
for build_file in build_files:
build_file.remove(recursive)
        # remove this file reference from every group that contains it.
for group in parent.get_objects_in_section('PBXGroup'):
if self.get_id() in group.children:
group.remove_child(self)
        # remove the file reference from its parent
del parent[self.get_id()]
return True
| mit | -5,999,931,503,130,478,000 | 35.122807 | 100 | 0.582807 | false | 4.307531 | false | false | false |
klahnakoski/cloc | cloc/util/queries/query.py | 1 | 11834 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from .. import struct
from .dimensions import Dimension
from .domains import Domain
from ..collections import AND, reverse
from ..env.logs import Log
from ..queries import MVEL, _normalize_select, INDEX_CACHE
from ..queries.filters import TRUE_FILTER, simplify
from ..struct import nvl, Struct, EmptyList, split_field, join_field, StructList, Null
from ..structs.wraps import wrap, unwrap, listwrap
class Query(object):
def __new__(cls, query, schema=None):
if isinstance(query, Query):
return query
return object.__new__(cls)
def __init__(self, query, schema=None):
"""
NORMALIZE QUERY SO IT CAN STILL BE JSON
"""
if isinstance(query, Query):
return
object.__init__(self)
query = wrap(query)
self.name = query.name
select = query.select
if isinstance(select, list):
select = wrap([unwrap(_normalize_select(s, schema=schema)) for s in select])
elif select:
select = _normalize_select(select, schema=schema)
else:
select = StructList()
self.select2index = {} # MAP FROM NAME TO data INDEX
for i, s in enumerate(listwrap(select)):
self.select2index[s.name] = i
self.select = select
self.edges = _normalize_edges(query.edges, schema=schema)
self.frum = _normalize_from(query["from"], schema=schema)
self.where = _normalize_where(query.where, schema=schema)
self.window = [_normalize_window(w) for w in listwrap(query.window)]
self.sort = _normalize_sort(query.sort)
self.limit = query.limit
self.isLean = query.isLean
@property
def columns(self):
return self.select + self.edges
def __getitem__(self, item):
if item == "from":
return self.frum
return Struct.__getitem__(self, item)
def copy(self):
output = object.__new__(Query)
source = object.__getattribute__(self, "__dict__")
dest = object.__getattribute__(output, "__dict__")
struct.set_default(dest, source)
return output
def _normalize_selects(selects, schema=None):
if isinstance(selects, list):
return wrap([_normalize_select(s, schema=schema) for s in selects])
else:
return _normalize_select(selects, schema=schema)
def _normalize_edges(edges, schema=None):
return [_normalize_edge(e, schema=schema) for e in listwrap(edges)]
def _normalize_edge(edge, schema=None):
if isinstance(edge, basestring):
if schema:
e = schema[edge]
if e:
return Struct(
name=edge,
domain=e.getDomain()
)
return Struct(
name=edge,
value=edge,
domain=_normalize_domain(schema=schema)
)
else:
return Struct(
name=nvl(edge.name, edge.value),
value=edge.value,
range=edge.range,
allowNulls=False if edge.allowNulls is False else True,
domain=_normalize_domain(edge.domain, schema=schema)
)
def _normalize_from(frum, schema=None):
frum = wrap(frum)
if isinstance(frum, basestring):
return Struct(name=frum)
elif isinstance(frum, dict) and (frum["from"] or isinstance(frum["from"], (list, set))):
return Query(frum, schema=schema)
else:
return frum
def _normalize_domain(domain=None, schema=None):
if not domain:
return Domain(type="default")
elif isinstance(domain, Dimension):
return domain.getDomain()
elif schema and isinstance(domain, basestring) and schema[domain]:
return schema[domain].getDomain()
elif isinstance(domain, Domain):
return domain
if not domain.name:
domain = domain.copy()
domain.name = domain.type
return Domain(**struct.unwrap(domain))
def _normalize_window(window, schema=None):
return Struct(
name=nvl(window.name, window.value),
value=window.value,
edges=[_normalize_edge(e, schema) for e in listwrap(window.edges)],
sort=_normalize_sort(window.sort),
aggregate=window.aggregate,
range=_normalize_range(window.range),
where=_normalize_where(window.where, schema=schema)
)
def _normalize_range(range):
if range == None:
return None
return Struct(
min=range.min,
max=range.max
)
def _normalize_where(where, schema=None):
if where == None:
return TRUE_FILTER
if schema == None:
return where
where = simplify(_where_terms(where, where, schema))
return where
def _map_term_using_schema(master, path, term, schema_edges):
"""
IF THE WHERE CLAUSE REFERS TO FIELDS IN THE SCHEMA, THEN EXPAND THEM
"""
output = StructList()
for k, v in term.items():
dimension = schema_edges[k]
if isinstance(dimension, Dimension):
domain = dimension.getDomain()
if dimension.fields:
if isinstance(dimension.fields, dict):
# EXPECTING A TUPLE
for local_field, es_field in dimension.fields.items():
local_value = v[local_field]
if local_value == None:
output.append({"missing": {"field": es_field}})
else:
output.append({"term": {es_field: local_value}})
continue
if len(dimension.fields) == 1 and MVEL.isKeyword(dimension.fields[0]):
# SIMPLE SINGLE-VALUED FIELD
if domain.getPartByKey(v) is domain.NULL:
output.append({"missing": {"field": dimension.fields[0]}})
else:
output.append({"term": {dimension.fields[0]: v}})
continue
if AND(MVEL.isKeyword(f) for f in dimension.fields):
# EXPECTING A TUPLE
if not isinstance(v, tuple):
Log.error("expecing {{name}}={{value}} to be a tuple", {"name": k, "value": v})
for i, f in enumerate(dimension.fields):
vv = v[i]
if vv == None:
output.append({"missing": {"field": f}})
else:
output.append({"term": {f: vv}})
continue
if len(dimension.fields) == 1 and MVEL.isKeyword(dimension.fields[0]):
if domain.getPartByKey(v) is domain.NULL:
output.append({"missing": {"field": dimension.fields[0]}})
else:
output.append({"term": {dimension.fields[0]: v}})
continue
if domain.partitions:
part = domain.getPartByKey(v)
if part is domain.NULL or not part.esfilter:
Log.error("not expected to get NULL")
output.append(part.esfilter)
continue
else:
Log.error("not expected")
elif isinstance(v, dict):
sub = _map_term_using_schema(master, path + [k], v, schema_edges[k])
output.append(sub)
continue
output.append({"term": {k: v}})
return {"and": output}
def _move_nested_term(master, where, schema):
"""
THE WHERE CLAUSE CAN CONTAIN NESTED PROPERTY REFERENCES, THESE MUST BE MOVED
TO A NESTED FILTER
"""
items = where.term.items()
if len(items) != 1:
Log.error("Expecting only one term")
k, v = items[0]
nested_path = _get_nested_path(k, schema)
if nested_path:
return {"nested": {
"path": nested_path,
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"term": {k: v}}
]}
}}
}}
return where
def _get_nested_path(field, schema):
if MVEL.isKeyword(field):
field = join_field([schema.es.alias]+split_field(field))
for i, f in reverse(enumerate(split_field(field))):
path = join_field(split_field(field)[0:i+1:])
if path in INDEX_CACHE:
return join_field(split_field(path)[1::])
return None
def _where_terms(master, where, schema):
"""
USE THE SCHEMA TO CONVERT DIMENSION NAMES TO ES FILTERS
master - TOP LEVEL WHERE (FOR PLACING NESTED FILTERS)
"""
if isinstance(where, dict):
if where.term:
# MAP TERM
try:
output = _map_term_using_schema(master, [], where.term, schema.edges)
return output
except Exception, e:
Log.error("programmer problem?", e)
elif where.terms:
# MAP TERM
output = StructList()
for k, v in where.terms.items():
if not isinstance(v, (list, set)):
Log.error("terms filter expects list of values")
edge = schema.edges[k]
if not edge:
output.append({"terms": {k: v}})
else:
if isinstance(edge, basestring):
# DIRECT FIELD REFERENCE
return {"terms": {edge: v}}
try:
domain = edge.getDomain()
except Exception, e:
Log.error("programmer error", e)
fields = domain.dimension.fields
if isinstance(fields, dict):
or_agg = []
for vv in v:
and_agg = []
for local_field, es_field in fields.items():
vvv = vv[local_field]
if vvv != None:
and_agg.append({"term": {es_field: vvv}})
or_agg.append({"and": and_agg})
output.append({"or": or_agg})
elif isinstance(fields, list) and len(fields) == 1 and MVEL.isKeyword(fields[0]):
output.append({"terms": {fields[0]: v}})
elif domain.partitions:
output.append({"or": [domain.getPartByKey(vv).esfilter for vv in v]})
return {"and": output}
elif where["or"]:
return {"or": [unwrap(_where_terms(master, vv, schema)) for vv in where["or"]]}
elif where["and"]:
return {"and": [unwrap(_where_terms(master, vv, schema)) for vv in where["and"]]}
elif where["not"]:
return {"not": unwrap(_where_terms(master, where["not"], schema))}
return where
def _normalize_sort(sort=None):
"""
CONVERT SORT PARAMETERS TO A NORMAL FORM SO EASIER TO USE
"""
if not sort:
return EmptyList
output = StructList()
for s in listwrap(sort):
if isinstance(s, basestring):
output.append({"field": s, "sort": 1})
else:
output.append({"field": nvl(s.field, s.value), "sort": nvl(sort_direction[s.sort], 1)})
return wrap(output)
sort_direction = {
"asc": 1,
"desc": -1,
"none": 0,
1: 1,
0: 0,
-1: -1,
None: 1,
Null: 1
}
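# Illustrative sketch (not part of the original module) of the normal form
# produced by _normalize_sort(); the field names are hypothetical and the
# expected outputs assume the wrap()/listwrap() helpers imported above.
def _example_normalize_sort():
    by_name = _normalize_sort("build.date")
    # expected: [{"field": "build.date", "sort": 1}]
    by_dict = _normalize_sort([{"field": "run.timestamp", "sort": "desc"}])
    # expected: [{"field": "run.timestamp", "sort": -1}]
    return by_name, by_dict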
| mpl-2.0 | -8,638,696,560,898,394,000 | 32.908309 | 103 | 0.534477 | false | 4.119039 | false | false | false |
mrzhenya/plex-plugins | localmetadata/LocalMetadata.bundle/Contents/Code/__init__.py | 1 | 9059 | # -*- coding: utf-8 -*-
#
# Metadata plugin for Plex Media Server, which updates media's metadata
# using information stored in local info files.
#
# Copyright (C) 2015 Yevgeny Nyden
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# @author zhenya (Yevgeny Nyden)
# @version @PLUGIN.REVISION@
# @revision @REPOSITORY.REVISION@
import re, os, io, datetime
LOGGER = Log
ENCODING_PLEX = 'utf-8'
INFO_FILE_EXTENSION = '.info'
MATCHER_INFO_TAG = re.compile('^\s*\[(.*)\]\s*', re.UNICODE)
MATCHER_COMMENT_LINE = re.compile('^\s*###')
TUPLE_SPLIT_STRING = '|'
def Start():
LOGGER.Info('***** START *****')
def ValidatePrefs():
LOGGER.Info('***** updating preferences...')
# Only use unicode if it's supported, which it is on Windows and OS X,
# but not Linux. This allows things to work with non-ASCII characters
# without having to go through a bunch of work to ensure the Linux
# filesystem is UTF-8 "clean".
#
def unicodize(s):
filename = s
if os.path.supports_unicode_filenames:
try: filename = unicode(s.decode(ENCODING_PLEX))
except: pass
return filename
def getAndTestInfoFilePath(media):
part = media.items[0].parts[0]
filename = unicodize(part.file)
path = os.path.splitext(filename)[0] + INFO_FILE_EXTENSION
if os.path.exists(path):
return path
else:
return None
def parsePipeSeparatedTuple(str):
"""Parses a tuple of values separated by '|' from the given string.
Args:
str - string to parse
Returns:
tuple of strings or empty values if nothing was parsed.
"""
tokens = str.split(TUPLE_SPLIT_STRING)
second = ''
if len(tokens) > 1:
second = tokens[1].strip()
return tokens[0].strip(), second
def parseStringValueFromText(lines):
mergedValue = ''
for line in lines:
line = line.strip()
if not line and not mergedValue:
# Skipping leading empty lines.
continue
if not line:
mergedValue += '\n'
elif mergedValue:
mergedValue += ' '
mergedValue += line
return mergedValue
def parseSingleValueFromText(lines):
for line in lines:
return line.strip()
return ''
def parseAndAddActorsFromText(roles, lines):
for line in lines:
    actor_name, role_name = parsePipeSeparatedTuple(line)
    if actor_name:
      role = roles.new()
      role.actor = actor_name
      role.role = role_name
def parseAndAddArrayValuesFromText(container, lines):
"""Parses text values and adds them to a metadata array container.
Args:
container: list where parsed values are added;
lines: list of strings to parse.
"""
for line in lines:
line = line.strip()
if line:
container.add(line)
def parseIntegerValueFromText(lines):
return int(parseStringValueFromText(lines))
def parseFloatValueFromText(lines):
return float(parseStringValueFromText(lines))
def parseDateValueFromText(lines):
if lines:
return Datetime.ParseDate(lines[0]).date()
def isCommentLine(line):
return MATCHER_COMMENT_LINE.search(line)
def writeTagValueToMetadata(metadata, tagName, tagLines):
"""Parses and stores the passed tag data into metadata object.
Args:
metadata: Metadata - Plex metadata object.
tagName: string - 'info' file tag name
tagLines: list - lines as parsed from the file
"""
try:
if not tagName:
return
tagName = tagName.lower()
# Title.
if tagName == 'title':
metadata.title = parseStringValueFromText(tagLines)
elif tagName == 'original_title':
metadata.original_title = parseStringValueFromText(tagLines)
# Year.
elif tagName == 'year':
metadata.year = parseIntegerValueFromText(tagLines)
# Runtime.
elif tagName == 'duration' or tagName == 'runtime':
metadata.duration = parseIntegerValueFromText(tagLines) * 1000
# Genres.
elif tagName == 'genres':
parseAndAddArrayValuesFromText(metadata.genres, tagLines)
# Directors.
elif tagName == 'directors':
parseAndAddArrayValuesFromText(metadata.directors, tagLines)
# Writers.
elif tagName == 'writers':
parseAndAddArrayValuesFromText(metadata.writers, tagLines)
# Actors.
elif tagName == 'actors':
parseAndAddActorsFromText(metadata.roles, tagLines)
# Studio
elif tagName == 'studio':
metadata.studio = parseStringValueFromText(tagLines)
# Tagline.
elif tagName == 'tagline':
metadata.tagline = parseStringValueFromText(tagLines)
# Summary.
elif tagName == 'summary':
metadata.summary = parseStringValueFromText(tagLines)
# Content rating.
elif tagName == 'content_rating':
metadata.content_rating = parseSingleValueFromText(tagLines)
# Release date.
elif tagName == 'original_date':
metadata.originally_available_at = parseDateValueFromText(tagLines)
# Country.
elif tagName == 'countries':
parseAndAddArrayValuesFromText(metadata.countries, tagLines)
# Rating.
elif tagName == 'rating':
metadata.rating = parseFloatValueFromText(tagLines)
# Collections.
elif tagName == 'collections':
parseAndAddArrayValuesFromText(metadata.collections, tagLines)
elif tagName == 'poster':
pass
elif tagName == 'still':
pass
except:
LOGGER.Error('Failed to parse tag "' + str(tagName) + '"')
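# Illustrative .info file layout (hypothetical values) showing the format that
# update() below feeds through writeTagValueToMetadata(): section headers are
# the tags handled above, and actor lines use the "name | role" form:
#   [title]
#   Some Movie
#   [year]
#   1999
#   [actors]
#   Jane Doe | Protagonist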
class MyMediaAgent(Agent.Movies):
name = 'Local Metadata (Movies)'
languages = [Locale.Language.NoLanguage]
primary_provider = True
fallback_agent = False
accepts_from = ['com.plexapp.agents.localmedia', 'com.plexapp.agents.none']
contributes_to = ['com.plexapp.agents.none']
##############################################################################
############################# S E A R C H ####################################
##############################################################################
def search(self, results, media, lang, manual=False):
"""Searches local directory for the metadata .info file.
"""
LOGGER.Debug('SEARCH START <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
mediaName = media.name
mediaYear = media.year
mediaHash = media.hash
LOGGER.Debug('searching for name="%s", year="%s", guid="%s", hash="%s"...' %
(str(mediaName), str(mediaYear), str(media.guid), str(mediaHash)))
infoFilepath = getAndTestInfoFilePath(media)
if infoFilepath is None:
return
part = media.items[0].parts[0]
if mediaHash is None:
mediaHash = part.hash
if mediaYear is None:
filename = unicodize(part.file)
modificationTime = os.path.getmtime(filename)
date = datetime.date.fromtimestamp(modificationTime)
mediaYear = date.year
results.Append(MetadataSearchResult(id=mediaHash, name=mediaName, year=mediaYear, score=100, lang=lang))
LOGGER.Debug('SEARCH END <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
##############################################################################
############################# U P D A T E ####################################
##############################################################################
def update(self, metadata, media, lang, force=False):
"""Updates the metadata on a given media item.
"""
LOGGER.Debug('UPDATE START <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
infoFilepath = getAndTestInfoFilePath(media)
if infoFilepath is None:
return
if force:
resetMediaAllMetadata(metadata)
currTagName = None
currTagLines = []
for infoLine in io.open(infoFilepath, 'rt'):
match = MATCHER_INFO_TAG.search(infoLine)
if match:
# It's a tag.
writeTagValueToMetadata(metadata, currTagName, currTagLines)
currTagLines = []
currTagName = match.groups()[0]
elif not isCommentLine(infoLine):
# Content.
currTagLines.append(infoLine)
# Write the last tag data.
writeTagValueToMetadata(metadata, currTagName, currTagLines)
LOGGER.Debug('UPDATE END <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
def resetMediaAllMetadata(metadata):
"""Resets all relevant fields on a passed media metadata object.
"""
metadata.genres.clear()
metadata.countries.clear()
metadata.directors.clear()
metadata.writers.clear()
metadata.roles.clear()
metadata.collections.clear()
metadata.studio = ''
metadata.summary = ''
metadata.title = ''
metadata.year = None
metadata.originally_available_at = None
metadata.original_title = ''
metadata.duration = None
| apache-2.0 | -1,625,302,080,118,431,000 | 27.850318 | 108 | 0.644663 | false | 3.898021 | false | false | false |
haxwithaxe/qutebrowser | tests/helpers/fixtures.py | 1 | 8558 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""pytest fixtures used by the whole testsuite.
See https://pytest.org/latest/fixture.html
"""
import sys
import collections
import itertools
import textwrap
import pytest
import helpers.stubs as stubsmod
from qutebrowser.config import config
from qutebrowser.utils import objreg
from PyQt5.QtCore import QEvent, QSize, Qt
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout
from PyQt5.QtNetwork import QNetworkCookieJar
class WinRegistryHelper:
"""Helper class for win_registry."""
FakeWindow = collections.namedtuple('FakeWindow', ['registry'])
def __init__(self):
self._ids = []
def add_window(self, win_id):
assert win_id not in objreg.window_registry
registry = objreg.ObjectRegistry()
window = self.FakeWindow(registry)
objreg.window_registry[win_id] = window
self._ids.append(win_id)
def cleanup(self):
for win_id in self._ids:
del objreg.window_registry[win_id]
class FakeStatusBar(QWidget):
"""Fake statusbar to test progressbar sizing."""
def __init__(self, parent=None):
super().__init__(parent)
self.hbox = QHBoxLayout(self)
self.hbox.addStretch()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.setAttribute(Qt.WA_StyledBackground, True)
self.setStyleSheet('background-color: red;')
def minimumSizeHint(self):
return QSize(1, self.fontMetrics().height())
@pytest.fixture
def fake_statusbar(qtbot):
"""Fixture providing a statusbar in a container window."""
container = QWidget()
qtbot.add_widget(container)
vbox = QVBoxLayout(container)
vbox.addStretch()
statusbar = FakeStatusBar(container)
# to make sure container isn't GCed
# pylint: disable=attribute-defined-outside-init
statusbar.container = container
vbox.addWidget(statusbar)
container.show()
qtbot.waitForWindowShown(container)
return statusbar
@pytest.yield_fixture
def win_registry():
"""Fixture providing a window registry for win_id 0 and 1."""
helper = WinRegistryHelper()
helper.add_window(0)
yield helper
helper.cleanup()
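# Illustrative sketch (not part of the original suite): a test could use the
# win_registry fixture to register an additional fake window. The leading
# underscore keeps pytest from collecting this example.
def _example_win_registry_usage(win_registry):
    win_registry.add_window(1)
    assert 1 in objreg.window_registry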
@pytest.yield_fixture
def tab_registry(win_registry):
"""Fixture providing a tab registry for win_id 0."""
registry = objreg.ObjectRegistry()
objreg.register('tab-registry', registry, scope='window', window=0)
yield registry
objreg.delete('tab-registry', scope='window', window=0)
def _generate_cmdline_tests():
"""Generate testcases for test_split_binding."""
# pylint: disable=invalid-name
TestCase = collections.namedtuple('TestCase', 'cmd, valid')
separators = [';;', ' ;; ', ';; ', ' ;;']
invalid = ['foo', '']
valid = ['leave-mode', 'hint all']
# Valid command only -> valid
for item in valid:
yield TestCase(''.join(item), True)
# Invalid command only -> invalid
for item in invalid:
yield TestCase(''.join(item), False)
# Invalid command combined with invalid command -> invalid
for item in itertools.product(invalid, separators, invalid):
yield TestCase(''.join(item), False)
# Valid command combined with valid command -> valid
for item in itertools.product(valid, separators, valid):
yield TestCase(''.join(item), True)
# Valid command combined with invalid command -> invalid
for item in itertools.product(valid, separators, invalid):
yield TestCase(''.join(item), False)
# Invalid command combined with valid command -> invalid
for item in itertools.product(invalid, separators, valid):
yield TestCase(''.join(item), False)
# Command with no_cmd_split combined with an "invalid" command -> valid
for item in itertools.product(['bind x open'], separators, invalid):
yield TestCase(''.join(item), True)
@pytest.fixture(params=_generate_cmdline_tests(), ids=lambda e: e.cmd)
def cmdline_test(request):
"""Fixture which generates tests for things validating commandlines."""
# Import qutebrowser.app so all cmdutils.register decorators get run.
import qutebrowser.app
return request.param
@pytest.yield_fixture
def config_stub(stubs):
"""Fixture which provides a fake config object."""
stub = stubs.ConfigStub()
objreg.register('config', stub)
yield stub
objreg.delete('config')
@pytest.yield_fixture
def default_config():
"""Fixture that provides and registers an empty default config object."""
config_obj = config.ConfigManager(configdir=None, fname=None, relaxed=True)
objreg.register('config', config_obj)
yield config_obj
objreg.delete('config')
@pytest.yield_fixture
def key_config_stub(stubs):
"""Fixture which provides a fake key config object."""
stub = stubs.KeyConfigStub()
objreg.register('key-config', stub)
yield stub
objreg.delete('key-config')
@pytest.yield_fixture
def host_blocker_stub(stubs):
"""Fixture which provides a fake host blocker object."""
stub = stubs.HostBlockerStub()
objreg.register('host-blocker', stub)
yield stub
objreg.delete('host-blocker')
@pytest.fixture(scope='session')
def stubs():
"""Provide access to stub objects useful for testing."""
return stubsmod
@pytest.fixture(scope='session')
def unicode_encode_err():
"""Provide a fake UnicodeEncodeError exception."""
return UnicodeEncodeError('ascii', # codec
'', # object
0, # start
2, # end
'fake exception') # reason
@pytest.fixture(scope='session')
def qnam(qapp):
"""Session-wide QNetworkAccessManager."""
from PyQt5.QtNetwork import QNetworkAccessManager
nam = QNetworkAccessManager()
nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible)
return nam
@pytest.fixture
def webpage(qnam):
"""Get a new QWebPage object."""
from PyQt5.QtWebKitWidgets import QWebPage
page = QWebPage()
page.networkAccessManager().deleteLater()
page.setNetworkAccessManager(qnam)
return page
@pytest.fixture
def webview(qtbot, webpage):
"""Get a new QWebView object."""
from PyQt5.QtWebKitWidgets import QWebView
view = QWebView()
qtbot.add_widget(view)
view.page().deleteLater()
view.setPage(webpage)
view.resize(640, 480)
return view
@pytest.fixture
def webframe(webpage):
"""Convenience fixture to get a mainFrame of a QWebPage."""
return webpage.mainFrame()
@pytest.fixture
def fake_keyevent_factory():
"""Fixture that when called will return a mock instance of a QKeyEvent."""
from unittest import mock
from PyQt5.QtGui import QKeyEvent
def fake_keyevent(key, modifiers=0, text='', typ=QEvent.KeyPress):
"""Generate a new fake QKeyPressEvent."""
evtmock = mock.create_autospec(QKeyEvent, instance=True)
evtmock.key.return_value = key
evtmock.modifiers.return_value = modifiers
evtmock.text.return_value = text
evtmock.type.return_value = typ
return evtmock
return fake_keyevent
@pytest.yield_fixture
def cookiejar_and_cache(stubs):
"""Fixture providing a fake cookie jar and cache."""
jar = QNetworkCookieJar()
cache = stubs.FakeNetworkCache()
objreg.register('cookie-jar', jar)
objreg.register('cache', cache)
yield
objreg.delete('cookie-jar')
objreg.delete('cache')
@pytest.fixture
def py_proc():
"""Get a python executable and args list which executes the given code."""
if getattr(sys, 'frozen', False):
pytest.skip("Can't be run when frozen")
def func(code):
return (sys.executable, ['-c', textwrap.dedent(code.strip('\n'))])
return func
| gpl-3.0 | -1,967,434,556,169,450,800 | 28.818815 | 79 | 0.682753 | false | 3.960204 | true | false | false |
imatge-upc/unsupervised-2017-cvprw | mfb_cross_val.py | 1 | 6221 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='1'
from os import listdir
import sys
import time
import argparse
import tools.ops
import numpy as np
import tensorflow as tf
import scipy.misc as sm
from models.mfb_net_cross import *
from tools.utilities import *
from tools.ops import *
parser = argparse.ArgumentParser()
parser.add_argument('-lr', dest='lr', type=float, default='1e-4', help='original learning rate')
args = parser.parse_args()
flags = tf.app.flags
flags.DEFINE_float('lr', args.lr, 'Original learning rate.')
flags.DEFINE_integer('batch_size', 5, 'Batch size.')
flags.DEFINE_integer('num_epochs', 1, 'Number of epochs.') # ~13 min per epoch
flags.DEFINE_integer('num_gpus', 4, 'Number of GPUs.')
flags.DEFINE_integer('seq_length', 16, 'Length of each video clip.')
flags.DEFINE_integer('height', 128, 'Height of video frame.')
flags.DEFINE_integer('width', 128, 'Width of video frame.')
flags.DEFINE_integer('channel', 3, 'Number of channels for each frame.')
flags.DEFINE_integer('num_sample', 1240, 'Number of samples in this dataset.')
flags.DEFINE_float('wd', 0.001, 'Weight decay rate.')
FLAGS = flags.FLAGS
prefix = 'mfb_cross'
model_save_dir = './ckpt/' + prefix
loss_save_dir = './loss'
val_list_path = './dataset/vallist.txt'
dataset_path = './dataset/UCF-101-tf-records'
use_pretrained_model = True
save_predictions = True
def run_validation():
# Create model directory
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
model_filename = "./mfb_baseline_ucf24.model"
tower_ffg_losses, tower_fbg_losses, tower_lfg_losses, tower_feat_losses = [], [], [], []
tower_ffg_m_losses, tower_fbg_m_losses, tower_lfg_m_losses = [], [], []
global_step = tf.get_variable(
'global_step',
[],
initializer=tf.constant_initializer(0),
trainable=False
)
starter_learning_rate = 1e-4
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
100000000, 0.5, staircase=True)
opt = tf.train.AdamOptimizer(learning_rate)
# Create a session for running Ops on the Graph.
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
coord = tf.train.Coordinator()
threads = None
val_list_file = open(val_list_path, 'r')
val_list = val_list_file.read().splitlines()
for i, line in enumerate(val_list):
val_list[i] = os.path.join(dataset_path, val_list[i])
assert(len(val_list) % FLAGS.num_gpus == 0)
num_for_each_gpu = len(val_list) // FLAGS.num_gpus
clips_list, img_masks_list, loss_masks_list = [], [], []
with sess.as_default():
for i in range(FLAGS.num_gpus):
clips, img_masks, loss_masks = input_pipeline(val_list[i*num_for_each_gpu:(i+1)*num_for_each_gpu], \
FLAGS.batch_size, read_threads=1, num_epochs=FLAGS.num_epochs, is_training=False)
clips_list.append(clips)
img_masks_list.append(img_masks)
loss_masks_list.append(loss_masks)
mfb_list = []
with tf.variable_scope('vars') as var_scope:
for gpu_index in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % (gpu_index)):
with tf.name_scope('%s_%d' % ('tower', gpu_index)) as scope:
# construct model
mfb = mfb_net(clips_list[gpu_index], FLAGS.height, FLAGS.width, FLAGS.seq_length, \
FLAGS.channel, FLAGS.batch_size, is_training=False)
mfb_list.append(mfb)
_, first_fg_loss, first_bg_loss, last_fg_loss, feat_loss, _ = \
tower_loss(scope, mfb, clips_list[gpu_index], img_masks_list[gpu_index], loss_masks_list[gpu_index])
var_scope.reuse_variables()
tower_ffg_losses.append(first_fg_loss)
tower_fbg_losses.append(first_bg_loss)
tower_lfg_losses.append(last_fg_loss)
tower_feat_losses.append(feat_loss)
# concatenate the losses of all towers
ffg_loss_op = tf.reduce_mean(tower_ffg_losses)
fbg_loss_op = tf.reduce_mean(tower_fbg_losses)
lfg_loss_op = tf.reduce_mean(tower_lfg_losses)
feat_loss_op = tf.reduce_mean(tower_feat_losses)
# saver for saving checkpoints
saver = tf.train.Saver()
init = tf.initialize_all_variables()
sess.run(init)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
if use_pretrained_model:
print('[*] Loading checkpoint ...')
model = tf.train.latest_checkpoint(model_save_dir)
if model is not None:
saver.restore(sess, model)
print('[*] Loading success: %s!'%model)
else:
print('[*] Loading failed ...')
# Create loss output folder
if not os.path.exists(loss_save_dir):
os.makedirs(loss_save_dir)
loss_file = open(os.path.join(loss_save_dir, prefix+'_val.txt'), 'a+')
total_steps = (FLAGS.num_sample / (FLAGS.num_gpus * FLAGS.batch_size)) * FLAGS.num_epochs
# start queue runner
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
ffg_loss_list, fbg_loss_list, lfg_loss_list, feat_loss_list = [], [], [], []
try:
with sess.as_default():
print('\n\n\n*********** start validating ***********\n\n\n')
step = global_step.eval()
print('[step = %d]'%step)
while not coord.should_stop():
# Run inference steps
ffg_loss, fbg_loss, lfg_loss, feat_loss = \
sess.run([ffg_loss_op, fbg_loss_op, lfg_loss_op, feat_loss_op])
ffg_loss_list.append(ffg_loss)
fbg_loss_list.append(fbg_loss)
lfg_loss_list.append(lfg_loss)
feat_loss_list.append(feat_loss)
print('ffg_loss=%.8f, fbg_loss=%.8f, lfg_loss=%.8f, feat_loss=%.8f' \
%(ffg_loss, fbg_loss, lfg_loss, feat_loss))
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
mean_ffg = np.mean(np.asarray(ffg_loss_list))
mean_fbg = np.mean(np.asarray(fbg_loss_list))
mean_lfg = np.mean(np.asarray(lfg_loss_list))
mean_feat = np.mean(np.asarray(feat_loss_list))
line = '[step=%d] ffg_loss=%.8f, fbg_loss=%.8f, lfg_loss=%.8f, feat_loss=%.8f' \
%(step, mean_ffg, mean_fbg, mean_lfg, mean_feat)
print(line)
loss_file.write(line + '\n')
def main(_):
run_validation()
if __name__ == '__main__':
tf.app.run() | mit | 8,641,592,910,704,234,000 | 31.920635 | 106 | 0.665649 | false | 2.802252 | false | false | false |
raspberrywhite/raspberrywhite | servant/settings.py | 1 | 4748 | """
Django settings for servant project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.core.exceptions import ImproperlyConfigured
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def get_env_variable(var_name):
""" Get the environment variable or return exception """
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable("RASPBERRYWHITE_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = get_env_variable("RASPBERRYWHITE_DEBUG")
TEMPLATE_DEBUG = get_env_variable("RASPBERRYWHITE_TEMPLATE_DEBUG")
ALLOWED_HOSTS = [get_env_variable("RASPBERRYWHITE_ALLOWED_HOSTS")]
PROJECT_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."),
)
REDIS_SSEQUEUE_CONNECTION_SETTINGS = {
'location': '{0}:{1}'.format(get_env_variable("RASPBERRYWHITE_REDIS_HOST"),
get_env_variable("RASPBERRYWHITE_REDIS_PORT")),
'db': get_env_variable("RASPBERRYWHITE_REDIS_DB"),
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'server',
'djangobower',
'gunicorn',
'filer',
'easy_thumbnails',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'server.models.create_player_after_login'
)
LOGIN_REDIRECT_URL = get_env_variable("RASPBERRYWHITE_LOGIN_REDIRECT_URL")
SOCIAL_AUTH_FACEBOOK_KEY = get_env_variable("RASPBERRYWHITE_SOCIAL_AUTH_FACEBOOK_KEY")
SOCIAL_AUTH_FACEBOOK_SECRET = get_env_variable("RASPBERRYWHITE_SOCIAL_AUTH_FACEBOOK_SECRET")
ROOT_URLCONF = 'servant.urls'
WSGI_APPLICATION = 'servant.wsgi.application'
STATIC_URL = get_env_variable("RASPBERRYWHITE_STATIC_URL")
BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_ROOT, 'components')
BOWER_INSTALLED_APPS = (
'jquery#1.9',
'underscore',
'bootstrap',
'jquery.cookie',
'angular'
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "static"
| bsd-3-clause | 515,083,188,460,384,800 | 28.308642 | 92 | 0.724094 | false | 3.279006 | false | false | false |
rado0x54/project-euler | python/problem0067.py | 1 | 1818 | #!/usr/bin/env python3
"""Project Euler - Problem 67 Module"""
import os
def problem67(triangle_fileloc):
"""Problem 67 - Maximum path sum II"""
    # We model a tree node as a dict:
# node = { 'value':123, 'left': {}, 'right': {}, 'depth':1}
root = {}
cur_depth = [root]
d = 0
d_nodelist = []
# read file
with open(triangle_fileloc, 'r') as f:
for line in f:
d_nodelist.append(cur_depth)
counter = 0
next_depth = []
for value in line.split():
cur_depth[counter]['value'] = int(value)
cur_depth[counter]['depth'] = d
if not next_depth:
cur_depth[counter]['left'] = {}
next_depth.append(cur_depth[counter]['left'])
else:
cur_depth[counter]['left'] = next_depth[-1]
cur_depth[counter]['right'] = {}
next_depth.append(cur_depth[counter]['right'])
counter += 1
cur_depth = next_depth
d += 1
    # Bottom-up pass: fold the maximum path sums from the leaves up to the root
d -= 1
while d >= 0:
for x in d_nodelist[d]:
cur_max = x['value']
if ('cur_max' in x['left'] and 'cur_max' in x['right']):
if (x['left']['cur_max'] > x['right']['cur_max']):
cur_max += x['left']['cur_max']
else:
cur_max += x['right']['cur_max']
x['cur_max'] = cur_max
d -= 1
return root['cur_max']
FILENAME = 'problem0067.txt'
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
def run():
"""Default Run Method"""
return problem67(os.path.join(__location__, FILENAME))
if __name__ == '__main__':
print("Result: ", run())
| mit | -7,513,407,627,659,273,000 | 24.605634 | 68 | 0.471397 | false | 3.740741 | false | false | false |
heromod/migrid | user-projects/miginterface/examples/mpi/mpi_example1.py | 1 | 1613 | #!/usr/bin/python
"""
An example script for running an MPI grid job using the mig interface module.
"""
import miginterface as mig
import time, sys
def main():
"""
    Run an MPI job on a grid resource. To run in local mode, please install MPI.
"""
# mig.debug_mode_on() # uncomment to enable debug print outs
# mig.local_mode_on() # uncomment to enable local mode execution
mig.test_connection() # Check if we can connect to the MiG server
mpi_file = "example.c" # mpi program source file
    # The shell commands to execute on the grid resource using 4 processes. We need to compile it on the resource first.
cmds = ["mpicc -O2 example.c -o example", "$MPI_WRAP mpirun -np 4 ./example Hello"]
# specify that we need require MPI as a runtime env and use the DIKU vgrid cluster
specifications = {"RUNTIMEENVIRONMENT":"MPI-WRAP-2.0", "VGRID":"DIKU"}
# Create and submit the grid job
job_id = mig.create_job(cmds, input_files=mpi_file, resource_specifications=specifications)
print "\nJob (ID : %s) submitted. \n\n" % job_id
# Wait for the job to finish while monitoring the status
polling_frequency = 10 # seconds
while not mig.job_finished(job_id):
job_info = mig.job_info(job_id) # get an info dictionary
print 'Grid job : %(ID)s \t %(STATUS)s ' % job_info
time.sleep(polling_frequency) # wait a while before polling again
print mig.job_output(job_id)
if __name__ == "__main__":
if "-l" in sys.argv:
mig.local_mode_on()
if "-d" in sys.argv:
mig.debug_mode_on()
main()
| gpl-2.0 | -6,414,359,659,538,479,000 | 34.065217 | 119 | 0.652201 | false | 3.552863 | false | false | false |
wonder-sk/QGIS | python/plugins/processing/algs/lidar/lastools/lasindexPro.py | 5 | 3045 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lasindexPro.py
---------------------
Date : October 2014 and May 2016
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from future import standard_library
standard_library.install_aliases()
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
class lasindexPro(LAStoolsAlgorithm):
MOBILE_OR_TERRESTRIAL = "MOBILE_OR_TERRESTRIAL"
APPEND_LAX = "APPEND_LAX"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasindexPro')
self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
self.addParametersPointInputFolderGUI()
self.addParameter(ParameterBoolean(lasindexPro.APPEND_LAX,
self.tr("append *.lax file to *.laz file"), False))
self.addParameter(ParameterBoolean(lasindexPro.MOBILE_OR_TERRESTRIAL,
self.tr("is mobile or terrestrial LiDAR (not airborne)"), False))
self.addParametersAdditionalGUI()
self.addParametersCoresGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
if (LAStoolsUtils.hasWine()):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasindex.exe")]
else:
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasindex")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
if self.getParameterValue(lasindexPro.APPEND_LAX):
commands.append("-append")
if self.getParameterValue(lasindexPro.MOBILE_OR_TERRESTRIAL):
commands.append("-tile_size")
commands.append("10")
commands.append("-maximum")
commands.append("-100")
self.addParametersAdditionalCommands(commands)
self.addParametersCoresCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 | 9,047,205,549,346,316,000 | 43.130435 | 108 | 0.568473 | false | 4.393939 | false | false | false |
HideoYukutake/juniperberry | juniperberry/srx.py | 1 | 8852 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module emulating a Juniper SRX
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A module for testing the addition of policies
to a Juniper SRX.
:copyright:
:license:
"""
import re
from collections.abc import Sequence
from ipaddress import IPv4Network, IPv4Address
SourceAddress = "source_address"
DestinationAddress = "destination_address"
Application = "application"
ANY = "any"
Permit = "permit"
Deny = "deny"
Reject = "reject"
Log = "log"
Count = "count"
class PsuedoSRX(object):
"""PsuedoSRX
    Emulates a single SRX instance.
"""
def __init__(self, device_name):
super(PsuedoSRX, self).__init__()
self.device_name = device_name
self.address_books = {}
self.set_address_book(ANY)
self.applications = {}
self.set_applications(ANY)
self.zones = {}
self.policies = []
def set_address_book(self, book_name, nw_address=None):
book = self.__get_book(book_name)
if nw_address:
book.append(nw_address)
def set_zone(self, zone_name):
zone = Zone(zone_name)
self.zones[zone.name] = zone
def set_applications(self, app_name, protocol=None,
src_or_dst="destination-port", port=None):
app = self.__get_app(app_name)
if protocol:
app.protocol = protocol
if port:
app.port = port
if src_or_dst == "source-port":
app.is_src = True
def set_policy_with_addressbook(self, policy_name, src_or_dst, book_name):
policy = self.__get_policy(policy_name)
book = self.__get_book(book_name)
policy.add_address_book(src_or_dst, book)
def set_policy_with_application(self, policy_name, app_name):
policy = self.__get_policy(policy_name)
app = self.__get_app(app_name)
policy.add_application(app)
def set_policy_with_action(self, policy_name, action):
policy = self.__get_policy(policy_name)
policy.add_action(action)
def insert_policy(self, policy_name, insert_name, before_or_after="before"):
policy = self.__get_policy(policy_name)
self.policies.remove(policy)
if before_or_after == "before":
target = self.__get_policy(insert_name)
i = self.policies.index(target)
else:
target = self.__get_policy(insert_name)
i = self.policies.index(target) + 1
self.policies.insert(i, policy)
def packet_in(self, protocol, src_addr, src_port, dest_addr, dest_port):
        # Handle an incoming packet: walk the policies in order and return
        # the actions of the first policy that matches.
for policy in self.policies:
if policy.lookup(protocol, src_addr, src_port, dest_addr, dest_port):
return policy.action()
return [Deny]
def __get_policy(self, policy_name):
policy = None
for p in self.policies:
if p.policy_name == policy_name:
policy = p
break
if not policy:
policy = Policy(policy_name)
self.policies.append(policy)
return policy
def __get_book(self, book_name):
if book_name in self.address_books:
return self.address_books[book_name]
book = AddressBook(book_name)
self.address_books[book_name] = book
return book
def __get_app(self, app_name):
if app_name in self.applications:
return self.applications[app_name]
app = Application(app_name)
self.applications[app_name] = app
return app
def __repr__(self):
"""
jsonを返すようにしたい
"""
return "{0}:{1}".format(self.device_name, self.applications)
class Zone(object):
"""Zone
インターフェースをエミュレートするのが困難なので
Zoneも実体としては定義していません。
"""
def __init__(self, name):
super(Zone, self).__init__()
self.name = name
self.address_books = []
def attach(self, address_book):
self.address_books.append(address_book)
class AddressBook(Sequence):
"""AddressBook
    An address book on a Junos SRX.
:param book_name: this address-book's name
:param nw: the nw that this address-book has.
"""
def __init__(self, book_name, nw=None):
super(AddressBook, self).__init__()
self.book_name = book_name
if book_name == ANY:
nw = '0.0.0.0/0'
self.nws = []
if nw:
self.nws.append(IPv4Network(nw))
def append(self, nw):
self.nws.append(IPv4Network(nw))
def __getitem__(self, key):
return self.nws[key]
def __len__(self):
return len(self.nws)
def __contains__(self, item):
n = IPv4Address(item)
for nw in self.nws:
if n in nw:
return True
return False
def __repr__(self):
s = 'AddressBook {0}: '.format(self.book_name)
for _nw in self.nws:
s += '{0} '.format(_nw.exploded)
return s
class Application(object):
"""Application"""
port_pattern_range = re.compile('(\d+)-(\d+)')
port_pattern_simple = re.compile('\d+')
port_pattern_junos_name = re.compile('junos-[a-zA-Z]+')
junos_app_name_mapper = {'junos-ssh':22, 'junos-http':80}
    @classmethod
    def __to_number(cls, junos_app_name):
        return cls.junos_app_name_mapper[junos_app_name]
def __init__(self, app_name):
super(Application, self).__init__()
self.app_name = app_name
self.is_any = False
self.is_src = False
if app_name == ANY:
self.is_any = True
self._protocol = ""
self._port = None
@property
def protocol(self):
return self._protocol
@protocol.setter
def protocol(self, val):
self._protocol = val
@property
def port(self):
return self._port
@port.setter
def port(self, ports):
m = self.port_pattern_range.match(ports)
if m:
self._port = range(int(m.group(1)), int(m.group(2))+1)
return
m = self.port_pattern_simple.match(ports)
if m:
self._port = []
self._port.append(int(m.group(0)))
return
if self.port_pattern_junos_name.match(ports):
self._port = []
self._port.append(self.__to_number(ports))
return
        else:
            raise ValueError("unsupported port specification: {0}".format(ports))
def match(self, proto, src_port=None, dest_port=None):
if self.is_any:
return True
if (proto == self._protocol) and self._port:
if dest_port in self._port:
return True
if src_port in self._port:
return True
return False
def __repr__(self):
return "{0} {1} {2}".format(self.app_name, self._protocol, self._port)
class Policy(object):
"""Policy
"""
def __init__(self, policy_name):
super(Policy, self).__init__()
self.policy_name = policy_name
self.source_addresses = []
self.destination_addresses = []
self.applications = []
self.actions = []
def add_from_zone(self, from_zone):
self.from_zone = from_zone
def add_to_zone(self, to_zone):
self.to_zone = to_zone
def add_address_book(self, src_or_dst, book):
if src_or_dst == "source-address":
self.source_addresses.append(book)
else:
self.destination_addresses.append(book)
def add_application(self, app):
self.applications.append(app)
def add_action(self, action):
self.actions.append(action)
def lookup(self, protocol, src_addr, src_port, dest_addr, dest_port):
if not self.__is_in_source_addresses(src_addr):
return False
if not self.__is_in_destination_addresses(dest_addr):
return False
if not self.__is_in_application(protocol, src_port, dest_port):
return False
return True
def action(self):
return self.actions
def __is_in_source_addresses(self, src_addr):
for book in self.source_addresses:
if src_addr in book:
return True
return False
def __is_in_destination_addresses(self, dest_addr):
for book in self.destination_addresses:
if dest_addr in book:
return True
return False
def __is_in_application(self, protocol, src_port, dest_port):
for app in self.applications:
if app.match(protocol, src_port, dest_port):
return True
return False
def __repr__(self):
return self.policy_name
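# Illustrative usage sketch (not part of the original module). The policy,
# address-book and application names below are made up for demonstration only.
if __name__ == "__main__":
    srx = PsuedoSRX("lab-srx")
    # Destination hosts live in 192.0.2.0/24; HTTP is TCP port 80.
    srx.set_address_book("web-servers", "192.0.2.0/24")
    srx.set_applications("tcp-80", protocol="tcp", port="80")
    srx.set_policy_with_addressbook("allow-web", "source-address", ANY)
    srx.set_policy_with_addressbook("allow-web", "destination-address", "web-servers")
    srx.set_policy_with_application("allow-web", "tcp-80")
    srx.set_policy_with_action("allow-web", Permit)
    # A TCP packet to 192.0.2.10:80 matches the policy and is permitted.
    print(srx.packet_in("tcp", "198.51.100.5", 40000, "192.0.2.10", 80))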
| gpl-3.0 | -6,839,169,550,185,305,000 | 26.857605 | 81 | 0.567031 | false | 3.499187 | false | false | false |
nuoya/1001albums | var/parse.py | 1 | 1119 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import glob
import json
PWD = os.path.dirname(__file__)
DATA_DIR = os.path.join(PWD, 'data')
def parse_doc(doc_path):
id = os.path.splitext(os.path.basename(doc_path))[0]
data = json.load(open(doc_path, 'r'))
assert data['type'] == 'album'
assert data['id'] == id
name = data['name']
artists = data['artists']
images = data.get('images', [])
if not images:
print 'images not found for "{}"'.format(name)
# TODO make it work
return {}
release_date = data['release_date']
uri = data['uri']
assert uri == 'spotify:album:{}'.format(id)
return {'id': id,
'name': name,
'images': images,
'release_date': release_date,
'uri': uri,
'artists': artists}
def main():
docs = glob.glob('{}/*.json'.format(DATA_DIR))
data_list = filter(bool, map(parse_doc, docs))
open(os.path.join(PWD, 'data.json'), 'w').write(
json.dumps({album['id']: album for album in data_list}, indent=2))
if __name__ == '__main__':
main()
| mit | -8,690,449,330,588,709,000 | 25.642857 | 74 | 0.553172 | false | 3.243478 | false | false | false |
fdslight/fdslight | pywind/lib/rdb.py | 1 | 2361 | #!/usr/bin/env python3
"""关系型数据库"""
class sql_helper(object):
    # table name prefix
__data_list = None
__prefix = ""
def __init__(self, prefix):
self.__data_list = []
self.__prefix = prefix
def select(self, seq):
self.__data_list.append(
"SELECT %s" % ",".join(seq)
)
return self
def where(self, where):
self.__data_list.append(" WHERE %s" % where)
return self
def from_(self, table):
self.__data_list.append(
" FROM %s%s" % (self.__prefix, table,)
)
return self
def delete(self):
self.__data_list.append(
"DELETE"
)
return self
def insert(self, table, **kwargs):
self.__data_list += [
"INSERT INTO ",
"%s%s" % (self.__prefix, table),
]
fields = []
values = []
for k, v in kwargs.items():
fields.append(k)
values.append(v)
if fields: self.__data_list.append(
" (%s)" % ",".join(fields)
)
self.__data_list.append(
" VALUES (%s)" % ",".join([str(v) for v in values])
)
return self
def update(self, table, **kwargs):
seq = []
for k, v in kwargs.items(): seq.append("%s=%s" % (k, v,))
self.__data_list.append(
"UPDATE %s%s SET %s" % (self.__prefix, table, ",".join(seq))
)
return self
def get_sql(self):
tmplist = []
while 1:
try:
tmplist.append(self.__data_list.pop(0))
except IndexError:
break
tmplist.append(";")
return "".join(tmplist)
def append(self, sts):
self.__data_list.append(" %s" % sts)
return self
def limit(self, limit):
self.__data_list.append(" LIMIT %s" % limit)
return self
def offset(self, offset):
self.__data_list.append(" OFFSET %s" % offset)
return self
def build_value_map(field_seq, value_seq):
"""Python默认返回tuple结果,没有包含字段,此函数生成 `字段->值`映射
"""
length = len(field_seq)
ret_dict = {}
for n in range(length):
field = field_seq[n]
value = value_seq[n]
ret_dict[field] = value
return ret_dict
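# Illustrative usage sketch (not part of the original module); the table and
# column names below are invented purely for demonstration.
if __name__ == "__main__":
    helper = sql_helper("tbl_")
    sql = helper.select(("id", "name")).from_("users").where("id=1").limit(10).get_sql()
    # Produces: SELECT id,name FROM tbl_users WHERE id=1 LIMIT 10;
    print(sql)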
| bsd-2-clause | -8,281,248,635,768,071,000 | 21.762376 | 72 | 0.479339 | false | 3.478064 | false | false | false |
timqian/sms-tools | lectures/5-Sinusoidal-model/plots-code/sineModel-anal-synth.py | 24 | 1483 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
x1 = x[0:50000]
w = np.blackman(2001)
N = 2048
H = 500
t = -90
minSineDur = .01
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
tfreq, tmag, tphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
maxplotfreq = 3000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x1.size)/float(fs), x1, 'b', lw=1.5)
plt.axis([0,x1.size/float(fs),min(x1),max(x1)])
plt.title('x (bendir.wav)')
plt.subplot(3,1,2)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('f_t, sine frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b', lw=1.5)
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
UF.wavwrite(y, fs, 'bendir-sine-synthesis.wav')
plt.savefig('sineModel-anal-synth.png')
plt.show()
| agpl-3.0 | 1,905,779,458,073,882,600 | 28.66 | 110 | 0.678355 | false | 2.376603 | false | true | false |
SpheMakh/Fidelity | image_stats/stats.py | 1 | 6205 | #!/usr/bin/env python
import numpy
import pyfits
import pylab
import os
import sys
from astLib.astWCS import WCS
import Tigger
from scipy.optimize import curve_fit
import argparse
#image = sys.argv[1]
#catalog = sys.argv[2]
def reshape_data(image,zoom=1):
""" Reshape FITS data to (stokes,freq,npix_ra,npix_dec)
"""
with pyfits.open(image) as hdu:
data = hdu[0].data
hdr = hdu[0].header
shape = list(data.shape)
ndim = len(shape)
wcs = WCS(hdr,mode='pyfits')
if ndim<2:
raise ValueError('The FITS file needs at least two dimensions')
# This is the shape I want the data in
want = (
['STOKES',0],
['FREQ',1],
['RA',2],
['DEC',3],
)
# Assume RA,DEC is first (FITS) or last two (NUMPY)
if ndim>3:
for ctype,ind in want[:2]:
for axis in range(1,ndim+1):
if hdr['CTYPE%d'%axis].startswith(ctype):
want[ind].append(ndim-axis)
if want[0][-1] == want[1][-2] and want[0][-2] == want[1][-1]:
tmp = shape[0]
shape[0] = shape[1]
shape[1] = tmp
data = numpy.reshape(data,shape)
if ndim ==3:
if not hdr['CTYPE3'].startswith('FREQ'):
data = data[0,...]
elif ndim>4:
raise ValueError('FITS file has more than 4 axes. Aborting')
shape = data.shape
imslice = [slice(None)]*len(shape)
lx,ly = [ (x-int(x*zoom)) for x in shape[-2:] ]
hx,hy = [ (low + int(x*zoom)) for x,low in zip([lx,ly],shape[-2:]) ]
imslice[-1] = slice(lx,hx)
imslice[-2] = slice(ly,hy)
return data[imslice], wcs
def local_variance(data,catalog,wcs,step=20,averge_freq=True):
""" Calculates the local varience at source positions of catalog.
"""
shape = data.shape
ndim = len(shape)
if ndim==4:
data = data[0,...].sum(0)
elif ndim==3:
data = data.sum(0)
model = Tigger.load(catalog)
positions_sky = [map(lambda rad: numpy.rad2deg(rad),(src.pos.ra,src.pos.dec)) for src in model.sources]
positions = [wcs.wcs2pix(*pos) for pos in positions_sky]
    if isinstance(step,(tuple,list,int)):
        if isinstance(step,int):
            step = [step,step]
    # Drop sources that fall outside the image or whose local window would
    # extend past the image boundary.
    for pos in sorted(positions):
        x,y = pos
        if x>shape[-2] or y>shape[-1] or (numpy.array(pos)<0).any():
            positions.remove(pos)
        if (y+step[1]>shape[-1]) or (y-step[1]<0):
            if pos in positions:
                positions.remove(pos)
        if (x+step[0]>shape[-2]) or (x-step[0]<0):
            if pos in positions:
                positions.remove(pos)
_std = []
for x,y in positions:
subrgn = data[x-step[0]:x+step[0],y-step[1]:y+step[1]]
_std.append(subrgn.std())
return _std
def hist(data,nbins=100,func=None,save=None,show=False):
    func = func or gaussian
hist,bins = numpy.histogram(data,bins=nbins)
x_min = min(bins)
x_max = max(bins)
hh = x_max - x_min
xx = numpy.linspace(x_min,x_max,nbins) + hh/2
# Initial guess
sigma = data.std()
peak = hist.max()
mean = data.mean() + hh/2
parms,pcov = curve_fit(func,xx,hist,p0=[peak,mean,sigma])
# Determine error in fit
#residual = lambda params,x,data: data - func(x,*params)
err = numpy.sqrt(numpy.diag(pcov))
pylab.figure(figsize=(15,10))
pylab.plot(xx-hh/2,hist,'.')
pylab.plot(xx-hh/2,func(xx,*parms))
pylab.grid()
func_name = func.func_name
func_name = func_name[0].upper() + func_name[1:]
title_string = 'Fitted a %s function with best fit parameters:'%func_name
title_string += ' \n Peak=%.4g $\pm$ %.4g, $\mu$=%.4g $\pm$ %.4g, $\sigma$=%.4g $\pm$ %.4g'%(parms[0],err[0],parms[1],err[1],parms[2],err[2])
pylab.title(title_string)
if show:
pylab.show()
if save:
pylab.savefig(save or 'fidelity_stats.png')
pylab.clf()
def estimate_noise(data):
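    # Noise estimate from the negative pixels only: assuming the noise is
    # symmetric about zero and real emission is positive, mirroring the
    # negative pixels gives an (approximately) source-free noise sample.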
negative = data[data<0]
return numpy.concatenate([negative,-negative]).std()
def gaussian(x,a0,mu,sigma):
return a0*numpy.exp(-(x-mu)**2/(2*sigma**2))
def laplace(x,a0,mu,sigma):
return a0*numpy.exp(-abs(x-mu)/sigma)
def cauchy(x,a0,mu,sigma):
return a0*(sigma**2 / ((x-mu)**2 + sigma**2) )
def maxwell(x,a0,mu,sigma):
return a0*x**2*numpy.exp(-(x-mu)**2/(2*sigma**2))
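# Distributions that can be fitted to the pixel-amplitude histogram; selected
# on the command line via the --fit option.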
_FUNCS = dict(gaussian=gaussian,laplace=laplace,cauchy=cauchy,maxwell=maxwell)
if __name__=='__main__':
for i, arg in enumerate(sys.argv):
if (arg[0] == '-') and arg[1].isdigit(): sys.argv[i] = ' ' + arg
parser = argparse.ArgumentParser(description='Routines to measure image statistics')
add = parser.add_argument
add('image', help='Input FITS image')
add('-cat', '--catlog', dest='catalog', help='Measure image stats on source locations.')
add('-pad', '--pixel-amp-dist', dest='pix_dist', help='Fit a distribution to the pixel amplitute histogram')
add('-fit', '--fit', dest='fit', help='Function to to the pixel amplitude histogram',default='gaussian',choices=_FUNCS)
add('-s', '--show', dest='show', action='store_true', help='Show pixel amplitude fit')
add('-S', '--save', dest='save', help='Filename for pixel amplitude distribution plots',
default='fidelity_stats.png')
add('-nb', '--nbins', dest='nbins', type=int, help='Show pixel amplitude fit', default=100)
add('-n', '--noise', dest='noise', action="store_true", help='Returns noise estimate')
add('-z', '--zoom', dest='zoom', type=float, default=1.0, help='Percentage of inner region to consider for analysis')
opts = parser.parse_args()
data, wcs = reshape_data(opts.image, zoom=opts.zoom)
hist(data=data, nbins=opts.nbins, func=_FUNCS[opts.fit], show=opts.show, save=opts.save)
catalog = opts.catalog
if catalog:
_std = local_variance(data=data, wcs=wcs, step=20, catalog=catalog)
pylab.plot(_std, "-x")
pylab.plot([estimate_noise(data)]*len(_std))
pylab.show()
if opts.noise:
noise = estimate_noise(data)
print "Noise estimate is %.4g mJy"%(noise*1e3)
| gpl-2.0 | 8,433,366,804,513,779,000 | 31.830688 | 145 | 0.584851 | false | 3.107161 | false | false | false |
mhvk/astropy | astropy/visualization/wcsaxes/wcsapi.py | 8 | 13534 | # Functions/classes for WCSAxes related to APE14 WCSes
import numpy as np
from astropy.coordinates import SkyCoord, ICRS, BaseCoordinateFrame
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs.utils import local_partial_pixel_derivatives
from astropy.wcs.wcsapi import SlicedLowLevelWCS
from .frame import RectangularFrame, EllipticalFrame, RectangularFrame1D
from .transforms import CurvedTransform
__all__ = ['transform_coord_meta_from_wcs', 'WCSWorld2PixelTransform',
'WCSPixel2WorldTransform']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
def transform_coord_meta_from_wcs(wcs, frame_class, slices=None):
if slices is not None:
slices = tuple(slices)
if wcs.pixel_n_dim > 2:
if slices is None:
raise ValueError("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
elif len(slices) != wcs.pixel_n_dim:
raise ValueError("'slices' should have as many elements as WCS "
"has pixel dimensions (should be {})"
.format(wcs.pixel_n_dim))
is_fits_wcs = isinstance(wcs, WCS) or (isinstance(wcs, SlicedLowLevelWCS) and isinstance(wcs._wcs, WCS))
coord_meta = {}
coord_meta['name'] = []
coord_meta['type'] = []
coord_meta['wrap'] = []
coord_meta['unit'] = []
coord_meta['visible'] = []
coord_meta['format_unit'] = []
for idx in range(wcs.world_n_dim):
axis_type = wcs.world_axis_physical_types[idx]
axis_unit = u.Unit(wcs.world_axis_units[idx])
coord_wrap = None
format_unit = axis_unit
coord_type = 'scalar'
if axis_type is not None:
axis_type_split = axis_type.split('.')
if "pos.helioprojective.lon" in axis_type:
coord_wrap = 180.
format_unit = u.arcsec
coord_type = "longitude"
elif "pos.helioprojective.lat" in axis_type:
format_unit = u.arcsec
coord_type = "latitude"
elif "pos.heliographic.stonyhurst.lon" in axis_type:
coord_wrap = 180.
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.stonyhurst.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos.heliographic.carrington.lon" in axis_type:
coord_wrap = 360.
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.carrington.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos" in axis_type_split:
if "lon" in axis_type_split:
coord_type = "longitude"
elif "lat" in axis_type_split:
coord_type = "latitude"
elif "ra" in axis_type_split:
coord_type = "longitude"
format_unit = u.hourangle
elif "dec" in axis_type_split:
coord_type = "latitude"
elif "alt" in axis_type_split:
coord_type = "longitude"
elif "az" in axis_type_split:
coord_type = "latitude"
elif "long" in axis_type_split:
coord_type = "longitude"
coord_meta['type'].append(coord_type)
coord_meta['wrap'].append(coord_wrap)
coord_meta['format_unit'].append(format_unit)
coord_meta['unit'].append(axis_unit)
# For FITS-WCS, for backward-compatibility, we need to make sure that we
# provide aliases based on CTYPE for the name.
if is_fits_wcs:
name = []
if isinstance(wcs, WCS):
name.append(wcs.wcs.ctype[idx].lower())
name.append(wcs.wcs.ctype[idx][:4].replace('-', '').lower())
elif isinstance(wcs, SlicedLowLevelWCS):
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]].lower())
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]][:4].replace('-', '').lower())
if name[0] == name[1]:
name = name[0:1]
if axis_type:
if axis_type not in name:
name.insert(0, axis_type)
if wcs.world_axis_names and wcs.world_axis_names[idx]:
if wcs.world_axis_names[idx] not in name:
name.append(wcs.world_axis_names[idx])
name = tuple(name) if len(name) > 1 else name[0]
else:
name = axis_type or ''
if wcs.world_axis_names:
name = (name, wcs.world_axis_names[idx]) if wcs.world_axis_names[idx] else name
coord_meta['name'].append(name)
coord_meta['default_axislabel_position'] = [''] * wcs.world_n_dim
coord_meta['default_ticklabel_position'] = [''] * wcs.world_n_dim
coord_meta['default_ticks_position'] = [''] * wcs.world_n_dim
# If the world axis has a name use it, else display the world axis physical type.
fallback_labels = [name[0] if isinstance(name, (list, tuple)) else name for name in coord_meta['name']]
coord_meta['default_axis_label'] = [wcs.world_axis_names[i] or fallback_label for i, fallback_label in enumerate(fallback_labels)]
transform_wcs, invert_xy, world_map = apply_slices(wcs, slices)
transform = WCSPixel2WorldTransform(transform_wcs, invert_xy=invert_xy)
for i in range(len(coord_meta['type'])):
coord_meta['visible'].append(i in world_map)
inv_all_corr = [False] * wcs.world_n_dim
m = transform_wcs.axis_correlation_matrix.copy()
if invert_xy:
inv_all_corr = np.all(m, axis=1)
m = m[:, ::-1]
if frame_class is RectangularFrame:
for i, spine_name in enumerate('bltr'):
pos = np.nonzero(m[:, i % 2])[0]
# If all the axes we have are correlated with each other and we
# have inverted the axes, then we need to reverse the index so we
# put the 'y' on the left.
if inv_all_corr[i % 2]:
pos = pos[::-1]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta['default_axislabel_position'][index] = spine_name
coord_meta['default_ticklabel_position'][index] = spine_name
coord_meta['default_ticks_position'][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 2:
for index in world_map:
coord_meta['default_ticks_position'][index] = 'bltr'
elif frame_class is RectangularFrame1D:
derivs = np.abs(local_partial_pixel_derivatives(transform_wcs, *[0]*transform_wcs.pixel_n_dim,
normalize_by_world=False))[:, 0]
for i, spine_name in enumerate('bt'):
# Here we are iterating over the correlated axes in world axis order.
# We want to sort the correlated axes by their partial derivatives,
# so we put the most rapidly changing world axis on the bottom.
pos = np.nonzero(m[:, 0])[0]
order = np.argsort(derivs[pos])[::-1] # Sort largest to smallest
pos = pos[order]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta['default_axislabel_position'][index] = spine_name
coord_meta['default_ticklabel_position'][index] = spine_name
coord_meta['default_ticks_position'][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 1:
for index in world_map:
coord_meta['default_ticks_position'][index] = 'bt'
elif frame_class is EllipticalFrame:
if 'longitude' in coord_meta['type']:
lon_idx = coord_meta['type'].index('longitude')
coord_meta['default_axislabel_position'][lon_idx] = 'h'
coord_meta['default_ticklabel_position'][lon_idx] = 'h'
coord_meta['default_ticks_position'][lon_idx] = 'h'
if 'latitude' in coord_meta['type']:
lat_idx = coord_meta['type'].index('latitude')
coord_meta['default_axislabel_position'][lat_idx] = 'c'
coord_meta['default_ticklabel_position'][lat_idx] = 'c'
coord_meta['default_ticks_position'][lat_idx] = 'c'
else:
for index in range(len(coord_meta['type'])):
if index in world_map:
coord_meta['default_axislabel_position'][index] = frame_class.spine_names
coord_meta['default_ticklabel_position'][index] = frame_class.spine_names
coord_meta['default_ticks_position'][index] = frame_class.spine_names
return transform, coord_meta
def apply_slices(wcs, slices):
"""
Take the input WCS and slices and return a sliced WCS for the transform and
a mapping of world axes in the sliced WCS to the input WCS.
"""
if isinstance(wcs, SlicedLowLevelWCS):
world_keep = list(wcs._world_keep)
else:
world_keep = list(range(wcs.world_n_dim))
# world_map is the index of the world axis in the input WCS for a given
# axis in the transform_wcs
world_map = list(range(wcs.world_n_dim))
transform_wcs = wcs
invert_xy = False
if slices is not None:
wcs_slice = list(slices)
wcs_slice[wcs_slice.index("x")] = slice(None)
if 'y' in slices:
wcs_slice[wcs_slice.index("y")] = slice(None)
invert_xy = slices.index('x') > slices.index('y')
transform_wcs = SlicedLowLevelWCS(wcs, wcs_slice[::-1])
world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep)
return transform_wcs, invert_xy, world_map
def wcsapi_to_celestial_frame(wcs):
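    # Infer the celestial frame from the high-level object classes advertised
    # by the WCS; implicitly returns None when no celestial axes are present.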
for cls, _, kwargs, *_ in wcs.world_axis_object_classes.values():
if issubclass(cls, SkyCoord):
return kwargs.get('frame', ICRS())
elif issubclass(cls, BaseCoordinateFrame):
return cls(**kwargs)
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
raise ValueError('Only pixel_n_dim =< 2 is supported')
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_in = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs is other.wcs and
self.invert_xy == other.invert_xy)
@property
def input_dims(self):
return self.wcs.world_n_dim
def transform(self, world):
# Convert to a list of arrays
world = list(world.T)
if len(world) != self.wcs.world_n_dim:
raise ValueError(f"Expected {self.wcs.world_n_dim} world coordinates, got {len(world)} ")
if len(world[0]) == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.world_to_pixel_values(*world)
if self.invert_xy:
pixel = pixel[::-1]
pixel = np.array(pixel).T
return pixel
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
has_inverse = True
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
raise ValueError('Only pixel_n_dim =< 2 is supported')
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_out = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs is other.wcs and
self.invert_xy == other.invert_xy)
@property
def output_dims(self):
return self.wcs.world_n_dim
def transform(self, pixel):
# Convert to a list of arrays
pixel = list(pixel.T)
if len(pixel) != self.wcs.pixel_n_dim:
raise ValueError(f"Expected {self.wcs.pixel_n_dim} world coordinates, got {len(pixel)} ")
if self.invert_xy:
pixel = pixel[::-1]
if len(pixel[0]) == 0:
world = np.zeros((0, self.wcs.world_n_dim))
else:
world = self.wcs.pixel_to_world_values(*pixel)
if self.wcs.world_n_dim == 1:
world = [world]
world = np.array(world).T
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, invert_xy=self.invert_xy)
| bsd-3-clause | 5,650,657,522,926,521,000 | 35.38172 | 134 | 0.573445 | false | 3.698825 | false | false | false |
jhomble/electron435 | python_causal_compiler/compiler/Imitation_Compiler.py | 1 | 33067 | ## Imitation Compiler
#
# @filename Imitation_Compiler.py
# @author Ben Mariano
# @date 5/9/2017
# Library Imports
import operator
import functools
import string
import random
# Local Imports
import Token
from Lexer import *
import Parser
from NodeVisitor import NodeVisitor
## Imitation Compiler
#
# @brief Compiles the second of the two required python scripts. This
# script traverses the CO-PCT tree in reverse using PyHop.
class Imitation_Compiler(NodeVisitor):
## Constructor
#
# @param parser Parser that will provide the AST to be compiled
def __init__(self, parser):
## @var parser
# Converts input into AST
self.parser = parser
## @var methods_dict
# Dictionary of methods where key is method name and
# value is a 3-tuple (list of arguments, cond, returns)
self.methods_dict = {}
## @var intention
# Current intention for access from conditional check
self.intention = None
## @var method_var_equivs
# Variable equivalents per method. This keeps track of which
# variable must be equal based on conditionals.
self.method_var_equivs = {}
## Visit Literal
#
# @brief Returns a tuple of the string 'LITERAL' and the literal
# value
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'LITERAL', literal_value
def visit_Literal(self, node):
return 'LITERAL', str(self.visit(node.name))
## Visit Boolean
#
# @brief Returns a four-tuple of the form 'UNIT', e1, comp, e2 where
    # e1 and e2 are tuples representing either literals, variables
# or keyword phrases
#
# @param node AST instance to be evaluated
#
# @retval (String, Tuple, String, Tuple) tuple of the form 'UNIT', expr1, op, expr2
def visit_Boolean(self, node):
if node.op.type == EQUALS :
return 'UNIT', (self.visit(node.e1), "==", self.visit(node.e2))
elif node.op.type == LESSTHAN :
return 'UNIT', (self.visit(node.e1), "<", self.visit(node.e2))
elif node.op.type == GREATERTHAN :
return 'UNIT', (self.visit(node.e1), ">", self.visit(node.e2))
elif node.op.type == GREATEREQUAL :
return 'UNIT', (self.visit(node.e1), ">=", self.visit(node.e2))
elif node.op.type == LESSEQUAL :
return 'UNIT', (self.visit(node.e1), "<=", self.visit(node.e2))
elif node.op.type == NOTEQUAL :
return 'UNIT', (self.visit(node.e1), "!=", self.visit(node.e2))
elif node.op.type == PYTHON :
return 'UNIT', (self.visit(node.e1), "PYTHON", None)
## Visit Boolean Expression
#
# @brief Returns a three tuple of the form b_left, op, b_right
#
# @param node AST instance to be evaluated
#
# @retval (Tuple, String, Tuple) tuple of the form BooleanExpr, op, BooleanExpr
def visit_BoolExpr(self, node):
if node.op:
if node.op.type == AND:
return node.bound, self.visit(node.left), "and", self.visit(node.right)
elif node.op.type == OR:
return node.bound, self.visit(node.left) , "or", self.visit(node.right)
else:
return self.visit(node.left)
## Visit Arguments
#
# @brief Returns a list of strings representing the arguments
#
# @param node AST instance to be evaluated
#
# @retval String List list of the arguments as strings
def visit_Args(self, node):
args = []
for child in node.children:
args.append(self.visit(child))
return args
## Visit Action
#
# @brief Returns a tuple of the action_name, action_args
#
# @param node AST instance to be evaluated
#
# @retval (String, String List) tuple of the form action_name, action_args
def visit_Act(self, node):
return (self.visit(node.var), self.visit(node.args))
## Visit Actions
#
# @brief Returns a list of strings representing the actions
#
# @param node AST instance to be evaluated
#
# @retval (String, String List) List list of action tuples with action_name, action_args
def visit_Acts(self, node):
acts = []
for child in node.children:
acts.append(self.visit(child))
return acts
## Visit Caus
#
# @brief Returns the name of the intention
#
# @param node AST instance to be evaluated
#
# @retval String string representing the intention
def visit_Caus(self, node):
# acts = the right-side of the causal statement. Represent
# the actions that cause the 'intention'
# act = the left-side of the causal statement. Represents
# the 'intention' caused by the actions
acts = self.visit(node.acts)
act = self.visit(node.act)
# defines fold-left function
foldl = lambda func, acc, xs: functools.reduce(func, xs, acc)
# isolates and formats the names of the acts in order to be
# used in the if statement
act_names = foldl(operator.add, '', map(lambda x: '\''+x[0]+'\',', acts))
intention_Args = act[1]
act_name = act[0]
if not act_name in self.method_var_equivs:
self.method_var_equivs[act_name] = []
self.method_var_equivs[act_name].append({})
length = len(self.method_var_equivs[act_name])
# return statement
# defines return value as variable then returns this variable
ret = '__ret_val = ['
# iterate through each action adding it and its
# argument placeholders to the return string
for action in acts:
ret += '(\''+action[0]+'\','
# iterate through each argument to an action
for a in range(0, len(action[1])):
arg = action[1][a]
# Handle the special case of the CONT keyword
if arg[:4] == 'CONT':
# create a dictionary for act_name intention
# in the method_var_equivs if it's not there
tmp_dict = {}
index = a - int(arg[4:])
prev_arg = action[1][index]
# adjust arg name to avoid collisions
arg = arg + "-" + prev_arg
tmp_dict[arg] = prev_arg
self.method_var_equivs[act_name][length-1].update(tmp_dict)
# use hashtag notation to indicate arg_name to be replaced
ret += '#'+arg + ','
ret = ret[:len(ret)-1] + ')'
ret += ','
# add final bracket to return array
ret = ret[:len(ret)-1] + ']\n'
# add return statement with the return value
ret += 'return __ret_val\n'
# Check if method has already been defined
if act_name in self.methods_dict:
args, conds, rets = self.methods_dict[act_name]
hasLiteral = False
for arg in intention_Args:
if isinstance(arg, (tuple, list)):
hasLiteral = True
origHasLiteral = False
for arg in args:
if isinstance(arg, (tuple, list)):
origHasLiteral = True
added_star = False
# Check if you have to change parameters (add final *)
if 'NONE' not in intention_Args and not hasLiteral:
if 'NONE' in args or origHasLiteral or len(intention_Args) > len(args):
added_star = True
prev_arg = ''
index = -1
# iterate through intention args
for a in range(0, len(intention_Args)):
arg = intention_Args[a]
# handle CONT keyword
if arg[:4] == 'CONT':
# Get argument referenced by CONT number
index = a - int(arg[4:])
prev_arg = args[index]
# iterate through intention args coming after the first item
# referenced by the CONT
tmp_dict = {}
for i in range(index, len(intention_Args)-1):
prev_arg_2 = intention_Args[i]
# check if there's an entry yet for this intention
new_index = i - index
# add in new mapping for each of the args in CONT list
tmp_dict[prev_arg_2] = prev_arg+'['+str(new_index)+']'
# Map first arg in CONT list
tmp_dict[prev_arg] = prev_arg +'[0]'
for i in range(0, length):
self.method_var_equivs[act_name][i].update(tmp_dict)
# Use star notation to indicate that the method arguments needs
# star at the end to indicate variable length tuple in Python
prev_arg = '*' + prev_arg
adjusted_args = args
if index > -1:
adjusted_args[index] = prev_arg
else:
adjusted_args = intention_Args
self.methods_dict[act_name] = (adjusted_args, conds, rets)
if not added_star:
prev_arg = ''
index = -1
# iterate through intention args
for a in range(0, len(args)):
arg = args[a]
# handle CONT keyword
if arg[:4] == 'CONT':
# Get argument referenced by CONT number
index = a - int(arg[4:])
prev_arg = args[index]
# iterate through intention args coming after the first item
# referenced by the CONT
tmp_dict = {}
for i in range(index, len(args)-1):
prev_arg_2 = args[i]
# check if there's an entry yet for this intention
new_index = i - index
# add in new mapping for each of the args in CONT list
tmp_dict[prev_arg_2] = prev_arg+'['+str(new_index)+']'
# Map first arg in CONT list
tmp_dict[prev_arg] = prev_arg +'[0]'
self.method_var_equivs[act_name][length-1].update(tmp_dict)
# Use star notation to indicate that the method arguments needs
# star at the end to indicate variable length tuple in Python
prev_arg = '*' + prev_arg
# Update Methods Dict
self.methods_dict[act_name][2].append(ret)
else:
self.methods_dict[act_name] = (intention_Args, [], [ret])
return act_name
## Visit No Conditional
#
# @brief Return None when there is no conditional
#
# @param node AST instance to be evaluated
#
# @retval none
def visit_NoCond(self,node):
self.methods_dict[self.intention][1].append(None)
return None
## Listify Boolean Expression
#
# @brief Converts a boolean expression in the tuplized form (see
# visit_BoolExpr return) into a list of the form [a,b,c,...]
# where a,b,c,... are conjunctions. The commas represent disjunctions.
# Parsing the boolean expressions in this matter allows us to
# properly evaluate 'or' expressions.
#
# @param cond post evaluated condition to be redistributed
#
# @retval (Tuple List) List list of boolean AND expressions, where the members of the list of assumed to be ORED together
def listify_BoolExpr(self, cond):
new_conds = []
if not cond:
return []
if cond[0] == 'UNIT':
# Return single statement as is, nested in two lists
new_conds.append([cond])
else:
# Check if the first value in the tuple is a boolean
# if so, remove the boolean and evaluate appropriately
if isinstance(cond[0], bool):
# If the boolean is surrounded by parentheses
# evaluate it as a whole
if (cond[0]):
return self.listify_BoolExpr(cond[1:])
else:
# otherwise just cut of the first tuple val
cond = cond[1:]
# left = evaluate the left-most value (this language is
# right associative by default)
left = self.listify_BoolExpr(cond[0])
# Evaluate the rest of the conditions if there are any
if len(cond) > 1:
op = cond[1]
right = self.listify_BoolExpr([cond[2:]])
if (op == 'and'):
# iterate through each list and append the concatenation
# of each sublist of left and right
# i.e. if left = [[a],[b] and right = [[c],[d]]
# output = [[a,c],[a,d],[b,c],[b,d]]
for a in left:
for b in right:
new_conds.append(a+b)
elif (op == 'or'):
# for or just concatenate the lists
new_conds = left+right
else:
new_conds = left
return new_conds
## Traverse Boolean Expression
#
# @brief Recursively descend Boolean Expression and appropriately print it out
#
# @param node post evaluated and listified conditions to be compiled
#
# @rtype: (String, String) return an if statment and body in Python representing a conditional
def traverse_BoolExpr(self, cond):
# if cond[1] is one of the comparative operators than we
# know there is only one boolean statement (no && or ||)
if not cond:
return '', ''
body = ''
if_stmt = ''
tab = ' '
# if cond[1] in comps:
if cond[0] == 'UNIT':
body, if_stmt = self.compile_bool(cond[1])
# Only add in tabs for body if there is a body
else:
# op = the previous operand (either && or ||). It
# starts as 'if' for convenience sake as you'll
# see in the code below
op = 'if'
if isinstance(cond[0], bool):
body, if_stmt = self.traverse_BoolExpr(cond[1:])
if_stmt = if_stmt.replace('if ', 'if (')
if_stmt = if_stmt.replace(':\n', '):\n')
else:
body, if_stmt = self.traverse_BoolExpr(cond[0])
body2 = if_stmt2 = ''
if len(cond) > 1:
op = cond[1]
body2, if_stmt2 = self.traverse_BoolExpr(cond[2:])
# Only add in tabs if the new addition to body
# is not empty
if body2 != '':
# body += 2*tab+body2
body += body2
# Replace the ending colon and newline character with a
# space for the previous if statement. Replace the 'if'
# from the new if statement with the appropriate operand
# (either 'and' or 'or'). Doing this allows us to have
# the whole conditional on one line, avoiding tabbing issues
if if_stmt2 != '':
if_stmt = if_stmt.replace(':\n',' ')+if_stmt2.replace('if', op)
return body, if_stmt
## Develop And Expression
#
# @brief Takes in a list of boolean expressions and returns the 'AND'
# tuple of each element. The input is the same form as the output
# of the listify_BoolExpr function.
#
# @param exprList list of python conditions to be anded
#
# @retval Tuple Tuple of the same form as visit_BoolExpr to be compiled by compile_boolean
def develop_and_expr(self, exprList):
if len(exprList) == 0:
return None
elif len(exprList) == 1:
return exprList[0]
else:
return False, exprList[0], 'and', self.develop_and_expr(exprList[1:])
## Visit Conditional
#
# @brief Return the result of evaluating the boolean expression
#
# @param node AST instance to be evaluated
#
# @retval String Python code that represents the conditionals
def visit_Cond(self, node):
result = ''
boolean = self.visit(node.boolean)
bools_listified = self.listify_BoolExpr(boolean)
bool_list = []
for a in range(0, len(bools_listified)):
and_expr = bools_listified[a]
bool_list.append(self.develop_and_expr(and_expr))
if not a == len(bools_listified) - 1:
self.method_var_equivs[self.intention].append({})
# Comparative Operators in Custom Language
comps = ['==', '<', '>', '<=', '>=']
# body = Additional things added to the body of if_stmt.
# This could include calls to lookup_type or
# defining local variables
# if_stmt = Handles conditional relationship rules. For
# example, this if statement would include
# checking the type of an object
if_stmt = ''
body = ''
paren = ''
copy_ret = ''
# Evaluate each bool from bool_list and add it to the methods_dict
# along with a copy of the appropriate ret_val
if len(bool_list) > 0:
if len(self.methods_dict[self.intention][2]) > 0:
copy_ret = self.methods_dict[self.intention][2][len(self.methods_dict[self.intention][2])-1]
self.methods_dict[self.intention][2].pop()
for bool2 in bool_list:
body, if_stmt = self.traverse_BoolExpr(bool2)
result = body + if_stmt
self.methods_dict[self.intention][1].append(result)
self.methods_dict[self.intention][2].append(copy_ret)
result += body + if_stmt
return result
## Handle Type Keyword
#
# @brief Returns a string representing the updated if statement for
# the type keyword
#
# @param expr that is the name of the argument to TYPE
# @param arg_num integer that tells if the TYPE keyword is left or right of equals comparator
# @param if_stmt previous code from if statement that must be added to and returned
# @param pos boolean representing whether the comparator was '=' or '!='
#
# @retval String if statement representing the TYPE conditional
def handle_Type(self, expr, arg_num, if_stmt, pos):
# Handles arg 1 and 2 slightly differently
if isinstance(expr, (list, tuple)):
return if_stmt
if arg_num == 1:
var_name = '#'+expr
if pos:
if_stmt += 'if state.objs['+var_name+'][0] == '
else:
if_stmt += 'if not state.objs['+var_name+'][0] == '
elif arg_num == 2:
var_name = '#'+expr
if_stmt += 'state.objs['+var_name+'][0]:\n'
else:
raise Exception('There can only be one expression on either side of an equality comparator!')
return if_stmt
## Compile Boolean Statement
#
# @brief Returns a tuple (body, if statement) that represents the
    # required additions to the output to successfully match the
# conditional.
#
# @param cond post-evaluated conditional in Tuple form to be compiled
#
# @retval (String, String) if statement and body in Python that represent a conditional
def compile_bool(self, cond):
# body = Additional things added to the body of if_stmt.
# This could include calls to lookup_type or
# defining local variables
# if_stmt = Handles conditional relationship rules. For
# example, this if statement would include
# checking the type of an object
body = ''
if_stmt = ''
# expr1 = left side of comparison. Could be a variable,
# literal, or keyword phrase like TYPE(obj)
# comp = comparison operator (should be '==')
# expr2 = right side of comparison. Could be a variable,
# literal, or keyword phrase like ALL(type)
expr1 = cond[0]
comp = cond[1]
expr2 = cond[2]
# Retrieve the intention arguments from the dictionary
# NOTE: It is known at this point that there is an entry
# for self.intention in the dictionary
intention_Args = self.methods_dict[self.intention][0]
length = len(self.method_var_equivs[self.intention])
# Check comparator type
if comp == '==':
# Evaluate All Keyword
if expr1[0] == 'ALL':
# define body statement
obj_id = expr1[1]+'_id'
body += 'all_'+expr1[1]+' = ['+obj_id+' for '
body += obj_id+' in state.objs if state.objs['+obj_id
body += '][0]==\''+expr1[1]+'\']\n'
# add this if statement to preserve appropriate tabbing
if_stmt += 'if True:\n'
# items in second expression list
# NOTE: expr2 must be a list
for a in range(0, len(expr2)):
arg = expr2[a]
# Handle CONT keyword
if isinstance(arg, (list, tuple)):
pass
else:
if arg[:4] == 'CONT':
cont_offset = int(arg[4:])
prev_arg = expr2[a-cont_offset]
# alter arg name to avoid namespace collision
arg = arg + '-' + prev_arg
self.method_var_equivs[self.intention][length-1][arg] = ')+tuple(all_'+expr1[1]+'['+str(a-cont_offset)+':]'
else:
self.method_var_equivs[self.intention][length-1][arg] = 'all_'+expr1[1]+'['+str(a)+']'
# evaluate TYPE keyword
elif expr1[0] == 'TYPE':
if_stmt = self.handle_Type(expr1[1], 1, if_stmt, True)
# the second expression is known to be either a literal
# or another TYPE expression
if if_stmt == '':
return body, 'if True:\n'
if expr2[0] == 'TYPE':
if_stmt_hold = if_stmt;
if_stmt = self.handle_Type(expr2[1], 2, if_stmt, True)
if if_stmt == if_stmt_hold:
return body, 'if True:\n'
else:
if_stmt += '\''+expr2+'\':\n'
# Handle variable/literal comparison
else:
if_stmt += 'if True:\n'
# var1 and var2 could be either variables or literals
var1 = ''
var2 = ''
isVar1Lit = False
isVar2Lit = False
isVar1Flt = False
isVar2Flt = False
try:
float(expr1)
var1 = str(expr1)
isVar1Flt = True
except:
pass
try:
float(expr2)
var2 = str(expr2)
isVar2Flt = True
except:
pass
# Add quotes around literals and determine which vars
# are literals
if not isVar1Flt:
if expr1[0] == 'LITERAL':
var1 = '\''+str(expr1[1])+'\''
isVar1Lit = True
else:
var1 = expr1
if not isVar2Flt:
if expr2[0] == 'LITERAL':
var2 = '\''+str(expr2[1])+'\''
isVar2Lit = True
else:
var2 = expr2
if isVar1Lit and isVar2Lit:
raise Exception('Comparing '+var1+' and '+var2+' which are both String literals!')
# They are both variables
elif isVar1Flt and isVar2Flt:
raise Exception('Comparing '+var1+' and '+var2+' which are both Floats!')
elif not isVar1Lit and not isVar2Lit and not isVar1Flt and not isVar2Flt:
var1_star = '*'+var1
var2_star = '*'+var2
real_var = ''
temp_var = ''
# The 'real_var' is the one present in the intention, i.e.
# the method args. References to the 'temp_var' should
# be replaced with the 'real_var'. Make sure to also check
# for the starred variables and check for equivalents in the
# method_var_equivs dictionary.
if var1 in intention_Args:
real_var = var1
temp_var = var2
elif var1_star in intention_Args:
# The star always refers to the 0 index
real_var = var1 + '[0]'
temp_var = var2
elif var2 in intention_Args:
real_var = var2
temp_var = var1
elif var2_star in intention_Args:
# The star always refers to the 0 index
real_var = var2 + '[0]'
temp_var = var1
elif self.intention in self.method_var_equivs:
if var1 in self.method_var_equivs[self.intention][length-1]:
real_var = self.method_var_equivs[self.intention][length-1][var1]
temp_var = var2
elif var2 in self.method_var_equivs[self.intention][length-1]:
real_var = self.method_var_equivs[self.intention][length-1][var2]
temp_var = var1
else:
return body, if_stmt
else:
return body, if_stmt
# raise Exception('Variables '+var1+','+var2+' were not found!')
tmp_dict = {}
tmp_dict[temp_var] = real_var
self.method_var_equivs[self.intention][length-1].update(tmp_dict)
# one variable is literal, one isn't
else:
lit_var = ''
real_var = ''
# determine which is the literal and assign locals
# appropriately
if isVar1Lit or isVar1Flt:
lit_var = var1
real_var = var2
else:
lit_var = var2
real_var = var1
tmp_dict = {}
tmp_dict[real_var] = lit_var
self.method_var_equivs[self.intention][length-1].update(tmp_dict)
elif comp == '!=':
# Evaluate All Keyword
if expr1[0] == 'ALL':
if_stmt += 'if True:\n'
# evaluate TYPE keyword
elif expr1[0] == 'TYPE':
if_stmt = self.handle_Type(expr1[1], 1, if_stmt, False)
# the second expression is known to be either a literal
# or another TYPE expression
if if_stmt == '':
return body, 'if True:\n'
if expr2[0] == 'TYPE':
if_stmt_hold = if_stmt;
if_stmt = self.handle_Type(expr2[1], 2, if_stmt, False)
if if_stmt == if_stmt_hold:
return body, 'if True:\n'
else:
if_stmt += '\''+expr2+'\':\n'
# Handle variable/literal comparison
else:
if_stmt += 'if True:\n'
elif comp == 'PYTHON':
if_stmt += 'if True:\n'
else:
raise Exception('\''+str(comp)+'\' comparator currently not supported')
return body, if_stmt
## Visit Statement
#
    # @brief Evaluates a causal relation and a conditional. Returns None.
#
# @param node AST instance to be evaluated
#
# @retval none
def visit_Stmt(self, node):
# if_stmt = initial if statement string that differentiates
# which rule is being applied
# gadd = g.add statement string that adds the result of the
# rule to the final set
# arg_indices = dictionary containing the i,j indices in the 2d
# arguments array for each of the arguments of the
# actions
intention = self.visit(node.caus)
self.intention = intention
# cond = tuple representing the conditions under which the rule
# holds. See visit_BoolExpr for more insight into the
# formatting here
cond = self.visit(node.cond)
# self.methods_dict[intention][1].append(cond)
return None
## Visit Statements
#
# @brief Compile all statements and concatenate results
#
# @param node AST instance to be evaluated
#
# @retval String valid Python code output fo the program
def visit_Stmts(self, node):
result = ''
for child in node.children:
self.visit(child)
# Define standard tab
tab = ' '
# iterate through intentions in the method_var_equivs
for intent in self.method_var_equivs:
intent_list = self.method_var_equivs[intent]
for i in range(0, len(intent_list)):
int_dict = intent_list[i]
# iterate through the variables in the method_var_equivs at a
# given intention
for var in int_dict:
# Only make changes if one of the vars is CONT
if 'CONT' in var:
# Check for/update value mapped to CONT and update it
if int_dict[var] in self.method_var_equivs[intent][i]:
old_val = self.method_var_equivs[intent][i][int_dict[var]]
old_index = old_val[len(old_val)-2]
new_index = int(old_index)+1
cont_offset = int(var[4:var.find('-')])
new_index = str(new_index - cont_offset)
# new_val = ')+tuple('+old_val.replace(old_index+']', new_index+':]')
new_val = ')+tuple('+old_val.replace(']', ':]')
self.method_var_equivs[intent][i][var] = new_val
# Iterate through each intention in the methods dictionary
for intention in self.methods_dict:
args = self.methods_dict[intention][0]
conds = self.methods_dict[intention][1]
rets = self.methods_dict[intention][2]
# Iterate through the conditions
for c in range(0, len(conds)):
cond = conds[c]
# Replace all variables with dictionary equivalent if it exists
if cond:
if intention in self.method_var_equivs:
for var in self.method_var_equivs[intention][c]:
cond = cond.replace('#'+var, self.method_var_equivs[intention][c][var])
# Remove remaining unnecessary hashtags
if cond:
cond = cond.replace('#', '')
conds[c] = cond
# Iterate through the return statements
for r in range(0, len(rets)):
ret = rets[r]
# Replace all variables with their dictionary equivalents
if intention in self.method_var_equivs:
int_dict = self.method_var_equivs[intention][r]
for var in int_dict:
# Handle CONT keyword
if 'CONT' in var and var in ret:
cont_offset = int(var[4:var.find('-')]) + 1
temp_ret = ret
temp_ret = temp_ret.split('#'+var)
temp_ret = temp_ret[0][::-1]
index = -1
# Find index of ',' which denotes the end
# of the argument in question
for i in range(0, len(temp_ret)):
c = temp_ret[i]
if c == ',':
index = i
cont_offset -= 1
if cont_offset == 0:
break
var_index = ret.find('#'+var)
ret = ret[0:var_index-index]+ret[var_index:]
ret = ret.replace('#'+var, self.method_var_equivs[intention][r][var])
# Remove unnecessary hashtags
if ret:
ret = ret.replace('#', '')
rets[r] = ret
# Iterate through the now updated methods dictionary
for intention in self.methods_dict:
args = self.methods_dict[intention][0]
conds = self.methods_dict[intention][1]
rets = self.methods_dict[intention][2]
# Build method declaration string
method_dec = 'def '
# python functions cannot have hyphens :(
intention_no_hyphen = intention.replace('-', '_')
method_dec += intention_no_hyphen
method_dec += '(state'
objs_conv = ''
# Iterate through method args and print
for arg in args:
if isinstance(arg, (list, tuple)):
if arg[0] == 'PYTHON':
arg = arg[1]
else:
raise Exception('Must define intention at least once without literal argument \''+str(arg[1])+'\'')
if arg == 'NONE':
raise Exception('No full argument list for intention '+str(intention) + ' defined')
method_dec += ', '+arg
# identify the presence of the * in the args
# if it's there define the flatten call which will
# convert the multiple layer tuple into a single
# layer array/tuple
if arg[0] == '*':
objs_conv = tab+arg[1:]+' = flatten('+arg[1:]+')\n'
method_dec += '):\n'
pyhop_stmt = 'pyhop.declare_methods(\''+intention+'\','
pyhop_stmt += intention_no_hyphen+')\n'
result += method_dec
result += objs_conv
# Reduction check, includes all of the conditional obligations
# required when there are multiple reductions
red_check = ''
# tabbing for the first return
ret1_tabs = tab
# tabbing for the second return
ret2_tabs = tab
# Check if there are multiple possible reductions
if len(rets) > 1:
# This adds in a check which reduction should be used by creating a
# a comparitive statement that checks if the arguments in the return
# contains all the arguments passed into the function
ret2_tabs += tab
red_check = 2*tab+'__all_args = []\n'
red_check += 2*tab+'for __action in __ret_val:\n'
red_check += 3*tab+'for __arg in __action:\n'
red_check += 4*tab+'__all_args.append(__arg)\n'
red_check += 2*tab+'__all_intention_args = ['
for arg in args:
if arg[0] == '*':
red_check += '[__obj for __obj in '+arg[1:]+']'
else:
red_check += '['+arg + '],'
red_check += ']\n'
red_check += 2*tab+'__all_intention_args = flatten(__all_intention_args)\n'
red_check += 2*tab+'__all_args = flatten(__all_args)\n'
red_check += 2*tab+'if set(__all_intention_args).issubset(set(__all_args)):\n'
# Iterate through return statements
for i in range(0, len(rets)):
ret = rets[i]
cond = conds[i]
ret1_temp = ret1_tabs
ret2_temp = ret2_tabs
# adjust tabbing for condition and add it in
if cond:
if 'if' in cond and ':\n' in cond:
ret1_temp += tab
ret2_temp += tab
num_newlines = cond.count('\n')
result += tab + cond.replace('\n', '\n'+tab, num_newlines-1)
ret_lines = ret.split('\n')
# add actual returns, split in case there are two possible returns
result += ret1_temp + ret_lines[0] + '\n'
result += red_check
result += ret2_temp + ret_lines[1] + '\n'
result += pyhop_stmt
return result
## Visit Variable
#
# @brief Return a string representing the variable value/name
#
# @param node AST instance to be evaluated
#
# @retval String string representation of the variable value
def visit_Var(self, node):
return str(node.value)
## ID Generator
#
# @brief Randomly generates a variable id. Developed from:
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
#
# @param node AST instance to be evaluated
#
# @retval String random string of length 'size' made of characters 'chars'
def id_generator(self, size=10, chars=string.ascii_uppercase):
return ''.join(random.choice(chars) for _ in range(size))
## Visit State
#
# @brief Return a string representing the variable corresponding
# to the State keyword
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of form 'STATE', state_arg
def visit_State(self, node):
return 'STATE', self.id_generator()
## Visit Python
#
# @brief Return a string representing the Python code to be inlined
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'PYTHON', python_arg
def visit_Python(self, node):
return 'PYTHON', node.code
## Visit Digit
#
# @brief Returns a string representing a digit
#
# @param node AST instance to be evaluated
#
# @retval String string representation of a digit
def visit_Digit(self, node):
return str(node.value)
## Visit Integer
#
# @brief Returns a string representing a full integer, which is a
# string of concatenated digits
#
# @param node AST instance to be evaluated
#
# @retval String string representation of an integer
def visit_Int(self, node):
result = ''
# is int negative
if not node.sign:
result += '-'
for digit in node.digits:
result += self.visit(digit)
return result
## Visit Float
#
# @brief Returns a float string which is two integers separated by
# a dot
#
# @param node AST instance to be evaluated
#
# @retval String string representation of a float
def visit_Flt(self, node):
result = ''
# is float negative
if not node.sign:
result += '-'
result += self.visit(node.left) + '.' + self.visit(node.right)
return result
## Visit ALL
#
# @brief Returns a tuple of the form ('All', argument)
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'ALL', all_args
def visit_All(self, node):
return ALL, self.visit(node.arg)
## Visit Type
#
# @brief Returns a tuple of the form ('TYPE', argument)
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'TYPE', type_arg
def visit_Type(self, node):
return TYPE, self.visit(node.arg)
## Visit NoOp
#
# @brief Returns the empty string
#
# @param node AST instance to be evaluated
#
# @retval String empty string
def visit_NoOp(self, node):
return ''
## Interpret
#
# @brief Actually compile the statement. Returns a string of the final
# program code to be written to file
#
# @retval String final python code to be added to template files
def interpret(self):
tree = self.parser.parse()
return self.visit(tree)
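# Illustrative usage sketch (not part of the original module). The exact
# constructor signatures of Lexer and Parser live in the sibling modules, so
# this outline is left commented out rather than asserted as runnable code:
#   parser = Parser.Parser(Lexer(source_text))
#   output = Imitation_Compiler(parser).interpret()
#   # `output` is the generated Python source defining the PyHop methods.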
| mit | -1,267,524,005,528,027,600 | 30.979691 | 122 | 0.642 | false | 3.140862 | false | false | false |
ManrajGrover/Crick-Info-API | index.py | 1 | 3789 | from flask import Flask
from flask import jsonify
from bs4 import BeautifulSoup
import urllib
import re
import json
import unicodedata
app = Flask(__name__)
def remove_brackets(text):
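    # Strip Wikipedia-style footnote markers such as "[3]" and any
    # parenthetical notes from the scraped infobox text.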
ret = re.sub('\[.+?\]', '', text)
ret = re.sub('\(.+?\)','', ret)
return ret
@app.route('/api/')
@app.route('/api/<cricketer>')
def api(cricketer=None):
if cricketer == None:
res = {}
res['error'] = True
res['message'] = 'Please provide a cricketer name as GET parameter'
return jsonify(res)
else:
res = {}
cricketer = cricketer.replace (' ', '_')
url = 'https://en.wikipedia.org/wiki/'+str(cricketer)
html = urllib.urlopen(url).read()
soup = BeautifulSoup(html)
current = None
for row in soup.find("table", {"class": "infobox vcard"}).findAll('tr'):
children = row.find_all(True, recursive=False)
if len(children) == 1:
if children[0].name == 'th':
current = unicodedata.normalize('NFKD',children[0].text).encode('ascii','ignore')
current = current.lower().replace(' ','_').strip()
res[current] = {}
elif children[0].name == 'td' and children[0].table:
first = True
list = []
for r in children[0].table.findAll('tr'):
if first:
f = True
ths = r.find_all(True, recursive=False)
for head in ths:
if not f:
key = unicodedata.normalize('NFKD', head.text).encode('ascii','ignore')
key = remove_brackets(key).lower().replace('.','').strip().replace(' ','_')
res[current][key] = {}
list.append(key)
								else:
									# blank corner cell of the header row; keep a placeholder so the
									# column indices used below stay aligned (key is not defined yet here)
									list.append('')
									f = False
first = False
else:
ths = r.find_all(True, recursive=False)
key = unicodedata.normalize('NFKD',ths[0].text).encode('ascii','ignore')
key = remove_brackets(key).lower().replace('.','').strip().replace(' ','_')
f = True
i = 1
for head in list:
if not f:
value = unicodedata.normalize('NFKD',ths[i].text).encode('ascii','ignore')
value = remove_brackets(value).replace('\n','').strip()
if value.endswith('/'):
value += "0"
i += 1
res[current][head][key] = value
else:
f= False
elif len(children) == 2:
if current is not None:
value = unicodedata.normalize('NFKD',children[1].text).encode('ascii','ignore')
key = unicodedata.normalize('NFKD',children[0].text).encode('ascii','ignore')
key = remove_brackets(key).lower().replace('.','').strip().replace(' ','_')
value = remove_brackets(value).replace('\n','').strip()
res[current][key] = value
return jsonify(res)
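
# Illustrative client call (added for clarity; the player name is only an
# example). With the app running on Flask's default port:
#   curl http://127.0.0.1:5000/api/Sachin_Tendulkar
# Spaces in the name also work, since the handler above replaces them with
# underscores before building the Wikipedia URL.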
@app.route('/')
@app.route('/index')
def index():
return 'Welcome to Crick Info API, this is currently under development!'
if __name__ == '__main__':
app.run()
| mit | -2,481,600,017,361,157,000 | 43.576471 | 111 | 0.430193 | false | 4.832908 | false | false | false |
implus/UnbalancedDataset | unbalanced_dataset/under_sampling.py | 2 | 22485 | from __future__ import print_function
from __future__ import division
import numpy as np
from numpy import logical_not, ones
from numpy.random import seed, randint
from numpy import concatenate
from random import sample
from collections import Counter
from .unbalanced_dataset import UnbalancedDataset
class UnderSampler(UnbalancedDataset):
"""
Object to under sample the majority class(es) by randomly picking samples
with or without replacement.
"""
def __init__(self,
ratio=1.,
random_state=None,
replacement=True,
verbose=True):
"""
:param ratio:
The ratio of majority elements to sample with respect to the number
of minority cases.
        :param random_state:
            Seed.
        :param replacement:
            Whether to sample the majority class with replacement.
:return:
underx, undery: The features and target values of the under-sampled
data set.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self,
ratio=ratio,
random_state=random_state,
verbose=verbose)
self.replacement = replacement
def resample(self):
"""
...
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
        # Loop over the other classes and under-sample them by picking at random
for key in self.ucd.keys():
            # If this is the minority class, skip it
if key == self.minc:
continue
# Set the ratio to be no more than the number of samples available
if self.ratio * self.ucd[self.minc] > self.ucd[key]:
num_samples = self.ucd[key]
else:
num_samples = int(self.ratio * self.ucd[self.minc])
# Pick some elements at random
seed(self.rs)
if self.replacement:
indx = randint(low=0, high=self.ucd[key], size=num_samples)
else:
indx = sample(range((self.y == key).sum()), num_samples)
# Concatenate to the minority class
underx = concatenate((underx, self.x[self.y == key][indx]), axis=0)
undery = concatenate((undery, self.y[self.y == key][indx]), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
class TomekLinks(UnbalancedDataset):
"""
Object to identify and remove majority samples that form a Tomek link with
minority samples.
"""
def __init__(self, verbose=True):
"""
        :param verbose:
            Whether to print processing information.
:return:
Nothing.
"""
UnbalancedDataset.__init__(self, verbose=verbose)
def resample(self):
"""
:return:
Return the data with majority samples that form a Tomek link
removed.
"""
from sklearn.neighbors import NearestNeighbors
# Find the nearest neighbour of every point
nn = NearestNeighbors(n_neighbors=2)
nn.fit(self.x)
nns = nn.kneighbors(self.x, return_distance=False)[:, 1]
# Send the information to is_tomek function to get boolean vector back
if self.verbose:
print("Looking for majority Tomek links...")
links = self.is_tomek(self.y, nns, self.minc, self.verbose)
if self.verbose:
print("Under-sampling "
"performed: " + str(Counter(self.y[logical_not(links)])))
# Return data set without majority Tomek links.
return self.x[logical_not(links)], self.y[logical_not(links)]
class ClusterCentroids(UnbalancedDataset):
"""
    Experimental method that under-samples the majority class by replacing a
    cluster of majority samples with the cluster centroid of a KMeans algorithm.
    This algorithm keeps N majority samples by fitting the KMeans algorithm
    with N clusters to the majority class and using the coordinates of the N
    cluster centroids as the new majority samples.
"""
def __init__(self, ratio=1, random_state=None, verbose=True, **kwargs):
"""
:param kwargs:
Arguments the user might want to pass to the KMeans object from
scikit-learn.
:param ratio:
The number of cluster to fit with respect to the number of samples
in the minority class.
N_clusters = int(ratio * N_minority_samples) = N_maj_undersampled.
:param random_state:
Seed.
:return:
Under sampled data set.
"""
UnbalancedDataset.__init__(self, ratio=ratio,
random_state=random_state,
verbose=verbose)
self.kwargs = kwargs
def resample(self):
"""
???
:return:
"""
# Create the clustering object
from sklearn.cluster import KMeans
kmeans = KMeans(random_state=self.rs)
kmeans.set_params(**self.kwargs)
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
        # Loop over the other classes and under-sample each of them
for key in self.ucd.keys():
            # If this is the minority class, skip it.
if key == self.minc:
continue
# Set the number of clusters to be no more than the number of
# samples
if self.ratio * self.ucd[self.minc] > self.ucd[key]:
n_clusters = self.ucd[key]
else:
n_clusters = int(self.ratio * self.ucd[self.minc])
# Set the number of clusters and find the centroids
kmeans.set_params(n_clusters=n_clusters)
kmeans.fit(self.x[self.y == key])
centroids = kmeans.cluster_centers_
# Concatenate to the minority class
underx = concatenate((underx, centroids), axis=0)
undery = concatenate((undery, ones(n_clusters) * key), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
class NearMiss(UnbalancedDataset):
"""
An implementation of NearMiss.
See the original paper: NearMiss - "kNN Approach to Unbalanced Data
Distributions: A Case Study involving Information Extraction" by Zhang
et al. for more details.
"""
def __init__(self, ratio=1., random_state=None,
version=1, size_ngh=3, ver3_samp_ngh=3,
verbose=True, **kwargs):
"""
:param version:
Version of the NearMiss to use. Possible values
are 1, 2 or 3. See the original paper for details
about these different versions.
:param size_ngh:
Size of the neighbourhood to consider to compute the
average distance to the minority point samples.
:param ver3_samp_ngh:
            The NearMiss-3 algorithm starts with a re-sampling phase. This
            parameter corresponds to the number of neighbours selected to
            create the subset in which the selection will be performed.
:param **kwargs:
Parameter to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, ratio=ratio,
random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
# Check that the version asked is implemented
if not (version == 1 or version == 2 or version == 3):
raise ValueError('UnbalancedData.NearMiss: there is only 3 '
'versions available with parameter version=1/2/3')
self.version = version
self.size_ngh = size_ngh
self.ver3_samp_ngh = ver3_samp_ngh
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# For each element of the current class, find the set of NN
# of the minority class
from sklearn.neighbors import NearestNeighbors
# Call the constructor of the NN
nn_obj = NearestNeighbors(n_neighbors=self.size_ngh, **self.kwargs)
        # Fit the minority class since we want to know the distance
        # to these points
nn_obj.fit(self.x[self.y == self.minc])
        # Loop over the other classes and under-sample each of them
for key in self.ucd.keys():
            # If this is the minority class, skip it
if key == self.minc:
continue
# Set the ratio to be no more than the number of samples available
if self.ratio * self.ucd[self.minc] > self.ucd[key]:
num_samples = self.ucd[key]
else:
num_samples = int(self.ratio * self.ucd[self.minc])
# Get the samples corresponding to the current class
sub_samples_x = self.x[self.y == key]
sub_samples_y = self.y[self.y == key]
if self.version == 1:
# Find the NN
dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
n_neighbors=self.size_ngh)
# Select the right samples
sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
num_samples,
key,
sel_strategy='nearest')
elif self.version == 2:
# Find the NN
dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
n_neighbors=self.y[self.y == self.minc].size)
# Select the right samples
sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
num_samples,
key,
sel_strategy='nearest')
elif self.version == 3:
# We need a new NN object to fit the current class
nn_obj_cc = NearestNeighbors(n_neighbors=self.ver3_samp_ngh,
**self.kwargs)
nn_obj_cc.fit(sub_samples_x)
# Find the set of NN to the minority class
dist_vec, idx_vec = nn_obj_cc.kneighbors(self.x[self.y == self.minc])
# Create the subset containing the samples found during the NN
                # search. Linearize the indexes and remove the duplicate values
idx_vec = np.unique(idx_vec.reshape(-1))
# Create the subset
sub_samples_x = sub_samples_x[idx_vec, :]
sub_samples_y = sub_samples_y[idx_vec]
# Compute the NN considering the current class
dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
n_neighbors=self.size_ngh)
sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
num_samples,
key,
sel_strategy='farthest')
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
def __SelectionDistBased__(self,
dist_vec,
num_samples,
key,
sel_strategy='nearest'):
# Compute the distance considering the farthest neighbour
dist_avg_vec = np.sum(dist_vec[:, -self.size_ngh:], axis=1)
# Sort the list of distance and get the index
if sel_strategy == 'nearest':
sort_way = False
elif sel_strategy == 'farthest':
sort_way = True
else:
raise ValueError('Unbalanced.NearMiss: the sorting can be done '
'only with nearest or farthest data points.')
sorted_idx = sorted(range(len(dist_avg_vec)),
key=dist_avg_vec.__getitem__,
reverse=sort_way)
# Select the desired number of samples
sel_idx = sorted_idx[:num_samples]
return self.x[self.y == key][sel_idx], self.y[self.y == key][sel_idx]
class CondensedNearestNeighbour(UnbalancedDataset):
"""
    An implementation of Condensed Nearest Neighbour.
    See the original paper: CNN - "Addressing the Curse of Imbalanced Training
    Sets: One-Sided Selection" by Kubat et al. for more details.
"""
def __init__(self, random_state=None,
size_ngh=1, n_seeds_S=1, verbose=True,
**kwargs):
"""
        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.
        :param n_seeds_S:
            Number of samples to extract in order to build the set S.
        :param **kwargs:
            Parameter to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
self.size_ngh = size_ngh
self.n_seeds_S = n_seeds_S
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# Import the K-NN classifier
from sklearn.neighbors import KNeighborsClassifier
        # Loop over the other classes and under-sample each of them
for key in self.ucd.keys():
            # If this is the minority class, skip it
if key == self.minc:
continue
# Randomly get one sample from the majority class
maj_sample = sample(self.x[self.y == key],
self.n_seeds_S)
# Create the set C
C_x = np.append(self.x[self.y == self.minc],
maj_sample,
axis=0)
C_y = np.append(self.y[self.y == self.minc],
[key] * self.n_seeds_S)
# Create the set S
S_x = self.x[self.y == key]
S_y = self.y[self.y == key]
# Create a k-NN classifier
knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
**self.kwargs)
# Fit C into the knn
knn.fit(C_x, C_y)
# Classify on S
pred_S_y = knn.predict(S_x)
# Find the misclassified S_y
sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
sel_y = S_y[np.nonzero(pred_S_y != S_y)]
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
class OneSidedSelection(UnbalancedDataset):
"""
An implementation of One-Sided Selection.
    See the original paper: OSS - "Addressing the Curse of Imbalanced Training
    Sets: One-Sided Selection" by Kubat et al. for more details.
"""
def __init__(self, random_state=None,
size_ngh=1, n_seeds_S=1, verbose=True,
**kwargs):
"""
        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.
        :param n_seeds_S:
            Number of samples to extract in order to build the set S.
        :param **kwargs:
            Parameter to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
self.size_ngh = size_ngh
self.n_seeds_S = n_seeds_S
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# Import the K-NN classifier
from sklearn.neighbors import KNeighborsClassifier
        # Loop over the other classes and under-sample each of them
for key in self.ucd.keys():
            # If this is the minority class, skip it
if key == self.minc:
continue
# Randomly get one sample from the majority class
maj_sample = sample(self.x[self.y == key],
self.n_seeds_S)
# Create the set C
C_x = np.append(self.x[self.y == self.minc],
maj_sample,
axis=0)
C_y = np.append(self.y[self.y == self.minc],
[key] * self.n_seeds_S)
# Create the set S
S_x = self.x[self.y == key]
S_y = self.y[self.y == key]
# Create a k-NN classifier
knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
**self.kwargs)
# Fit C into the knn
knn.fit(C_x, C_y)
# Classify on S
pred_S_y = knn.predict(S_x)
# Find the misclassified S_y
sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
sel_y = S_y[np.nonzero(pred_S_y != S_y)]
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
from sklearn.neighbors import NearestNeighbors
# Find the nearest neighbour of every point
nn = NearestNeighbors(n_neighbors=2)
nn.fit(underx)
nns = nn.kneighbors(underx, return_distance=False)[:, 1]
# Send the information to is_tomek function to get boolean vector back
if self.verbose:
print("Looking for majority Tomek links...")
links = self.is_tomek(undery, nns, self.minc, self.verbose)
if self.verbose:
print("Under-sampling "
"performed: " + str(Counter(undery[logical_not(links)])))
# Return data set without majority Tomek links.
return underx[logical_not(links)], undery[logical_not(links)]
class NeighbourhoodCleaningRule(UnbalancedDataset):
"""
    An implementation of Neighbourhood Cleaning Rule.
See the original paper: NCL - "Improving identification of difficult small
classes by balancing class distribution" by Laurikkala et al. for more details.
"""
def __init__(self, random_state=None,
size_ngh=3, verbose=True, **kwargs):
"""
        :param size_ngh:
            Size of the neighbourhood to consider in order to make
            the comparison between each sample and its NN.
        :param **kwargs:
            Parameter to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
self.size_ngh = size_ngh
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# Import the k-NN classifier
from sklearn.neighbors import NearestNeighbors
# Create a k-NN to fit the whole data
nn_obj = NearestNeighbors(n_neighbors=self.size_ngh)
# Fit the whole dataset
nn_obj.fit(self.x)
idx_to_exclude = []
        # Loop over all the classes
for key in self.ucd.keys():
# Get the sample of the current class
sub_samples_x = self.x[self.y == key]
# Get the samples associated
idx_sub_sample = np.nonzero(self.y == key)[0]
# Find the NN for the current class
nnhood_idx = nn_obj.kneighbors(sub_samples_x, return_distance=False)
            # Get the labels corresponding to the indices
nnhood_label = (self.y[nnhood_idx] == key)
# Check which one are the same label than the current class
# Make an AND operation through the three neighbours
nnhood_bool = np.logical_not(np.all(nnhood_label, axis=1))
            # For the minority class, exclude the majority samples found in its neighbourhood
if key == self.minc:
# Get the index to exclude
idx_to_exclude += nnhood_idx[np.nonzero(nnhood_label[np.nonzero(nnhood_bool)])].tolist()
else:
# Get the index to exclude
idx_to_exclude += idx_sub_sample[np.nonzero(nnhood_bool)].tolist()
# Create a vector with the sample to select
sel_idx = np.ones(self.y.shape)
sel_idx[idx_to_exclude] = 0
# Get the samples from the majority classes
sel_x = np.squeeze(self.x[np.nonzero(sel_idx), :])
sel_y = self.y[np.nonzero(sel_idx)]
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
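

# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# plain random under-sampling of a binary problem with numpy only, mirroring
# what UnderSampler.resample does without going through the class machinery.
# The toy data below is made up for demonstration; run it with
# `python -m unbalanced_dataset.under_sampling` so the relative import works.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x_demo = rng.randn(110, 2)
    y_demo = np.array([0] * 100 + [1] * 10)      # class 1 is the minority class
    min_count = (y_demo == 1).sum()
    maj_idx = np.flatnonzero(y_demo == 0)
    kept_maj = rng.choice(maj_idx, size=min_count, replace=False)
    sel = np.concatenate((kept_maj, np.flatnonzero(y_demo == 1)))
    print("Balanced subset:", Counter(y_demo[sel]))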
| mit | -5,274,854,654,081,673,000 | 33.860465 | 104 | 0.541739 | false | 4.187931 | false | false | false |
has2k1/plotnine | plotnine/geoms/geom_hline.py | 1 | 1838 | from warnings import warn
import pandas as pd
from ..utils import make_iterable, order_as_mapping_data
from ..exceptions import PlotnineWarning
from ..doctools import document
from ..mapping import aes
from .geom import geom
from .geom_segment import geom_segment
@document
class geom_hline(geom):
"""
Horizontal line
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
'size': 0.5, 'alpha': 1}
REQUIRED_AES = {'yintercept'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'inherit_aes': False}
legend_geom = 'path'
def __init__(self, mapping=None, data=None, **kwargs):
mapping, data = order_as_mapping_data(mapping, data)
yintercept = kwargs.pop('yintercept', None)
if yintercept is not None:
if mapping:
warn("The 'yintercept' parameter has overridden "
"the aes() mapping.", PlotnineWarning)
data = pd.DataFrame({'yintercept': make_iterable(yintercept)})
mapping = aes(yintercept='yintercept')
kwargs['show_legend'] = False
geom.__init__(self, mapping, data, **kwargs)
def draw_panel(self, data, panel_params, coord, ax, **params):
"""
Plot all groups
"""
ranges = coord.backtransform_range(panel_params)
data['y'] = data['yintercept']
data['yend'] = data['yintercept']
data['x'] = ranges.x[0]
data['xend'] = ranges.x[1]
data = data.drop_duplicates()
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True)
geom_segment.draw_group(gdata, panel_params,
coord, ax, **params)
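

# Illustrative usage sketch (added for clarity; assumes a pandas DataFrame
# named df with numeric columns 'x' and 'y' -- not part of this module):
#
#   from plotnine import ggplot, aes, geom_point, geom_hline
#   p = ggplot(df, aes('x', 'y')) + geom_point() + geom_hline(yintercept=3)
#
# Passing yintercept as a parameter (rather than an aesthetic mapping) takes
# the code path implemented in __init__ above, which builds the single-row
# data frame and mapping internally.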
| gpl-2.0 | 5,360,484,072,840,172,000 | 30.689655 | 74 | 0.572905 | false | 3.978355 | false | false | false |
Iotic-Labs/py-application-examples | thing_runner/follow_feed/follow_basic_catchall.py | 1 | 2735 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
from __future__ import unicode_literals, print_function
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s [%(name)s] {%(threadName)s} %(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from IoticAgent.Core.compat import monotonic
from IoticAgent.ThingRunner import RetryingThingRunner
from IoticAgent import Datatypes, Units
class FollowBasicCatchall(RetryingThingRunner):
LOOP_TIMER = 10 # minimum number of seconds duration of the main loop
def __init__(self, config=None):
"""Instantiation code in here, after the call to super().__init__()
"""
super(FollowBasicCatchall, self).__init__(config=config)
self.__thing = None
@staticmethod
def __catchall(args):
logger.debug("Catchall data received. Shared at %s", args['time'])
# Used for any catchall data that can't be parsed
try:
logger.debug('Found recent data for key %s: value: %s', 'count', args['data']['count'])
except KeyError as exc:
logger.warning('Failed to find key %s in recent data %s', exc, args)
raise
@staticmethod
def __catchall_parsed(args):
logger.debug("Feed data received. Shared at %s", args['time'])
values = args['parsed'].filter_by(types=(Datatypes.INTEGER,), units=(Units.COUNTS_PER_MIN,), text=("random",))
if values:
logger.debug('Found parsed data for key %s: value: %s', values[0].label, values[0].value)
else:
logger.debug('Parsed data not found')
def on_startup(self):
"""Called once at the beginning, before main().
Use this method to create your things, rebind connections, setup hardware, etc.
"""
print("Started. Press CTRL+C to end")
self.__thing = self.client.create_thing('follow_basic')
# register catchall for any data that's been queued for you before you start
# or any feed data that's not got a specific callback
self.client.register_catchall_feeddata(self.__catchall, callback_parsed=self.__catchall_parsed)
def main(self):
"""Called after on_startup.
Use this method for your main loop (we don't need one here).
Set self.LOOP_TIMER for your regular tick
"""
while True:
start = monotonic()
# loop code in here
stop = monotonic()
if self.wait_for_shutdown(max(0, self.LOOP_TIMER - (stop - start))):
break
def main():
FollowBasicCatchall(config="agent2.ini").run()
if __name__ == '__main__':
main()
| apache-2.0 | -6,573,477,423,837,014,000 | 34.986842 | 118 | 0.631444 | false | 3.929598 | false | false | false |
simgunz/anki | pylib/anki/dbproxy.py | 1 | 3505 | # Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from __future__ import annotations
import re
from re import Match
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import anki
# DBValue is actually Union[str, int, float, None], but if defined
# that way, every call site needs to do a type check prior to using
# the return values.
ValueFromDB = Any
Row = Sequence[ValueFromDB]
ValueForDB = Union[str, int, float, None]
class DBProxy:
# Lifecycle
###############
def __init__(self, backend: anki._backend.RustBackend) -> None:
self._backend = backend
self.mod = False
self.last_begin_at = 0
# Transactions
###############
def begin(self) -> None:
self.last_begin_at = self.scalar("select mod from col")
self._backend.db_begin()
def commit(self) -> None:
self._backend.db_commit()
def rollback(self) -> None:
self._backend.db_rollback()
# Querying
################
def _query(
self,
sql: str,
*args: ValueForDB,
first_row_only: bool = False,
**kwargs: ValueForDB,
) -> List[Row]:
# mark modified?
s = sql.strip().lower()
for stmt in "insert", "update", "delete":
if s.startswith(stmt):
self.mod = True
sql, args2 = emulate_named_args(sql, args, kwargs)
# fetch rows
return self._backend.db_query(sql, args2, first_row_only)
# Query shortcuts
###################
def all(self, sql: str, *args: ValueForDB, **kwargs: ValueForDB) -> List[Row]:
return self._query(sql, *args, first_row_only=False, **kwargs)
def list(
self, sql: str, *args: ValueForDB, **kwargs: ValueForDB
) -> List[ValueFromDB]:
return [x[0] for x in self._query(sql, *args, first_row_only=False, **kwargs)]
def first(self, sql: str, *args: ValueForDB, **kwargs: ValueForDB) -> Optional[Row]:
rows = self._query(sql, *args, first_row_only=True, **kwargs)
if rows:
return rows[0]
else:
return None
def scalar(self, sql: str, *args: ValueForDB, **kwargs: ValueForDB) -> ValueFromDB:
rows = self._query(sql, *args, first_row_only=True, **kwargs)
if rows:
return rows[0][0]
else:
return None
# execute used to return a pysqlite cursor, but now is synonymous
# with .all()
execute = all
# Updates
################
def executemany(self, sql: str, args: Iterable[Sequence[ValueForDB]]) -> None:
self.mod = True
if isinstance(args, list):
list_args = args
else:
list_args = list(args)
self._backend.db_execute_many(sql, list_args)
# convert kwargs to list format
def emulate_named_args(
sql: str, args: Tuple, kwargs: Dict[str, Any]
) -> Tuple[str, Sequence[ValueForDB]]:
# nothing to do?
if not kwargs:
return sql, args
print("named arguments in queries will go away in the future:", sql)
# map args to numbers
arg_num = {}
args2 = list(args)
for key, val in kwargs.items():
args2.append(val)
n = len(args2)
arg_num[key] = n
# update refs
def repl(m: Match) -> str:
arg = m.group(1)
return f"?{arg_num[arg]}"
sql = re.sub(":([a-zA-Z_0-9]+)", repl, sql)
return sql, args2
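

if __name__ == "__main__":
    # Small self-check of the named-argument emulation above (added for
    # illustration; it needs no running backend or collection, only an
    # importable anki package).  The named placeholder ":nid" should be
    # rewritten to the positional form "?1".
    demo_sql, demo_args = emulate_named_args(
        "select flds from notes where id = :nid", tuple(), {"nid": 1234}
    )
    print(demo_sql)   # select flds from notes where id = ?1
    print(demo_args)  # [1234]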
| agpl-3.0 | -6,108,757,242,505,558,000 | 27.266129 | 88 | 0.57632 | false | 3.536831 | false | false | false |
dcondrey/scrapy-spiders | dist/spiders/mandy.py | 1 | 1951 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from my_settings import name_file, keyword, test_mode, difference_days
from datetime import datetime, timedelta
import re
print "Run spider Mandy"
added_email = []
keyword = list(map(lambda x: re.sub(' ', '+', x), keyword))
if test_mode:
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%d-%b-%Y')
else:
current_date = datetime.today().strftime('%d-%b-%Y')
file = open(name_file, 'a')
email_in_file = open(name_file, 'r').readlines()
class Mandy(Spider):
name = 'mandy'
allowed_domains = ["mandy.com"]
start_urls = ["http://mandy.com/1/search.cfm?fs=1&place=wld&city=&what={}&where=Worldwide".format(key)
for key in keyword]
def parse(self, response):
sel = Selector(response)
date = sel.xpath('//*[@id="resultswrapper"]/section/div/div/div/div/span/text()').extract()
link = sel.xpath('//*[@id="resultswrapper"]/section/div/div/div/div/a/@href').extract()
date = list(map(lambda x: re.findall('\w+:\D([A-Za-z0-9-]+)', x)[0], date))
dic = dict(zip(link, date))
for key in dic.keys():
if dic[key] == current_date:
yield Request(url='http://mandy.com'+key, callback=self.parse_page)
def parse_page(self, response):
sel = Selector(response)
email = sel.re('(\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,6})')
if bool(email):
email = email[0]
if email + "\n" not in email_in_file and email not in added_email:
file.write(email+'\n')
added_email.append(email)
print "Spider: Mandy. Email {0} added to file".format(email)
else:
print "Spider: Mandy. Email {0} already in the file".format(email) | mit | 2,870,028,385,990,378,000 | 35.557692 | 106 | 0.583291 | false | 3.301184 | false | false | false |
MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/ajax/event.py | 1 | 11606 | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from json import dumps
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.317004
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/event.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class event(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(event, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
etime = time.localtime(VFFSL(SL,"event",True)['begin'])
channel = VFN(VFFSL(SL,"event",True)['channel'],"replace",False)("'", r"\'")
write(u'''
<!-- Icons from: http://findicons.com/pack/1987/eico -->
<div id="leftmenu_main">
\t<div id="leftmenu_top" class="handle" style="cursor:move">''')
_v = VFFSL(SL,"event",True)['channel'] # u"$event['channel']" on line 9, col 60
if _v is not None: write(_filter(_v, rawExpr=u"$event['channel']")) # from line 9, col 60.
write(u'''
\t\t<div id="leftmenu_expander_main" class="leftmenu_icon leftmenu_icon_collapse" onclick="$(\'#eventdescription\').hide(200)"></div>
\t</div>
\t<div id="leftmenu_container_main" style="padding:6px">
\t\t<div style="float:left; width:80px;">
\t\t\t<div id="station" style="background-color: #1c478e; padding:2px; width:75px; text-align:center; overflow:hidden">''')
_v = VFFSL(SL,"event",True)['channel'] # u"$event['channel']" on line 14, col 117
if _v is not None: write(_filter(_v, rawExpr=u"$event['channel']")) # from line 14, col 117.
write(u'''</div>
\t\t\t<div style="background-color: #1c478e; color:#fff; width:79px; font-size:23px; margin-top: 5px; text-align:center">
\t\t\t\t''')
_v = VFN(VFFSL(SL,"time",True),"strftime",False)("%H:%M", VFFSL(SL,"etime",True)) # u'$time.strftime("%H:%M", $etime)' on line 16, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$time.strftime("%H:%M", $etime)')) # from line 16, col 5.
write(u'''<br/>
\t\t\t\t<span style="font-size:12px; color:#A9D1FA">''')
_v = VFFSL(SL,"int",False)(VFFSL(SL,"event",True)['duration']/60) # u"$int($event['duration']/60)" on line 17, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$int($event['duration']/60)")) # from line 17, col 49.
write(u''' min</span>
\t\t\t</div>
\t\t\t<div style="background-color: #1c478e; color:#fff; width:79px;margin:5px 0">
\t\t\t\t<div style="font-size:23px; text-align:center">''')
_v = VFFSL(SL,"tstrings",True)[("day_" + (VFN(VFFSL(SL,"time",True),"strftime",False)("%w", VFFSL(SL,"etime",True))))] # u'$tstrings[("day_" + ($time.strftime("%w", $etime)))]' on line 21, col 52
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("day_" + ($time.strftime("%w", $etime)))]')) # from line 21, col 52.
write(u'''</div>
\t\t\t\t<div style="color:#A9D1FA; text-align:center">''')
_v = VFN(VFFSL(SL,"time",True),"strftime",False)("%d", VFFSL(SL,"etime",True)) # u'$time.strftime("%d", $etime)' on line 22, col 51
if _v is not None: write(_filter(_v, rawExpr=u'$time.strftime("%d", $etime)')) # from line 22, col 51.
write(u''' ''')
_v = VFFSL(SL,"tstrings",True)[("month_" + (VFN(VFFSL(SL,"time",True),"strftime",False)("%m", VFFSL(SL,"etime",True))))] # u'$tstrings[("month_" + ($time.strftime("%m", $etime)))]' on line 22, col 80
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("month_" + ($time.strftime("%m", $etime)))]')) # from line 22, col 80.
write(u'''</div>
\t\t\t</div>
\t\t\t<div>
\t\t\t <img src="/images/ico_timer.png" alt="''')
_v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 26, col 46
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 26, col 46.
write(u'''" title="''')
_v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 26, col 77
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 26, col 77.
write(u'''" style="cursor:pointer" onclick="addTimer(theevent)" />
\t\t\t <img src="/images/ico_zap.png" alt="Zap" title="''')
_v = VFFSL(SL,"tstrings",True)['zap'] # u"$tstrings['zap']" on line 27, col 56
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zap']")) # from line 27, col 56.
write(u'''" style="cursor:pointer" onclick="zapChannel(\'''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event",True)['sref']) # u"$str($event['sref'])" on line 27, col 118
if _v is not None: write(_filter(_v, rawExpr=u"$str($event['sref'])")) # from line 27, col 118.
write(u"""', '""")
_v = VFFSL(SL,"channel",True) # u'$channel' on line 27, col 142
if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 27, col 142.
write(u'''\')" />
\t\t\t\t<a href="/web/stream.m3u?ref=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"event",True)['sref']) # u"$quote($event['sref'])" on line 28, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$quote($event['sref'])")) # from line 28, col 34.
write(u'''&name=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"event",True)['channel']) # u"$quote($event['channel'])" on line 28, col 62
if _v is not None: write(_filter(_v, rawExpr=u"$quote($event['channel'])")) # from line 28, col 62.
write(u'''" target="_blank"><img
\t\t\t\t\tsrc="/images/ico_stream2.png" alt="Stream ''')
_v = VFFSL(SL,"channel",True) # u'$channel' on line 29, col 48
if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 29, col 48.
write(u'''" title="''')
_v = VFFSL(SL,"tstrings",True)['stream'] # u"$tstrings['stream']" on line 29, col 65
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['stream']")) # from line 29, col 65.
write(u''' ''')
_v = VFFSL(SL,"channel",True) # u'$channel' on line 29, col 85
if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 29, col 85.
write(u'''" style="cursor:pointer" /></a>
\t\t\t</div>
\t\t</div>
\t\t<div style="float:left; width:250px; margin-left: 5px">
\t\t\t<div style="font-size: 13px; font-weight: bold">''')
_v = VFFSL(SL,"event",True)['title'] # u"$event['title']" on line 33, col 52
if _v is not None: write(_filter(_v, rawExpr=u"$event['title']")) # from line 33, col 52.
write(u'''</div>
''')
if VFFSL(SL,"event",True)['title'] != VFFSL(SL,"event",True)['shortdesc']: # generated from line 34, col 1
write(u'''\t\t\t<div style="font-size: 12px; font-weight: bold">''')
_v = VFFSL(SL,"event",True)['shortdesc'] # u"$event['shortdesc']" on line 35, col 52
if _v is not None: write(_filter(_v, rawExpr=u"$event['shortdesc']")) # from line 35, col 52.
write(u'''</div>
''')
write(u'''\t\t\t<div style="max-height:400px; overflow:auto"><p>''')
_v = VFN(VFFSL(SL,"event",True)['longdesc'],"replace",False)("\n","<br/>") # u'$(event[\'longdesc\'].replace("\\n","<br/>"))' on line 37, col 52
if _v is not None: write(_filter(_v, rawExpr=u'$(event[\'longdesc\'].replace("\\n","<br/>"))')) # from line 37, col 52.
write(u'''</p></div>
\t\t</div>
\t\t<div style="clear:left"></div>
\t</div>
</div>
<script>
var theevent = ''')
_v = VFFSL(SL,"dumps",False)(VFFSL(SL,"event",True)) # u'$dumps($event)' on line 43, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$dumps($event)')) # from line 43, col 16.
write(u''';
if (picons[theevent[\'channel\']])
\t$(\'#station\').html(\'<img src="\'+picons[theevent[\'channel\']]+\'" width="75" />\');
</script>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_event= 'respond'
## END CLASS DEFINITION
if not hasattr(event, '_initCheetahAttributes'):
templateAPIClass = getattr(event, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(event)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=event()).run()
| gpl-2.0 | 8,901,558,350,633,718,000 | 47.560669 | 242 | 0.59745 | false | 3.032663 | false | false | false |
blaisb/cfdemUtilities | VANS/unsteady/unsteadyAutomateL2ErrorVANS.py | 2 | 2480 | # This program calculates the L2 error for a given velocity file
#
# Usage : python unsteadyAutomateL2ErrorVANS.py <velocity file>
#
# Author : Bruno Blais
# Last modified : December 3rd
#Python imports
import os
import math
import numpy
import matplotlib.pyplot as plt
import sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
#***************************
# Case chooser
#***************************
case = "finalVANS"
# Possible cases: void1, swirlVANS, expVANS, nonFreeDiv, unsteadyNS, finalVANS
#Amplitude of velocity field
A =0.01
#A = 1
#===========================
# Main program
#===========================
fname = sys.argv[1]
pi = numpy.pi
# read the file
#print "R-> Velocity file has been read"
[x,y,z,u,v,w] = numpy.loadtxt(fname, unpack=True)
nt = len(x)
nx = int(numpy.sqrt(nt))
uth = numpy.zeros([nt])
vth = numpy.zeros([nt])
#Analytical solution for theta velocity in eulerian frame of reference
if case=="void1":
for i in range(0,nt):
uth[i] = -2 * numpy.sin(pi*x[i])**2 * numpy.sin(pi*y[i]) * numpy.cos(pi*y[i])
vth[i] = 2 * numpy.sin(pi*y[i])**2 * numpy.sin(pi*x[i]) * numpy.cos(pi*x[i])
if case=="swirlVANS":
for i in range(0,nt):
uth[i] = -2 *A* numpy.sin(pi*x[i]) * numpy.cos(pi*y[i])
vth[i] = 2 *A* numpy.sin(pi*y[i]) * numpy.cos(pi*x[i])
if case=="expVANS":
for i in range(0,nt):
uth[i] = A * numpy.cos(x[i]*y[i])
vth[i] = -A * numpy.sin(x[i]*y[i])
if case=="nonFreeDiv":
for i in range(0,nt):
uth[i] = A * numpy.exp(-x[i]**2) * numpy.sin(pi*y[i]) * numpy.cos(pi*y[i])
vth[i] = A * numpy.exp(-y[i]**2) * numpy.sin(pi*x[i]) * numpy.cos(pi*x[i])
if case=="unsteadyNS":
for i in range(0,nt):
uth[i] = -2 * numpy.sin(pi*x[i])**2 * numpy.sin(pi*y[i]) * numpy.cos(pi*y[i]) * numpy.cos(numpy.pi/4.)
vth[i] = 2 * numpy.sin(pi*y[i])**2 * numpy.sin(pi*x[i]) * numpy.cos(pi*x[i]) * numpy.cos(numpy.pi/4.)
if case=="finalVANS":
for i in range(0,nt):
uth[i] = A * numpy.exp(-x[i]**2) * numpy.sin(pi*y[i]) * numpy.cos(pi*y[i]) * numpy.cos(numpy.pi/4.)
vth[i] = A * numpy.exp(-y[i]**2) * numpy.sin(pi*x[i]) * numpy.cos(pi*x[i]) * numpy.cos(numpy.pi/4.)
err = ((u-uth)**2 + (v-vth)**2)/A**2
L2err = numpy.sqrt(numpy.sum(err)/nt)
print "L2 Error is : %5.5e" %(L2err)
Z = numpy.reshape(err,(-1,nx))
#cs = plt.contour(Z,levels=numpy.arange(numpy.min(Z),numpy.max(Z),numpy.max(Z)/10.))
#plt.clabel(cs,inline=1,fontsize=10)
#plt.show()
| lgpl-3.0 | 7,374,829,722,601,325,000 | 28.176471 | 105 | 0.60121 | false | 2.339623 | false | false | false |
demisto/content | Packs/QuestKace/Integrations/QuestKace/QuestKace.py | 1 | 36041 | from typing import Dict, Optional, Tuple, Callable, Any, Union
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import requests
import dateparser
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# key = field of a ticket , val = dict of (name,id) of options
TICKETS_OBJECTS = {
'impact': {
'1 person cannot work': 1,
'Many people cannot work': 2,
'1 person inconvenienced': 3,
'Many people inconvenienced': 4
},
'category': {
"Network": 1,
"Other": 2,
"Software": 4,
"Hardware": 3
},
'priority': {
"Medium": 1,
'High': 2,
'Low': 3
},
'status': {
'Opened': 1,
'Closed': 2,
'Need More Info': 3,
'New': 4,
'Reopened': 5,
'Waiting Overdue': 6,
'Waiting on Customer': 7,
'Waiting on Third Party': 8
}
}
def convert_snake_to_camel(snake_str: str) -> str:
"""Convert a specific string of snake case to camel case.
Args:
snake_str: The string that we would like to convert.
Returns:
converted string.
"""
snake_split = snake_str.split("_")
camel_string = "".join(map(str.capitalize, snake_split))
camel_string = convert_specific_keys(camel_string)
return camel_string
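# For illustration (added comment, not part of the original code):
#   convert_snake_to_camel("hd_queue_id") returns "HdQueueID" -- the snake_case
#   pieces are capitalized and joined, then convert_specific_keys() below maps
#   "HdQueueId" to the Demisto-standard "HdQueueID".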
def convert_specific_keys(string: str):
"""
Convert specific keys to demisto standard
Args:
string: the text to transform
Returns:
A Demisto output standard string
"""
if string == 'OsName':
return 'OSName'
if string == 'OsNumber':
return 'OSNumber'
if string == 'Ram total':
return 'RamTotal'
if string == 'AssetDataId':
return 'AssetDataID'
if string == 'AssetClassId':
return 'AssetClassID'
if string == 'AssetStatusId':
return 'AssetStatusID'
if string == 'AssetTypeId':
return 'AssetTypeID'
if string == 'MappedId':
return 'MappedID'
if string == 'OwnerId':
return 'OwnerID'
if string == 'HdQueueId':
return 'HdQueueID'
if string == 'Ip':
return 'IP'
return string
def convert_dict_snake_to_camel(dic: dict) -> dict:
"""Convert a dictionary of snake case to camel case.
Args:
dic: The dictionary that we would like to convert.
Returns:
converted dictionary.
"""
context_dict = {}
for snake_str in dic:
if type(dic[snake_str]) is dict:
inner_dict = convert_dict_snake_to_camel(dic[snake_str])
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = inner_dict
elif type(dic[snake_str]) is list:
inner_dict = parse_response(dic[snake_str])
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = inner_dict
elif snake_str in ['id', 'Id']:
context_dict['ID'] = dic.get(snake_str, '')
else:
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = dic.get(snake_str, '')
return context_dict
def parse_response(lst: list):
"""Convert a Api response to wanted format.
Args:
lst: A list of dictionaries that return from api call.
Returns:
converted list of dictionaries from snake case to camel case.
"""
list_res = []
for dic in lst:
context_dict = convert_dict_snake_to_camel(dic)
list_res.append(context_dict)
return list_res
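# For illustration (added comment, not part of the original code): a raw API
# entry such as {"id": 5, "hd_queue_id": 1, "owner": {"user_name": "admin"}}
# becomes {"ID": 5, "HdQueueID": 1, "Owner": {"UserName": "admin"}} after
# parse_response / convert_dict_snake_to_camel.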
class Client(BaseClient):
"""
Client to use in the integration, overrides BaseClient.
Used for communication with the api.
"""
def __init__(self, url: str, username: str, password: str, verify: bool, proxy: bool):
super().__init__(base_url=f"{url}/api", verify=verify, proxy=proxy)
self._url = url
self._username = username
self._password = password
self._token, self._cookie = self.get_token()
def get_token(self) -> Tuple[str, str]:
"""Get a token for the connection.
Returns:
token , cookie for the connection.
"""
token = ''
cookie = ''
data = {
"userName": self._username,
"password": self._password
}
login_url = f"{self._url}/ams/shared/api/security/login"
body = json.dumps(data)
headers = {'Content-Type': 'application/json'}
response = self.token_request(login_url, headers=headers, data=body)
# Extracting Token
response_cookies = response.get('cookies').__dict__.get('_cookies')
if response_cookies:
cookie_key = list(response_cookies.keys())[0]
if cookie_key:
ret_cookie = response_cookies.get(cookie_key).get("/")
cookie = self.get_cookie(ret_cookie)
token = ret_cookie.get("KACE_CSRF_TOKEN").__dict__.get('value')
if not token:
raise DemistoException("Could not get token")
if not cookie:
raise DemistoException("Could not get cookie")
return token, cookie
def update_token(self):
"""Update cookie and token.
Returns:
Tuple of token and cookie.
"""
self._token, self._cookie = self.get_token()
def get_cookie(self, res_cookie: dict) -> str:
"""Get a cookie from an cookie object in the needed format for the requests.
Args:
res_cookie: part of the response that the cookie is inside it.
Returns:
string that will be sent in the requests which represents the cookie in the header.
"""
KACE_CSRF_TOKEN = res_cookie.get("KACE_CSRF_TOKEN").__dict__.get('value')
x_dell_auth_jwt = res_cookie.get("x-dell-auth-jwt").__dict__.get('value')
kboxid = res_cookie.get("kboxid").__dict__.get('value')
KACE_LAST_USER_SECURE = res_cookie.get("KACE_LAST_USER_SECURE").__dict__.get('value')
KACE_LAST_ORG_SECURE = res_cookie.get("KACE_LAST_ORG_SECURE").__dict__.get('value')
cookie = f'KACE_LAST_USER_SECURE={KACE_LAST_USER_SECURE}; KACE_LAST_ORG_SECURE={KACE_LAST_ORG_SECURE};' \
f' kboxid={kboxid}; x-dell-auth-jwt={x_dell_auth_jwt}; KACE_CSRF_TOKEN={KACE_CSRF_TOKEN}'
return cookie
def token_request(self, url: str, headers: Optional[dict] = None, data: Optional[str] = None) -> dict:
"""login request for initiating a connection with the product.
Args:
url: full url that the request will be sent to.
headers: headers of the request.
data: data of the request which includes username and password.
Returns:
Dictionary of the response from the product.
"""
try:
response = requests.request("POST", url, headers=headers, data=data, verify=self._verify)
except requests.exceptions.SSLError:
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg)
except requests.exceptions.ConnectionError:
raise DemistoException("Invalid url , Failed to establish a connection")
if response.status_code == 401:
raise DemistoException("Error Code 401 - Invalid user or password")
return response.__dict__
def machines_list_request(self, filter_fields: Optional[str] = None):
"""List of machines.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = '/inventory/machines'
if filter_fields:
url_suffix += f'?filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def assets_list_request(self, filter_fields: Optional[str] = None) -> dict:
"""List of assets.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = '/asset/assets'
if filter_fields:
url_suffix += f'?filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def queues_list_request(self, filter_fields: Optional[str] = None) -> dict:
"""List of queues.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = '/service_desk/queues?shaping=fields all'
if filter_fields:
url_suffix += f'&filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def queues_list_fields_request(self, queue_number: str) -> dict:
"""List of fields in specific queue.
Args:
            queue_number: queue number for the request.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
return self._http_request("GET", url_suffix=f"/service_desk/queues/{queue_number}/fields", headers=headers)
def tickets_list_request(self, shaping_fields: str = None, filter_fields: str = None) -> dict:
"""List of Tickets.
Args:
shaping_fields: str of the shaping that will be sent in the request.
filter_fields: str of filter that will be sent in the request.
Returns:
Response from API.
"""
if not shaping_fields:
shaping_fields = set_shaping(self)
self.update_token()
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = f"/service_desk/tickets?shaping={shaping_fields}"
if filter_fields:
url_suffix += f'&filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def create_ticket_request(self, data: str) -> dict:
"""Create Ticket
Args:
data (str): the body of the request.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie,
'Content-Type': 'application/json'
}
return self._http_request("POST", url_suffix="/service_desk/tickets", headers=headers, data=data)
def update_ticket_request(self, ticket_id: str, data: str) -> dict:
"""Update Ticket.
Args:
ticket_id (str): ticket id that will be updated.
data (str): the body of the request.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie,
'Content-Type': 'application/json'
}
return self._http_request("POST", url_suffix=f"/service_desk/tickets/{ticket_id}", headers=headers, data=data)
def delete_ticket_request(self, ticket_id: str) -> dict:
"""Delete Ticket.
Args:
ticket_id (str): ticket id that will be deleted.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie,
'Content-Type': 'application/json'
}
return self._http_request("DELETE", url_suffix=f"/service_desk/tickets/{ticket_id}", headers=headers)
def ticket_by_id_request(self, filtering_id: int) -> dict:
"""Specific ticket details by ID.
Args:
filtering_id: id for filtering by it.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
filter_fields = f"id eq {filtering_id}"
return self._http_request("GET", url_suffix=f"/service_desk/tickets?filtering={filter_fields}", headers=headers)
def test_module(client: Client, *_) -> Tuple[str, dict, dict]:
"""Function which checks if there is a connection with the api.
Args:
client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
_ = client.machines_list_request()
client.update_token()
response = client.tickets_list_request()
list_tickets_res = response.get('Tickets')
if list_tickets_res and demisto.params().get('isFetch'):
parse_date_range(demisto.params().get('fetch_time'), date_format='%Y-%m-%dT%H:%M:%SZ')
parsed_time = (datetime.utcnow() - timedelta(days=20))
incidents, _ = parse_incidents(list_tickets_res, "1", '%Y-%m-%dT%H:%M:%SZ', parsed_time)
return 'ok', {}, {}
def get_machines_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all machines in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
limit = int(args.get('limit', 50))
filter_fields = args.get('custom_filter')
response = client.machines_list_request(filter_fields)
raw_response = response.get('Machines')[:limit]
context = parse_response(raw_response)
human_readable_markdown = tableToMarkdown('Quest Kace Machines', context, removeNull=True, headers=['ID', 'Name',
'IP', 'Created',
'Modified',
'LastInventory',
'LastSync',
'ManualEntry',
'PagefileMaxSize',
'PagefileSize',
'RamTotal',
'RamUsed'])
context = {
'QuestKace.Machine(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def get_assets_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all assets in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
limit = int(args.get('limit', 50))
filter_fields = args.get('custom_filter')
response = client.assets_list_request(filter_fields)
raw_response = response.get('Assets')[:limit]
context = parse_response(raw_response)
human_readable_markdown = tableToMarkdown('Quest Kace Assets', context, removeNull=True,
headers=['ID', 'Name', 'Created', 'Modified', 'OwnerID', 'MappedID',
'AssetClassID', 'AssetDataID', 'AssetStatusID', 'AssetTypeID',
'AssetTypeName'])
context = {
'QuestKace.Asset(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def get_queues_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all queues in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
filter_fields = args.get('custom_filter')
limit = int(args.get('limit', 50))
response = client.queues_list_request(filter_fields)
raw_response = response.get('Queues')[:limit]
context = parse_response(raw_response)
human_readable_markdown = tableToMarkdown('Quest Kace Queues', context, removeNull=True,
headers=['ID', 'Name', 'Fields'])
context = {
'QuestKace.Queue(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def get_tickets_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all tickets in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
limit = int(args.get('limit', 50))
custom_shaping = args.get("custom_shaping")
custom_filter = args.get("custom_filter")
response = client.tickets_list_request(custom_shaping, custom_filter)
raw_response = response.get('Tickets')[:limit]
context = parse_response(raw_response)
for response in context:
response['IsDeleted'] = False
human_readable_markdown = tableToMarkdown('Quest Kace Tickets', context, removeNull=True,
headers=['ID', 'Title', 'Created', 'Modified', 'HdQueueID', 'DueDate'])
context = {
'QuestKace.Ticket(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def create_ticket_command(client, args) -> Tuple[str, dict, dict]:
"""Function which creates a new ticket to the system according to users arguments.
Args:
client : Integretion client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
impact = None
category = None
status = None
priority = None
hd_queue_id = args.get('queue_id')
custom_fields = args.get('custom_fields')
if (custom_fields and "hd_queue_id" not in custom_fields) and (not hd_queue_id):
raise DemistoException("hd_queue_id is a mandatory value, please add it.")
title = args.get("title")
summary = args.get('summary')
if args.get('impact'):
dict_of_obj = TICKETS_OBJECTS.get('impact')
impact = args.get('impact')
if dict_of_obj:
impact = dict_of_obj.get(args.get('impact'), args.get('impact'))
    if args.get('category'):
        dict_of_obj = TICKETS_OBJECTS.get('category')
        category = args.get('category')
        if dict_of_obj:
            category = dict_of_obj.get(args.get('category'), args.get('category'))
    if args.get('status'):
        dict_of_obj = TICKETS_OBJECTS.get('status')
        status = args.get('status')
        if dict_of_obj:
            status = dict_of_obj.get(args.get('status'), args.get('status'))
    if args.get('priority'):
        dict_of_obj = TICKETS_OBJECTS.get('priority')
        priority = args.get('priority')
        if dict_of_obj:
            priority = dict_of_obj.get(args.get('priority'), args.get('priority'))
machine = args.get('machine')
asset = args.get('asset')
body_from_args = create_body_from_args(hd_queue_id, title, summary, impact, category, status, priority, machine,
asset)
if custom_fields:
splited = split_fields(custom_fields)
body_from_args.update(splited)
temp_data = {'Tickets': [body_from_args]}
data = json.dumps(temp_data)
response = client.create_ticket_request(data)
if response.get('Result') != 'Success':
raise DemistoException('Error while adding a new ticket.')
try:
id = response.get('IDs')[0]
except Exception as e:
raise DemistoException(e)
client.update_token()
res = client.ticket_by_id_request(id)
ticket = res.get('Tickets')
ticket_view = tableToMarkdown(f'New ticket was added successfully, ticket number {id}.\n', ticket)
return ticket_view, {}, {}
def create_body_from_args(hd_queue_id: Union[str, int] = None, title: Union[str, int] = None,
summary: Union[str, int] = None, impact: Union[str, int] = None,
category: Union[str, int] = None, status: Union[str, int] = None,
priority: Union[str, int] = None, machine: Union[str, int] = None,
asset: Union[str, int] = None) -> dict:
"""Function which creates the body of the request from user arguments.
Args:
hd_queue_id: the queue number to insert the ticket to.
title: title of the ticket.
summary: summary of the ticket.
impact: impact of the ticket.
category: category of the ticket.
status: status of the ticket.
priority: priority of the ticket.
machine: machine of the ticket.
asset: asset of the ticket.
Returns:
body of the request as a dict.
"""
body = {}
if hd_queue_id:
body.update({'hd_queue_id': hd_queue_id})
if title:
body.update({'title': title})
if summary:
body.update({'summary': summary})
if impact:
body.update({'impact': impact})
if category:
body.update({'category': category})
if status:
body.update({'status': status})
if priority:
body.update({'priority': priority})
if machine:
body.update({'machine': machine})
if asset:
body.update({'asset': asset})
return body
def update_ticket_command(client, args) -> Tuple[str, dict, dict]:
    """Function which updates an existing ticket according to the user's arguments.
    Args:
        client : Integration client which communicates with the API.
        args: The user's arguments for the command.
Returns:
human readable, context, raw response of this command.
"""
impact = None
category = None
status = None
priority = None
ticket_id = args.get('ticket_id')
title = args.get("title")
summary = args.get('summary')
if args.get('impact'):
impact = TICKETS_OBJECTS['impact'][args.get('impact')]
if args.get('category'):
category = TICKETS_OBJECTS['category'][args.get('category')]
if args.get('status'):
status = TICKETS_OBJECTS['status'][args.get('status')]
if args.get('priority'):
priority = TICKETS_OBJECTS['priority'][args.get('priority')]
machine = args.get('machine')
asset = args.get('asset')
custom_fields = args.get('custom_fields')
body_from_args = create_body_from_args(title=title, summary=summary, impact=impact, category=category,
status=status,
priority=priority, machine=machine, asset=asset)
if custom_fields:
splited = split_fields(custom_fields)
body_from_args.update(splited)
temp_data = {'Tickets': [body_from_args]}
data = json.dumps(temp_data)
response = client.update_ticket_request(ticket_id, data)
if response.get('Result') != 'Success':
raise DemistoException('Error while updating the ticket.')
client.update_token()
res = client.ticket_by_id_request(ticket_id)
ticket = res.get('Tickets')
ticket_view = tableToMarkdown(f'Ticket number {ticket_id} was updated successfully.\n', ticket)
return ticket_view, {}, {}
def delete_ticket_command(client, args) -> Tuple[str, dict, dict]:
    """Function which deletes a specific ticket by ticket id.
    Args:
        client : Integration client which communicates with the API.
        args: The user's arguments for the command.
Returns:
human readable, context, raw response of this command.
"""
ticket_id = args.get('ticket_id')
try:
response = client.delete_ticket_request(ticket_id)
except Exception as e:
raise DemistoException(e)
if response.get('Result') == 'Success':
context = {}
old_context = demisto.dt(demisto.context(), f'QuestKace.Ticket(val.ID === {ticket_id})')
if old_context:
if isinstance(old_context, list):
old_context = old_context[0]
old_context['IsDeleted'] = True
context = {
'QuestKace.Ticket(val.ID === obj.ID)': old_context
}
return f'Ticket was deleted successfully. Ticket number {ticket_id}', context, {}
else:
raise DemistoException('Error while deleting the ticket.')
def fetch_incidents(client: Client, fetch_time: str, fetch_shaping: str, last_run: Dict, fetch_limit: str,
fetch_queue_id: Optional[list] = None, fetch_filter: Optional[str] = None) -> list:
"""
This function will execute each interval (default is 1 minute).
Args:
client (Client): Quest Kace Client
fetch_time: time interval for fetch incidents.
fetch_shaping: shaping for the request.
fetch_filter: custom filters for the request.
fetch_limit: limit for number of fetch incidents per fetch.
fetch_queue_id: queue id for fetch, if not given then fetch runs on all tickets in the system
last_run (dateparser.time): The greatest incident created_time we fetched from last fetch
Returns:
incidents: Incidents that will be created in Demisto
"""
if not fetch_queue_id or fetch_queue_id[0] == 'All':
fetch_queue_id = get_queue_ids(client)
time_format = '%Y-%m-%dT%H:%M:%SZ'
if not last_run: # if first time running
new_last_run = {'last_fetch': parse_date_range(fetch_time, date_format=time_format)[0]}
else:
new_last_run = last_run
if not fetch_shaping:
fetch_shaping = shaping_fetch(client, fetch_queue_id)
parsed_last_time = datetime.strptime(new_last_run.get('last_fetch', ''), time_format)
fetch_filter_for_query = f'created gt {parsed_last_time}'
if fetch_queue_id:
queue_id_str = ';'.join(fetch_queue_id)
filter_by_queue_id = f'hd_queue_id in {queue_id_str}'
fetch_filter_for_query = f'{fetch_filter_for_query},{filter_by_queue_id}'
if fetch_filter:
fetch_filter_for_query = f'{fetch_filter_for_query},{fetch_filter}'
demisto.info(f"Fetching Incident has Started,\n"
f"Fetch filter is {fetch_filter_for_query}\n"
f"Last fetch was on {str(parsed_last_time)}")
client.update_token()
items: dict = client.tickets_list_request(fetch_shaping, fetch_filter_for_query)
items: list = items.get('Tickets', [])
incidents, last_incident_time = parse_incidents(items, fetch_limit, time_format, parsed_last_time)
last_incident_time = last_incident_time.strftime(time_format)
    demisto.info(f"Fetching Incident has Finished\n"
                 f"Fetch limit was {fetch_limit}\n"
                 f"Last fetch was on {str(last_incident_time)}\n"
                 f"Number of incidents was {len(incidents)}")
demisto.setLastRun({'last_fetch': last_incident_time})
return incidents
def shaping_fetch(client: Client, fetch_queue_id: list) -> str:
"""
    Create and update shaping fields once a day and save them in the integration context.
    Args:
        client: Client for the api.
        fetch_queue_id: list of queue ids whose fields are used to build the shaping.
Returns:
the current shaping.
"""
integration_context = demisto.getIntegrationContext()
if integration_context:
valid_until = integration_context.get('valid_until')
time_now = int(time.time())
if time_now < valid_until:
fetch_shaping = integration_context.get('shaping_fields')
else:
fetch_shaping = set_shaping(client, fetch_queue_id)
integration_context = {
'shaping_fields': fetch_shaping,
'valid_until': int(time.time()) + 3600 * 24
}
demisto.setIntegrationContext(integration_context)
else:
fetch_shaping = set_shaping(client, fetch_queue_id)
integration_context = {
'shaping_fields': fetch_shaping,
'valid_until': int(time.time()) + 3600 * 24
}
demisto.setIntegrationContext(integration_context)
return fetch_shaping
def get_fields_by_queue(client, queue: Optional[list]) -> list:
"""
    Creating a list of all ticket fields for the given queues (or for every queue in the system).
    Args:
        client: Client for the api.
        queue: specific queue ids to collect fields from; if not given, all queues are used.
    Returns:
        list of field json keys.
"""
if queue:
queues_id = queue
else:
queues_id = get_queue_ids(client)
fields: list = []
for q in queues_id:
client.update_token()
fields_by_queue = client.queues_list_fields_request(queue_number=str(q))
fields_by_queue = fields_by_queue.get('Fields', [])
for field in fields_by_queue:
if field.get('jsonKey') not in fields:
# get internal error 500 from server with related tickets
if field.get('jsonKey') != 'related_tickets' and field.get('jsonKey') != 'referring_tickets':
fields.append(field.get('jsonKey'))
return fields
def get_queue_ids(client: Client) -> list:
"""
Creating a list of all queue ids that are in the system.
Args:
client: Client for the api.
Returns:
list of queue ids.
"""
queues = client.queues_list_request()
queues = queues.get('Queues', [])
queues_id = []
for q in queues:
queues_id.append(str(q.get('id')))
return queues_id
def shaping_by_fields(fields: list) -> str:
"""
    Creating a shaping string for the request, built from the fields and separated by commas.
Args:
fields: List of fields that would be part of the shaping.
Returns:
str of the shaping.
"""
shaping = 'hd_ticket all'
for field in fields:
shaping += f',{field} limited'
return shaping
def set_shaping(client, queue: Optional[list] = None) -> str:
"""
Creating a shaping for the request.
Args:
client: Client in order to get the queue fields.
queue: If specific queue is given for the shaping.
Returns:
str of the shaping.
"""
fields = get_fields_by_queue(client, queue)
shaping = shaping_by_fields(fields)
return shaping
def parse_incidents(items: list, fetch_limit: str, time_format: str, parsed_last_time: datetime) \
-> Tuple[list, Any]:
"""
This function will create a list of incidents
Args:
items : List of tickets of the api response.
fetch_limit: Limit for incidents of fetch cycle.
time_format: Time format of the integration.
parsed_last_time: limit for number of fetch incidents per fetch.
Returns:
incidents: List of incidents.
parsed_last_time: Time of last incident.
"""
count = 0
incidents = []
for item in items:
if count >= int(fetch_limit):
break
incident_created_time = dateparser.parse(item['created'])
incident = {
'name': item['title'],
'occurred': incident_created_time.strftime(time_format),
'rawJSON': json.dumps(item)
}
incidents.append(incident)
count += 1
parsed_last_time = incident_created_time
return incidents, parsed_last_time
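# Illustrative sketch of parse_incidents (hypothetical ticket item, values made up):
#     {'title': 'Printer down', 'created': '2020-01-01 10:00:00', ...}
# is turned into
#     {'name': 'Printer down', 'occurred': '2020-01-01T10:00:00Z', 'rawJSON': '<the item as JSON>'}
# and parsed_last_time ends up as the created time of the last parsed ticket.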
def split_fields(fields: str = '') -> dict:
"""Split str fields of Demisto arguments to request fields by the char ';'.
Args:
fields: fields in a string representation.
Returns:
dic_fields object for request.
"""
dic_fields = {}
if fields:
if '=' not in fields:
raise Exception(
f"The argument: {fields}.\nmust contain a '=' to specify the keys and values. e.g: key=val.")
arr_fields = fields.split(';')
for f in arr_fields:
field = f.split('=', 1) # a field might include a '=' sign in the value. thus, splitting only once.
if len(field) > 1:
dic_fields[field[0]] = field[1]
return dic_fields
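# Illustrative sketch of split_fields (hypothetical argument values, not from the integration itself):
#     split_fields('impact=high;owner=jdoe')  ->  {'impact': 'high', 'owner': 'jdoe'}
#     split_fields('note=a=b;status=open')    ->  {'note': 'a=b', 'status': 'open'}
# because each pair is split on the first '=' only.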
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get("identifier")
password = params.get('credentials').get('password')
base_url = params.get('url')
proxy = demisto.params().get('proxy', False)
verify_certificate = not params.get('insecure', False)
# fetch incidents params
fetch_limit = params.get('fetch_limit', 10)
fetch_time = params.get('fetch_time', '1 day')
fetch_shaping = params.get('fetch_shaping')
fetch_filter = params.get('fetch_filter')
fetch_queue_id = argToList(params.get('fetch_queue_id'))
try:
client = Client(
url=base_url,
username=username,
password=password,
verify=verify_certificate,
proxy=proxy)
command = demisto.command()
LOG(f'Command being called is {command}')
# Commands dict
commands: Dict[str, Callable[[Client, Dict[str, str]], Tuple[str, dict, dict]]] = {
'test-module': test_module,
'kace-machines-list': get_machines_list_command,
'kace-assets-list': get_assets_list_command,
'kace-queues-list': get_queues_list_command,
'kace-tickets-list': get_tickets_list_command,
'kace-ticket-create': create_ticket_command,
'kace-ticket-update': update_ticket_command,
'kace-ticket-delete': delete_ticket_command,
}
if command in commands:
return_outputs(*commands[command](client, demisto.args()))
elif command == 'fetch-incidents':
incidents = fetch_incidents(client, fetch_time=fetch_time, fetch_shaping=fetch_shaping,
fetch_filter=fetch_filter, fetch_limit=fetch_limit,
fetch_queue_id=fetch_queue_id, last_run=demisto.getLastRun())
demisto.incidents(incidents)
else:
raise NotImplementedError(f'{command} is not an existing QuestKace command')
except Exception as e:
return_error(f'Error from QuestKace Integration.\n'
f'Failed to execute {demisto.command()} command.\n\n Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | -6,252,844,452,430,832,000 | 38.34607 | 122 | 0.575289 | false | 3.997006 | false | false | false |
loriab/pylibefp | devtools/scripts/conda_env.py | 1 | 1118 | import argparse
import os
import shutil
import subprocess as sp
# Args
parser = argparse.ArgumentParser(description='Creates a conda environment from file for a given Python version.')
parser.add_argument('-n', '--name', type=str, nargs=1, help='The name of the created Python environment')
parser.add_argument('-p', '--python', type=str, nargs=1, help='The version of the created Python environment')
parser.add_argument('conda_file', nargs='*', help='The file for the created Python environment')
args = parser.parse_args()
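# Example invocation (hypothetical names and paths, not part of the original script):
#     python conda_env.py -n test-env -p 3.7 devtools/conda-envs/base.yaml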
with open(args.conda_file[0], "r") as handle:
script = handle.read()
tmp_file = "tmp_env.yaml"
script = script.replace("- python", "- python {}*".format(args.python[0]))
with open(tmp_file, "w") as handle:
handle.write(script)
conda_path = shutil.which("conda")
print("CONDA ENV NAME {}".format(args.name[0]))
print("PYTHON VERSION {}".format(args.python[0]))
print("CONDA FILE NAME {}".format(args.conda_file[0]))
print("CONDA path {}".format(conda_path))
sp.call("{} env create -n {} -f {}".format(conda_path, args.name[0], tmp_file), shell=True)
os.unlink(tmp_file)
| bsd-3-clause | 4,626,609,567,384,536,000 | 35.064516 | 113 | 0.698569 | false | 3.307692 | false | false | false |
opennode/waldur-mastermind | src/waldur_openstack/openstack/log.py | 1 | 5164 | from waldur_core.logging.loggers import EventLogger, event_logger
class TenantQuotaLogger(EventLogger):
quota = 'quotas.Quota'
tenant = 'openstack.Tenant'
limit = float
old_limit = float
class Meta:
event_types = ('openstack_tenant_quota_limit_updated',)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
tenant = event_context['tenant']
project = tenant.project
return {tenant, project, project.customer}
class RouterLogger(EventLogger):
router = 'openstack.Router'
old_routes = list
new_routes = list
tenant_backend_id = str
class Meta:
event_types = ('openstack_router_updated',)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
router = event_context['router']
project = router.project
return {project, project.customer}
class SecurityGroupLogger(EventLogger):
security_group = 'openstack.SecurityGroup'
class Meta:
event_types = (
'openstack_security_group_imported',
'openstack_security_group_created',
'openstack_security_group_updated',
'openstack_security_group_pulled',
'openstack_security_group_deleted',
'openstack_security_group_cleaned',
)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
security_group = event_context['security_group']
return {
security_group,
security_group.tenant,
}
class SecurityGroupRuleLogger(EventLogger):
security_group_rule = 'openstack.SecurityGroupRule'
class Meta:
event_types = (
'openstack_security_group_rule_imported',
'openstack_security_group_rule_created',
'openstack_security_group_rule_updated',
'openstack_security_group_rule_deleted',
'openstack_security_group_rule_cleaned',
)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
security_group_rule = event_context['security_group_rule']
return [
security_group_rule,
security_group_rule.security_group,
]
class NetworkLogger(EventLogger):
network = 'openstack.Network'
class Meta:
event_types = (
'openstack_network_imported',
'openstack_network_created',
'openstack_network_updated',
'openstack_network_pulled',
'openstack_network_deleted',
'openstack_network_cleaned',
)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
network = event_context['network']
return {
network,
network.tenant,
}
class SubNetLogger(EventLogger):
subnet = 'openstack.SubNet'
class Meta:
event_types = (
'openstack_subnet_created',
'openstack_subnet_imported',
'openstack_subnet_updated',
'openstack_subnet_pulled',
'openstack_subnet_deleted',
'openstack_subnet_cleaned',
)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
subnet = event_context['subnet']
return {
subnet,
subnet.network,
}
class PortLogger(EventLogger):
port = 'openstack.Port'
class Meta:
event_types = (
'openstack_port_created',
'openstack_port_imported',
'openstack_port_pulled',
'openstack_port_deleted',
'openstack_port_cleaned',
)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
port = event_context['port']
return {
port,
port.network,
}
class FloatingIPLogger(EventLogger):
floating_ip = 'openstack.FloatingIP'
class Meta:
event_types = (
'openstack_floating_ip_attached',
'openstack_floating_ip_detached',
)
event_groups = {
'resources': event_types,
}
@staticmethod
def get_scopes(event_context):
floating_ip = event_context['floating_ip']
port = event_context['port']
return {floating_ip, floating_ip.tenant, port}
event_logger.register('openstack_tenant_quota', TenantQuotaLogger)
event_logger.register('openstack_router', RouterLogger)
event_logger.register('openstack_network', NetworkLogger)
event_logger.register('openstack_subnet', SubNetLogger)
event_logger.register('openstack_security_group', SecurityGroupLogger)
event_logger.register('openstack_security_group_rule', SecurityGroupRuleLogger)
event_logger.register('openstack_port', PortLogger)
event_logger.register('openstack_floating_ip', FloatingIPLogger)
| mit | -4,542,801,052,760,656,000 | 26.036649 | 79 | 0.594888 | false | 4.339496 | false | false | false |
opi9a/data_accelerator | plot_functions.py | 1 | 11155 |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import rcParams
import matplotlib.ticker as ticker
import projection_funcs as pf
import policy_tools as pt
import inspect
from copy import deepcopy
def bigplot(scens, res_df, shapes_df, name=None, _debug=False):
    '''Makes three plots:
    Shapes, based on the passed shapes_df (or one will be made)
    Cumulative spend, based on the passed results df
    Annual diffs vs the first scenario
    '''
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
if shapes_df is None:
shapes_df = pt.make_shapes1(scens, flat=True, multi_index=True).sort_index(axis=1)
# MAKE A TABLE WITH PARAMETERS & SUMMARY
params_table1 = pt.make_params_table(scens).append(res_df.groupby(res_df.index.year).sum().iloc[:5,:])
params_table1
fig = plt.figure(figsize=(10,10), dpi=200)
legend = list(shapes_df.columns.levels[0])
max_y = shapes_df.max().max()*1.1*144
pad = 25
if _debug: print('columns to plot are'.ljust(pad), shapes_df.columns)
# get only lines we want
right_lines = [x for x in shapes_df.columns.levels[1] if '_init' not in x]
if _debug: print("right_lines".ljust(pad), right_lines)
# get the df sorted etc
sorted_df = shapes_df.sort_index(axis=1)
for i, line in enumerate(right_lines):
# this is the crucial operation which reorganises the df across scenarios
# eg grouping together EoL spendlines across baseline, option1, option2
# NB annualising here
if _debug: print("\n" + "+"*10 + "\nLINE is".ljust(pad), line)
if _debug: print("index is".ljust(pad), i)
sub_df = sorted_df.xs(line, level=1, axis=1) *144
if '_init' in line:
if _debug: print('exiting as contains init')
break
if _debug: print('sub_df'); print(sub_df.head(), "\n")
# make the plot
ax = plt.subplot2grid((3, 3),(0,i))
# ax = plt.subplot2grid((4, 4),(3,i), rowspan=0)
for j in sub_df.columns:
if _debug: print('\nnow in sub_df col'.ljust(pad), j)
# these are now double-annualised
if j == 'baseline': # treat the baseline separately
if _debug: print('plotting dfcol (base)'.ljust(pad), j)
if _debug: print('data'); print(sub_df[j].head())
ax.plot(sub_df.index/12, sub_df[j], color='black')
else:
if _debug: print('plotting dfcol (not base)'.ljust(pad), j)
if _debug: print('data'); print(sub_df[j].head())
ax.plot(sub_df.index/12, sub_df[j], alpha=0.75)
ax.set_title(line + " cohorts")
ax.set_xlabel('years post launch')
ax.set_ylim(0,max_y)
if i == 0:
ax.legend(legend)
# if i == 0: ax.legend([p for p in pols])
ax.set_ylabel('£m, annualised')
else: ax.yaxis.set_tick_params(label1On=False)
# SECOND ROW: cumulative spend
ax = plt.subplot2grid((3, 3),(1,0), colspan=2)
# ax = plt.subplot2grid((4, 4),(0,2), rowspan=2, colspan=2)
plot_cumspend_line(res_df, plot_pers=60, annualise=True, ax=ax, _debug=_debug) # annualise
ax.set_title('Annualised net spend on future launches')
ax.legend(legend)
ax.set_ylabel('£m, annualised')
# THIRD ROW: annual diffs
# get data grouped by scenario (aggregating over spendlines)
data = deepcopy(res_df.groupby(axis=1, level=0).sum())
ax = plt.subplot2grid((3, 3),(2,0), colspan=2)
# ax = plt.subplot2grid((4, 4),(2,2), rowspan=3, colspan=2)
plot_ann_diffs(data, ax=ax, net_spend=True, legend=legend[1:], table=True)
fig.subplots_adjust(hspace=0.6, wspace=0.3)
if name is not None:
fig.savefig('figs/' + name + '.png')
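# Minimal usage sketch (hypothetical inputs), assuming `scens` is the dict of scenarios
# and `res_df` the monthly results DataFrame with a (scenario, spendline) column MultiIndex:
#     bigplot(scens, res_df, shapes_df=None, name='scenario_comparison')
# With shapes_df=None the shapes are rebuilt via pt.make_shapes1, and the figure is
# saved to 'figs/scenario_comparison.png'.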
##_________________________________________________________________________##
def plot_cumspend_line(res_df, annualise=True, net_spend=False, plot_pers=None,
fig=None, ax=None, figsize=None, return_fig=False, save_path=None, _debug=False):
    '''Plots a line graph of scenarios, summing across spendlines.
    Input is a dataframe of results, which is summed for scenarios (level 0 of the column multi-index).
    Can either generate a new plot, or add to an existing axis (in which case pass ax).
    Limit the time interval by specifying plot_pers.
    '''
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
pad=20
# need to avoid actually changing res_df
ann_factor = 1
if annualise: ann_factor = 12
if plot_pers is None: plot_pers = len(res_df)
if _debug: print('plot pers'.ljust(pad), plot_pers)
ind = res_df.index.to_timestamp()[:plot_pers]
# sum for the scenarios - highest level of column multi-index
scen_lines = res_df.groupby(level=0, axis=1).sum().iloc[:plot_pers, :] * ann_factor
if _debug: print('scen_lines:\n', scen_lines.head())
# create fig and ax, unless passed (which they will be if plotting in existing grid)
if fig is None and ax is None:
fig, ax = plt.subplots(figsize=figsize)
for i, p in enumerate(scen_lines):
if i==0:
ax.plot(ind, scen_lines[p].values, color='black')
else:
ax.plot(ind, scen_lines[p].values, alpha=0.75)
for t in ax.get_xticklabels():
t.set_rotation(45)
ax.legend(scen_lines.columns)
ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])
title_str = ""
if net_spend: title_str = " net"
ax.set_title("Accumulated{} spend".format(title_str))
if save_path is not None:
fig.savefig(save_path)
if _debug: print("\nLEAVING FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..returning to: ".ljust(20), inspect.stack()[1][3], end="\n\n")
if return_fig: return(fig)
##_________________________________________________________________________##
def plot_ann_diffs(projs, max_yrs=5, fig=None, ax=None, figsize=None,
table=False, legend=None, net_spend=False, return_fig=False, save_path=None, _debug=False):
'''Plots a bar chart of annual data, subtracting the first column
Can either generate a new plot, or add to existing axis (in which case pass ax)
'''
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
diffs = projs.iloc[:,1:].subtract(projs.iloc[:,0], axis=0)
diffs = diffs.groupby(diffs.index.year).sum().iloc[:max_yrs,:]
ind = diffs.index
# set the name of the counterfactual
col_zero = projs.columns[0]
if isinstance(col_zero, tuple):
counterfactual_name = col_zero[0]
else: counterfactual_name = col_zero
# create fig and ax, unless passed (which they will be if plotting in existing grid)
if fig is None and ax is None:
fig, ax = plt.subplots(figsize=figsize)
num_rects = len(diffs.columns)
rect_width = 0.5
gap = 0.45
for i, x in enumerate(diffs):
rect = ax.bar(diffs.index + ((i/num_rects)*(1-gap)), diffs[x],
width=rect_width/num_rects)
title_str = ""
if net_spend: title_str = " net"
ax.set_title("Difference in{} annual spend vs ".format(title_str) + counterfactual_name +", £m")
ax.tick_params(axis='x', bottom='off')
ax.grid(False, axis='x')
# for t in ax.get_xticklabels():
# t.set_rotation(45)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])
if legend is not None:
ax.legend(legend)
else:
ax.legend(diffs.columns)
if len(diffs.columns)>2: ax.legend(diffs.columns)
if table:
ax.set_xticks([])
rows = []
for x in diffs:
rows.append(["{:0,.0f}".format(y) for y in diffs[x]])
row_labs = None
if legend: row_labs = legend
else: row_labs = diffs.columns
c_labels = list(diffs.index)
tab = ax.table(cellText=rows, colLabels=c_labels, rowLabels= row_labs)
tab.set_fontsize(12)
tab.scale(1,2)
tab.auto_set_font_size
if save_path is not None:
fig.savefig(save_path)
if _debug: print("\nLEAVING FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..returning to: ".ljust(20), inspect.stack()[1][3], end="\n\n")
if return_fig: return(fig)
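# Minimal usage sketch (hypothetical DataFrame `projs`), assuming a date-based index
# and one column per scenario with the counterfactual first:
#     fig = plot_ann_diffs(projs, max_yrs=5, net_spend=True, return_fig=True)
#     fig.savefig('figs/annual_diffs.png')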
##_________________________________________________________________________##
def plot_impact_grid3(policy, start_m, n_pers, projs=None, diffs=None, max_bar_yrs=5, plot_pers=None, net_spend=False,
save_path=None, plot_bar=True, return_fig=False,
table=False):
'''Plots a grid of charts.
Going to change this to use individual plotting functions
for each chart commonly needed, so can then choose whatever grid layout
'''
if projs is None: projs = project_policy(policy, start_m, n_pers, net_spend=net_spend)
ind = projs.index.to_timestamp()
if diffs is None: diffs = projs.iloc[:,1:].subtract(projs.iloc[:,0], axis=0)
# plot all shapes and cumulated projections
# for diffs, calc vs first columnb
annual_projs = projs.groupby(projs.index.year).sum()
annual_diffs = diffs.groupby(diffs.index.year).sum()
tab_rows = 2
if plot_bar:
tab_rows +=1
if table:
tab_rows +=1
fig = plt.figure(figsize=(12,tab_rows*5))
rcParams['axes.titlepad'] = 12
ax0 = plt.subplot2grid((tab_rows,2), (0, 0))
plot_shapes_line(policy, annualise=True, ax=ax0)
ax1 = plt.subplot2grid((tab_rows,2), (0, 1))
plot_cumspend_line(start_m=start_m, n_pers=n_pers, annualise=True, plot_pers=plot_pers, policy=policy, net_spend=net_spend, ax=ax1)
if plot_bar:
ax2 = plt.subplot2grid((tab_rows,2), (1, 0), colspan=2)
plot_diffs_ann_bar(start_m=start_m, n_pers=n_pers, ax=ax2, projs=projs, diffs=diffs,
table=True, max_yrs=max_bar_yrs, net_spend=net_spend)
# if table:
# tab = plt.subplot2grid((tab_rows,2), (2, 0), colspan=2)
# tab.set_frame_on(False)
# tab.set_xticks([])
# tab.set_yticks([])
# rowvals = ["{:0,.0f}".format(x) for x in annual_diffs.iloc[:,0].values]
# the_table = tab.table(cellText=[rowvals], rowLabels=['spend, £m'],
# loc='top')
# the_table.auto_set_font_size(False)
# the_table.set_fontsize(10)
# fig.text(0.13,0.8,'here is text')
fig.subplots_adjust(hspace=0.4, wspace=0.3)
if save_path is not None:
fig.savefig(save_path)
if return_fig:
return fig
##_________________________________________________________________________## | apache-2.0 | -6,667,129,267,280,047,000 | 34.629393 | 135 | 0.590351 | false | 3.237805 | false | false | false |
FedericoCeratto/firelet | tests/test_webapp.py | 1 | 15390 | # Firelet - Distributed firewall management.
# Copyright (C) 2010 Federico Ceratto
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pytest import raises
from webtest import TestApp, AppError
import bottle
import logging
import pytest
from firelet import fireletd
from firelet.flcore import GitFireSet, DemoGitFireSet, Users
from firelet.flssh import MockSSHConnector
from firelet.mailer import Mailer
import firelet.flssh
log = logging.getLogger(__name__)
# TODO: fix skipped tests
skip = pytest.mark.skipif("True")
class Conf(object):
public_url = 'http://localhost'
stop_on_extra_interfaces = False
@pytest.fixture
def mailer(monkeypatch):
mailer = Mailer(
sender = '[email protected]',
recipients = '[email protected]',
smtp_server = 'bogus-email-server',
)
monkeypatch.setattr(mailer, 'send_msg', lambda *a, **kw: None)
return mailer
@pytest.fixture
def mock_ssh(monkeypatch):
# FIXME: broken
monkeypatch.setattr(firelet.flssh, 'SSHConnector', MockSSHConnector)
@pytest.fixture
def raw_app(repodir, mailer, mock_ssh):
"""Create app (without logging in)"""
bottle.debug(True)
app = TestApp(fireletd.app)
assert not app.cookies
fireletd.conf = Conf()
assert fireletd.conf
fireletd.users = Users(d=repodir)
fireletd.mailer = mailer
fireletd.fs = GitFireSet(repodir)
return app
@pytest.fixture
def webapp(raw_app):
"""Create app and log in"""
assert not raw_app.cookies
raw_app.post('/login', {'user': 'Ada', 'pwd': 'ada'})
assert raw_app.cookies.keys() == ['fireletd']
return raw_app
# Unauthenticated tests
def test_bogus_page(raw_app):
with raises(AppError):
raw_app.get('/bogus_page')
def test_index_page_unauth(raw_app):
out = raw_app.get('/')
assert out.status_code == 200
@skip
def test_login_unauth(raw_app):
out = raw_app.get('/login')
assert out.status_code == 200
def test_login_incorrect(raw_app):
assert not raw_app.cookies
out = raw_app.post('/login', {'user': 'bogus', 'pwd': 'bogus'})
assert not raw_app.cookies
def test_login_correct(raw_app):
assert not raw_app.cookies
raw_app.post('/login', {'user': 'Ada', 'pwd': 'ada'})
assert raw_app.cookies.keys() == ['fireletd']
def test_logout_unauth(raw_app):
out = raw_app.get('/logout')
assert out.status_code == 302 # redirect
# Authenticated tests
def test_index_page(webapp):
out = webapp.get('/')
assert out.status_code == 200
assert 'DOCTYPE' in out.text
assert 'body' in out.text
assert 'Distributed firewall management' in out
assert '</html>' in out
def test_logout(webapp):
assert webapp.cookies.keys() == ['fireletd']
webapp.get('/logout')
assert not webapp.cookies.keys()
def test_double_login(webapp):
# log in again
assert webapp.cookies.keys() == ['fireletd']
webapp.post('/login', {'user': 'Ada', 'pwd': 'ada'})
assert webapp.cookies.keys() == ['fireletd']
def test_messages(webapp):
out = webapp.get('/messages')
assert str(out.html) == ''
def test_ruleset(webapp):
out = webapp.get('/ruleset')
assert out.pyquery('table#items')
assert 'Ssh access from the test workstation' in out.text
rules = out.pyquery('table#items tr')
assert len(rules) == 11 # 10 rules plus header
def test_ruleset_post_delete(webapp):
out = webapp.post('/ruleset', dict(
action='delete',
rid=0,
))
assert out.json == {u'ok': True}
out = webapp.get('/ruleset')
rules = out.pyquery('table#items tr')
assert len(rules) == 10 # 9 rules plus header
def test_ruleset_post_moveup(webapp):
out = webapp.post('/ruleset', dict(
action='moveup',
rid=1,
))
assert out.json == {u'ok': True}
@skip
def test_ruleset_post_moveup_incorrect(webapp):
out = webapp.post('/ruleset', dict(
action='moveup',
rid=0,
))
assert out.json == {u'ok': True}
def test_ruleset_post_movedown(webapp):
out = webapp.post('/ruleset', dict(
action='movedown',
rid=1,
))
assert out.json == {u'ok': True}
#TODO: movedown error on last rule
def test_ruleset_post_disable(webapp):
out = webapp.post('/ruleset', dict(
action='disable',
rid=1,
))
assert out.json == {u'ok': True}
def test_ruleset_post_enable(webapp):
out = webapp.post('/ruleset', dict(
action='enable',
rid=1,
))
assert out.json == {u'ok': True}
@skip
def test_ruleset_post_save(webapp):
out = webapp.post('/ruleset', dict(
action='save',
rid=1,
name='newrule',
src='a',
src_serv='SSH',
dst='b',
dst_serv='SSH',
desc='New rule',
))
assert 0, out
assert out.json == {u'ok': True}
def test_ruleset_post_newabove(webapp):
out = webapp.get('/ruleset')
rules = out.pyquery('table#items tr')
assert len(rules) == 11 # 10 rules plus header
out = webapp.post('/ruleset', dict(
action='newabove',
rid=1,
))
#TODO: return an ack
out = webapp.get('/ruleset')
rules = out.pyquery('table#items tr')
assert len(rules) == 12
def test_ruleset_post_newbelow(webapp):
out = webapp.get('/ruleset')
rules = out.pyquery('table#items tr')
assert len(rules) == 11 # 10 rules plus header
out = webapp.post('/ruleset', dict(
action='newbelow',
rid=1,
))
#TODO: return an ack
out = webapp.get('/ruleset')
rules = out.pyquery('table#items tr')
assert len(rules) == 12
def test_ruleset_post_unknown_action(webapp):
with raises(Exception):
webapp.post('/ruleset', dict(action='bogus', rid=1))
def test_sib_names(webapp):
out = webapp.post('/sib_names')
    assert out.json == {u'sib_names': [u'AllSystems', u'BorderFW:eth0', u'BorderFW:eth1', u'BorderFW:eth2', u'Clients', u'InternalFW:eth0', u'InternalFW:eth1', u'SSHnodes', u'Server001:eth0', u'Servers', u'Smeagol:eth0', u'Tester:eth1', u'WebServers']}
def test_hostgroups(webapp):
out = webapp.get('/hostgroups')
assert 'SSHnodes' in out
assert len(out.pyquery('table#items tr')) == 6
def test_hostgroups_post_save_new_hg(webapp):
out = webapp.get('/hostgroups')
assert len(out.pyquery('table#items tr')) == 6
out = webapp.post('/hostgroups', dict(
action = 'save',
childs = 'Border, Localhost',
rid = '',
))
out = webapp.get('/hostgroups')
assert len(out.pyquery('table#items tr')) == 7
def test_hostgroups_post_save_update(webapp):
# update existing hg
out = webapp.get('/hostgroups')
assert len(out.pyquery('table#items tr')) == 6
out = webapp.post('/hostgroups', dict(
action = 'save',
childs = 'Border, Localhost',
rid = '2',
))
out = webapp.get('/hostgroups')
assert len(out.pyquery('table#items tr')) == 6
def test_hostgroups_post_delete(webapp):
out = webapp.get('/hostgroups')
assert len(out.pyquery('table#items tr')) == 6
out = webapp.post('/hostgroups', dict(
action='delete',
rid=1,
))
out = webapp.get('/hostgroups')
assert len(out.pyquery('table#items tr')) == 5
def test_hostgroups_post_fetch(webapp):
out = webapp.post('/hostgroups', dict(
action='fetch',
rid=1,
))
assert out.json == {u'token': u'd74e8fce', u'childs': [u'Smeagol:eth0'], u'name': u'SSHnodes'}
def test_hostgroups_post_unknown_action(webapp):
with raises(Exception):
webapp.post('/hostgroups', dict(action='bogus', rid=''))
def test_hosts(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
def test_hosts_post_delete(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
out = webapp.post('/hosts', dict(
action='delete',
rid=1,
))
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 8
def test_hosts_post_save_new_host(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
out = webapp.post('/hosts', dict(
action = 'save',
hostname = 'foo',
iface = 'eth0',
ip_addr = '1.2.3.4',
local_fw = '1',
masklen = '24',
mng = '1',
network_fw = '0',
rid = '',
routed = 'Internet',
))
assert out.json['ok'] == True
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 10
def test_hosts_post_save_update_host(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
out = webapp.post('/hosts', dict(
action = 'save',
hostname = 'foo',
iface = 'eth0',
ip_addr = '1.2.3.4',
local_fw = '1',
masklen = '24',
mng = '1',
network_fw = '0',
rid = '2',
routed = 'Internet',
))
assert out.json['ok'] == True
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
def test_hosts_post_fetch(webapp):
out = webapp.post('/hosts', dict(
action='fetch',
rid=1,
))
assert out.json == {u'masklen': u'24', u'iface': u'eth1', u'ip_addr': u'10.66.2.1', u'hostname': u'InternalFW', u'routed': [], u'local_fw': 1, u'token': u'db9018c1', u'network_fw': 1, u'mng': 1}
def test_hosts_post_unknown_action(webapp):
with raises(Exception):
webapp.post('/hosts', dict(action='bogus', rid=''))
def test_net_names(webapp):
out = webapp.post('/net_names')
assert out.json == {u'net_names': [u'Internet', u'production_net', u'rivendell', u'shire']}
def test_networks(webapp):
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 5
def test_networks_post_save_new_network(webapp):
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 5
out = webapp.post('/networks', dict(
action = 'save',
name = 'foo',
ip_addr = '1.2.3.4',
masklen = '24',
rid = '',
))
assert out.json['ok'] == True
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 6
out = webapp.post('/networks', dict(
action='fetch',
rid=4,
))
assert out.json['name'] == 'foo'
def test_networks_post_save_update_network(webapp):
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 5
out = webapp.post('/networks', dict(
action = 'save',
name = 'foo',
ip_addr = '1.2.3.4',
masklen = '24',
rid = '2',
))
assert out.json['ok'] == True
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 5
def test_networks_post_delete(webapp):
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 5
out = webapp.post('/networks', dict(
action='delete',
rid=1,
))
out = webapp.get('/networks')
assert len(out.pyquery('table#items tr')) == 4
def test_networks_post_fetch(webapp):
out = webapp.post('/networks', dict(
action='fetch',
rid=1,
))
assert out.json == {u'masklen': 24, u'ip_addr': u'10.66.2.0', u'name': u'production_net', u'token': u'657ed9ec'}
def test_networks_post_unknown_action(webapp):
with raises(Exception):
webapp.post('/networks', dict(action='bogus', rid=''))
def test_services(webapp):
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 8
def test_services_post_save_new_network_tcp(webapp):
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 8
out = webapp.post('/services', dict(
action = 'save',
name = 'foo',
protocol = 'TCP',
ports = '80',
rid = '',
))
assert out.json['ok'] == True
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 9
out = webapp.post('/services', dict(
action='fetch',
rid=7,
))
assert out.json['name'] == 'foo'
def test_services_post_save_new_network_icmp(webapp):
out = webapp.post('/services', dict(
action = 'save',
name = 'foo',
protocol = 'ICMP',
icmp_type = '8',
rid = '',
))
assert out.json['ok'] == True
out = webapp.post('/services', dict(
action='fetch',
rid=7,
))
assert out.json['name'] == 'foo'
assert out.json['protocol'] == 'ICMP'
assert out.json['ports'] == '8'
def test_services_post_save_new_network_other_protocol(webapp):
out = webapp.post('/services', dict(
action = 'save',
name = 'foo',
protocol = 'AH',
rid = '',
))
assert out.json['ok'] == True
out = webapp.post('/services', dict(
action='fetch',
rid=7,
))
assert out.json['name'] == 'foo'
assert out.json['protocol'] == 'AH'
def test_services_post_save_update_network(webapp):
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 8
out = webapp.post('/services', dict(
action = 'save',
name = 'foo',
protocol = 'TCP',
ports = '80',
rid = '2',
))
assert out.json['ok'] == True
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 8
def test_services_post_delete(webapp):
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 8
out = webapp.post('/services', dict(
action='delete',
rid=1,
))
out = webapp.get('/services')
assert len(out.pyquery('table#items tr')) == 7
def test_services_post_fetch(webapp):
out = webapp.post('/services', dict(
action='fetch',
rid=1,
))
assert out.json == {u'token': u'89a7c78e', u'protocol': u'TCP', u'ports': u'80', u'name': u'HTTP'}
def test_services_post_unknown_action(webapp):
with raises(Exception):
webapp.post('/services', dict(action='bogus', rid=''))
def test_manage(webapp):
out = webapp.get('/manage')
assert len(out.pyquery('button')) == 3
def test_save_needed(webapp):
out = webapp.get('/save_needed')
assert out.json['sn'] == False
def test_save_post(webapp):
out = webapp.post('/save', dict(
msg='test',
))
assert out.json['ok'] == True
def test_reset_post(webapp):
out = webapp.post('/reset')
assert out.json['ok'] == True
@skip
def test_check_post(webapp):
out = webapp.post('/api/1/check')
assert out.json['ok'] == True
def test_rss(webapp):
out = webapp.get('/rss')
assert 'rss/deployments' in out
def test_rss_channel(webapp):
out = webapp.get('/rss/deployments')
assert 'http://localhost/rss/deployments' in out
assert 'rss' in out
| gpl-3.0 | 9,068,727,494,244,704,000 | 27.186813 | 245 | 0.60052 | false | 3.297622 | true | false | false |
boundary/meter-plugin-ticker | bootstrap.py | 2 | 2058 | #!/usr/bin/env python
import os
import shutil
import sys
import subprocess
import tarfile
import urllib
class Bootstrap:
def __init__(self,
version="12.0.4",
base='http://pypi.python.org/packages/source/v/virtualenv',
python="python2",
env="pyenv",
requirements="requirements.txt"):
self.version = version
self.base = base
self.python = python
self.env = env
self.dirname = 'virtualenv-' + self.version
self.tgz_file = self.dirname + '.tar.gz'
self.venv_url = self.base + '/' + self.tgz_file
self.requirements=requirements
def shellcmd(self,cmd,echo=False):
""" Run 'cmd' in the shell and return its standard out.
"""
if echo: print('[cmd] {0}'.format(cmd))
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
out = p.communicate()[0]
if echo: print(out)
return out
def download(self):
""" Fetch virtualenv from PyPI
"""
urllib.urlretrieve(self.venv_url,self.tgz_file)
def extract(self):
""" Untar
"""
tar = tarfile.open(self.tgz_file,"r:gz")
tar.extractall()
def create(self):
""" Create the initial env
"""
self.shellcmd('{0} {1}/virtualenv.py {2}'.format(self.python,self.dirname,self.env))
def install(self):
"""Install the virtualenv package itself into the initial env
"""
self.shellcmd('{0}/bin/pip install {1}'.format(self.env,self.tgz_file))
    def install_libs(self):
        """Install the packages listed in the requirements file into the env
"""
self.shellcmd('{0}/bin/pip install -r {1}'.format(self.env,self.requirements))
def cleanup(self):
""" Cleanup
"""
os.remove(self.tgz_file)
shutil.rmtree(self.dirname)
    def setup(self):
        """Bootstraps a Python environment
"""
self.download()
self.extract()
self.create()
self.install()
self.cleanup()
if os.path.isfile(self.requirements):
self.install_libs()
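# Illustrative sketch (hypothetical values): a different interpreter, env directory or
# requirements file can be passed to the constructor, e.g.
#     Bootstrap(python="python2.7", env="meter-env", requirements="requirements.txt").setup()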
if __name__ == "__main__":
bootstrap = Bootstrap()
bootstrap.setup()
| apache-2.0 | 6,730,018,688,644,087,000 | 24.725 | 88 | 0.620991 | false | 3.648936 | false | false | false |
parallel-fs-utils/multi-thread-posix | parallel-untar.py | 1 | 7771 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# parallel-untar.py - unpack tarball subdirectories in parallel
#
# copyright (c) 2015 Ben England, Red Hat, under Apache license
# see http://www.apache.org/licenses/LICENSE-2.0 for license details
import os
import errno
import tarfile
# we use multiprocessing module to create separate sub-processes and avoid
# the constraints of the python GIL
import multiprocessing
import sys
import time
debug = (os.getenv('DEBUG') is not None)
NOTOK = 1 # process failure exit status
def usage(msg):
print('ERROR: ' + msg)
print('usage: parallel-untar.py your-file.tar [ max-threads ]')
sys.exit(NOTOK)
fmt_dangling_link = \
'ERROR: %s is a link pointing to an absolute pathname that does not exist'
fmt_link2nonexistent = \
'%s is a link pointing to a relative non-existent file'
# parse command line inputs
thread_count = 4
start_time = time.time()
if len(sys.argv) > 2:
try:
thread_count = int(sys.argv[2])
except ValueError as e:
usage('could not parse thread count %s' % sys.argv[2])
elif len(sys.argv) < 2:
usage('must supply .tar file')
fn = sys.argv[1]
if fn == '--help' or fn == '-h':
usage('so you need help, we all knew that ;-)')
print('untarring file %s with up to %d parallel threads' % (fn, thread_count))
if not fn.endswith('.tar'):
usage('parallel-untar.py does not yet support compressed tar files' +
'uncompress first to .tar file then run it on that')
if not os.path.exists(fn):
usage('does not exist: %s' % fn)
# this class partitions directories in tar file amongst worker threads
# in a static way
# (thread k handles all directories with index d mod thread_count == k )
# so that no preprocessing is needed
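# for example (hypothetical numbers): with thread_count = 4, the 0th, 4th, 8th, ...
# directory entries read from the archive go to thread 0, the 1st, 5th, 9th, ... to
# thread 1, and so on; each file is handled by the thread that owns its parent directory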
class untarThread(multiprocessing.Process):
def __init__(
self, parent_conn_in, child_conn_in,
index_in, thread_count_in, archive_path_in):
# init base class
multiprocessing.Process.__init__(self)
# save thread inputs for run()
self.parent_conn = parent_conn_in
self.child_conn = child_conn_in
self.index = index_in
self.thread_count = thread_count_in
self.archive_path = archive_path_in
# counters for reporting
self.file_count = 0
self.dir_count = 0
self.dir_create_collisions = 0
def __str__(self):
        return 'untarThread %d %s' % (self.index, self.archive_path)
def run(self):
my_dirs = {}
link_queue = []
archive = tarfile.open(name=self.archive_path)
archive.errorlevel = 2 # want to know if errors
count = self.thread_count - 1
for m in archive: # for each thing in the tarfile
if m.isdir(): # if a directory
stripped_name = m.name.strip(os.sep) # remove any trailing '/'
count += 1
                if count >= self.thread_count:
count = 0
if count == self.index:
if debug:
print('thread %d recording on count %d dir %s' %
(self.index, count, stripped_name))
# value doesn't matter, my_dirs is just a set
my_dirs[stripped_name] = self.index
try:
archive.extract(m)
except OSError as e:
# race condition if > 1 thread
# creating a common parent directory,
# just back off different amounts
# so one of them succeeds.
if e.errno == errno.EEXIST:
time.sleep(0.1 * self.index)
self.dir_create_collisions += 1
archive.extract(m)
else:
raise e
if debug:
print('%d got dir %s' % (self.index, m.name))
self.dir_count += 1
else:
# if not a directory
dirname = os.path.dirname(m.name)
# ASSUMPTION: directory object is always read from tarfile
# before its contents
if dirname in my_dirs:
if m.islnk() or m.issym():
print('link %s -> %s' % (m.name, m.linkname))
if not os.path.exists(m.linkname):
if m.linkname.startswith(os.sep):
if debug:
print(fmt_dangling_link % m.linkname)
else:
# BUT DO IT ANYWAY, that's what tar xf does!
# FIXME: how do we know if link target is a
# file within the untarred directory tree?
# Only postpone link creation for these.
if debug:
print(fmt_link2nonexistent % m.linkname)
link_queue.append(m)
continue
try:
archive.extract(m) # not a link or dir at this point
except OSError as e:
if not (e.errno == errno.EEXIST and m.issym()):
raise e
if debug:
print('%d got file %s' % (self.index, m.name))
self.file_count += 1
# we postpone links to non-existent files in case other threads
# need to create target files
# these links are created after
# all other subprocesses have finished directories and files
# to ensure that this succeeds.
self.child_conn.send('y')
# block until all subprocesses finished above loop
self.child_conn.recv()
# now it should be safe to create softlinks that point within this tree
for m in link_queue:
try:
archive.extract(m)
except OSError as e:
if not (e.errno == errno.EEXIST and m.issym()):
raise e
if debug:
print('%d got file %s' % (self.index, m.name))
self.file_count += 1
archive.close()
self.child_conn.send((self.file_count, self.dir_count,
self.dir_create_collisions))
# create & start worker threads, wait for them to finish
worker_pool = []
for n in range(0, thread_count):
(parent_conn, child_conn) = multiprocessing.Pipe()
t = untarThread(parent_conn, child_conn, n, thread_count, fn)
worker_pool.append(t)
t.daemon = True
t.start()
if debug:
print('thread pool: ' + str(worker_pool))
# implement barrier for softlink creation within the tree
for t in worker_pool:
assert t.parent_conn.recv() == 'y'
for t in worker_pool:
t.parent_conn.send('y')
elapsed_time = time.time() - start_time
print('reached softlink barrier at %7.2f sec' % elapsed_time)
total_files = 0
total_dirs = 0
for t in worker_pool:
(w_file_count, w_dir_count, w_dir_create_collisions) = \
t.parent_conn.recv()
t.join()
print('thread %d file-count %d dir-count %d create-collisions %d' %
(t.index, w_file_count, w_dir_count, w_dir_create_collisions))
total_files += w_file_count
total_dirs += w_dir_count
elapsed_time = time.time() - start_time
print('all threads completed at %7.2f sec' % elapsed_time)
fps = total_files / elapsed_time
print('files per sec = %9.2f' % fps)
dps = total_dirs / elapsed_time
print('directories per sec = %8.2f' % dps)
| agpl-3.0 | 1,703,652,004,032,822,500 | 34.004505 | 79 | 0.542015 | false | 4.032693 | false | false | false |
DeathSea/DBSConcepts_FP | tran.py | 1 | 6301 | # -*- coding: utf-8 -*-
# translate bilibili's av json file into the database
import json
import mysql.connector
import ntpath
import urllib2,time
from tt import GetVideoInfo
config = {
'user': 'root',
'password': '',
'host': 'localhost',
'database': 'dbfp',
'raise_on_warnings': True,
}
#con = mysql.connector.connect(**config)
#cur = con.cursor()
#UPDATE `dbfp`.`dbfp_av_info` SET `create_stamp` = FROM_UNIXTIME('1246000296') WHERE `dbfp_av_info`.`id` = 1;
add_av_info_req = ("INSERT INTO `dbfp_av_info` "
"(`id`, `av`, `title`, `up_id`, `create_stamp`, `create_at`, `play_times`, `collect_times`, `dan_count`, `review_times`, `coins_count`)"
"VALUES(NULL, %s, %s, %s, FROM_UNIXTIME(%s), %s, %s, %s, %s, %s, %s);")
add_up_info_req = ("INSERT INTO `dbfp_up_info`"
"(`uid`, `name`, `lvl`, `sign`, `birth`, `reg_date`, `article`, `follow_count`, `fans_count`)"
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s);")
av_dir = "D:\\PProject\\bilibili\\allavinfo"
av_dir2 = "D:\\PProject\\bilibili\\allavinfo2-1"
av_dir3 = "D:\\PProject\\bilibili\\allavinfo2"
av_dir4 = 'D:\\PProject\\bilibili\\allavinfo3'
av_dir5 = 'D:\\PProject\\bilibili\\allavinfo4'
user_dir = "D:\\PProject\\bilibili\\alluserinfo"
def getURLContent(url):
while 1:
try:
headers = {'User-Agent':'Mozilla/5.0 (iPad; CPU OS 4_3_5 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8L1 Safari/6533.18.5',
#'Cookie':'pgv_pvi=9629054976; pgv_si=s7276307456; sid=a84gv3d7; fts=1438695578; LIVE_BUVID=a30f235e687b15cddec7073e271b78dc; LIVE_BUVID__ckMd5=1aff9d63faeeb5dd; PLHistory=bVm2%7Co2}GW; IESESSION=alive; DedeUserID=2754937; DedeUserID__ckMd5=62d03cc207ac353c; SESSDATA=8d08bf28%2C1442638544%2C030d0e52; LIVE_LOGIN_DATA=4f48da9e73ffdd64590fc2812487cb4fb2d8d70f; LIVE_LOGIN_DATA__ckMd5=8840c45c091b3590; _cnt_dyn=0; _cnt_pm=0; _cnt_notify=21; uTZ=-480; DedeID=2837864; _dfcaptcha=55b092b61a3f77ba89cde89af6ed7f90; CNZZDATA2724999=cnzz_eid%3D1895263873-1425444951-%26ntime%3D1442045119'
}
req = urllib2.Request(url = url,headers = headers);
content = urllib2.urlopen(req,timeout = 10).read();
except:
print 'connect error...'
time.sleep(20)
continue
break
return content;
def GetuserInfo(id):
url = "http://space.bilibili.com/%d#!/index"%(id)
con = getURLContent(url)
s = con.find("_bili_space_info")
e = con.find("var _bili_is_")
#print s,e
resu = con[s:e].replace("_bili_space_info = eval(",'').replace(');',"")
return resu
def create_av_info(file_name,di):
if type(file_name) == type(''):
av = int(file_name)
elif type(file_name) == type(2):
av = file_name
else:
return -1
#di = json.loads(string)
# av,title, up_id,create_stamp ,created_at, play_times,collect_times, dan_count, review_times,coins_count
# %d, %s, %d, FROM_UNIXTIME(%d), %s, %d, %d, %d, %d, %d)
if di['author'] == None:
id = 0
else:
id = int(di['mid'])
result_tuple = (int(av),di['title'],id,di['created'],di['created_at']+':00',int(di['play']),int(di['favorites']),int(di['video_review']),int(di['review']),int(di['coins']))
#print result_tuple
return result_tuple
def create_user_info(di):
if type(di) != type({}):
return ()
#di = json.loads(st)
# uid, name, lvl,
# sign, birth,reg_date,
# article, follow_count, fans_count,
#VALUES(%d, %s, %d, %s, %s, %s, %d, %d, %d)
result_tuple = (int(di['mid']),di['name'],di['level_info']["current_level"],
di["sign"] and di["sign"] or "NULL",di['birthday'],di["regtime"],
di["article"],di["attention"],di["fans"],
)
return result_tuple
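# Illustrative sketch (hypothetical user dict, values made up):
#     create_user_info({'mid': '1', 'name': 'demo', 'level_info': {'current_level': 3},
#                       'sign': '', 'birthday': '01-01', 'regtime': '2009-06-26',
#                       'article': 0, 'attention': 10, 'fans': 5})
# returns (1, 'demo', 3, 'NULL', '01-01', '2009-06-26', 0, 10, 5), matching the column
# order of add_up_info_req.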
def read_av_info(id):
if type(id) != type(0):
id = int(id)
if 0<=id<=174999:
this_dir = av_dir
elif 175000<=id<=290998:
this_dir = av_dir2
elif 290999<=id<=469999:
this_dir = av_dir3
elif 470000<=id<=539999:
this_dir = av_dir4
else:
this_dir = av_dir5
FILE = this_dir+'\\'+str(id)+'.json'
FILE_EXIST = ntpath.exists(FILE)
if FILE_EXIST:
f = open(FILE,'r')
jsoncon = f.readline()
f.close()
else:
return 404
di = json.loads(jsoncon)
if di.has_key('code') and di['code'] == -403:
return 404
elif di.has_key('code') and di['code'] == -503:
        # the cached response is a rate-limit error; re-fetch it and overwrite the cache
        con = GetVideoInfo(id)
        with open(FILE, 'w') as f:
            print >> f, con
        di = json.loads(con)
return di
def read_user_info(id):
if type(id) != type(""):
id = str(id)
FILE = user_dir+'\\'+id+'.json'
FILE_EXIST = ntpath.exists(FILE)
if FILE_EXIST:
f = open(FILE,'r')
jsoncon = f.readline()
f.close()
else:
jsoncon = GetuserInfo(int(id))
return json.loads(jsoncon)
def does_user_info_exist(mysql_conn,id):
flag = False
QUERY_STR = ("select uid from dbfp_up_info"
" where uid=%s")
cur = mysql_conn.cursor()
cur.execute(QUERY_STR,(id,))
for x in cur:
flag = True
cur.close()
return flag
def insert_up_info(mysql_conn,tup):
this_cursor = mysql_conn.cursor()
this_cursor.execute(add_up_info_req,tup)
mysql_conn.commit()
this_cursor.close()
def insert_av_info(mysql_conn,tup):
uid = tup[2]
if uid != 0 and not does_user_info_exist(mysql_conn,uid):
updic = read_user_info(uid)
up_tup = create_user_info(updic)
insert_up_info(mysql_conn,up_tup)
this_cursor = mysql_conn.cursor()
this_cursor.execute(add_av_info_req,tup)
mysql_conn.commit()
this_cursor.close()
if __name__ == '__main__':
con = mysql.connector.connect(**config)
#userdic = read_user_info(1)
for i in range(9978,10000):
avdic = read_av_info(i)
if avdic != 404:
avtup = create_av_info(i,avdic)
insert_av_info(con,avtup)
print i,' insert complete'
else:
print i,' is unavil or 404'
con.close()
| gpl-2.0 | 330,896,073,926,808,060 | 35.847953 | 594 | 0.575147 | false | 2.819239 | false | false | false |
goddardl/gaffer | python/GafferUI/GraphBookmarksUI.py | 1 | 5255 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import Gaffer
##########################################################################
# Public methods
##########################################################################
def appendNodeContextMenuDefinitions( nodeGraph, node, menuDefinition ) :
if len( menuDefinition.items() ) :
menuDefinition.append( "/GraphBookmarksDivider", { "divider" : True } )
menuDefinition.append(
"/Bookmarked",
{
"checkBox" : __getBookmarked( node ),
"command" : functools.partial( __setBookmarked, node ),
"active" : node.ancestor( Gaffer.Reference ) is None,
}
)
def appendPlugContextMenuDefinitions( nodeGraph, plug, menuDefinition ) :
parent = nodeGraph.graphGadget().getRoot()
dividerAdded = False
for bookmark in __bookmarks( parent ) :
nodeGadget = nodeGraph.graphGadget().nodeGadget( bookmark )
if nodeGadget is None :
continue
compatibleConnections = []
for nodule in __nodules( nodeGadget ) :
inPlug, outPlug = __connection( plug, nodule.plug() )
if inPlug is not None :
compatibleConnections.append( ( inPlug, outPlug ) )
if not compatibleConnections :
continue
if not dividerAdded :
if len( menuDefinition.items() ) :
menuDefinition.append( "/BookmarksDivider", { "divider" : True } )
dividerAdded = True
for inPlug, outPlug in compatibleConnections :
label = bookmark.getName()
if len( compatibleConnections ) > 1 :
bookmarkPlug = outPlug if inPlug.isSame( plug ) else inPlug
label += "/" + bookmarkPlug.relativeName( bookmark )
menuDefinition.append(
"/Connect Bookmark/" + label,
{
"command" : functools.partial( __connect, inPlug, outPlug ),
"active" : not outPlug.isSame( inPlug.getInput() )
}
)
##########################################################################
# Internal implementation
##########################################################################
def __getBookmarked( node ) :
return Gaffer.Metadata.nodeValue( node, "graphBookmarks:bookmarked" ) or False
def __setBookmarked( node, bookmarked ) :
with Gaffer.UndoContext( node.scriptNode() ) :
Gaffer.Metadata.registerNodeValue( node, "graphBookmarks:bookmarked", bookmarked )
def __bookmarks( parent ) :
return [ n for n in parent.children( Gaffer.Node ) if __getBookmarked( n ) ]
## \todo Perhaps this functionality should be provided by the
# GraphGadget or NodeGadget class?
def __nodules( nodeGadget ) :
result = []
def walk( graphComponent ) :
if isinstance( graphComponent, Gaffer.Plug ) :
nodule = nodeGadget.nodule( graphComponent )
if nodule is not None :
result.append( nodule )
for c in graphComponent.children( Gaffer.Plug ) :
walk( c )
walk( nodeGadget.node() )
return result
## \todo This is similar to the private
# StandardNodule::connection() method. Perhaps we
# should find a single sensible place to put it?
# Maybe on the GraphGadget class? Or in a new
# PlugAlgo.h file?
def __connection( plug1, plug2 ) :
if plug1.node().isSame( plug2.node() ) :
return None, None
if plug1.direction() == plug2.direction() :
return None, None
if plug1.direction() == plug1.Direction.In :
inPlug, outPlug = plug1, plug2
else :
inPlug, outPlug = plug2, plug1
if inPlug.acceptsInput( outPlug ) :
return inPlug, outPlug
return None, None
def __connect( inPlug, outPlug ) :
with Gaffer.UndoContext( inPlug.ancestor( Gaffer.ScriptNode ) ) :
inPlug.setInput( outPlug ) | bsd-3-clause | 2,393,390,104,610,386,400 | 32.477707 | 84 | 0.653663 | false | 3.930441 | false | false | false |
saullocastro/pyNastran | pyNastran/bdf/dev_vectorized/cards/loads/ploadx1.py | 3 | 3998 | from six.moves import zip
from numpy import arange, zeros, unique
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (integer,
double, double_or_blank)
class PLOADX1(object):
type = 'PLOADX1'
def __init__(self, model):
"""
Defines the PLOADX1 object.
Parameters
----------
model : BDF
the BDF object
"""
self.model = model
self.n = 0
self._cards = []
self._comments = []
def __getitem__(self, i):
unique_lid = unique(self.load_id)
if len(i):
f = PLOADX1(self.model)
f.load_id = self.load_id[i]
f.element_id = self.element_id[i, :]
f.p = self.p[i]
f.node_ids = self.node_ids[i, :]
f.theta = self.theta[i]
f.n = len(i)
return f
raise RuntimeError('len(i) = 0')
def __mul__(self, value):
f = PLOADX1(self.model)
f.load_id = self.load_id
f.element_id = self.element_id
f.p = self.p * value
f.node_ids = self.node_ids
f.theta = self.theta
f.n = self.n
return f
def __rmul__(self, value):
return self.__mul__(value)
def add_card(self, card, comment=''):
self._cards.append(card)
self._comments.append(comment)
def build(self):
cards = self._cards
ncards = len(cards)
self.n = ncards
if ncards:
float_fmt = self.model.float_fmt
#: Property ID
self.load_id = zeros(ncards, 'int32')
#: Element ID
self.element_id = zeros(ncards, 'int32')
# Surface traction at grid point GA. (Real)
self.p = zeros((ncards, 2), float_fmt)
#: Corner grid points. GA and GB are any two adjacent corner grid points of the
#: element. (Integer > 0)
self.node_ids = zeros((ncards, 2), 'int32')
#: Angle between surface traction and inward normal to the line segment.
#: (Real Default = 0.0)
self.theta = zeros(ncards, float_fmt)
for i, card in enumerate(cards):
self.load_id[i] = integer(card, 1, 'load_id')
self.element_id[i] = integer(card, 2, 'element_id')
pa = double(card, 3, 'pa')
pb = double_or_blank(card, 4, 'pb', pa)
self.p[i, :] = [pa, pb]
self.node_ids[i, :] = [integer(card, 5, 'ga'),
integer(card, 6, 'gb')]
self.theta[i] = double_or_blank(card, 7, 'theta', 0.)
assert len(card) <= 8, 'len(PLOADX1 card) = %i\ncard=%s' % (len(card), card)
i = self.load_id.argsort()
self.load_id = self.load_id[i]
self.element_id = self.element_id[i]
self.node_ids = self.node_ids[i, :]
self.p = self.p[i, :]
self.theta = self.theta[i]
self._cards = []
self._comments = []
def get_stats(self):
msg = []
if self.n:
msg.append(' %-8s: %i' % ('PLOADX1', self.n))
return msg
def get_index(self, load_ids=None):
#if load_ids:
i = arange(self.n)
#else:
# i = searchsorted(load_ids, self.load_id)
return i
def write_card(self, bdf_file, size=8, is_double=False, load_id=None):
if self.n:
            i = self.get_index(load_id)
for (lid, eid, p, n, theta) in zip(self.load_id[i],
self.element_id[i], self.p[i], self.node_ids[i], self.theta[i]):
card = ['PLOADX1', lid, eid, p[0], p[1], n[0], n[1], theta]
if size == 8:
bdf_file.write(print_card_8(card))
else:
bdf_file.write(print_card_16(card))
| lgpl-3.0 | -1,486,337,236,081,626,000 | 32.041322 | 92 | 0.494747 | false | 3.359664 | false | false | false |
Skufler/pybot | main.py | 1 | 1445 | import telebot
from telebot import types
import timetable as table
import weather
token = 'your_token'
bot = telebot.TeleBot(token)
@bot.message_handler(commands=['functions'])
def functions_handler(message):
bot.send_message(message.chat.id, '1 - Распсание \n'
'2 - Погода \n')
@bot.message_handler(commands=['help'])
def show_info(message):
pass
@bot.message_handler(commands=['start'])
def show_start_info(message):
pass
@bot.message_handler(content_types=['text'])
def main_activity(message):
try:
if message.text == 'Погода':
current_weather = bot.send_message(message.chat.id, 'Введите город')
bot.register_next_step_handler(current_weather, weather.weather_handler)
if message.text == 'Расписание':
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
keyboard.add('Понедельник', 'Вторник', 'Среда', 'Четверг', 'Пятница')
keyboard.one_time_keyboard = True
timetable = bot.reply_to(message, 'Укажите день недели', reply_markup=keyboard)
bot.register_next_step_handler(timetable, table.timetable_handler)
except Exception as e:
bot.send_message(message.chat.id, e.args)
if __name__ == '__main__':
bot.polling(none_stop=True)
| gpl-3.0 | 1,582,778,984,586,428,000 | 28.636364 | 91 | 0.626855 | false | 3.056689 | false | false | false |
LynnHo/AttGAN-Tensorflow | scripts/align.py | 1 | 5555 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import partial
from multiprocessing import Pool
import os
import re
import cropper
import numpy as np
import tqdm
# ==============================================================================
# = param =
# ==============================================================================
parser = argparse.ArgumentParser()
# main
parser.add_argument('--img_dir', dest='img_dir', default='./data/img_celeba/img_celeba')
parser.add_argument('--save_dir', dest='save_dir', default='./data/img_celeba/aligned')
parser.add_argument('--landmark_file', dest='landmark_file', default='./data/img_celeba/landmark.txt')
parser.add_argument('--standard_landmark_file', dest='standard_landmark_file', default='./data/img_celeba/standard_landmark_68pts.txt')
parser.add_argument('--crop_size_h', dest='crop_size_h', type=int, default=572)
parser.add_argument('--crop_size_w', dest='crop_size_w', type=int, default=572)
parser.add_argument('--move_h', dest='move_h', type=float, default=0.25)
parser.add_argument('--move_w', dest='move_w', type=float, default=0.)
parser.add_argument('--save_format', dest='save_format', choices=['jpg', 'png'], default='jpg')
parser.add_argument('--n_worker', dest='n_worker', type=int, default=8)
# others
parser.add_argument('--face_factor', dest='face_factor', type=float, help='The factor of face area relative to the output image.', default=0.45)
parser.add_argument('--align_type', dest='align_type', choices=['affine', 'similarity'], default='similarity')
parser.add_argument('--order', dest='order', type=int, choices=[0, 1, 2, 3, 4, 5], help='The order of interpolation.', default=3)
parser.add_argument('--mode', dest='mode', choices=['constant', 'edge', 'symmetric', 'reflect', 'wrap'], default='edge')
args = parser.parse_args()
# ==============================================================================
# = opencv first =
# ==============================================================================
_DEAFAULT_JPG_QUALITY = 95
try:
import cv2
imread = cv2.imread
imwrite = partial(cv2.imwrite, params=[int(cv2.IMWRITE_JPEG_QUALITY), _DEAFAULT_JPG_QUALITY])
align_crop = cropper.align_crop_opencv
print('Use OpenCV')
except:
import skimage.io as io
imread = io.imread
imwrite = partial(io.imsave, quality=_DEAFAULT_JPG_QUALITY)
align_crop = cropper.align_crop_skimage
print('Importing OpenCv fails. Use scikit-image')
# ==============================================================================
# = run =
# ==============================================================================
# count landmarks
with open(args.landmark_file) as f:
line = f.readline()
n_landmark = len(re.split('[ ]+', line)[1:]) // 2
# read data
img_names = np.genfromtxt(args.landmark_file, dtype=np.str, usecols=0)
landmarks = np.genfromtxt(args.landmark_file, dtype=np.float, usecols=range(1, n_landmark * 2 + 1)).reshape(-1, n_landmark, 2)
standard_landmark = np.genfromtxt(args.standard_landmark_file, dtype=np.float).reshape(n_landmark, 2)
standard_landmark[:, 0] += args.move_w
standard_landmark[:, 1] += args.move_h
# data dir
save_dir = os.path.join(args.save_dir, 'align_size(%d,%d)_move(%.3f,%.3f)_face_factor(%.3f)_%s' % (args.crop_size_h, args.crop_size_w, args.move_h, args.move_w, args.face_factor, args.save_format))
data_dir = os.path.join(save_dir, 'data')
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
def work(i): # a single work
for _ in range(3): # try three times
try:
img = imread(os.path.join(args.img_dir, img_names[i]))
img_crop, tformed_landmarks = align_crop(img,
landmarks[i],
standard_landmark,
crop_size=(args.crop_size_h, args.crop_size_w),
face_factor=args.face_factor,
align_type=args.align_type,
order=args.order,
mode=args.mode)
name = os.path.splitext(img_names[i])[0] + '.' + args.save_format
path = os.path.join(data_dir, name)
if not os.path.isdir(os.path.split(path)[0]):
os.makedirs(os.path.split(path)[0])
imwrite(path, img_crop)
tformed_landmarks.shape = -1
name_landmark_str = ('%s' + ' %.1f' * n_landmark * 2) % ((name, ) + tuple(tformed_landmarks))
succeed = True
break
except:
succeed = False
if succeed:
return name_landmark_str
else:
print('%s fails!' % img_names[i])
if __name__ == '__main__':
pool = Pool(args.n_worker)
name_landmark_strs = list(tqdm.tqdm(pool.imap(work, range(len(img_names))), total=len(img_names)))
pool.close()
pool.join()
landmarks_path = os.path.join(save_dir, 'landmark.txt')
with open(landmarks_path, 'w') as f:
for name_landmark_str in name_landmark_strs:
if name_landmark_str:
f.write(name_landmark_str + '\n')
| mit | 4,514,313,698,126,492,000 | 44.162602 | 197 | 0.534833 | false | 3.649803 | false | false | false |
sciCloud/OLiMS | lims/content/instrumentvalidation.py | 2 | 3985 | from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import schemata
from dependencies.dependency import HoldingReference
from dependencies import atapi
from dependencies.dependency import *
from dependencies.dependency import getToolByName
from lims import bikaMessageFactory as _
from lims.browser.widgets import DateTimeWidget, ReferenceWidget
from lims.config import PROJECTNAME
from lims.content.bikaschema import BikaSchema
schema = BikaSchema.copy() + Schema((
DateTimeField('DateIssued',
with_time = 1,
with_date = 1,
widget = DateTimeWidget(
label=_("Report Date"),
description=_("Validation report date"),
),
),
DateTimeField('DownFrom',
with_time = 1,
with_date = 1,
widget = DateTimeWidget(
label=_("From"),
description=_("Date from which the instrument is under validation"),
),
),
DateTimeField('DownTo',
with_time = 1,
with_date = 1,
widget = DateTimeWidget(
label=_("To"),
description=_("Date until the instrument will not be available"),
),
),
StringField('Validator',
widget = StringWidget(
label=_("Validator"),
description=_("The analyst responsible of the validation"),
)
),
TextField('Considerations',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Considerations"),
description=_("Remarks to take into account before validation"),
),
),
TextField('WorkPerformed',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Work Performed"),
description=_("Description of the actions made during the validation"),
),
),
ReferenceField('Worker',
vocabulary='getLabContacts',
allowed_types=('LabContact',),
relationship='LabContactInstrumentValidation',
widget=ReferenceWidget(
checkbox_bound=0,
label=_("Performed by"),
description=_("The person at the supplier who performed the task"),
size=30,
base_query={'inactive_state': 'active'},
showOn=True,
colModel=[{'columnName': 'UID', 'hidden': True},
{'columnName': 'JobTitle', 'width': '20', 'label': _('Job Title')},
{'columnName': 'Title', 'width': '80', 'label': _('Name')}
],
),
),
StringField('ReportID',
widget = StringWidget(
label=_("Report ID"),
description=_("Report identification number"),
)
),
TextField('Remarks',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Remarks"),
),
),
))
schema['title'].widget.label = 'Asset Number'
class InstrumentValidation(BaseFolder):
security = ClassSecurityInfo()
schema = schema
displayContentsTab = False
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from lims.idserver import renameAfterCreation
renameAfterCreation(self)
def getLabContacts(self):
bsc = getToolByName(self, 'bika_setup_catalog')
# fallback - all Lab Contacts
pairs = []
for contact in bsc(portal_type='LabContact',
inactive_state='active',
sort_on='sortable_title'):
pairs.append((contact.UID, contact.Title))
return DisplayList(pairs)
atapi.registerType(InstrumentValidation, PROJECTNAME)
| agpl-3.0 | 8,217,211,415,575,605,000 | 30.626984 | 89 | 0.590966 | false | 4.633721 | false | false | false |
petergtz/vcf-compare | run-tests.py | 1 | 3276 | #!/usr/bin/python
# Copyright 2013 Peter Goetz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from os import listdir, makedirs, getcwd, chdir, remove, path
import os.path
from sys import argv
from subprocess import call
from shutil import rmtree
TEST_CASES_DIR = "test-cases"
DIFF_VIEWER = argv[1] if len(argv) > 1 else "meld"
SCRIPT_NAME = "vcf-compare"
def main():
assert os.path.exists(TEST_CASES_DIR)
num_errors = 0
num_tests = 0
for test_case in listdir(TEST_CASES_DIR):
num_tests += 1
success = run(test_case)
if not success: num_errors += 1
print "\n" + str(num_tests) + " Tests completed. ERRORS:", num_errors
def run(test_case):
print ".",
test_run_data_dir = path.join("test-run-data", test_case)
ensure_dir_exists_and_is_empty(test_run_data_dir)
success = True
with InDirectory(test_run_data_dir):
with open("actual-output", "w") as actual_output_file, \
open("stderr", "w") as stderr_file:
rc = call([path.join(getcwd(), "..", "..", SCRIPT_NAME),
path.join("..", "..", TEST_CASES_DIR, test_case, "a.vcf"),
path.join("..", "..", TEST_CASES_DIR, test_case, "b.vcf")],
stdout= actual_output_file,
stderr=stderr_file)
if rc != 0:
print error(test_case, "script returned error. RC = " + str(rc))
success = False
else:
actual_output_filename = "actual-output"
expected_output_filename = path.join("..", "..", TEST_CASES_DIR, test_case, "expected-output")
with open(actual_output_filename) as actual_output_file, \
open(expected_output_filename) as expected_output_file:
if actual_output_file.read() != expected_output_file.read():
success = False
print error(test_case, "Files differ. Running diff\n")
call([DIFF_VIEWER, actual_output_filename, expected_output_filename])
print "\nEnd of Diff\n"
if success: rmtree(test_run_data_dir)
return success
def ensure_dir_exists_and_is_empty(path):
if os.path.exists(path): rmtree(path)
makedirs(path)
def error(test_case, message):
return "\nIn " + test_case + ": " + message
class InDirectory:
def __init__(self, new_path):
self.new_path = new_path
def __enter__(self):
self.saved_path = os.getcwd()
os.chdir(self.new_path)
def __exit__(self, etype, value, traceback):
os.chdir(self.saved_path)
if __name__ == "__main__":
main()
| gpl-2.0 | -6,323,314,131,631,169,000 | 34.608696 | 106 | 0.607448 | false | 3.668533 | true | false | false |
srio/dabam | code/simple_physicaloptics.py | 1 | 6563 | """
simple_physicaloptics
performs optics calculations using physical optics
inputs: reads the heights profiles from tmpHeights.dat
file produced by dabam.py with detrending, e.g.:
python3 dabam.py 4
output: some plots
"""
__author__ = "Manuel Sanchez del Rio"
__contact__ = "[email protected]"
__copyright = "ESRF, 2015"
import numpy
from matplotlib import pylab as plt
#lensF is in fact 2*F
def goFromTo(source, image, distance=1.0, lensF=None, slopeError=None, wavelength=1e-10):
distance = numpy.array(distance)
x1 = numpy.outer(source,numpy.ones(image.size))
x2 = numpy.outer(numpy.ones(source.size),image)
r = numpy.sqrt( numpy.power(x1-x2,2) + numpy.power(distance,2) )
# add lens at the image plane
    if lensF is not None:
r = r - numpy.power(x1-x2,2)/lensF
    if slopeError is not None:
r = r + 2 * slopeError
wavenumber = numpy.pi*2/wavelength
return numpy.exp(1.j * wavenumber * r)
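# Note added for clarity: goFromTo() returns the Fresnel-Kirchhoff propagation
# kernel exp(i*k*r) evaluated for every (source point, image point) pair, so the
# result has shape (source.size, image.size); propagation through successive
# planes is then just a matrix product of these kernels (see main() below).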
def main():
#
# y axis is horizontal
# z axis is vertical
#
#
#define focal distances
#
p = 30.0
q = 10.0
theta_grazing = 3e-3
#
#compute mirror radius
#
R = 2 / numpy.sin(theta_grazing) / (1/p + 1/q)
F = 1 / (1/p + 1/q)
print("Mirror radius of curvature set to: %.3f m (p=%.3f m, q=%.3f m, theta=%.2f mrad))"%(R,p,q,theta_grazing*1e3))
print("Mirror focal length set to: %.3f m "%(F))
#
#load height profile
#
input_file = "tmpHeights.dat"
a = numpy.loadtxt(input_file)
hy0 = a[:,0]
hz0 = a[:,1]
#
#interpolate to increase the number of points ans statistics
#
do_interpolate = 0
if do_interpolate:
mirror_length = (hy0.max() - hy0.min())
npoints = 500 # hy0.size
hy = numpy.linspace(-0.5*mirror_length,0.5*mirror_length,npoints)
hz = numpy.interp(hy,hy0,hz0)
else:
hy = hy0
hz = hz0
#remove mean
hz -= hz.mean()
L = hy[-1]-hy[0]
print("Mirror data from file: %s :"%input_file)
print(" Mirror length is: %.3f m"%L)
print(" Mirror aperture is: %.3f um"%(1e6*L*numpy.sin(theta_grazing)))
N = hy.size
print(" Mirror contains %d points"%N)
# #
# #load slopes profile
# #
# input_file = "tmpSlopes.dat"
# a = numpy.loadtxt(input_file)
# hy = a[:,0]
# sz = a[:,1]
#
#compute slopes
#
sz = numpy.gradient(hz,(hy[1]-hy[0]))
slope_errors_rms = sz.std()
print(" Mirror slope error RMS is %.3f urad = %.3f arcsec"%(slope_errors_rms*1e6,slope_errors_rms*180/numpy.pi*3600))
#
#project on optical axis
#
hy_projected = hy * numpy.sin(theta_grazing)
# # dump to file
# outFile = "tmpImage.dat"
# if outFile != "":
# dd = numpy.concatenate( (bin_centers.reshape(-1,1), image_histogram.reshape(-1,1)),axis=1)
# dd[:,0] *= -1e6 # in microns, inverted to agree with shadow
# dd[:,1] /= dd[:,1].max()
# numpy.savetxt(outFile,dd)
sourcepoints = 1000
slitpoints = 1000
detpoints = 1000
wavelength = 1e-10
aperture_diameter = 2 * hy_projected.max()
airy_disk_theta = 1.22 * wavelength / aperture_diameter
detector_size = 50 * airy_disk_theta * q
fwhm_theory = 2 * 2.35 * slope_errors_rms * q
print("aperture _diameter = %f um "%(aperture_diameter*1e6))
print("detector_size = %f um"%(detector_size*1e6))
print("FWHM theory (2 sigma q) = %f um"%(fwhm_theory*1e6))
print("Airy disk is: %f urad = %f um"%(airy_disk_theta*1e6,airy_disk_theta*q*1e6))
if airy_disk_theta*q >= detector_size:
detector_size = 5 * airy_disk_theta * q
print("detector_size NEW = %f um"%(detector_size*1e6))
position1x = numpy.linspace(0,0,sourcepoints)
position2x = numpy.linspace(-aperture_diameter/2,aperture_diameter/2,slitpoints)
position3x = numpy.linspace(-detector_size/2,detector_size/2,detpoints)
sz_projected_interpolated = numpy.interp(position2x, hy, sz * numpy.sin(theta_grazing) )
# sz_projected_interpolated = None
# fields12 = goFromTo(position1x,position2x,q, wavelength=wavelength, lensF=2*F)
fields12 = goFromTo(position1x,position2x,p, lensF=2*F, slopeError=sz_projected_interpolated, wavelength=wavelength)
fields23 = goFromTo(position2x,position3x,q, lensF=None,wavelength=wavelength)
# from 1 to 3, matrix multiplication
fields13 = numpy.dot(fields12,fields23)
print ("Shape of fields12, fields23, fields13: ",fields12.shape,fields23.shape,fields13.shape)
#prepare results
fieldComplexAmplitude = numpy.dot(numpy.ones(sourcepoints),fields13)
print ("Shape of Complex U: ",fieldComplexAmplitude.shape)
print ("Shape of position1x: ",position1x.shape)
fieldIntensity = numpy.power(numpy.abs(fieldComplexAmplitude),2)
fieldPhase = numpy.arctan2(numpy.real(fieldComplexAmplitude), \
numpy.imag(fieldComplexAmplitude))
#
# write spec formatted file
#
out_file = "" # "simple_physicaloptics.spec"
if out_file != "":
f = open(out_file, 'w')
header="#F %s \n\n#S 1 fresnel-kirchhoff diffraction integral\n#N 3 \n#L X[m] intensity phase\n"%out_file
f.write(header)
for i in range(detpoints):
out = numpy.array((position2x[i], fieldIntensity[i], fieldPhase[i]))
f.write( ("%20.11e "*out.size+"\n") % tuple( out.tolist()) )
f.close()
print ("File written to disk: %s"%out_file)
#
# write two-column formatted file
#
fieldIntensity /= fieldIntensity.max()
tmpAbscissas = position3x * 1e6
outFile = "tmpPhysicalOptics.dat"
itmp = numpy.argmax(fieldIntensity)
tmpAbscissas = tmpAbscissas - tmpAbscissas[itmp]
if outFile != "":
dd=numpy.concatenate( (tmpAbscissas, fieldIntensity) ,axis=0).reshape(2,-1).transpose()
numpy.savetxt(outFile,dd)
print ("File "+outFile+" written to disk.\n")
#
#plots
#
do_plots = 0
if do_plots:
#
#plots
#
from matplotlib import pylab as plt
# plt.figure(1)
# plt.plot(hy*1e3,hz*1e9)
# plt.title("Profile used")
# plt.xlabel("X [mm]")
# plt.ylabel("Z [nm]")
plt.figure(2)
plt.plot(tmpAbscissas,fieldIntensity)
plt.title("Fresnel-Kirchhoff Diffraction")
plt.xlabel("X [um]")
plt.ylabel("Intensity [a.u.]")
plt.show()
#
# main program
#
if __name__ == '__main__':
main() | gpl-2.0 | 2,001,439,813,981,640,000 | 27.663755 | 125 | 0.608411 | false | 3.017471 | false | false | false |
sigshen/django-request-profiler | request_profiler/south_migrations/0002_auto__del_field_ruleset_include_anonymous__add_field_ruleset_user_filt.py | 1 | 6113 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'RuleSet.include_anonymous'
db.delete_column(u'request_profiler_ruleset', 'include_anonymous')
# Adding field 'RuleSet.user_filter_type'
db.add_column(u'request_profiler_ruleset', 'user_filter_type',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Adding field 'RuleSet.include_anonymous'
db.add_column(u'request_profiler_ruleset', 'include_anonymous',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Deleting field 'RuleSet.user_filter_type'
db.delete_column(u'request_profiler_ruleset', 'user_filter_type')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'request_profiler.profilingrecord': {
'Meta': {'object_name': 'ProfilingRecord'},
'duration': ('django.db.models.fields.FloatField', [], {}),
'end_ts': ('django.db.models.fields.DateTimeField', [], {}),
'http_method': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'http_user_agent': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_addr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'request_uri': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'response_status_code': ('django.db.models.fields.IntegerField', [], {}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'start_ts': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_func_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'request_profiler.ruleset': {
'Meta': {'object_name': 'RuleSet'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uri_regex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'user_filter_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user_group_filter': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['request_profiler'] | mit | -7,246,257,717,392,930,000 | 65.456522 | 195 | 0.562081 | false | 3.68475 | false | false | false |
gnome-keysign/gnome-keysign | keysign/export_uids.py | 1 | 1193 | #!/usr/bin/env python3
import logging
from pathlib import Path
import os
import sys
if __name__ == "__main__" and __package__ is None:
logging.getLogger().error("You seem to be trying to execute " +
"this script directly which is discouraged. " +
"Try python -m instead.")
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parent_dir)
import keysign
#mod = __import__('keysign')
#sys.modules["keysign"] = mod
__package__ = str('keysign')
from .gpgmeh import export_uids, minimise_key
def escape_filename(fname):
escaped = ''.join(c if c.isalnum() else "_" for c in fname)
return escaped
def main():
fname = sys.argv[1]
keydata = open(fname, 'rb').read()
minimise = True
if minimise:
keydata = minimise_key(keydata)
for i, (uid, uid_bytes) in enumerate(export_uids(keydata), start=1):
uid_file = Path('.') / ("{:02d}-".format(i) + escape_filename(uid) + ".pgp.asc")
print (f"Writing {uid_file}...")
uid_file.write_bytes(uid_bytes)
print (f"Done!")
if __name__ == "__main__":
main()
| gpl-3.0 | -2,191,016,393,435,988,000 | 27.404762 | 88 | 0.583403 | false | 3.379603 | false | false | false |
alkamid/wiktionary | odczasownikowe.py | 1 | 4587 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
from pywikibot import Category
from pywikibot import pagegenerators
import re
from klasa import *
from main import *
def addOdczas(title):
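    """Create Polish Wiktionary entries for a verb's verbal noun (gerund).

    Reads the '(z)robienie' field of the verb's inflection table; if a value is
    present and the gerund page (and its 'nie...' negation) does not exist yet,
    builds a basic noun section (meaning, declension, antonym) and saves it.
    """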
re_tabelkaAttr = re.compile(r'\|\s*?(z|)robienie\s*?=(.*?)(?=}}|\n)')
word = Haslo(title)
log = ''
if word.type == 3 and word.listLangs:
for sekcja in word.listLangs:
if sekcja.lang == 'polski':
sekcja.pola()
try: sekcja.odmiana
except AttributeError:
pass
else:
s_tabelkaAttr = re.search(re_tabelkaAttr, sekcja.odmiana.text)
if s_tabelkaAttr:
odczasownikowy = s_tabelkaAttr.group(2).strip()
if odczasownikowy:
enieaniestop = 0
czasownik = sekcja.title
nowe = Haslo(odczasownikowy)
if nowe.type == 0:
log += '*[[%s]] - redirect' % (odczasownikowy)
elif nowe.type == 1 and ' ' not in odczasownikowy:
nowaSekcja = LanguageSection(title=odczasownikowy, type=9, lang='język polski')
nowaSekcja.znaczeniaDetail.append(['\'\'rzeczownik, rodzaj nijaki\'\'', '\n: (1.1) {{rzecz}} {{odczas}} \'\'od\'\' [[%s]]' % czasownik])
if odczasownikowy[-4:] == 'enie' or odczasownikowy[-4:] == 'anie' or odczasownikowy[-3:] == 'cie':
pre = odczasownikowy[:-2]
nowaSekcja.odmiana.text = '\n: (1.1) {{odmiana-rzeczownik-polski\n|Mianownik lp = %sie\n|Dopełniacz lp = %sia\n|Celownik lp = %siu\n|Biernik lp = %sie\n|Narzędnik lp = %siem\n|Miejscownik lp = %siu\n|Wołacz lp = %sie\n}}' % (pre, pre, pre, pre, pre, pre, pre)
else:
enieaniestop = 1
if not enieaniestop:
nowaSekcja.antonimy.text = '\n: (1.1) [[nie%s]]' % odczasownikowy
nowaSekcja.saveChanges()
page = pywikibot.Page(site, odczasownikowy)
try: page.get()
except pywikibot.NoPage:
page.put(nowaSekcja.content, comment='dodanie hasła o rzeczowniku odczasownikowym na podstawie [[%s]]' % czasownik)
nieodczasownikowy = 'nie' + odczasownikowy
nowe = Haslo(nieodczasownikowy)
if nowe.type == 0:
log += '*[[%s]] - redirect' % (nieodczasownikowy)
elif nowe.type == 1 and ' ' not in nieodczasownikowy:
nowaSekcja = LanguageSection(title=nieodczasownikowy, type=9, lang='język polski')
nowaSekcja.znaczeniaDetail.append(['\'\'rzeczownik, rodzaj nijaki\'\'', '\n: (1.1) {{rzecz}} {{odczas}} \'\'od\'\' [[nie]] [[%s]]' % czasownik])
if not enieaniestop:
pre = nieodczasownikowy[:-3]
nowaSekcja.odmiana.text = '\n: (1.1) {{odmiana-rzeczownik-polski\n|Mianownik lp = %snie\n|Dopełniacz lp = %snia\n|Celownik lp = %sniu\n|Biernik lp = %snie\n|Narzędnik lp = %sniem\n|Miejscownik lp = %sniu\n|Wołacz lp = %snie\n}}' % (pre, pre, pre, pre, pre, pre, pre)
nowaSekcja.antonimy.text = '\n: (1.1) [[%s]]' % odczasownikowy
nowaSekcja.saveChanges()
page = pywikibot.Page(site, nieodczasownikowy)
try: page.get()
except pywikibot.NoPage:
page.put(nowaSekcja.content, comment='dodanie hasła o rzeczowniku odczasownikowym na podstawie [[%s]]' % czasownik)
def main():
global odmOlafa
odmOlafa = OdmianaOlafa()
global site
site = pywikibot.Site()
templatePage = pywikibot.Page(site, 'Szablon:ndk')
#lista = pagegenerators.ReferringPageGenerator(templatePage, True, True, True)
lista = ['poszukać']
for a in lista:
addOdczas(a)
if __name__ == '__main__':
try:
main()
finally:
pywikibot.stopme()
| mit | -2,250,928,310,257,693,000 | 56.2 | 302 | 0.478802 | false | 3.151515 | false | false | false |
njankowski/dftools | formats/gob.py | 1 | 5035 | """
Star Wars: Dark Forces
GOB Container Functions
"""
import string
import struct
GOB_HEADER_SIZE = 8
GOB_CATALOG_OFFSET_SIZE = 4
GOB_CATALOG_ENTRY_SIZE = 21
GOB_MAX_SIZE = 2 ** 31 - 1
class GOBException(Exception):
pass
def is_valid_entry_name(filename):
"""Return whether a name is valid as an entry name.
Checks a name against an assortment of DOS-like filename rules.
:param filename: The name to check
:return: bool
"""
allowed = string.ascii_letters + string.digits + "_^$~!#%&-{}@`'()"
reserved = ['CON', 'PRN', 'AUX', 'CLOCK$', 'NUL',
'COM0', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
'LPT0', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9',
'LST', 'KEYBD$', 'SCREEN$', '$IDLE$', 'CONFIG$']
# Cannot be empty or None.
if not filename:
return False
# Separator cannot appear more than once.
if filename.count('.') > 1:
return False
# Split into name and extension.
s = filename.partition('.')
name = s[0]
separator = s[1]
extension = s[2]
# Check name length.
name_len_ok = (0 < len(name) <= 8)
# Check name characters.
name_char_ok = all(c in allowed for c in name)
# Check name reservation.
name_reserved_ok = (name.upper() not in reserved)
# Default to valid extension checks.
ext_len_ok = True
ext_char_ok = True
# Check extension if a separator is present.
# Must have a valid extension if separator is present.
if separator:
# Check extension length.
ext_len_ok = (0 < len(extension) <= 3)
# Check extension characters.
        ext_char_ok = all(c in allowed for c in extension)
# Reserved names do not apply to extensions.
return ((name_len_ok and name_char_ok and name_reserved_ok) and (ext_len_ok and ext_char_ok))
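# A few illustrative calls (hypothetical names, not from the original module):
#   is_valid_entry_name("JEDI.LEV")        -> True  (8.3 name, allowed characters)
#   is_valid_entry_name("TOOLONGNAME.LEV") -> False (name part exceeds 8 characters)
#   is_valid_entry_name("CON")             -> False (reserved DOS device name)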
def get_gob_size(entries):
"""Return a tuple of size information given a list of entries.
Projects the meta data size and raw data size of a GOB if it were created with the given list of entries.
:param entries: List of GOB entry tuples [(str, bytes), ..., ] where the tuple represents (name, data) of the entry
:return: A tuple containing meta data size and raw data size in number of bytes (meta_size, data_size)
"""
# Header + Catalog Offset + Catalog
meta_size = GOB_HEADER_SIZE + GOB_CATALOG_OFFSET_SIZE + (GOB_CATALOG_ENTRY_SIZE * len(entries))
# Raw Data
data_size = sum([len(entry[1]) for entry in entries])
return (meta_size, data_size)
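# For instance (added illustration): get_gob_size([("A.TXT", b"hi")]) == (33, 2),
# since the metadata part is 8 + 4 + 21*1 bytes and the raw data is 2 bytes.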
def read(filename):
"""Reads a GOB container and returns all stored files.
:param filename: Path to the GOB to read
:return: List of GOB entry tuples [(str, bytes), ..., ] where the tuple represents (name, data) of the entry
"""
with open(filename, 'rb') as file:
entries = []
if file.read(4) != b'GOB\n':
return
catalog_offset = struct.unpack('<i', file.read(4))[0]
file.seek(catalog_offset)
num_entries = struct.unpack('<i', file.read(4))[0]
for i in range(num_entries):
data_offset = struct.unpack('<i', file.read(4))[0]
data_length = struct.unpack('<i', file.read(4))[0]
raw_name = file.read(13)
try:
name = raw_name[0 : raw_name.index(0)].decode('ascii')
except ValueError:
name = raw_name.decode('ascii')
print(f'catalog entry {i} has no null terminator in its filename "{name}"')
data = b''
if data_length > 0:
next_entry = file.tell()
file.seek(data_offset)
data = file.read(data_length)
file.seek(next_entry)
entries.append((name, data))
return entries
def write(filename, entries):
"""Writes a GOB container given a path and a list of GOB entries.
:param filename: Path to write the GOB to
:param entries: List of GOB entry tuples [(str, bytes), ..., ] where the tuple represents (name, data) of the entry
:return: None
"""
meta_size, data_size = get_gob_size(entries)
if (meta_size + data_size) > GOB_MAX_SIZE:
raise GOBException('Cannot create GOB because it would exceed maximum size.')
for entry in entries:
if not is_valid_entry_name(entry[0]):
raise GOBException('"' + entry[0] + '" is an invalid entry name.')
with open(filename, 'wb') as file:
file.write(b'GOB\n')
file.write(struct.pack('<i', GOB_HEADER_SIZE + data_size))
for entry in entries:
file.write(entry[1])
file.write(struct.pack('<i', len(entries)))
offset = GOB_HEADER_SIZE
for entry in entries:
file.write(struct.pack('<i', offset))
file.write(struct.pack('<i', len(entry[1])))
file.write(struct.pack('13s', entry[0].encode('ascii')))
offset += len(entry[1])
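# Minimal usage sketch (file names are hypothetical):
#   entries = read("DARK.GOB")        # -> [(name, data), ...]
#   write("COPY.GOB", entries)        # repack the same entries into a new GOB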
| isc | -9,218,901,716,966,650,000 | 30.46875 | 119 | 0.590864 | false | 3.550776 | false | false | false |
vladimarius/pyap | pyap/source_CA/data.py | 1 | 21646 | # -*- coding: utf-8 -*-
"""
pyap.source_US.data
~~~~~~~~~~~~~~~~~~~~
This module provides regular expression definitions required for
detecting Canada addresses.
The module is expected to always contain 'full_address' variable containing
all address parsing definitions.
:copyright: (c) 2015 by Vladimir Goncharov.
:license: MIT, see LICENSE for more details.
"""
import re
''' Numerals from one to nine
Note: here and below we use syntax like '[Oo][Nn][Ee]'
instead of '(one)(?i)' to match 'One' or 'oNe' because
Python Regexps don't seem to support turning On/Off
case modes for subcapturing groups.
'''
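# Illustrative note (not part of the original module): because every letter is
# spelled as an explicit [Uu]pper/lower class, these patterns match any casing,
# e.g. re.search(zero_to_nine, 'TwO ', re.VERBOSE) succeeds. All patterns in
# this module are written for compilation with re.VERBOSE.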
zero_to_nine = r"""(?:
[Zz][Ee][Rr][Oo]\ |[Oo][Nn][Ee]\ |[Tt][Ww][Oo]\ |
[Tt][Hh][Rr][Ee][Ee]\ |[Ff][Oo][Uu][Rr]\ |
[Ff][Ii][Vv][Ee]\ |[Ss][Ii][Xx]\ |
[Ss][Ee][Vv][Ee][Nn]\ |[Ee][Ii][Gg][Hh][Tt]\ |
[Nn][Ii][Nn][Ee]\ |[Tt][Ee][Nn]\ |
[Ee][Ll][Ee][Vv][Ee][Nn]\ |
[Tt][Ww][Ee][Ll][Vv][Ee]\ |
[Tt][Hh][Ii][Rr][Tt][Ee][Ee][Nn]\ |
[Ff][Oo][Uu][Rr][Tt][Ee][Ee][Nn]\ |
[Ff][Ii][Ff][Tt][Ee][Ee][Nn]\ |
[Ss][Ii][Xx][Tt][Ee][Ee][Nn]\ |
[Ss][Ee][Vv][Ee][Nn][Tt][Ee][Ee][Nn]\ |
[Ee][Ii][Gg][Hh][Tt][Ee][Ee][Nn]\ |
[Nn][Ii][Nn][Ee][Tt][Ee][Ee][Nn]\
)
"""
# Numerals - 10, 20, 30 ... 90
ten_to_ninety = r"""(?:
[Tt][Ee][Nn]\ |[Tt][Ww][Ee][Nn][Tt][Yy]\ |
[Tt][Hh][Ii][Rr][Tt][Yy]\ |
[Ff][Oo][Rr][Tt][Yy]\ |
[Ff][Oo][Uu][Rr][Tt][Yy]\ |
[Ff][Ii][Ff][Tt][Yy]\ |[Ss][Ii][Xx][Tt][Yy]\ |
[Ss][Ee][Vv][Ee][Nn][Tt][Yy]\ |
[Ee][Ii][Gg][Hh][Tt][Yy]\ |
[Nn][Ii][Nn][Ee][Tt][Yy]\
)"""
# One hundred
hundred = r"""(?:
[Hh][Uu][Nn][Dd][Rr][Ee][Dd]\
)"""
# One thousand
thousand = r"""(?:
[Tt][Hh][Oo][Uu][Ss][Aa][Nn][Dd]\
)"""
'''
Regexp for matching street number.
Street number can be written 2 ways:
1) Using letters - "One thousand twenty two"
2) Using numbers
a) - "1022"
b) - "85-1190"
"85 - 1190"
"85th - 1190"
c) - "85 1190"
'''
street_number = r"""(?<![\.0-9])(?P<street_number>
(?:
[Aa][Nn][Dd]\
|
{thousand}
|
{hundred}
|
{zero_to_nine}
|
{ten_to_ninety}
){from_to}
|
# 85th - 1190
(?:\d{from_to}(?:th)?
(?:\ ?\-?\ ?\d{from_to}(?:th)?)?\
)
|
# 45
(?:\d{from_to}(?=[\ ,]))
)
""".format(thousand=thousand,
hundred=hundred,
zero_to_nine=zero_to_nine,
ten_to_ninety=ten_to_ninety,
from_to='{1,5}')
'''
Regexp for matching street name.
In example below:
"Hoover Boulevard": "Hoover" is a street name
'''
street_name = r"""(?P<street_name>
\w[\w0-9\'\-\ \.]{0,30}?
)
"""
post_direction = r"""
(?P<post_direction>
(?:
# English
[Nn][Oo][Rr][Tt][Hh]{d}|
[Ss][Oo][Uu][Tt][Hh]{d}|
[Ee][Aa][Ss][Tt]{d}|
[Ww][Ee][Ss][Tt]{d}|
[Nn][Oo][Rr][Tt][Hh][Ee][Aa][Ss][Tt]{d}|
[Nn][Oo][Rr][Tt][Hh][Ww][Ee][Ss][Tt]{d}|
[Ss][Oo][Uu][Tt][Hh][Ee][Aa][Ss][Tt]{d}|
[Ss][Oo][Uu][Tt][Hh][Ww][Ee][Ss][Tt]{d}|
# French
[Ee][Ss][Tt]{d}|
[Nn][Oo][Rr][Dd]{d}|
[Nn][Oo][Rr][Dd]\-[Ee][Ss][Tt]{d}|
[Nn][Oo][Rr][Dd]\-[Oo][Uu][Ee][Ss][Tt]{d}|
[Ss][Uu][Dd]{d}|
[Ss][Uu][Dd]\-[Ee][Ss][Tt]{d}|
[Ss][Uu][Dd]\-[Oo][Uu][Ee][Ss][Tt]{d}|
[Oo][Uu][Ee][Ss][Tt]{d}
)
|
(?:
# English
NW{d}|NE{d}|SW{d}|SE{d}|
# French (missing above)
NO{d}|SO{d}
)
|
(?:
# English
N[\.\ ]|S[\.\ ]|E[\.\ ]|W[\.\ ]|
# French (missing above)
O[\.\ ]
)
)
""".format(d='[\ ,]')
# Regexp for matching street type
# According to
# https://www.canadapost.ca/tools/pg/manual/PGaddress-e.asp#1385939
street_type = r"""
(?P<street_type>
[Aa][Bb][Bb][Ee][Yy]{div}|
[Aa][Cc][Rr][Ee][Ss]{div}|
[Aa][Ll][Ll][Éé][Ee]{div}|
[Aa][Ll][Ll][Ee][Yy]{div}|
[Aa][Uu][Tt][Oo][Rr][Oo][Uu][Tt][Ee]{div}|[Aa][Uu][Tt]{div}|
[Aa][Vv][Ee][Nn][Uu][Ee]{div}|[Aa][Vv][Ee]?{div}|
[Bb][Aa][Yy]{div}|
[Bb][Ee][Aa][Cc][Hh]{div}|
[Bb][Ee][Nn][Dd]{div}|
    [Bb][Oo][Uu][Ll][Ee][Vv][Aa][Rr][Dd]{div}|[Bb][Ll][Vv][Dd]{div}|[Bb][Oo][Uu][Ll]{div}|
# Broadway
[Bb][Rr][Oo][Aa][Dd][Ww][Aa][Yy]{div}|
[Bb][Yy]\-?[Pp][Aa][Ss][Ss]{div}|
[Bb][Yy][Ww][Aa][Yy]{div}|
[Cc][Aa][Mm][Pp][Uu][Ss]{div}|
[Cc][Aa][Pp][Ee]{div}|
[Cc][Aa][Rr][Rr][EéÉ]{div}|[Cc][Aa][Rr]{div}|
    [Cc][Aa][Rr][Rr][Ee][Ff][Oo][Uu][Rr]{div}|[Cc][Aa][Rr][Rr][Ee][Ff]{div}|
[Cc][Ee][Nn][Tt][Rr][Ee]{div}|[Cc][Tt][Rr]{div}|
[Cc][Ee][Rr][Cc][Ll][Ee]{div}|
[Cc][Hh][Aa][Ss][Ee]{div}|
[Cc][Hh][Ee][Mm][Ii][Nn]{div}|[Cc][Hh]{div}|
[Cc][Ii][Rr][Cc][Ll][Ee]{div}|[Cc][Ii][Rr]{div}|
[Cc][Ii][Rr][Cc][Uu][Ii][Tt]{div}|[Cc][Ii][Rr][Cc][Tt]{div}|
[Cc][Ll][Oo][Ss][Ee]{div}|
[Cc][Oo][Mm][Mm][Oo][Nn]{div}|
[Cc][Oo][Nn][Cc][Ee][Ss][Ss][Ii][Oo][Nn]{div}|[Cc][Oo][Nn][Cc]{div}|
[Cc][Oo][Rr][Nn][Ee][Rr][Ss]{div}|
[Cc][Ôô][Tt][Ee]{div}|
[Cc][Oo][Uu][Rr][Ss]{div}|
[Cc][Oo][Uu][Rr]{div}|
[Cc][Oo][Uu][Rr][Tt]{div}|[Cc][Rr][Tt]{div}|
[Cc][Oo][Vv][Ee]{div}|
[Cc][Rr][Ee][Ss][Cc][Ee][Nn][Tt]{div}|[Cc][Rr][Ee][Ss]{div}|
[Cc][Rr][Oo][Ii][Ss][Ss][Aa][Nn][Tt]{div}|[Cc][Rr][Oo][Ii][Ss]{div}|
[Cc][Rr][Oo][Ss][Ss][Ii][Nn][Gg]{div}|[Cc][Rr][Oo][Ss][Ss]{div}|
[Cc][Uu][Ll]\-[Dd][Ee]\-[Ss][Aa][Cc]{div}|[Cc][Dd][Ss]{div}|
[Dd][Aa][Ll][Ee]{div}|
[Dd][Ee][Ll][Ll]{div}|
[Dd][Ii][Vv][Ee][Rr][Ss][Ii][Oo][Nn]{div}|[Dd][Ii][Vv][Ee][Rr][Ss]{div}|
[Dd][Oo][Ww][Nn][Ss]{div}|
[Dd][Rr][Ii][Vv][Ee]{div}|[Dd][Rr]{div}|
[Ée][Cc][Hh][Aa][Nn][Gg][Ee][Uu][Rr]{div}|[Ée][Cc][Hh]{div}|
[Ee][Nn][Dd]{div}|
[Ee][Ss][Pp][Ll][Aa][Nn][Aa][Dd][Ee]{div}|[Ee][Ss][Pp][Ll]{div}|
[Ee][Ss][Tt][Aa][Tt][Ee][Ss]?{div}|
[Ee][Xx][Pp][Rr][Ee][Ss][Ss][Ww][Aa][Yy]{div}|[Ee][Xx][Pp][Yy]{div}|
[Ee][Xx][Tt][Ee][Nn][Ss][Ii][Oo][Nn]{div}|[Ee][Xx][Tt][Ee][Nn]{div}|
[Ff][Aa][Rr][Mm]{div}|
[Ff][Ii][Ee][Ll][Dd]{div}|
[Ff][Oo][Rr][Ee][Ss][Tt]{div}|
[Ff][Rr][Ee][Ee][Ww][Aa][Yy]{div}|[Ff][Ww][Yy]{div}|
[Ff][Rr][Oo][Nn][Tt]{div}|
[Gg][Aa][Rr][Dd][Ee][Nn][Ss]{div}|[Gg][Dd][Nn][Ss]{div}|
[Gg][Aa][Tt][Ee]{div}|
[Gg][Ll][Aa][Dd][Ee]{div}|
[Gg][Ll][Ee][Nn]{div}|
[Gg][Rr][Ee][Ee][Nn]{div}|
    [Gg][Rr][Oo][Uu][Nn][Dd][Ss]{div}|[Gg][Rr][Nn][Dd][Ss]{div}|
[Gg][Rr][Oo][Vv][Ee]{div}|
[Hh][Aa][Rr][Bb][Oo][Uu][Rr]{div}|[Hh][Aa][Rr][Bb][Rr]{div}|
[Hh][Ee][Aa][Tt][Hh]{div}|
[Hh][Ee][Ii][Gg][Hh][Tt][Ss]{div}|[Hh][Tt][Ss]{div}|
    [Hh][Ii][Gg][Hh][Ll][Aa][Nn][Dd][Ss]{div}|[Hh][Gg][Hh][Ll][Dd][Ss]{div}|
    [Hh][Ii][Gg][Hh][Ww][Aa][Yy]{div}|[Hh][Ww][Yy]{div}|
[Hh][Ii][Ll][Ll]{div}|
[Hh][Oo][Ll][Ll][Oo][Ww]{div}|
[Îi][Ll][Ee]{div}|
[Ii][Mm][Pp][Aa][Ss][Ss][Ee]{div}|I[Mm][Pp]{div}|
[Ii][Nn][Ll][Ee][Tt]{div}|
[Ii][Ss][Ll][Aa][Nn][Dd]{div}|
[Kk][Ee][Yy]{div}|
[Kk][Nn][Oo][Ll][Ll]{div}|
[Ll][Aa][Nn][Dd][Ii][Nn][Gg]{div}|[Ll][Aa][Nn][Dd][Nn][Gg]{div}|
[Ll][Aa][Nn][Ee]{div}|
[Ll][Ii][Mm][Ii][Tt][Ss]{div}|[Ll][Mm][Tt][Ss]{div}|
[Ll][Ii][Nn][Ee]{div}|
[Ll][Ii][Nn][Kk]{div}|
[Ll][Oo][Oo][Kk][Oo][Uu][Tt]{div}|[Ll][Kk][Oo][Uu][Tt]{div}|
[Mm][Aa][Ii][Nn][Ww][Aa][Yy]{div}|
[Mm][Aa][Ll][Ll]{div}|
[Mm][Aa][Nn][Oo][Rr]{div}|
[Mm][Aa][Zz][Ee]{div}|
[Mm][Ee][Aa][Dd][Oo][Ww]{div}|
[Mm][Ee][Ww][Ss]{div}|
[Mm][Oo][Nn][Tt][Éé][Ee]{div}|
[Mm][Oo][Oo][Rr]{div}|
[Mm][Oo][Uu][Nn][Tt][Aa][Ii][Nn]{div}|[Mm][Tt][Nn]{div}|
[Mm][Oo][Uu][Nn][Tt]{div}|
[Oo][Rr][Cc][Hh][Aa][Rr][Dd]{div}|[Oo][Rr][Cc][Hh]{div}|
[Pp][Aa][Rr][Aa][Dd][Ee]{div}|
[Pp][Aa][Rr][Cc]{div}|
[Pp][Aa][Rr][Kk][Ww][Aa][Yy]{div}|[Pp][Kk][Yy]{div}|
[Pp][Aa][Rr][Kk]{div}|[Pp][Kk]{div}|
    [Pp][Aa][Ss][Ss][Aa][Gg][Ee]{div}|[Pp][Aa][Ss][Ss]{div}|
[Pp][Aa][Tt][Hh]{div}|
[Pp][Aa][Tt][Hh][Ww][Aa][Yy]{div}|[Pp][Tt][Ww][Aa][Yy]{div}|
[Pp][Ii][Nn][Ee][Ss]{div}|
[Pp][Ll][Aa][Cc][Ee]{div}|[Pp][Ll]{div}|
[Pp][Ll][Aa][Tt][Ee][Aa][Uu]{div}|[Pp][Ll][Aa][Tt]{div}|
[Pp][Ll][Aa][Zz][Aa]{div}|
[Pp][Oo][Ii][Nn][Tt][Ee]{div}|
[Pp][Oo][Ii][Nn][Tt]{div}|[Pp][Tt]{div}|
[Pp][Oo][Rr][Tt]{div}|
[Pp][Rr][Ii][Vv][Aa][Tt][Ee]{div}|[Pp][Vv][Tt]{div}|
[Pp][Rr][Oo][Mm][Ee][Nn][Aa][Dd][Ee]{div}|[Pp][Rr][Oo][Mm]{div}|
[Qq][Uu][Aa][Ii]{div}|
[Qq][Uu][Aa][Yy]{div}|
[Rr][Aa][Mm][Pp]{div}|
[Rr][Aa][Nn][Gg][Ee]{div}|[Rr][Gg]{div}|
[Rr][Aa][Nn][Gg]{div}|
[Rr][Ii][Dd][Gg][Ee]{div}|
[Rr][Ii][Ss][Ee]{div}|
[Rr][Oo][Aa][Dd]{div}|[Rr][Dd]{div}|
[Rr][Oo][Nn][Dd]\-[Pp][Oo][Ii][Nn][Tt]{div}|[Rr][Dd][Pp][Tt]{div}|
[Rr][Oo][Uu][Tt][Ee]{div}|[Rr][Tt][Ee]{div}|
[Rr][Oo][Ww]{div}|
[Rr][Uu][Ee][Ll][Ll][Ee]{div}|[Rr][Ll][Ee]{div}|
[Rr][Uu][Ee]{div}|
[Rr][Uu][Nn]{div}|
[Ss][Ee][Nn][Tt][Ii][Ee][Rr]{div}|[Ss][Ee][Nn][Tt]{div}|
# Street
[Ss][Tt][Rr][Ee][Ee][Tt]{div}|[Ss][Tt](?![A-Za-z]){div}|
# Square
[Ss][Qq][Uu][Aa][Rr][Ee]{div}|[Ss][Qq]{div}|
[Ss][Uu][Bb][Dd][Ii][Vv][Ii][Ss][Ii][Oo][Nn]{div}|[Ss][Uu][Bb][Dd][Ii][Vv]{div}|
    [Tt][Ee][Rr][Rr][Aa][Cc][Ee]{div}|[Tt][Ee][Rr][Rr]{div}|
    [Tt][Ee][Rr][Rr][Aa][Ss][Ss][Ee]{div}|[Tt][Ss][Ss][Ee]{div}|
[Tt][Hh][Ii][Cc][Kk][Ee][Tt]{div}|[Tt][Hh][Ii][Cc][Kk]{div}|
[Tt][Oo][Ww][Ee][Rr][Ss]{div}|
[Tt][Oo][Ww][Nn][Ll][Ii][Nn][Ee]{div}|[Tt][Ll][Ii][Nn][Ee]{div}|
[Tt][Rr][Aa][Ii][Ll]{div}|
[Tt][Uu][Rr][Nn][Aa][Bb][Oo][Uu][Tt]{div}|[Tt][Rr][Nn][Aa][Bb][Tt]{div}|
[Vv][Aa][Ll][Ee]{div}|
[Vv][Ii][Aa]{div}|
[Vv][Ii][Ee][Ww]{div}|
[Vv][Ii][Ll][Ll][Aa][Gg][Ee]{div}|[Vv][Ii][Ll][Ll][Gg][Ee]{div}|
[Vv][Ii][Ll][Ll][Aa][Ss]{div}|
[Vv][Ii][Ss][Tt][Aa]{div}|
[Vv][Oo][Ii][Ee]{div}|
    [Ww][Aa][Ll][Kk]{div}|
[Ww][Aa][Yy]{div}|
[Ww][Hh][Aa][Rr][Ff]{div}|
[Ww][Oo][Oo][Dd]{div}|
[Ww][Yy][Nn][Dd]{div}
)
(?P<route_id>
[\(\ \,]{route_symbols}
[Rr][Oo][Uu][Tt][Ee]\ [A-Za-z0-9]+[\)\ \,]{route_symbols}
)?
""".format(div="[\.\ ,]{0,2}", route_symbols='{0,3}')
floor = r"""
(?P<floor>
(?:
\d+[A-Za-z]{0,2}\.?\ [Ff][Ll][Oo][Oo][Rr]\
)
|
(?:
[Ff][Ll][Oo][Oo][Rr]\ \d+[A-Za-z]{0,2}\
)
)
"""
building = r"""
(?:
(?:
(?:[Bb][Uu][Ii][Ll][Dd][Ii][Nn][Gg])
|
(?:[Bb][Ll][Dd][Gg])
)
\ \d{0,2}[A-Za-z]?
)
"""
occupancy = r"""
(?:
(?:
(?:
#
# English
#
# Suite
[Ss][Uu][Ii][Tt][Ee]\ |[Ss][Tt][Ee]\.?\
|
# Apartment
[Aa][Pp][Tt]\.?\ |[Aa][Pp][Aa][Rr][Tt][Mm][Ee][Nn][Tt]\
|
# Room
[Rr][Oo][Oo][Mm]\ |[Rr][Mm]\.?\
|
# Unit
[Uu][Nn][Ii][Tt]\
|
#
# French
#
# Apartement
[Aa][Pp][Aa][Rr][Tt][Ee][Mm][Ee][Nn][Tt]\ |A[Pp][Pp]\
|
# Bureau
[Bb][Uu][Rr][Ee][Aa][Uu]\
|
# Unité
[Uu][Nn][Ii][Tt][Éé]\
)
(?:
[A-Za-z\#\&\-\d]{1,7}
)?
)
|
(?:
\#[0-9]{,3}[A-Za-z]{1}
)
)\ ?
"""
po_box = r"""
(?P<postal_box>
# English - PO Box 123
(?:[Pp]\.?\ ?[Oo]\.?\ [Bb][Oo][Xx]\ \d+)
|
# French - B.P. 123
(?:[Bb]\.?\ [Pp]\.?\ \d+)
|
# C.P. 123
(?:[Cc]\.?\ [Pp]\.?\ \d+)
|
# Case postale 123
(?:[Cc]ase\ [Pp][Oo][Ss][Tt][Aa][Ll][Ee]\ \d+)
|
# C.P. 123
(?:[Cc]\.[Pp]\.\ \d+)
)
"""
'''Define detection rules for a second type of address format
(the French one)
'''
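# The substitutions below simply suffix every named group with '_b', e.g.
# '(?P<street_name>' becomes '(?P<street_name_b>', so the same building blocks
# can be reused in the second, French-style branch without group-name clashes.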
street_number_b = re.sub('<([a-z\_]+)>', r'<\1_b>', street_number)
street_name_b = re.sub('<([a-z\_]+)>', r'<\1_b>', street_name)
street_type_b = re.sub('<([a-z\_]+)>', r'<\1_b>', street_type)
po_box_b = re.sub('<([a-z\_]+)>', r'<\1_b>', po_box)
post_direction_b = re.sub('<([a-z\_]+)>', r'<\1_b>', post_direction)
po_box_positive_lookahead = r"""
(?=
# English - PO Box 123
(?:[Pp]\.?\ ?[Oo]\.?\ [Bb][Oo][Xx]\ \d+)
|
# French - B.P. 123
(?:[Bb]\.?\ [Pp]\.?\ \d+)
|
# C.P. 123
(?:[Cc]\.?\ [Pp]\.?\ \d+)
|
# Case postale 123
(?:[Cc]ase\ [Pp][Oo][Ss][Tt][Aa][Ll][Ee]\ \d+)
|
# C.P. 123
(?:[Cc]\.[Pp]\.\ \d+)
|
(?:[\ \,])
)
"""
full_street = r"""
(?:
# Format commonly used in French
(?P<full_street_b>
{street_number_b}{div}
{street_type_b}{div}
({street_name_b} {po_box_positive_lookahead})?\,?\ ?
{post_direction_b}?\,?\ ?
{po_box_b}?\,?\ ?
)
|
# Format commonly used in English
(?P<full_street>
{street_number}\,?\ ?
{street_name}?\,?\ ?
(?:(?<=[\ \,]){street_type})\,?\ ?
{post_direction}?\,?\ ?
{floor}?\,?\ ?
(?P<building_id>
{building}
)?\,?\ ?
(?P<occupancy>
{occupancy}
)?\,?\ ?
{po_box}?
)
)""".format(street_number=street_number,
street_number_b=street_number_b,
street_name=street_name,
street_name_b=street_name_b,
street_type=street_type,
street_type_b=street_type_b,
post_direction=post_direction,
post_direction_b=post_direction_b,
floor=floor,
building=building,
occupancy=occupancy,
po_box=po_box,
po_box_b=po_box_b,
po_box_positive_lookahead=po_box_positive_lookahead,
div='[\ ,]{1,2}',
)
# region1 here is actually a "province"
region1 = r"""
(?P<region1>
(?:
# province abbreviations (English)
A\.?B\.?|B\.?C\.?|M\.?B\.?|N\.?B\.?|N\.?L\.?|
N\.?T\.?|N\.?S\.?|N\.?U\.?|O\.?N\.?|P\.?E\.?|
Q\.?C\.?|S\.?K\.?|Y\.?T\.?
)
|
(?:
# provinces full (English)
[Aa][Ll][Bb][Ee][Rr][Tt][Aa]|
[Bb][Rr][Ii][Tt][Ii][Ss][Hh]\ [Cc][Oo][Ll][Uu][Mm][Bb][Ii][Aa]|
[Mm][Aa][Nn][Ii][Tt][Oo][Bb][Aa]|
[Nn][Ee][Ww]\ [Bb][Rr][Uu][Nn][Ss][Ww][Ii][Cc][Kk]|
[Nn][Ee][Ww][Ff][Oo][Uu][Nn][Dd][Ll][Aa][Nn][Dd]\
[Aa][Nn][Dd]\ [Ll][Aa][Bb][Rr][Aa][Dd][Oo][Rr]|
[Nn][Ee][Ww][Ff][Oo][Uu][Nn][Dd][Ll][Aa][Nn][Dd]\
\&\ [Ll][Aa][Bb][Rr][Aa][Dd][Oo][Rr]|
[Nn][Oo][Rr][Tt][Hh][Ww][Ee][Ss][Tt]\
[Tt][Ee][Rr][Rr][Ii][Tt][Oo][Rr][Ii][Ee][Ss]|
[Nn][Oo][Vv][Aa]\ [Ss][Cc][Oo][Tt][Ii][Aa]|
[Nn][Uu][Nn][Aa][Vv][Uu][Tt]|
[Oo][Nn][Tt][Aa][Rr][Ii][Oo]|
[Pp][Rr][Ii][Nn][Cc][Ee]\ [Ee][Dd][Ww][Aa][Rr][Dd]\
[Ii][Ss][Ll][Aa][Nn][Dd]|
[Qq][Uu][Ee][Bb][Ee][Cc]|
[Ss][Aa][Ss][Kk][Aa][Tt][Cc][Hh][Ee][Ww][Aa][Nn]|
[Yy][Uu][Kk][Oo][Nn]|
# provinces full (French)
[Cc][Oo][Ll][Oo][Mm][Bb][Ii][Ee]\-
        [Bb][Rr][Ii][Tt][Aa][Nn]{1,2}[Ii][Qq][Uu][Ee]|
[Nn][Oo][Uu][Vv][Ee][Aa][Uu]\-[Bb][Rr][Uu][Nn][Ss][Ww][Ii][Cc][Kk]|
[Tt][Ee][Rr][Rr][Ee]\-[Nn][Ee][Uu][Vv][Ee]\-
[Ee][Tt]\-[Ll][Aa][Bb][Rr][Aa][Dd][Oo][Rr]|
[Tt][Ee][Rr][Rr][Ii][Tt][Oo][Ii][Rr][Ee][Ss]\ [Dd][Uu]\
[Nn][Oo][Rr][Dd]\-[Oo][Uu][Ee][Ss][Tt]|
[Nn][Oo][Uu][Vv][Ee][Ll][Ll][Ee]\-[ÉéEe][Cc][Oo][Ss][Ss][Ee]|
[ÎîIi][Ll][Ee]\-[Dd][Uu]\-[Pp][Rr][Ii][Nn][Cc][Ee]\-
[ÉéEe][Dd][Oo][Uu][Aa][Rr][Dd]|
[Qq][Uu][Éé][Bb][Ee][Cc]
)
)
"""
city = r"""
(?P<city>
(?<=[\, ])[A-z]{1}(?![0-9]) # city second char should not be number
[\w\ \-\'\.]{2,20}?(?=[\, ])
)
"""
postal_code = r"""
(?P<postal_code>
(?:
[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJKLMNPRSTVWXYZ]\ ?
\d[ABCEGHJKLMNPRSTVWXYZ]\d
)
)
"""
country = r"""
(?:
[Cc][Aa][Nn][Aa][Dd][Aa]
)
"""
# define detection rules for postal code placed in different parts of address
postal_code_b = re.sub('<([a-z\_]+)>', r'<\1_b>', postal_code)
postal_code_c = re.sub('<([a-z\_]+)>', r'<\1_c>', postal_code)
full_address = r"""
(?P<full_address>
{full_street} {div}
{city} {div}
(?:{postal_code_c} {div})?
\(?{region1}[\)\.]? {div}
(?:
(?:
{postal_code}? {div} {country}?
(?:{div} {postal_code_b})?
)
)
)
""".format(
full_street=full_street,
div='[\, ]{,2}',
city=city,
region1=region1,
country=country,
country_b=country,
postal_code=postal_code,
postal_code_b=postal_code_b,
postal_code_c=postal_code_c,
)
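# Minimal illustration of how the assembled pattern can be used (the address is
# made up and this snippet is not part of the original module):
#
#   import re
#   _full_address_re = re.compile(full_address, re.VERBOSE | re.UNICODE)
#   _m = _full_address_re.search('123 Main Street, Toronto ON M5V 2T6, Canada')
#   if _m:
#       print(_m.group('full_address'))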
| mit | -7,975,615,341,591,156,000 | 36.672474 | 102 | 0.32672 | false | 2.714537 | false | false | false |
CumulusNetworks/netshow-linux-lib | netshow/linux/show_neighbors.py | 2 | 2316 | # pylint: disable=E0611
""" Module for printout out linux device neighbors
"""
from netshow.linux.netjson_encoder import NetEncoder
from netshow.linux import print_iface
import netshowlib.linux.cache as linux_cache
from collections import OrderedDict
import json
from tabulate import tabulate
from netshow.linux.common import _, legend_wrapped_cli_output
class ShowNeighbors(object):
"""
Class responsible for printing out basic linux device neighbor info
"""
def __init__(self, cl):
self.use_json = cl.get('--json') or cl.get('-j')
self.ifacelist = OrderedDict()
self.cache = linux_cache
self.print_iface = print_iface
self.show_legend = False
if cl.get('-l') or cl.get('--legend'):
self.show_legend = True
def run(self):
"""
:return: basic neighbor information based on data obtained on netshow-lib
"""
feature_cache = self.cache.Cache()
feature_cache.run()
for _ifacename in sorted(feature_cache.lldp.keys()):
self.ifacelist[_ifacename] = self.print_iface.iface(_ifacename, feature_cache)
if self.use_json:
return json.dumps(self.ifacelist,
cls=NetEncoder, indent=4)
return self.print_neighbor_info()
def print_neighbor_info(self):
"""
:return: cli output of netshow neighbor
"""
_header = [_('local'), _('speed'), _('mode'), '',
_('remote'), _('sw/hostname'), _('summary')]
_table = []
for _iface in self.ifacelist.values():
_table.append([_iface.name, _iface.speed,
_iface.port_category,
'====',
_iface.iface.lldp[0].get('adj_port'),
_iface.iface.lldp[0].get('adj_hostname'),
', '.join(_iface.summary)])
del _iface.iface.lldp[0]
if _iface.iface.lldp:
for _entry in _iface.iface.lldp:
_table.append(['', '', '', '====',
_entry.get('adj_port'),
_entry.get('adj_hostname')])
return legend_wrapped_cli_output(tabulate(_table, _header), self.show_legend)
| gpl-2.0 | 7,353,999,060,615,984,000 | 36.354839 | 90 | 0.542314 | false | 4.203267 | false | false | false |
mrpindo/openshift-estore | tokoku/myauth/views.py | 1 | 4205 | # Create your views here.
from django.shortcuts import get_object_or_404, render_to_response, get_list_or_404
from django.template import RequestContext
from django.views.generic import TemplateView
from myauth.urls import *
"""
Workaround for migrating direct_to_template function to DirectTemplateView
"""
from django.views.generic import TemplateView
"""
Views for django-signup.
"""
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.contrib.auth.models import User
from myauth.models import SignUpProfile, MyUser
from myauth.forms import SignUpForm, ActivateForm
import datetime
from django.contrib.sites.models import get_current_site
def index(request, template_name="myauth/index.html"):
page_title = 'Accounts page'
return render_to_response(template_name, locals(),context_instance=RequestContext(request))
def login(request, template_name="myauth/login.html"):
page_title = 'Login page'
return render_to_response(template_name, locals(),context_instance=RequestContext(request))
def logout(request, template_name="myauth/logout.html"):
page_title = 'Logout page'
return render_to_response(template_name, locals(),context_instance=RequestContext(request))
class profile(TemplateView):
template_name = "myauth/profile.html"
class DirectTemplateView(TemplateView):
extra_context = None
def get_context_data(self, **kwargs):
context = super(self.__class__, self).get_context_data(**kwargs)
if self.extra_context is not None:
for key, value in self.extra_context.items():
if callable(value):
context[key] = value()
else:
context[key] = value
return context
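# A minimal, hypothetical urlconf usage of the view above:
#   url(r'^about/$', DirectTemplateView.as_view(
#       template_name='myauth/about.html',
#       extra_context={'page_title': 'About us'}))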
def _send_activation_email(profile):
# Render activation email
message = render_to_string('myauth/activation_email.txt',
{'signup_key': profile.signup_key,
'expiration_days': settings.SIGNUP_EXPIRY_DAYS,
'site': get_current_site})
subject = render_to_string('myauth/activation_email_subject.txt')
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
# Send activation email
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [profile.email,],
fail_silently=False)
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
instance = form.save()
# Generate and send activation email
_send_activation_email(instance)
return HttpResponseRedirect('/accounts/signup/checkyouremail')
else:
form = SignUpForm()
return render_to_response('myauth/signup_form.html', {'form': form},
context_instance=RequestContext(request))
def activate(request, signup_key):
# Try and get a sign up profile that matches activation key
# Redirect to failure page if no match
try:
profile = SignUpProfile.objects.get(signup_key=signup_key)
    except SignUpProfile.DoesNotExist:
return HttpResponseRedirect('/accounts/signup/key_invalid')
# Check if profile has expired
if profile.expiry_date > datetime.datetime.now():
#related with USE_TZ in settings.py!
if request.method == 'POST':
form = ActivateForm(request.POST)
if form.is_valid():
# Create a new User instance
user = MyUser(email=profile.email)
user.set_password(form.cleaned_data['password1'])
user.save()
# Delete the sign up profile
profile.delete()
return HttpResponseRedirect('/accounts/signup/success')
else:
form = ActivateForm()
else:
# Delete expired sign up profile and show invalid key page
profile.delete()
return HttpResponseRedirect('/accounts/signup/key_invalid')
return render_to_response('myauth/activate_form.html', {'form': form, 'user': profile.email},
context_instance=RequestContext(request))
| gpl-2.0 | 3,941,015,068,366,415,400 | 34.041667 | 99 | 0.678954 | false | 4.118511 | false | false | false |
metrasynth/solar-flares | sf/synth/sunsynth.py | 1 | 1406 | import logging
import rv.api
import sunvox
class SunSynth(object):
def __init__(self, filename, slot):
"""
:type slot: sunvox.Slot
"""
logging.debug('filename=%r', filename)
self.filename = filename
synth = rv.api.read_sunvox_file(filename)
self.project = rv.api.Project()
self.project.attach_module(synth.module)
self.module = synth.module
synth.module >> self.project.output
self.slot = slot
slot.load(self.project)
def process_midi(self, message):
if message.type == 'note_on' and message.velocity > 0:
note = sunvox.NOTECMD(message.note + 1)
logging.debug('Note on: %r', note)
logging.debug('Velocity: %r', message.velocity)
self.slot.send_event(
track_num=1,
note=note,
vel=message.velocity,
module=self.module,
ctl=0,
ctl_val=0,
)
elif message.type == 'note_off' or \
(message.type == 'note_on' and message.velocity == 0):
note = sunvox.NOTECMD(message.note)
self.slot.send_event(
track_num=1,
note=sunvox.NOTECMD.NOTE_OFF,
vel=0,
module=self.module,
ctl=0,
ctl_val=0,
)
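# Minimal usage sketch (illustrative): pump incoming MIDI messages into a
# SunSynth instance. The `mido` package and the port name are assumptions --
# neither is imported or referenced by this module itself.
def feed_midi_input(synth, port_name):
    """Forward note messages from a MIDI input port to synth.process_midi()."""
    import mido  # assumed extra dependency, only needed for this sketch
    with mido.open_input(port_name) as port:
        for message in port:
            if message.type in ('note_on', 'note_off'):
                synth.process_midi(message)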
| mit | -5,847,323,777,921,378,000 | 29.565217 | 70 | 0.507112 | false | 3.769437 | false | false | false |
shultzd/Euler | problem_31.py | 1 | 1613 | # In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
#
# 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
#
# It is possible to make £2 in the following way:
#
# 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
#
# How many different ways can £2 be made using any number of coins?
#
# ans = 73682
#########################################################
import time
#########################################################
num_coins = 8
NO_COIN = 100
coin_values = [1, 2, 5, 10, 20, 50, 100, 200] # values are in pence[p]
target_value = 200#200
#########################################################
def recursive_solve(coins, coin_id = NO_COIN):
num_options = 0
if coin_id != NO_COIN:
coins[coin_id] += 1
sum_coins = sum([a*b for a,b in zip(coins, coin_values)])
if sum_coins == target_value:
return 1
elif sum_coins > target_value:
return 0
if coin_id == NO_COIN:
starting_coin = 0
else:
starting_coin = coin_id
for coin_id in range(starting_coin, num_coins):
num_options += recursive_solve(coins[:], coin_id)
return num_options
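# Worked example (hand check of the recursion above): if target_value were 5,
# recursive_solve([0]*num_coins) would return 4, matching the four ways to make
# 5p by hand: (5), (2+2+1), (2+1+1+1) and (1+1+1+1+1).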
#########################################################
def euler_problem_31():
print "Problem 31:"
ans = recursive_solve([0,0,0,0,0,0,0,0])
print "ans = ", ans
#########################################################
start_time = time.time()
euler_problem_31()
end_time = time.time()
print "total calculation time is ", (end_time - start_time), " [Sec]"
| cc0-1.0 | -1,476,017,489,725,330,000 | 26.603448 | 113 | 0.484697 | false | 3.208417 | false | false | false |
PuZZleDucK/pixelated-platform | test/features/page_objects/base_page_object.py | 2 | 4109 | #
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
class BasePageObject(object):
def __init__(self, context, timeout=10):
self.context = context
self.timeout = timeout
def _find_element_containing_text(self, text, element_type='*'):
return self._find_element_by_xpath("//%s[contains(.,'%s')]" % (element_type, text))
def _find_element_by_css_locator(self, locator, timeout=None, dom_context=None):
locator_tuple = (By.CSS_SELECTOR, locator)
self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
context = dom_context or self.context.browser
return context.find_element_by_css_selector(locator)
def _find_element_by_id(self,id):
return self.context.browser.find_element_by_id(id)
def _find_elements_by_css_locator(self, locator, timeout=None):
locator_tuple = (By.CSS_SELECTOR, locator)
self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
return self.context.browser.find_elements_by_css_selector(locator)
def _find_elements_by_xpath(self, xpath, timeout=None):
locator_tuple = (By.XPATH, xpath)
self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
return self.context.browser.find_elements_by_xpath(xpath)
def _find_element_by_xpath(self, xpath, timeout=None, dom_context=None):
locator_tuple = (By.XPATH, xpath)
self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
context = dom_context or self.context.browser
        return context.find_element_by_xpath(xpath)
def _wait_element_to_be_removed(self, locator, timeout=None):
locator_tuple = (By.CSS_SELECTOR, locator)
self._wait_until(EC.invisibility_of_element_located(locator_tuple), timeout or self.timeout)
def _wait_element_to_be_removed_by_xpath(self, xpath, timeout=None):
locator_tuple = (By.XPATH, xpath)
self._wait_until(EC.invisibility_of_element_located(locator_tuple), timeout or self.timeout)
def _wait_until(self, condition_function, timeout=None):
wait = WebDriverWait(self.context.browser, timeout or self.timeout)
wait.until(condition_function)
def wait_until_element_is_visible_by_css_locator(self, locator):
wait = WebDriverWait(self.context.browser, 60)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))
        return self.context.browser.find_element_by_css_selector(locator)
def wait_until_elements_are_visible_by_css_locator(self, locator):
wait = WebDriverWait(self.context.browser, 60)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))
def wait_until_element_is_visible_by_locator(self, locator):
wait = WebDriverWait(self.context.browser, 60)
by, locator = locator
wait.until(EC.visibility_of_element_located((by, locator)))
return self.context.browser.find_element(by, locator)
def wait_until_element_is_invisible_by_locator(self, locator):
wait = WebDriverWait(self.context.browser, 60)
by, locator = locator
wait.until(EC.invisibility_of_element_located((by, locator)))
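# Illustrative only: a hypothetical page object built on the helpers above.
# The class name and CSS locators are assumptions, not part of the real suite.
class ExampleLoginPage(BasePageObject):
    def login(self, username, password):
        self._find_element_by_css_locator('input#email').send_keys(username)
        self._find_element_by_css_locator('input#password').send_keys(password)
        self._find_element_containing_text('Login', element_type='button').click()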
| agpl-3.0 | 2,943,140,441,133,139,500 | 46.229885 | 100 | 0.710879 | false | 3.665477 | false | false | false |
ChandraCXC/sherpa-samp | sherpa_samp/session.py | 1 | 14840 | #!/usr/bin/env python
#
# Copyright (C) 2011, 2015 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import numpy
import sherpa.all
# Override Sherpa's feature that binds model identifiers as local objects
import sherpa.ui
import sherpa.ui.utils
sherpa.ui.utils._assign_obj_to_main = lambda name, obj: None
import sherpa.astro.all
import sherpa.astro.ui as sherpaUI
import logging
logger = logging.getLogger(__name__)
info = logger.info
from sherpa_samp.utils import encode_string, decode_string
__all__ = ("SherpaSession", "check_for_nans")
def check_for_nans(ui):
session = ui.session
stat = session.get_stat()
for ii in session.list_data_ids():
x, = session.get_indep(ii, filter=False)
y = session.get_dep(ii, filter=False)
err = session.get_staterror(ii, filter=False)
mask = numpy.isnan(x)
mask |= numpy.isnan(y)
# No need to filter NaNs in flux error column
# unless statistic actually uses errors
# Least-squares, cash, c-stat do *not* use errors
# chi-squared *does* use errors.
if not isinstance(stat, (sherpa.stats.LeastSq, sherpa.stats.CStat, sherpa.stats.Cash)):
mask |= numpy.isnan(err)
# When using chi-squared, also automatically filter out
# data points where the error bar is zero. The fit will
# proceed with usable errors.
mask |= err == 0
session.set_filter(ii, mask, ignore=True)
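# Illustrative helper (not called by this module): the same NaN / zero-error
# masking used in check_for_nans, shown in isolation on plain numpy arrays so
# the filtering rule is easy to test by hand.
def _nan_zero_error_mask(x, y, err, errors_used=True):
    """Return a boolean mask of points to ignore (True means filter out)."""
    mask = numpy.isnan(x) | numpy.isnan(y)
    if errors_used:
        # chi-squared style statistics also reject NaN or zero uncertainties
        mask |= numpy.isnan(err)
        mask |= err == 0
    return mask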
#
## Sherpa Session Object
#
class SherpaSession(object):
def __init__(self, msg_id=None, mtype=None):
session = sherpaUI.utils.Session()
session._add_model_types(sherpa.models.basic)
session._add_model_types(sherpa.models.template)
session._add_model_types(sherpa.astro.models)
session._add_model_types(sherpa.instrument,
baselist=(sherpa.models.Model,))
session._add_model_types(sherpa.astro.instrument)
session._add_model_types(sherpa.astro.optical)
#session._add_model_types(sherpa.astro.xspec,
# (sherpa.astro.xspec.XSAdditiveModel,
# sherpa.astro.xspec.XSMultiplicativeModel))
self.session = session
self.msg_id = msg_id
self.mtype = mtype
# max_rstat of 3 is unhelpful in SED fitting.
self.session.set_conf_opt("max_rstat", 1.e+38)
# compute 90% confidence limits by default
self.session.set_conf_opt("sigma", 1.6448536269514722)
def set_data(self, datamaps):
if not numpy.iterable(datamaps):
raise TypeError("datamaps is not iterable")
#keys = ["x", "y", "staterror", "syserror", "weights"]
keys = ["x", "y", "staterror", "syserror"]
for ii, data in enumerate(datamaps):
for key in keys:
if data.has_key(key):
data[key] = decode_string(data[key])
info('decoding' + key)
self.session.set_data(ii, sherpa.data.Data1D(**data))
d = self.session.get_data(ii)
numpy.set_printoptions(precision=4, threshold=6)
info("DataSet %i x: " % ii + numpy.array2string(d.x))
info("DataSet %i y: " % ii + numpy.array2string(d.y))
info("DataSet %i staterror: " % ii + numpy.array2string(d.staterror))
def set_model(self, modelmaps):
for ii, model in enumerate(modelmaps):
if model["name"].strip() == '':
raise TypeError("Model expression not found")
self.session.set_model(ii, model["name"])
info("Model: " + str(ii) + str(self.session.get_source(ii)))
def set_parameters(self, modelmaps, usermodels):
# If entries in usermodels dictionary, interpret them here
for model_info in usermodels:
# Check that model name is a string and can be split
if (type(model_info["name"]) == type("str")):
if (len(model_info["name"].split('.')) == 2):
model_type = None
model_name = None
try:
# The "name" is actually type.name
# eg. tablemodel.c1 so split it
model_type=model_info["name"].split('.')[0]
model_name=model_info["name"].split('.')[1]
if (model_type == "tablemodel"):
self.session.load_table_model(model_name,
model_info["file"].strip())
if (model_type == "template"):
# Template model fits can be done with continuous optimization methods in Sherpa >4.6
self.session.load_template_model(model_name,
model_info["file"].strip())
#self.session.set_method("gridsearch")
#tempmdl = self.session.get_model_component(model_name)
#self.session.set_method_opt("sequence", tempmdl.parvals)
if (model_type == "usermodel"):
# user_model_ref set by code in user model
# Python file
execfile(model_info["file"].strip())
# Get reference to user model function
func_ref = model_info["function"].strip()
func_ref = eval(func_ref)
self.session.load_user_model(func_ref,
model_name)
# Now, look in modelmaps for instance of user model
# That has a dictionary of parameters, so create
# user model parameters from entries in that dictionary
for ii, model in enumerate(modelmaps):
for component in model["parts"]:
if (model_info["name"] == component["name"]):
parnames = []
parvals = []
parmins = []
parmaxs = []
parfrozen = []
for pardict in component["pars"]:
parnames = parnames + [pardict["name"].split(".")[1].strip()]
parvals = parvals + [float(pardict["val"])]
parmins = parmins + [float(pardict["min"])]
parmaxs = parmaxs + [float(pardict["max"])]
parfrozen = parfrozen + [bool(int(pardict["frozen"]))]
self.session.add_user_pars(model_name,
parnames,
parvals,
parmins,
parmaxs,
None,
parfrozen)
break
# end of block to interpret user models
except Exception, e:
try:
if (model_name != None):
self.session.delete_model_component(model_name)
except:
raise e
# end of block to interpret custom models
# Now, update parameter values, and create *or* update
# model components, for all components listed in modelmaps.
# (If model was already created above, the code below merely
# updates parameter values.)
for ii, model in enumerate(modelmaps):
for component in model["parts"]:
if component["name"].strip() == '':
raise TypeError("Model expression not found")
mdl = self.session._eval_model_expression(component["name"])
for pardict in component["pars"]:
if pardict["name"].strip() == '':
raise TypeError("Model component name missing")
par = self.session.get_par(pardict["name"])
parname = pardict.pop("name").split(".")[1].strip()
# Specview sends parameter attributes as strings,
# convert to floats here.
#for attrname in ['val', 'min', 'max']:
for attrname in ['val']:
if pardict.has_key(attrname):
pardict[attrname] = float(pardict[attrname])
#pardict.pop('min', None)
#pardict.pop('max', None)
pardict.pop('alwaysfrozen', None)
attrname = 'frozen'
if pardict.has_key(attrname):
pardict[attrname] = bool(int(pardict[attrname]))
par.set(**pardict)
info('setting ' + parname + ' with ' + str(pardict))
info(str(mdl))
def set_stat(self, statmap):
self.session.set_stat(statmap["name"])
# FIXME: A kludge when Specview passes all zeros for staterror
# for NED SEDs.
# check for zeros in uncertainties when using leastsq
if statmap["name"] == "leastsq":
for ii in self.session.list_data_ids():
data = self.session.get_data(ii)
if(data.staterror is not None and
(True in (data.staterror <= 0.0))):
#data.staterror = numpy.ones_like(data.staterror)
data.staterror = numpy.ones_like(data.y)
info(statmap["name"] + ": " + self.session.get_stat_name())
def set_method(self, methodmap):
self.session.set_method(methodmap["name"])
info(methodmap["name"] + ": ")
configdict = methodmap.get("config", None)
if configdict is not None:
info(methodmap["name"] + ": " + str(methodmap["config"]))
for key in configdict:
if str(configdict[key]).startswith('INDEF'):
configdict[key] = None
self.session.set_method_opt(key, configdict[key])
info(str(self.session.get_method_opt()))
def set_confidence(self, confidencemap):
methodname = confidencemap["name"].strip().lower()
method_opt = getattr(self.session, 'set_%s_opt' % methodname)
info(confidencemap["name"] + ": ")
configdict = confidencemap.get("config", None)
if configdict is not None:
info(confidencemap["name"] + ": " + str(confidencemap["config"]))
for key in configdict:
if str(configdict[key]).startswith('INDEF'):
configdict[key] = None
val = None
try:
val = float(configdict[key])
except:
raise Exception("Sigma must be a valid floating-point value")
if numpy.isnan(val):
raise Exception("Sigma must be a valid floating-point value")
method_opt(key, val)
method_opt = getattr(self.session, 'get_%s_opt' % methodname)
info(str(method_opt()))
def get_confidence(self, confidencemap):
methodname = confidencemap["name"].strip().lower()
method = getattr(self.session, 'get_%s' % methodname)
return method()
def get_flux(self, fluxtype):
flux_func = getattr(self.session, 'calc_%s_flux' % fluxtype)
return flux_func
def run_confidence(self, confidencemap):
methodname = confidencemap["name"].strip().lower()
method = getattr(self.session, methodname)
method()
def get_confidence_results(self, confidencemap, confidence_results=None):
if confidence_results is None:
methodname = confidencemap["name"].strip().lower()
method_result = getattr(self.session, 'get_%s_results' % methodname)
confidence_results = method_result()
results = {}
results["sigma"] = repr(float(confidence_results.sigma))
results["percent"] = repr(float(confidence_results.percent))
results["parnames"] = list(confidence_results.parnames)
results["parvals"] = encode_string(confidence_results.parvals)
results["parmins"] = encode_string(confidence_results.parmins)
results["parmaxes"] = encode_string(confidence_results.parmaxes)
return results
def get_fit_results(self, fit_results=None):
if fit_results is None:
fit_results = self.session.get_fit_results()
results = {}
results["succeeded"] = str(int(bool(fit_results.succeeded)))
results["parvals"] = encode_string(fit_results.parvals)
results["parnames"] = list(fit_results.parnames)
results["statval"] = repr(float(fit_results.statval))
results["numpoints"] = str(int(fit_results.numpoints))
results["dof"] = repr(float(fit_results.dof))
results["qval"] = 'nan'
if fit_results.qval is not None:
results["qval"] = repr(float(fit_results.qval))
results["rstat"] = 'nan'
if fit_results.rstat is not None:
results["rstat"] = repr(float(fit_results.rstat))
results["nfev"] = str(int(fit_results.nfev))
return results
| gpl-2.0 | -1,138,305,260,855,621,000 | 42.014493 | 113 | 0.51752 | false | 4.393132 | true | false | false |
cyberark-bizdev/ansible | lib/ansible/modules/network/fortimanager/fmgr_script.py | 44 | 8309 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_script
version_added: "2.5"
author: Andrew Welsh
short_description: Add/Edit/Delete and execute scripts
description: Create/edit/delete scripts and execute the scripts on the FortiManager using jsonrpc API
options:
adom:
description:
      - The administrative domain (adom) the configuration belongs to
required: true
vdom:
description:
- The virtual domain (vdom) the configuration belongs to
host:
description:
- The FortiManager's Address.
required: true
username:
description:
- The username to log into the FortiManager
required: true
password:
description:
- The password associated with the username account.
required: false
state:
description:
- The desired state of the specified object.
- present - will create a script.
      - execute - execute the script.
- delete - delete the script.
required: false
default: present
choices: ["present", "execute", "delete"]
script_name:
description:
- The name of the script.
required: True
script_type:
description:
- The type of script (CLI or TCL).
required: false
script_target:
description:
- The target of the script to be run.
required: false
script_description:
description:
- The description of the script.
required: false
script_content:
description:
- The script content that will be executed.
required: false
script_scope:
description:
      - (datasource) The devices the script will run on; may include both device members and device group members.
required: false
script_package:
description:
- (datasource) Policy package object to run the script against
required: false
'''
EXAMPLES = '''
- name: CREATE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
script_type: "cli"
script_target: "remote_device"
script_description: "Create by Ansible"
script_content: "get system status"
- name: EXECUTE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
state: "execute"
script_scope: "FGT1,FGT2"
- name: DELETE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
state: "delete"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: string
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def set_script(fmg, script_name, script_type, script_content, script_desc, script_target, adom):
"""
This method sets a script.
"""
datagram = {
'content': script_content,
'desc': script_desc,
'name': script_name,
'target': script_target,
'type': script_type,
}
url = '/dvmdb/adom/{adom}/script/'.format(adom=adom)
response = fmg.set(url, datagram)
return response
def delete_script(fmg, script_name, adom):
"""
This method deletes a script.
"""
datagram = {
'name': script_name,
}
url = '/dvmdb/adom/{adom}/script/{script_name}'.format(adom=adom, script_name=script_name)
response = fmg.delete(url, datagram)
return response
def execute_script(fmg, script_name, scope, package, adom, vdom):
"""
This method will execute a specific script.
"""
scope_list = list()
scope = scope.replace(' ', '')
scope = scope.split(',')
for dev_name in scope:
scope_list.append({'name': dev_name, 'vdom': vdom})
datagram = {
'adom': adom,
'script': script_name,
'package': package,
'scope': scope_list,
}
url = '/dvmdb/adom/{adom}/script/execute'.format(adom=adom)
response = fmg.execute(url, datagram)
return response
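# Example of the scope expansion above (illustrative values only): with
# scope="FGT1,FGT2" and vdom="root", the datagram carries
#   'scope': [{'name': 'FGT1', 'vdom': 'root'}, {'name': 'FGT2', 'vdom': 'root'}]
# so one execute call fans the script out to every listed device.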
def main():
argument_spec = dict(
adom=dict(required=False, type="str"),
vdom=dict(required=False, type="str"),
host=dict(required=True, type="str"),
password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
state=dict(choices=["execute", "delete", "present"], type="str"),
script_name=dict(required=True, type="str"),
script_type=dict(required=False, type="str"),
script_target=dict(required=False, type="str"),
script_description=dict(required=False, type="str"),
script_content=dict(required=False, type="str"),
script_scope=dict(required=False, type="str"),
script_package=dict(required=False, type="str"),
)
module = AnsibleModule(argument_spec, supports_check_mode=True,)
# check if params are set
if module.params["host"] is None or module.params["username"] is None:
module.fail_json(msg="Host and username are required for connection")
# check if login failed
fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
response = fmg.login()
if "FortiManager instance connnected" not in str(response):
module.fail_json(msg="Connection to FortiManager Failed")
else:
adom = module.params["adom"]
if adom is None:
adom = "root"
vdom = module.params["vdom"]
if vdom is None:
vdom = "root"
state = module.params["state"]
if state is None:
state = "present"
script_name = module.params["script_name"]
script_type = module.params["script_type"]
script_target = module.params["script_target"]
script_description = module.params["script_description"]
script_content = module.params["script_content"]
script_scope = module.params["script_scope"]
script_package = module.params["script_package"]
# if state is present (default), then add the script
if state == "present":
results = set_script(fmg, script_name, script_type, script_content, script_description, script_target, adom)
if not results[0] == 0:
if isinstance(results[1], list):
module.fail_json(msg="Adding Script Failed", **results)
else:
module.fail_json(msg="Adding Script Failed")
elif state == "execute":
results = execute_script(fmg, script_name, script_scope, script_package, adom, vdom)
if not results[0] == 0:
module.fail_json(msg="Script Execution Failed", **results)
elif state == "delete":
results = delete_script(fmg, script_name, adom)
if not results[0] == 0:
module.fail_json(msg="Script Deletion Failed", **results)
fmg.logout()
# results is returned as a tuple
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
| gpl-3.0 | -4,774,850,824,318,835,000 | 29.435897 | 120 | 0.636779 | false | 3.91934 | false | false | false |
yephper/django | django/bin/boot/boot/settings.py | 1 | 3304 | """
Django settings for boot project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')hlj1!*+&cv!9(sg3xfxtip($_c#pf4imb=_aw=stbc$n0lzn+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'news',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'boot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR , 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'boot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'pytest',
'USER': 'root',
'PASSWORD': 'azdingdb123',
'HOST':'localhost',
'PORT':'3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH= os.path.join(os.path.dirname(__file__), '../static').replace('\\','/')
| bsd-3-clause | -5,766,888,668,676,380,000 | 24.612403 | 91 | 0.674637 | false | 3.579632 | false | false | false |
hsavolai/vmlab | src/kiwi/ui/delegates.py | 3 | 7563 | #
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2002, 2003 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Christian Reis <[email protected]>
# Lorenzo Gil Sanchez <[email protected]>
# Johan Dahlin <[email protected]>
#
"""Defines the Delegate classes that are included in the Kiwi Framework."""
from kiwi.ui.views import SlaveView, BaseView
from kiwi.controllers import BaseController
from kiwi.python import deprecationwarn
class Delegate(BaseView, BaseController):
"""A class that combines view and controller functionality into a
single package. The Delegate class possesses a top-level window.
"""
def __init__(self, toplevel=None, widgets=(), gladefile=None,
toplevel_name=None,
delete_handler=None, keyactions=None):
"""Creates a new Delegate.
The keyactions parameter is sent to L{kiwi.controllers.BaseController},
the rest are sent to L{kiwi.ui.views.BaseView}
"""
if gladefile:
deprecationwarn(
'gladefile is deprecated in Delegate, '
'use GladeDelegate instead',
stacklevel=3)
BaseView.__init__(self,
toplevel=toplevel,
widgets=widgets,
gladefile=gladefile,
toplevel_name=toplevel_name,
delete_handler=delete_handler)
BaseController.__init__(self, view=self, keyactions=keyactions)
class GladeDelegate(BaseView, BaseController):
"""A class that combines view and controller functionality into a
single package. The Delegate class possesses a top-level window.
"""
def __init__(self, gladefile=None, toplevel_name=None, domain=None,
delete_handler=None, keyactions=None):
"""Creates a new GladeDelegate.
The keyactions parameter is sent to L{kiwi.controllers.BaseController},
the rest are sent to L{kiwi.ui.views.BaseView}
"""
BaseView.__init__(self,
gladefile=gladefile,
toplevel_name=toplevel_name,
domain=domain,
delete_handler=delete_handler)
BaseController.__init__(self, view=self, keyactions=keyactions)
class SlaveDelegate(SlaveView, BaseController):
"""A class that combines view and controller functionality into a
single package. It does not possess a top-level window, but is instead
intended to be plugged in to a View or Delegate using attach_slave().
"""
def __init__(self, toplevel=None, widgets=(), gladefile=None,
toplevel_name=None, keyactions=None):
"""
The keyactions parameter is sent to L{kiwi.controllers.BaseController},
the rest are sent to L{kiwi.ui.views.SlaveView}
"""
if gladefile:
deprecationwarn(
'gladefile is deprecated in Delegate, '
'use GladeSlaveDelegate instead',
stacklevel=3)
SlaveView.__init__(self, toplevel, widgets, gladefile,
toplevel_name)
BaseController.__init__(self, view=self, keyactions=keyactions)
class GladeSlaveDelegate(SlaveView, BaseController):
"""A class that combines view and controller functionality into a
single package. It does not possess a top-level window, but is instead
intended to be plugged in to a View or Delegate using attach_slave().
"""
def __init__(self, gladefile=None,
toplevel_name=None, domain=None,
keyactions=None):
"""
The keyactions parameter is sent to L{kiwi.controllers.BaseController},
        the rest are sent to L{kiwi.ui.views.SlaveView}
"""
SlaveView.__init__(self,
gladefile=gladefile,
toplevel_name=toplevel_name,
domain=domain)
BaseController.__init__(self, view=self, keyactions=keyactions)
class ProxyDelegate(Delegate):
"""A class that combines view, controller and proxy functionality into a
single package. The Delegate class possesses a top-level window.
@ivar model: the model
@ivar proxy: the proxy
"""
def __init__(self, model, proxy_widgets=None, gladefile=None,
toplevel=None, widgets=(),
toplevel_name=None, domain=None, delete_handler=None,
keyactions=None):
"""Creates a new Delegate.
@param model: instance to be attached
@param proxy_widgets:
The keyactions parameter is sent to L{kiwi.controllers.BaseController},
the rest are sent to L{kiwi.ui.views.BaseView}
"""
BaseView.__init__(self, toplevel, widgets, gladefile,
toplevel_name, domain,
delete_handler)
self.model = model
self.proxy = self.add_proxy(model, proxy_widgets)
self.proxy.proxy_updated = self.proxy_updated
BaseController.__init__(self, view=self, keyactions=keyactions)
def set_model(self, model):
"""
Set model.
@param model:
"""
self.proxy.set_model(model)
self.model = model
def proxy_updated(self, widget, attribute, value):
# Can be overriden in subclasses
pass
def update(self, attribute):
self.proxy.update(attribute)
class ProxySlaveDelegate(GladeSlaveDelegate):
"""A class that combines view, controller and proxy functionality into a
single package. It does not possess a top-level window, but is instead
intended to be plugged in to a View or Delegate using attach_slave()
@ivar model: the model
@ivar proxy: the proxy
"""
def __init__(self, model, proxy_widgets=None, gladefile=None,
toplevel_name=None, domain=None, keyactions=None):
"""Creates a new Delegate.
@param model: instance to be attached
@param proxy_widgets:
The keyactions parameter is sent to L{kiwi.controllers.BaseController},
the rest are sent to L{kiwi.ui.views.BaseView}
"""
GladeSlaveDelegate.__init__(self, gladefile, toplevel_name,
domain, keyactions)
self.model = model
self.proxy = self.add_proxy(model, proxy_widgets)
self.proxy.proxy_updated = self.proxy_updated
def set_model(self, model):
"""
Set model.
@param model:
"""
self.proxy.set_model(model)
self.model = model
def proxy_updated(self, widget, attribute, value):
# Can be overriden in subclasses
pass
def update(self, attribute):
self.proxy.update(attribute)
| gpl-3.0 | 7,819,308,312,624,857,000 | 37.784615 | 79 | 0.625942 | false | 4.187708 | false | false | false |
AA33/demo-project | travel_log/travel_log/urls.py | 1 | 1318 | __author__ = 'abhishekanurag'
from django.conf.urls import patterns, url
from django.views.generic.base import TemplateView
from travel_log import views
urlpatterns = patterns('',
# /travel_log
url(r'^$', views.index, name='index'),
# /travel_log/userlogin
url(r'^userlogin/$', views.userlogin, name='userlogin'),
# /travel_log/userlogout
url(r'^userlogout/$', views.userlogout, name='userlogout'),
# /travel_log/signup
url(r'^signup/$', views.signup, name='signup'),
# /travel_log/home
url(r'^home/$', views.home, name='home'),
# /travel_log/<trip_id>/view
url(r'^(?P<trip_id>\d+)/view/$', views.trip_view, name='view'),
# /travel_log/edit : For new trips
url(r'^edit/$', views.trip_edit, name='edit'),
# /travel_log/<trip_id>/edit
url(r'^(?P<trip_id>\d+)/edit/$', views.trip_edit, name='edit'),
# /travel_log/<trip_id>/delete
url(r'^(?P<trip_id>\d+)/delete/$', views.trip_delete, name='delete'),
)
| gpl-3.0 | 863,316,377,841,898,600 | 46.071429 | 92 | 0.460546 | false | 4.055385 | false | false | false |
engineIngmar/SCAPPi | APLog.py | 1 | 4257 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""APLog.py
logging script, taking snapshots of the system
dependencies:
sensor and actuator modules
"""
from sensors import DHT22, D18B20, HCSR04, LFM, BH1750
from dbControl import dbLog
import sqlite3 as lite
import datetime
from datetime import timedelta
import time
import sys, traceback
def main():
#set database name
dbName='APDatabase.sqlite'
dbLog.softwareLog(dbName,'APLog.py','script started')
time.sleep(1)
"""get options list per sensor type
Options_sensor table:
id type gpio gpio_trig gpio_echo w1_id bed_cm increment_sec i2c_id
"""
DHT22List = DHT22.getSensorList(dbName)
D18B20List = D18B20.getSensorList(dbName)
HCSR04List = HCSR04.getSensorList(dbName)
LFMList = LFM.getSensorList(dbName)
BH1750List = BH1750.getSensorList(dbName)
dbLog.softwareLog(dbName,'APLog.py','sensors imported (DHT22: %s, D18B20: %s, HCSR04: %s, LFM: %s, BH1750: %s)' % (len(DHT22List),len(D18B20List),len(HCSR04List),len(LFMList),len(BH1750List)))
time.sleep(1)
#set intitial log time
logTime = datetime.datetime.now()
"""read snapshot increment time in min from database"""
try:
db = lite.connect(dbName)
cursor = db.cursor()
cursor.execute("SELECT VALUE FROM Opt_general WHERE PARAMETER='snapshotIncrement'")
snapInc = int(cursor.fetchone()[0])
except Exception as e:
cursor.execute("INSERT INTO Log_software(MODULE,MESSAGE) VALUES('APLog.py Opt_general',?)",[repr(e)])
db.commit()
finally:
db.close()
"""start endless logging loop and wait for correct insert in database"""
dbLog.softwareLog(dbName,'APLog.py','start endless loop...')
time.sleep(1)
try:
while True:
try:
if logTime < datetime.datetime.now():
#read and log all D18B20 sensors
for row in D18B20List:
id = row[0]
temp = D18B20.readTemp(dbName,row[5])
D18B20.logSensorValue(dbName,id,round(temp,2))
#read and log all DHT22 sensors
for row in DHT22List:
id = row[0]
                        reading = DHT22.readTempHum(dbName,row[2])  # read the sensor once, reuse the pair
                        temp = reading[0]
                        hum = reading[1]
if (hum != None) and (temp != None):
DHT22.logSensorValue(dbName,id,round(temp,2),round(hum,2))
#read and log all HCSR04 sensors
for row in HCSR04List:
id = row[0]
level = HCSR04.readLevel(dbName,row[3],row[4],row[6])
HCSR04.logSensorValue(dbName,id,round(level,2))
#read and log all LFM sensors
for row in LFMList:
id = row[0]
flow = LFM.readFlow(dbName,row[2])
LFM.logSensorValue(dbName,id,round(flow,2))
"""read and log all BH1750 sensors"""
for row in BH1750List:
id = row[0]
light = BH1750.readLight(dbName,row[8])
BH1750.logSensorValue(dbName,id,round(light,2))
#set new logging time with general increment
logTime = logTime + timedelta(minutes=snapInc)
except TypeError as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
dbLog.softwareLog(dbName,'APLog.py','sensor error: ' + str(err))
"""pause to limit the cpu usage"""
time.sleep(0.1)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
dbLog.softwareLog(dbName,'APLog.py','critical error: ' + str(err))
if __name__ == '__main__':
main() | mit | 7,222,331,452,860,314,000 | 38.794393 | 196 | 0.548273 | false | 3.695313 | false | false | false |
shosca/django-rest-witchcraft | docs/conf.py | 1 | 5460 | #!/usr/bin/env python3
#
# Django REST Witchcraft documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 10 09:20:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
import django
import django.test.utils
from django.conf import settings
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("."))
settings.configure()
getattr(django, "setup", bool)()
django.test.utils.setup_test_environment()
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, "..", "rest_witchcraft", "__version__.py")) as f:
exec(f.read(), about) # yapf: disable
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx.ext.coverage"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Django REST Witchcraft"
copyright = "2017, Serkan Hosca"
author = "Serkan Hosca"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = about["__version__"]
# The full version, including alpha/beta/rc tags.
release = about["__version__"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "DjangoRESTWitchcraftdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "DjangoRESTWitchcraft.tex", "Django REST Witchcraft Documentation", "Serkan Hosca", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "djangorestwitchcraft", "Django REST Witchcraft Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"DjangoRESTWitchcraft",
"Django REST Witchcraft Documentation",
author,
"DjangoRESTWitchcraft",
"One line description of project.",
"Miscellaneous",
)
]
| mit | -7,051,495,529,688,536,000 | 31.694611 | 110 | 0.681319 | false | 3.84778 | true | false | false |
CalebBell/fluids | fluids/optional/pychebfun.py | 1 | 26906 | # -*- coding: utf-8 -*-
"""
Chebfun module
==============
Vendorized version from:
https://github.com/pychebfun/pychebfun/blob/master/pychebfun
The rationale for not including this library as a strict dependency is that
it has not been released.
.. moduleauthor :: Chris Swierczewski <[email protected]>
.. moduleauthor :: Olivier Verdier <[email protected]>
.. moduleauthor :: Gregory Potter <[email protected]>
The copyright notice (BSD-3 clause) is as follows:
Copyright 2017 Olivier Verdier
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import operator
from functools import wraps
import numpy as np
import numpy.polynomial as poly
from numpy.polynomial.chebyshev import cheb2poly, Chebyshev
from numpy.polynomial.polynomial import Polynomial
import sys
emach = sys.float_info.epsilon # machine epsilon
global sp_fftpack_ifft
sp_fftpack_ifft = None
def fftpack_ifft(*args, **kwargs):
global sp_fftpack_ifft
if sp_fftpack_ifft is None:
from scipy.fftpack import ifft as sp_fftpack_ifft
return sp_fftpack_ifft(*args, **kwargs)
global sp_fftpack_fft
sp_fftpack_fft = None
def fftpack_fft(*args, **kwargs):
global sp_fftpack_fft
if sp_fftpack_fft is None:
from scipy.fftpack import fft as sp_fftpack_fft
return sp_fftpack_fft(*args, **kwargs)
global sp_eigvals
sp_eigvals = None
def eigvals(*args, **kwargs):
global sp_eigvals
if sp_eigvals is None:
from scipy.linalg import eigvals as sp_eigvals
return sp_eigvals(*args, **kwargs)
global sp_toeplitz
sp_toeplitz = None
def toeplitz(*args, **kwargs):
global sp_toeplitz
if sp_toeplitz is None:
from scipy.linalg import toeplitz as sp_toeplitz
return sp_toeplitz(*args, **kwargs)
def build_pychebfun(f, domain, N=15):
fvec = lambda xs: [f(xi) for xi in xs]
return chebfun(f=fvec, domain=domain, N=N)
def build_solve_pychebfun(f, goal, domain, N=15, N_max=100, find_roots=2):
cache = {}
def cached_fun(x):
# Almost half the points are cached!
if x in cache:
return cache[x]
val = f(x)
cache[x] = val
return val
fun = build_pychebfun(cached_fun, domain, N=N)
roots = (fun - goal).roots()
while (len(roots) < find_roots and len(fun._values) < N_max):
N *= 2
fun = build_pychebfun(cached_fun, domain, N=N)
roots = (fun - goal).roots()
roots = [i for i in roots if domain[0] < i < domain[1]]
return roots, fun
def chebfun_to_poly(coeffs_or_fun, domain=None, text=False):
if isinstance(coeffs_or_fun, Chebfun):
coeffs = coeffs_or_fun.coefficients()
domain = coeffs_or_fun._domain
elif hasattr(coeffs_or_fun, '__class__') and coeffs_or_fun.__class__.__name__ == 'ChebyshevExpansion':
coeffs = coeffs_or_fun.coef()
domain = coeffs_or_fun.xmin(), coeffs_or_fun.xmax()
else:
coeffs = coeffs_or_fun
low, high = domain
    # Convert to the ordinary polynomial basis with cheb2poly, then reverse to descending order
poly_coeffs = cheb2poly(coeffs)[::-1].tolist()
if not text:
return poly_coeffs
s = 'coeffs = %s\n' %poly_coeffs
delta = high - low
delta_sum = high + low
# Generate the expression
s += 'horner(coeffs, %.18g*(x - %.18g))' %(2.0/delta, 0.5*delta_sum)
# return the string
return s
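# Usage sketch (illustrative only; the function, domain and N below are
# arbitrary examples, and nothing here is executed at import time):
#
#   fun = Chebfun.from_function(np.cos, domain=[0.0, 3.0])
#   coeffs = chebfun_to_poly(fun)   # descending-order polynomial coefficients
#   # evaluate at some x with fluids.numerics.horner:
#   #   horner(coeffs, 2.0/(3.0 - 0.0)*(x - 0.5*(3.0 + 0.0)))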
def cheb_to_poly(coeffs_or_fun, domain=None):
"""Just call horner on the outputs!"""
from fluids.numerics import horner as horner_poly
if isinstance(coeffs_or_fun, Chebfun):
coeffs = coeffs_or_fun.coefficients()
domain = coeffs_or_fun._domain
elif hasattr(coeffs_or_fun, '__class__') and coeffs_or_fun.__class__.__name__ == 'ChebyshevExpansion':
coeffs = coeffs_or_fun.coef()
domain = coeffs_or_fun.xmin(), coeffs_or_fun.xmax()
else:
coeffs = coeffs_or_fun
low, high = domain
coeffs = cheb2poly(coeffs)[::-1].tolist() # Convert to polynomial basis
# Mix in limits to make it a normal polynomial
my_poly = Polynomial([-0.5*(high + low)*2.0/(high - low), 2.0/(high - low)])
poly_coeffs = horner_poly(coeffs, my_poly).coef[::-1].tolist()
return poly_coeffs
def cheb_range_simplifier(low, high, text=False):
    '''Return the (constant, factor) pair that maps x in [low, high] onto the
    Chebyshev domain [-1, 1] via factor*(x + constant); with text=True, return
    the equivalent chebval call as a string.
>>> low, high = 0.0023046250851646434, 4.7088985707840125
>>> cheb_range_simplifier(low, high, text=True)
'chebval(0.42493574399544564724*(x + -2.3556015979345885647), coeffs)'
'''
constant = 0.5*(-low-high)
factor = 2.0/(high-low)
if text:
return 'chebval(%.20g*(x + %.20g), coeffs)' %(factor, constant)
return constant, factor
def cast_scalar(method):
"""Cast scalars to constant interpolating objects."""
@wraps(method)
def new_method(self, other):
if np.isscalar(other):
other = type(self)([other],self.domain())
return method(self, other)
return new_method
class Polyfun(object):
"""Construct a Lagrange interpolating polynomial over arbitrary points.
Polyfun objects consist in essence of two components: 1) An interpolant
on [-1,1], 2) A domain attribute [a,b]. These two pieces of information
are used to define and subsequently keep track of operations upon Chebyshev
interpolants defined on an arbitrary real interval [a,b].
"""
# ----------------------------------------------------------------
# Initialisation methods
# ----------------------------------------------------------------
class NoConvergence(Exception):
"""Raised when dichotomy does not converge."""
class DomainMismatch(Exception):
"""Raised when there is an interval mismatch."""
@classmethod
def from_data(self, data, domain=None):
"""Initialise from interpolation values."""
return self(data,domain)
@classmethod
def from_fun(self, other):
"""Initialise from another instance."""
return self(other.values(),other.domain())
@classmethod
def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.):
"""
Initialise from provided coefficients
prune: Whether to prune the negligible coefficients
vscale: the scale to use when pruning
"""
coeffs = np.asarray(chebcoeff)
if prune:
N = self._cutoff(coeffs, vscale)
pruned_coeffs = coeffs[:N]
else:
pruned_coeffs = coeffs
values = self.polyval(pruned_coeffs)
return self(values, domain, vscale)
@classmethod
def dichotomy(self, f, kmin=2, kmax=12, raise_no_convergence=True,):
"""Compute the coefficients for a function f by dichotomy.
kmin, kmax: log2 of number of interpolation points to try
raise_no_convergence: whether to raise an exception if the dichotomy does not converge
"""
for k in range(kmin, kmax):
N = pow(2, k)
sampled = self.sample_function(f, N)
coeffs = self.polyfit(sampled)
# 3) Check for negligible coefficients
# If within bound: get negligible coeffs and bread
bnd = self._threshold(np.max(np.abs(coeffs)))
last = abs(coeffs[-2:])
if np.all(last <= bnd):
break
else:
if raise_no_convergence:
raise self.NoConvergence(last, bnd)
return coeffs
@classmethod
def from_function(self, f, domain=None, N=None):
"""Initialise from a function to sample.
N: optional parameter which indicates the range of the dichotomy
"""
# rescale f to the unit domain
domain = self.get_default_domain(domain)
a,b = domain[0], domain[-1]
map_ui_ab = lambda t: 0.5*(b-a)*t + 0.5*(a+b)
args = {'f': lambda t: f(map_ui_ab(t))}
if N is not None: # N is provided
nextpow2 = int(np.log2(N))+1
args['kmin'] = nextpow2
args['kmax'] = nextpow2+1
args['raise_no_convergence'] = False
else:
args['raise_no_convergence'] = True
# Find out the right number of coefficients to keep
coeffs = self.dichotomy(**args)
return self.from_coeff(coeffs, domain)
@classmethod
def _threshold(self, vscale):
"""Compute the threshold at which coefficients are trimmed."""
bnd = 128*emach*vscale
return bnd
@classmethod
def _cutoff(self, coeffs, vscale):
"""Compute cutoff index after which the coefficients are deemed
negligible."""
bnd = self._threshold(vscale)
inds = np.nonzero(abs(coeffs) >= bnd)
if len(inds[0]):
N = inds[0][-1]
else:
N = 0
return N+1
def __init__(self, values=0., domain=None, vscale=None):
"""Init an object from values at interpolation points.
values: Interpolation values
vscale: The actual vscale; computed automatically if not given
"""
avalues = np.asarray(values,)
avalues1 = np.atleast_1d(avalues)
N = len(avalues1)
points = self.interpolation_points(N)
self._values = avalues1
if vscale is not None:
self._vscale = vscale
else:
self._vscale = np.max(np.abs(self._values))
self.p = self.interpolator(points, avalues1)
domain = self.get_default_domain(domain)
self._domain = np.array(domain)
a,b = domain[0], domain[-1]
# maps from [-1,1] <-> [a,b]
self._ab_to_ui = lambda x: (2.0*x-a-b)/(b-a)
self._ui_to_ab = lambda t: 0.5*(b-a)*t + 0.5*(a+b)
def same_domain(self, fun2):
"""Returns True if the domains of two objects are the same."""
return np.allclose(self.domain(), fun2.domain(), rtol=1e-14, atol=1e-14)
# ----------------------------------------------------------------
# String representations
# ----------------------------------------------------------------
def __repr__(self):
"""Display method."""
a, b = self.domain()
vals = self.values()
return (
'%s \n '
' domain length endpoint values\n '
' [%5.1f, %5.1f] %5d %5.2f %5.2f\n '
'vscale = %1.2e') % (
str(type(self)).split('.')[-1].split('>')[0][:-1],
a,b,self.size(),vals[-1],vals[0],self._vscale,)
def __str__(self):
return "<{0}({1})>".format(
str(type(self)).split('.')[-1].split('>')[0][:-1],self.size(),)
# ----------------------------------------------------------------
# Basic Operator Overloads
# ----------------------------------------------------------------
def __call__(self, x):
return self.p(self._ab_to_ui(x))
def __getitem__(self, s):
"""Components s of the fun."""
return self.from_data(self.values().T[s].T)
def __bool__(self):
"""Test for difference from zero (up to tolerance)"""
return not np.allclose(self.values(), 0)
__nonzero__ = __bool__
def __eq__(self, other):
return not(self - other)
def __ne__(self, other):
return not (self == other)
@cast_scalar
def __add__(self, other):
"""Addition."""
if not self.same_domain(other):
raise self.DomainMismatch(self.domain(),other.domain())
ps = [self, other]
# length difference
diff = other.size() - self.size()
# determine which of self/other is the smaller/bigger
big = diff > 0
small = not big
# pad the coefficients of the small one with zeros
small_coeffs = ps[small].coefficients()
big_coeffs = ps[big].coefficients()
padded = np.zeros_like(big_coeffs)
padded[:len(small_coeffs)] = small_coeffs
# add the values and create a new object with them
chebsum = big_coeffs + padded
new_vscale = np.max([self._vscale, other._vscale])
return self.from_coeff(
chebsum, domain=self.domain(), vscale=new_vscale
)
__radd__ = __add__
@cast_scalar
def __sub__(self, other):
"""Subtraction."""
return self + (-other)
def __rsub__(self, other):
return -(self - other)
def __rmul__(self, other):
return self.__mul__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __neg__(self):
"""Negation."""
return self.from_data(-self.values(),domain=self.domain())
def __abs__(self):
return self.from_function(lambda x: abs(self(x)),domain=self.domain())
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
def size(self):
return self.p.n
def coefficients(self):
return self.polyfit(self.values())
def values(self):
return self._values
def domain(self):
return self._domain
# ----------------------------------------------------------------
# Integration and differentiation
# ----------------------------------------------------------------
def integrate(self):
raise NotImplementedError()
def differentiate(self):
raise NotImplementedError()
def dot(self, other):
r"""Return the Hilbert scalar product :math:`\int f.g`."""
prod = self * other
return prod.sum()
def norm(self):
"""
Return: square root of scalar product with itself.
"""
norm = np.sqrt(self.dot(self))
return norm
# ----------------------------------------------------------------
# Miscellaneous operations
# ----------------------------------------------------------------
def restrict(self,subinterval):
"""Return a Polyfun that matches self on subinterval."""
if (subinterval[0] < self._domain[0]) or (subinterval[1] > self._domain[1]):
raise ValueError("Can only restrict to subinterval")
return self.from_function(self, subinterval)
# ----------------------------------------------------------------
# Class method aliases
# ----------------------------------------------------------------
diff = differentiate
cumsum = integrate
class Chebfun(Polyfun):
"""Eventually set this up so that a Chebfun is a collection of Chebfuns.
This will enable piecewise smooth representations à la Matlab Chebfun v2.0.
"""
# ----------------------------------------------------------------
# Standard construction class methods.
# ----------------------------------------------------------------
@classmethod
def get_default_domain(self, domain=None):
if domain is None:
return [-1., 1.]
else:
return domain
@classmethod
def identity(self, domain=[-1., 1.]):
"""The identity function x -> x."""
return self.from_data([domain[1],domain[0]], domain)
@classmethod
def basis(self, n):
"""Chebyshev basis functions T_n."""
if n == 0:
return self(np.array([1.]))
vals = np.ones(n+1)
vals[1::2] = -1
return self(vals)
# ----------------------------------------------------------------
# Integration and differentiation
# ----------------------------------------------------------------
def sum(self):
"""Evaluate the integral over the given interval using Clenshaw-Curtis
quadrature."""
ak = self.coefficients()
ak2 = ak[::2]
n = len(ak2)
Tints = 2/(1-(2*np.arange(n))**2)
val = np.sum((Tints*ak2.T).T, axis=0)
a_, b_ = self.domain()
return 0.5*(b_-a_)*val
def integrate(self):
"""Return the object representing the primitive of self over the domain.
The output starts at zero on the left-hand side of the domain.
"""
coeffs = self.coefficients()
a,b = self.domain()
int_coeffs = 0.5*(b-a)*poly.chebyshev.chebint(coeffs)
antiderivative = self.from_coeff(int_coeffs, domain=self.domain())
return antiderivative - antiderivative(a)
def differentiate(self, n=1):
"""n-th derivative, default 1."""
ak = self.coefficients()
a_, b_ = self.domain()
for _ in range(n):
ak = self.differentiator(ak)
return self.from_coeff((2./(b_-a_))**n*ak, domain=self.domain())
# ----------------------------------------------------------------
# Roots
# ----------------------------------------------------------------
def roots(self):
"""Utilises Boyd's O(n^2) recursive subdivision algorithm.
The chebfun
is recursively subsampled until it is successfully represented to
machine precision by a sequence of piecewise interpolants of degree
100 or less. A colleague matrix eigenvalue solve is then applied to
each of these pieces and the results are concatenated.
See:
J. P. Boyd, Computing zeros on a real interval through Chebyshev
expansion and polynomial rootfinding, SIAM J. Numer. Anal., 40
(2002), pp. 1666–1682.
"""
if self.size() == 1:
return np.array([])
elif self.size() <= 100:
ak = self.coefficients()
v = np.zeros_like(ak[:-1])
v[1] = 0.5
C1 = toeplitz(v)
C2 = np.zeros_like(C1)
C1[0,1] = 1.
C2[-1,:] = ak[:-1]
C = C1 - .5/ak[-1] * C2
eigenvalues = eigvals(C)
roots = [eig.real for eig in eigenvalues
if np.allclose(eig.imag,0,atol=1e-10)
and np.abs(eig.real) <=1]
scaled_roots = self._ui_to_ab(np.array(roots))
return scaled_roots
else:
try:
# divide at a close-to-zero split-point
split_point = self._ui_to_ab(0.0123456789)
return np.concatenate(
(self.restrict([self._domain[0],split_point]).roots(),
self.restrict([split_point,self._domain[1]]).roots()))
except Exception:
# Seems to have many fake roots for high degree fits
coeffs = self.coefficients()
domain = self._domain
possibilities = Chebyshev(coeffs, domain).roots()
return np.array([float(i.real) for i in possibilities if i.imag == 0.0])
# ----------------------------------------------------------------
# Interpolation and evaluation (go from values to coefficients)
# ----------------------------------------------------------------
@classmethod
def interpolation_points(self, N):
"""N Chebyshev points in [-1, 1], boundaries included."""
if N == 1:
return np.array([0.])
return np.cos(np.arange(N)*np.pi/(N-1))
@classmethod
def sample_function(self, f, N):
"""Sample a function on N+1 Chebyshev points."""
x = self.interpolation_points(N+1)
return f(x)
@classmethod
def polyfit(self, sampled):
"""Compute Chebyshev coefficients for values located on Chebyshev
points.
sampled: array; first dimension is number of Chebyshev points
"""
asampled = np.asarray(sampled)
if len(asampled) == 1:
return asampled
evened = even_data(asampled)
coeffs = dct(evened)
return coeffs
@classmethod
def polyval(self, chebcoeff):
"""Compute the interpolation values at Chebyshev points.
chebcoeff: Chebyshev coefficients
"""
N = len(chebcoeff)
if N == 1:
return chebcoeff
data = even_data(chebcoeff)/2
data[0] *= 2
data[N-1] *= 2
fftdata = 2*(N-1)*fftpack_ifft(data, axis=0)
complex_values = fftdata[:N]
# convert to real if input was real
if np.isrealobj(chebcoeff):
values = np.real(complex_values)
else:
values = complex_values
return values
@classmethod
def interpolator(self, x, values):
"""Returns a polynomial with vector coefficients which interpolates the
values at the Chebyshev points x."""
# hacking the barycentric interpolator by computing the weights in advance
from scipy.interpolate import BarycentricInterpolator as Bary
p = Bary([0.])
N = len(values)
weights = np.ones(N)
weights[0] = .5
weights[1::2] = -1
weights[-1] *= .5
p.wi = weights
p.xi = x
p.set_yi(values)
return p
# ----------------------------------------------------------------
# Helper for differentiation.
# ----------------------------------------------------------------
@classmethod
def differentiator(self, A):
"""Differentiate a set of Chebyshev polynomial expansion coefficients
Originally from http://www.scientificpython.net/pyblog/chebyshev-differentiation
+ (lots of) bug fixing + pythonisation
"""
m = len(A)
SA = (A.T* 2*np.arange(m)).T
DA = np.zeros_like(A)
if m == 1: # constant
return np.zeros_like(A[0:1])
if m == 2: # linear
return A[1:2,]
DA[m-3:m-1,] = SA[m-2:m,]
for j in range(m//2 - 1):
k = m-3-2*j
DA[k] = SA[k+1] + DA[k+2]
DA[k-1] = SA[k] + DA[k+1]
DA[0] = (SA[1] + DA[2])*0.5
return DA
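# --- Illustrative usage sketch (added for clarity; not part of the original class) ---
# A minimal, hedged example of how the methods above fit together: build an
# approximation from a callable, integrate it with the Clenshaw-Curtis rule in
# `sum`, differentiate it, and locate zeros with the colleague-matrix solver in
# `roots`. The test function and domain are arbitrary choices for illustration.
def _chebfun_usage_sketch():
    f = Chebfun.from_function(np.sin, [0., np.pi])
    integral = f.sum()               # expected to be close to 2.0
    derivative = f.differentiate()   # should approximate cos on [0, pi]
    zeros = f.roots()                # should contain 0 and pi up to rounding
    return integral, derivative(np.pi / 2), zeros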
# ----------------------------------------------------------------
# General utilities
# ----------------------------------------------------------------
def even_data(data):
"""
Construct Extended Data Vector (equivalent to creating an
even extension of the original function)
Return: array of length 2(N-1)
For instance, [0,1,2,3,4] --> [0,1,2,3,4,3,2,1]
"""
return np.concatenate([data, data[-2:0:-1]],)
def dct(data):
"""Compute DCT using FFT."""
N = len(data)//2
fftdata = fftpack_fft(data, axis=0)[:N+1]
fftdata /= N
fftdata[0] /= 2.
fftdata[-1] /= 2.
if np.isrealobj(data):
data = np.real(fftdata)
else:
data = fftdata
return data
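# --- Illustrative round-trip sketch (added for clarity; not part of the original module) ---
# A hedged sanity check of the value <-> coefficient path defined above: values
# on Chebyshev points go through `even_data` + `dct` (via `Chebfun.polyfit`)
# and come back through `Chebfun.polyval`. The sample size of 8 points is an
# arbitrary choice for illustration.
def _dct_roundtrip_sketch():
    x = Chebfun.interpolation_points(8)
    values = np.exp(x)
    coeffs = Chebfun.polyfit(values)     # coefficients via the FFT-based DCT
    recovered = Chebfun.polyval(coeffs)  # back to values at the same points
    return np.allclose(values, recovered)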
# ----------------------------------------------------------------
# Add overloaded operators
# ----------------------------------------------------------------
def _add_operator(cls, op):
def method(self, other):
if not self.same_domain(other):
raise self.DomainMismatch(self.domain(), other.domain())
return self.from_function(
lambda x: op(self(x).T, other(x).T).T, domain=self.domain(), )
cast_method = cast_scalar(method)
name = '__'+op.__name__+'__'
cast_method.__name__ = name
cast_method.__doc__ = "operator {}".format(name)
setattr(cls, name, cast_method)
def rdiv(a, b):
return b/a
for _op in [operator.mul, operator.truediv, operator.pow, rdiv]:
_add_operator(Polyfun, _op)
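# --- Illustrative note (added for clarity; not part of the original module) ---
# With the operators attached above, arithmetic between Polyfun objects on the
# same domain acts pointwise and the result is re-expanded adaptively. A
# minimal, hedged check of the product operator:
def _operator_usage_sketch():
    f = Chebfun.from_function(np.sin)
    g = Chebfun.from_function(np.cos)
    h = f * g                        # pointwise product via the __mul__ added above
    x = np.linspace(-1., 1., 5)
    return np.allclose(h(x), np.sin(x) * np.cos(x))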
# ----------------------------------------------------------------
# Add numpy ufunc delegates
# ----------------------------------------------------------------
def _add_delegate(ufunc, nonlinear=True):
def method(self):
return self.from_function(lambda x: ufunc(self(x)), domain=self.domain())
name = ufunc.__name__
method.__name__ = name
method.__doc__ = "delegate for numpy's ufunc {}".format(name)
setattr(Polyfun, name, method)
# Following list generated from:
# https://github.com/numpy/numpy/blob/master/numpy/core/code_generators/generate_umath.py
for func in [np.arccos, np.arccosh, np.arcsin, np.arcsinh, np.arctan, np.arctanh, np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh, np.exp, np.exp2, np.expm1, np.log, np.log2, np.log1p, np.sqrt, np.ceil, np.trunc, np.fabs, np.floor, ]:
_add_delegate(func)
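# --- Illustrative note (added for clarity; not part of the original module) ---
# The loop above turns elementwise numpy ufuncs into Polyfun methods, so for
# instance `.exp()` re-expands exp(f(x)) adaptively. A minimal, hedged check:
def _delegate_usage_sketch():
    g = Chebfun.identity().exp()     # exp(x) on the default domain [-1, 1]
    x = np.linspace(-1., 1., 5)
    return np.allclose(g(x), np.exp(x))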
# ----------------------------------------------------------------
# General Aliases
# ----------------------------------------------------------------
## chebpts = interpolation_points
# ----------------------------------------------------------------
# Constructor inspired by the Matlab version
# ----------------------------------------------------------------
def chebfun(f=None, domain=[-1,1], N=None, chebcoeff=None,):
"""Create a Chebyshev polynomial approximation of the function $f$ on the
interval :math:`[-1, 1]`.
:param callable f: Python, Numpy, or Sage function
:param int N: (default = None) specify number of interpolating points
:param np.array chebcoeff: (default = np.array(0)) specify the coefficients
"""
# Chebyshev coefficients
if chebcoeff is not None:
return Chebfun.from_coeff(chebcoeff, domain)
# another instance
if isinstance(f, Polyfun):
return Chebfun.from_fun(f)
# callable
if hasattr(f, '__call__'):
return Chebfun.from_function(f, domain, N)
# from here on, assume that f is None, or iterable
if np.isscalar(f):
f = [f]
try:
iter(f) # interpolation values provided
except TypeError:
pass
else:
return Chebfun(f, domain)
raise TypeError('Impossible to initialise the object from an object of type {}'.format(type(f))) | mit | 1,684,557,532,057,880,000 | 33.449424 | 755 | 0.549249 | false | 3.946025 | false | false | false |
CoolCloud/ansible | lib/ansible/executor/task_executor.py | 19 | 23445 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pipes
import subprocess
import sys
import time
from jinja2.runtime import Undefined
from six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode
from ansible.vars.unsafe_proxy import UnsafeProxy
from ansible.utils.debug import debug
__all__ = ['TaskExecutor']
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
# Modules that we optimize by squashing loop items into a single call to
# the module
SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
try:
from __main__ import display
self._display = display
except ImportError:
from ansible.utils.display import Display
self._display = Display()
def run(self):
'''
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with the loop items or executes it directly.
'''
debug("in run()")
try:
# lookup plugins need to know if this task is executing from
# a role, so that it can properly find files/templates/etc.
roledir = None
if self._task._role:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
# loop through the item results, and remember the changed/failed
# result flags based on any item there.
changed = False
failed = False
for item in item_results:
if 'changed' in item and item['changed']:
changed = True
if 'failed' in item and item['failed']:
failed = True
# create the overall result item, and set the changed/failed
# flags there to reflect the overall result of the loop
res = dict(results=item_results)
if changed:
res['changed'] = True
if failed:
res['failed'] = True
res['msg'] = 'One or more items failed'
else:
res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
debug("calling self._execute()")
res = self._execute()
debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
def _clean_res(res):
if isinstance(res, dict):
for k in res.keys():
res[k] = _clean_res(res[k])
elif isinstance(res, list):
for idx,item in enumerate(res):
res[idx] = _clean_res(item)
elif isinstance(res, UnsafeProxy):
return res._obj
return res
debug("dumping result to json")
res = _clean_res(res)
debug("done dumping result, returning")
return res
except AnsibleError as e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
finally:
try:
self._connection.close()
except AttributeError:
pass
except Exception as e:
debug("error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
# create a copy of the job vars here so that we can modify
# them temporarily without changing them too early for other
# parts of the code that might still need a pristine version
vars_copy = self._job_vars.copy()
# now we update them with the play context vars
self._play_context.update_vars(vars_copy)
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy)
items = None
if self._task.loop:
if self._task.loop in self._shared_loader_obj.lookup_loader:
#TODO: remove convert_bare true and deprecate this in with_
try:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
if 'has no attribute' in str(e):
loop_terms = []
self._display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
else:
raise
items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
if items:
from ansible.vars.unsafe_proxy import UnsafeProxy
for idx, item in enumerate(items):
if item is not None and not isinstance(item, UnsafeProxy):
items[idx] = UnsafeProxy(item)
return items
def _run_loop(self, items):
'''
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
'''
results = []
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
task_vars = self._job_vars.copy()
items = self._squash_items(items, task_vars)
for item in items:
task_vars['item'] = item
try:
tmp_task = self._task.copy()
tmp_play_context = self._play_context.copy()
except AnsibleParserError as e:
results.append(dict(failed=True, msg=str(e)))
continue
# now we swap the internal task and play context with their copies,
# execute, and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
# now update the result with the item info, and append the result
# to the list of results
res['item'] = item
results.append(res)
return results
def _squash_items(self, items, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS:
final_items = []
name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
for item in items:
variables['item'] = item
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
if self._task.evaluate_conditional(templar, variables):
if templar._contains_vars(name):
new_item = templar.template(name)
final_items.append(new_item)
else:
final_items.append(item)
joined_items = ",".join(final_items)
self._task.args['name'] = joined_items
return [joined_items]
else:
return items
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
'''
if variables is None:
variables = self._job_vars
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
self._play_context.update_vars(variables)
self._play_context.post_validate(templar=templar)
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
if not self._task.evaluate_conditional(templar, variables):
debug("when evaulation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
# Now we do final validation on the task, which sets all fields to their final values.
# In the case of debug tasks, we save any 'var' params and restore them after validating
# so that variables are not replaced too early.
prev_var = None
if self._task.action == 'debug' and 'var' in self._task.args:
prev_var = self._task.args.pop('var')
original_args = self._task.args.copy()
self._task.post_validate(templar=templar)
if '_variable_params' in self._task.args:
variable_params = self._task.args.pop('_variable_params')
if isinstance(variable_params, dict):
self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
variable_params.update(self._task.args)
self._task.args = variable_params
if prev_var is not None:
self._task.args['var'] = prev_var
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action == 'include':
include_variables = original_args
include_file = include_variables.get('_raw_params')
del include_variables['_raw_params']
return dict(include=include_file, include_variables=include_variables)
# get the connection and the handler for this execution
self._connection = self._get_connection(variables)
self._connection.set_host_overrides(host=self._host)
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)
# Read some values from the task, so that we can modify them if need be
retries = self._task.retries
if retries <= 0:
retries = 1
delay = self._task.delay
if delay < 0:
delay = 1
# make a copy of the job vars here, in case we need to update them
# with the registered variable value later on when testing conditions
vars_copy = variables.copy()
debug("starting attempt loop")
result = None
for attempt in range(retries):
if attempt > 0:
# FIXME: this should use the callback/message passing mechanism
print("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result))
result['attempts'] = attempt + 1
debug("running the handler")
try:
result = self._handler.run(task_vars=variables)
except AnsibleConnectionFailure as e:
return dict(unreachable=True, msg=str(e))
debug("handler run complete")
if self._task.async > 0:
# the async_wrapper module returns dumped JSON via its stdout
# response, so we parse it here and replace the result
try:
result = json.loads(result.get('stdout'))
except (TypeError, ValueError) as e:
return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
if self._task.poll > 0:
result = self._poll_async_result(result=result, templar=templar)
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
vars_copy[self._task.register] = result
if 'ansible_facts' in result:
vars_copy.update(result['ansible_facts'])
# create a conditional object to evaluate task conditions
cond = Conditional(loader=self._loader)
def _evaluate_changed_when_result(result):
if self._task.changed_when is not None:
cond.when = [ self._task.changed_when ]
result['changed'] = cond.evaluate_conditional(templar, vars_copy)
def _evaluate_failed_when_result(result):
if self._task.failed_when is not None:
cond.when = [ self._task.failed_when ]
failed_when_result = cond.evaluate_conditional(templar, vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
return failed_when_result
return False
if self._task.until:
cond.when = self._task.until
if cond.evaluate_conditional(templar, vars_copy):
_evaluate_changed_when_result(result)
_evaluate_failed_when_result(result)
break
elif (self._task.changed_when is not None or self._task.failed_when is not None) and 'skipped' not in result:
_evaluate_changed_when_result(result)
if _evaluate_failed_when_result(result):
break
elif 'failed' not in result:
if result.get('rc', 0) != 0:
result['failed'] = True
else:
# if the result is not failed, stop trying
break
if attempt < retries - 1:
time.sleep(delay)
else:
_evaluate_changed_when_result(result)
_evaluate_failed_when_result(result)
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
variables[self._task.register] = result
if 'ansible_facts' in result:
variables.update(result['ansible_facts'])
# save the notification target in the result, if it was specified, as
# this task may be running in a loop in which case the notification
# may be item-specific, ie. "notify: service {{item}}"
if self._task.notify is not None:
result['_ansible_notify'] = self._task.notify
# and return
debug("attempt loop complete, returning result")
return result
def _poll_async_result(self, result, templar):
'''
Polls for the specified JID to be complete
'''
async_jid = result.get('ansible_job_id')
if async_jid is None:
return dict(failed=True, msg="No job id was returned by the async task")
# Create a new pseudo-task to run the async_status module, and run
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
async_task = Task().load(dict(action='async_status jid=%s' % async_jid))
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
normal_handler = self._shared_loader_obj.action_loader.get(
'normal',
task=async_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
time_left = self._task.async
while time_left > 0:
time.sleep(self._task.poll)
async_result = normal_handler.run()
if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
break
time_left -= self._task.poll
if int(async_result.get('finished', 0)) != 1:
return dict(failed=True, msg="async task did not complete within the requested time")
else:
return async_result
def _get_connection(self, variables):
'''
Reads the connection property for the host, and returns the
correct connection object from the list of connection plugins
'''
# FIXME: calculation of connection params/auth stuff should be done here
if not self._play_context.remote_addr:
self._play_context.remote_addr = self._host.address
if self._task.delegate_to is not None:
# since we're delegating, we don't want to use interpreter values
# which would have been set for the original target host
for i in variables.keys():
if i.startswith('ansible_') and i.endswith('_interpreter'):
del variables[i]
# now replace the interpreter values with those that may have come
# from the delegated-to host
delegated_vars = variables.get('ansible_delegated_vars', dict())
if isinstance(delegated_vars, dict):
for i in delegated_vars:
if i.startswith("ansible_") and i.endswith("_interpreter"):
variables[i] = delegated_vars[i]
conn_type = self._play_context.connection
if conn_type == 'smart':
conn_type = 'ssh'
if sys.platform.startswith('darwin') and self._play_context.password:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when an SSH password is specified
conn_type = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
try:
cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if "Bad configuration option" in err or "Usage:" in err:
conn_type = "paramiko"
except OSError:
conn_type = "paramiko"
connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
return connection
def _get_action_handler(self, connection, templar):
'''
Returns the correct action plugin to handle the requested task action
'''
if self._task.action in self._shared_loader_obj.action_loader:
if self._task.async != 0:
raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
handler_name = self._task.action
elif self._task.async == 0:
handler_name = 'normal'
else:
handler_name = 'async'
handler = self._shared_loader_obj.action_loader.get(
handler_name,
task=self._task,
connection=connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
if not handler:
raise AnsibleError("the handler '%s' was not found" % handler_name)
return handler
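# --- Illustrative sketch (added for clarity; not part of Ansible itself) ---
# A minimal, standalone rendering of the retry/until control flow implemented
# in TaskExecutor._execute() above: the task runs up to `retries` times,
# sleeping `delay` seconds between attempts, and stops early once the `until`
# condition evaluates true. All names below are hypothetical and exist only to
# make the loop shape explicit.
def _retry_until_sketch(run_once, until, retries=3, delay=5):
    if retries <= 0:
        retries = 1
    if delay < 0:
        delay = 1
    result = None
    for attempt in range(retries):
        result = run_once()
        if until(result):
            break
        if attempt < retries - 1:
            time.sleep(delay)
    return result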
| gpl-3.0 | 6,273,686,950,557,298,000 | 41.93956 | 169 | 0.585882 | false | 4.571066 | false | false | false |
foursquare/pants | src/python/pants/pantsd/service/pailgun_service.py | 1 | 2742 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import select
from contextlib import contextmanager
from pants.pantsd.pailgun_server import PailgunServer
from pants.pantsd.service.pants_service import PantsService
class PailgunService(PantsService):
"""A service that runs the Pailgun server."""
def __init__(self, bind_addr, runner_class, scheduler_service):
"""
:param tuple bind_addr: The (hostname, port) tuple to bind the Pailgun server to.
:param class runner_class: The `PantsRunner` class to be used for Pailgun runs.
:param SchedulerService scheduler_service: The SchedulerService instance for access to the
resident scheduler.
"""
super(PailgunService, self).__init__()
self._bind_addr = bind_addr
self._runner_class = runner_class
self._scheduler_service = scheduler_service
self._logger = logging.getLogger(__name__)
self._pailgun = None
@property
def pailgun(self):
if not self._pailgun:
self._pailgun = self._setup_pailgun()
return self._pailgun
@property
def pailgun_port(self):
return self.pailgun.server_port
def _setup_pailgun(self):
"""Sets up a PailgunServer instance."""
# Constructs and returns a runnable PantsRunner.
def runner_factory(sock, arguments, environment):
return self._runner_class.create(
sock,
arguments,
environment,
self.fork_lock,
self._scheduler_service
)
# Plumb the daemon's lifecycle lock to the `PailgunServer` to safeguard teardown.
@contextmanager
def lifecycle_lock():
with self.lifecycle_lock:
yield
return PailgunServer(self._bind_addr, runner_factory, lifecycle_lock)
def run(self):
"""Main service entrypoint. Called via Thread.start() via PantsDaemon.run()."""
self._logger.info('starting pailgun server on port {}'.format(self.pailgun_port))
try:
# Manually call handle_request() in a loop vs serve_forever() for interruptability.
while not self.is_killed:
self.pailgun.handle_request()
except select.error:
# SocketServer can throw `error: (9, 'Bad file descriptor')` on teardown. Ignore it.
self._logger.warning('pailgun service shutting down')
def terminate(self):
"""Override of PantsService.terminate() that cleans up when the Pailgun server is terminated."""
# Tear down the Pailgun TCPServer.
if self.pailgun:
self.pailgun.server_close()
super(PailgunService, self).terminate()
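# --- Illustrative note (added for clarity; not part of the original service) ---
# A hedged, generic rendering of the interruptible-serve pattern used in
# PailgunService.run() above: handle_request() serves one request at a time,
# so a kill flag can be re-checked between requests, which a blocking
# serve_forever() call would not allow. The names below are illustrative only.
def _interruptible_serve(server, should_stop):
    while not should_stop():
        server.handle_request()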
| apache-2.0 | -304,092,088,776,224,960 | 32.851852 | 100 | 0.688913 | false | 3.883853 | false | false | false |
abbec/pantry | pantry/v1/targets.py | 1 | 2434 | import flask
import jsonschema
import pantry.common.pantry_error as perror
import pantry.v1.backend as backend
targets_blueprint = flask.Blueprint("targets", __name__)
@targets_blueprint.route('/targets/', methods=['GET'])
def list_targets():
return flask.jsonify(
{"targets": backend.get_targets(flask.request.args)})
@targets_blueprint.route('/targets/<int:target_id>/', methods=['GET'])
def get_target(target_id):
result = backend.get_target(target_id, flask.request.args)
if not result:
raise perror.PantryError(
"Could not find target with id {}".format(target_id),
status_code=404)
print(result)
return flask.jsonify(result)
@targets_blueprint.route('/targets/', methods=['POST'])
def create_target():
json_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"hostname": {
"type": "string"
},
"nickname": {
"type": "string"
},
"description": {
"type": "string"
},
"maintainer": {
"type": "string"
},
"healthPercent": {
"type": "number"
},
"active": {
"type": "boolean"
},
"tags": {
"type": "array"
}
},
"required": [
"hostname",
"description",
"maintainer"
]
}
content = flask.request.get_json(force=True)
# validate the provided json
try:
jsonschema.validate(content, json_schema)
except jsonschema.ValidationError as e:
raise perror.PantryError("invalid data: {}".format(e.message),
status_code=400)
# construct response with correct location header
target_id = backend.create_target(content)
r = flask.jsonify(backend.get_target(target_id))
r.headers['Location'] = "/targets/{}".format(target_id)
r.status_code = 201
return r
@targets_blueprint.route('/targets/<int:target_id>/', methods=['DELETE'])
def delete_target(target_id):
result = backend.delete_target(target_id)
if not result:
raise perror.PantryError(
f"Could not find target with id {target_id}",
status_code=404)
return "", 204
| bsd-2-clause | -5,981,000,948,568,151,000 | 24.621053 | 73 | 0.54355 | false | 4.070234 | false | false | false |
danfairs/kral | kral/services/reddit.py | 1 | 2511 | # -*- coding: utf-8 -*-
from eventlet.greenthread import sleep
from eventlet.green import urllib2
import simplejson as json
from collections import defaultdict
import urllib
from kral.utils import fetch_json
def stream(queries, queue, settings, kral_start_time):
api_url = "http://www.reddit.com/search.json?"
prev_items = defaultdict(list)
user_agent = settings.get('DEFAULT', 'user_agent', '')
while True:
for query in queries:
p = {
'q' : query,
'sort' : settings.get('Reddit', 'orderby', 'relevance'),
}
url = api_url + urllib.urlencode(p)
request = urllib2.Request(url)
if user_agent:
request.add_header('User-agent', user_agent)
response = fetch_json(request)
if not response:
sleep(5)
break
if 'data' in response and 'children' in response['data']:
#api returns back 25 items
for item in response['data']['children']:
item_id = item['data']['id']
#if we've seen this item in the last 50 items skip it
if item_id not in prev_items[query]:
post = {
'service' : 'reddit',
'query' : query,
'user' : {
'name' : item['data']['author'],
},
'id' : item_id,
'date' : item['data']['created_utc'],
'text' : item['data']['title'],
'source' : item['data']['url'],
'likes': item['data'].get('likes', 0),
'dislikes': item['data'].get('downs', 0),
'comments': item['data'].get('num_comments', 0),
'favorites': item['data'].get('saved', 0),
}
queue.put(post)
prev_items[query].append(item_id)
#keep dupe buffer 50 items long
#TODO: look into using deque with maxlength
prev_items[query] = prev_items[query][:50]
sleep(30)
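# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The TODO above mentions collections.deque: a hedged sketch of how the 50-item
# duplicate buffer could look with deque(maxlen=...), which discards the oldest
# ids automatically instead of slicing the list by hand.
def _deque_dedup_sketch():
    from collections import deque
    seen = defaultdict(lambda: deque(maxlen=50))
    def is_new(query, item_id):
        if item_id in seen[query]:
            return False
        seen[query].append(item_id)
        return True
    return is_new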
| agpl-3.0 | -5,095,552,625,823,184,000 | 32.932432 | 76 | 0.414178 | false | 5.001992 | false | false | false |
jianhuashao/WebDownloadJobsManage | server/db_insert_jobs_settings.py | 1 | 3335 | from pymongo import MongoClient
from datetime import datetime
mongodb_url = 'mongodb://192.168.0.30:27017/'
mongodb_url = 'mongodb://127.0.0.1:27017/'
client = MongoClient(mongodb_url)
db = client['web_jobs_server']
db = client['test_web_jobs_server']
print "** DB Collections: ", db.collection_names()
collection = db['jobs_settings']
print collection
job_settings = [
#######
{'job_target':'appid_to_asin', 'settings_key':'web_access_interval', 'settings_value':1000},
{'job_target':'appid_to_asin', 'settings_key':'client_job_request_count', 'settings_value':10},
{'job_target':'appid_to_asin', 'settings_key':'connection_try_max', 'settings_value':10},
########
{'job_target':'thingiverse', 'settings_key':'web_access_interval', 'settings_value':1000},
{'job_target':'thingiverse', 'settings_key':'client_job_request_count', 'settings_value':10},
{'job_target':'thingiverse', 'settings_key':'connection_try_max', 'settings_value':10},
########
{'job_target':'topsy_fortuner_tops', 'settings_key':'web_access_interval', 'settings_value':1000},
{'job_target':'topsy_fortuner_tops', 'settings_key':'client_job_request_count', 'settings_value':10},
{'job_target':'topsy_fortuner_tops', 'settings_key':'connection_try_max', 'settings_value':10},
########
{'job_target':'topsy_fortuner_tops_full', 'settings_key':'web_access_interval', 'settings_value':1000},
{'job_target':'topsy_fortuner_tops_full', 'settings_key':'client_job_request_count', 'settings_value':10},
{'job_target':'topsy_fortuner_tops_full', 'settings_key':'connection_try_max', 'settings_value':10},
########
{'job_target':'topsy_fortuner_twibes', 'settings_key':'web_access_interval', 'settings_value':1000},
{'job_target':'topsy_fortuner_twibes', 'settings_key':'client_job_request_count', 'settings_value':10},
{'job_target':'topsy_fortuner_twibes', 'settings_key':'connection_try_max', 'settings_value':10},
########
{'job_target':'appid_asin_pairs', 'settings_key':'web_access_interval', 'settings_value':1000},
{'job_target':'appid_asin_pairs', 'settings_key':'client_job_request_count', 'settings_value':10},
{'job_target':'appid_asin_pairs', 'settings_key':'connection_try_max', 'settings_value':10}
]
def job_settings_init():
for job in job_settings:
job_target = job['job_target']
settings_key = job['settings_key']
settings_value = job['settings_value']
create_date = str(datetime.now())
update_date = str(datetime.now())
job_setting_upsert(job_target, settings_key, settings_value, create_date, update_date)
print "setting: ", job_target, settings_key, settings_value
## insert: only be used for fresh insert, as existing _id would cause duplicate insert and then error
## save: same as _update method, but would create collection if it is not exist
## consider with ejdb does not support custom _id, so I have to use upsert
def job_setting_upsert(job_target, settings_key, settings_value, create_date, update_date):
job = {
"job_target": job_target,
"settings_key":settings_key,
"settings_value": settings_value,
"create_date": create_date,
"update_date": update_date
}
j = db.jobs_settings.update({'job_target': job_target, 'settings_key': settings_key, }, {'$set':job}, upsert=True, multi=True)
print j
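# --- Illustrative note (added for clarity; not part of the original script) ---
# A hedged sketch of what the upsert above amounts to: documents are matched on
# the (job_target, settings_key) pair, and '$set' either updates the match or
# inserts a new document when nothing matches. The values are arbitrary.
def _upsert_example():
    db.jobs_settings.update(
        {'job_target': 'thingiverse', 'settings_key': 'web_access_interval'},
        {'$set': {'settings_value': 2000, 'update_date': str(datetime.now())}},
        upsert=True, multi=True)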
if __name__ == "__main__":
print "start"
job_settings_init()
#db.close()
print 'done' | apache-2.0 | -3,532,616,626,173,047,000 | 45.985915 | 127 | 0.692354 | false | 2.969724 | false | false | false |
sciunto/inforevealer | src/gui.py | 1 | 30322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gobject
import gtk
import action,pastebin
import gettext,os,sys
gettext.textdomain('inforevealer')
_ = gettext.gettext
__version__="0.5.1"
pixbuf=None
for icon_file in ['/usr/share/icons/hicolor/scalable/apps/inforevealer.svg','icons/inforevealer.svg']:
if os.path.isfile(icon_file):
pixbuf = gtk.gdk.pixbuf_new_from_file(icon_file)
ui_info ='''<ui>
<menubar name='MenuBar'>
<menu action='FileMenu'>
<menuitem action='Quit'/>
</menu>
<menu action='HelpMenu'>
<menuitem action='About'/>
</menu>
</menubar>
</ui>'''
class Application(gtk.Window):
def __init__(self, configfile, list_category, parent=None):
self.verbosity=False
self.configfile=configfile
self.check_list=list_category
self.category=None
self.dumpfile='/tmp/inforevealer'
try:
#Create an empty file (to be seen by the GUI)
foo = open(self.dumpfile, 'w')
foo.close()
except IOError:
sys.stderr.write("Error: Cannot open %s" % self.dumpfile)
# Create the toplevel window
gtk.Window.__init__(self)
self.set_icon(pixbuf)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect('destroy', lambda *w: gtk.main_quit())
self.set_title("Inforevealer") #FIXME
#self.set_default_size(200, 200)
self.set_position(gtk.WIN_POS_CENTER)
self.set_resizable(False)
merge = gtk.UIManager()
#self.set_data("ui-manager", merge)
merge.insert_action_group(self.__create_action_group(), 0)
#should be added to the top level window so that the Action accelerators can be used by your users
self.add_accel_group(merge.get_accel_group())
# Create Menu
try:
mergeid = merge.add_ui_from_string(ui_info)
except gobject.GError, msg:
print("building menus failed: %s" % msg)
bar = merge.get_widget("/MenuBar")
# Create TABLE
mainbox = gtk.VBox(False, 0)
self.add(mainbox)
#Add Menu into TABLE
mainbox.pack_start(bar, False, False, 0)
bar.show()
box1 = gtk.VBox(False, 4)
box1.set_border_width(10)
mainbox.pack_start(box1, False, False, 0)
#Add info
label = gtk.Label();
label.set_markup(_("Select one of the following category:"))
box1.pack_start(label, False, False, 0)
self.__create_radio_buttons(box1)
separator = gtk.HSeparator()
box1.pack_start(separator,False,False,0)
self.__create_option_menu(box1)
#buttons (bottom)
# Create TABLE
box2 = gtk.HBox(True, 0)
box1.pack_start(box2, False, False, 0)
#quit
bouton = gtk.Button(stock=gtk.STOCK_CLOSE)
bouton.connect("clicked", self.quit_prog,self, None)
box2.pack_start(bouton, True, True, 0)
bouton.show()
#apply
bouton = gtk.Button(stock=gtk.STOCK_APPLY)
bouton.connect("clicked", self.generate,self, None)
box2.pack_start(bouton, True, True, 0)
bouton.set_flags(gtk.CAN_DEFAULT)
bouton.grab_default()
bouton.show()
box2.show()
box1.show()
self.show_all()
def __create_option_menu(self,box):
frame = gtk.Expander(_("Options"))
box.pack_start(frame, True, True,0)
box2 = gtk.VBox(False, 0)
frame.add(box2)
box2.show()
#VERBOSE MODE
self.verbose_button = gtk.CheckButton(_("Verbose mode: add commands or files producing long output"))
#not connected, read it before using self.verbosity
box2.pack_start(self.verbose_button,True, True, 0)
self.verbose_button.show()
#FILECHOOSER
hbox = gtk.HBox(False, 0)
box2.pack_start(hbox,True, True, 0)
hbox.show()
#Add info
label = gtk.Label();
label.set_markup(_("Dumpfile: "))
hbox.pack_start(label, False, False, 10)
self.label = gtk.Label();
self.label.set_markup(self.dumpfile)
hbox.pack_start(self.label, False, False, 20)
button = gtk.Button(_('Modify'))
button.connect("clicked", self.opendumpfile)
hbox.pack_end(button, False, False, 20)
button.show()
#ENDFILECHOOSER
frame.show()
def opendumpfile(self,w):
""" Open the dumpfile"""
filechooser = FileDialog()
self.dumpfile=filechooser.get_filename(action='save')
if self.dumpfile==None:
self.dumpfile = "/tmp/inforevealer"
self.label.set_text(self.dumpfile)
def __create_radio_buttons(self,box):
""" Create the category list """
first=True
for item in self.check_list:
tmphbox= gtk.HBox(False, 0)
#Radiobutton
if first:
button = gtk.RadioButton(group=None, label=None)
self.category=item
else:
button = gtk.RadioButton(group=button, label=None)
button.connect("toggled", self.callback_radio_buttons, item)
tmphbox.pack_start(button,False,False,0)
#Label
text_label = "<b>"+str(item)+"</b> "+ str(self.check_list[item])
tmplabel= gtk.Label();
tmplabel.set_markup(text_label)
tmphbox.pack_start(tmplabel,False,False,0)
box.pack_start(tmphbox, True, True, 0)
button.show()
first=False
def callback_radio_buttons(self,widget,data=None):
""" Get the selected radio button """
if widget.get_active():
self.category=data
def __create_action_group(self):
""" Create the top menu entry """
# GtkActionEntry
entries = (
( "FileMenu", None, _("File") ), # name, stock id, label
( "HelpMenu", None, _("Help") ), # name, stock id, label
( "Quit", gtk.STOCK_QUIT, # name, stock id
_("Quit"), "", # label, accelerator
"Quit", # tooltip
self.activate_action ),
( "About", gtk.STOCK_ABOUT, # name, stock id
_("About"), "", # label, accelerator
"About", # tooltip
self.activate_about ),
);
# Create the menubar and toolbar
action_group = gtk.ActionGroup("AppWindowActions")
action_group.add_actions(entries)
return action_group
def activate_about(self, action):
""" About dialog """
license="""
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
"""
dialog = gtk.AboutDialog()
dialog.set_logo(pixbuf)
dialog.set_license(license)
dialog.set_name("Inforevealer") #FIXME
dialog.set_copyright("\302\251 Copyright 2010 Francois Boulogne")
dialog.set_website("http://github.com/sciunto/inforevealer")
## Close dialog on user response
dialog.connect("response", lambda d, r: d.destroy())
dialog.show()
def activate_action(self, action):
dialog = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE,
'You activated action: "%s" of type "%s"' % (action.get_name(), type(action)))
# Close dialog on user response
dialog.connect("response", lambda d, r: d.destroy())
dialog.show()
def generate(self,widget,evnmt,data=None):
""" Do the work """
tmp_configfile="/tmp/inforevealer_tmp.conf" #tmp configuration file (substitute)
self.verbosity = self.verbose_button.get_active()
action.action(self.category,self.dumpfile,self.configfile,tmp_configfile,self.verbosity,gui=True)
TextViewer(self.dumpfile)#open a new window with the result.
def quit_prog(self,widget,evnmt,data=None):
""" Quit the software """
gtk.main_quit()
class TextViewer:
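# Read-only viewer for the generated report, with clipboard copy and pastebin upload.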
def change_editable(self, case, textview):
textview.set_editable(case.get_active())
def change_curseur_visible(self, case, textview):
textview.set_cursor_visible(case.get_active())
def quit_prog(self, widget):
self.fenetre.destroy()
#gtk.main_quit()
def __init__(self,output_file):
self.output=output_file
self.fenetre = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.fenetre.set_icon(pixbuf)
self.fenetre.set_resizable(True)
self.fenetre.set_position(gtk.WIN_POS_CENTER)
self.fenetre.set_default_size(600, 400)
self.fenetre.connect("destroy", self.quit_prog)
self.fenetre.set_title("Inforevealer") #FIXME
self.fenetre.set_border_width(0)
boite1 = gtk.VBox(False, 0)
self.fenetre.add(boite1)
boite1.show()
boite2 = gtk.VBox(False, 10)
boite2.set_border_width(10)
boite1.pack_start(boite2, True, True, 0)
boite2.show()
#Add info
label = gtk.Label();
output_string=_("The following report is availlable in %s") %str(self.output)
label.set_markup(output_string)
label.show()
boite2.pack_start(label,False,False,0)
# TEXT BOX
fd = gtk.ScrolledWindow()
fd.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
textview = gtk.TextView()
textview.set_editable(False)
buffertexte = textview.get_buffer()
fd.add(textview)
fd.show()
textview.show()
textview.set_cursor_visible(False)
boite2.pack_start(fd)
#load file
try:
fichier = open(self.output, "r")
self.text = fichier.read()
fichier.close()
buffertexte.set_text(self.text)
except IOError:
sys.stderr.write("Error: Cannot open %s\n" % self.output)
#END TEXTBOX
# PASTEBIN
boiteH = gtk.HBox(True,0)
boite2.pack_start(boiteH, False, False, 0)
boiteH.show()
label = gtk.Label();
label.set_markup(_("Send the report on pastebin "))
label.show()
boiteH.pack_start(label,True,False,0)
self.pastebin_list = pastebin.preloadPastebins()
self.combobox = gtk.combo_box_new_text()
self.website=list()
boiteH.pack_start(self.combobox, True, False, 0)
for k in self.pastebin_list:
self.combobox.append_text(k)
self.website.append(k)
self.combobox.set_active(0)
self.combobox.show()
bouton = gtk.Button(_("Send"))
bouton.connect("clicked", self.send_pastebin)
bouton.show()
boiteH.pack_start(bouton, True, False, 0)
#END PASTEBIN
# BUTTONS (close...)
boiteH = gtk.HBox(True,0)
boite2.pack_start(boiteH, False, False, 0)
boiteH.show()
bouton = gtk.Button(_("Copy to clipboard"))
bouton.connect("clicked", self.copy_clipboard)
boiteH.pack_start(bouton, False, False, 0)
bouton.show()
bouton = gtk.Button(stock=gtk.STOCK_CLOSE)
bouton.connect("clicked", self.quit_prog)
boiteH.pack_start(bouton, False, False, 0)
bouton.set_flags(gtk.CAN_DEFAULT)
#bouton.grab_default()
bouton.show()
self.fenetre.show()
def copy_clipboard(self,widget):
""" Copy self.text in clipboard """
clipb = gtk.Clipboard()
clipb.set_text(self.text, len=-1)
def send_pastebin(self, widget): #IMPROVEME : Design + clipboard ?
""" Send the content on pastebin """
link = "http://" + self.website[self.combobox.get_active()]+"/"
link=pastebin.sendFileContent(self.output,title=None,website=link,version=None)
message = _("File sent on\n%s") %link
md = gtk.MessageDialog(None,
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
gtk.BUTTONS_CLOSE, message)
md.set_title(_("Pastebin link"))
md.run()
md.destroy()
def yesNoDialog(title=" ",question="?"):
'''
returns True if yes
False if no
#inspired from http://www.daa.com.au/pipermail/pygtk/2002-June/002962.html
'''
#create window+ Vbox + question
window=gtk.Window()
window.set_position(gtk.WIN_POS_CENTER)
window.set_icon(pixbuf)
window.set_title(title)
vbox = gtk.VBox(True, 0)
window.add(vbox)
label = gtk.Label();
label.set_markup(question)
vbox.pack_start(label, False, False, 0)
hbox = gtk.HButtonBox()
vbox.pack_start(hbox, False, False, 0)
def delete_event(widget, event, window):
window.callback_return=-1
return False
window.connect("delete_event", delete_event, window)
def callback(widget, data):
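# Hide the dialog and record which button was clicked (True for Yes, False for No).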
window=data[0]
data=data[1]
window.hide()
window.callback_return=data
yes = gtk.Button(stock=gtk.STOCK_YES)
yes.set_flags(gtk.CAN_DEFAULT)
window.set_default(yes)
yes.connect("clicked", callback, (window, True))
hbox.pack_start(yes)
no = gtk.Button(stock=gtk.STOCK_NO)
no.connect("clicked", callback, (window, False))
hbox.pack_start(no)
window.set_modal(True)
window.show_all()
window.callback_return=None
while window.callback_return is None:
gtk.main_iteration(True) # block until event occurs
return window.callback_return
def askPassword(title=" ",question="?"):
""" Dialog box for a password.
Return the password
return false if the dialog is closed"""
#create window+ Vbox + question
window=gtk.Window()
window.set_position(gtk.WIN_POS_CENTER)
window.set_icon(pixbuf)
window.set_title(title)
vbox = gtk.VBox(True, 0)
window.add(vbox)
label = gtk.Label();
label.set_markup(question)
vbox.pack_start(label, False, False, 0)
def delete_event(widget, event, window):
window.callback_return=False
return False
window.connect("delete_event", delete_event, window)
def callback(widget,data):
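# Hide the dialog and return the entered password.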
window=data[0]
window.hide()
window.callback_return=pword.get_text()
# Message for the window
pword = gtk.Entry()
pword.set_visibility(False)
pword.set_activates_default(True)
vbox.pack_start(pword, False, False, 0)
hbox = gtk.HButtonBox()
vbox.pack_start(hbox, False, False, 0)
# OK button
but = gtk.Button(stock=gtk.STOCK_OK)
but.set_flags(gtk.CAN_DEFAULT)
window.set_default(but)
hbox.add(but)
but.connect("clicked", callback, (window,True))
window.set_modal(True)
window.show_all()
window.callback_return=None
while window.callback_return is None:
gtk.main_iteration(True) # block until event occurs
return window.callback_return
class FileDialog(object):
"""Handle a pair of file dialogs (open and save).
Useful to keep the selected filename sync'ed between both
dialogs. Eliminates redundant code too.
"""
def __init__(self):
self.filename = None
def get_filename(self, action='open'):
"""Run a dialog and return a filename or None.
Valid actions are 'open' and 'save'.
"""
# I used to create the dialogs only once (on object
# initialization) and hide and show them, but I could not
# manage to pre-select a filename after a dialog had been
# used once. I guess file chooser dialogs are just throwaway
# objects. Thus, recreate them every time.
if action == 'open':
chooser = gtk.FileChooserDialog(
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_title(_('Open file:'))
elif action == 'save':
chooser = gtk.FileChooserDialog(
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_title(_('Save as:'))
else:
raise Exception("action must be 'open' or 'save' (got '%s')"
% action)
if self.filename:
chooser.select_filename(self.filename)
response = chooser.run()
filename = chooser.get_filename()
chooser.destroy()
# By default, the GTK loop would wait until the process is
# idle to process events. Now, it is very probable that file
# I/O will be performed right after this method call and that
# would delay hiding the dialog until I/O are done. So,
# process pending events to hide the dialog right now.
while gtk.events_pending():
gtk.main_iteration(False)
if response == gtk.RESPONSE_OK:
self.filename = filename
return filename
else:
return None
def main(configfile,list):
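""" Launch the application window and enter the GTK main loop. """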
Application(configfile,list)
gtk.main()
| gpl-2.0 | -8,560,852,772,869,041,000 | 35.40096 | 103 | 0.731416 | false | 3.525404 | false | false | false |
deka108/meas_deka | meas/development_settings.py | 1 | 7367 |
"""
# Name: meas/settings.py
# Description:
# Created by: Phuc Le-Sanh
# Date Created: Oct 10 2016
# Last Modified: Nov 23 2016
# Modified by: Phuc Le-Sanh
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'g(yej*3clhw8mh1lge2jd*f7h0uam9exedd$ya50n-^n1#p2(9'
INTERNAL_IPS = ['127.0.0.1']
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'fontawesome',
'autofixture',
'ckeditor',
'rest_framework',
'rest_framework.authtoken',
'djoser',
# Web application
'webapp.apps.WebAppConfig',
# Models
'meas_models.apps.MeasModelsConfig',
# CMS
'cms.apps.CmsConfig',
# Common
'meas_common.apps.MeasCommonConfig',
# API
'api.apps.ApiConfig',
'apiv2.apps.Apiv2Config',
# Search
'search.apps.SearchConfig',
# Import Export
'import_export',
# Haystack
# 'drf-haystack',
'haystack',
# 'whoosh_index',
# Django-Extensions
'django_extensions',
'corsheaders',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
def show_toolbar(request):
return True
# django-debug-toolbar expects the callback inside DEBUG_TOOLBAR_CONFIG.
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': show_toolbar,
}
ROOT_URLCONF = 'meas.urls'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
CORS_ALLOW_HEADERS = (
'accept',
'accept-encoding',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
'if-modified-since'
)
CSRF_TRUSTED_ORIGINS = (
'localhost:8080',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Whoosh
WHOOSH_INDEX = os.path.join(BASE_DIR, 'whoosh_index')
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': WHOOSH_INDEX,
},
}
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
)
WSGI_APPLICATION = 'meas.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'meas_development',
'USER': 'root',
'PASSWORD': '123456'
},
}
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
LOGOUT_REDIRECT_URL = '/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
CKEDITOR_CONFIGS = {
'default': {
'skin': 'moono',
# 'skin': 'office2013',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
{'name': 'document', 'items': [
'Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-',
'Templates']},
{'name': 'clipboard', 'items': [
'Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-',
'Undo', 'Redo']},
{'name': 'editing', 'items': [
'Find', 'Replace', '-', 'SelectAll']},
{'name': 'forms',
'items': ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea',
'Select', 'Button', 'ImageButton',
'HiddenField']},
'/',
{'name': 'basicstyles',
'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript',
'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList', '-', 'Outdent',
'Indent', '-', 'Blockquote', 'CreateDiv', '-',
'JustifyLeft', 'JustifyCenter', 'JustifyRight',
'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
'Language']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert',
'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley',
'SpecialChar', 'PageBreak', 'Iframe']},
'/',
{'name': 'styles', 'items': [
'Styles', 'Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
{'name': 'about', 'items': ['About']},
'/',
{'name': 'yourcustomtools', 'items': [
'Preview',
'Mathjax',
'Maximize',
]},
],
'toolbar': 'YourCustomToolbarConfig',
'toolbarGroups': [{'name': 'document', 'groups': ['mode', 'document',
'doctools']}],
'height': 291,
'width': '100%',
'filebrowserWindowHeight': 725,
'filebrowserWindowWidth': 940,
'toolbarCanCollapse': True,
'mathJaxLib': '//cdn.mathjax.org/mathjax/latest/' +
'MathJax.js?config=TeX-MML-AM_CHTML',
'tabSpaces': 4,
'extraPlugins': ','.join(
[
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
'devtools',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath',
'mathjax'
]),
}
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',),
'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
}
| apache-2.0 | 2,176,164,673,449,346,000 | 25.692029 | 77 | 0.542826 | false | 3.677983 | true | false | false |